Merge branch '3.0' into feature/TD-14481-3.0

Cary Xu 2022-05-28 23:01:10 +08:00
commit 046c6d5cf9
119 changed files with 1887 additions and 7710 deletions

View File

@ -49,7 +49,7 @@ IF(${TD_WINDOWS})
option(
BUILD_TEST
"If build unit tests using googletest"
OFF
ON
)
ELSE ()

View File

@ -26,14 +26,17 @@ extern "C" {
typedef struct SQnode SQnode;
typedef struct {
int64_t numOfStartTask;
int64_t numOfStopTask;
int64_t numOfRecvedFetch;
int64_t numOfSentHb;
int64_t numOfSentFetch;
int64_t numOfTaskInQueue;
int64_t numOfProcessedQuery;
int64_t numOfProcessedCQuery;
int64_t numOfProcessedFetch;
int64_t numOfProcessedDrop;
int64_t memSizeInCache;
int64_t dataSizeSend;
int64_t dataSizeRecv;
int64_t numOfQueryInQueue;
int64_t numOfFetchInQueue;
int64_t numOfErrors;
int64_t waitTimeInQueryQUeue;
int64_t waitTimeInFetchQUeue;
} SQnodeLoad;
typedef struct {
@ -71,10 +74,10 @@ int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad);
* @param pQnode The qnode object.
* @param pMsg The request message
*/
int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg);
int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_QNODE_H_*/
#endif /*_TD_QNODE_H_*/
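The expanded SQnodeLoad above, together with qndGetLoad(), lets a monitor read queue depth and accumulated wait time from a qnode; the implementation later in this diff fills these fields from the message-callback queue-size hooks and qWorkerGetWaitTimeInQueue. A minimal sketch of such a caller, using only the names from this header (the surrounding monitor plumbing is assumed):

#include <inttypes.h>   // PRId64
#include <stdio.h>
#include "qnode.h"      // SQnode, SQnodeLoad, qndGetLoad

static void reportQnodeLoad(SQnode *pQnode) {
  SQnodeLoad load = {0};
  if (qndGetLoad(pQnode, &load) != 0) return;   // 0 means success
  // fields added by this change: per-queue depth plus accumulated wait time
  printf("queryQ:%" PRId64 " fetchQ:%" PRId64 " waitQuery:%" PRId64 " waitFetch:%" PRId64 "\n",
         load.numOfQueryInQueue, load.numOfFetchInQueue,
         load.waitTimeInQueryQUeue, load.waitTimeInFetchQUeue);
}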

View File

@ -23,6 +23,9 @@ extern "C" {
#include "function.h"
#include "querynodes.h"
#define FUNC_AGGREGATE_UDF_ID 5001
#define FUNC_SCALAR_UDF_ID 5002
typedef enum EFunctionType {
// aggregate function
FUNCTION_TYPE_APERCENTILE = 1,
@ -126,21 +129,12 @@ typedef enum EFunctionType {
struct SqlFunctionCtx;
struct SResultRowEntryInfo;
struct STimeWindow;
struct SCatalog;
typedef struct SFmGetFuncInfoParam {
struct SCatalog* pCtg;
void* pRpc;
const SEpSet* pMgmtEps;
char* pErrBuf;
int32_t errBufLen;
} SFmGetFuncInfoParam;
int32_t fmFuncMgtInit();
void fmFuncMgtDestroy();
int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc);
int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
bool fmIsBuiltinFunc(const char* pFunc);

View File

@ -52,22 +52,24 @@ typedef struct {
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb);
int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
void qWorkerDestroy(void **qWorkerMgmt);
int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type);
#ifdef __cplusplus
}
#endif
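Every qWorkerProcess*Msg entry point now takes an extra int64_t timestamp, which the worker uses to account how long a request waited in its queue (exposed via the new qWorkerGetWaitTimeInQueue). A hedged sketch of a dispatcher forwarding that timestamp, modeled on the mnode/qnode/vnode call sites later in this diff; the header name for SQueueInfo and the helper name are assumptions:

#include "qworker.h"   // qWorkerProcessQueryMsg and friends
#include "tqueue.h"    // SQueueInfo (assumed header; see the queue change below)

// forward the enqueue time so the worker can account wait-in-queue duration
static int32_t dispatchToQueryWorker(void *pNode, void *pQueryMgmt, SQueueInfo *pInfo, SRpcMsg *pMsg) {
  return qWorkerProcessQueryMsg(pNode, pQueryMgmt, pMsg, pInfo->timestamp);
}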

View File

@ -69,6 +69,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0027)
#define TSDB_CODE_NEED_RETRY TAOS_DEF_ERROR_CODE(0, 0x0028)
#define TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0029)
#define TSDB_CODE_INVALID_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x0030)
#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040)
#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0041)
@ -655,7 +656,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_FUNC_FUNTION_PARA_NUM TAOS_DEF_ERROR_CODE(0, 0x2801)
#define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802)
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
#define TSDB_CODE_FUNC_INVALID_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
//udf
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)

View File

@ -46,6 +46,7 @@ typedef struct {
void *ahandle;
int32_t workerId;
int32_t threadNum;
int64_t timestamp;
} SQueueInfo;
typedef enum {
@ -80,7 +81,7 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle);
void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue);
int32_t taosGetQueueNumber(STaosQset *qset);
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp);
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp);
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp);
void taosResetQsetThread(STaosQset *qset, void *pItem);
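With the new int64_t *ts out-parameter, taosReadQitemFromQset reports when the dequeued item was enqueued, and the new SQueueInfo.timestamp field carries that value into the item handler. A rough sketch of a worker loop wiring the two together; the return-value convention and the FItem callback shape are assumptions based on how the declarations above are used elsewhere in this diff:

void      *msg = NULL, *ahandle = NULL;
FItem      fp = NULL;
int64_t    ts = 0;
SQueueInfo qinfo = {0};

// assumption: a positive return value means one item was dequeued
if (taosReadQitemFromQset(qset, &msg, &ts, &ahandle, &fp) > 0) {
  qinfo.ahandle   = ahandle;
  qinfo.timestamp = ts;   // new field: when this item was enqueued
  (*fp)(&qinfo, msg);     // handler receives the timestamp via SQueueInfo
}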

View File

@ -1249,6 +1249,8 @@ void resetConnectDB(STscObj* pTscObj) {
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4) {
assert(pResultInfo != NULL && pRsp != NULL);
taosMemoryFreeClear(pResultInfo->pRspMsg);
pResultInfo->pRspMsg = (const char*)pRsp;
pResultInfo->pData = (void*)pRsp->data;
pResultInfo->numOfRows = htonl(pRsp->numOfRows);

View File

@ -611,6 +611,7 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity)
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i);
pCol->hasNull = true;
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
size_t metaSize = capacity * sizeof(int32_t);
@ -1292,8 +1293,8 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) {
static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n));
memset(&pColInfoData->varmeta.offset[total - n - 1], 0, n);
memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t));
memset(&pColInfoData->varmeta.offset[total - n], 0, n);
} else {
int32_t bytes = pColInfoData->info.bytes;
memmove(pColInfoData->pData, ((char*)pColInfoData->pData + n * bytes), (total - n) * bytes);

View File

@ -521,10 +521,10 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY) {
newColData = taosMemoryCalloc(1, charLen + 1);
memcpy(newColData, varDataVal(inputData), charLen);
bool ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight);
int32_t ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight);
if (ret != TSDB_CODE_SUCCESS) {
taosMemoryFree(newColData);
return ret;
return TSDB_CODE_INVALID_TIMESTAMP;
}
taosMemoryFree(newColData);
} else if (type == TSDB_DATA_TYPE_NCHAR) {

View File

@ -16,7 +16,11 @@
#define _DEFAULT_SOURCE
#include "qmInt.h"
void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {}
void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {
SQnodeLoad qload = {0};
qndGetLoad(pMgmt->pQnode, &qload);
}
int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SMonQmInfo qmInfo = {0};

View File

@ -36,7 +36,7 @@ static void qmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
code = qmProcessGetMonitorInfoReq(pMgmt, pMsg);
break;
default:
code = qndProcessQueryMsg(pMgmt->pQnode, pMsg);
code = qndProcessQueryMsg(pMgmt->pQnode, pInfo->timestamp, pMsg);
break;
}

View File

@ -62,7 +62,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
dmProcessNetTestReq(pDnode, pRpc);
return;
} else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) {
qWorkerProcessFetchRsp(NULL, NULL, pRpc);
qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0);
return;
} else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) {
dmSetMnodeEpSet(&pDnode->data, pEpSet);

View File

@ -26,19 +26,19 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) {
mTrace("msg:%p, in query queue is processing", pMsg);
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg);
code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_QUERY_CONTINUE:
code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg);
code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_FETCH:
code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg);
code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_DROP_TASK:
code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg);
code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_QUERY_HEARTBEAT:
code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg);
code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
default:
terrno = TSDB_CODE_VND_APP_ERROR;

View File

@ -48,6 +48,10 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode));
}
tsem_post(&pMgmt->syncSem);
} else {
if (cbMeta.index - sdbGetApplyIndex(pMnode->pSdb) > 100) {
sdbWriteFile(pMnode->pSdb);
}
}
}
@ -71,33 +75,28 @@ int32_t mndSnapshotRead(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, void
SMnode *pMnode = pFsm->data;
mInfo("start to read snapshot from sdb");
int32_t code = sdbReadSnapshot(pMnode->pSdb, (SSdbIter **)ppIter, ppBuf, len);
if (code != 0) {
mError("failed to read snapshot from sdb since %s", terrstr());
} else {
if (*ppIter == NULL) {
mInfo("successfully to read snapshot from sdb");
}
}
// sdbStartRead
// sdbDoRead
// sdbStopRead
return code;
return 0;
}
int32_t mndSnapshotApply(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, char *pBuf, int32_t len) {
SMnode *pMnode = pFsm->data;
mndSetRestore(pMnode, false);
mInfo("start to apply snapshot to sdb, len:%d", len);
int32_t code = sdbApplySnapshot(pMnode->pSdb, pBuf, len);
if (code != 0) {
mError("failed to apply snapshot to sdb, len:%d", len);
} else {
mInfo("successfully to apply snapshot to sdb, len:%d", len);
mndSetRestore(pMnode, true);
}
// sdbStartWrite
// sdbDoWrite
mndSetRestore(pMnode, false);
mInfo("start to apply snapshot to sdb");
// sdbStopWrite
mInfo("successfully to apply snapshot to sdb");
mndSetRestore(pMnode, true);
// taosMemoryFree(pBuf);
return code;
return 0;
}
void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {

View File

@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME acctTest
COMMAND acctTest
)
if(NOT TD_WINDOWS)
add_test(
NAME acctTest
COMMAND acctTest
)
endif(NOT TD_WINDOWS)

View File

@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME funcTest
COMMAND funcTest
)
if(NOT TD_WINDOWS)
add_test(
NAME funcTest
COMMAND funcTest
)
endif(NOT TD_WINDOWS)

View File

@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME profileTest
COMMAND profileTest
)
if(NOT TD_WINDOWS)
add_test(
NAME profileTest
COMMAND profileTest
)
endif(NOT TD_WINDOWS)

View File

@ -492,7 +492,7 @@ TEST_F(MndTestSdb, 01_Write_Str) {
ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2);
ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1);
ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2 );
ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2);
sdbSetApplyIndex(pSdb, -1);
ASSERT_EQ(sdbGetApplyIndex(pSdb), -1);
ASSERT_EQ(mnode.insertTimes, 2);
@ -895,7 +895,35 @@ TEST_F(MndTestSdb, 01_Read_Str) {
ASSERT_EQ(code, TSDB_CODE_SDB_OBJ_CREATING);
}
{
SSdbIter *pReader = NULL;
SSdbIter *pWritter = NULL;
void *pBuf = NULL;
int32_t len = 0;
int32_t code = 0;
code = sdbStartRead(pSdb, &pReader);
ASSERT_EQ(code, 0);
code = sdbStartWrite(pSdb, &pWritter);
ASSERT_EQ(code, 0);
while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0) {
if (pBuf != NULL && len != 0) {
sdbDoWrite(pSdb, pWritter, pBuf, len);
taosMemoryFree(pBuf);
} else {
break;
}
}
sdbStopRead(pSdb, pReader);
sdbStopWrite(pSdb, pWritter, true);
}
ASSERT_EQ(sdbGetSize(pSdb, SDB_CONSUMER), 1);
ASSERT_EQ(sdbGetTableVer(pSdb, SDB_CONSUMER), 4);
sdbCleanup(pSdb);
ASSERT_EQ(mnode.insertTimes, 5);
ASSERT_EQ(mnode.deleteTimes, 5);
ASSERT_EQ(mnode.insertTimes, 9);
ASSERT_EQ(mnode.deleteTimes, 9);
}

View File

@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME showTest
COMMAND showTest
)
if(NOT TD_WINDOWS)
add_test(
NAME showTest
COMMAND showTest
)
endif(NOT TD_WINDOWS)

View File

@ -187,6 +187,7 @@ typedef struct SSdb {
typedef struct SSdbIter {
TdFilePtr file;
int64_t total;
char *name;
} SSdbIter;
typedef struct {
@ -380,8 +381,13 @@ SSdbRow *sdbAllocRow(int32_t objSize);
void *sdbGetRowObj(SSdbRow *pRow);
void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc);
int32_t sdbReadSnapshot(SSdb *pSdb, SSdbIter **ppIter, char **ppBuf, int32_t *len);
int32_t sdbApplySnapshot(SSdb *pSdb, char *pBuf, int32_t len);
int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter);
int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter);
int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len);
int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter);
int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply);
int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len);
const char *sdbTableName(ESdbType type);
void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper);
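The single-shot sdbReadSnapshot/sdbApplySnapshot pair is replaced by an explicit start/do/stop iterator API. A condensed sketch of copying one sdb snapshot through the new calls, mirroring the loop added to the MndTestSdb unit test earlier in this diff (error handling trimmed):

SSdbIter *pReader = NULL, *pWriter = NULL;
void     *pBuf = NULL;
int32_t   len = 0;

sdbStartRead(pSdb, &pReader);
sdbStartWrite(pSdb, &pWriter);
while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0 && pBuf != NULL && len > 0) {
  sdbDoWrite(pSdb, pWriter, pBuf, len);   // append the chunk to the write-side temp file
  taosMemoryFree(pBuf);
  pBuf = NULL;
}
sdbStopRead(pSdb, pReader);
sdbStopWrite(pSdb, pWriter, true);        // isApply=true: fsync, rename over sdb.data, reload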

View File

@ -71,6 +71,7 @@ void sdbCleanup(SSdb *pSdb) {
}
if (pSdb->tmpDir != NULL) {
taosRemoveDir(pSdb->tmpDir);
taosMemoryFreeClear(pSdb->tmpDir);
}

View File

@ -445,168 +445,167 @@ int32_t sdbDeploy(SSdb *pSdb) {
return 0;
}
static SSdbIter *sdbOpenIter(SSdb *pSdb) {
char datafile[PATH_MAX] = {0};
char tmpfile[PATH_MAX] = {0};
snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
snprintf(tmpfile, sizeof(tmpfile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP);
taosThreadMutexLock(&pSdb->filelock);
if (taosCopyFile(datafile, tmpfile) != 0) {
taosThreadMutexUnlock(&pSdb->filelock);
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to copy file %s to %s since %s", datafile, tmpfile, terrstr());
return NULL;
}
taosThreadMutexUnlock(&pSdb->filelock);
static SSdbIter *sdbCreateIter(SSdb *pSdb) {
SSdbIter *pIter = taosMemoryCalloc(1, sizeof(SSdbIter));
if (pIter == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pIter->file = taosOpenFile(tmpfile, TD_FILE_READ);
if (pIter->file == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to read file:%s since %s", tmpfile, terrstr());
char name[PATH_MAX + 100] = {0};
snprintf(name, sizeof(name), "%s%ssdb.data.%" PRIu64, pSdb->tmpDir, TD_DIRSEP, (uint64_t)pIter);
pIter->name = strdup(name);
if (pIter->name == NULL) {
taosMemoryFree(pIter);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
return pIter;
}
static void sdbCloseIter(SSdb *pSdb, SSdbIter *pIter) {
static void sdbCloseIter(SSdbIter *pIter) {
if (pIter == NULL) return;
if (pIter->file != NULL) {
taosCloseFile(&pIter->file);
pIter->file = NULL;
}
char tmpfile[PATH_MAX] = {0};
snprintf(tmpfile, sizeof(tmpfile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP);
taosRemoveFile(tmpfile);
if (pIter->name != NULL) {
taosRemoveFile(pIter->name);
taosMemoryFree(pIter->name);
pIter->name = NULL;
}
mInfo("sdbiter:%p, is closed, total:%" PRId64, pIter, pIter->total);
taosMemoryFree(pIter);
mInfo("sdbiter:%p, is closed", pIter);
}
static SSdbIter *sdbGetIter(SSdb *pSdb, SSdbIter **ppIter) {
SSdbIter *pIter = NULL;
if (ppIter != NULL) pIter = *ppIter;
if (pIter == NULL) {
pIter = sdbOpenIter(pSdb);
if (pIter != NULL) {
mInfo("sdbiter:%p, is created to read snapshot", pIter);
*ppIter = pIter;
} else {
mError("failed to create sdbiter to read snapshot since %s", terrstr());
*ppIter = NULL;
return NULL;
}
} else {
mInfo("sdbiter:%p, continue to read snapshot, total:%" PRId64, pIter, pIter->total);
}
return pIter;
}
int32_t sdbReadSnapshot(SSdb *pSdb, SSdbIter **ppIter, char **ppBuf, int32_t *len) {
SSdbIter *pIter = sdbGetIter(pSdb, ppIter);
int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) {
SSdbIter *pIter = sdbCreateIter(pSdb);
if (pIter == NULL) return -1;
char datafile[PATH_MAX] = {0};
snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
taosThreadMutexLock(&pSdb->filelock);
if (taosCopyFile(datafile, pIter->name) < 0) {
taosThreadMutexUnlock(&pSdb->filelock);
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to copy file %s to %s since %s", datafile, pIter->name, terrstr());
sdbCloseIter(pIter);
return -1;
}
taosThreadMutexUnlock(&pSdb->filelock);
pIter->file = taosOpenFile(pIter->name, TD_FILE_READ);
if (pIter->file == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to open file:%s since %s", pIter->name, terrstr());
sdbCloseIter(pIter);
return -1;
}
*ppIter = pIter;
mInfo("sdbiter:%p, is created to read snapshot, file:%s", pIter, pIter->name);
return 0;
}
int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter) {
sdbCloseIter(pIter);
return 0;
}
int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) {
int32_t maxlen = 100;
char *pBuf = taosMemoryCalloc(1, maxlen);
void *pBuf = taosMemoryCalloc(1, maxlen);
if (pBuf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
sdbCloseIter(pSdb, pIter);
sdbCloseIter(pIter);
return -1;
}
int32_t readlen = taosReadFile(pIter->file, pBuf, maxlen);
if (readlen < 0 || (readlen == 0 && errno != 0)) {
if (readlen < 0 || readlen > maxlen) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("sdbiter:%p, failed to read snapshot since %s, total:%" PRId64, pIter, terrstr(), pIter->total);
*ppBuf = NULL;
*len = 0;
*ppIter = NULL;
sdbCloseIter(pSdb, pIter);
taosMemoryFree(pBuf);
return -1;
} else if (readlen == 0) {
mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total);
*ppBuf = NULL;
*len = 0;
*ppIter = NULL;
sdbCloseIter(pSdb, pIter);
taosMemoryFree(pBuf);
return 0;
} else if ((readlen < maxlen && errno != 0) || readlen == maxlen) {
} else { // (readlen <= maxlen)
pIter->total += readlen;
mInfo("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total);
*ppBuf = pBuf;
*len = readlen;
return 0;
} else if (readlen < maxlen && errno == 0) {
mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total);
*ppBuf = pBuf;
*len = readlen;
*ppIter = NULL;
sdbCloseIter(pSdb, pIter);
return 0;
} else {
// impossible
mError("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total);
*ppBuf = NULL;
*len = 0;
*ppIter = NULL;
sdbCloseIter(pSdb, pIter);
taosMemoryFree(pBuf);
return -1;
}
}
int32_t sdbApplySnapshot(SSdb *pSdb, char *pBuf, int32_t len) {
char datafile[PATH_MAX] = {0};
char tmpfile[PATH_MAX] = {0};
snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
snprintf(tmpfile, sizeof(tmpfile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP);
int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) {
SSdbIter *pIter = sdbCreateIter(pSdb);
if (pIter == NULL) return -1;
TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
pIter->file = taosOpenFile(pIter->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pIter->file == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to write %s since %s", tmpfile, terrstr());
mError("failed to open %s since %s", pIter->name, terrstr());
return -1;
}
int32_t writelen = taosWriteFile(pFile, pBuf, len);
*ppIter = pIter;
mInfo("sdbiter:%p, is created to write snapshot, file:%s", pIter, pIter->name);
return 0;
}
int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply) {
int32_t code = 0;
if (!isApply) {
sdbCloseIter(pIter);
mInfo("sdbiter:%p, not apply to sdb", pIter);
return 0;
}
taosFsyncFile(pIter->file);
taosCloseFile(&pIter->file);
pIter->file = NULL;
char datafile[PATH_MAX] = {0};
snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
if (taosRenameFile(pIter->name, datafile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("sdbiter:%p, failed to rename file %s to %s since %s", pIter, pIter->name, datafile, terrstr());
sdbCloseIter(pIter);
return -1;
}
sdbCloseIter(pIter);
if (sdbReadFile(pSdb) != 0) {
mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr());
return -1;
}
mInfo("sdbiter:%p, successfully applyed to sdb", pIter);
return 0;
}
int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) {
int32_t writelen = taosWriteFile(pIter->file, pBuf, len);
if (writelen != len) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to write %s since %s", tmpfile, terrstr());
taosCloseFile(&pFile);
return -1;
}
if (taosFsyncFile(pFile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to fsync %s since %s", tmpfile, terrstr());
taosCloseFile(&pFile);
return -1;
}
(void)taosCloseFile(&pFile);
if (taosRenameFile(tmpfile, datafile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to rename file %s to %s since %s", tmpfile, datafile, terrstr());
return -1;
}
if (sdbReadFile(pSdb) != 0) {
mError("failed to read from %s since %s", datafile, terrstr());
mError("failed to write len:%d since %s, total:%" PRId64, len, terrstr(), pIter->total);
return -1;
}
pIter->total += writelen;
mInfo("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total);
return 0;
}

View File

@ -40,37 +40,46 @@ void qndClose(SQnode *pQnode) {
taosMemoryFree(pQnode);
}
int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { return 0; }
int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) {
SMsgCb* pCb = &pQnode->msgCb;
int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg) {
pLoad->numOfQueryInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, QUERY_QUEUE);
pLoad->numOfFetchInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, FETCH_QUEUE);
pLoad->waitTimeInQueryQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, QUERY_QUEUE);
pLoad->waitTimeInFetchQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, FETCH_QUEUE);
return 0;
}
int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg) {
int32_t code = -1;
SReadHandle handle = {.pMsgCb = &pQnode->msgCb};
qTrace("message in qnode queue is processing");
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg);
code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_QUERY_CONTINUE:
code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg);
code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_FETCH:
code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_FETCH_RSP:
code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_CANCEL_TASK:
code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_DROP_TASK:
code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_CONSUME:
// code = tqProcessConsumeReq(pQnode->pTq, pMsg);
// break;
case TDMT_VND_QUERY_HEARTBEAT:
code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
default:
qError("unknown msg type:%d in qnode queue", pMsg->msgType);

View File

@ -191,9 +191,9 @@ int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg);
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_VND_QUERY_CONTINUE:
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg);
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@ -206,13 +206,16 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
switch (pMsg->msgType) {
case TDMT_VND_FETCH:
return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg);
return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_FETCH_RSP:
return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg);
return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_CANCEL_TASK:
return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg);
return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_DROP_TASK:
return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg);
return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_QUERY_HEARTBEAT:
return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_TABLE_META:
return vnodeGetTableMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
@ -231,9 +234,6 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
case TDMT_VND_TASK_RECOVER_RSP:
return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg);
case TDMT_VND_QUERY_HEARTBEAT:
return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg);
default:
vError("unknown msg type:%d in fetch queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;

View File

@ -71,6 +71,16 @@ void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
qDebug("empty db vgroup");
}
if (pResult->pDbInfo && taosArrayGetSize(pResult->pDbInfo) > 0) {
num = taosArrayGetSize(pResult->pDbInfo);
for (int32_t i = 0; i < num; ++i) {
SDbInfo *pDb = taosArrayGet(pResult->pDbInfo, i);
qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId);
}
} else {
qDebug("empty db info");
}
if (pResult->pTableHash && taosArrayGetSize(pResult->pTableHash) > 0) {
num = taosArrayGetSize(pResult->pTableHash);
for (int32_t i = 0; i < num; ++i) {
@ -127,6 +137,7 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps
SCatalogReq req = {0};
req.pTableMeta = taosArrayInit(2, sizeof(SName));
req.pDbVgroup = taosArrayInit(2, TSDB_DB_FNAME_LEN);
req.pDbInfo = taosArrayInit(2, TSDB_DB_FNAME_LEN);
req.pTableHash = taosArrayInit(2, sizeof(SName));
req.pUdf = taosArrayInit(2, TSDB_FUNC_NAME_LEN);
req.pDbCfg = taosArrayInit(2, TSDB_DB_FNAME_LEN);
@ -149,9 +160,11 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps
strcpy(dbFName, "1.db1");
taosArrayPush(req.pDbVgroup, dbFName);
taosArrayPush(req.pDbCfg, dbFName);
taosArrayPush(req.pDbInfo, dbFName);
strcpy(dbFName, "1.db2");
taosArrayPush(req.pDbVgroup, dbFName);
taosArrayPush(req.pDbCfg, dbFName);
taosArrayPush(req.pDbInfo, dbFName);
strcpy(funcName, "udf1");
taosArrayPush(req.pUdf, funcName);

View File

@ -41,7 +41,6 @@
namespace {
extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog* pCatalog, int32_t type);
extern "C" int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action);
extern "C" int32_t ctgdEnableDebug(char *option);
extern "C" int32_t ctgdGetStatNum(char *option, void *res);

View File

@ -560,8 +560,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
}
EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
if (pAggNode->pAggFuncs) {
EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
}
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->totalRowSize);
if (pAggNode->pGroupKeys) {
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);

View File

@ -2601,6 +2601,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI
pStart += sizeof(int32_t) * numOfRows;
if (colLen[i] > 0) {
taosMemoryFreeClear(pColInfoData->pData);
pColInfoData->pData = taosMemoryMalloc(colLen[i]);
}
} else {
@ -2758,6 +2759,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
pExchangeInfo->loadInfo.totalRows);
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
completed += 1;
taosMemoryFreeClear(pDataInfo->pRsp);
continue;
}
@ -2765,6 +2767,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
code = setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
if (code != 0) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
}
@ -2785,10 +2788,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
pDataInfo->status = EX_SOURCE_DATA_NOT_READY;
code = doSendFetchDataRequest(pExchangeInfo, pTaskInfo, i);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
}
}
taosMemoryFreeClear(pDataInfo->pRsp);
return pExchangeInfo->pResult;
}
@ -2890,7 +2895,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
pDataInfo->totalRows, pLoadInfo->totalRows);
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
pExchangeInfo->current += 1;
pExchangeInfo->current += 1;
taosMemoryFreeClear(pDataInfo->pRsp);
continue;
}
@ -2916,6 +2922,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
}
pOperator->resultInfo.totalRows += pRes->info.rows;
taosMemoryFreeClear(pDataInfo->pRsp);
return pExchangeInfo->pResult;
}
}
@ -4510,7 +4517,6 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else {
qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo));
}
SArray* tableIdList = extractTableIdList(pTableListInfo);
SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle,
tableIdList, pTableScanNode, pTaskInfo, &twSup, pTableScanNode->tsColId);
@ -4702,9 +4708,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
}
int32_t compareTimeWindow(const void* p1, const void* p2, const void* param) {
const SQueryTableDataCond *pCond = param;
const STimeWindow *pWin1 = p1;
const STimeWindow *pWin2 = p2;
const SQueryTableDataCond* pCond = param;
const STimeWindow* pWin1 = p1;
const STimeWindow* pWin2 = p2;
if (pCond->order == TSDB_ORDER_ASC) {
return pWin1->skey - pWin2->skey;
} else if (pCond->order == TSDB_ORDER_DESC) {
@ -4724,8 +4730,8 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
return terrno;
}
//pCond->twindow = pTableScanNode->scanRange;
//TODO: get it from stable scan node
// pCond->twindow = pTableScanNode->scanRange;
// TODO: get it from stable scan node
pCond->numOfTWindows = 1;
pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow));
pCond->twindows[0] = pTableScanNode->scanRange;
@ -4746,11 +4752,7 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
}
}
taosqsort(pCond->twindows,
pCond->numOfTWindows,
sizeof(STimeWindow),
pCond,
compareTimeWindow);
taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow);
pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER;
// pCond->type = pTableScanNode->scanFlag;
@ -4910,7 +4912,8 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
return pList;
}
int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, SNode* pTagCond) {
int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo,
SNode* pTagCond) {
int32_t code = TSDB_CODE_SUCCESS;
pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
@ -4921,12 +4924,12 @@ int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STa
SArray* res = taosArrayInit(8, sizeof(uint64_t));
code = doFilterTag(pTagCond, &metaArg, res);
if (code != TSDB_CODE_SUCCESS) {
qError("doFilterTag error:%d", code);
qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
taosArrayDestroy(res);
terrno = code;
return code;
} else {
qDebug("doFilterTag error:%d, suid: %" PRIu64 "", code, tableUid);
qDebug("sucess to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid);
}
for (int i = 0; i < taosArrayGetSize(res); i++) {
STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)};
@ -5197,5 +5200,5 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) {
if (bufSize <= pageSize) {
bufSize = pageSize * 4;
}
return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, "/tmp/");
return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, TD_TMP_DIR_PATH);
}

View File

@ -14,7 +14,7 @@ target_include_directories(
target_link_libraries(
function
PRIVATE os util common nodes scalar catalog qcom transport
PRIVATE os util common nodes scalar qcom transport
PUBLIC uv_a
)

View File

@ -44,9 +44,7 @@ extern "C" {
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)
#define FUNC_UDF_ID_START 5000
#define FUNC_AGGREGATE_UDF_ID 5001
#define FUNC_SCALAR_UDF_ID 5002
#define FUNC_UDF_ID_START 5000
extern const int funcMgtUdfNum;

View File

@ -746,7 +746,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
uint8_t resType;
if (IS_SIGNED_NUMERIC_TYPE(colType)) {
if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
resType = TSDB_DATA_TYPE_BIGINT;
} else {
resType = TSDB_DATA_TYPE_DOUBLE;

View File

@ -16,7 +16,6 @@
#include "functionMgt.h"
#include "builtins.h"
#include "catalog.h"
#include "functionMgtInt.h"
#include "taos.h"
#include "taoserror.h"
@ -65,35 +64,19 @@ static bool isSpecificClassifyFunc(int32_t funcId, uint64_t classification) {
return FUNC_MGT_TEST_MASK(funcMgtBuiltins[funcId].classification, classification);
}
static int32_t getUdfInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) {
SFuncInfo funcInfo = {0};
int32_t code = catalogGetUdfInfo(pParam->pCtg, pParam->pRpc, pParam->pMgmtEps, pFunc->functionName, &funcInfo);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
pFunc->funcType = FUNCTION_TYPE_UDF;
pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID;
pFunc->node.resType.type = funcInfo.outputType;
pFunc->node.resType.bytes = funcInfo.outputLen;
pFunc->udfBufSize = funcInfo.bufSize;
tFreeSFuncInfo(&funcInfo);
return TSDB_CODE_SUCCESS;
}
int32_t fmFuncMgtInit() {
taosThreadOnce(&functionHashTableInit, doInitFunctionTable);
return initFunctionCode;
}
int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) {
int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen) {
void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc->functionName, strlen(pFunc->functionName));
if (NULL != pVal) {
pFunc->funcId = *(int32_t*)pVal;
pFunc->funcType = funcMgtBuiltins[pFunc->funcId].type;
return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pParam->pErrBuf, pParam->errBufLen);
return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pMsg, msgLen);
}
return getUdfInfo(pParam, pFunc);
return TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION;
}
bool fmIsBuiltinFunc(const char* pFunc) {
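fmGetFuncInfo no longer resolves UDFs through the catalog: it only looks up built-in functions and otherwise returns TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION, leaving UDF resolution to the caller. A hedged sketch of the new calling pattern; pFunc is an SFunctionNode prepared by the parser, and the fallback branch is illustrative:

char    msg[256] = {0};
int32_t code = fmGetFuncInfo(pFunc, msg, sizeof(msg));
if (code == TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION) {
  // not a built-in: the caller is now responsible for resolving it as a UDF
  // (e.g. via the parser metadata cache, see getUdfInfoFromCache later in this diff)
} else if (code != TSDB_CODE_SUCCESS) {
  // msg holds the translate error reported for a built-in function
}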

View File

@ -92,10 +92,12 @@ target_link_libraries (idxJsonUT
index
)
add_test(
NAME idxtest
COMMAND idxTest
)
if(NOT TD_WINDOWS)
add_test(
NAME idxtest
COMMAND idxTest
)
endif(NOT TD_WINDOWS)
add_test(
NAME idxJsonUT
COMMAND idxJsonUT

View File

@ -272,7 +272,7 @@ void validateFst() {
}
delete m;
}
static std::string logDir = "/tmp/log";
static std::string logDir = TD_TMP_DIR_PATH "log";
static void initLog() {
const char* defaultLogFileNamePrefix = "taoslog";
@ -916,7 +916,7 @@ TEST_F(IndexEnv2, testIndexOpen) {
}
}
TEST_F(IndexEnv2, testEmptyIndexOpen) {
std::string path = "/tmp/test";
std::string path = TD_TMP_DIR_PATH "test";
if (index->Init(path) != 0) {
std::cout << "failed to init index" << std::endl;
exit(1);

View File

@ -2555,7 +2555,7 @@ static const char* jkSessionWindowTsPrimaryKey = "TsPrimaryKey";
static const char* jkSessionWindowGap = "Gap";
static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) {
const SSessionWindowNode * pNode = (const SSessionWindowNode*)pObj;
const SSessionWindowNode* pNode = (const SSessionWindowNode*)pObj;
int32_t code = tjsonAddObject(pJson, jkSessionWindowTsPrimaryKey, nodeToJson, pNode->pCol);
if (TSDB_CODE_SUCCESS == code) {
@ -2567,9 +2567,9 @@ static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) {
static int32_t jsonToSessionWindowNode(const SJson* pJson, void* pObj) {
SSessionWindowNode* pNode = (SSessionWindowNode*)pObj;
int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode **)&pNode->pCol);
int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode**)&pNode->pCol);
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode **)&pNode->pGap);
code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode**)&pNode->pGap);
}
return code;
}
@ -2796,6 +2796,150 @@ static int32_t jsonToDownstreamSourceNode(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkDatabaseOptionsBuffer = "Buffer";
static const char* jkDatabaseOptionsCachelast = "Cachelast";
static const char* jkDatabaseOptionsCompressionLevel = "CompressionLevel";
static const char* jkDatabaseOptionsDaysPerFileNode = "DaysPerFileNode";
static const char* jkDatabaseOptionsDaysPerFile = "DaysPerFile";
static const char* jkDatabaseOptionsFsyncPeriod = "FsyncPeriod";
static const char* jkDatabaseOptionsMaxRowsPerBlock = "MaxRowsPerBlock";
static const char* jkDatabaseOptionsMinRowsPerBlock = "MinRowsPerBlock";
static const char* jkDatabaseOptionsKeep = "Keep";
static const char* jkDatabaseOptionsPages = "Pages";
static const char* jkDatabaseOptionsPagesize = "Pagesize";
static const char* jkDatabaseOptionsPrecision = "Precision";
static const char* jkDatabaseOptionsReplica = "Replica";
static const char* jkDatabaseOptionsStrict = "Strict";
static const char* jkDatabaseOptionsWalLevel = "WalLevel";
static const char* jkDatabaseOptionsNumOfVgroups = "NumOfVgroups";
static const char* jkDatabaseOptionsSingleStable = "SingleStable";
static const char* jkDatabaseOptionsRetentions = "Retentions";
static const char* jkDatabaseOptionsSchemaless = "Schemaless";
static int32_t databaseOptionsToJson(const void* pObj, SJson* pJson) {
const SDatabaseOptions* pNode = (const SDatabaseOptions*)pObj;
int32_t code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsBuffer, pNode->buffer);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCachelast, pNode->cachelast);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCompressionLevel, pNode->compressionLevel);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkDatabaseOptionsDaysPerFileNode, nodeToJson, pNode->pDaysPerFile);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsDaysPerFile, pNode->daysPerFile);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsFsyncPeriod, pNode->fsyncPeriod);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMaxRowsPerBlock, pNode->maxRowsPerBlock);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMinRowsPerBlock, pNode->minRowsPerBlock);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkDatabaseOptionsKeep, pNode->pKeep);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPages, pNode->pages);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPagesize, pNode->pagesize);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsReplica, pNode->replica);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsStrict, pNode->strict);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsWalLevel, pNode->walLevel);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsNumOfVgroups, pNode->numOfVgroups);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSingleStable, pNode->singleStable);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkDatabaseOptionsRetentions, pNode->pRetentions);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSchemaless, pNode->schemaless);
}
return code;
}
static int32_t jsonToDatabaseOptions(const SJson* pJson, void* pObj) {
SDatabaseOptions* pNode = (SDatabaseOptions*)pObj;
int32_t code = tjsonGetIntValue(pJson, jkDatabaseOptionsBuffer, &pNode->buffer);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCachelast, &pNode->cachelast);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCompressionLevel, &pNode->compressionLevel);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkDatabaseOptionsDaysPerFileNode, (SNode**)&pNode->pDaysPerFile);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkDatabaseOptionsDaysPerFile, &pNode->daysPerFile);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkDatabaseOptionsFsyncPeriod, &pNode->fsyncPeriod);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkDatabaseOptionsMaxRowsPerBlock, &pNode->maxRowsPerBlock);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkDatabaseOptionsMinRowsPerBlock, &pNode->minRowsPerBlock);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkDatabaseOptionsKeep, &pNode->pKeep);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkDatabaseOptionsPages, &pNode->pages);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkDatabaseOptionsPagesize, &pNode->pagesize);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsReplica, &pNode->replica);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsStrict, &pNode->strict);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsWalLevel, &pNode->walLevel);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkDatabaseOptionsNumOfVgroups, &pNode->numOfVgroups);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSingleStable, &pNode->singleStable);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkDatabaseOptionsRetentions, &pNode->pRetentions);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSchemaless, &pNode->schemaless);
}
return code;
}
static const char* jkDataBlockDescDataBlockId = "DataBlockId";
static const char* jkDataBlockDescSlots = "Slots";
static const char* jkDataBlockTotalRowSize = "TotalRowSize";
@ -2998,6 +3142,130 @@ static int32_t jsonToSelectStmt(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkAlterDatabaseStmtDbName = "DbName";
static const char* jkAlterDatabaseStmtOptions = "Options";
static int32_t alterDatabaseStmtToJson(const void* pObj, SJson* pJson) {
const SAlterDatabaseStmt* pNode = (const SAlterDatabaseStmt*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkAlterDatabaseStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkAlterDatabaseStmtOptions, nodeToJson, pNode->pOptions);
}
return code;
}
static int32_t jsonToAlterDatabaseStmt(const SJson* pJson, void* pObj) {
SAlterDatabaseStmt* pNode = (SAlterDatabaseStmt*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkAlterDatabaseStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkAlterDatabaseStmtOptions, (SNode**)&pNode->pOptions);
}
return code;
}
static const char* jkAlterTableStmtDbName = "DbName";
static const char* jkAlterTableStmtTableName = "TableName";
static const char* jkAlterTableStmtAlterType = "AlterType";
static const char* jkAlterTableStmtColName = "ColName";
static const char* jkAlterTableStmtNewColName = "NewColName";
static const char* jkAlterTableStmtOptions = "Options";
static const char* jkAlterTableStmtNewDataType = "NewDataType";
static const char* jkAlterTableStmtNewTagVal = "NewTagVal";
static int32_t alterTableStmtToJson(const void* pObj, SJson* pJson) {
const SAlterTableStmt* pNode = (const SAlterTableStmt*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkAlterTableStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkAlterTableStmtTableName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkAlterTableStmtAlterType, pNode->alterType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkAlterTableStmtColName, pNode->colName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkAlterTableStmtNewColName, pNode->newColName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pOptions);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkAlterTableStmtNewDataType, dataTypeToJson, &pNode->dataType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pVal);
}
return code;
}
static int32_t jsonToAlterTableStmt(const SJson* pJson, void* pObj) {
SAlterTableStmt* pNode = (SAlterTableStmt*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkAlterTableStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkAlterTableStmtTableName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkAlterTableStmtAlterType, &pNode->alterType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkAlterTableStmtColName, pNode->colName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkAlterTableStmtNewColName, pNode->newColName);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pOptions);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, jkAlterTableStmtNewDataType, jsonToDataType, &pNode->dataType);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pVal);
}
return code;
}
static const char* jkAlterDnodeStmtDnodeId = "DnodeId";
static const char* jkAlterDnodeStmtConfig = "Config";
static const char* jkAlterDnodeStmtValue = "Value";
static int32_t alterDnodeStmtToJson(const void* pObj, SJson* pJson) {
const SAlterDnodeStmt* pNode = (const SAlterDnodeStmt*)pObj;
int32_t code = tjsonAddIntegerToObject(pJson, jkAlterDnodeStmtDnodeId, pNode->dnodeId);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtConfig, pNode->config);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtValue, pNode->value);
}
return code;
}
static int32_t jsonToAlterDnodeStmt(const SJson* pJson, void* pObj) {
SAlterDnodeStmt* pNode = (SAlterDnodeStmt*)pObj;
int32_t code = tjsonGetIntValue(pJson, jkAlterDnodeStmtDnodeId, &pNode->dnodeId);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkAlterDnodeStmtConfig, pNode->config);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkAlterDnodeStmtValue, pNode->value);
}
return code;
}
static const char* jkCreateTopicStmtTopicName = "TopicName";
static const char* jkCreateTopicStmtSubscribeDbName = "SubscribeDbName";
static const char* jkCreateTopicStmtIgnoreExists = "IgnoreExists";
@ -3082,6 +3350,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
break;
case QUERY_NODE_DOWNSTREAM_SOURCE:
return downstreamSourceNodeToJson(pObj, pJson);
case QUERY_NODE_DATABASE_OPTIONS:
return databaseOptionsToJson(pObj, pJson);
case QUERY_NODE_LEFT_VALUE:
return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to serialize.
case QUERY_NODE_SET_OPERATOR:
@ -3090,8 +3360,17 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return selectStmtToJson(pObj, pJson);
case QUERY_NODE_VNODE_MODIF_STMT:
case QUERY_NODE_CREATE_DATABASE_STMT:
break;
case QUERY_NODE_ALTER_DATABASE_STMT:
return alterDatabaseStmtToJson(pObj, pJson);
case QUERY_NODE_CREATE_TABLE_STMT:
break;
case QUERY_NODE_ALTER_TABLE_STMT:
return alterTableStmtToJson(pObj, pJson);
case QUERY_NODE_USE_DATABASE_STMT:
break;
case QUERY_NODE_ALTER_DNODE_STMT:
return alterDnodeStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_DATABASES_STMT:
case QUERY_NODE_SHOW_TABLES_STMT:
break;
@ -3198,12 +3477,20 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToSlotDescNode(pJson, pObj);
case QUERY_NODE_DOWNSTREAM_SOURCE:
return jsonToDownstreamSourceNode(pJson, pObj);
case QUERY_NODE_DATABASE_OPTIONS:
return jsonToDatabaseOptions(pJson, pObj);
case QUERY_NODE_LEFT_VALUE:
return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to deserialize.
case QUERY_NODE_SET_OPERATOR:
return jsonToSetOperator(pJson, pObj);
case QUERY_NODE_SELECT_STMT:
return jsonToSelectStmt(pJson, pObj);
case QUERY_NODE_ALTER_DATABASE_STMT:
return jsonToAlterDatabaseStmt(pJson, pObj);
case QUERY_NODE_ALTER_TABLE_STMT:
return jsonToAlterTableStmt(pJson, pObj);
case QUERY_NODE_ALTER_DNODE_STMT:
return jsonToAlterDnodeStmt(pJson, pObj);
case QUERY_NODE_CREATE_TOPIC_STMT:
return jsonToCreateTopicStmt(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_SCAN:

View File

@ -27,14 +27,13 @@ extern "C" {
#include "querynodes.h"
typedef struct SAstCreateContext {
SParseContext* pQueryCxt;
SMsgBuf msgBuf;
bool notSupport;
SNode* pRootNode;
int16_t placeholderNo;
SArray* pPlaceholderValues;
int32_t errCode;
SParseMetaCache* pMetaCache;
SParseContext* pQueryCxt;
SMsgBuf msgBuf;
bool notSupport;
SNode* pRootNode;
int16_t placeholderNo;
SArray* pPlaceholderValues;
int32_t errCode;
} SAstCreateContext;
typedef enum EDatabaseOptionType {
@ -75,7 +74,7 @@ typedef struct SAlterOption {
extern SToken nil_token;
int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt);
void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt);
SNode* createRawExprNode(SAstCreateContext* pCxt, const SToken* pToken, SNode* pNode);
SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const SToken* pEnd, SNode* pNode);

View File

@ -26,6 +26,7 @@ extern "C" {
int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery);
int32_t parse(SParseContext* pParseCxt, SQuery** pQuery);
int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery);
int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery);
int32_t translate(SParseContext* pParseCxt, SQuery* pQuery);
int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);

View File

@ -24,12 +24,12 @@ extern "C" {
#include "os.h"
#include "query.h"
#define parserFatal(param, ...) qFatal("PARSER: " param, __VA_ARGS__)
#define parserError(param, ...) qError("PARSER: " param, __VA_ARGS__)
#define parserWarn(param, ...) qWarn("PARSER: " param, __VA_ARGS__)
#define parserInfo(param, ...) qInfo("PARSER: " param, __VA_ARGS__)
#define parserDebug(param, ...) qDebug("PARSER: " param, __VA_ARGS__)
#define parserTrace(param, ...) qTrace("PARSER: " param, __VA_ARGS__)
#define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__)
#define parserError(param, ...) qError("PARSER: " param, ##__VA_ARGS__)
#define parserWarn(param, ...) qWarn("PARSER: " param, ##__VA_ARGS__)
#define parserInfo(param, ...) qInfo("PARSER: " param, ##__VA_ARGS__)
#define parserDebug(param, ...) qDebug("PARSER: " param, ##__VA_ARGS__)
#define parserTrace(param, ...) qTrace("PARSER: " param, ##__VA_ARGS__)
#define PK_TS_COL_INTERNAL_NAME "_rowts"
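The switch from __VA_ARGS__ to ##__VA_ARGS__ matters when a logging macro is invoked with a format string and no extra arguments: the ## pasting (a GNU extension) drops the trailing comma that would otherwise be left behind. A small standalone illustration, not project code:

#define logA(fmt, ...) printf("PARSER: " fmt, __VA_ARGS__)    // logA("done\n") expands to printf("PARSER: done\n", ) -> compile error
#define logB(fmt, ...) printf("PARSER: " fmt, ##__VA_ARGS__)  // logB("done\n") expands to printf("PARSER: done\n")   -> ok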
@ -42,7 +42,10 @@ typedef struct SParseMetaCache {
SHashObj* pTableMeta; // key is tbFName, element is STableMeta*
SHashObj* pDbVgroup; // key is dbFName, element is SArray<SVgroupInfo>*
SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo
SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo*
SHashObj* pDbInfo; // key is tbFName, element is SDbInfo*
SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass
SHashObj* pUdf; // key is funcName, element is SFuncInfo*
} SParseMetaCache;
int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
@ -62,12 +65,22 @@ int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache);
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
SParseMetaCache* pMetaCache);
int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache);
int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta);
int32_t getDBVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo);
int32_t getTableHashVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup);
int32_t getDBVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo);
int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup);
int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
int32_t* pTableNum);
int32_t getDBCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo);
int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo);
int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDb, AUTH_TYPE type,
bool* pPass);
int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo);
#ifdef __cplusplus
}
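These reserve*/get*FromCache pairs split metadata access into a collect phase (record what a statement needs in SParseMetaCache) and a translate phase (read it back once the catalog has filled the cache). A rough sketch of the two phases for a single table, using only names declared above; the SName preparation and the control flow between the phases are simplified assumptions:

// collect phase: remember that the statement references db.tbl
reserveTableMetaInCache(acctId, "db", "tbl", pMetaCache);
reserveTableVgroupInCache(acctId, "db", "tbl", pMetaCache);

// ... buildCatalogReq(), the async catalog lookup, and putMetaDataToCache() run in between ...

// translate phase: read the now-populated cache
STableMeta *pMeta = NULL;
SVgroupInfo vgroup = {0};
getTableMetaFromCache(pMetaCache, &name, &pMeta);     // name: an SName for db.tbl (assumed prepared)
getTableVgroupFromCache(pMetaCache, &name, &vgroup);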

View File

@ -38,7 +38,7 @@
SToken nil_token = {.type = TK_NK_NIL, .n = 0, .z = NULL};
int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
memset(pCxt, 0, sizeof(SAstCreateContext));
pCxt->pQueryCxt = pParseCxt;
pCxt->msgBuf.buf = pParseCxt->pMsg;
@ -48,13 +48,6 @@ int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt)
pCxt->placeholderNo = 0;
pCxt->pPlaceholderValues = NULL;
pCxt->errCode = TSDB_CODE_SUCCESS;
if (pParseCxt->async) {
pCxt->pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache));
if (NULL == pCxt->pMetaCache) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return TSDB_CODE_SUCCESS;
}
static void copyStringFormStringToken(SToken* pToken, char* pBuf, int32_t len) {
@ -472,13 +465,6 @@ SNode* createRealTableNode(SAstCreateContext* pCxt, SToken* pDbName, SToken* pTa
strncpy(realTable->table.tableAlias, pTableName->z, pTableName->n);
}
strncpy(realTable->table.tableName, pTableName->z, pTableName->n);
if (NULL != pCxt->pMetaCache) {
if (TSDB_CODE_SUCCESS != reserveTableMetaInCache(pCxt->pQueryCxt->acctId, realTable->table.dbName,
realTable->table.tableName, pCxt->pMetaCache)) {
nodesDestroyNode(realTable);
CHECK_OUT_OF_MEM(NULL);
}
}
return (SNode*)realTable;
}

View File

@ -13,11 +13,12 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "functionMgt.h"
#include "os.h"
#include "parInt.h"
#include "parAst.h"
#include "parInt.h"
#include "parToken.h"
#include "systable.h"
typedef void* (*FMalloc)(size_t);
typedef void (*FFree)(void*);
@ -82,8 +83,386 @@ abort_parse:
(*pQuery)->pRoot = cxt.pRootNode;
(*pQuery)->placeholderNum = cxt.placeholderNo;
TSWAP((*pQuery)->pPlaceholderValues, cxt.pPlaceholderValues);
TSWAP((*pQuery)->pMetaCache, cxt.pMetaCache);
}
taosArrayDestroy(cxt.pPlaceholderValues);
return cxt.errCode;
}
typedef struct SCollectMetaKeyCxt {
SParseContext* pParseCxt;
SParseMetaCache* pMetaCache;
} SCollectMetaKeyCxt;
static void destroyCollectMetaKeyCxt(SCollectMetaKeyCxt* pCxt) {
if (NULL != pCxt->pMetaCache) {
// TODO
}
}
typedef struct SCollectMetaKeyFromExprCxt {
SCollectMetaKeyCxt* pComCxt;
int32_t errCode;
} SCollectMetaKeyFromExprCxt;
static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt);
static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) {
if (fmIsBuiltinFunc(pFunc->functionName)) {
return TSDB_CODE_SUCCESS;
}
return reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
}
static EDealRes collectMetaKeyFromRealTable(SCollectMetaKeyFromExprCxt* pCxt, SRealTableNode* pRealTable) {
pCxt->errCode = reserveTableMetaInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName,
pRealTable->table.tableName, pCxt->pComCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == pCxt->errCode) {
pCxt->errCode = reserveTableVgroupInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName,
pRealTable->table.tableName, pCxt->pComCxt->pMetaCache);
}
if (TSDB_CODE_SUCCESS == pCxt->errCode) {
pCxt->errCode = reserveUserAuthInCache(pCxt->pComCxt->pParseCxt->acctId, pCxt->pComCxt->pParseCxt->pUser,
pRealTable->table.dbName, AUTH_TYPE_READ, pCxt->pComCxt->pMetaCache);
}
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
static EDealRes collectMetaKeyFromTempTable(SCollectMetaKeyFromExprCxt* pCxt, STempTableNode* pTempTable) {
pCxt->errCode = collectMetaKeyFromQuery(pCxt->pComCxt, pTempTable->pSubquery);
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
static EDealRes collectMetaKeyFromExprImpl(SNode* pNode, void* pContext) {
SCollectMetaKeyFromExprCxt* pCxt = pContext;
switch (nodeType(pNode)) {
case QUERY_NODE_FUNCTION:
return collectMetaKeyFromFunction(pCxt, (SFunctionNode*)pNode);
case QUERY_NODE_REAL_TABLE:
return collectMetaKeyFromRealTable(pCxt, (SRealTableNode*)pNode);
case QUERY_NODE_TEMP_TABLE:
return collectMetaKeyFromTempTable(pCxt, (STempTableNode*)pNode);
default:
break;
}
return DEAL_RES_CONTINUE;
}
static int32_t collectMetaKeyFromExprs(SCollectMetaKeyCxt* pCxt, SNodeList* pList) {
SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
nodesWalkExprs(pList, collectMetaKeyFromExprImpl, &cxt);
return cxt.errCode;
}
static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOperator* pStmt) {
int32_t code = collectMetaKeyFromQuery(pCxt, pStmt->pLeft);
if (TSDB_CODE_SUCCESS == code) {
code = collectMetaKeyFromQuery(pCxt, pStmt->pRight);
}
if (TSDB_CODE_SUCCESS == code) {
code = collectMetaKeyFromExprs(pCxt, pStmt->pOrderByList);
}
return code;
}
static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) {
SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt);
return cxt.errCode;
}
static int32_t collectMetaKeyFromCreateTable(SCollectMetaKeyCxt* pCxt, SCreateTableStmt* pStmt) {
if (NULL == pStmt->pTags) {
return reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
} else {
return reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
}
}
static int32_t collectMetaKeyFromCreateMultiTable(SCollectMetaKeyCxt* pCxt, SCreateMultiTableStmt* pStmt) {
int32_t code = TSDB_CODE_SUCCESS;
SNode* pNode = NULL;
FOREACH(pNode, pStmt->pSubTables) {
SCreateSubTableClause* pClause = (SCreateSubTableClause*)pNode;
code =
reserveTableMetaInCache(pCxt->pParseCxt->acctId, pClause->useDbName, pClause->useTableName, pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, pCxt->pMetaCache);
}
if (TSDB_CODE_SUCCESS != code) {
break;
}
}
return code;
}
static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) {
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
}
return code;
}
static int32_t collectMetaKeyFromUseDatabase(SCollectMetaKeyCxt* pCxt, SUseDatabaseStmt* pStmt) {
return reserveDbVgVersionInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromCreateIndex(SCollectMetaKeyCxt* pCxt, SCreateIndexStmt* pStmt) {
int32_t code = TSDB_CODE_SUCCESS;
if (INDEX_TYPE_SMA == pStmt->indexType) {
code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
code =
reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache);
}
}
return code;
}
static int32_t collectMetaKeyFromCreateTopic(SCollectMetaKeyCxt* pCxt, SCreateTopicStmt* pStmt) {
if (NULL != pStmt->pQuery) {
return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
}
return TSDB_CODE_SUCCESS;
}
static int32_t collectMetaKeyFromExplain(SCollectMetaKeyCxt* pCxt, SExplainStmt* pStmt) {
return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
}
static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateStreamStmt* pStmt) {
return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
}
static int32_t collectMetaKeyFromShowDnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowMnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowModules(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowQnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowSnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SNODES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_BNODES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_DATABASES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowFunctions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_FUNCTIONS,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowIndexes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_INDEXES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_STABLES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB,
TSDB_INS_TABLE_USER_TABLES, pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
if (NULL != pStmt->pDbName) {
code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
} else {
code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
}
}
return code;
}
static int32_t collectMetaKeyFromShowUsers(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_USERS,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowLicence(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_LICENCES,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowTransactions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TRANS,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
switch (nodeType(pStmt)) {
case QUERY_NODE_SET_OPERATOR:
return collectMetaKeyFromSetOperator(pCxt, (SSetOperator*)pStmt);
case QUERY_NODE_SELECT_STMT:
return collectMetaKeyFromSelect(pCxt, (SSelectStmt*)pStmt);
case QUERY_NODE_VNODE_MODIF_STMT:
case QUERY_NODE_CREATE_DATABASE_STMT:
case QUERY_NODE_DROP_DATABASE_STMT:
case QUERY_NODE_ALTER_DATABASE_STMT:
break;
case QUERY_NODE_CREATE_TABLE_STMT:
return collectMetaKeyFromCreateTable(pCxt, (SCreateTableStmt*)pStmt);
case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
break;
case QUERY_NODE_CREATE_MULTI_TABLE_STMT:
return collectMetaKeyFromCreateMultiTable(pCxt, (SCreateMultiTableStmt*)pStmt);
case QUERY_NODE_DROP_TABLE_CLAUSE:
case QUERY_NODE_DROP_TABLE_STMT:
case QUERY_NODE_DROP_SUPER_TABLE_STMT:
break;
case QUERY_NODE_ALTER_TABLE_STMT:
return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt);
case QUERY_NODE_CREATE_USER_STMT:
case QUERY_NODE_ALTER_USER_STMT:
case QUERY_NODE_DROP_USER_STMT:
break;
case QUERY_NODE_USE_DATABASE_STMT:
return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt);
case QUERY_NODE_CREATE_DNODE_STMT:
case QUERY_NODE_DROP_DNODE_STMT:
case QUERY_NODE_ALTER_DNODE_STMT:
break;
case QUERY_NODE_CREATE_INDEX_STMT:
return collectMetaKeyFromCreateIndex(pCxt, (SCreateIndexStmt*)pStmt);
case QUERY_NODE_DROP_INDEX_STMT:
case QUERY_NODE_CREATE_QNODE_STMT:
case QUERY_NODE_DROP_QNODE_STMT:
case QUERY_NODE_CREATE_BNODE_STMT:
case QUERY_NODE_DROP_BNODE_STMT:
case QUERY_NODE_CREATE_SNODE_STMT:
case QUERY_NODE_DROP_SNODE_STMT:
case QUERY_NODE_CREATE_MNODE_STMT:
case QUERY_NODE_DROP_MNODE_STMT:
break;
case QUERY_NODE_CREATE_TOPIC_STMT:
return collectMetaKeyFromCreateTopic(pCxt, (SCreateTopicStmt*)pStmt);
case QUERY_NODE_DROP_TOPIC_STMT:
case QUERY_NODE_DROP_CGROUP_STMT:
case QUERY_NODE_ALTER_LOCAL_STMT:
break;
case QUERY_NODE_EXPLAIN_STMT:
return collectMetaKeyFromExplain(pCxt, (SExplainStmt*)pStmt);
case QUERY_NODE_DESCRIBE_STMT:
case QUERY_NODE_RESET_QUERY_CACHE_STMT:
case QUERY_NODE_COMPACT_STMT:
case QUERY_NODE_CREATE_FUNCTION_STMT:
case QUERY_NODE_DROP_FUNCTION_STMT:
break;
case QUERY_NODE_CREATE_STREAM_STMT:
return collectMetaKeyFromCreateStream(pCxt, (SCreateStreamStmt*)pStmt);
case QUERY_NODE_DROP_STREAM_STMT:
case QUERY_NODE_MERGE_VGROUP_STMT:
case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT:
case QUERY_NODE_SPLIT_VGROUP_STMT:
case QUERY_NODE_SYNCDB_STMT:
case QUERY_NODE_GRANT_STMT:
case QUERY_NODE_REVOKE_STMT:
case QUERY_NODE_SHOW_DNODES_STMT:
return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_MNODES_STMT:
return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_MODULES_STMT:
return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_QNODES_STMT:
return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_SNODES_STMT:
return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_BNODES_STMT:
return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_CLUSTER_STMT:
break;
case QUERY_NODE_SHOW_DATABASES_STMT:
return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
return collectMetaKeyFromShowFunctions(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_INDEXES_STMT:
return collectMetaKeyFromShowIndexes(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_STABLES_STMT:
return collectMetaKeyFromShowStables(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_STREAMS_STMT:
return collectMetaKeyFromShowStreams(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_TABLES_STMT:
return collectMetaKeyFromShowTables(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_USERS_STMT:
return collectMetaKeyFromShowUsers(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_LICENCE_STMT:
return collectMetaKeyFromShowLicence(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_VGROUPS_STMT:
return collectMetaKeyFromShowVgroups(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_TOPICS_STMT:
return collectMetaKeyFromShowTopics(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_CONSUMERS_STMT:
case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
case QUERY_NODE_SHOW_SMAS_STMT:
case QUERY_NODE_SHOW_CONFIGS_STMT:
case QUERY_NODE_SHOW_CONNECTIONS_STMT:
case QUERY_NODE_SHOW_QUERIES_STMT:
case QUERY_NODE_SHOW_VNODES_STMT:
case QUERY_NODE_SHOW_APPS_STMT:
case QUERY_NODE_SHOW_SCORES_STMT:
case QUERY_NODE_SHOW_VARIABLE_STMT:
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
break;
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_KILL_CONNECTION_STMT:
case QUERY_NODE_KILL_QUERY_STMT:
case QUERY_NODE_KILL_TRANSACTION_STMT:
default:
break;
}
return TSDB_CODE_SUCCESS;
}
int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery) {
SCollectMetaKeyCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))};
if (NULL == cxt.pMetaCache) {
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t code = collectMetaKeyFromQuery(&cxt, pQuery->pRoot);
if (TSDB_CODE_SUCCESS == code) {
TSWAP(pQuery->pMetaCache, cxt.pMetaCache);
}
destroyCollectMetaKeyCxt(&cxt);
return code;
}
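/*
 * Orientation sketch (not part of this change): how the helpers above are meant to
 * compose on the asynchronous parse path. The catalog fetch between steps 3 and 5 is
 * elided because it depends on the caller's transport (the parser tests below drive it
 * through a mock catalog), and the translate() signature is assumed from its use
 * elsewhere in this change.
 */
static int32_t exampleAsyncParseFlow(SParseContext* pCxt, SQuery** ppQuery) {
  int32_t code = parse(pCxt, ppQuery);                     // 1. syntax only: build the AST
  if (TSDB_CODE_SUCCESS == code) {
    code = collectMetaKey(pCxt, *ppQuery);                 // 2. walk the AST and reserve every meta key it needs
  }
  SCatalogReq req = {0};
  if (TSDB_CODE_SUCCESS == code) {
    code = buildCatalogReq((*ppQuery)->pMetaCache, &req);  // 3. batch the reservations into one catalog request
  }
  SMetaData meta = {0};
  // 4. fetch `req` from the catalog/mnode into `meta` (sync or async, caller's choice) -- elided
  if (TSDB_CODE_SUCCESS == code) {
    code = putMetaDataToCache(&req, &meta, (*ppQuery)->pMetaCache);  // 5. hydrate the parse-time cache
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = authenticate(pCxt, *ppQuery);                   // 6. auth and translate now read only from the cache
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = translate(pCxt, *ppQuery);
  }
  return code;
}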

View File

@ -18,23 +18,30 @@
#include "parInt.h"
typedef struct SAuthCxt {
SParseContext* pParseCxt;
int32_t errCode;
SParseContext* pParseCxt;
SParseMetaCache* pMetaCache;
int32_t errCode;
} SAuthCxt;
static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt);
static int32_t checkAuth(SParseContext* pCxt, const char* pDbName, AUTH_TYPE type) {
if (pCxt->isSuperUser) {
static int32_t checkAuth(SAuthCxt* pCxt, const char* pDbName, AUTH_TYPE type) {
SParseContext* pParseCxt = pCxt->pParseCxt;
if (pParseCxt->isSuperUser) {
return TSDB_CODE_SUCCESS;
}
SName name;
tNameSetDbName(&name, pCxt->acctId, pDbName, strlen(pDbName));
tNameSetDbName(&name, pParseCxt->acctId, pDbName, strlen(pDbName));
char dbFname[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(&name, dbFname);
int32_t code = TSDB_CODE_SUCCESS;
bool pass = false;
int32_t code =
catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbFname, type, &pass);
if (NULL != pCxt->pMetaCache) {
code = getUserAuthFromCache(pCxt->pMetaCache, pParseCxt->pUser, dbFname, type, &pass);
} else {
code = catalogChkAuth(pParseCxt->pCatalog, pParseCxt->pTransporter, &pParseCxt->mgmtEpSet, pParseCxt->pUser,
dbFname, type, &pass);
}
return TSDB_CODE_SUCCESS == code ? (pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code;
}
@ -45,7 +52,7 @@ static EDealRes authSubquery(SAuthCxt* pCxt, SNode* pStmt) {
static EDealRes authSelectImpl(SNode* pNode, void* pContext) {
SAuthCxt* pCxt = pContext;
if (QUERY_NODE_REAL_TABLE == nodeType(pNode)) {
pCxt->errCode = checkAuth(pCxt->pParseCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ);
pCxt->errCode = checkAuth(pCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ);
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
} else if (QUERY_NODE_TEMP_TABLE == nodeType(pNode)) {
return authSubquery(pCxt, ((STempTableNode*)pNode)->pSubquery);
@ -79,87 +86,8 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
return authSetOperator(pCxt, (SSetOperator*)pStmt);
case QUERY_NODE_SELECT_STMT:
return authSelect(pCxt, (SSelectStmt*)pStmt);
case QUERY_NODE_CREATE_DATABASE_STMT:
case QUERY_NODE_DROP_DATABASE_STMT:
case QUERY_NODE_ALTER_DATABASE_STMT:
case QUERY_NODE_CREATE_TABLE_STMT:
case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
case QUERY_NODE_CREATE_MULTI_TABLE_STMT:
case QUERY_NODE_DROP_TABLE_CLAUSE:
case QUERY_NODE_DROP_TABLE_STMT:
case QUERY_NODE_DROP_SUPER_TABLE_STMT:
case QUERY_NODE_ALTER_TABLE_STMT:
case QUERY_NODE_CREATE_USER_STMT:
case QUERY_NODE_ALTER_USER_STMT:
break;
case QUERY_NODE_DROP_USER_STMT: {
case QUERY_NODE_DROP_USER_STMT:
return authDropUser(pCxt, (SDropUserStmt*)pStmt);
}
case QUERY_NODE_USE_DATABASE_STMT:
case QUERY_NODE_CREATE_DNODE_STMT:
case QUERY_NODE_DROP_DNODE_STMT:
case QUERY_NODE_ALTER_DNODE_STMT:
case QUERY_NODE_CREATE_INDEX_STMT:
case QUERY_NODE_DROP_INDEX_STMT:
case QUERY_NODE_CREATE_QNODE_STMT:
case QUERY_NODE_DROP_QNODE_STMT:
case QUERY_NODE_CREATE_BNODE_STMT:
case QUERY_NODE_DROP_BNODE_STMT:
case QUERY_NODE_CREATE_SNODE_STMT:
case QUERY_NODE_DROP_SNODE_STMT:
case QUERY_NODE_CREATE_MNODE_STMT:
case QUERY_NODE_DROP_MNODE_STMT:
case QUERY_NODE_CREATE_TOPIC_STMT:
case QUERY_NODE_DROP_TOPIC_STMT:
case QUERY_NODE_ALTER_LOCAL_STMT:
case QUERY_NODE_EXPLAIN_STMT:
case QUERY_NODE_DESCRIBE_STMT:
case QUERY_NODE_RESET_QUERY_CACHE_STMT:
case QUERY_NODE_COMPACT_STMT:
case QUERY_NODE_CREATE_FUNCTION_STMT:
case QUERY_NODE_DROP_FUNCTION_STMT:
case QUERY_NODE_CREATE_STREAM_STMT:
case QUERY_NODE_DROP_STREAM_STMT:
case QUERY_NODE_MERGE_VGROUP_STMT:
case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT:
case QUERY_NODE_SPLIT_VGROUP_STMT:
case QUERY_NODE_SYNCDB_STMT:
case QUERY_NODE_GRANT_STMT:
case QUERY_NODE_REVOKE_STMT:
case QUERY_NODE_SHOW_DNODES_STMT:
case QUERY_NODE_SHOW_MNODES_STMT:
case QUERY_NODE_SHOW_MODULES_STMT:
case QUERY_NODE_SHOW_QNODES_STMT:
case QUERY_NODE_SHOW_SNODES_STMT:
case QUERY_NODE_SHOW_BNODES_STMT:
case QUERY_NODE_SHOW_CLUSTER_STMT:
case QUERY_NODE_SHOW_DATABASES_STMT:
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
case QUERY_NODE_SHOW_INDEXES_STMT:
case QUERY_NODE_SHOW_STABLES_STMT:
case QUERY_NODE_SHOW_STREAMS_STMT:
case QUERY_NODE_SHOW_TABLES_STMT:
case QUERY_NODE_SHOW_USERS_STMT:
case QUERY_NODE_SHOW_LICENCE_STMT:
case QUERY_NODE_SHOW_VGROUPS_STMT:
case QUERY_NODE_SHOW_TOPICS_STMT:
case QUERY_NODE_SHOW_CONSUMERS_STMT:
case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
case QUERY_NODE_SHOW_SMAS_STMT:
case QUERY_NODE_SHOW_CONFIGS_STMT:
case QUERY_NODE_SHOW_CONNECTIONS_STMT:
case QUERY_NODE_SHOW_QUERIES_STMT:
case QUERY_NODE_SHOW_VNODES_STMT:
case QUERY_NODE_SHOW_APPS_STMT:
case QUERY_NODE_SHOW_SCORES_STMT:
case QUERY_NODE_SHOW_VARIABLE_STMT:
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
case QUERY_NODE_KILL_CONNECTION_STMT:
case QUERY_NODE_KILL_QUERY_STMT:
case QUERY_NODE_KILL_TRANSACTION_STMT:
default:
break;
}
@ -168,6 +96,6 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
}
int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery) {
SAuthCxt cxt = {.pParseCxt = pParseCxt, .errCode = TSDB_CODE_SUCCESS};
SAuthCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = pQuery->pMetaCache, .errCode = TSDB_CODE_SUCCESS};
return authQuery(&cxt, pQuery->pRoot);
}

View File

@ -152,7 +152,7 @@ static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArr
tNameGetFullDbName(pName, fullDbName);
int32_t code = TSDB_CODE_SUCCESS;
if (pParCxt->async) {
code = getDBVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo);
code = getDbVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo);
} else {
code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
@ -177,7 +177,7 @@ static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pNam
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = TSDB_CODE_SUCCESS;
if (pParCxt->async) {
code = getTableHashVgroupFromCache(pCxt->pMetaCache, pName, pInfo);
code = getTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo);
} else {
code = collectUseDatabase(pName, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
@ -205,7 +205,7 @@ static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = TSDB_CODE_SUCCESS;
if (pParCxt->async) {
code = getDBVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum);
code = getDbVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum);
} else {
code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
@ -226,7 +226,7 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
tNameGetFullDbName(&name, dbFname);
int32_t code = TSDB_CODE_SUCCESS;
if (pParCxt->async) {
code = getDBCfgFromCache(pCxt->pMetaCache, dbFname, pInfo);
code = getDbCfgFromCache(pCxt->pMetaCache, dbFname, pInfo);
} else {
code = collectUseDatabaseImpl(dbFname, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
@ -239,6 +239,27 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
return code;
}
static int32_t getUdfInfo(STranslateContext* pCxt, SFunctionNode* pFunc) {
SParseContext* pParCxt = pCxt->pParseCxt;
SFuncInfo funcInfo = {0};
int32_t code = TSDB_CODE_SUCCESS;
if (pParCxt->async) {
code = getUdfInfoFromCache(pCxt->pMetaCache, pFunc->functionName, &funcInfo);
} else {
code = catalogGetUdfInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pFunc->functionName,
&funcInfo);
}
if (TSDB_CODE_SUCCESS == code) {
pFunc->funcType = FUNCTION_TYPE_UDF;
pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID;
pFunc->node.resType.type = funcInfo.outputType;
pFunc->node.resType.bytes = funcInfo.outputLen;
pFunc->udfBufSize = funcInfo.bufSize;
tFreeSFuncInfo(&funcInfo);
}
return code;
}
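/* Reading note: FUNC_AGGREGATE_UDF_ID and FUNC_SCALAR_UDF_ID are sentinel function ids,
 * letting later stages tell a catalog-resolved UDF apart from a builtin; the result type,
 * output length and buffer size are copied verbatim from the SFuncInfo returned by the
 * catalog (or, on the async path, by the parse-time cache). */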
static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* pMetaCache, STranslateContext* pCxt) {
pCxt->pParseCxt = pParseCxt;
pCxt->errCode = TSDB_CODE_SUCCESS;
@ -873,12 +894,11 @@ static bool hasInvalidFuncNesting(SNodeList* pParameterList) {
}
static int32_t getFuncInfo(STranslateContext* pCxt, SFunctionNode* pFunc) {
SFmGetFuncInfoParam param = {.pCtg = pCxt->pParseCxt->pCatalog,
.pRpc = pCxt->pParseCxt->pTransporter,
.pMgmtEps = &pCxt->pParseCxt->mgmtEpSet,
.pErrBuf = pCxt->msgBuf.buf,
.errBufLen = pCxt->msgBuf.len};
return fmGetFuncInfo(&param, pFunc);
int32_t code = fmGetFuncInfo(pFunc, pCxt->msgBuf.buf, pCxt->msgBuf.len);
if (TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION == code) {
code = getUdfInfo(pCxt, pFunc);
}
return code;
}
static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
@ -1212,7 +1232,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea
int32_t code = TSDB_CODE_SUCCESS;
SArray* vgroupList = NULL;
if ('\0' != pRealTable->qualDbName[0]) {
// todo release after mnode can be processed
if (0 != strcmp(pRealTable->qualDbName, TSDB_INFORMATION_SCHEMA_DB)) {
code = getDBVgInfo(pCxt, pRealTable->qualDbName, &vgroupList);
}
@ -1220,7 +1239,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea
code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
}
// todo release after mnode can be processed
if (TSDB_CODE_SUCCESS == code) {
code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList);
}

View File

@ -15,6 +15,9 @@
#include "parUtil.h"
#include "cJSON.h"
#include "querynodes.h"
#define USER_AUTH_KEY_MAX_LEN TSDB_USER_LEN + TSDB_DB_FNAME_LEN + 2
static char* getSyntaxErrFormat(int32_t errCode) {
switch (errCode) {
@ -255,17 +258,8 @@ STableComInfo getTableInfo(const STableMeta* pTableMeta) {
return pTableMeta->tableInfo;
}
static uint32_t getTableMetaSize(const STableMeta* pTableMeta) {
int32_t totalCols = 0;
if (pTableMeta->tableInfo.numOfColumns >= 0) {
totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags;
}
return sizeof(STableMeta) + totalCols * sizeof(SSchema);
}
STableMeta* tableMetaDup(const STableMeta* pTableMeta) {
size_t size = getTableMetaSize(pTableMeta);
size_t size = TABLE_META_SIZE(pTableMeta);
STableMeta* p = taosMemoryMalloc(size);
memcpy(p, pTableMeta, size);
@ -449,6 +443,26 @@ end:
return retCode;
}
static int32_t userAuthToString(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, char* pStr) {
return sprintf(pStr, "%s*%d.%s*%d", pUser, acctId, pDb, type);
}
static int32_t userAuthToStringExt(const char* pUser, const char* pDbFName, AUTH_TYPE type, char* pStr) {
return sprintf(pStr, "%s*%s*%d", pUser, pDbFName, type);
}
static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUserAuth) {
char* p1 = strchr(pStr, '*');
strncpy(pUserAuth->user, pStr, p1 - pStr);
++p1;
char* p2 = strchr(p1, '*');
strncpy(pUserAuth->dbFName, p1, p2 - p1);
++p2;
char buf[10] = {0};
strncpy(buf, p2, len - (p2 - pStr));
pUserAuth->type = taosStr2Int32(buf, NULL, 10);
}
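/*
 * Self-contained illustration of the key layout the two helpers above agree on (sample
 * values only, not part of the commit): userAuthToString() writes
 * "<user>*<acctId>.<db>*<type>" and stringToUserAuth() splits it back on the two '*'
 * separators; userAuthToStringExt(), fed an already-qualified db name, produces keys with
 * the identical layout, which is why the reserve and lookup sides can share one hash.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  char key[128] = {0};
  // what userAuthToString(1, "wxy", "test", 3 /* sample AUTH_TYPE value */, key) would emit
  int len = sprintf(key, "%s*%d.%s*%d", "wxy", 1, "test", 3);

  // what stringToUserAuth() does to recover the pieces
  char user[32] = {0}, dbFName[64] = {0}, typeBuf[10] = {0};
  char* p1 = strchr(key, '*');
  strncpy(user, key, p1 - key);
  ++p1;
  char* p2 = strchr(p1, '*');
  strncpy(dbFName, p1, p2 - p1);
  ++p2;
  strncpy(typeBuf, p2, len - (p2 - key));
  printf("key=%s user=%s dbFName=%s type=%d\n", key, user, dbFName, atoi(typeBuf));
  return 0;  // prints: key=wxy*1.test*3 user=wxy dbFName=1.test type=3
}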
static int32_t buildTableReq(SHashObj* pTablesHash, SArray** pTables) {
if (NULL != pTablesHash) {
*pTables = taosArrayInit(taosHashGetSize(pTablesHash), sizeof(SName));
@ -503,6 +517,44 @@ static int32_t buildTableVgroupReq(SHashObj* pTableVgroupHash, SArray** pTableVg
static int32_t buildDbCfgReq(SHashObj* pDbCfgHash, SArray** pDbCfg) { return buildDbReq(pDbCfgHash, pDbCfg); }
static int32_t buildUserAuthReq(SHashObj* pUserAuthHash, SArray** pUserAuth) {
if (NULL != pUserAuthHash) {
*pUserAuth = taosArrayInit(taosHashGetSize(pUserAuthHash), sizeof(SUserAuthInfo));
if (NULL == *pUserAuth) {
return TSDB_CODE_OUT_OF_MEMORY;
}
void* p = taosHashIterate(pUserAuthHash, NULL);
while (NULL != p) {
size_t len = 0;
char* pKey = taosHashGetKey(p, &len);
SUserAuthInfo userAuth = {0};
stringToUserAuth(pKey, len, &userAuth);
taosArrayPush(*pUserAuth, &userAuth);
p = taosHashIterate(pUserAuthHash, p);
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) {
if (NULL != pUdfHash) {
*pUdf = taosArrayInit(taosHashGetSize(pUdfHash), TSDB_FUNC_NAME_LEN);
if (NULL == *pUdf) {
return TSDB_CODE_OUT_OF_MEMORY;
}
void* p = taosHashIterate(pUdfHash, NULL);
while (NULL != p) {
size_t len = 0;
char* pFunc = taosHashGetKey(p, &len);
char func[TSDB_FUNC_NAME_LEN] = {0};
strncpy(func, pFunc, len);
taosArrayPush(*pUdf, func);
p = taosHashIterate(pUdfHash, p);
}
}
return TSDB_CODE_SUCCESS;
}
int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
int32_t code = buildTableMetaReq(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
@ -512,7 +564,13 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog
code = buildTableVgroupReq(pMetaCache->pTableVgroup, &pCatalogReq->pTableHash);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildDbCfgReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbCfg);
code = buildDbCfgReq(pMetaCache->pDbCfg, &pCatalogReq->pDbCfg);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildUserAuthReq(pMetaCache->pUserAuth, &pCatalogReq->pUser);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildUdfReq(pMetaCache->pUdf, &pCatalogReq->pUdf);
}
return code;
}
@ -568,6 +626,31 @@ static int32_t putDbCfgToCache(const SArray* pDbCfgReq, const SArray* pDbCfgData
return TSDB_CODE_SUCCESS;
}
static int32_t putUserAuthToCache(const SArray* pUserAuthReq, const SArray* pUserAuthData, SHashObj* pUserAuth) {
int32_t nvgs = taosArrayGetSize(pUserAuthReq);
for (int32_t i = 0; i < nvgs; ++i) {
SUserAuthInfo* pUser = taosArrayGet(pUserAuthReq, i);
char key[USER_AUTH_KEY_MAX_LEN] = {0};
int32_t len = userAuthToStringExt(pUser->user, pUser->dbFName, pUser->type, key);
if (TSDB_CODE_SUCCESS != taosHashPut(pUserAuth, key, len, taosArrayGet(pUserAuthData, i), sizeof(bool))) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHashObj* pUdf) {
int32_t num = taosArrayGetSize(pUdfReq);
for (int32_t i = 0; i < num; ++i) {
char* pFunc = taosArrayGet(pUdfReq, i);
SFuncInfo* pInfo = taosArrayGet(pUdfData, i);
if (TSDB_CODE_SUCCESS != taosHashPut(pUdf, pFunc, strlen(pFunc), &pInfo, POINTER_BYTES)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return TSDB_CODE_SUCCESS;
}
int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
int32_t code = putTableMetaToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, pMetaCache->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
@ -579,54 +662,161 @@ int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMet
if (TSDB_CODE_SUCCESS == code) {
code = putDbCfgToCache(pCatalogReq->pDbCfg, pMetaData->pDbCfg, pMetaCache->pDbCfg);
}
if (TSDB_CODE_SUCCESS == code) {
code = putUserAuthToCache(pCatalogReq->pUser, pMetaData->pUser, pMetaCache->pUserAuth);
}
if (TSDB_CODE_SUCCESS == code) {
code = putUdfToCache(pCatalogReq->pUdf, pMetaData->pUdfList, pMetaCache->pUdf);
}
return code;
}
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
if (NULL == pMetaCache->pTableMeta) {
pMetaCache->pTableMeta = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pMetaCache->pTableMeta) {
static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) {
if (NULL == *pTables) {
*pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == *pTables) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
char fullName[TSDB_TABLE_FNAME_LEN];
int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s.%s", acctId, pDb, pTable);
return taosHashPut(pMetaCache->pTableMeta, fullName, len, &len, POINTER_BYTES);
return taosHashPut(*pTables, fullName, len, &pTables, POINTER_BYTES);
}
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableMeta);
}
int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pName, fullName);
*pMeta = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName));
return NULL == *pMeta ? TSDB_CODE_PAR_INTERNAL_ERROR : TSDB_CODE_SUCCESS;
STableMeta** pRes = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName));
if (NULL == pRes || NULL == *pRes) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
*pMeta = tableMetaDup(*pRes);
if (NULL == *pMeta) {
return TSDB_CODE_OUT_OF_MEMORY;
}
return TSDB_CODE_SUCCESS;
}
int32_t getDBVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) {
*pVgInfo = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName));
return NULL == *pVgInfo ? TSDB_CODE_PAR_INTERNAL_ERROR : TSDB_CODE_SUCCESS;
static int32_t reserveDbReqInCache(int32_t acctId, const char* pDb, SHashObj** pDbs) {
if (NULL == *pDbs) {
*pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == *pDbs) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
char fullName[TSDB_TABLE_FNAME_LEN];
int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s", acctId, pDb);
return taosHashPut(*pDbs, fullName, len, &pDbs, POINTER_BYTES);
}
int32_t getTableHashVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbVgroup);
}
int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) {
SArray** pRes = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName));
if (NULL == pRes) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
// A NULL *pRes is legal here: it means the user database has not been created yet
if (NULL != *pRes) {
*pVgInfo = taosArrayDup(*pRes);
if (NULL == *pVgInfo) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return TSDB_CODE_SUCCESS;
}
int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableVgroup);
}
int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pName, fullName);
SVgroupInfo* pInfo = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName));
if (NULL == pInfo) {
SVgroupInfo** pRes = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName));
if (NULL == pRes || NULL == *pRes) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
memcpy(pVgroup, pInfo, sizeof(SVgroupInfo));
memcpy(pVgroup, *pRes, sizeof(SVgroupInfo));
return TSDB_CODE_SUCCESS;
}
int32_t getDBVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg);
}
int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
int32_t* pTableNum) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
int32_t getDBCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) {
SDbCfgInfo* pDbCfg = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
if (NULL == pDbCfg) {
SDbInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
if (NULL == pRes || NULL == *pRes) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
memcpy(pInfo, pDbCfg, sizeof(SDbCfgInfo));
*pVersion = (*pRes)->vgVer;
*pDbId = (*pRes)->dbId;
*pTableNum = (*pRes)->tbNum;
return TSDB_CODE_SUCCESS;
}
int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg);
}
int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) {
SDbCfgInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
if (NULL == pRes || NULL == *pRes) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
memcpy(pInfo, *pRes, sizeof(SDbCfgInfo));
return TSDB_CODE_SUCCESS;
}
int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
SParseMetaCache* pMetaCache) {
if (NULL == pMetaCache->pUserAuth) {
pMetaCache->pUserAuth = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pMetaCache->pUserAuth) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
char key[USER_AUTH_KEY_MAX_LEN] = {0};
int32_t len = userAuthToString(acctId, pUser, pDb, type, key);
bool pass = false;
return taosHashPut(pMetaCache->pUserAuth, key, len, &pass, sizeof(pass));
}
int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type,
bool* pPass) {
char key[USER_AUTH_KEY_MAX_LEN] = {0};
int32_t len = userAuthToStringExt(pUser, pDbFName, type, key);
bool* pRes = taosHashGet(pMetaCache->pUserAuth, key, len);
if (NULL == pRes) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
*pPass = *pRes;
return TSDB_CODE_SUCCESS;
}
int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache) {
if (NULL == pMetaCache->pUdf) {
pMetaCache->pUdf = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pMetaCache->pUdf) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return taosHashPut(pMetaCache->pUdf, pFunc, strlen(pFunc), &pMetaCache, POINTER_BYTES);
}
int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo) {
SFuncInfo** pRes = taosHashGet(pMetaCache->pUdf, pFunc, strlen(pFunc));
if (NULL == pRes || NULL == *pRes) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
memcpy(pInfo, *pRes, sizeof(SFuncInfo));
return TSDB_CODE_SUCCESS;
}
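/*
 * Note on the convention shared by the caches above (a reading aid, not code from this
 * change): the reserve*InCache() helpers only record the key with a throwaway placeholder
 * value; putMetaDataToCache() then overwrites most slots with a pointer into the catalog
 * response (POINTER_BYTES entries), so the matching get*FromCache() readers receive a
 * pointer-to-pointer from taosHashGet(), dereference twice, and copy or duplicate the
 * payload (memcpy(), tableMetaDup(), taosArrayDup()) so the parser ends up owning its own
 * copy. The user-auth cache is the exception: it stores the pass/deny bool by value.
 */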

View File

@ -59,6 +59,14 @@ static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
return code;
}
static int32_t syntaxParseSql(SParseContext* pCxt, SQuery** pQuery) {
int32_t code = parse(pCxt, pQuery);
if (TSDB_CODE_SUCCESS == code) {
code = collectMetaKey(pCxt, *pQuery);
}
return code;
}
static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
if (pParam->is_null && 1 == *(pParam->is_null)) {
pVal->node.resType.type = TSDB_DATA_TYPE_NULL;
@ -98,6 +106,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
}
varDataSetLen(pVal->datum.p, pVal->node.resType.bytes);
strncpy(varDataVal(pVal->datum.p), (const char*)pParam->buffer, pVal->node.resType.bytes);
pVal->node.resType.bytes += VARSTR_HEADER_SIZE;
break;
case TSDB_DATA_TYPE_NCHAR: {
pVal->node.resType.bytes *= TSDB_NCHAR_SIZE;
@ -112,7 +121,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
return errno;
}
varDataSetLen(pVal->datum.p, output);
pVal->node.resType.bytes = output;
pVal->node.resType.bytes = output + VARSTR_HEADER_SIZE;
break;
}
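/* Worked example of the two fixes above (illustrative values): for a 10-byte BINARY bind,
 * varDataSetLen() records 10 as the payload length, but the value node's declared
 * resType.bytes must also cover the var-string length header, i.e. 10 + VARSTR_HEADER_SIZE
 * in total; the NCHAR branch applies the same correction to the UCS-4 output length. */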
case TSDB_DATA_TYPE_TIMESTAMP:
@ -188,7 +197,7 @@ int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) {
// todo insert sql
} else {
code = parse(pCxt, pQuery);
code = syntaxParseSql(pCxt, pQuery);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildCatalogReq((*pQuery)->pMetaCache, pCatalogReq);

View File

@ -26,7 +26,9 @@ if(${BUILD_WINGETOPT})
target_link_libraries(parserTest PUBLIC wingetopt)
endif()
add_test(
NAME parserTest
COMMAND parserTest
)
if(NOT TD_WINDOWS)
add_test(
NAME parserTest
COMMAND parserTest
)
endif(NOT TD_WINDOWS)

View File

@ -103,7 +103,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder = mcs->createTableBuilder("performance_schema", "streams", TSDB_SYSTEM_TABLE, 1)
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
}
@ -157,6 +157,12 @@ void generateTestST1(MockCatalogService* mcs) {
mcs->createSubTable("test", "st1", "st1s3", 1);
}
void generateFunctions(MockCatalogService* mcs) {
mcs->createFunction("udf1", TSDB_FUNC_TYPE_SCALAR, TSDB_DATA_TYPE_INT, tDataTypes[TSDB_DATA_TYPE_INT].bytes, 0);
mcs->createFunction("udf2", TSDB_FUNC_TYPE_AGGREGATE, TSDB_DATA_TYPE_DOUBLE, tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes,
8);
}
} // namespace
int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandle) { return 0; }
@ -196,6 +202,11 @@ int32_t __catalogChkAuth(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, con
return 0;
}
int32_t __catalogGetUdfInfo(SCatalog* pCtg, void* pTrans, const SEpSet* pMgmtEps, const char* funcName,
SFuncInfo* pInfo) {
return g_mockCatalogService->catalogGetUdfInfo(funcName, pInfo);
}
void initMetaDataEnv() {
g_mockCatalogService.reset(new MockCatalogService());
@ -209,6 +220,7 @@ void initMetaDataEnv() {
stub.set(catalogGetDBVgInfo, __catalogGetDBVgInfo);
stub.set(catalogGetDBCfg, __catalogGetDBCfg);
stub.set(catalogChkAuth, __catalogChkAuth);
stub.set(catalogGetUdfInfo, __catalogGetUdfInfo);
// {
// AddrAny any("libcatalog.so");
// std::map<std::string,void*> result;
@ -256,6 +268,7 @@ void generateMetaData() {
generatePerformanceSchema(g_mockCatalogService.get());
generateTestT1(g_mockCatalogService.get());
generateTestST1(g_mockCatalogService.get());
generateFunctions(g_mockCatalogService.get());
g_mockCatalogService->showTables();
}

View File

@ -120,11 +120,35 @@ class MockCatalogServiceImpl {
return copyTableVgroup(db, tNameGetTableName(pTableName), vgList);
}
int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
auto it = udf_.find(funcName);
if (udf_.end() == it) {
return TSDB_CODE_FAILED;
}
memcpy(pInfo, it->second.get(), sizeof(SFuncInfo));
return TSDB_CODE_SUCCESS;
}
int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
int32_t code = getAllTableMeta(pCatalogReq->pTableMeta, &pMetaData->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = getAllTableVgroup(pCatalogReq->pTableHash, &pMetaData->pTableHash);
}
if (TSDB_CODE_SUCCESS == code) {
code = getAllDbVgroup(pCatalogReq->pDbVgroup, &pMetaData->pDbVgroup);
}
if (TSDB_CODE_SUCCESS == code) {
code = getAllDbCfg(pCatalogReq->pDbCfg, &pMetaData->pDbCfg);
}
if (TSDB_CODE_SUCCESS == code) {
code = getAllDbInfo(pCatalogReq->pDbInfo, &pMetaData->pDbInfo);
}
if (TSDB_CODE_SUCCESS == code) {
code = getAllUserAuth(pCatalogReq->pUser, &pMetaData->pUser);
}
if (TSDB_CODE_SUCCESS == code) {
code = getAllUdf(pCatalogReq->pUdf, &pMetaData->pUdfList);
}
return code;
}
@ -211,21 +235,21 @@ class MockCatalogServiceImpl {
}
}
std::shared_ptr<MockTableMeta> getTableMeta(const std::string& db, const std::string& tbname) const {
DbMetaCache::const_iterator it = meta_.find(db);
if (meta_.end() == it) {
return std::shared_ptr<MockTableMeta>();
}
TableMetaCache::const_iterator tit = it->second.find(tbname);
if (it->second.end() == tit) {
return std::shared_ptr<MockTableMeta>();
}
return tit->second;
void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize) {
std::shared_ptr<SFuncInfo> info(new SFuncInfo);
strcpy(info->name, func.c_str());
info->funcType = funcType;
info->scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
info->outputType = outputType;
info->outputLen = outputLen;
info->bufSize = bufSize;
udf_.insert(std::make_pair(func, info));
}
private:
typedef std::map<std::string, std::shared_ptr<MockTableMeta>> TableMetaCache;
typedef std::map<std::string, TableMetaCache> DbMetaCache;
typedef std::map<std::string, std::shared_ptr<SFuncInfo>> UdfMetaCache;
std::string toDbname(const std::string& dbFullName) const {
std::string::size_type n = dbFullName.find(".");
@ -308,6 +332,18 @@ class MockCatalogServiceImpl {
return TSDB_CODE_SUCCESS;
}
std::shared_ptr<MockTableMeta> getTableMeta(const std::string& db, const std::string& tbname) const {
DbMetaCache::const_iterator it = meta_.find(db);
if (meta_.end() == it) {
return std::shared_ptr<MockTableMeta>();
}
TableMetaCache::const_iterator tit = it->second.find(tbname);
if (it->second.end() == tit) {
return std::shared_ptr<MockTableMeta>();
}
return tit->second;
}
int32_t getAllTableMeta(SArray* pTableMetaReq, SArray** pTableMetaData) const {
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pTableMetaReq) {
@ -330,12 +366,82 @@ class MockCatalogServiceImpl {
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pTableVgroupReq) {
int32_t ntables = taosArrayGetSize(pTableVgroupReq);
*pTableVgroupData = taosArrayInit(ntables, POINTER_BYTES);
*pTableVgroupData = taosArrayInit(ntables, sizeof(SVgroupInfo));
for (int32_t i = 0; i < ntables; ++i) {
SVgroupInfo* pVgInfo = (SVgroupInfo*)taosMemoryCalloc(1, sizeof(SVgroupInfo));
code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), pVgInfo);
SVgroupInfo vgInfo = {0};
code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), &vgInfo);
if (TSDB_CODE_SUCCESS == code) {
taosArrayPush(*pTableVgroupData, &pVgInfo);
taosArrayPush(*pTableVgroupData, &vgInfo);
} else {
break;
}
}
}
return code;
}
int32_t getAllDbVgroup(SArray* pDbVgroupReq, SArray** pDbVgroupData) const {
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pDbVgroupReq) {
int32_t ndbs = taosArrayGetSize(pDbVgroupReq);
*pDbVgroupData = taosArrayInit(ndbs, POINTER_BYTES);
for (int32_t i = 0; i < ndbs; ++i) {
int64_t zeroVg = 0;
taosArrayPush(*pDbVgroupData, &zeroVg);
}
}
return code;
}
int32_t getAllDbCfg(SArray* pDbCfgReq, SArray** pDbCfgData) const {
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pDbCfgReq) {
int32_t ndbs = taosArrayGetSize(pDbCfgReq);
*pDbCfgData = taosArrayInit(ndbs, sizeof(SDbCfgInfo));
for (int32_t i = 0; i < ndbs; ++i) {
SDbCfgInfo dbCfg = {0};
taosArrayPush(*pDbCfgData, &dbCfg);
}
}
return code;
}
int32_t getAllDbInfo(SArray* pDbInfoReq, SArray** pDbInfoData) const {
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pDbInfoReq) {
int32_t ndbs = taosArrayGetSize(pDbInfoReq);
*pDbInfoData = taosArrayInit(ndbs, sizeof(SDbCfgInfo));
for (int32_t i = 0; i < ndbs; ++i) {
SDbInfo dbInfo = {0};
taosArrayPush(*pDbInfoData, &dbInfo);
}
}
return code;
}
int32_t getAllUserAuth(SArray* pUserAuthReq, SArray** pUserAuthData) const {
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pUserAuthReq) {
int32_t num = taosArrayGetSize(pUserAuthReq);
*pUserAuthData = taosArrayInit(num, sizeof(bool));
for (int32_t i = 0; i < num; ++i) {
bool pass = true;
taosArrayPush(*pUserAuthData, &pass);
}
}
return code;
}
int32_t getAllUdf(SArray* pUdfReq, SArray** pUdfData) const {
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pUdfReq) {
int32_t num = taosArrayGetSize(pUdfReq);
*pUdfData = taosArrayInit(num, sizeof(SFuncInfo));
for (int32_t i = 0; i < num; ++i) {
SFuncInfo info = {0};
code = catalogGetUdfInfo((char*)taosArrayGet(pUdfReq, i), &info);
if (TSDB_CODE_SUCCESS == code) {
taosArrayPush(*pUdfData, &info);
} else {
break;
}
@ -347,6 +453,7 @@ class MockCatalogServiceImpl {
uint64_t id_;
std::unique_ptr<TableBuilder> builder_;
DbMetaCache meta_;
UdfMetaCache udf_;
};
MockCatalogService::MockCatalogService() : impl_(new MockCatalogServiceImpl()) {}
@ -365,9 +472,9 @@ void MockCatalogService::createSubTable(const std::string& db, const std::string
void MockCatalogService::showTables() const { impl_->showTables(); }
std::shared_ptr<MockTableMeta> MockCatalogService::getTableMeta(const std::string& db,
const std::string& tbname) const {
return impl_->getTableMeta(db, tbname);
void MockCatalogService::createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen,
int32_t bufSize) {
impl_->createFunction(func, funcType, outputType, outputLen, bufSize);
}
int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const {
@ -382,6 +489,10 @@ int32_t MockCatalogService::catalogGetTableDistVgInfo(const SName* pTableName, S
return impl_->catalogGetTableDistVgInfo(pTableName, pVgList);
}
int32_t MockCatalogService::catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
return impl_->catalogGetUdfInfo(funcName, pInfo);
}
int32_t MockCatalogService::catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
return impl_->catalogGetAllMeta(pCatalogReq, pMetaData);
}

View File

@ -56,11 +56,12 @@ class MockCatalogService {
int32_t numOfColumns, int32_t numOfTags = 0);
void createSubTable(const std::string& db, const std::string& stbname, const std::string& tbname, int16_t vgid);
void showTables() const;
std::shared_ptr<MockTableMeta> getTableMeta(const std::string& db, const std::string& tbname) const;
void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize);
int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const;
int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const;
int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const;
int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const;
int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const;
private:

View File

@ -228,7 +228,44 @@ TEST_F(ParserInitialCTest, createDnode) {
run("CREATE DNODE 1.1.1.1 PORT 9000");
}
// todo CREATE FUNCTION
// CREATE [AGGREGATE] FUNCTION [IF NOT EXISTS] func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value]
TEST_F(ParserInitialCTest, createFunction) {
useDb("root", "test");
SCreateFuncReq expect = {0};
auto setCreateFuncReqFunc = [&](const char* pUdfName, int8_t outputType, int32_t outputBytes = 0,
int8_t funcType = TSDB_FUNC_TYPE_SCALAR, int8_t igExists = 0, int32_t bufSize = 0) {
memset(&expect, 0, sizeof(SCreateFuncReq));
strcpy(expect.name, pUdfName);
expect.igExists = igExists;
expect.funcType = funcType;
expect.scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
expect.outputType = outputType;
expect.outputLen = outputBytes > 0 ? outputBytes : tDataTypes[outputType].bytes;
expect.bufSize = bufSize;
};
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_FUNCTION_STMT);
SCreateFuncReq req = {0};
ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSCreateFuncReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
ASSERT_EQ(std::string(req.name), std::string(expect.name));
ASSERT_EQ(req.igExists, expect.igExists);
ASSERT_EQ(req.funcType, expect.funcType);
ASSERT_EQ(req.scriptType, expect.scriptType);
ASSERT_EQ(req.outputType, expect.outputType);
ASSERT_EQ(req.outputLen, expect.outputLen);
ASSERT_EQ(req.bufSize, expect.bufSize);
});
setCreateFuncReqFunc("udf1", TSDB_DATA_TYPE_INT);
// run("CREATE FUNCTION udf1 AS './build/lib/libudf1.so' OUTPUTTYPE INT");
setCreateFuncReqFunc("udf2", TSDB_DATA_TYPE_DOUBLE, 0, TSDB_FUNC_TYPE_AGGREGATE, 1, 8);
// run("CREATE AGGREGATE FUNCTION IF NOT EXISTS udf2 AS './build/lib/libudf2.so' OUTPUTTYPE DOUBLE BUFSIZE 8");
}
TEST_F(ParserInitialCTest, createIndexSma) {
useDb("root", "test");

View File

@ -103,6 +103,7 @@ TEST_F(ParserInitialDTest, dropTopic) {
}
TEST_F(ParserInitialDTest, dropUser) {
login("root");
useDb("root", "test");
run("drop user wxy");

View File

@ -141,6 +141,14 @@ TEST_F(ParserSelectTest, IndefiniteRowsFuncSemanticCheck) {
// run("SELECT DIFF(c1) FROM t1 INTERVAL(10s)");
}
TEST_F(ParserSelectTest, useDefinedFunc) {
useDb("root", "test");
run("SELECT udf1(c1) FROM t1");
run("SELECT udf2(c1) FROM t1 GROUP BY c2");
}
TEST_F(ParserSelectTest, groupBy) {
useDb("root", "test");

View File

@ -37,6 +37,7 @@ class ParserEnv : public testing::Environment {
virtual void SetUp() {
initMetaDataEnv();
generateMetaData();
initLog(TD_TMP_DIR_PATH "td");
}
virtual void TearDown() {
@ -47,20 +48,55 @@ class ParserEnv : public testing::Environment {
ParserEnv() {}
virtual ~ParserEnv() {}
private:
void initLog(const char* path) {
int32_t logLevel = getLogLevel();
dDebugFlag = logLevel;
vDebugFlag = logLevel;
mDebugFlag = logLevel;
cDebugFlag = logLevel;
jniDebugFlag = logLevel;
tmrDebugFlag = logLevel;
uDebugFlag = logLevel;
rpcDebugFlag = logLevel;
qDebugFlag = logLevel;
wDebugFlag = logLevel;
sDebugFlag = logLevel;
tsdbDebugFlag = logLevel;
tsLogEmbedded = 1;
tsAsyncLog = 0;
taosRemoveDir(path);
taosMkDir(path);
tstrncpy(tsLogDir, path, PATH_MAX);
if (taosInitLog("taoslog", 1) != 0) {
std::cout << "failed to init log file" << std::endl;
}
}
};
static void parseArg(int argc, char* argv[]) {
int opt = 0;
const char* optstring = "";
int opt = 0;
const char* optstring = "";
// clang-format off
static struct option long_options[] = {
{"dump", no_argument, NULL, 'd'}, {"async", no_argument, NULL, 'a'}, {0, 0, 0, 0}};
{"dump", no_argument, NULL, 'd'},
{"async", required_argument, NULL, 'a'},
{"skipSql", required_argument, NULL, 's'},
{0, 0, 0, 0}
};
// clang-format on
while ((opt = getopt_long(argc, argv, optstring, long_options, NULL)) != -1) {
switch (opt) {
case 'd':
g_dump = true;
break;
case 'a':
g_testAsyncApis = true;
setAsyncFlag(optarg);
break;
case 's':
setSkipSqlNum(optarg);
break;
default:
break;

View File

@ -44,23 +44,40 @@ namespace ParserTest {
} \
} while (0);
bool g_dump = false;
bool g_testAsyncApis = false;
bool g_dump = false;
bool g_testAsyncApis = true;
int32_t g_logLevel = 131;
int32_t g_skipSql = 0;
void setAsyncFlag(const char* pFlag) { g_testAsyncApis = stoi(pFlag) > 0 ? true : false; }
void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }
struct TerminateFlag : public exception {
const char* what() const throw() { return "success and terminate"; }
};
void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }
int32_t getLogLevel() { return g_logLevel; }
class ParserTestBaseImpl {
public:
ParserTestBaseImpl(ParserTestBase* pBase) : pBase_(pBase) {}
void login(const std::string& user) { caseEnv_.user_ = user; }
void useDb(const string& acctId, const string& db) {
caseEnv_.acctId_ = acctId;
caseEnv_.db_ = db;
caseEnv_.nsql_ = g_skipSql;
}
void run(const string& sql, int32_t expect, ParserStage checkStage) {
if (caseEnv_.nsql_ > 0) {
--(caseEnv_.nsql_);
return;
}
reset(expect, checkStage);
try {
SParseContext cxt = {0};
@ -69,6 +86,8 @@ class ParserTestBaseImpl {
SQuery* pQuery = nullptr;
doParse(&cxt, &pQuery);
doAuthenticate(&cxt, pQuery);
doTranslate(&cxt, pQuery);
doCalculateConstant(&cxt, pQuery);
@ -89,59 +108,14 @@ class ParserTestBaseImpl {
}
}
void runAsync(const string& sql, int32_t expect, ParserStage checkStage) {
reset(expect, checkStage);
try {
SParseContext cxt = {0};
setParseContext(sql, &cxt, true);
SQuery* pQuery = nullptr;
doParse(&cxt, &pQuery);
SCatalogReq catalogReq = {0};
doBuildCatalogReq(pQuery->pMetaCache, &catalogReq);
string err;
thread t1([&]() {
try {
SMetaData metaData = {0};
doGetAllMeta(&catalogReq, &metaData);
doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache);
doTranslate(&cxt, pQuery);
doCalculateConstant(&cxt, pQuery);
} catch (const TerminateFlag& e) {
// success and terminate
} catch (const runtime_error& e) {
err = e.what();
} catch (...) {
err = "unknown error";
}
});
t1.join();
if (!err.empty()) {
throw runtime_error(err);
}
if (g_dump) {
dump();
}
} catch (const TerminateFlag& e) {
// success and terminate
return;
} catch (...) {
dump();
throw;
}
}
private:
struct caseEnv {
string acctId_;
string db_;
string acctId_;
string user_;
string db_;
int32_t nsql_;
caseEnv() : user_("wangxiaoyu"), nsql_(0) {}
};
struct stmtEnv {
@ -207,6 +181,8 @@ class ParserTestBaseImpl {
pCxt->acctId = atoi(caseEnv_.acctId_.c_str());
pCxt->db = caseEnv_.db_.c_str();
pCxt->pUser = caseEnv_.user_.c_str();
pCxt->isSuperUser = caseEnv_.user_ == "root";
pCxt->pSql = stmtEnv_.sql_.c_str();
pCxt->sqlLen = stmtEnv_.sql_.length();
pCxt->pMsg = stmtEnv_.msgBuf_.data();
@ -220,6 +196,11 @@ class ParserTestBaseImpl {
res_.parsedAst_ = toString((*pQuery)->pRoot);
}
void doCollectMetaKey(SParseContext* pCxt, SQuery* pQuery) {
DO_WITH_THROW(collectMetaKey, pCxt, pQuery);
ASSERT_NE(pQuery->pMetaCache, nullptr);
}
void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq);
}
@ -232,6 +213,8 @@ class ParserTestBaseImpl {
DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache);
}
void doAuthenticate(SParseContext* pCxt, SQuery* pQuery) { DO_WITH_THROW(authenticate, pCxt, pQuery); }
void doTranslate(SParseContext* pCxt, SQuery* pQuery) {
DO_WITH_THROW(translate, pCxt, pQuery);
checkQuery(pQuery, PARSER_STAGE_TRANSLATE);
@ -254,6 +237,59 @@ class ParserTestBaseImpl {
void checkQuery(const SQuery* pQuery, ParserStage stage) { pBase_->checkDdl(pQuery, stage); }
void runAsync(const string& sql, int32_t expect, ParserStage checkStage) {
reset(expect, checkStage);
try {
SParseContext cxt = {0};
setParseContext(sql, &cxt, true);
SQuery* pQuery = nullptr;
doParse(&cxt, &pQuery);
doCollectMetaKey(&cxt, pQuery);
SCatalogReq catalogReq = {0};
doBuildCatalogReq(pQuery->pMetaCache, &catalogReq);
string err;
thread t1([&]() {
try {
SMetaData metaData = {0};
doGetAllMeta(&catalogReq, &metaData);
doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache);
doAuthenticate(&cxt, pQuery);
doTranslate(&cxt, pQuery);
doCalculateConstant(&cxt, pQuery);
} catch (const TerminateFlag& e) {
// success and terminate
} catch (const runtime_error& e) {
err = e.what();
} catch (...) {
err = "unknown error";
}
});
t1.join();
if (!err.empty()) {
throw runtime_error(err);
}
if (g_dump) {
dump();
}
} catch (const TerminateFlag& e) {
// success and terminate
return;
} catch (...) {
dump();
throw;
}
}
caseEnv caseEnv_;
stmtEnv stmtEnv_;
stmtRes res_;
@ -264,16 +300,14 @@ ParserTestBase::ParserTestBase() : impl_(new ParserTestBaseImpl(this)) {}
ParserTestBase::~ParserTestBase() {}
void ParserTestBase::login(const std::string& user) { return impl_->login(user); }
void ParserTestBase::useDb(const std::string& acctId, const std::string& db) { impl_->useDb(acctId, db); }
void ParserTestBase::run(const std::string& sql, int32_t expect, ParserStage checkStage) {
return impl_->run(sql, expect, checkStage);
}
void ParserTestBase::runAsync(const std::string& sql, int32_t expect, ParserStage checkStage) {
return impl_->runAsync(sql, expect, checkStage);
}
void ParserTestBase::checkDdl(const SQuery* pQuery, ParserStage stage) { return; }
} // namespace ParserTest

View File

@ -34,9 +34,9 @@ class ParserTestBase : public testing::Test {
ParserTestBase();
virtual ~ParserTestBase();
void login(const std::string& user);
void useDb(const std::string& acctId, const std::string& db);
void run(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL);
void runAsync(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL);
virtual void checkDdl(const SQuery* pQuery, ParserStage stage);
@ -65,7 +65,11 @@ class ParserDdlTest : public ParserTestBase {
};
extern bool g_dump;
extern bool g_testAsyncApis;
extern void setAsyncFlag(const char* pFlag);
extern void setLogLevel(const char* pLogLevel);
extern int32_t getLogLevel();
extern void setSkipSqlNum(const char* pNum);
} // namespace ParserTest

View File

@ -32,7 +32,9 @@ if(${BUILD_WINGETOPT})
target_link_libraries(plannerTest PUBLIC wingetopt)
endif()
add_test(
NAME plannerTest
COMMAND plannerTest
)
if(NOT TD_WINDOWS)
add_test(
NAME plannerTest
COMMAND plannerTest
)
endif(NOT TD_WINDOWS)

View File

@ -73,7 +73,7 @@ void setDumpModule(const char* pModule) {
}
}
void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(optarg); }
void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }
void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }

View File

@ -145,6 +145,15 @@ typedef struct SQWSchStatus {
SHashObj *tasksHash; // key:queryId+taskId, value: SQWTaskStatus
} SQWSchStatus;
typedef struct SQWWaitTimeStat {
uint64_t num;    // number of dequeued messages sampled
uint64_t total;  // accumulated wait time in the queue, in microseconds
} SQWWaitTimeStat;
typedef struct SQWStat {
SQWWaitTimeStat msgWait[2];  // [0]: query queue, [1]: fetch queue
} SQWStat;
// Qnode/Vnode level task management
typedef struct SQWorker {
int64_t refId;
@ -155,9 +164,10 @@ typedef struct SQWorker {
tmr_h hbTimer;
SRWLatch schLock;
// SRWLatch ctxLock;
SHashObj *schHash; // key: schedulerId, value: SQWSchStatus
SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx
SMsgCb msgCb;
SHashObj *schHash; // key: schedulerId, value: SQWSchStatus
SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx
SMsgCb msgCb;
SQWStat stat;
} SQWorker;
typedef struct SQWorkerMgmt {
@ -322,6 +332,8 @@ int32_t qwDropTask(QW_FPARAMS_DEF);
void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx);
int32_t qwOpenRef(void);
void qwSetHbParam(int64_t refId, SQWHbParam **pParam);
int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type);
int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type);
void qwDbgDumpMgmtInfo(SQWorker *mgmt);
int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore);

View File

@ -248,7 +248,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *
return TSDB_CODE_SUCCESS;
}
int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
@ -257,6 +257,8 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SSubQueryMsg *msg = pMsg->pCont;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE);
if (NULL == msg || pMsg->contLen <= sizeof(*msg)) {
QW_ELOG("invalid query msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@ -286,7 +288,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
int32_t code = 0;
int8_t status = 0;
bool queryDone = false;
@ -295,6 +297,8 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SQWTaskCtx * handles = NULL;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE);
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid cquery msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@ -316,7 +320,7 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@ -324,6 +328,8 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SResFetchReq *msg = pMsg->pCont;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid fetch msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@ -349,13 +355,16 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
qProcessFetchRsp(NULL, pMsg, NULL);
pMsg->pCont = NULL;
return TSDB_CODE_SUCCESS;
}
int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@ -363,6 +372,9 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
int32_t code = 0;
STaskCancelReq *msg = pMsg->pCont;
qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
qError("invalid task cancel msg");
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@ -390,7 +402,7 @@ _return:
return TSDB_CODE_SUCCESS;
}
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@ -399,6 +411,8 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
STaskDropReq *msg = pMsg->pCont;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid task drop msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@ -429,7 +443,7 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@ -438,6 +452,8 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SSchedulerHbReq req = {0};
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
if (NULL == pMsg->pCont) {
QW_ELOG("invalid hb msg, msg:%p, msgLen:%d", pMsg->pCont, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);

View File

@ -499,4 +499,43 @@ int32_t qwOpenRef(void) {
return TSDB_CODE_SUCCESS;
}
int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type) {
if (ts <= 0) {
return TSDB_CODE_SUCCESS;
}
int64_t duration = taosGetTimestampUs() - ts;
switch (type) {
case QUERY_QUEUE:
++mgmt->stat.msgWait[0].num;
mgmt->stat.msgWait[0].total += duration;
break;
case FETCH_QUEUE:
++mgmt->stat.msgWait[1].num;
mgmt->stat.msgWait[1].total += duration;
break;
default:
qError("unsupported queue type %d", type);
return TSDB_CODE_APP_ERROR;
}
return TSDB_CODE_SUCCESS;
}
int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type) {
SQWWaitTimeStat *pStat = NULL;
switch (type) {
case QUERY_QUEUE:
pStat = &mgmt->stat.msgWait[0];
return pStat->num ? (pStat->total/pStat->num) : 0;
case FETCH_QUEUE:
pStat = &mgmt->stat.msgWait[1];
return pStat->num ? (pStat->total/pStat->num) : 0;
default:
qError("unsupported queue type %d", type);
return -1;
}
}
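A minimal sketch (not part of the patch, hypothetical function name) of how the two helpers above interact, assuming the SQWorker and SQWWaitTimeStat definitions shown earlier: each dequeue adds one sample, and qwGetWaitTimeInQueue returns the running average in microseconds (0 when no samples have been recorded).
// Illustration only: two samples accumulated for the query queue, then the
// running average is read back. taosGetTimestampUs() stands in for the real
// enqueue timestamp carried by the dequeued message.
void qwWaitTimeStatSketch(SQWorker *mgmt) {
  int64_t enqueueTs = taosGetTimestampUs();
  qwUpdateWaitTimeInQueue(mgmt, enqueueTs, QUERY_QUEUE);   // sample 1
  qwUpdateWaitTimeInQueue(mgmt, enqueueTs, QUERY_QUEUE);   // sample 2
  int64_t avgUs = qwGetWaitTimeInQueue(mgmt, QUERY_QUEUE); // total / num
  qDebug("avg wait time in query queue: %" PRId64 " us", avgUs);
}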

View File

@ -950,4 +950,9 @@ void qWorkerDestroy(void **qWorkerMgmt) {
}
}
int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type) {
return qwGetWaitTimeInQueue((SQWorker *)qWorkerMgmt, type);
}
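A hedged sketch of a consumer of the new public wrapper: a node-level monitoring hook (hypothetical name, not actual dnode code) could poll both per-queue averages when assembling its load report.
// Hypothetical monitoring hook (illustration only), polling both queue averages.
static void reportQueueWaitTimes(void *qWorkerMgmt) {
  int64_t queryWaitUs = qWorkerGetWaitTimeInQueue(qWorkerMgmt, QUERY_QUEUE);
  int64_t fetchWaitUs = qWorkerGetWaitTimeInQueue(qWorkerMgmt, FETCH_QUEUE);
  qDebug("avg queue wait - query:%" PRId64 " us, fetch:%" PRId64 " us", queryWaitUs, fetchWaitUs);
}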

View File

@ -635,7 +635,7 @@ void *queryThread(void *param) {
while (!qwtTestStop) {
qwtBuildQueryReqMsg(&queryRpc);
qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(taosRand()%5);
}
@ -657,7 +657,7 @@ void *fetchThread(void *param) {
while (!qwtTestStop) {
qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc);
code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc);
code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(taosRand()%5);
}
@ -679,7 +679,7 @@ void *dropThread(void *param) {
while (!qwtTestStop) {
qwtBuildDropReqMsg(&dropMsg, &dropRpc);
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(taosRand()%5);
}
@ -758,9 +758,9 @@ void *queryQueueThread(void *param) {
}
if (TDMT_VND_QUERY == queryRpc->msgType) {
qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc);
qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc, 0);
} else if (TDMT_VND_QUERY_CONTINUE == queryRpc->msgType) {
qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc);
qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc, 0);
} else {
printf("unknown msg in query queue, type:%d\n", queryRpc->msgType);
assert(0);
@ -815,13 +815,13 @@ void *fetchQueueThread(void *param) {
switch (fetchRpc->msgType) {
case TDMT_VND_FETCH:
qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc);
qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc, 0);
break;
case TDMT_VND_CANCEL_TASK:
qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc);
qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc, 0);
break;
case TDMT_VND_DROP_TASK:
qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc);
qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc, 0);
break;
default:
printf("unknown msg type:%d in fetch queue", fetchRpc->msgType);
@ -878,16 +878,16 @@ TEST(seqTest, normalCase) {
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
ASSERT_EQ(code, 0);
//code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc);
//ASSERT_EQ(code, 0);
code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc);
code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0);
ASSERT_EQ(code, 0);
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
ASSERT_EQ(code, 0);
qWorkerDestroy(&mgmt);
@ -914,10 +914,10 @@ TEST(seqTest, cancelFirst) {
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
ASSERT_EQ(code, 0);
code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
ASSERT_TRUE(0 != code);
qWorkerDestroy(&mgmt);
@ -959,7 +959,7 @@ TEST(seqTest, randCase) {
if (r >= 0 && r < maxr/5) {
printf("Query,%d\n", t++);
qwtBuildQueryReqMsg(&queryRpc);
code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
} else if (r >= maxr/5 && r < maxr * 2/5) {
//printf("Ready,%d\n", t++);
//qwtBuildReadyReqMsg(&readyMsg, &readyRpc);
@ -970,14 +970,14 @@ TEST(seqTest, randCase) {
} else if (r >= maxr * 2/5 && r < maxr* 3/5) {
printf("Fetch,%d\n", t++);
qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc);
code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc);
code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(1);
}
} else if (r >= maxr * 3/5 && r < maxr * 4/5) {
printf("Drop,%d\n", t++);
qwtBuildDropReqMsg(&dropMsg, &dropRpc);
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(1);
}

View File

@ -51,7 +51,7 @@ typedef struct SScalarCtx {
int32_t doConvertDataType(SValueNode* pValueNode, SScalarParam* out);
SColumnInfoData* createColumnInfoData(SDataType* pType, int32_t numOfRows);
void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode);
int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode);
#define GET_PARAM_TYPE(_c) ((_c)->columnData->info.type)
#define GET_PARAM_BYTES(_c) ((_c)->columnData->info.bytes)

View File

@ -3553,7 +3553,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
sclConvertToTsValueNode(stat->precision, valueNode);
int32_t code = sclConvertToTsValueNode(stat->precision, valueNode);
if (code) {
stat->code = code;
return DEAL_RES_ERROR;
}
return DEAL_RES_CONTINUE;
}
@ -3687,7 +3691,7 @@ int32_t fltReviseNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) {
for (int32_t i = 0; i < nodeNum; ++i) {
SValueNode *valueNode = *(SValueNode **)taosArrayGet(pStat->nodeList, i);
sclConvertToTsValueNode(pStat->precision, valueNode);
FLT_ERR_JRET(sclConvertToTsValueNode(pStat->precision, valueNode));
}
_return:

View File

@ -20,17 +20,19 @@ int32_t scalarGetOperatorParamNum(EOperatorType type) {
return 2;
}
void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) {
int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) {
char *timeStr = valueNode->datum.p;
if (convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i) !=
TSDB_CODE_SUCCESS) {
valueNode->datum.i = 0;
int32_t code = convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
taosMemoryFree(timeStr);
valueNode->typeData = valueNode->datum.i;
valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
valueNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
return TSDB_CODE_SUCCESS;
}
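Since sclConvertToTsValueNode now reports failure instead of silently writing a zero timestamp, callers are expected to propagate the code. A minimal calling pattern (illustration only, wrapper name hypothetical; the concrete error code is assumed, not confirmed by this hunk):
// Illustrative caller (not from the patch): stop on conversion failure
// instead of continuing with an undefined timestamp value.
static int32_t convertOrFail(int8_t precision, SValueNode *valueNode) {
  int32_t code = sclConvertToTsValueNode(precision, valueNode);
  if (TSDB_CODE_SUCCESS != code) {
    return code;  // e.g. an invalid-timestamp error bubbling up from convertStringToTimestamp
  }
  return TSDB_CODE_SUCCESS;
}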
@ -546,6 +548,7 @@ EDealRes sclRewriteBasedOnOptr(SNode** pNode, SScalarCtx *ctx, EOperatorType opT
EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
SOperatorNode *node = (SOperatorNode *)*pNode;
int32_t code = 0;
if (node->pLeft && (QUERY_NODE_VALUE == nodeType(node->pLeft))) {
SValueNode *valueNode = (SValueNode *)node->pLeft;
@ -555,7 +558,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pRight && nodesIsExprNode(node->pRight)
&& ((SExprNode*)node->pRight)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) {
sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode);
code = sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode);
if (code) {
ctx->code = code;
return DEAL_RES_ERROR;
}
}
}
@ -567,7 +574,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pLeft && nodesIsExprNode(node->pLeft)
&& ((SExprNode*)node->pLeft)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) {
sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode);
code = sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode);
if (code) {
ctx->code = code;
return DEAL_RES_ERROR;
}
}
}

View File

@ -633,7 +633,7 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar
continue;
}
char *input = colDataGetData(pInput[0].columnData, i);
char *input = colDataGetData(pInputData, i);
int32_t len = varDataLen(input);
int32_t charLen = (type == TSDB_DATA_TYPE_VARCHAR) ? len : len / TSDB_NCHAR_SIZE;
trimFn(input, output, type, charLen);

View File

@ -17,7 +17,9 @@ TARGET_INCLUDE_DIRECTORIES(
PUBLIC "${TD_SOURCE_DIR}/source/libs/parser/inc"
PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc"
)
add_test(
NAME scalarTest
COMMAND scalarTest
)
if(NOT TD_WINDOWS)
add_test(
NAME scalarTest
COMMAND scalarTest
)
endif(NOT TD_WINDOWS)

View File

@ -94,6 +94,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
if (schJobNeedToStop(pJob, &status)) {
SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status),
rspCode);
taosMemoryFreeClear(msg);
SCH_RET(atomic_load_32(&pJob->errCode));
}
@ -121,6 +122,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
}
SCH_ERR_JRET(rspCode);
taosMemoryFreeClear(msg);
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
@ -145,6 +148,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
}
SCH_ERR_JRET(rspCode);
taosMemoryFreeClear(msg);
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
@ -164,6 +169,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
if (NULL == msg) {
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
taosMemoryFreeClear(msg);
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
@ -210,6 +218,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
}
taosMemoryFreeClear(msg);
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
@ -224,6 +234,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(rsp->code);
SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp));
taosMemoryFreeClear(msg);
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
@ -275,6 +287,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp));
}
taosMemoryFreeClear(msg);
return TSDB_CODE_SUCCESS;
}
@ -282,6 +296,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(schFetchFromRemote(pJob));
taosMemoryFreeClear(msg);
return TSDB_CODE_SUCCESS;
}
@ -300,6 +316,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed);
msg = NULL;
schProcessOnDataFetched(pJob);
break;
}
@ -322,6 +340,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
_return:
taosMemoryFreeClear(msg);
SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
}

View File

@ -20,15 +20,15 @@
static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT;
static char* notify = "a";
static int transSrvInst = 0;
static int tranSSvrInst = 0;
typedef struct {
int notifyCount; //
int init; // init or not
STransMsg msg;
} SSrvRegArg;
} SSvrRegArg;
typedef struct SSrvConn {
typedef struct SSvrConn {
T_REF_DECLARE()
uv_tcp_t* pTcp;
uv_write_t pWriter;
@ -42,7 +42,7 @@ typedef struct SSrvConn {
void* hostThrd;
STransQueue srvMsgs;
SSrvRegArg regArg;
SSvrRegArg regArg;
bool broken; // conn broken;
ConnStatus status;
@ -55,14 +55,14 @@ typedef struct SSrvConn {
char user[TSDB_UNI_LEN]; // user ID for the link
char secret[TSDB_PASSWORD_LEN];
char ckey[TSDB_PASSWORD_LEN]; // ciphering key
} SSrvConn;
} SSvrConn;
typedef struct SSrvMsg {
SSrvConn* pConn;
typedef struct SSvrMsg {
SSvrConn* pConn;
STransMsg msg;
queue q;
STransMsgType type;
} SSrvMsg;
} SSvrMsg;
typedef struct SWorkThrdObj {
TdThread thread;
@ -127,25 +127,25 @@ static void uvWorkAfterTask(uv_work_t* req, int status);
static void uvWalkCb(uv_handle_t* handle, void* arg);
static void uvFreeCb(uv_handle_t* handle);
static void uvStartSendRespInternal(SSrvMsg* smsg);
static void uvPrepareSendData(SSrvMsg* msg, uv_buf_t* wb);
static void uvStartSendResp(SSrvMsg* msg);
static void uvStartSendRespInternal(SSvrMsg* smsg);
static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
static void uvStartSendResp(SSvrMsg* msg);
static void uvNotifyLinkBrokenToApp(SSrvConn* conn);
static void uvNotifyLinkBrokenToApp(SSvrConn* conn);
static void destroySmsg(SSrvMsg* smsg);
static void destroySmsg(SSvrMsg* smsg);
// check whether already read complete packet
static SSrvConn* createConn(void* hThrd);
static void destroyConn(SSrvConn* conn, bool clear /*clear handle or not*/);
static void destroyConnRegArg(SSrvConn* conn);
static SSvrConn* createConn(void* hThrd);
static void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/);
static void destroyConnRegArg(SSvrConn* conn);
static int reallocConnRefHandle(SSrvConn* conn);
static int reallocConnRefHandle(SSvrConn* conn);
static void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd);
static void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd);
static void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd);
static void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd);
static void (*transAsyncHandle[])(SSrvMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease,
static void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd);
static void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd);
static void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd);
static void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd);
static void (*transAsyncHandle[])(SSvrMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease,
uvHandleRegister, NULL};
static int32_t exHandlesMgt;
@ -178,7 +178,7 @@ static bool addHandleToAcceptloop(void* arg);
tTrace("server conn %p received release request", conn); \
\
STransMsg tmsg = {.code = 0, .info.handle = (void*)conn, .info.ahandle = NULL}; \
SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); \
SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \
srvMsg->msg = tmsg; \
srvMsg->type = Release; \
srvMsg->pConn = conn; \
@ -233,18 +233,18 @@ static bool addHandleToAcceptloop(void* arg);
} while (0)
void uvAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
SSrvConn* conn = handle->data;
SSvrConn* conn = handle->data;
SConnBuffer* pBuf = &conn->readBuf;
transAllocBuffer(pBuf, buf);
}
// refers specifically to query or insert timeout
static void uvHandleActivityTimeout(uv_timer_t* handle) {
SSrvConn* conn = handle->data;
SSvrConn* conn = handle->data;
tDebug("%p timeout since no activity", conn);
}
static void uvHandleReq(SSrvConn* pConn) {
static void uvHandleReq(SSvrConn* pConn) {
SConnBuffer* pBuf = &pConn->readBuf;
char* msg = pBuf->buf;
uint32_t msgLen = pBuf->len;
@ -316,7 +316,7 @@ static void uvHandleReq(SSrvConn* pConn) {
void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
// opt
SSrvConn* conn = cli->data;
SSvrConn* conn = cli->data;
SConnBuffer* pBuf = &conn->readBuf;
if (nread > 0) {
pBuf->len += nread;
@ -354,17 +354,17 @@ void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* b
void uvOnTimeoutCb(uv_timer_t* handle) {
// opt
SSrvConn* pConn = handle->data;
SSvrConn* pConn = handle->data;
tError("server conn %p time out", pConn);
}
void uvOnSendCb(uv_write_t* req, int status) {
SSrvConn* conn = req->data;
SSvrConn* conn = req->data;
// transClearBuffer(&conn->readBuf);
if (status == 0) {
tTrace("server conn %p data already was written on stream", conn);
if (!transQueueEmpty(&conn->srvMsgs)) {
SSrvMsg* msg = transQueuePop(&conn->srvMsgs);
SSvrMsg* msg = transQueuePop(&conn->srvMsgs);
// if (msg->type == Release && conn->status != ConnNormal) {
// conn->status = ConnNormal;
// transUnrefSrvHandle(conn);
@ -376,7 +376,7 @@ void uvOnSendCb(uv_write_t* req, int status) {
destroySmsg(msg);
// send second data, just use for push
if (!transQueueEmpty(&conn->srvMsgs)) {
msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0);
msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0);
if (msg->type == Register && conn->status == ConnAcquire) {
conn->regArg.notifyCount = 0;
conn->regArg.init = 1;
@ -389,7 +389,7 @@ void uvOnSendCb(uv_write_t* req, int status) {
transQueuePop(&conn->srvMsgs);
taosMemoryFree(msg);
msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0);
msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0);
if (msg != NULL) {
uvStartSendRespInternal(msg);
}
@ -415,10 +415,10 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) {
taosMemoryFree(req);
}
static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) {
static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
tTrace("server conn %p prepare to send resp", smsg->pConn);
SSrvConn* pConn = smsg->pConn;
SSvrConn* pConn = smsg->pConn;
STransMsg* pMsg = &smsg->msg;
if (pMsg->pCont == 0) {
pMsg->pCont = (void*)rpcMallocCont(0);
@ -455,17 +455,17 @@ static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) {
wb->len = len;
}
static void uvStartSendRespInternal(SSrvMsg* smsg) {
static void uvStartSendRespInternal(SSvrMsg* smsg) {
uv_buf_t wb;
uvPrepareSendData(smsg, &wb);
SSrvConn* pConn = smsg->pConn;
SSvrConn* pConn = smsg->pConn;
// uv_timer_stop(&pConn->pTimer);
uv_write(&pConn->pWriter, (uv_stream_t*)pConn->pTcp, &wb, 1, uvOnSendCb);
}
static void uvStartSendResp(SSrvMsg* smsg) {
static void uvStartSendResp(SSvrMsg* smsg) {
// impl
SSrvConn* pConn = smsg->pConn;
SSvrConn* pConn = smsg->pConn;
if (pConn->broken == true) {
// persist by
@ -485,7 +485,7 @@ static void uvStartSendResp(SSrvMsg* smsg) {
return;
}
static void destroySmsg(SSrvMsg* smsg) {
static void destroySmsg(SSvrMsg* smsg) {
if (smsg == NULL) {
return;
}
@ -499,7 +499,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) {
QUEUE_REMOVE(h);
QUEUE_INIT(h);
SSrvConn* c = QUEUE_DATA(h, SSrvConn, queue);
SSvrConn* c = QUEUE_DATA(h, SSvrConn, queue);
while (T_REF_VAL_GET(c) >= 2) {
transUnrefSrvHandle(c);
}
@ -509,7 +509,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) {
void uvWorkerAsyncCb(uv_async_t* handle) {
SAsyncItem* item = handle->data;
SWorkThrdObj* pThrd = item->pThrd;
SSrvConn* conn = NULL;
SSvrConn* conn = NULL;
queue wq;
// batch process to avoid to lock/unlock frequently
@ -521,7 +521,7 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
queue* head = QUEUE_HEAD(&wq);
QUEUE_REMOVE(head);
SSrvMsg* msg = QUEUE_DATA(head, SSrvMsg, q);
SSvrMsg* msg = QUEUE_DATA(head, SSvrMsg, q);
if (msg == NULL) {
tError("unexcept occurred, continue");
continue;
@ -649,7 +649,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
uv_handle_type pending = uv_pipe_pending_type(pipe);
assert(pending == UV_TCP);
SSrvConn* pConn = createConn(pThrd);
SSvrConn* pConn = createConn(pThrd);
pConn->pTransInst = pThrd->pTransInst;
/* init conn timer*/
@ -768,10 +768,10 @@ void* transWorkerThread(void* arg) {
return NULL;
}
static SSrvConn* createConn(void* hThrd) {
static SSvrConn* createConn(void* hThrd) {
SWorkThrdObj* pThrd = hThrd;
SSrvConn* pConn = (SSrvConn*)taosMemoryCalloc(1, sizeof(SSrvConn));
SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn));
QUEUE_INIT(&pConn->queue);
QUEUE_PUSH(&pThrd->conn, &pConn->queue);
@ -794,7 +794,7 @@ static SSrvConn* createConn(void* hThrd) {
return pConn;
}
static void destroyConn(SSrvConn* conn, bool clear) {
static void destroyConn(SSvrConn* conn, bool clear) {
if (conn == NULL) {
return;
}
@ -808,13 +808,13 @@ static void destroyConn(SSrvConn* conn, bool clear) {
// uv_shutdown(req, (uv_stream_t*)conn->pTcp, uvShutDownCb);
}
}
static void destroyConnRegArg(SSrvConn* conn) {
static void destroyConnRegArg(SSvrConn* conn) {
if (conn->regArg.init == 1) {
transFreeMsg(conn->regArg.msg.pCont);
conn->regArg.init = 0;
}
}
static int reallocConnRefHandle(SSrvConn* conn) {
static int reallocConnRefHandle(SSvrConn* conn) {
uvReleaseExHandle(conn->refId);
uvRemoveExHandle(conn->refId);
// avoid app continue to send msg on invalid handle
@ -828,7 +828,7 @@ static int reallocConnRefHandle(SSrvConn* conn) {
return 0;
}
static void uvDestroyConn(uv_handle_t* handle) {
SSrvConn* conn = handle->data;
SSvrConn* conn = handle->data;
if (conn == NULL) {
return;
}
@ -884,7 +884,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
uv_loop_init(srv->loop);
taosThreadOnce(&transModuleInit, uvInitEnv);
transSrvInst++;
tranSSvrInst++;
assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
#ifdef WINDOWS
@ -981,7 +981,7 @@ void uvDestoryExHandle(void* handle) {
taosMemoryFree(handle);
}
void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) {
void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd) {
thrd->quit = true;
if (QUEUE_IS_EMPTY(&thrd->conn)) {
uv_walk(thrd->loop, uvWalkCb, NULL);
@ -990,8 +990,8 @@ void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) {
}
taosMemoryFree(msg);
}
void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) {
SSrvConn* conn = msg->pConn;
void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd) {
SSvrConn* conn = msg->pConn;
if (conn->status == ConnAcquire) {
reallocConnRefHandle(conn);
if (!transQueuePush(&conn->srvMsgs, msg)) {
@ -1004,13 +1004,13 @@ void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) {
}
destroySmsg(msg);
}
void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd) {
void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd) {
// send msg to client
tDebug("server conn %p start to send resp (2/2)", msg->pConn);
uvStartSendResp(msg);
}
void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd) {
SSrvConn* conn = msg->pConn;
void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd) {
SSvrConn* conn = msg->pConn;
tDebug("server conn %p register brokenlink callback", conn);
if (conn->status == ConnAcquire) {
if (!transQueuePush(&conn->srvMsgs, msg)) {
@ -1036,13 +1036,13 @@ void destroyWorkThrd(SWorkThrdObj* pThrd) {
}
taosThreadJoin(pThrd->thread, NULL);
SRV_RELEASE_UV(pThrd->loop);
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSrvMsg, destroySmsg);
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsg);
transDestroyAsyncPool(pThrd->asyncPool);
taosMemoryFree(pThrd->loop);
taosMemoryFree(pThrd);
}
void sendQuitToWorkThrd(SWorkThrdObj* pThrd) {
SSrvMsg* msg = taosMemoryCalloc(1, sizeof(SSrvMsg));
SSvrMsg* msg = taosMemoryCalloc(1, sizeof(SSvrMsg));
msg->type = Quit;
tDebug("server send quit msg to work thread");
transSendAsync(pThrd->asyncPool, &msg->q);
@ -1075,8 +1075,8 @@ void transCloseServer(void* arg) {
taosMemoryFree(srv);
transSrvInst--;
if (transSrvInst == 0) {
tranSSvrInst--;
if (tranSSvrInst == 0) {
TdThreadOnce tmpInit = PTHREAD_ONCE_INIT;
memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce));
uvCloseExHandleMgt();
@ -1087,7 +1087,7 @@ void transRefSrvHandle(void* handle) {
if (handle == NULL) {
return;
}
int ref = T_REF_INC((SSrvConn*)handle);
int ref = T_REF_INC((SSvrConn*)handle);
tDebug("server conn %p ref count: %d", handle, ref);
}
@ -1095,10 +1095,10 @@ void transUnrefSrvHandle(void* handle) {
if (handle == NULL) {
return;
}
int ref = T_REF_DEC((SSrvConn*)handle);
int ref = T_REF_DEC((SSvrConn*)handle);
tDebug("server conn %p ref count: %d", handle, ref);
if (ref == 0) {
destroyConn((SSrvConn*)handle, true);
destroyConn((SSvrConn*)handle, true);
}
}
@ -1113,12 +1113,12 @@ void transReleaseSrvHandle(void* handle) {
STransMsg tmsg = {.code = 0, .info.handle = exh, .info.ahandle = NULL, .info.refId = refId};
SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg));
srvMsg->msg = tmsg;
srvMsg->type = Release;
SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg));
m->msg = tmsg;
m->type = Release;
tTrace("server conn %p start to release", exh->handle);
transSendAsync(pThrd->asyncPool, &srvMsg->q);
transSendAsync(pThrd->asyncPool, &m->q);
uvReleaseExHandle(refId);
return;
_return1:
@ -1141,11 +1141,11 @@ void transSendResponse(const STransMsg* msg) {
SWorkThrdObj* pThrd = exh->pThrd;
ASYNC_ERR_JRET(pThrd);
SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg));
srvMsg->msg = tmsg;
srvMsg->type = Normal;
SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg));
m->msg = tmsg;
m->type = Normal;
tDebug("server conn %p start to send resp (1/2)", exh->handle);
transSendAsync(pThrd->asyncPool, &srvMsg->q);
transSendAsync(pThrd->asyncPool, &m->q);
uvReleaseExHandle(refId);
return;
_return1:
@ -1169,11 +1169,11 @@ void transRegisterMsg(const STransMsg* msg) {
SWorkThrdObj* pThrd = exh->pThrd;
ASYNC_ERR_JRET(pThrd);
SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg));
srvMsg->msg = tmsg;
srvMsg->type = Register;
SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg));
m->msg = tmsg;
m->type = Register;
tTrace("server conn %p start to register brokenlink callback", exh->handle);
transSendAsync(pThrd->asyncPool, &srvMsg->q);
transSendAsync(pThrd->asyncPool, &m->q);
uvReleaseExHandle(refId);
return;
@ -1193,7 +1193,7 @@ int transGetConnInfo(void* thandle, STransHandleInfo* pInfo) {
return -1;
}
SExHandle* ex = thandle;
SSrvConn* pConn = ex->handle;
SSvrConn* pConn = ex->handle;
struct sockaddr_in addr = pConn->addr;
pInfo->clientIp = (uint32_t)(addr.sin_addr.s_addr);

View File

@ -111,10 +111,12 @@ target_link_libraries (pushServer
)
add_test(
NAME transUT
COMMAND transUT
)
if(NOT TD_WINDOWS)
add_test(
NAME transUT
COMMAND transUT
)
endif(NOT TD_WINDOWS)
add_test(
NAME transUtilUt
COMMAND transportTest

View File

@ -74,6 +74,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_REPEAT_INIT, "Repeat initialization
TAOS_DEFINE_ERROR(TSDB_CODE_DUP_KEY, "Cannot add duplicate keys to hash")
TAOS_DEFINE_ERROR(TSDB_CODE_NEED_RETRY, "Retry needed")
TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE, "Out of memory in rpc queue")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TIMESTAMP, "Invalid timestamp format")
TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, "Ref out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, "too many Ref Objs")

View File

@ -26,6 +26,7 @@ typedef struct STaosQnode STaosQnode;
typedef struct STaosQnode {
STaosQnode *next;
STaosQueue *queue;
int64_t timestamp;
int32_t size;
int8_t itype;
int8_t reserved[3];
@ -144,6 +145,7 @@ void *taosAllocateQitem(int32_t size, EQItype itype) {
STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size);
if (pNode == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pNode->size = size;
pNode->itype = itype;
pNode->timestamp = taosGetTimestampUs();
@ -393,7 +395,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) {
int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; }
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp) {
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) {
STaosQnode *pNode = NULL;
int32_t code = 0;
@ -415,6 +417,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FI
*ppItem = pNode->item;
if (ahandle) *ahandle = queue->ahandle;
if (itemFp) *itemFp = queue->itemFp;
if (ts) *ts = pNode->timestamp;
queue->head = pNode->next;
if (queue->head == NULL) queue->tail = NULL;

View File

@ -75,19 +75,20 @@ static void *tQWorkerThreadFp(SQWorker *worker) {
void *msg = NULL;
void *ahandle = NULL;
int32_t code = 0;
int64_t ts = 0;
taosBlockSIGPIPE();
setThreadName(pool->name);
uDebug("worker:%s:%d is running", pool->name, worker->id);
while (1) {
if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ahandle, &fp) == 0) {
if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) {
uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset);
break;
}
if (fp != NULL) {
SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num};
SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts};
(*fp)(&info, msg);
}
}
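Taken together with the tqueue.c change above, the enqueue timestamp recorded in taosAllocateQitem() is handed back by taosReadQitemFromQset() and surfaces in SQueueInfo.timestamp; a queue callback would presumably forward it as the new ts argument of the qworker entry points. A sketch under that assumption (callback name and handles are placeholders, not actual dnode code):
// Sketch of a query-queue callback forwarding the enqueue timestamp so the
// qworker can account for time the message spent waiting in the queue.
static int32_t processQueryQueueSketch(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SQueueInfo *pInfo) {
  // pInfo->timestamp carries the enqueue time recorded in taosAllocateQitem()
  return qWorkerProcessQueryMsg(node, qWorkerMgmt, pMsg, pInfo->timestamp);
}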

View File

@ -1,80 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================ dnode1 start
$i = 0
$dbPrefix = acdb
$tbPrefix = actb
$db = $dbPrefix . $i
$tb = $tbPrefix . $i
$accountPrefix = acac
print =============== step1-4
sql show accounts
if $rows != 1 then
return -1
endi
sql show users
if $rows != 3 then
return -1
endi
$i = 0
$acc = $accountPrefix . $i
sql_error create account $acc PASS pass123
sql create account $acc PASS 'pass123'
#sql create account $acc PASS 'pass123' -x step1
# return -1
#step1:
sql create user $acc PASS 'pass123' -x step2
return -1
step2:
sql show accounts
if $rows != 2 then
return -1
endi
sql show users
if $rows != 3 then
return -1
endi
print =============== step5-6
sql drop account $acc
sql drop account $acc -x step5
return -1
step5:
sql show accounts
if $rows != 1 then
return -1
endi
sql show users
if $rows != 3 then
return -1
endi
print =============== step7
sql create account $acc PASS 'pass123'
#sql create account $acc PASS 'pass123' -x step7
# return -1
#step7:
sql show accounts
if $rows != 2 then
return -1
endi
sql drop account $acc
sql show accounts
if $rows != 1 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,99 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============= step1
sql create account oroot pass 'taosdata'
sql close
sql connect oroot
sleep 2000
print ============= step2
sql create user read pass 'taosdata'
sql create user write pass 'taosdata'
sql create database d1
sql create database d2
sql create table d1.t1 (ts timestamp, i int)
sql create table d2.t2 (ts timestamp, i int)
sql insert into d1.t1 values(now, 1)
sql insert into d2.t2 values(now, 1)
sql insert into d2.t2 values(now+1s, 2)
sql show databases
if $rows != 2 then
return -1
endi
sql show users
if $rows != 4 then
return -1
endi
sql select * from d1.t1
if $rows != 1 then
return -1
endi
sql select * from d2.t2
if $rows != 2 then
return -1
endi
print ============= step3
sql close
sql connect
sleep 2000
sql show databases
if $rows != 0 then
return -1
endi
sql show dnodes
print $data00 $data01 $data02 $data03
if $data02 != 2 then
return -1
endi
sql drop account oroot
print ============= step4
$x = 0
show4:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
if $data02 != 0 then
goto show4
endi
print ============= step5
sql create account oroot pass 'taosdata'
sql close
sql connect oroot
sleep 2000
sql show databases
if $rows != 0 then
return -1
endi
sql show users
if $rows != 2 then
return -1
endi
sql close
sql connect
sleep 2000
sql drop account oroot
sql show accounts
if $rows != 1 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,92 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
$i = 0
$dbPrefix = aldb
$tbPrefix = altb
$db = $dbPrefix . $i
$tb = $tbPrefix . $i
print =============== step1
sql drop account ac -x step0
return -1
step0:
sql create account PASS 123 -x step1
return -1
step1:
sql show accounts
if $rows != 1 then
return -1
endi
print =============== step2
sql drop account a -x step2
step2:
sql create account a PASS '123'
sql show accounts
if $rows != 2 then
return -1
endi
sql drop account a
sql show accounts
if $rows != 1 then
return -1
endi
print =============== step3
sql drop account abc01234567890123456789 -x step3
step3:
sql create account abc01234567890123456789 PASS '123'
sql show accounts
if $rows != 2 then
return -1
endi
sql drop account abc01234567890123456789
sql show accounts
if $rows != 1 then
return -1
endi
print =============== step4
sql create account abcd01234567890123456789012345689012345 PASS '123' -x step4
return -1
step4:
sql show accounts
if $rows != 1 then
return -1
endi
print =============== step5
sql drop account 123 -x step5
step5:
sql create account 123 pass '123' -x step51
return -1
step51:
sql create account a123 PASS '123'
sql show accounts
if $rows != 2 then
return -1
endi
sql drop account a123
sql show accounts
if $rows != 1 then
return -1
endi
sql show users
if $rows != 3 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,346 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============= step1
sql create user read pass 'taosdata'
sql create user write pass 'taosdata'
sql create user manage pass 'taosdata'
sql create user a PASS 'ade' privilege -x step11
return -1
step11:
sql create user a PASS 'ade' privilege a -x step12
return -1
step12:
sql create user a PASS 'ade' privilege read -x step13
return -1
step13:
sql show accounts
if $rows != 1 then
return -1
endi
sql show users
if $rows != 6 then
return -1
endi
sql alter user read privilege read
sql alter user write privilege write
sql_error alter user manage privilege super
print ============= step2
sql close
sql connect write
sleep 2000
sql create database d1
sql create database d2
sql create table d1.t1 (ts timestamp, i int)
sql create table d2.t2 (ts timestamp, i int)
sql insert into d1.t1 values(now, 1)
sql insert into d2.t2 values(now, 1)
sql insert into d2.t2 values(now+1s, 2)
sql show accounts
if $rows != 1 then
return -1
endi
sql show users
if $rows != 6 then
return -1
endi
sql show databases
if $rows != 2 then
return -1
endi
sql select * from d1.t1
if $rows != 1 then
return -1
endi
sql select * from d2.t2
if $rows != 2 then
return -1
endi
sql create account t1 pass 'taosdata' -x step21
return -1
step21:
sql create user t1 pass 'taosdata' -x step22
return -1
step22:
sql alter user read pass 'taosdata' -x step23
return -1
step23:
sql create dnode $hostname2 -x step24
return -1
step24:
sql drop dnode $hostname2 -x step25
return -1
step25:
sql create mnode 192.168.0.2 -x step26
return -1
step26:
sql drop mnode 192.168.0.2 -x step27
return -1
step27:
sql drop account root -x step28
return -1
step28:
sql alter user write pass 'taosdata'
print ============= step3
sql close
sql connect read
sleep 2000
sql create database d3 -x step31
return -1
step31:
sql create table d1.t3 (ts timestamp, i int) -x step32
return -1
step32:
#sql insert into d1.t1 values(now, 2) -x step33
# return -1
#step33:
sql show accounts
if $rows != 1 then
return -1
endi
sql show users
if $rows != 6 then
return -1
endi
sql show databases
if $rows != 2 then
return -1
endi
sql select * from d1.t1
if $rows != 1 then
return -1
endi
sql select * from d2.t2
if $rows != 2 then
return -1
endi
sql sql create account t1 pass 'taosdata' -x step34
return -1
step34:
sql sql create user t1 pass 'taosdata' -x step35
return -1
step35:
print ============= step4
sql close
sql connect manage
sleep 2000
sql create database d3
sql create database d4
sql create table d3.t3 (ts timestamp, i int)
sql create table d4.t4 (ts timestamp, i int)
sql show accounts
if $rows != 1 then
return -1
endi
sql show users
if $rows != 6 then
return -1
endi
sql show databases
if $rows != 4 then
return -1
endi
sql select * from d1.t1
if $rows != 1 then
return -1
endi
sql select * from d2.t2
if $rows != 2 then
return -1
endi
sql create account other pass 'taosdata' -x step41
return -1
step41:
sql close
sql connect
sleep 2000
sql create account other pass 'taosdata'
print ============= step5
sql close
sql connect other
sleep 2000
sql create user read pass 'taosdata' -x step51
return -1
step51:
sql create other write pass 'taosdata' -x step52
return -1
step52:
sql create user oread pass 'taosdata'
sql create user owrite pass 'taosdata'
sql create user omanage pass 'taosdata'
sql show users
print show users $rows
if $rows != 5 then
return -1
endi
sql alter user oread privilege read
sql alter user owrite privilege write
sql alter user oroot privilege super -x step53
return -1
step53:
sql alter user read privilege read -x step54
return -1
step54:
print ============= step6
sql close
sql connect owrite
sleep 2000
sql reset query cache
sleep 1000
sql create database d1
sql create database d3
sql create table d1.t1 (ts timestamp, i int)
sql create table d3.t3 (ts timestamp, i int)
sql insert into d1.t1 values(now, 11)
sql insert into d3.t3 values(now, 11)
sql insert into d3.t3 values(now+1s, 12)
sql show databases
if $rows != 2 then
return -1
endi
sql select * from d1.t1
if $rows != 1 then
return -1
endi
sql select * from d2.t2 -x step6
return -1
step6:
sql select * from d3.t3
if $rows != 2 then
return -1
endi
sql sql create account t1 pass 'taosdata' -x step61
return -1
step61:
sql sql create user t1 pass 'taosdata' -x step62
return -1
step62:
print ============= step7
sql close
sql connect oread
sleep 2000
sql create database d7 -x step71
return -1
step71:
sql show databases
if $rows != 2 then
return -1
endi
sql select * from d1.t1
if $rows != 1 then
return -1
endi
sql select * from d2.t2 -x step72
return -1
step72:
sql select * from d3.t3
if $rows != 2 then
return -1
endi
sql sql create account t1 pass 'taosdata' -x step73
return -1
step73:
sql sql create user t1 pass 'taosdata' -x step74
return -1
step74:
print ============= step8
sql close
sql connect omanage
sleep 2000
sql create account t1 pass 'taosdata' -x step81
return -1
step81:
sql create database d4
sql create table d4.t4 (ts timestamp, i int)
sql show databases
if $rows != 3 then
return -1
endi
sql select * from d1.t1
if $rows != 1 then
return -1
endi
sql select * from d2.t2 -x step82
return -1
step82:
sql select * from d3.t3
if $rows != 2 then
return -1
endi
print ============= step9
sql close
sql connect
sleep 2000
sql show databases
if $rows != 4 then
return -1
endi
sql drop account other
sql drop user read
sql drop user manage
sql drop user write
sql close
sql connect
sleep 2000
sql drop database d1
sql drop database d2
sql drop database d3
sql drop database d4
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,46 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print =============== show accounts
sql show accounts
if $rows != 1 then
return -1
endi
print $data00 $data01 $data02
print =============== create account1
sql create account account1 PASS 'account1'
sql show accounts
if $rows != 2 then
return -1
endi
print $data00 $data01 $data02
print $data10 $data11 $data22
print =============== create account2
sql create account account2 PASS 'account2'
sql show accounts
if $rows != 3 then
return -1
endi
print $data00 $data01 $data02
print $data10 $data11 $data22
print $data20 $data11 $data22
print =============== drop account1
sql drop account account1
sql show accounts
if $rows != 2 then
return -1
endi
print $data00 $data01 $data02
print $data10 $data11 $data22
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,114 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print =============== show accounts
sql show accounts
if $rows != 1 then
return -1
endi
print $data00 $data01 $data02 $data03 $data04 $data05 $data06
if $data00 != root then
return -1
endi
if $data02 != 3/128 then
return -1
endi
if $data03 != 0/128 then
return -1
endi
if $data04 != 0/2147483647 then
return -1
endi
if $data05 != 0/1000 then
return -1
endi
if $data06 != 0.000/unlimited then
return -1
endi
print =============== create account
sql create account hou pass "hou" tseries 80000 storage 10737418240 streams 10 qtime 3600 dbs 3 users 3 conns 10
sql show accounts
if $rows != 2 then
return -1
endi
print $data10 $data11 $data12 $data13 $data14 $data15 $data16
if $data10 != hou then
return -1
endi
if $data12 != 2/3 then
return -1
endi
if $data13 != 0/3 then
return -1
endi
if $data14 != 0/80000 then
return -1
endi
if $data15 != 0/10 then
return -1
endi
if $data16 != 0.000/10.000 then
return -1
endi
print =============== alter account
sql alter account hou pass "hou" tseries 8000 streams 10 dbs 5 users 5
sql show accounts
if $rows != 2 then
return -1
endi
print $data10 $data11 $data12 $data13 $data14 $data15 $data16
if $data10 != hou then
return -1
endi
if $data12 != 2/5 then
return -1
endi
if $data13 != 0/5 then
return -1
endi
if $data14 != 0/8000 then
return -1
endi
if $data15 != 0/10 then
return -1
endi
if $data16 != 0.000/10.000 then
return -1
endi
print =============== alter account
sql create account hou pass "hou" tseries 8000 streams 10 dbs 5 users 6
sql show accounts
if $rows != 2 then
return -1
endi
print $data10 $data11 $data12 $data13 $data14 $data15 $data16
if $data10 != hou then
return -1
endi
if $data12 != 2/6 then
return -1
endi
if $data13 != 0/5 then
return -1
endi
if $data14 != 0/8000 then
return -1
endi
if $data15 != 0/10 then
return -1
endi
if $data16 != 0.000/10.000 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,116 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============= step1
sql create user read pass 'taosdata1'
sql create user write pass 'taosdata1'
sql alter user read pass 'taosdata'
sql alter user write pass 'taosdata'
sql show accounts
if $rows != 1 then
return -1
endi
sql show users
if $rows != 5 then
return -1
endi
print ============= step2
sql close
sql connect read
sleep 2000
sql alter user read pass 'taosdata'
sql alter user write pass 'taosdata1' -x step2
return -1
step2:
print ============= step3
sql close
sql connect write
sleep 2000
sql alter user write pass 'taosdata'
sql alter user read pass 'taosdata' -x step3
return -1
step3:
print ============= step4
sql close
sleep 1000
sql connect
sleep 2000
sql create account oroot pass 'taosdata'
sql show accounts
if $rows != 2 then
return -1
endi
sql show users
if $rows != 5 then
return -1
endi
print ============= step5
sql close
sql connect oroot
sleep 2000
sql create user oread pass 'taosdata1'
sql create user owrite pass 'taosdata1'
sql alter user oread pass 'taosdata'
sql alter user owrite pass 'taosdata'
sql create user read pass 'taosdata1' -x step51
return -1
step51:
sql alter user read pass 'taosdata1' -x step52
return -1
step52:
sql show accounts -x step53
return -1
step53:
sql show users
print show users $rows
if $rows != 4 then
return -1
endi
print ============= step6
sql close
sql connect oread
sleep 2000
sql alter user oread pass 'taosdata'
sql alter user owrite pass 'taosdata1' -x step6
return -1
step6:
print ============= step7
sql close
sql connect owrite
sleep 2000
sql alter user owrite pass 'taosdata'
sql alter user oread pass 'taosdata' -x step7
return -1
step7:
print ============= step8
sql close
sql connect
sleep 2000
sql alter user oread pass 'taosdata'
sql alter user owrite pass 'taosdata'
sql alter user oroot pass 'taosdata'
sql drop account oroot
sql drop user read
sql drop user write
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,81 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
$i = 0
$dbPrefix = apdb
$tbPrefix = aptb
$db = $dbPrefix . $i
$tb = $tbPrefix . $i
$userPrefix = apusr
print =============== step1
$i = 0
$user = $userPrefix . $i
sql drop user $user -x step11
return -1
step11:
sql create user $user PASS -x step12
return -1
step12:
sql create user $user PASS 'taosdata'
sql show users
if $rows != 4 then
return -1
endi
print =============== step2
$i = 1
$user = $userPrefix . $i
sql drop user $user -x step2
step2:
sql create user $user PASS '1'
sql show users
if $rows != 5 then
return -1
endi
print =============== step3
$i = 2
$user = $userPrefix . $i
sql drop user $user -x step3
step3:
sql create user $user PASS 'abc0123456789'
sql show users
if $rows != 6 then
return -1
endi
print =============== step4
$i = 3
$user = $userPrefix . $i
sql create user $user PASS 'abcd012345678901234567891234567890' -x step4
return -1
step4:
sql show users
if $rows != 6 then
return -1
endi
$i = 0
while $i < 3
$user = $userPrefix . $i
sql drop user $user
$i = $i + 1
endw
sql show users
if $rows != 3 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,11 +0,0 @@
run unique/account/account_create.sim
run unique/account/account_delete.sim
run unique/account/account_len.sim
run unique/account/authority.sim
run unique/account/basic.sim
run unique/account/paras.sim
run unique/account/pass_alter.sim
run unique/account/pass_len.sim
run unique/account/usage.sim
run unique/account/user_create.sim
run unique/account/user_len.sim

View File

@ -1,154 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
#system sh/exec.sh -n monitor -s 1
system sh/exec.sh -n monitorInterval -s 1
sleep 2000
sql connect
print =============== show accounts
print =============== create account
sql alter account root pass "taosdata" tseries 8000 streams 10 dbs 5 users 5
sql show accounts
print $data00 $data01 $data02 $data03 $data04 $data05 $data06
if $data00 != root then
return -1
endi
if $data02 != 3/5 then
return -1
endi
if $data03 != 0/5 then
return -1
endi
if $data04 != 0/8000 then
return -1
endi
if $data05 != 0/10 then
return -1
endi
if $data06 != 0.000/unlimited then
return -1
endi
print =============== check usage account
sql create database d1 wal 2
sql create database d2 wal 2
sql create database d3 wal 2
sql create database d4 wal 2
sql create database d5 wal 2
sql create table d1.t1 (ts timestamp, i int);
sql create user u1 pass "u1"
sql show accounts
print $data10 $data11 $data12 $data13 $data14 $data15 $data16
if $data00 != root then
return -1
endi
if $data02 != 4/5 then
return -1
endi
if $data03 != 5/5 then
return -1
endi
if $data04 != 1/8000 then
return -1
endi
if $data05 != 0/10 then
return -1
endi
if $data06 != 0.000/unlimited then
return -1
endi
print =============== step2
sql alter account root pass "taosdata" tseries 10 storage 1073741824 streams 10 dbs 5 users 5
sql show accounts
print $data00 $data01 $data02 $data03 $data04 $data05 $data06
if $data00 != root then
return -1
endi
if $data02 != 4/5 then
return -1
endi
if $data03 != 5/5 then
return -1
endi
if $data04 != 1/10 then
return -1
endi
if $data05 != 0/10 then
return -1
endi
if $data06 != 0.000/1.000 then
return -1
endi
print =============== step3
sql alter account root pass "taosdata" tseries 10 storage 16 streams 10 dbs 5 users 5
sql show accounts
print $data00 $data01 $data02 $data03 $data04 $data05 $data06
if $data00 != root then
return -1
endi
if $data02 != 4/5 then
return -1
endi
if $data03 != 5/5 then
return -1
endi
if $data04 != 1/10 then
return -1
endi
if $data05 != 0/10 then
return -1
endi
if $data06 != 0.000/0.000 then
return -1
endi
print =============== step4
sql insert into d1.t1 values(now + 1s, 1)
sql insert into d1.t1 values(now + 2s, 2)
sleep 10000
print no write auth
sql_error insert into d1.t1 values(now + 3s, 2)
sql_error insert into d1.t1 values(now + 4s, 2)
sql alter account root pass "taosdata" tseries 10 storage 36 streams 10 dbs 5 users 5
sleep 10000
print has write auth
sql insert into d1.t1 values(now + 5s, 1)
sql insert into d1.t1 values(now + 6s, 2)
# no write auth
sleep 10000
print no write auth
sql_error insert into d1.t1 values(now + 7s, 2)
sql_error insert into d1.t1 values(now + 8s, 2)
print =============== step5
sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
sleep 10000
sql insert into d1.t1 values(now + 11s, 1)
sql insert into d1.t1 values(now + 12s, 2)
sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state no
sleep 10000
print no write auth
sql_error insert into d1.t1 values(now + 13s, 2)
sql_error insert into d1.t1 values(now + 14s, 2)
sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
sleep 10000
print has write auth
sql insert into d1.t1 values(now + 15s, 1)
sql insert into d1.t1 values(now + 16s, 2)
print =============== check grant
sql_error create database d6
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,84 +0,0 @@
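# User management test: creates the user `read`, rejects a duplicate create, verifies
# that a non-root session cannot drop users, and checks that `alter user ... privilege`
# changes show up in `show users` while invalid privileges and drops of the reserved
# users (_root, monitor) are rejected.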
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print =============== step1
sql show users
if $rows != 3 then
return -1
endi
sql create user read PASS 'pass123'
sql create user read PASS 'pass123' -x step1
return -1
step1:
sql show users
if $rows != 4 then
return -1
endi
sql alter user read PASS 'taosdata'
print =============== step2
sql close
sql connect read
sleep 2000
sql alter user read PASS 'taosdata'
print =============== step3
sql drop user read -x step31
return -1
step31:
sql drop user _root -x step32
return -1
step32:
sql drop user monitor -x step33
return -1
step33:
print =============== step4
sql close
sql connect
sleep 2000
sql alter user read privilege read
sql show users
print $data1_read
if $data1_read != readable then
return -1
endi
sql_error alter user read privilege super
sql show users
print $data1_read
if $data1_read != readable then
return -1
endi
sql alter user read privilege write
sql show users
if $data1_read != writable then
return -1
endi
sql alter user read privilege 1 -x step43
return -1
step43:
sql drop user _root -x step41
return -1
step41:
sql drop user monitor -x step42
return -1
step42:
sql drop user read
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,94 +0,0 @@
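# User name length test: creates and drops users with names of valid length, rejects
# an empty name, an over-long name and a purely numeric name, and confirms the user
# count in `show users` returns to its initial value afterwards.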
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
$i = 0
$dbPrefix = lm_us_db
$tbPrefix = lm_us_tb
$db = $dbPrefix . $i
$tb = $tbPrefix . $i
print =============== step1
sql drop user ac -x step0
return -1
step0:
sql create user PASS '123' -x step1
return -1
step1:
sql show users
if $rows != 3 then
return -1
endi
print =============== step2
sql drop user a -x step2
step2:
sleep 1000
sql create user a PASS '123'
sql show users
if $rows != 4 then
return -1
endi
sql drop user a
sql show users
if $rows != 3 then
return -1
endi
print =============== step3
sql drop user abc01234567890123456789 -x step3
step3:
sql create user abc01234567890123456789 PASS '123'
sql show users
if $rows != 4 then
return -1
endi
sql drop user abc01234567890123456789
sql show users
if $rows != 3 then
return -1
endi
print =============== step4
sql create user abcd0123456789012345678901234567890111 PASS '123' -x step4
return -1
step4:
sql show users
if $rows != 3 then
return -1
endi
print =============== step5
sql drop user 123 -x step5
step5:
sql create user 123 PASS '123' -x step61
return -1
step61:
sql create user a123 PASS '123'
sql show users
if $rows != 4 then
return -1
endi
sql drop user a123
sql show users
if $rows != 3 then
return -1
endi
sql show accounts
if $rows != 1 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,192 +0,0 @@
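# HTTP admin interface test: prepares table d1.table_admin, then exercises the
# /admin/login, /admin/logout, /admin/info, /admin/meta, /admin/sql and /admin/all
# endpoints via curl, covering both error paths (missing or malformed Authorization
# headers, bad credentials) and successful query and insert responses.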
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
#system sh/cfg.sh -n dnode1 -c adminRowLimit -v 10
system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135
system sh/exec.sh -n dnode1 -s start
sql connect
sleep 2000
print ============================ dnode1 start
print =============== step0 - prepare data
sql create database d1
sql use d1
sql create table table_admin (ts timestamp, i int)
sql insert into table_admin values('2017-12-25 21:28:41.022', 1)
sql insert into table_admin values('2017-12-25 21:28:42.022', 2)
sql insert into table_admin values('2017-12-25 21:28:43.022', 3)
sql insert into table_admin values('2017-12-25 21:28:44.022', 4)
sql insert into table_admin values('2017-12-25 21:28:45.022', 5)
sql insert into table_admin values('2017-12-25 21:28:46.022', 6)
sql insert into table_admin values('2017-12-25 21:28:47.022', 7)
sql insert into table_admin values('2017-12-25 21:28:48.022', 8)
sql insert into table_admin values('2017-12-25 21:28:49.022', 9)
sql insert into table_admin values('2017-12-25 21:28:50.022', 10)
print =============== step1 - login
system_content curl 127.0.0.1:7111/admin/
print 1-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
print actual: $system_content
return -1
endi
system_content curl 127.0.0.1:7111/admin/xx
print 2-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/admin/login
print 3-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/admin/login/root
print 4-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/admin/login/root/123
print 5-> $system_content
if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/admin/login/root/123/1/1/3
print 6-> $system_content
if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
return -1
endi
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' -d 'show databases' 127.0.0.1:7111/admin/login/root/1
print 7-> $system_content
if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:7111/admin/login/root/1
print 8-> $system_content
if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
return -1
endi
sleep 2000
system_content curl 127.0.0.1:7111/admin/login/root/taosdata
print 9 -----> $system_content
if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then
return -1
endi
#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/admin/login/root/1
#print 10-> $system_content
#if $system_content != @{"status":"error","code":29,"desc":"failed to connect to server"}@ then
# return -1
#endi
print =============== step2 - logout
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/logout
print 10 -----> $system_content
if $system_content != @{"status":"succ","code":0,"desc":"logout success"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/admin/logout
print 11 -----> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
print =============== step3 - info
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info
print curl 127.0.0.1:7111/admin/info -----> $system_content
if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then
return -1
endi
print =============== step4 - meta
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:7111/admin/meta
print curl 127.0.0.1:7111/admin/meta -----> $system_content
#if $system_content != @{"status":"succ","head":["column type","column name","column bytes"],"data":[["binary","IP",16],["timestamp","created time",8],["binary","status",10],["binary","role",10],["binary","public ip",16]],"rows":5}@ then
# return -1
#endi
print =============== step5 - query data
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql
print curl 127.0.0.1:7111/admin/all -----> $system_content
if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql
print curl 127.0.0.1:7111/admin/sql -----> $system_content
if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then
return -1
endi
print =============== step6 - insert data
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/admin/sql
print curl 127.0.0.1:7111/admin/sql -----> $system_content
if $system_content != @{"status":"succ","head":["affect_rows"],"data":[[1]],"rows":1}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all
print curl 127.0.0.1:7111/admin/all -----> $system_content
if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then
print actual: $system_content
print expect =======> {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}
return -1
endi
#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql
#print curl 127.0.0.1:7111/admin/sql -----> $system_content
#if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:51.022",11],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:42.022",2]],"rows":10}@ then
# return -1
#endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info
print curl 127.0.0.1:7111/admin/info -----> $system_content
if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then
return -1
endi
print =============== step7 - use dbs
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:7111/admin/all
print 23-> $system_content
if $system_content != @{"status":"error","code":4360,"desc":"no need to execute use db cmd"}@ then
return -1
endi
print =============== step8 - monitor dbs
#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:7111/admin/sqls
#print 24-> $system_content
#if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","master"]],"rows":1}]@ then
# return -1
# endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,247 +0,0 @@
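# OpenTSDB write path test: posts JSON datapoints to /opentsdb/<db>/put via curl and
# checks the error codes returned for malformed payloads (missing database name,
# metric, timestamp, value or tags), then verifies single-point, multi-point and
# summary (details=false) inserts through queries on the /rest/sql endpoint.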
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================ dnode1 start
print =============== step1 - parse
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/
print $system_content
if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db123456789012345678901234567890db
print $system_content
if $system_content != @{"status":"error","code":4497,"desc":"database name too long"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/
print $system_content
if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put2
print $system_content
if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4498,"desc":"invalid opentsdb json fromat"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4501,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4502,"desc":"metric name type should be string"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4503,"desc":"metric name length is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":1547,"desc":"Timestamp data out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4505,"desc":"timestamp not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4506,"desc":"timestamp type should be integer"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4507,"desc":"timestamp value smaller than 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4517,"desc":"value not find"}@ then
return -1
endi
#######
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4508,"desc":"tags not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":866,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4512,"desc":"tag name is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4513,"desc":"tag name length too long"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4516,"desc":"tag value can not more than 64"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"status":"error","code":4515,"desc":"tag value is null"}@ then
return -1
endi
sleep 2000
print =============== step2 - insert single data
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then
return -1
endi
system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/
print $system_content
if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then
return -1
endi
print =============== step3 - multi-query data
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846405000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}},{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web02","timestamp":1346846402000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web02"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":2,"affected_rows":2}@ then
return -1
endi
system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/
print $system_content
if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:05.000",18.000000000]],"rows":2}@ then
return -1
endi
system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/
print $system_content
if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[3]],"rows":1}@ then
return -1
endi
print =============== step4 - summary-put data
system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put?details=false
print $system_content
if $system_content != @{"failed":0,"success":2}@ then
return -1
endi
system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/
print $system_content
if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:05.000",9.000000000]],"rows":2}@ then
return -1
endi
system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:7111/rest/sql/
print $system_content
if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[2]],"rows":1}@ then
return -1
endi
print =============== step5 - prepare data
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/
print $system_content
if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[7]],"rows":1}@ then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,2 +0,0 @@
run unique/http/admin.sim
run general/http/opentsdb.sim

View File

@ -1,88 +0,0 @@
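# Monitoring test with two mnodes: brings up dnode1 and dnode2 with monitor enabled,
# waits for the master/slave roles, restarts both dnodes, and checks that the
# monitoring tables log.dn1 and log.dn2 keep accumulating rows across the restart.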
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode1 -c monitor -v 1
system sh/cfg.sh -n dnode2 -c monitor -v 1
print ============== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sql connect
print ============== step2
sql create dnode $hostname2
$x = 0
show2:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show2
endi
if $data2_2 != slave then
goto show2
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print ============== step3
system sh/exec.sh -n dnode2 -s start
sleep 10000
system sh/exec.sh -n dnode1 -s start
sql connect
print =============== step4
sql select * from log.dn1
$d1_first = $rows
sql select * from log.dn2
$d2_first = $rows
$x = 0
show4:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show4
endi
if $data2_2 != slave then
goto show4
endi
sleep 2000
sql select * from log.dn1
$d1_second = $rows
sql select * from log.dn2
$d2_second = $rows
print dnode1 $d1_first $d1_second
print dnode2 $d2_first $d2_second
if $d1_first >= $d1_second then
return -1
endi
if $d2_first >= $d2_second then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT

View File

@ -1,44 +0,0 @@
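# Two-mnode startup test: starts dnode2 before dnode1, confirms dnode1 still becomes
# master, then adds dnode2 as a dnode and waits until it is reported as the slave mnode.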
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
print ============== step1
system sh/exec.sh -n dnode2 -s start
sleep 10000
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
return -1
endi
print ============== step2
sql create dnode $hostname2
$x = 0
show2:
$x = $x + 1
sleep 2000
if $x == 5 then
return -1
endi
sql show mnodes -x show2
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show2
endi
if $data2_2 != slave then
goto show2
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT

View File

@ -1,114 +0,0 @@
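# Two-mnode failover test: verifies that the master dnode cannot be dropped, that
# `show mnodes` and `drop dnode` fail while no master is available, that the roles
# recover after dnode1 restarts, and that a newly added dnode3 stays out of the
# mnode set.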
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
print ============== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
return -1
endi
print ============== step2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
$x = 0
show2:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show2
endi
if $data2_2 != slave then
goto show2
endi
print ============== step3
sql_error drop dnode $hostname1
print should not drop master
print ============== step4
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 3000
sql_error show mnodes
print error of no master
print ============== step5
sql_error drop dnode $hostname1
print error of no master
print ============== step6
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql close
sql connect
$x = 0
show6:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes -x show6
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show6
endi
if $data2_2 != slave then
goto show6
endi
print ============== step7
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
$x = 0
show7:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
if $data2_1 != master then
goto show7
endi
if $data2_2 != slave then
goto show7
endi
if $data3_3 != null then
goto show7
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,141 +0,0 @@
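# Two-mnode replacement test: drops the slave dnode2 so dnode3 takes over the slave
# mnode role, re-adds a redeployed dnode2 which must stay out of the mnode set, and
# finally checks that stopping the master makes `show mnodes` and `drop dnode` fail.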
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
print ============== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
return -1
endi
print ============== step2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
$x = 0
show2:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show2
endi
if $data2_2 != slave then
goto show2
endi
print ============== step3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
sleep 8000
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
return -1
endi
if $dnode2Role != slave then
return -1
endi
if $dnode3Role != null then
return -1
endi
print ============== step4
sql drop dnode $hostname2
$x = 0
step4:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
goto step4
endi
if $dnode2Role != null then
goto step4
endi
if $dnode3Role != slave then
goto step4
endi
system sh/exec.sh -n dnode2 -s stop
print ============== step5
sleep 2000
sql create dnode $hostname2
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/exec.sh -n dnode2 -s start
$x = 0
step5:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
goto step5
endi
if $dnode2Role != null then
goto step5
endi
if $dnode3Role != slave then
goto step5
endi
print ============== step6
system sh/exec.sh -n dnode1 -s stop
sql_error show mnodes
print ============== step7
sql_error drop dnode $hostname1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT

View File

@ -1,84 +0,0 @@
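# Master restart test: with two mnodes, stops the master dnode1, confirms that
# `show mnodes` and `drop dnode` fail without a master, then restarts dnode1
# (re-adding it must fail because it already exists) and waits until the
# master/slave roles are re-established.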
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
print ============== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
return -1
endi
print ============== step2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
$x = 0
show2:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show2
endi
if $data2_2 != slave then
goto show2
endi
print ============== step3
system sh/exec.sh -n dnode1 -s stop
sleep 2000
sql_error show mnodes
print ============== step4
sql_error drop dnode $hostname1
print ============== step5
system sh/exec.sh -n dnode1 -s start
sql_error create dnode $hostname1
sql close
sql connect
$x = 0
step5:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show mnodes -x step5
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto step5
endi
if $data2_2 != slave then
goto step5
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,95 +0,0 @@
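# Slave replacement test: builds a two-mnode cluster with a spare third dnode,
# drops the slave dnode2 and verifies that dnode3 is promoted to the slave mnode role.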
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
print ============== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
return -1
endi
print ============== step2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
$x = 0
show2:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show2
endi
if $data2_2 != slave then
goto show2
endi
print ============== step3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
sleep 6000
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
return -1
endi
if $dnode2Role != slave then
return -1
endi
if $dnode3Role != null then
return -1
endi
print ============== step4
sql drop dnode $hostname2
sleep 6000
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
return -1
endi
if $dnode2Role != null then
return -1
endi
if $dnode3Role != slave then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,123 +0,0 @@
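# Slave replacement with re-join test: drops the slave dnode2 so dnode3 becomes the
# slave mnode, then redeploys dnode2, adds it back and verifies that it does not
# rejoin the mnode set.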
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
print ============== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
return -1
endi
print ============== step2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
$x = 0
show2:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show2
endi
if $data2_2 != slave then
goto show2
endi
print ============== step3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
sleep 6000
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
return -1
endi
if $dnode2Role != slave then
return -1
endi
if $dnode3Role != null then
return -1
endi
print ============== step4
sql drop dnode $hostname2
sleep 6000
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
return -1
endi
if $dnode2Role != null then
return -1
endi
if $dnode3Role != slave then
return -1
endi
print ============== step5
system sh/exec.sh -n dnode2 -s stop
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
sleep 3000
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
sleep 6000
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
return -1
endi
if $dnode2Role != null then
return -1
endi
if $dnode3Role != slave then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,68 +0,0 @@
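# Three-mnode formation test: with numOfMnodes set to 3, registers dnode2 and dnode3
# and waits until dnode1 is master and both new dnodes are reported as slave mnodes.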
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c balanceInterval -v 3000
system sh/cfg.sh -n dnode2 -c balanceInterval -v 3000
system sh/cfg.sh -n dnode3 -c balanceInterval -v 3000
print ============== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data3_3
if $data2_1 != master then
return -1
endi
if $data3_2 != null then
return -1
endi
if $data3_3 != null then
return -1
endi
print ============== step2
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 3000
sql create dnode $hostname2
sql create dnode $hostname3
$x = 0
step2:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
goto step2
endi
if $dnode2Role != slave then
goto step2
endi
if $dnode3Role != slave then
goto step2
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT

View File

@ -1,214 +0,0 @@
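# Three-mnode membership test: adds dnode2 and dnode3 as slave mnodes, drops and then
# re-adds a redeployed dnode2 (which regains a slave role under a new dnode id), then
# stops the master dnode1, waits for it to be reported offline and finally drops it.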
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
print ============== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data3_3
if $data2_1 != master then
return -1
endi
if $data3_2 != null then
return -1
endi
if $data3_3 != null then
return -1
endi
print ============== step2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
$x = 0
step2:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
goto step2
endi
if $dnode2Role != slave then
goto step2
endi
if $dnode3Role != null then
goto step2
endi
print ============== step3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
$x = 0
step3:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
goto step3
endi
if $dnode2Role != slave then
goto step3
endi
if $dnode3Role != slave then
goto step3
endi
print ============== step4
sql drop dnode $hostname2
$x = 0
step4:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
goto step4
endi
if $dnode2Role != null then
goto step4
endi
if $dnode3Role != slave then
goto step4
endi
system sh/exec.sh -n dnode2 -s stop
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/exec.sh -n dnode2 -s start
print ============== step5
sql create dnode $hostname2
$x = 0
step5:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_4
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
goto step5
endi
if $dnode2Role != slave then
goto step5
endi
if $dnode3Role != slave then
goto step5
endi
print ============== step6
system sh/exec.sh -n dnode1 -s stop
$x = 0
step6:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes -x step6
$dnode1Role = $data2_1
$dnode2Role = $data2_4
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != offline then
goto step6
endi
#if $dnode2Role != master then
# return -1
#endi
#if $dnode3Role != slave then
# return -1
#endi
print ============== step7
sql drop dnode $hostname1
$x = 0
step7:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes -x step7
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != null then
goto step7
endi
#if $dnode2Role != master then
# return -1
#endi
#if $dnode3Role != slave then
# return -1
#endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,269 +0,0 @@
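# Four-dnode, three-mnode test: fills the mnode set with dnode2 and dnode3 while
# dnode4 stays as a spare, drops dnode2 so dnode4 is promoted, re-adds a redeployed
# dnode2 which must stay out of the mnode set, and finally stops and drops the master
# dnode1 so the re-added dnode2 takes over a slave role.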
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
print ============== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data3_3
if $data2_1 != master then
return -1
endi
if $data3_2 != null then
return -1
endi
if $data3_3 != null then
return -1
endi
print ============== step2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
$x = 0
step2:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
goto step2
endi
if $dnode2Role != slave then
goto step2
endi
if $dnode3Role != null then
goto step2
endi
if $dnode4Role != null then
goto step2
endi
print ============== step3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
$x = 0
step3:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
goto step3
endi
if $dnode2Role != slave then
goto step3
endi
if $dnode3Role != slave then
goto step3
endi
if $dnode4Role != null then
goto step3
endi
print ============== step4
system sh/exec.sh -n dnode4 -s start
sql create dnode $hostname4
$x = 0
step4:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
goto step4
endi
if $dnode2Role != slave then
goto step4
endi
if $dnode3Role != slave then
goto step4
endi
if $dnode4Role != null then
goto step4
endi
print ============== step5
sql drop dnode $hostname2
$x = 0
step5:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
goto step5
endi
if $dnode2Role != null then
goto step5
endi
if $dnode3Role != slave then
goto step5
endi
if $dnode4Role != slave then
goto step5
endi
system sh/exec.sh -n dnode2 -s stop
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/exec.sh -n dnode2 -s start
print ============== step6
sql create dnode $hostname2
$x = 0
step6:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
goto step6
endi
if $dnode2Role != null then
goto step6
endi
if $dnode3Role != slave then
goto step6
endi
if $dnode4Role != slave then
goto step6
endi
print ============== step7
system sh/exec.sh -n dnode1 -s stop
$x = 0
step7:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes -x step7
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != offline then
goto step7
endi
print ============== step8
sql drop dnode $hostname1
$x = 0
step8:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes -x step8
$dnode1Role = $data2_1
$dnode2Role = $data2_5
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != null then
goto step8
endi
if $dnode2Role != slave then
goto step8
endi
#if $dnode3Role != master then
# return -1
#endi
#if $dnode4Role != slave then
# return -1
#endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,87 +0,0 @@
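# Mnode churn test: registers dnode2 and dnode3, repeatedly stops and restarts them,
# and then requires the cluster to settle with dnode1 as master, dnode2 as slave and
# dnode3 outside the mnode set.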
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
print ============== step1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
return -1
endi
if $dnode2Role != null then
return -1
endi
if $dnode3Role != null then
return -1
endi
print ============== step2
sql create dnode $hostname2
sql create dnode $hostname3
print ============== step3
print ========= start dnode2 and dnode3
system sh/exec.sh -n dnode2 -s start
sleep 1000
system sh/exec.sh -n dnode3 -s start
sleep 8000
system sh/exec.sh -n dnode2 -s stop
system sh/exec.sh -n dnode3 -s stop
sleep 4000
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 4000
system sh/exec.sh -n dnode2 -s stop
system sh/exec.sh -n dnode3 -s stop
sleep 4000
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
print ============== step4
$x = 0
step4:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
goto step4
endi
if $dnode2Role != slave then
goto step4
endi
if $dnode3Role != null then
goto step4
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT

View File

@ -1,9 +0,0 @@
run unique/mnode/mgmt21.sim
run unique/mnode/mgmt22.sim
run unique/mnode/mgmt23.sim
run unique/mnode/mgmt24.sim
run unique/mnode/mgmt25.sim
run unique/mnode/mgmt26.sim
run unique/mnode/mgmt33.sim
run unique/mnode/mgmt34.sim
run unique/mnode/mgmtr2.sim

View File

@ -1,312 +0,0 @@
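# Vnode balance test with continuous queries: creates several super tables and
# per-table result tables via `create table ... as select count(*) ... interval(10s)`,
# records their row counts, then adds dnode2 and waits until the vnodes are rebalanced
# across both dnodes; afterwards the source tables must still hold all inserted rows
# and the continuous-query tables must have kept growing.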
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c statusInterval -v 1
system sh/cfg.sh -n dnode2 -c statusInterval -v 1
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode2 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
$dbPrefix = db
$tbPrefix = tb
$mtPrefix = mt
$stPrefix = st
$tbNum = 3
$rowNum = 200
print ========= start dnode1
system sh/exec.sh -n dnode1 -s start
sql connect
print ============== step1
$db = $dbPrefix
sql create database $db
sql use $db
$i = 0
$st = $stPrefix . $i
$mt = $mtPrefix . $i
$tbNum = 3
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
if $i == 0 then
sleep 2000
endi
$x = 0
$y = 0
while $y < $rowNum
$ms = $x . s
sql insert into $tb values (now + $ms , $y , $y )
$x = $x + 1
$y = $y + 1
endw
$i = $i + 1
endw
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
$st = $stPrefix . $i
$mt = $mtPrefix . $i
$tbNum = 6
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
if $i == 0 then
sleep 2000
endi
$x = 0
$y = 0
while $y < $rowNum
$ms = $x . s
sql insert into $tb values (now + $ms , $y , $y )
$x = $x + 1
$y = $y + 1
endw
$i = $i + 1
endw
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
$st = $stPrefix . $i
$mt = $mtPrefix . $i
$tbNum = 9
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
if $i == 0 then
sleep 2000
endi
$x = 0
$y = 0
while $y < $rowNum
$ms = $x . s
sql insert into $tb values (now + $ms , $y , $y )
$x = $x + 1
$y = $y + 1
endw
$i = $i + 1
endw
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
$st = $stPrefix . $i
$mt = $mtPrefix . $i
$tbNum = 12
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
if $i == 0 then
sleep 2000
endi
$x = 0
$y = 0
while $y < $rowNum
$ms = $x . s
sql insert into $tb values (now + $ms , $y , $y )
$x = $x + 1
$y = $y + 1
endw
$i = $i + 1
endw
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
print =============== step2
sql show tables
if $rows != 16 then
return -1
endi
print =============== step3
print sleep 22 seconds
sleep 22000
$i = 0
$mt = $mtPrefix . $i
$st = $stPrefix . $i
sql select * from $st
$r0 = $rows
print $st ==> $r0 $data00 $data01 $data10 $data11
$i = 3
$mt = $mtPrefix . $i
$st = $stPrefix . $i
sql select * from $st
$r3 = $rows
print $st ==> $r3 $data00 $data01 $data10 $data11
$i = 6
$mt = $mtPrefix . $i
$st = $stPrefix . $i
sql select * from $st
$r6 = $rows
print $st ==> $r6 $data00 $data01 $data10 $data11
$i = 9
$mt = $mtPrefix . $i
$st = $stPrefix . $i
sql select * from $st
$r9 = $rows
print $st ==> $r9 $data00 $data01 $data10 $data11
print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9
$x = 0
show1:
$x = $x + 1
sleep 2000
if $x == 20 then
return -1
endi
sql show dnodes -x show1
$dnode1Vnodes = $data3_192.168.0.1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data3_192.168.0.2
print dnode2 $dnode2Vnodes
if $dnode1Vnodes != 0 then
goto show1
endi
if $dnode2Vnodes != NULL then
goto show1
endi
print =============== step4 start dnode2
sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sleep 8000
$x = 0
show2:
$x = $x + 1
sleep 2000
if $x == 20 then
return -1
endi
sql show dnodes -x show2
$dnode1Vnodes = $data3_192.168.0.1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data3_192.168.0.2
print dnode2 $dnode2Vnodes
if $dnode1Vnodes != 2 then
goto show2
endi
if $dnode2Vnodes != 2 then
goto show2
endi
print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9
print =============== step5
print sleep 22 seconds
sleep 22000
print =============== step6
$i = 0
$tb = $tbPrefix . $i
$st = $stPrefix . $i
sql select * from $tb
if $rows != $rowNum then
return -1
endi
$i = 3
$tb = $tbPrefix . $i
$st = $stPrefix . $i
sql select * from $tb
if $rows != $rowNum then
return -1
endi
$i = 6
$tb = $tbPrefix . $i
$st = $stPrefix . $i
sql select * from $tb
if $rows != $rowNum then
return -1
endi
$i = 9
$tb = $tbPrefix . $i
$st = $stPrefix . $i
sql select * from $tb
if $rows != $rowNum then
return -1
endi
print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9
print =============== step7
$i = 0
$tb = $tbPrefix . $i
$st = $stPrefix . $i
sql select * from $st
print $st ==> $r0 $rows , $data00 $data01 $data10 $data11
if $rows == 0 then
return -1
endi
if $rows <= $r0 then
return -1
endi
$i = 3
$tb = $tbPrefix . $i
$st = $stPrefix . $i
sql select * from $st
print $st ==> $r3 $rows , $data00 $data01 $data10 $data11
if $rows == 0 then
return -1
endi
if $rows <= $r3 then
return -1
endi
$i = 6
$tb = $tbPrefix . $i
$st = $stPrefix . $i
sql select * from $st
print $st ==> $r6 $rows , $data00 $data01 $data10 $data11
if $rows == 0 then
return -1
endi
if $rows <= $r6 then
return -1
endi
$i = 9
$tb = $tbPrefix . $i
$st = $stPrefix . $i
sql select * from $st
print $st ==> $r0 $rows , $data00 $data01 $data10 $data11
if $rows == 0 then
return -1
endi
if $rows <= $r9 then
return -1
endi
print =============== clear
system sh/exec.sh -n dnode1 -s stop
system sh/exec.sh -n dnode2 -s stop

Some files were not shown because too many files have changed in this diff.