Merge branch 'develop' into feature/TD-4666
commit 7f49ece341
@@ -147,7 +147,11 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
IF ("${CPUTYPE}" STREQUAL "apple_m1")
SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ELSE ()
SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG")
ELSE ()

@@ -108,6 +108,10 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE)
SET(TD_MIPS_64 TRUE)
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
SET(CPUTYPE "apple_m1")
MESSAGE(STATUS "Set CPUTYPE to apple silicon m1")
SET(TD_ARM_64 TRUE)
ENDIF ()

ELSE ()

@@ -533,7 +533,7 @@ Query OK, 1 row(s) in set (0.000141s)
| taos-jdbcdriver version | TDengine version | JDK version |
| -------------------- | ----------------- | -------- |
| 2.0.31 | 2.1.3.0 and above | 1.8.x |
| 2.0.22 - 20.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x and above | 1.8.x |

@@ -259,7 +259,7 @@ typedef struct taosField {

Returns the reason why the most recent API call failed; the return value is a string.

- `char *taos_errno(TAOS_RES *res)`
- `int taos_errno(TAOS_RES *res)`

Returns the reason why the most recent API call failed; the return value is an error code.

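A minimal sketch of how the two error helpers documented above are typically used together (assuming the standard taos.h C client API; the connection parameters are placeholders and are not part of this commit):

#include <stdio.h>
#include "taos.h"

int main(void) {
  taos_init();
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return 1;
  }
  TAOS_RES *res = taos_query(conn, "create database if not exists demo");
  if (taos_errno(res) != 0) {
    /* taos_errno gives the numeric error code, taos_errstr the readable reason */
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
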
@@ -637,6 +637,19 @@ int32_t bnDropDnode(SDnodeObj *pDnode) {
return TSDB_CODE_SUCCESS;
}

int32_t bnDnodeCanCreateMnode(struct SDnodeObj *pDnode) {
if (pDnode == NULL)
return 0;

if (pDnode->isMgmt || pDnode->alternativeRole == TAOS_DN_ALTERNATIVE_ROLE_VNODE
|| pDnode->status == TAOS_DN_STATUS_DROPPING
|| pDnode->status == TAOS_DN_STATUS_OFFLINE) {
return 0;
} else {
return 1;
}
}

static void bnMonitorDnodeModule() {
int32_t numOfMnodes = mnodeGetMnodesNum();
if (numOfMnodes >= tsNumOfMnodes) return;

@@ -645,13 +658,7 @@ static void bnMonitorDnodeModule() {
SDnodeObj *pDnode = tsBnDnodes.list[i];
if (pDnode == NULL) break;

if (pDnode->isMgmt || pDnode->status == TAOS_DN_STATUS_DROPPING || pDnode->status == TAOS_DN_STATUS_OFFLINE) {
continue;
}

if (pDnode->alternativeRole == TAOS_DN_ALTERNATIVE_ROLE_VNODE) {
continue;
}
if (!bnDnodeCanCreateMnode(pDnode)) continue;

mLInfo("dnode:%d, numOfMnodes:%d expect:%d, create mnode in this dnode", pDnode->dnodeId, numOfMnodes, tsNumOfMnodes);
mnodeCreateMnode(pDnode->dnodeId, pDnode->dnodeEp, true);

@@ -217,7 +217,7 @@ void tscColumnListDestroy(SArray* pColList);
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid);
void tscColumnListCopyAll(SArray* dst, const SArray* src);

void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId);
void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar);

void tscDequoteAndTrimToken(SStrToken* pToken);
int32_t tscValidateName(SStrToken* pToken);

@@ -320,7 +320,7 @@ void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo);

int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock);
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar);

void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent);
void destroyTableNameList(SInsertStatementParam* pInsertParam);

@@ -745,19 +745,23 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param,
switch(param->type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT:
size = 1;
break;

case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
size = 2;
break;

case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_FLOAT:
size = 4;
break;

case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
size = 8;

@@ -1311,15 +1311,8 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
return false;
}

/* timestamp in tag is not allowed */
for (int32_t i = 0; i < numOfTags; ++i) {
TAOS_FIELD* p = taosArrayGet(pTagsList, i);

//if (p->type == TSDB_DATA_TYPE_TIMESTAMP) {
// invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
// return false;
//}

if (!isValidDataType(p->type)) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
return false;

@@ -2176,7 +2169,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const char* msg3 = "illegal column name";
const char* msg4 = "invalid table name";
const char* msg5 = "parameter is out of range [0, 100]";
const char* msg6 = "function applied to tags not allowed";
const char* msg6 = "functions applied to tags are not allowed";
const char* msg7 = "normal table can not apply this function";
const char* msg8 = "multi-columns selection does not support alias column name";
const char* msg9 = "diff/derivative can no be applied to unsigned numeric type";

@@ -3089,9 +3082,9 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) {
}

bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "TWA/Diff/Derivative/Irate not allowed to apply to super table directly";
const char* msg1 = "TWA/Diff/Derivative/Irate are not allowed to apply to super table directly";
const char* msg2 = "TWA/Diff/Derivative/Irate only support group by tbname for super table query";
const char* msg3 = "function not support for super table query";
const char* msg3 = "functions not support for super table query";

// filter sql function not supported by metric query yet.
size_t size = tscNumOfExprs(pQueryInfo);

@@ -3196,34 +3189,42 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
const char* msg2 = "invalid column name in group by clause";
const char* msg3 = "columns from one table allowed as group by columns";
const char* msg4 = "join query does not support group by";
const char* msg5 = "not allowed column type for group by";
const char* msg6 = "tags not allowed for table query";
const char* msg7 = "not support group by expression";
const char* msg8 = "not allowed column type for group by";
const char* msg9 = "tags not allowed for table query";
const char* msg8 = "normal column can only locate at the end of group by clause";

// todo : handle two tables situation
STableMetaInfo* pTableMetaInfo = NULL;

if (pList == NULL) {
return TSDB_CODE_SUCCESS;
}

if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}

pQueryInfo->groupbyExpr.numOfGroupCols = (int16_t)taosArrayGetSize(pList);
if (pQueryInfo->groupbyExpr.numOfGroupCols > TSDB_MAX_TAGS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}

if (pQueryInfo->numOfTables > 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}

STableMeta* pTableMeta = NULL;
SSchema* pSchema = NULL;
SGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr;
if (pGroupExpr->columnInfo == NULL) {
pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex));
}

if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}

if (pGroupExpr->columnInfo == NULL || pQueryInfo->colList == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}

pGroupExpr->numOfGroupCols = (int16_t)taosArrayGetSize(pList);
if (pGroupExpr->numOfGroupCols > TSDB_MAX_TAGS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}

SSchema *pSchema = NULL;
int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
int32_t numOfGroupCols = 0;

size_t num = taosArrayGetSize(pList);
for (int32_t i = 0; i < num; ++i) {

@@ -3244,28 +3245,20 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
}

pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
pTableMeta = pTableMetaInfo->pTableMeta;
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;

int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
pSchema = tGetTbnameColumnSchema();
} else {
pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
}

bool groupTag = false;
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols) {
groupTag = true;
}

SGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr;
if (pGroupExpr->columnInfo == NULL) {
pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex));
}
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
bool groupTag = (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols);

if (groupTag) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}

int32_t relIndex = index.columnIndex;

@@ -3282,7 +3275,7 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
} else {
// check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by
if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}

tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema);

@@ -3292,10 +3285,20 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd

taosArrayPush(pGroupExpr->columnInfo, &colIndex);
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
numOfGroupCols++;
}
}

if (i == 0 && num > 1) {
// 1. only one normal column allowed in the group by clause
// 2. the normal column in the group by clause can only located in the end position
if (numOfGroupCols > 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}

for(int32_t i = 0; i < num; ++i) {
SColIndex* pIndex = taosArrayGet(pGroupExpr->columnInfo, i);
if (TSDB_COL_IS_NORMAL_COL(pIndex->flag) && i != num - 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
}

@@ -3341,6 +3344,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
if (pRight->flags & (1 << EXPR_FLAG_NS_TIMESTAMP)) {
pRight->value.i64 =
convertTimePrecision(pRight->value.i64, TSDB_TIME_PRECISION_NANO, timePrecision);
pRight->flags &= ~(1 << EXPR_FLAG_NS_TIMESTAMP);
}
}

@@ -4905,6 +4909,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
*/
if (pRight->flags & (1 << EXPR_FLAG_NS_TIMESTAMP)) {
pRight->value.i64 = convertTimePrecision(pRight->value.i64, TSDB_TIME_PRECISION_NANO, timePrecision);
pRight->flags &= ~(1 << EXPR_FLAG_NS_TIMESTAMP);
}

tVariantDump(&pRight->value, (char*)&val, TSDB_DATA_TYPE_BIGINT, true);

@@ -5980,8 +5985,8 @@ int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlN

if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
/*
* the offset value should be removed during retrieve data from virtual node, since the
* global order are done in client side, so the offset is applied at the client side
* The offset value should be removed during retrieve data from virtual node, since the
* global order are done at the client side, so the offset is applied at the client side.
* However, note that the maximum allowed number of result for each table should be less
* than or equal to the value of limit.
*/

@@ -6310,7 +6315,7 @@ static void updateTagPrjFunction(SQueryInfo* pQueryInfo) {
*/
static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
const char* msg1 = "only one selectivity function allowed in presence of tags function";
const char* msg3 = "aggregation function should not be mixed up with projection";
const char* msg2 = "aggregation function should not be mixed up with projection";

bool tagTsColExists = false;
int16_t numOfSelectivity = 0;

@@ -6389,7 +6394,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
} else {
if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) {
if (numOfAggregation > 0 && pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
return invalidOperationMsg(msg, msg3);
return invalidOperationMsg(msg, msg2);
}

if (numOfAggregation > 0 || numOfSelectivity > 0) {

@@ -484,8 +484,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
}

if (shouldFree) { // in case of table-meta/vgrouplist query, automatically free it
taosRemoveRef(tscObjRef, handle);
tscDebug("0x%"PRIx64" sqlObj is automatically freed", pSql->self);
taosRemoveRef(tscObjRef, handle);
}

taosReleaseRef(tscObjRef, handle);

@@ -1703,7 +1703,7 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {

uint64_t localQueryId = pSql->self;
qTableQuery(pQueryInfo->pQInfo, &localQueryId);
convertQueryResult(pRes, pQueryInfo, pSql->self);
convertQueryResult(pRes, pQueryInfo, pSql->self, true);

code = pRes->code;
if (pRes->code == TSDB_CODE_SUCCESS) {

@@ -645,7 +645,7 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
}
}

void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock) {
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar) {
assert(pRes->numOfCols > 0);

for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {

@@ -678,7 +678,7 @@ void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBloc
}
}

} else if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
} else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol
pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);

@@ -1075,14 +1075,14 @@ SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUp
return pOperator;
}

void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId) {
void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar) {
// set the correct result
SSDataBlock* p = pQueryInfo->pQInfo->runtimeEnv.outputBuf;
pRes->numOfRows = (p != NULL)? p->info.rows: 0;

if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) {
tscCreateResPointerInfo(pRes, pQueryInfo);
tscSetResRawPtrRv(pRes, pQueryInfo, p);
tscSetResRawPtrRv(pRes, pQueryInfo, p, convertNchar);
}

tscDebug("0x%"PRIx64" retrieve result in pRes, numOfRows:%d", objId, pRes->numOfRows);

@@ -1202,7 +1202,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue

uint64_t qId = pSql->self;
qTableQuery(px->pQInfo, &qId);
convertQueryResult(pOutput, px, pSql->self);
convertQueryResult(pOutput, px, pSql->self, false);
}

static void tscDestroyResPointerInfo(SSqlRes* pRes) {

@@ -3069,6 +3069,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
pQueryInfo->pTableMetaInfo = NULL;

pQueryInfo->bufLen = pSrc->bufLen;
pQueryInfo->orderProjectQuery = pSrc->orderProjectQuery;
pQueryInfo->buf = malloc(pSrc->bufLen);
if (pQueryInfo->buf == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;

@@ -3654,6 +3655,7 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {

pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t));
pthread_mutex_init(&pSql->subState.mutex, NULL);

for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SQueryInfo* pSub = taosArrayGetP(pQueryInfo->pUpstream, i);

@@ -1,5 +1,6 @@
#include <gtest/gtest.h>
#include <iostream>
#include <inttypes.h>

#include "taos.h"
#include "tglobal.h"

@@ -132,7 +133,7 @@ void validateResultFields() {
taos_free_result(res);

char sql[512] = {0};
sprintf(sql, "insert into t1 values(%ld, 99, 'abc', 'test')", start_ts);
sprintf(sql, "insert into t1 values(%" PRId64 ", 99, 'abc', 'test')", start_ts);

res = taos_query(conn, sql);
ASSERT_EQ(taos_errno(res), 0);

@@ -1,8 +1,10 @@
#include "os.h"

#include <gtest/gtest.h>
#include <cassert>
#include <iostream>
#include <inttypes.h>

#include "os.h"
#include "taos.h"
#include "ttoken.h"
#include "tutil.h"

@@ -165,7 +167,7 @@ TEST(testCase, parse_time) {

char t[] = "2021-01-08T02:11:40.000+00:00";
taosParseTime(t, &time, strlen(t), TSDB_TIME_PRECISION_MILLI, 0);
printf("%ld\n", time);
printf("%" PRId64 "\n", time);
}

@@ -1 +1 @@
Subproject commit dbc5f04ebd29522d2acd0636f6fc350060d15a6b
Subproject commit 3530c6df097134a410bacec6b3cd013ef38a61aa

@@ -126,28 +126,18 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
}
return res;
}
function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
while (currOffset < data.length) {
let dataEntry = data.slice(currOffset, currOffset + nbytes);
if (dataEntry[0] == FieldTypes.C_BINARY_NULL) {
res.push(null);
}
else {
res.push(ref.readCString(dataEntry));
}
currOffset += nbytes;
}
return res;
}

function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let dataEntry = data.slice(0, nbytes); //one entry in a row under a column;
//TODO: should use the correct character encoding

let currOffset = 0;
while (currOffset < data.length) {
let len = data.readIntLE(currOffset, 2);
let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
res.push(dataEntry.toString("utf-8"));
currOffset += nbytes;
}
return res;
}

@@ -160,7 +150,7 @@ let convertFunctions = {
[FieldTypes.C_BIGINT]: convertBigint,
[FieldTypes.C_FLOAT]: convertFloat,
[FieldTypes.C_DOUBLE]: convertDouble,
[FieldTypes.C_BINARY]: convertBinary,
[FieldTypes.C_BINARY]: convertNchar,
[FieldTypes.C_TIMESTAMP]: convertTimestamp,
[FieldTypes.C_NCHAR]: convertNchar
}

@@ -1,6 +1,6 @@
{
"name": "td2.0-connector",
"version": "2.0.7",
"version": "2.0.8",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"directories": {

@@ -31,6 +31,7 @@ void bnReset();
int32_t bnAllocVnodes(struct SVgObj *pVgroup);
int32_t bnAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId);
int32_t bnDropDnode(struct SDnodeObj *pDnode);
int32_t bnDnodeCanCreateMnode(struct SDnodeObj *pDnode);

#ifdef __cplusplus
}

@@ -636,6 +636,9 @@ static FILE * g_fpOfInsertResult = NULL;
#define errorPrint(fmt, ...) \
do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)

// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))


///////////////////////////////////////////////////

@@ -2574,7 +2577,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
tstrncpy(superTbls->tags[tagIndex].dataType,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
superTbls->tags[tagIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->tags[tagIndex].note,

@@ -2587,7 +2590,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
tstrncpy(superTbls->columns[columnIndex].dataType,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
superTbls->columns[columnIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->columns[columnIndex].note,

@@ -7472,7 +7475,6 @@ static void *specifiedSubscribe(void *sarg) {
}
}
taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->querySeq], 0);
taos_close(pThreadInfo->taos);

return NULL;

@@ -16,7 +16,6 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include "tgrant.h"
#include "tbn.h"
#include "tglobal.h"
#include "tconfig.h"
#include "tutil.h"

@@ -632,7 +631,8 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
}

int32_t numOfMnodes = mnodeGetMnodesNum();
if (numOfMnodes < tsNumOfMnodes && numOfMnodes < mnodeGetOnlineDnodesNum() && !pDnode->isMgmt) {
if (numOfMnodes < tsNumOfMnodes && numOfMnodes < mnodeGetOnlineDnodesNum()
&& bnDnodeCanCreateMnode(pDnode)) {
bnNotify();
}

@@ -2075,7 +2075,9 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
} else {
if (pTable->info.type == TSDB_SUPER_TABLE) {
int64_t us = taosGetTimestampUs();
pTable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
uint64_t x = (us&0x000000FFFFFFFFFF);
x = x<<24;
pTable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
} else {
pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->tid) & ((1ul << 24) - 1ul)) << 24) +
((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));

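A small illustration of why the hunk above masks the microsecond timestamp to its low 40 bits before shifting. The bit layout is inferred from the expressions in the diff and the helper below is hypothetical, not part of the commit:

#include <stdint.h>
#include <stdio.h>

/* Inferred uid layout: low 40 bits of the us timestamp in bits 63..24,
 * 16 bits of the sdb version in bits 23..8, and 8 random bits at the bottom.
 * Masking first keeps the left shift of a ~51-bit value well defined; the
 * old (us << 24) shifted a signed 64-bit value straight past its top bits. */
static uint64_t make_uid(int64_t us, uint64_t sdbVersion, uint32_t rnd) {
  uint64_t x = (uint64_t)us & 0x000000FFFFFFFFFFull;
  x = x << 24;
  return x + ((sdbVersion & ((1ull << 16) - 1)) << 8) + (rnd & ((1ull << 8) - 1));
}

int main(void) {
  int64_t us = 1624000000000000LL;  /* example microsecond timestamp, mid-2021 */
  printf("uid = %llu\n", (unsigned long long)make_uid(us, 12345, 77));
  return 0;
}
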
@@ -118,7 +118,6 @@ typedef struct SQueryInfo {
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit

int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
bool distinctTag; // distinct tag or not
int32_t round; // 0/1/....
int32_t bufLen;

@@ -19,6 +19,7 @@
#include "tglobal.h"
#include "tlog.h"
#include "twal.h"
#include "tfile.h"

int64_t ver = 0;
void *pWal = NULL;

@@ -36,7 +37,7 @@ int writeToQueue(void *pVnode, void *data, int type, void *pMsg) {
}

int main(int argc, char *argv[]) {
char path[128] = "/home/jhtao/test/wal";
char path[128] = "/tmp/wal";
int level = 2;
int total = 5;
int rows = 10000;

@@ -75,6 +76,8 @@ int main(int argc, char *argv[]) {
}

taosInitLog("wal.log", 100000, 10);
tfInit();
walInit();

SWalCfg walCfg = {0};
walCfg.walLevel = level;

@@ -122,13 +125,13 @@ int main(int argc, char *argv[]) {

printf("index:%" PRId64 " wal:%s\n", index, name);
if (code == 0) break;

index++;
}

getchar();

walClose(pWal);
walCleanUp();
tfCleanup();

return 0;
}

@@ -110,8 +110,8 @@ pipeline {
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single -DskipTests >/dev/null
java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
mvn clean package >/dev/null
java -jar target/JdbcRestfulDemo-jar-with-dependencies.jar
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {

@@ -17723,3 +17723,23 @@
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:__libc_alloc_buffer_allocate
fun:alloc_buffer_allocate
fun:__resolv_conf_allocate
fun:__resolv_conf_load
fun:__resolv_conf_get_current
fun:__res_vinit
fun:maybe_init
fun:context_get
fun:__resolv_context_get
fun:gaih_inet.constprop.7
fun:getaddrinfo
fun:taosGetFqdn
fun:taosCheckGlobalCfg
fun:taos_init_imp
}

@@ -12,9 +12,6 @@
# -*- coding: utf-8 -*-

from basic import *
from util.sql import tdSql



class TDTestCase:


@@ -36,4 +33,6 @@ td = TDTestCase()
td.init()


## usage: python3 OneMnodeMultipleVnodesTest.py


@@ -45,6 +45,15 @@ class BuildDockerCluser:
"qdebugFlag":"135",
"maxSQLLength":"1048576"
}
cmd = "mkdir -p %s" % self.dockerDir
self.execCmd(cmd)

cmd = "cp *.yml %s" % self.dockerDir
self.execCmd(cmd)

cmd = "cp Dockerfile %s" % self.dockerDir
self.execCmd(cmd)


# execute command, and return the output
# ref: https://blog.csdn.net/wowocpp/article/details/80775650

@@ -108,10 +117,14 @@ class BuildDockerCluser:
self.execCmd(cmd)

def updateLocalhosts(self):
cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts"
cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts | sed 's: ::g'"
result = self.execCmdAndGetOutput(cmd)
if result and not result.isspace():
print(result)
if result is None or result.isspace():
print("==========")
cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts"
display = "echo %s" % cmd
self.execCmd(display)
self.execCmd(cmd)

def deploy(self):

@@ -138,13 +151,13 @@ class BuildDockerCluser:
if self.numOfNodes < 2 or self.numOfNodes > 10:
print("the number of nodes must be between 2 and 10")
exit(0)
self.clearEnv()
self.createDirs()
self.updateLocalhosts()
self.deploy()

def run(self):
cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir)
display = "echo %s" % cmd
self.execCmd(display)
self.execCmd(cmd)
self.getConnection()
self.createDondes()

@@ -334,6 +334,7 @@ python3 ./test.py -f insert/unsignedInt.py
python3 ./test.py -f insert/unsignedBigint.py
python3 ./test.py -f insert/unsignedSmallint.py
python3 ./test.py -f insert/unsignedTinyint.py
python3 ./test.py -f insert/insertFromCSV.py
python3 ./test.py -f query/filterAllUnsignedIntTypes.py

python3 ./test.py -f tag_lite/unsignedInt.py

@@ -29,15 +29,14 @@ class TDTestCase:
tdSql.init(conn.cursor(), logSql)

self.ts = 1500074556514
self.csvfile = "/tmp/csvfile.csv"
self.rows = 100000

def writeCSV(self):
with open('test3.csv','w', encoding='utf-8', newline='') as csvFile:
with open(self.csvfile, 'w', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile, dialect='excel')
for i in range(1000000):
newTimestamp = self.ts + random.randint(10000000, 10000000000) + random.randint(1000, 10000000) + random.randint(1, 1000)
d = datetime.datetime.fromtimestamp(newTimestamp / 1000)
dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f"))
writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)])
for i in range(self.rows):
writer.writerow([self.ts + i, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)])

def removCSVHeader(self):
data = pd.read_csv("ordered.csv")

@@ -45,23 +44,25 @@ class TDTestCase:
data.to_csv("ordered.csv", header = False, index = False)

def run(self):
tdSql.prepare()
self.writeCSV()

tdSql.prepare()
tdSql.execute("create table t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)")
startTime = time.time()
tdSql.execute("insert into t1 file 'outoforder.csv'")
tdSql.execute("insert into t1 file '%s'" % self.csvfile)
duration = time.time() - startTime
print("Out of Order - Insert time: %d" % duration)
tdSql.query("select count(*) from t1")
rows = tdSql.getData(0, 0)
print("Insert time: %d" % duration)
tdSql.query("select * from t1")
tdSql.checkRows(self.rows)

tdSql.execute("create table t2(ts timestamp, c1 int, c2 float, c3 int, c4 int)")
startTime = time.time()
tdSql.execute("insert into t2 file 'ordered.csv'")
duration = time.time() - startTime
print("Ordered - Insert time: %d" % duration)
tdSql.query("select count(*) from t2")
tdSql.checkData(0,0, rows)
tdSql.execute("create table stb(ts timestamp, c1 int, c2 float, c3 int, c4 int) tags(t1 int, t2 binary(20))")
tdSql.execute("insert into t2 using stb(t1) tags(1) file '%s'" % self.csvfile)
tdSql.query("select * from stb")
tdSql.checkRows(self.rows)

tdSql.execute("insert into t3 using stb tags(1, 'test') file '%s'" % self.csvfile)
tdSql.query("select * from stb")
tdSql.checkRows(self.rows * 2)

def stop(self):
tdSql.close()

@@ -115,10 +115,11 @@ class TDTestRetetion:
cmd = 'select * from test'
self.queryRows=tdSql.query(cmd)
self.checkRows(4,cmd)
while datetime.datetime.now() < (ttime + datetime.timedelta(hours=72)):
while datetime.datetime.now() <= (ttime + datetime.timedelta(hours=72)):
time.sleep(0.001)
cmd = 'select * from test'
self.queryRows=tdSql.query(cmd)
print(tdSql.queryResult)
self.checkRows(3,cmd)

def stop(self):

@@ -127,6 +127,7 @@ class TDDnode:
"anyIp":"0",
"tsEnableTelemetryReporting":"0",
"dDebugFlag":"135",
"tsdbDebugFlag":"135",
"mDebugFlag":"135",
"sdbDebugFlag":"135",
"rpcDebugFlag":"135",

@@ -781,4 +781,14 @@ if $data11 != 2 then
return -1
endi

sql_error select count(*) from m1 group by tbname,k,f1;
sql_error select count(*) from m1 group by tbname,k,a;
sql_error select count(*) from m1 group by k, tbname;
sql_error select count(*) from m1 group by k,f1;
sql_error select count(*) from tm0 group by tbname;
sql_error select count(*) from tm0 group by a;
sql_error select count(*) from tm0 group by k,f1;

sql_error select count(*),f1 from m1 group by tbname,k;

system sh/exec.sh -n dnode1 -s stop -x SIGINT

@@ -352,18 +352,24 @@ if $rows != 0 then
return -1
endi

print ==========================> td-4783
print ==========================> td-4783,td-4792
sql create table where_ts(ts timestamp, f int)
sql insert into where_ts values('2021-06-19 16:22:00', 1);
sql insert into where_ts values('2021-06-19 16:23:00', 2);
sql insert into where_ts values('2021-06-19 16:24:00', 3);
sql insert into where_ts values('2021-06-19 16:25:00', 1);
sql select * from (select * from where_ts) where ts<'2021-06-19 16:25:00' and ts>'2021-06-19 16:22:00'
if $row != 2 then
if $rows != 2 then
return -1
endi
print $data00, $data01
if $data01 != 2 then
return -1
endi
sql insert into where_ts values(now, 5);
sleep 10
sql select * from (select * from where_ts) where ts<now;
if $rows != 5 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT