Merge remote-tracking branch 'origin/master' into feature/m1

Shengliang Guan 2021-08-18 18:42:30 +08:00
commit 40e640ae3f
62 changed files with 3090 additions and 887 deletions


@ -15,7 +15,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make
- make -j4
trigger:
event:
- pull_request
@ -23,6 +23,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm64_bionic
@ -39,7 +40,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
- make -j4
trigger:
event:
- pull_request
@ -66,7 +67,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
- make -j4
trigger:
event:
- pull_request
@ -91,7 +92,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
- make -j4
trigger:
event:
- pull_request
@ -116,7 +117,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
- make -j4
trigger:
event:
- pull_request
@ -142,7 +143,7 @@ steps:
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch32 > /dev/null
- make
- make -j4
trigger:
event:
- pull_request
@ -150,6 +151,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_trusty
@ -168,7 +170,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make
- make -j4
trigger:
event:
- pull_request
@ -176,6 +178,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_xenial
@ -193,7 +196,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make
- make -j4
trigger:
event:
- pull_request
@ -201,7 +204,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_bionic
@ -218,7 +221,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make
- make -j4
trigger:
event:
- pull_request
@ -226,6 +229,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_centos7
@ -241,7 +245,7 @@ steps:
- mkdir debug
- cd debug
- cmake ..
- make
- make -j4
trigger:
event:
- pull_request
@ -249,4 +253,4 @@ steps:
branch:
- develop
- master
- 2.0

Jenkinsfile

@ -160,7 +160,6 @@ pipeline {
skipbuild='2'
skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true)
println skipbuild
}
sh'''
rm -rf ${WORKSPACE}.tes
@ -225,6 +224,26 @@ pipeline {
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd >/dev/null &
sleep 10
'''
sh '''
cd ${WKC}/tests/examples/nodejs
npm install td2.0-connector > /dev/null 2>&1
node nodejsChecker.js host=localhost
'''
sh '''
cd ${WKC}/tests/examples/C#/taosdemo
mcs -out:taosdemo *.cs > /dev/null 2>&1
echo '' |./taosdemo
'''
sh '''
cd ${WKC}/tests/gotest
bash batchtest.sh
'''
sh '''
cd ${WKC}/tests
./test-all.sh b1fq


@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.1.6.0")
SET(TD_VER_NUMBER "2.1.7.1")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)


@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef
char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) {
int nBytes;
char *pBuf;
char *pBuf1;
nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */
pBuf = (char *)malloc(nBytes);
if (!pBuf) {
@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault
free(pBuf);
return NULL;
}
pBuf = realloc(pBuf, nBytes+1);
return pBuf;
pBuf1 = realloc(pBuf, nBytes+1);
if(pBuf1 == NULL && pBuf != NULL) free(pBuf);
return pBuf1;
}
int CountCharacters(const char *string, UINT cp) {
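
The hunk above, and many of the hunks that follow, replace the unsafe ptr = realloc(ptr, size) pattern, which leaks the original block when realloc returns NULL, with a temporary pointer plus an explicit free on failure. A minimal sketch of that idiom in isolation; the helper name is hypothetical and not taken from the source tree:

#include <stdlib.h>

/* Grow a heap block without leaking it on failure. On success the (possibly
 * moved) block is returned; on failure the original block is freed and NULL
 * is returned, matching the convention adopted across this commit. */
static void *safe_realloc(void *ptr, size_t size) {
    void *tmp = realloc(ptr, size);
    if (tmp == NULL) {
        free(ptr);   /* a failed realloc leaves the old block allocated; release it */
        return NULL; /* caller must treat NULL as out-of-memory */
    }
    return tmp;
}

The same temporary-pointer check recurs below in BreakArgLine, realpath and realpathU, taos_stmt_prepare, tdAddColToKVRow, taosTRealloc, getstr and the other realloc call sites touched by this commit.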


@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */
int nBackslash = 0;
char **ppszArg;
char **ppszArg1;
int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */
ppszArg = (char **)malloc((argc+1)*sizeof(char *));
@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */
iArg = TRUE;
ppszArg[argc++] = pszCopy+j;
ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
if(ppszArg1 == NULL && ppszArg != NULL)
free(ppszArg);
ppszArg = ppszArg1;
if (!ppszArg) return -1;
pszCopy[j] = c0 = '\0';
}
@ -212,7 +216,7 @@ int _initU(void) {
fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n");
_acmdln[0] = '\0';
}
realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
//realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
/* Should not fail since we make it smaller */
/* Record the console code page, to allow converting the output accordingly */


@ -196,6 +196,7 @@ not_compact_enough:
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpath(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
char *pOutbuf1 = NULL;
int iErr;
const char *pc;
@ -242,8 +243,11 @@ realpath_failed:
return NULL;
}
if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
return pOutbuf;
if (!outbuf) {
pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
}
return pOutbuf1;
}
#endif
@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) {
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpathU(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
char *pOutbuf1 = NULL;
char *pPath1 = NULL;
char *pPath2 = NULL;
int iErr;
@ -590,10 +595,13 @@ realpathU_failed:
}
DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf));
if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
if (!outbuf) {
pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
}
free(pPath1);
free(pPath2);
return pOutbuf;
return pOutbuf1;
}
#endif /* defined(_WIN32) */


@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.1.6.0'
version: '2.1.7.1'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.1.6.0
- usr/lib/libtaos.so.2.1.7.1
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so


@ -116,8 +116,17 @@ void bnCleanupDnodes() {
static void bnCheckDnodesSize(int32_t dnodesNum) {
if (tsBnDnodes.maxSize <= dnodesNum) {
tsBnDnodes.maxSize = dnodesNum * 2;
tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *));
int32_t maxSize = dnodesNum * 2;
SDnodeObj** list1 = NULL;
int32_t retry = 0;
while(list1 == NULL && retry++ < 3) {
list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *));
}
if(list1) {
tsBnDnodes.list = list1;
tsBnDnodes.maxSize = maxSize;
}
}
}


@ -29,15 +29,16 @@ extern "C" {
#include "tsched.h"
#include "tsclient.h"
#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE))
#define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE))
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo)))
#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE))
#pragma pack(push,1)
@ -142,6 +143,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
bool hasTagValOutput(SQueryInfo* pQueryInfo);
bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo);


@ -1527,8 +1527,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
pCmd->insertParam.objectId = pSql->self;
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY);


@ -1940,20 +1940,6 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
if (pQueryInfo == NULL) {
return false;
}
if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY
&& (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) {
return false;
}
if (tscNumOfExprs(pQueryInfo) == 1){
return true;
}
return false;
}
static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < numOfExprs; ++i) {
@ -2043,8 +2029,11 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
const char* msg1 = "too many items in selection clause";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg4 = "only support distinct one column or tag";
const char* msg4 = "not support distinct mixed with proj/agg func";
const char* msg5 = "invalid function name";
const char* msg6 = "not support distinct mixed with join";
const char* msg7 = "not support distinct mixed with groupby";
const char* msg8 = "not support distinct in nest query";
// too many result columns not support order by in query
if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) {
@ -2055,19 +2044,31 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
bool hasDistinct = false;
bool hasAgg = false;
size_t numOfExpr = taosArrayGetSize(pSelNodeList);
int32_t distIdx = -1;
for (int32_t i = 0; i < numOfExpr; ++i) {
int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i);
if (hasDistinct == false) {
hasDistinct = (pItem->distinct == true);
distIdx = hasDistinct ? i : -1;
}
int32_t type = pItem->pNode->type;
if (type == SQL_NODE_SQLFUNCTION) {
hasAgg = true;
if (hasDistinct) break;
pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
SUdfInfo* pUdfInfo = NULL;
if (pItem->pNode->functionId < 0) {
pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
@ -2102,10 +2103,22 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
}
}
//TODO(dengyihao), refactor as function
//handle distinct func mixed with other func
if (hasDistinct == true) {
if (!isValidDistinctSql(pQueryInfo) ) {
if (distIdx != 0 || hasAgg) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (joinQuery) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols != 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
if (pQueryInfo->pDownstream != NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
pQueryInfo->distinct = true;
}
@ -2630,7 +2643,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
}
}
if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
@ -2664,8 +2677,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
assert(ids.num == 1);
tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
}
tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid);
return TSDB_CODE_SUCCESS;
}
@ -3047,7 +3060,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s);
}
}
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
return TSDB_CODE_SUCCESS;
}
@ -4512,7 +4524,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
*/
tSqlExprDestroy(*pExpr);
} else {
ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pQueryInfo->msg);
ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pCmd->payload);
}
*pExpr = NULL; // remove this expression
@ -4550,7 +4562,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
}
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg);
ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload);
*pExpr = NULL;
} else {
// do nothing
@ -4568,7 +4580,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg);
ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pCmd->payload);
*pExpr = NULL; // remove it from expr tree
}
@ -5370,6 +5382,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
const char* msg3 = "top/bottom not support fill";
const char* msg4 = "illegal value or data overflow";
const char* msg5 = "fill only available for interval query";
const char* msg6 = "not supported function now";
if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
@ -5408,6 +5421,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
}
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_PREV;
if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
} else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_NEXT;
} else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) {
@ -5486,14 +5502,19 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
if (isTopBottomQuery(pQueryInfo)) {
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
} else { // in case of select tbname from super_table, the defualt order column can not be the primary ts column
pQueryInfo->order.orderColId = INT32_MIN;
} else { // in case of select tbname from super_table, the default order column can not be the primary ts column
pQueryInfo->order.orderColId = INT32_MIN; // todo define a macro
}
/* for super table query, set default ascending order for group output */
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
}
if (pQueryInfo->distinct) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
}
int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) {
@ -5501,21 +5522,17 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
const char* msg1 = "invalid column name";
const char* msg2 = "order by primary timestamp, first tag or groupby column in groupby clause allowed";
const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed";
const char* msg4 = "orderby column must projected in subquery";
setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pQueryInfo->distinct == true) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
pQueryInfo->order.orderColId = 0;
return TSDB_CODE_SUCCESS;
}
if (pSqlNode->pSortOrder == NULL) {
if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) {
return TSDB_CODE_SUCCESS;
}
SArray* pSortorder = pSqlNode->pSortOrder;
char* pMsgBuf = tscGetErrorMsgPayload(pCmd);
SArray* pSortOrder = pSqlNode->pSortOrder;
/*
* for table query, there is only one or none order option is allowed, which is the
@ -5523,19 +5540,19 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
*
* for super table query, the order option must be less than 3.
*/
size_t size = taosArrayGetSize(pSortorder);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
size_t size = taosArrayGetSize(pSortOrder);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) {
if (size > 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
return invalidOperationMsg(pMsgBuf, msg0);
}
} else {
if (size > 2) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return invalidOperationMsg(pMsgBuf, msg3);
}
}
// handle the first part of order by
tVariant* pVar = taosArrayGet(pSortorder, 0);
tVariant* pVar = taosArrayGet(pSortOrder, 0);
// e.g., order by 1 asc, return directly without further check.
if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) {
@ -5547,7 +5564,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return invalidOperationMsg(pMsgBuf, msg1);
}
bool orderByTags = false;
@ -5559,7 +5576,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg2);
}
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (relTagIndex == pColIndex->colIndex) {
@ -5580,13 +5597,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
orderByGroupbyCol = true;
}
}
if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return invalidOperationMsg(pMsgBuf, msg3);
} else { // order by top/bottom result value column is not supported in case of interval query.
assert(!(orderByTags && orderByTS && orderByGroupbyCol));
}
size_t s = taosArrayGetSize(pSortorder);
size_t s = taosArrayGetSize(pSortOrder);
if (s == 1) {
if (orderByTags) {
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
@ -5599,13 +5617,15 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
} else if (isTopBottomQuery(pQueryInfo)) {
int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo);
assert(topBotIndex >= 1);
/* order of top/bottom query in interval is not valid */
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1);
assert(pExpr->base.functionId == TSDB_FUNC_TS);
pExpr = tscExprGet(pQueryInfo, 1);
pExpr = tscExprGet(pQueryInfo, topBotIndex);
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg2);
}
tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
@ -5620,12 +5640,21 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
// orderby ts query on super table
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
bool found = false;
for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
found = true;
break;
}
}
if (!found && pQueryInfo->pDownstream) {
return invalidOperationMsg(pMsgBuf, msg4);
}
addPrimaryTsColIntoResult(pQueryInfo, pCmd);
}
}
}
if (s == 2) {
} else {
tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
if (orderByTags) {
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
@ -5642,22 +5671,23 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
tVariant* pVar2 = &pItem->pVar;
SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz};
if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return invalidOperationMsg(pMsgBuf, msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg2);
} else {
tVariantListItem* p1 = taosArrayGet(pSortorder, 1);
tVariantListItem* p1 = taosArrayGet(pSortOrder, 1);
pQueryInfo->order.order = p1->sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
}
} else { // meter query
if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
} else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table
if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsgBuf, msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) {
bool validOrder = false;
SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
@ -5665,13 +5695,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
validOrder = (pColIndex->colIndex == index.columnIndex);
}
if (!validOrder) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg2);
}
tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId;
pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
}
if (isTopBottomQuery(pQueryInfo)) {
@ -5681,19 +5712,22 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
validOrder = (pColIndex->colIndex == index.columnIndex);
} else {
int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo);
assert(topBotIndex >= 1);
/* order of top/bottom query in interval is not valid */
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1);
assert(pExpr->base.functionId == TSDB_FUNC_TS);
pExpr = tscExprGet(pQueryInfo, 1);
pExpr = tscExprGet(pQueryInfo, topBotIndex);
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg2);
}
validOrder = true;
}
if (!validOrder) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(pMsgBuf, msg2);
}
tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
@ -5703,6 +5737,18 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return TSDB_CODE_SUCCESS;
}
tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
} else {
// handle the temp table order by clause. You can order by any single column in case of the temp table, created by
// inner subquery.
assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1);
if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsgBuf, msg1);
}
tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
@ -5892,6 +5938,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tVariantListItem* pItem = taosArrayGet(pVarList, 1);
SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) {
return invalidOperationMsg(pMsg, msg14);
}
pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE);
if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) {
@ -8375,7 +8425,7 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) {
n += 1;
}
info->numOfColumns = n;
return meta;
}
@ -8398,13 +8448,12 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
pSub->pUdfInfo = pUdfInfo;
pSub->udfCopy = true;
pSub->pDownstream = pQueryInfo;
int32_t code = validateSqlNode(pSql, p, pSub);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
pSub->pDownstream = pQueryInfo;
// create dummy table meta info
STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo));
if (pTableMetaInfo1 == NULL) {
@ -8462,8 +8511,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg8 = "condition missing for join query";
const char* msg9 = "not support 3 level select";
int32_t code = TSDB_CODE_SUCCESS;
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -8758,8 +8806,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo);
pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo);
//pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pQueryInfo, 0);
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);


@ -409,7 +409,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY |
TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY))) {
(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)) {
// do nothing in case of super table subquery
} else {
pSql->retry += 1;
@ -880,16 +880,16 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
SQueryAttr query = {{0}};
tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql);
query.vgId = pTableMeta->vgId;
SArray* tableScanOperator = createTableScanPlan(&query);
SArray* queryOperator = createExecOperatorPlan(&query);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload;
tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version));


@ -887,7 +887,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
}
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tfree(pSql);


@ -2397,6 +2397,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
} else {
SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
int32_t ti = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
assert(ti >= 0);
SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti);
tscColumnCopy(x, pCol);
}
}
}
@ -2812,7 +2816,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows);
SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
tscClearInterpInfo(pPQueryInfo);
code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
pParentSql->res.code = code;


@ -369,6 +369,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
return false;
}
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL) {
continue;
}
if (pExpr->base.functionId == TSDB_FUNC_TS) {
continue;
}
if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
return i;
}
}
return -1;
}
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
@ -625,8 +646,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
} else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer, with one extra byte for the terminating symbol
pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
if(buffer == NULL)
return ;
pRes->buffer[i] = buffer;
// string terminated char for binary data
memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
@ -1206,7 +1229,6 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo);
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols);
pOutput->precision = pSqlObjList[0]->res.precision;
SSchema* schema = NULL;
@ -1502,7 +1524,6 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tscFreeSqlResult(pSql);
tscResetSqlCmd(pCmd, false);
memset(pCmd->payload, 0, (size_t)pCmd->allocSize);
tfree(pCmd->payload);
pCmd->allocSize = 0;
@ -3532,8 +3553,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
pNewQueryInfo->distinct = pQueryInfo->distinct;
if (pNewQueryInfo->buf == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@ -3830,6 +3853,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
pNew->sqlstr = strdup(pSql->sqlstr);
pNew->fp = tscSubqueryCompleteCallback;
pNew->maxRetry = pSql->maxRetry;
pNew->cmd.resColumnId = TSDB_RES_COL_ID;
tsem_init(&pNew->rspSem, 0, 0);
SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
@ -4364,6 +4390,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
STableMeta* p = NULL;
size_t sz = 0;
STableMeta* pChild = *ppChild;
STableMeta* pChild1;
taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
@ -4374,7 +4401,10 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema);
int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
if (*tableMetaCapacity < tableMetaSize) {
pChild = realloc(pChild, tableMetaSize);
pChild1 = realloc(pChild, tableMetaSize);
if(pChild1 == NULL)
return -1;
pChild = pChild1;
*tableMetaCapacity = (size_t)tableMetaSize;
}


@ -547,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
if (pBuilder->pColIdx == NULL) return -1;
SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
if (pColIdx == NULL) return -1;
pBuilder->pColIdx = pColIdx;
}
pBuilder->pColIdx[pBuilder->nCols].colId = colId;
@ -561,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
while (tlen > pBuilder->alloc - pBuilder->size) {
pBuilder->alloc *= 2;
}
pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc);
if (pBuilder->buf == NULL) return -1;
void* buf = realloc(pBuilder->buf, pBuilder->alloc);
if (buf == NULL) return -1;
pBuilder->buf = buf;
}
memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen);


@ -59,6 +59,7 @@ extern char tsLocale[];
extern char tsCharset[]; // default encode string
extern int8_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize;
extern int32_t tsMaxNumOfDistinctResults;
extern char tsTempDir[];
//query buffer management


@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
if (pBuilder->columns == NULL) return -1;
STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
if (columns == NULL) return -1;
pBuilder->columns = columns;
}
STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]);


@ -79,7 +79,7 @@ int32_t tsCompressMsgSize = -1;
// client
int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN;
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from
@ -89,6 +89,9 @@ int32_t tsMaxNumOfOrderedResults = 100000;
// 10 ms for sliding time, the value will changed in case of time precision changed
int32_t tsMinSlidingTime = 10;
// the maximum number of distinct query results
int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
// 1 us for interval time range, changed accordingly
int32_t tsMinIntervalTime = 1;
@ -546,6 +549,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "maxNumOfDistinctRes";
cfg.ptr = &tsMaxNumOfDistinctResults;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 10*10000;
cfg.maxValue = 10000*10000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32;

@ -1 +1 @@
Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4
Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f

@ -1 +1 @@
Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206
Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df

@ -1 +1 @@
Subproject commit ce5201014136503d34fecbd56494b67b4961056c
Subproject commit b62a26ecc164a310104df57691691b237e091c89


@ -109,6 +109,24 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0)
return res;
}
function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
while (currOffset < data.length) {
let len = data.readIntLE(currOffset, 2);
let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
if (dataEntry[0] == 255) {
res.push(null)
} else {
res.push(dataEntry.toString("utf-8"));
}
currOffset += nbytes;
}
return res;
}
function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
@ -117,7 +135,11 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0)
while (currOffset < data.length) {
let len = data.readIntLE(currOffset, 2);
let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
res.push(dataEntry.toString("utf-8"));
if (dataEntry[0] == 255 && dataEntry[1] == 255) {
res.push(null)
} else {
res.push(dataEntry.toString("utf-8"));
}
currOffset += nbytes;
}
return res;
@ -132,7 +154,7 @@ let convertFunctions = {
[FieldTypes.C_BIGINT]: convertBigint,
[FieldTypes.C_FLOAT]: convertFloat,
[FieldTypes.C_DOUBLE]: convertDouble,
[FieldTypes.C_BINARY]: convertNchar,
[FieldTypes.C_BINARY]: convertBinary,
[FieldTypes.C_TIMESTAMP]: convertTimestamp,
[FieldTypes.C_NCHAR]: convertNchar
}
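
The new convertBinary converter above, together with the updated convertNchar, reads the variable-length cell layout used for BINARY and NCHAR columns: a 2-byte little-endian length prefix followed by the payload, with a leading 0xFF payload byte (0xFF 0xFF for NCHAR) marking NULL. A hedged C sketch of decoding one such cell under that assumption; the function name and buffer handling are illustrative only:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Decode one variable-length cell: a 2-byte little-endian length prefix, then
 * the payload. A leading 0xFF payload byte is treated as NULL, mirroring the
 * JavaScript converter above. Returns the number of payload bytes copied. */
static int read_var_cell(const uint8_t *cell, char *out, size_t outCap, int *isNull) {
    uint16_t len = (uint16_t)(cell[0] | ((uint16_t)cell[1] << 8));
    *isNull = (len > 0 && cell[2] == 0xFF);
    if (*isNull || len == 0) return 0;
    if ((size_t)len >= outCap) len = (uint16_t)(outCap - 1); /* clamp to the caller's buffer */
    memcpy(out, cell + 2, len);
    out[len] = '\0';
    return (int)len;
}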


@ -0,0 +1,33 @@
const taos = require('../tdengine');
var conn = taos.connect({ host: "localhost" });
var c1 = conn.cursor();
function checkData(data, row, col, expect) {
let checkdata = data[row][col];
if (checkdata == expect) {
// console.log('check pass')
}
else {
console.log('check failed, expect ' + expect + ', but is ' + checkdata)
}
}
c1.execute('drop database if exists testnodejsnchar')
c1.execute('create database testnodejsnchar')
c1.execute('use testnodejsnchar');
c1.execute('create table tb (ts timestamp, value float, text binary(200))')
c1.execute("insert into tb values('2021-06-10 00:00:00', 24.7, '中文10000000000000000000000');") -
c1.execute('insert into tb values(1623254400150, 24.7, NULL);')
c1.execute('import into tb values(1623254400300, 24.7, "中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000");')
sql = 'select * from tb;'
console.log('*******************************************')
c1.execute(sql);
data = c1.fetchall();
console.log(data)
//check data about insert data
checkData(data, 0, 2, '中文10000000000000000000000')
checkData(data, 1, 2, null)
checkData(data, 2, 2, '中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000')


@ -32,7 +32,7 @@ typedef enum {
typedef struct {
int8_t msgType;
int8_t sver;
int8_t sver; // sver 2 for WAL SDataRow/SMemRow compatibility
int8_t reserved[2];
int32_t len;
uint64_t version;


@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) {
int32_t tbIndex = tbNum++;
if (tbMallocNum < tbNum) {
tbMallocNum = (tbMallocNum * 2 + 1);
tbNames = realloc(tbNames, tbMallocNum * sizeof(char *));
if (tbNames == NULL) {
char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *));
if (tbNames1 == NULL) {
fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum);
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
break;
}
tbNames = tbNames1;
}
tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN);

File diff suppressed because it is too large


@ -149,7 +149,10 @@ float* read_float(const char* inFile, int* pcount){
//printf(" buff=%s float=%.50f \n ", buf, floats[fi]);
if ( ++fi == malloc_cnt ) {
malloc_cnt += 100000;
floats = realloc(floats, malloc_cnt*sizeof(float));
float* floats1 = realloc(floats, malloc_cnt*sizeof(float));
if(floats1 == NULL)
break;
floats = floats1;
}
memset(buf, 0, sizeof(buf));
}


@ -2921,10 +2921,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray
(*totalMallocLen) *= 2;
}
pMultiMeta = realloc(pMultiMeta, *totalMallocLen);
if (pMultiMeta == NULL) {
SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen);
if (pMultiMeta1 == NULL) {
return NULL;
}
pMultiMeta = pMultiMeta1;
}
return pMultiMeta;


@ -504,8 +504,9 @@ void * taosTRealloc(void *ptr, size_t size) {
void * tptr = (void *)((char *)ptr - sizeof(size_t));
size_t tsize = size + sizeof(size_t);
tptr = realloc(tptr, tsize);
if (tptr == NULL) return NULL;
void* tptr1 = realloc(tptr, tsize);
if (tptr1 == NULL) return NULL;
tptr = tptr1;
*(size_t *)tptr = size;


@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t
*n += MIN_CHUNK;
nchars_avail = (int32_t)(*n + *lineptr - read_pos);
*lineptr = realloc(*lineptr, *n);
if (!*lineptr) {
char* lineptr1 = realloc(*lineptr, *n);
if (!lineptr1) {
errno = ENOMEM;
return -1;
}
*lineptr = lineptr1;
read_pos = *n - nchars_avail + *lineptr;
assert((*lineptr + *n) == (read_pos + nchars_avail));
}


@ -333,6 +333,7 @@ enum OPERATOR_TYPE_E {
OP_StateWindow = 22,
OP_AllTimeWindow = 23,
OP_AllMultiTableTimeInterval = 24,
OP_Order = 25,
};
typedef struct SOperatorInfo {
@ -417,7 +418,6 @@ typedef struct STableScanInfo {
int32_t *rowCellInfoOffset;
SExprInfo *pExpr;
SSDataBlock block;
bool loadExternalRows; // load external rows (prev & next rows)
int32_t numOfOutput;
int64_t elapsedTime;
@ -510,13 +510,21 @@ typedef struct SStateWindowOperatorInfo {
bool reptScan;
} SStateWindowOperatorInfo ;
typedef struct SDistinctDataInfo {
int32_t index;
int32_t type;
int32_t bytes;
} SDistinctDataInfo;
typedef struct SDistinctOperatorInfo {
SHashObj *pSet;
SSDataBlock *pRes;
bool recordNullVal; //has already record the null value, no need to try again
int64_t threshold;
int64_t outputCapacity;
int32_t colIndex;
int32_t totalBytes;
char* buf;
SArray* pDistinctDataInfo;
} SDistinctOperatorInfo;
struct SGlobalMerger;
@ -541,6 +549,13 @@ typedef struct SMultiwayMergeInfo {
SArray *udfInfo;
} SMultiwayMergeInfo;
// todo support the disk-based sort
typedef struct SOrderOperatorInfo {
int32_t colIndex;
int32_t order;
SSDataBlock *pDataBlock;
} SOrderOperatorInfo;
void appendUpstream(SOperatorInfo* p, SOperatorInfo* pUpstream);
SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime);
@ -570,6 +585,7 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator
int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter);
SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput);
SOperatorInfo* createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal);
SSDataBlock* doGlobalAggregate(void* param, bool* newgroup);
SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup);


@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder
void tOrderDescDestroy(tOrderDescriptor *pDesc);
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn);
void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows,
int32_t numOfRowsToWrite, int32_t srcCapacity);


@ -3670,6 +3670,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
return;
}
bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) {
*(TSKEY *)pCtx->pOutput = pCtx->startTs;
} else if (type == TSDB_FILL_NULL) {
@ -3677,7 +3679,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
} else if (type == TSDB_FILL_SET_VALUE) {
tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true);
} else {
if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) {
if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) {
if (type == TSDB_FILL_PREV) {
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val);
@ -3716,13 +3718,14 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY skey = GET_TS_DATA(pCtx, 0);
if (type == TSDB_FILL_PREV) {
if (skey > pCtx->startTs) {
if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) {
return;
}
if (pCtx->size > 1) {
TSKEY ekey = GET_TS_DATA(pCtx, 1);
if (ekey > skey && ekey <= pCtx->startTs) {
if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) ||
((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){
skey = ekey;
}
}
@ -3731,10 +3734,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = skey;
char* val = NULL;
if (ekey < pCtx->startTs) {
if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
if (pCtx->size > 1) {
ekey = GET_TS_DATA(pCtx, 1);
if (ekey < pCtx->startTs) {
if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
return;
}
@ -3755,12 +3758,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = GET_TS_DATA(pCtx, 1);
// no data generated yet
if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs))
|| ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) {
return;
}
assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
char *start = GET_INPUT_DATA(pCtx, 0);
char *end = GET_INPUT_DATA(pCtx, 1);
@ -3788,11 +3790,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
static void interp_function(SQLFunctionCtx *pCtx) {
// at this point, the value is existed, return directly
if (pCtx->size > 0) {
// impose the timestamp check
TSKEY key = GET_TS_DATA(pCtx, 0);
bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
TSKEY key;
char *pData;
int32_t typedData = 0;
if (ascQuery) {
key = GET_TS_DATA(pCtx, 0);
pData = GET_INPUT_DATA(pCtx, 0);
} else {
key = pCtx->start.key;
if (key == INT64_MIN) {
key = GET_TS_DATA(pCtx, 0);
pData = GET_INPUT_DATA(pCtx, 0);
} else {
if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) {
pData = pCtx->start.ptr;
} else {
typedData = 1;
pData = (char *)&pCtx->start.val;
}
}
}
//if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) {
if (key == pCtx->startTs) {
char *pData = GET_INPUT_DATA(pCtx, 0);
assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
if (typedData) {
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData);
} else {
assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
}
SET_VAL(pCtx, 1, 1);
} else {
interp_function_impl(pCtx);


@ -44,6 +44,10 @@
#define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0}
#define MULTI_KEY_DELIM "-"
#define HASH_CAPACITY_LIMIT 10000000
#define TIME_WINDOW_COPY(_dst, _src) do {\
(_dst).skey = (_src).skey;\
(_dst).ekey = (_src).ekey;\
@ -224,6 +228,7 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput);
static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput);
static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput);
static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput);
static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput);
static void destroyAggOperatorInfo(void* param, int32_t numOfOutput);
@ -1326,6 +1331,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
pCtx[k].end.key = curTs;
pCtx[k].end.val = v2;
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
if (prevRowIndex == -1) {
pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index];
} else {
pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes;
}
pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes;
}
}
} else if (functionId == TSDB_FUNC_TWA) {
SPoint point1 = (SPoint){.key = prevTs, .val = &v1};
@ -1595,6 +1610,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
SResultRow* pResult = NULL;
int32_t forwardStep = 0;
int32_t ret = 0;
STimeWindow preWin = win;
while (1) {
// null data, failed to allocate more memory buffer
@ -1609,12 +1625,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
// window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
preWin = win;
int32_t prevEndPos = (forwardStep - 1) * step + startPos;
startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos);
if (startPos < 0) {
if (win.skey <= pQueryAttr->window.ekey) {
if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) {
int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId,
pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
@ -1622,12 +1639,12 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
}
startPos = pSDataBlock->info.rows - 1;
// window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
}
break;
}
setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
@ -2213,6 +2230,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
}
break;
}
case OP_StateWindow: {
pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
@ -2229,24 +2247,20 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
case OP_Filter: { // todo refactor
int32_t numOfFilterCols = 0;
// if (pQueryAttr->numOfFilterCols > 0) {
// pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1,
// pQueryAttr->numOfOutput, pQueryAttr->tableCols, pQueryAttr->numOfFilterCols);
// } else {
if (pQueryAttr->stableQuery) {
SColumnInfo* pColInfo =
extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols);
pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3,
pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols);
freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3);
} else {
SColumnInfo* pColInfo =
extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols);
pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1,
pQueryAttr->numOfOutput, pColInfo, numOfFilterCols);
freeColumnInfo(pColInfo, pQueryAttr->numOfOutput);
}
// }
if (pQueryAttr->stableQuery) {
SColumnInfo* pColInfo =
extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols);
pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3,
pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols);
freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3);
} else {
SColumnInfo* pColInfo =
extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols);
pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1,
pQueryAttr->numOfOutput, pColInfo, numOfFilterCols);
freeColumnInfo(pColInfo, pQueryAttr->numOfOutput);
}
break;
}
@ -2258,11 +2272,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
case OP_MultiwayMergeSort: {
bool groupMix = true;
if(pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) {
if (pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) {
groupMix = false;
}
pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput,
4096, merger, groupMix); // TODO hack it
4096, merger, groupMix); // TODO hack it
break;
}
@ -2283,6 +2298,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
break;
}
case OP_Order: {
pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &pQueryAttr->order);
break;
}
default: {
assert(0);
}
@ -3092,7 +3112,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
// check if this data block is required to load
if ((*status) != BLK_DATA_ALL_NEEDED) {
bool needFilter = true;
// the pCtx[i] result belongs to the previous time window since the outputBuf has not been set yet,
// the filter result may be incorrect. So in case of interval query, we need to set the correct time output buffer
if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) {
@ -3575,6 +3595,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo;
int64_t tid = 0;
pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES);
SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid);
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
@ -5407,6 +5428,114 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx
return pOperator;
}
static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) {
assert(pSrc != NULL && pDest != NULL && pDest->info.numOfCols == pSrc->info.numOfCols);
int32_t numOfCols = pSrc->info.numOfCols;
for(int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i);
SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i);
int32_t newSize = (pDest->info.rows + pSrc->info.rows) * pCol2->info.bytes;
char* tmp = realloc(pCol2->pData, newSize);
if (tmp != NULL) {
pCol2->pData = tmp;
int32_t offset = pCol2->info.bytes * pDest->info.rows;
memcpy(pCol2->pData + offset, pCol1->pData, pSrc->info.rows * pCol2->info.bytes);
} else {
return TSDB_CODE_VND_OUT_OF_MEMORY;
}
}
pDest->info.rows += pSrc->info.rows;
return TSDB_CODE_SUCCESS;
}
static SSDataBlock* doSort(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SOrderOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pBlock = NULL;
while(1) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
// upstream exhausted when pBlock is NULL: all fetched rows are already merged into pInfo->pDataBlock, so stop pulling and sort in memory
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
break;
}
int32_t code = doMergeSDatablock(pInfo->pDataBlock, pBlock);
if (code != TSDB_CODE_SUCCESS) {
// todo handle error
}
}
int32_t numOfCols = pInfo->pDataBlock->info.numOfCols;
void** pCols = calloc(numOfCols, POINTER_BYTES);
SSchema* pSchema = calloc(numOfCols, sizeof(SSchema));
for(int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* p1 = taosArrayGet(pInfo->pDataBlock->pDataBlock, i);
pCols[i] = p1->pData;
pSchema[i].colId = p1->info.colId;
pSchema[i].bytes = p1->info.bytes;
pSchema[i].type = (uint8_t) p1->info.type;
}
__compar_fn_t comp = getKeyComparFunc(pSchema[pInfo->colIndex].type, pInfo->order);
taoscQSort(pCols, pSchema, numOfCols, pInfo->pDataBlock->info.rows, pInfo->colIndex, comp);
tfree(pCols);
tfree(pSchema);
return (pInfo->pDataBlock->info.rows > 0)? pInfo->pDataBlock:NULL;
}
SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal) {
SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo));
{
SSDataBlock* pDataBlock = calloc(1, sizeof(SSDataBlock));
pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData));
for(int32_t i = 0; i < numOfOutput; ++i) {
SColumnInfoData col = {{0}};
col.info.colId = pExpr[i].base.colInfo.colId;
col.info.bytes = pExpr[i].base.colBytes;
col.info.type = pExpr[i].base.colType;
taosArrayPush(pDataBlock->pDataBlock, &col);
if (col.info.colId == pOrderVal->orderColId) {
pInfo->colIndex = i;
}
}
pDataBlock->info.numOfCols = numOfOutput;
pInfo->order = pOrderVal->order;
pInfo->pDataBlock = pDataBlock;
}
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "InMemoryOrder";
pOperator->operatorType = OP_Order;
pOperator->blockingOptr = true;
pOperator->status = OP_IN_EXECUTING;
pOperator->info = pInfo;
pOperator->exec = doSort;
pOperator->cleanup = destroyOrderOperatorInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
appendUpstream(pOperator, upstream);
return pOperator;
}
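/*
 * A sketch of how this operator is meant to be wired (the query text is hypothetical):
 * createExecOperatorPlan only appends OP_Order for the outer part of a nested query
 * (pQueryAttr->vgId == 0) when the ORDER BY column is not the primary timestamp, e.g.
 *
 *     select c1 from (select c1 from tb) order by c1 desc;
 *
 * The operator is blocking: doSort first drains its upstream completely into
 * pInfo->pDataBlock and only then sorts all columns in memory with taoscQSort, keyed
 * on the column index resolved above from pOrderVal->orderColId.
 */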
static int32_t getTableScanOrder(STableScanInfo* pTableScanInfo) {
return pTableScanInfo->order;
}
@ -6407,6 +6536,11 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
}
static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
SOrderOperatorInfo* pInfo = (SOrderOperatorInfo*) param;
pInfo->pDataBlock = destroyOutputBuf(pInfo->pDataBlock);
}
static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) {
SFilterOperatorInfo* pInfo = (SFilterOperatorInfo*) param;
doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols);
@ -6415,6 +6549,8 @@ static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) {
static void destroyDistinctOperatorInfo(void* param, int32_t numOfOutput) {
SDistinctOperatorInfo* pInfo = (SDistinctOperatorInfo*) param;
taosHashCleanup(pInfo->pSet);
tfree(pInfo->buf);
taosArrayDestroy(pInfo->pDistinctDataInfo);
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
}
@ -6755,7 +6891,6 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn
pOperator->numOfOutput = numOfOutput;
pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->exec = doFill;
pOperator->cleanup = destroySFillOperatorInfo;
@ -6957,6 +7092,52 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf
return pOperator;
}
static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* pOperator, SSDataBlock *pBlock) {
if (taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput) {
// distinct info already inited
return true;
}
for (int i = 0; i < pOperator->numOfOutput; i++) {
pInfo->totalBytes += pOperator->pExpr[i].base.colBytes;
}
for (int i = 0; i < pOperator->numOfOutput; i++) {
int numOfBlock = (int)taosArrayGetSize(pBlock->pDataBlock);
assert(i < numOfBlock);
for (int j = 0; j < numOfBlock; j++) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, j);
if (pColDataInfo->info.colId == pOperator->pExpr[i].base.resColId) {
SDistinctDataInfo item = {.index = j, .type = pColDataInfo->info.type, .bytes = pColDataInfo->info.bytes};
taosArrayInsert(pInfo->pDistinctDataInfo, i, &item);
}
}
}
pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * (pOperator->numOfOutput);
pInfo->buf = calloc(1, pInfo->totalBytes);
return taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput ? true : false;
}
static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBlock, int32_t rowId) {
char *p = pInfo->buf;
memset(p, 0, pInfo->totalBytes);
for (int i = 0; i < taosArrayGetSize(pInfo->pDistinctDataInfo); i++) {
SDistinctDataInfo* pDistDataInfo = (SDistinctDataInfo *)taosArrayGet(pInfo->pDistinctDataInfo, i);
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index);
char *val = ((char *)pColDataInfo->pData) + pColDataInfo->info.bytes * rowId;
if (isNull(val, pDistDataInfo->type)) {
p += pDistDataInfo->bytes;
continue;
}
if (IS_VAR_DATA_TYPE(pDistDataInfo->type)) {
memcpy(p, varDataVal(val), varDataLen(val));
p += varDataLen(val);
} else {
memcpy(p, val, pDistDataInfo->bytes);
p += pDistDataInfo->bytes;
}
memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM));
p += strlen(MULTI_KEY_DELIM);
}
}
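/*
 * A sketch of the composite key built above (column values are hypothetical): for a
 * row with a tinyint column = 7 and a binary column = "abc", the buffer looks roughly
 * like
 *
 *   | 0x07 | MULTI_KEY_DELIM | 'a' 'b' 'c' | MULTI_KEY_DELIM | ...zero padding... |
 *
 * A NULL value only advances the write cursor by its declared byte width (leaving the
 * zeroes from the memset) and skips the delimiter. The hash key is always the full
 * pInfo->totalBytes buffer, so the trailing padding is part of the key as well.
 */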
static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
@ -6964,11 +7145,9 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
return NULL;
}
SDistinctOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pRes = pInfo->pRes;
pRes->info.rows = 0;
SSDataBlock* pBlock = NULL;
while(1) {
@ -6981,77 +7160,60 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
pOperator->status = OP_EXEC_DONE;
break;
}
if (pInfo->colIndex == -1) {
for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i);
if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) {
pInfo->colIndex = i;
break;
}
}
}
if (pInfo->colIndex == -1) {
if (!initMultiDistinctInfo(pInfo, pOperator, pBlock)) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
}
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex);
int16_t bytes = pColInfoData->info.bytes;
int16_t type = pColInfoData->info.type;
// ensure the output buffer size
SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, 0);
if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) {
int32_t newSize = pRes->info.rows + pBlock->info.rows;
char* tmp = realloc(pResultColInfoData->pData, newSize * bytes);
if (tmp == NULL) {
return NULL;
} else {
pResultColInfoData->pData = tmp;
break;
}
// ensure result output buf
if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) {
int32_t newSize = pRes->info.rows + pBlock->info.rows;
for (int i = 0; i < taosArrayGetSize(pRes->pDataBlock); i++) {
SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, i);
SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, i);
char* tmp = realloc(pResultColInfoData->pData, newSize * pDistDataInfo->bytes);
if (tmp == NULL) {
return NULL;
} else {
pResultColInfoData->pData = tmp;
}
}
pInfo->outputCapacity = newSize;
}
}
for(int32_t i = 0; i < pBlock->info.rows; ++i) {
char* val = ((char*)pColInfoData->pData) + bytes * i;
if (isNull(val, type)) {
continue;
}
char* p = val;
size_t keyLen = 0;
if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) {
tstr* var = (tstr*)(val);
p = var->data;
keyLen = varDataLen(var);
} else {
keyLen = bytes;
}
int dummy;
void* res = taosHashGet(pInfo->pSet, p, keyLen);
if (res == NULL) {
taosHashPut(pInfo->pSet, p, keyLen, &dummy, sizeof(dummy));
char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows;
memcpy(start, val, bytes);
}
for (int32_t i = 0; i < pBlock->info.rows; i++) {
buildMultiDistinctKey(pInfo, pBlock, i);
if (taosHashGet(pInfo->pSet, pInfo->buf, pInfo->totalBytes) == NULL) {
int32_t dummy;
taosHashPut(pInfo->pSet, pInfo->buf, pInfo->totalBytes, &dummy, sizeof(dummy));
for (int j = 0; j < taosArrayGetSize(pRes->pDataBlock); j++) {
SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, j); // distinct meta info
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); //src
SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, j); // dist
char* val = ((char*)pColInfoData->pData) + pDistDataInfo->bytes * i;
char *start = pResultColInfoData->pData + pDistDataInfo->bytes * pInfo->pRes->info.rows;
memcpy(start, val, pDistDataInfo->bytes);
}
pRes->info.rows += 1;
}
}
}
}
if (pRes->info.rows >= pInfo->threshold) {
break;
}
}
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
}
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo));
pInfo->colIndex = -1;
pInfo->threshold = 10000000; // distinct result threshold
pInfo->outputCapacity = 4096;
pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK);
pInfo->totalBytes = 0;
pInfo->buf = NULL;
pInfo->threshold = tsMaxNumOfDistinctResults; // distinct result threshold
pInfo->outputCapacity = 4096;
pInfo->pDistinctDataInfo = taosArrayInit(numOfOutput, sizeof(SDistinctDataInfo));
pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity);
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));

View File

@ -1102,3 +1102,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) {
destroyColumnModel(pDesc->pColumnModel);
tfree(pDesc);
}
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
int32_t bytes = pSchema[index].bytes;
int32_t size = bytes + sizeof(int32_t);
char* buf = calloc(1, size * numOfRows);
for(int32_t i = 0; i < numOfRows; ++i) {
char* dest = buf + size * i;
memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes);
*(int32_t*)(dest+bytes) = i;
}
qsort(buf, numOfRows, size, compareFn);
int32_t prevLength = 0;
char* p = NULL;
for(int32_t i = 0; i < numOfCols; ++i) {
int32_t bytes1 = pSchema[i].bytes;
if (i == index) {
for(int32_t j = 0; j < numOfRows; ++j){
char* src = buf + (j * size);
char* dest = ((char*)pCols[i]) + (j * bytes1);
memcpy(dest, src, bytes1);
}
} else {
// make sure memory buffer is enough
if (prevLength < bytes1) {
char *tmp = realloc(p, bytes1 * numOfRows);
assert(tmp);
p = tmp;
prevLength = bytes1;
}
memcpy(p, pCols[i], bytes1 * numOfRows);
for(int32_t j = 0; j < numOfRows; ++j){
char* dest = ((char*)pCols[i]) + bytes1 * j;
int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
char* src = p + (newPos * bytes1);
memcpy(dest, src, bytes1);
}
}
}
tfree(buf);
tfree(p);
}
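/*
 * A minimal usage sketch for taoscQSort, with hypothetical data and an inline
 * comparator so the example stays self-contained: sort a two-column in-memory block
 * by column 0; every other column is permuted so rows stay aligned with the key.
 */
static int32_t sketchCompareInt32(const void *pLeft, const void *pRight) {
  int32_t left = *(const int32_t *)pLeft, right = *(const int32_t *)pRight;
  return (left > right) - (left < right);
}

static void taoscQSortSketch(void) {
  int32_t key[4]     = {3, 1, 4, 2};       // key column, 4 bytes per row
  int64_t payload[4] = {30, 10, 40, 20};   // payload column, 8 bytes per row
  void   *cols[2]    = {key, payload};
  SSchema schema[2]  = {{0}, {0}};
  schema[0].bytes = sizeof(int32_t);
  schema[1].bytes = sizeof(int64_t);
  taoscQSort(cols, schema, 2, 4, 0, sketchCompareInt32);
  // afterwards: key == {1, 2, 3, 4} and payload == {10, 20, 30, 40}
}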

View File

@ -237,7 +237,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
}
pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes;
pBucket->comparFn = getKeyComparFunc(pBucket->type);
pBucket->comparFn = getKeyComparFunc(pBucket->type, TSDB_ORDER_ASC);
pBucket->hashFunc = getHashFunc(pBucket->type);
if (pBucket->hashFunc == NULL) {

View File

@ -557,10 +557,9 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
int32_t op = 0;
if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query
if (onlyQueryTags(pQueryAttr)) {
op = OP_TagScan;
taosArrayPush(plan, &op);
}
op = OP_TagScan;
taosArrayPush(plan, &op);
if (pQueryAttr->distinct) {
op = OP_Distinct;
taosArrayPush(plan, &op);
@ -651,8 +650,14 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
}
}
// outer query order by support
int32_t orderColId = pQueryAttr->order.orderColId;
if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) {
op = OP_Order;
taosArrayPush(plan, &op);
}
}
if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) {
op = OP_Limit;
@ -693,7 +698,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
}
// fill operator
if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) {
op = OP_Fill;
taosArrayPush(plan, &op);
}

View File

@ -223,8 +223,11 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) {
static void shrinkBuffer(STSList* ptsData) {
  // shrink the tmp buffer if it consumes too much memory compared to the pre-defined size
if (ptsData->allocSize >= ptsData->threshold * 2) {
ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
ptsData->allocSize = MEM_BUF_SIZE;
char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
if(rawBuf) {
ptsData->rawBuf = rawBuf;
ptsData->allocSize = MEM_BUF_SIZE;
}
}
}

View File

@ -22,10 +22,10 @@ extern "C" {
#include "os.h"
#define TSDB_PATTERN_MATCH 0
#define TSDB_PATTERN_NOMATCH 1
#define TSDB_PATTERN_NOWILDCARDMATCH 2
#define TSDB_PATTERN_STRING_MAX_LEN 100
#define TSDB_PATTERN_MATCH 0
#define TSDB_PATTERN_NOMATCH 1
#define TSDB_PATTERN_NOWILDCARDMATCH 2
#define TSDB_PATTERN_STRING_DEFAULT_LEN 100
#define FLT_COMPAR_TOL_FACTOR 4
#define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON))
@ -47,7 +47,7 @@ int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, con
int32_t doCompare(const char* a, const char* b, int32_t type, size_t size);
__compar_fn_t getKeyComparFunc(int32_t keyType);
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
__compar_fn_t getComparFunc(int32_t type, int32_t optr);

View File

@ -16,28 +16,22 @@
#include "os.h"
#include "ttype.h"
#include "tcompare.h"
#include "tarray.h"
#include "hash.h"
int32_t compareInt32Val(const void *pLeft, const void *pRight) {
int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
int32_t setCompareBytes1(const void *pLeft, const void *pRight) {
return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0;
}
int32_t compareInt64Val(const void *pLeft, const void *pRight) {
int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
int32_t setCompareBytes2(const void *pLeft, const void *pRight) {
return NULL != taosHashGet((SHashObj *)pRight, pLeft, 2) ? 1 : 0;
}
int32_t compareInt16Val(const void *pLeft, const void *pRight) {
int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
int32_t setCompareBytes4(const void *pLeft, const void *pRight) {
return NULL != taosHashGet((SHashObj *)pRight, pLeft, 4) ? 1 : 0;
}
int32_t setCompareBytes8(const void *pLeft, const void *pRight) {
return NULL != taosHashGet((SHashObj *)pRight, pLeft, 8) ? 1 : 0;
}
int32_t compareInt8Val(const void *pLeft, const void *pRight) {
@ -47,27 +41,76 @@ int32_t compareInt8Val(const void *pLeft, const void *pRight) {
return 0;
}
int32_t compareUint32Val(const void *pLeft, const void *pRight) {
int32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight);
int32_t compareInt8ValDesc(const void *pLeft, const void *pRight) {
return compareInt8Val(pRight, pLeft);
}
int32_t compareInt16Val(const void *pLeft, const void *pRight) {
int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
int32_t compareInt16ValDesc(const void* pLeft, const void* pRight) {
return compareInt16Val(pRight, pLeft);
}
int32_t compareInt32Val(const void *pLeft, const void *pRight) {
int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
int32_t compareInt32ValDesc(const void* pLeft, const void* pRight) {
return compareInt32Val(pRight, pLeft);
}
int32_t compareInt64Val(const void *pLeft, const void *pRight) {
int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
int32_t compareInt64ValDesc(const void* pLeft, const void* pRight) {
return compareInt64Val(pRight, pLeft);
}
int32_t compareUint32Val(const void *pLeft, const void *pRight) {
uint32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
int32_t compareUint32ValDesc(const void* pLeft, const void* pRight) {
return compareUint32Val(pRight, pLeft);
}
int32_t compareUint64Val(const void *pLeft, const void *pRight) {
int64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight);
uint64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
int32_t compareUint64ValDesc(const void* pLeft, const void* pRight) {
return compareUint64Val(pRight, pLeft);
}
int32_t compareUint16Val(const void *pLeft, const void *pRight) {
int16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight);
uint16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
int32_t compareUint16ValDesc(const void* pLeft, const void* pRight) {
return compareUint16Val(pRight, pLeft);
}
int32_t compareUint8Val(const void* pLeft, const void* pRight) {
uint8_t left = GET_UINT8_VAL(pLeft), right = GET_UINT8_VAL(pRight);
if (left > right) return 1;
@ -75,6 +118,10 @@ int32_t compareUint8Val(const void* pLeft, const void* pRight) {
return 0;
}
int32_t compareUint8ValDesc(const void* pLeft, const void* pRight) {
return compareUint8Val(pRight, pLeft);
}
int32_t compareFloatVal(const void *pLeft, const void *pRight) {
float p1 = GET_FLOAT_VAL(pLeft);
float p2 = GET_FLOAT_VAL(pRight);
@ -92,8 +139,12 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) {
}
if (FLT_EQUAL(p1, p2)) {
return 0;
}
return FLT_GREATER(p1, p2) ? 1: -1;
}
return FLT_GREATER(p1, p2) ? 1: -1;
}
int32_t compareFloatValDesc(const void* pLeft, const void* pRight) {
return compareFloatVal(pRight, pLeft);
}
int32_t compareDoubleVal(const void *pLeft, const void *pRight) {
@ -113,14 +164,18 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) {
}
if (FLT_EQUAL(p1, p2)) {
return 0;
}
return FLT_GREATER(p1, p2) ? 1: -1;
}
return FLT_GREATER(p1, p2) ? 1: -1;
}
int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
return compareDoubleVal(pRight, pLeft);
}
int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
if (len1 != len2) {
return len1 > len2? 1:-1;
} else {
@ -133,14 +188,18 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
}
}
int32_t compareLenPrefixedStrDesc(const void* pLeft, const void* pRight) {
return compareLenPrefixedStr(pRight, pLeft);
}
int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
if (len1 != len2) {
return len1 > len2? 1:-1;
} else {
int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE);
int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1);
if (ret == 0) {
return 0;
} else {
@ -149,6 +208,10 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
}
}
int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) {
return compareLenPrefixedWStr(pRight, pLeft);
}
/*
* Compare two strings
* TSDB_MATCH: Match
@ -161,33 +224,33 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
*/
int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) {
char c, c1;
int32_t i = 0;
int32_t j = 0;
while ((c = patterStr[i++]) != 0) {
if (c == pInfo->matchAll) { /* Match "*" */
while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) {
if (c == pInfo->matchOne && (j > size || str[j++] == 0)) {
// empty string, return not match
return TSDB_PATTERN_NOWILDCARDMATCH;
}
}
if (c == 0) {
return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */
}
char next[3] = {toupper(c), tolower(c), 0};
while (1) {
size_t n = strcspn(str, next);
str += n;
if (str[0] == 0 || (n >= size)) {
break;
}
int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
if (ret != TSDB_PATTERN_NOMATCH) {
return ret;
@ -195,18 +258,18 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat
}
return TSDB_PATTERN_NOWILDCARDMATCH;
}
c1 = str[j++];
if (j <= size) {
if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) {
continue;
}
}
return TSDB_PATTERN_NOMATCH;
}
return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH;
}
@ -214,13 +277,13 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
wchar_t c, c1;
wchar_t matchOne = L'_'; // "_"
wchar_t matchAll = L'%'; // "%"
int32_t i = 0;
int32_t j = 0;
while ((c = patterStr[i++]) != 0) {
if (c == matchAll) { /* Match "%" */
while ((c = patterStr[i++]) == matchAll || c == matchOne) {
if (c == matchOne && (j > size || str[j++] == 0)) {
return TSDB_PATTERN_NOWILDCARDMATCH;
@ -229,40 +292,40 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
if (c == 0) {
return TSDB_PATTERN_MATCH;
}
wchar_t accept[3] = {towupper(c), towlower(c), 0};
while (1) {
size_t n = wcscspn(str, accept);
str += n;
if (str[0] == 0 || (n >= size)) {
break;
}
int32_t ret = WCSPatternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
if (ret != TSDB_PATTERN_NOMATCH) {
return ret;
}
}
return TSDB_PATTERN_NOWILDCARDMATCH;
}
c1 = str[j++];
if (j <= size) {
if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) {
continue;
}
}
return TSDB_PATTERN_NOMATCH;
}
return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH;
}
static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN);
@ -283,34 +346,54 @@ static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
int32_t taosArrayCompareString(const void* a, const void* b) {
const char* x = *(const char**)a;
const char* y = *(const char**)b;
return compareLenPrefixedStr(x, y);
}
//static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) {
// const SArray* arr = (const SArray*) pRight;
// return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 0 : 1;
//}
static int32_t compareFindItemInSet(const void *pLeft, const void* pRight) {
return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0;
int32_t compareFindItemInSet(const void *pLeft, const void* pRight) {
return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0;
}
static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
free(pattern);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
__compar_fn_t getComparFunc(int32_t type, int32_t optr) {
__compar_fn_t comparFn = NULL;
if (optr == TSDB_RELATION_IN && (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR)) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT:
return setCompareBytes1;
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
return setCompareBytes2;
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_FLOAT:
return setCompareBytes4;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
return setCompareBytes8;
default:
assert(0);
}
}
switch (type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: comparFn = compareInt8Val; break;
@ -328,13 +411,15 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
} else { /* normal relational comparFn */
comparFn = compareLenPrefixedStr;
}
break;
}
case TSDB_DATA_TYPE_NCHAR: {
if (optr == TSDB_RELATION_LIKE) {
comparFn = compareWStrPatternComp;
} else if (optr == TSDB_RELATION_IN) {
comparFn = compareFindItemInSet;
} else {
comparFn = compareLenPrefixedWStr;
}
@ -350,57 +435,57 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
comparFn = compareInt32Val;
break;
}
return comparFn;
}
__compar_fn_t getKeyComparFunc(int32_t keyType) {
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) {
__compar_fn_t comparFn = NULL;
switch (keyType) {
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_BOOL:
comparFn = compareInt8Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt8Val:compareInt8ValDesc;
break;
case TSDB_DATA_TYPE_SMALLINT:
comparFn = compareInt16Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt16Val:compareInt16ValDesc;
break;
case TSDB_DATA_TYPE_INT:
comparFn = compareInt32Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc;
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
comparFn = compareInt64Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt64Val:compareInt64ValDesc;
break;
case TSDB_DATA_TYPE_FLOAT:
comparFn = compareFloatVal;
comparFn = (order == TSDB_ORDER_ASC)? compareFloatVal:compareFloatValDesc;
break;
case TSDB_DATA_TYPE_DOUBLE:
comparFn = compareDoubleVal;
comparFn = (order == TSDB_ORDER_ASC)? compareDoubleVal:compareDoubleValDesc;
break;
case TSDB_DATA_TYPE_UTINYINT:
comparFn = compareUint8Val;
comparFn = (order == TSDB_ORDER_ASC)? compareUint8Val:compareUint8ValDesc;
break;
case TSDB_DATA_TYPE_USMALLINT:
comparFn = compareUint16Val;
comparFn = (order == TSDB_ORDER_ASC)? compareUint16Val:compareUint16ValDesc;
break;
case TSDB_DATA_TYPE_UINT:
comparFn = compareUint32Val;
comparFn = (order == TSDB_ORDER_ASC)? compareUint32Val:compareUint32ValDesc;
break;
case TSDB_DATA_TYPE_UBIGINT:
comparFn = compareUint64Val;
comparFn = (order == TSDB_ORDER_ASC)? compareUint64Val:compareUint64ValDesc;
break;
case TSDB_DATA_TYPE_BINARY:
comparFn = compareLenPrefixedStr;
comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedStr:compareLenPrefixedStrDesc;
break;
case TSDB_DATA_TYPE_NCHAR:
comparFn = compareLenPrefixedWStr;
comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedWStr:compareLenPrefixedWStrDesc;
break;
default:
comparFn = compareInt32Val;
comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc;
break;
}
return comparFn;
}
@ -424,8 +509,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
if (t1->len != t2->len) {
return t1->len > t2->len? 1:-1;
}
int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE);
int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len);
if (ret == 0) {
return ret;
}
@ -434,7 +518,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
default: { // todo refactor
tstr* t1 = (tstr*) f1;
tstr* t2 = (tstr*) f2;
if (t1->len != t2->len) {
return t1->len > t2->len? 1:-1;
} else {

View File

@ -54,7 +54,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
pSkipList->keyFn = fn;
pSkipList->seed = rand();
if (comparFn == NULL) {
pSkipList->comparFn = getKeyComparFunc(keyType);
pSkipList->comparFn = getKeyComparFunc(keyType, TSDB_ORDER_ASC);
} else {
pSkipList->comparFn = comparFn;
}

View File

@ -70,7 +70,7 @@ void doubleSkipListTest() {
}
void randKeyTest() {
SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT),
SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC),
false, getkey);
int32_t size = 200000;

View File

@ -17,6 +17,7 @@
#define TAOS_RANDOM_FILE_FAIL_TEST
#include "os.h"
#include "taoserror.h"
#include "taosmsg.h"
#include "tchecksum.h"
#include "tfile.h"
#include "twal.h"
@ -114,7 +115,7 @@ void walRemoveAllOldFiles(void *handle) {
#if defined(WAL_CHECKSUM_WHOLE)
static void walUpdateChecksum(SWalHead *pHead) {
pHead->sver = 1;
pHead->sver = 2;
pHead->cksum = 0;
pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len);
}
@ -122,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) {
static int walValidateChecksum(SWalHead *pHead) {
  if (pHead->sver == 0) {  // for compatibility with WAL files written before sver 1
return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead));
} else if (pHead->sver == 1) {
} else if (pHead->sver >= 1) {
uint32_t cksum = pHead->cksum;
pHead->cksum = 0;
return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum);
@ -281,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd,
return TSDB_CODE_SUCCESS;
}
if (pHead->sver == 1) {
if (pHead->sver >= 1) {
if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) {
wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos);
return TSDB_CODE_WAL_FILE_CORRUPTED;
@ -306,7 +307,115 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd,
return TSDB_CODE_WAL_FILE_CORRUPTED;
}
// Add SMemRowType ahead of SDataRow
static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) {
// copy the header firstly
memcpy(pDest, pSrc, sizeof(SSubmitBlk));
int32_t nRows = htons(pDest->numOfRows);
int32_t dataLen = htonl(pDest->dataLen);
if ((nRows <= 0) || (dataLen <= 0)) {
return;
}
char *pDestData = pDest->data;
char *pSrcData = pSrc->data;
for (int32_t i = 0; i < nRows; ++i) {
memRowSetType(pDestData, SMEM_ROW_DATA);
memcpy(memRowDataBody(pDestData), pSrcData, dataRowLen(pSrcData));
pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData));
pSrcData = POINTER_SHIFT(pSrcData, dataRowLen(pSrcData));
++(*lenExpand);
}
pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t));
}
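/*
 * Layout sketch for the per-row rewrite above (field order shown only roughly, following
 * the macros used in expandSubmitBlk): an old-format WAL row is a bare SDataRow, while
 * the rewritten row is an SMemRow, i.e. a one-byte type tag placed in front of the same
 * SDataRow body:
 *
 *   before: | SDataRow (len | sversion | column data ...) |
 *   after : | SMEM_ROW_DATA (1 byte) | SDataRow (len | sversion | column data ...) |
 *
 * so dataLen grows by nRows * sizeof(uint8_t), matching the lenExpand bookkeeping.
 */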
// Check SDataRow by comparing the SDataRow len and SSubmitBlk dataLen
static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) {
if ((nRows <= 0) || (dataLen <= 0)) {
return true;
}
int32_t len = 0, kvLen = 0;
for (int i = 0; i < nRows; ++i) {
len += dataRowLen(pBlkData);
if (len > dataLen) {
return false;
}
    /**
     * For an SDataRow from versions [2.1.5.0, 2.1.6.X], no conflict can occur.
     * For an SKVRow from versions [2.1.5.0, 2.1.6.X], a conflict is possible in the scenario below:
     *  - the 1st type byte is 0x01 and the sversion is 0x0101 (257), so do a further check.
     */
if (dataRowLen(pBlkData) == 257) {
SMemRow memRow = pBlkData;
SKVRow kvRow = memRowKvBody(memRow);
int nCols = kvRowNCols(kvRow);
uint16_t calcTsOffset = (uint16_t)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nCols);
uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset;
if (calcTsOffset == realTsOffset) {
kvLen += memRowKvTLen(memRow);
}
}
pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData));
}
if (len != dataLen) {
return false;
}
if (kvLen == dataLen) {
return false;
}
return true;
}
// for WAL SMemRow/SDataRow compatibility
static int walSMemRowCheck(SWalHead *pHead) {
if ((pHead->sver < 2) && (pHead->msgType == TSDB_MSG_TYPE_SUBMIT)) {
SSubmitMsg *pMsg = (SSubmitMsg *)pHead->cont;
int32_t numOfBlocks = htonl(pMsg->numOfBlocks);
if (numOfBlocks <= 0) {
return 0;
}
int32_t nTotalRows = 0;
SSubmitBlk *pBlk = (SSubmitBlk *)pMsg->blocks;
for (int32_t i = 0; i < numOfBlocks; ++i) {
int32_t dataLen = htonl(pBlk->dataLen);
int32_t nRows = htons(pBlk->numOfRows);
nTotalRows += nRows;
if (!walIsSDataRow(pBlk->data, nRows, dataLen)) {
return 0;
}
pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen);
}
ASSERT(nTotalRows >= 0);
SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1);
if (pWalHead == NULL) {
return -1;
}
memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg));
SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont;
SSubmitBlk *pDestBlks = (SSubmitBlk *)pDestMsg->blocks;
SSubmitBlk *pSrcBlks = (SSubmitBlk *)pMsg->blocks;
int32_t lenExpand = 0;
for (int32_t i = 0; i < numOfBlocks; ++i) {
expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand);
pDestBlks = POINTER_SHIFT(pDestBlks, htonl(pDestBlks->dataLen) + sizeof(SSubmitBlk));
pSrcBlks = POINTER_SHIFT(pSrcBlks, htonl(pSrcBlks->dataLen) + sizeof(SSubmitBlk));
}
if (lenExpand > 0) {
pDestMsg->header.contLen = htonl(pDestMsg->length) + lenExpand;
pDestMsg->length = htonl(pDestMsg->header.contLen);
pWalHead->len = pWalHead->len + lenExpand;
}
memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len);
tfree(pWalHead);
}
return 0;
}
static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) {
int32_t size = WAL_MAX_SIZE;
@ -346,7 +455,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
}
#if defined(WAL_CHECKSUM_WHOLE)
if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 1) {
if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 2) {
wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name,
pHead->version, pHead->len, offset);
code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset);
@ -379,7 +488,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
continue;
}
if (pHead->sver == 1 && !walValidateChecksum(pHead)) {
if ((pHead->sver >= 1) && !walValidateChecksum(pHead)) {
wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name,
pHead->version, pHead->len, offset);
code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset);
@ -431,7 +540,14 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
pWal->version = pHead->version;
//wInfo("writeFp: %ld", offset);
// wInfo("writeFp: %ld", offset);
if (0 != walSMemRowCheck(pHead)) {
wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64,
pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset);
tfClose(tfd);
tfree(buffer);
return TAOS_SYSTEM_ERROR(errno);
}
(*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL);
}

View File

@ -15,7 +15,8 @@ script_dir="$(dirname $(readlink -f $0))"
###### step 3: start build
cd $script_dir
rm -f go.*
go mod init demotest
go build
go mod init demotest > /dev/null 2>&1
go mod tidy > /dev/null 2>&1
go build > /dev/null 2>&1
sleep 1s
./demotest -h $1 -p $2

View File

@ -105,10 +105,10 @@ function runQueryPerfTest {
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT
echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT
echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT
}

View File

@ -76,6 +76,7 @@ python3 ./test.py -f tag_lite/set.py
python3 ./test.py -f tag_lite/smallint.py
python3 ./test.py -f tag_lite/tinyint.py
python3 ./test.py -f tag_lite/timestamp.py
python3 ./test.py -f tag_lite/TestModifyTag.py
#python3 ./test.py -f dbmgmt/database-name-boundary.py
python3 test.py -f dbmgmt/nanoSecondCheck.py
@ -151,6 +152,7 @@ python3 ./test.py -f import_merge/importCSV.py
python3 ./test.py -f import_merge/import_update_0.py
python3 ./test.py -f import_merge/import_update_1.py
python3 ./test.py -f import_merge/import_update_2.py
python3 ./test.py -f update/merge_commit_data.py
#======================p1-end===============
#======================p2-start===============
# tools
@ -179,7 +181,7 @@ python3 ./test.py -f update/allow_update-0.py
python3 ./test.py -f update/append_commit_data.py
python3 ./test.py -f update/append_commit_last-0.py
python3 ./test.py -f update/append_commit_last.py
python3 ./test.py -f update/merge_commit_data.py
python3 ./test.py -f update/merge_commit_data2.py
python3 ./test.py -f update/merge_commit_data2_update0.py

View File

@ -26,18 +26,70 @@ class TDTestCase:
self.rowNum = 10
self.ts = 1537146000000
def run(self):
tdSql.prepare()
tdSql.execute("create table t(ts timestamp, k int)")
tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);")
tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 12)
tdSql.execute("create table ap1 (ts timestamp, pav float)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.119', 2.90799)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.317', 3.07399)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.517', 0.58117)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.717', 0.16150)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.918', 1.47885)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:56.569', 1.76472)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.381', 2.13722)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.574', 4.10256)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.776', 3.55345)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.976', 1.46624)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.187', 0.17943)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.372', 2.04101)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.573', 3.20924)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.768', 1.71807)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.964', 4.60900)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.155', 4.33907)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.359', 0.76940)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.553', 0.06458)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.742', 4.59857)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.938', 1.55081)")
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV)")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT)")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR)")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)")
tdSql.checkRows(6)
tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT)")
tdSql.checkRows(6)
tdSql.checkData(0,1,2.90799)
tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV)")
tdSql.checkRows(7)
tdSql.checkData(1,1,1.47885)
tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)")
tdSql.checkRows(7)
# check desc order
tdSql.error("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV) order by ts desc")
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT) order by ts desc")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR) order by ts desc")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc")
tdSql.checkRows(6)
tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT) order by ts desc")
tdSql.checkRows(6)
tdSql.checkData(0,1,4.60900)
tdSql.error("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV) order by ts desc")
tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc")
tdSql.checkRows(7)
# check exception
tdSql.error("select interp(*) from ap1")
tdSql.error("select interp(*) from ap1 FILL(NEXT)")
tdSql.error("select interp(*) from ap1 ts >= '2021-07-25 02:19:54' FILL(NEXT)")
tdSql.error("select interp(*) from ap1 ts <= '2021-07-25 02:19:54' FILL(NEXT)")
tdSql.error("select interp(*) from ap1 where ts >'2021-07-25 02:19:59.938' and ts < now interval(1s) fill(next)")
tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -13,6 +13,8 @@
import sys
import subprocess
import random
import math
from util.log import *
from util.cases import *
@ -56,7 +58,7 @@ class TDTestCase:
def td3690(self):
tdLog.printNoPrefix("==========TD-3690==========")
tdSql.query("show variables")
tdSql.checkData(51, 1, 864000)
tdSql.checkData(53, 1, 864000)
def td4082(self):
tdLog.printNoPrefix("==========TD-4082==========")
@ -106,6 +108,9 @@ class TDTestCase:
tdSql.execute("drop database if exists db1")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("create database if not exists db1 keep 3650")
tdSql.execute("create database if not exists new keep 3650")
tdSql.execute("create database if not exists private keep 3650")
tdSql.execute("create database if not exists db2 keep 3650")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
@ -122,6 +127,14 @@ class TDTestCase:
        # p1: without switching into the specified database
tdSql.query("show create database db")
tdSql.checkRows(1)
tdSql.query("show create database db1")
tdSql.checkRows(1)
tdSql.query("show create database db2")
tdSql.checkRows(1)
tdSql.query("show create database new")
tdSql.checkRows(1)
tdSql.query("show create database private")
tdSql.checkRows(1)
tdSql.error("show create database ")
tdSql.error("show create databases db ")
tdSql.error("show create database db.stb1")
@ -255,7 +268,7 @@ class TDTestCase:
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db")
tdSql.query("show variables")
tdSql.checkData(36, 1, 3650)
tdSql.checkData(38, 1, 3650)
tdSql.query("show databases")
tdSql.checkData(0,7,"3650,3650,3650")
@ -283,7 +296,7 @@ class TDTestCase:
tdSql.query("show databases")
tdSql.checkData(0, 7, "3650,3650,3650")
tdSql.query("show variables")
tdSql.checkData(36, 1, 3650)
tdSql.checkData(38, 1, 3650)
tdSql.execute("alter database db1 keep 365")
tdSql.execute("drop database if exists db1")
@ -340,17 +353,552 @@ class TDTestCase:
pass
def td4889(self):
tdLog.printNoPrefix("==========TD-4889==========")
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("use db")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
for i in range(1000):
tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
for j in range(100):
tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
tdSql.query("show vgroups")
index = tdSql.getData(0,0)
tdSql.checkData(0, 6, 0)
tdSql.execute(f"compact vnodes in({index})")
for i in range(3):
tdSql.query("show vgroups")
if tdSql.getData(0, 6) == 1:
tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
break
            if i == 2:
                tdLog.exit("compaction did not occur")
time.sleep(0.5)
pass
def td5168insert(self):
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("use db")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
tdSql.execute("create table db.t1 using db.stb1 tags(1)")
for i in range(5):
c1 = 1001.11 + i*0.1
c2 = 1001.11 + i*0.1 + 1*0.01
c3 = 1001.11 + i*0.1 + 2*0.01
c4 = 1001.11 + i*0.1 + 3*0.01
tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
# tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
# tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
# tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
# tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
# tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
# for i in range(1000000):
for i in range(1000000):
random1 = random.uniform(1000,1001)
random2 = random.uniform(1000,1001)
random3 = random.uniform(1000,1001)
random4 = random.uniform(1000,1001)
tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
pass
def td5168(self):
tdLog.printNoPrefix("==========TD-5168==========")
        # insert random values within a small range
        tdLog.printNoPrefix("=====step0: insert data with default settings========")
self.td5168insert()
        # fetch the rows at five timestamps as baseline values; without compression they must match exactly
for i in range(5):
tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
# c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
for j in range(4):
locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
# tdSql.query("select * from db.t1 limit 100,1")
# f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
#
# tdSql.query("select * from db.t1 limit 1000,1")
# f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
#
# tdSql.query("select * from db.t1 limit 10000,1")
# f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
#
# tdSql.query("select * from db.t1 limit 100000,1")
# f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
#
# tdSql.query("select * from db.t1 limit 1000000,1")
# f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
        # stop the service and get the data size with lossy compression disabled
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
tdDnodes.stop(index)
cfgdir = self.getCfgDir()
cfgfile = self.getCfgFile()
lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
tdLog.printNoPrefix(f"close the lossyColumnsdata size is: {dsize_init};the lossyColumns line is: {lossy_args}")
###################################################
float_lossy = "float"
double_lossy = "double"
float_double_lossy = "float|double"
no_loosy = ""
double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
_ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
###################################################
        # enable lossy compression for float, then start the service and insert data
        tdLog.printNoPrefix("=====step1: set lossyColumns to float========")
lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
tdDnodes.start(index)
self.td5168insert()
        # query the five timestamps above and compare with the baseline values
for i in range(5):
tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
# c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
for j in range(4):
# locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
# print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
        # stop the service and get the data size with lossyColumns set to float
tdDnodes.stop(index)
dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
tdLog.printNoPrefix(f"open the lossyColumns data size is{dsize_float};the lossyColumns line is: {lossy_args}")
# 修改有损压缩参数double并启动服务
tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
tdDnodes.start(index)
self.td5168insert()
        # query the five timestamps above and compare with the baseline values
for i in range(5):
tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
for j in range(4):
tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
        # stop the service and get the data size with lossyColumns set to double
tdDnodes.stop(index)
dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
tdLog.printNoPrefix(f"open the lossyColumns, data size is{dsize_double};the lossyColumns line is: {lossy_args}")
# 修改有损压缩,参数 float&&double ,并启动服务
tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
tdDnodes.start(index)
self.td5168insert()
        # query the five timestamps above and compare with the baseline values
for i in range(5):
tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
for j in range(4):
tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
# 关闭服务并获取压缩参数为 float&&double 情况下的数据容量
tdDnodes.stop(index)
dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
tdLog.printNoPrefix(f"open the lossyColumns data size is{dsize_float_double};the lossyColumns line is: {lossy_args}")
if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
tdLog.exit("压缩未生效")
else:
tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
tdLog.printNoPrefix("压缩生效")
pass
def td5433(self):
tdLog.printNoPrefix("==========TD-5433==========")
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("use db")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
numtab=2000000
for i in range(numtab):
sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
tdSql.execute(sql)
tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
tdSql.query("select distinct t1 from stb1 where t1 != '150'")
tdSql.checkRows(numtab-1)
tdSql.query("select distinct t1 from stb1 where t1 != 150")
tdSql.checkRows(numtab-1)
tdSql.query("select distinct t1 from stb1 where t1 = 150")
tdSql.checkRows(1)
tdSql.query("select distinct t1 from stb1 where t1 = '150'")
tdSql.checkRows(1)
tdSql.query("select distinct t1 from stb1")
tdSql.checkRows(numtab)
tdSql.query("select distinct t0 from stb1 where t0 != '2'")
tdSql.checkRows(127)
tdSql.query("select distinct t0 from stb1 where t0 != 2")
tdSql.checkRows(127)
tdSql.query("select distinct t0 from stb1 where t0 = 2")
tdSql.checkRows(1)
tdSql.query("select distinct t0 from stb1 where t0 = '2'")
tdSql.checkRows(1)
tdSql.query("select distinct t0 from stb1")
tdSql.checkRows(128)
tdSql.query("select distinct t1 from stb2 where t1 != '200'")
tdSql.checkRows(4)
tdSql.query("select distinct t1 from stb2 where t1 != 200")
tdSql.checkRows(4)
tdSql.query("select distinct t1 from stb2 where t1 = 200")
tdSql.checkRows(1)
tdSql.query("select distinct t1 from stb2 where t1 = '200'")
tdSql.checkRows(1)
tdSql.query("select distinct t1 from stb2")
tdSql.checkRows(5)
tdSql.query("select distinct t0 from stb2 where t0 != '2'")
tdSql.checkRows(4)
tdSql.query("select distinct t0 from stb2 where t0 != 2")
tdSql.checkRows(4)
tdSql.query("select distinct t0 from stb2 where t0 = 2")
tdSql.checkRows(1)
tdSql.query("select distinct t0 from stb2 where t0 = '2'")
tdSql.checkRows(1)
tdSql.query("select distinct t0 from stb2")
tdSql.checkRows(5)
pass
def td5798(self):
tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("use db")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
maxRemainderNum=7
tbnum=101
for i in range(tbnum-1):
sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
tdSql.execute(sql)
tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
        #========== TD-5810 support distinct multi-data-column ==========
tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
tdSql.checkRows(tbnum)
tdSql.query(f"select distinct c2 from stb1")
tdSql.checkRows(4)
tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
tdSql.checkRows(tbnum*3)
tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
tdSql.checkRows(tbnum)
tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
tdSql.checkRows(2)
tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
tdSql.checkRows(1)
tdSql.query(f"select distinct c2 from t1")
tdSql.checkRows(4)
tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c1 from t1 ")
tdSql.checkRows(2)
tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
tdSql.checkRows(1)
tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
tdSql.checkRows(1)
tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
tdSql.checkRows(3)
tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
tdSql.checkRows(2)
tdSql.error("select distinct c5 from stb1")
tdSql.error("select distinct c5 from t1")
tdSql.error("select distinct c1 from db.*")
tdSql.error("select c2, distinct c1 from stb1")
tdSql.error("select c2, distinct c1 from t1")
tdSql.error("select distinct c2 from ")
tdSql.error("distinct c2 from stb1")
tdSql.error("distinct c2 from t1")
tdSql.error("select distinct c1, c2, c3 from stb1")
tdSql.error("select distinct c1, c2, c3 from t1")
tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
tdSql.checkRows(tbnum*3)
tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
tdSql.checkRows(3)
tdSql.query("select distinct c1, c2 from stb1 order by ts")
tdSql.checkRows(tbnum*3+1)
tdSql.query("select distinct c1, c2 from t1 order by ts")
tdSql.checkRows(4)
tdSql.error("select distinct c1, ts from stb1 group by c2")
tdSql.error("select distinct c1, ts from t1 group by c2")
tdSql.error("select distinct c1, max(c2) from stb1 ")
tdSql.error("select distinct c1, max(c2) from t1 ")
tdSql.error("select max(c2), distinct c1 from stb1 ")
tdSql.error("select max(c2), distinct c1 from t1 ")
tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
tdSql.checkRows(6)
tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
tdSql.checkRows(15)
tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
tdSql.checkRows(4)
tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
tdSql.checkRows(3)
tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
# tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
# tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
# tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
tdSql.checkRows(1)
tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
tdSql.checkRows(1)
tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
# tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
# tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
#========== TD-5798 support distinct on multiple tag columns ==========
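# tag values cycle with period maxRemainderNum and one extra table has all-NULL tags,
# so distinct on a single tag yields maxRemainderNum+1 rows, and independent tag pairs
# yield maxRemainderNum*2+1 (stb1) or maxRemainderNum*3+1 (stb2) combinations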
tdSql.query("select distinct t1 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t0, t1 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t1, t0 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t1, t2 from stb1")
tdSql.checkRows(maxRemainderNum*2+1)
tdSql.query("select distinct t0, t1, t2 from stb1")
tdSql.checkRows(maxRemainderNum*2+1)
tdSql.query("select distinct t0 t1, t1 t2 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t0, t0, t0 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t0, t1 from t1")
tdSql.checkRows(1)
tdSql.query("select distinct t0, t1 from t100num")
tdSql.checkRows(1)
tdSql.query("select distinct t3 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t2, t3 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t3, t2 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t4, t2 from stb2")
tdSql.checkRows(maxRemainderNum*3+1)
tdSql.query("select distinct t2, t3, t4 from stb2")
tdSql.checkRows(maxRemainderNum*3+1)
tdSql.query("select distinct t2 t1, t3 t2 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t3, t3, t3 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t2, t3 from t01")
tdSql.checkRows(1)
tdSql.query("select distinct t3, t4 from t0100num")
tdSql.checkRows(1)
########## should be error #########
tdSql.error("select distinct from stb1")
tdSql.error("select distinct t3 from stb1")
tdSql.error("select distinct t1 from db.*")
tdSql.error("select distinct t2 from ")
tdSql.error("distinct t2 from stb1")
tdSql.error("select distinct stb1")
tdSql.error("select distinct t0, t1, t2, t3 from stb1")
tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
tdSql.error("select dist t0 from stb1")
tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
########## add where condition ##########
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
tdSql.checkRows(3)
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
tdSql.checkRows(2)
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
tdSql.checkRows(1)
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
tdSql.checkRows(3)
tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
tdSql.checkRows(1)
tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
tdSql.checkRows(5)
tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
tdSql.checkRows(4)
tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.checkRows(1)
tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.checkRows(1)
tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
pass
def td5935(self):
tdLog.printNoPrefix("==========TD-5935==========")
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("use db")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
nowtime=int(round((time.time()*1000)))
for i in range(100):
sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
tdSql.execute(sql)
for j in range(1000):
tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
########### TD-5933 verify that the bug "function stddev with interval returns 0 rows" is fixed ##########
stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
tdSql.query(stddevAndIntervalSql)
tdSql.checkRows(10)
########## TD-5978 verify that the bug "when the start row is null, fill(next) returns 0" is fixed ##########
fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
tdSql.query(fillsql)
fillResult=False
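# after the TD-5978 fix the first window (which starts at the all-NULL row) should be
# filled with the next non-NULL value, so row 0 column 2 must be neither 0 nor None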
if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
fillResult=True
if fillResult:
tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
else:
tdLog.exit("fill(next) is wrong")
pass
def run(self):
# master branch
# self.td3690()
# self.td4082()
# self.td4288()
self.td4724()
# self.td4724()
self.td5798()
# self.td5935()
# develop branch
# self.td4097()
# self.td4889()
# self.td5168()
# self.td5433()
def stop(self):
tdSql.close()

View File

@ -0,0 +1,51 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
print("==============step1")
tdSql.execute(
"create stable t6 (ts timestamp,val int,flow nchar(36)) tags(dev nchar(36),dev1 nchar(36),dev2 nchar(36))")
tdSql.execute("insert into t6004 using t6 (dev,dev1,dev2) tags ('b50c79bc-b102-48e6-bda1-4212263e46d0','b50c79bc-b102-48e6-bda1-4212263e46d0', 'b50c79bc-b102-48e6-bda1-4212263e46d0') values(now,1,'b50c79bc-b102-48e6-bda1-4212263e46d0')")
print("==============step2")
tdSql.query("select * from t6 where dev='b50c79bc-b102-48e6-bda1-4212263e46d0'")
tdSql.checkRows(1)
tdSql.query("select * from t6 where dev1='b50c79bc-b102-48e6-bda1-4212263e46d0'")
tdSql.checkRows(1)
tdSql.query("select * from t6 where dev2='b50c79bc-b102-48e6-bda1-4212263e46d0'")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -65,6 +65,10 @@ class TDTestCase:
# TD-2208
tdSql.error("select diff(tagtype),top(tagtype,1) from dev_001")
# TD-6006
tdSql.error("select * from dev_001 where 'name' is not null")
tdSql.error("select * from dev_001 where \"name\" = 'first'")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -0,0 +1,125 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts = 1625068800000000000 # nanosecond timestamp for "2021-07-01 00:00:00"
self.numberOfTables = 10
self.numberOfRecords = 100
def checkCommunity(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
return False
else:
return True
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
buildPath = ""  # default when taosdump is not found under the project path
for root, dirs, files in os.walk(projPath):
if ("taosdump" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosdump not found!")
else:
tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
# basic test for alter tags
tdSql.execute("create database tagdb ")
tdSql.execute(" use tagdb")
tdSql.execute("create table st (ts timestamp , a int) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))")
tdSql.execute("insert into t using st (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)")
tdSql.execute("alter table t set tag tg1='newtg1'")
res = tdSql.getResult("select tg1,tg2,tg3 from t")
if res == [('newtg1', 'tg2', 'tg3')]:
tdLog.info(" alter tag check has pass!")
else:
tdLog.info(" alter tag failed , please check !")
tdSql.error("alter stable st modify tag tg2 binary(2)")
tdSql.execute("alter stable st modify tag tg2 binary(30) ")
tdSql.execute("alter table t set tag tg2 = 'abcdefghijklmnopqrstuvwxyz1234'")
res = tdSql.getResult("select tg1,tg2,tg3 from t")
if res == [('newtg1', 'abcdefghijklmnopqrstuvwxyz1234', 'tg3')]:
tdLog.info(" alter tag check has pass!")
else:
tdLog.info(" alter tag failed , please check !")
# test boundary about tags
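# the total declared tag length is capped at roughly 16KB (nchar counts 4 bytes per
# character and each column adds a few bytes of overhead), hence the pass/fail pairs below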
tdSql.execute("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16374))")
tdSql.error("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16375))")
bound_sql = "create stable stb2 (ts timestamp , a int) tags (tg1 binary(10),"
for i in range(127):
bound_sql+="tag"+str(i)+" binary(10),"
sql1 = bound_sql[:-1]+")"
tdSql.execute(sql1)
sql2 = bound_sql[:-1]+"tag127 binary(10))"
tdSql.error(sql2)
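# a super table allows at most 128 tag columns: sql1 (128 tags) succeeds, sql2 (129 tags) fails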
tdSql.execute("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4093))")
tdSql.error("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4094))")
tdSql.execute("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(8))")
tdSql.error("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(9))")
tdSql.execute("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(4),tag3 binary(2))")
tdSql.error("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(4),tag3 binary(3))")
tdSql.execute("create table stt (ts timestamp , a binary(100)) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))")
tdSql.execute("insert into tt using stt (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)")
tags = "t"*16337
sql3 = "alter table tt set tag tg1=" +"'"+tags+"'"
tdSql.error(sql3)
tdSql.execute("alter stable stt modify tag tg1 binary(16337)")
tdSql.execute(sql3)
res = tdSql.getResult("select tg1,tg2,tg3 from tt")
if res == [(tags, 'tg2', 'tg3')]:
tdLog.info(" alter tag check has pass!")
else:
tdLog.info(" alter tag failed , please check !")
os.system("rm -rf ./tag_lite/TestModifyTag.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -87,7 +87,7 @@ if __name__ == "__main__":
else:
toBeKilled = "valgrind.bin"
killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled
killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(psCmd, shell=True)
@ -110,7 +110,6 @@ if __name__ == "__main__":
time.sleep(2)
tdLog.info('stop All dnodes')
sys.exit(0)
tdDnodes.init(deployPath)
tdDnodes.setTestCluster(testCluster)

View File

@ -145,26 +145,26 @@ class taosdemoPerformace:
binPath = buildPath + "/build/bin/"
os.system(
"%staosdemo -f %s > taosdemoperf.txt 2>&1" %
"%staosdemo -f %s > /dev/null 2>&1" %
(binPath, self.generateJson()))
self.createTableTime = self.getCMDOutput(
"grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
"grep 'Spent' insert_res.txt | awk 'NR==1{print $2}'")
self.insertRecordsTime = self.getCMDOutput(
"grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
"grep 'Spent' insert_res.txt | awk 'NR==2{print $2}'")
self.recordsPerSecond = self.getCMDOutput(
"grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
"grep 'Spent' insert_res.txt | awk 'NR==2{print $16}'")
self.commitID = self.getCMDOutput("git rev-parse --short HEAD")
delay = self.getCMDOutput(
"grep 'delay' taosdemoperf.txt | awk '{print $4}'")
"grep 'delay' insert_res.txt | awk '{print $4}'")
self.avgDelay = delay[:-4]
delay = self.getCMDOutput(
"grep 'delay' taosdemoperf.txt | awk '{print $6}'")
"grep 'delay' insert_res.txt | awk '{print $6}'")
self.maxDelay = delay[:-4]
delay = self.getCMDOutput(
"grep 'delay' taosdemoperf.txt | awk '{print $8}'")
"grep 'delay' insert_res.txt | awk '{print $8}'")
self.minDelay = delay[:-3]
os.system("[ -f taosdemoperf.txt ] && rm taosdemoperf.txt")
os.system("[ -f insert_res.txt ] && rm insert_res.txt")
def createTablesAndStoreData(self):
cursor = self.conn2.cursor()
@ -185,7 +185,7 @@ class taosdemoPerformace:
cursor.close()
cursor1 = self.conn.cursor()
# cursor1.execute("drop database if exists %s" % self.insertDB)
cursor1.execute("drop database if exists %s" % self.insertDB)
cursor1.close()
if __name__ == '__main__':

View File

@ -436,7 +436,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
killCmd = "kill -9 %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
@ -445,7 +445,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
killCmd = "kill -9 %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
@ -556,7 +556,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
killCmd = "kill -9 %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(

View File

@ -84,6 +84,10 @@ if $rows != 1 then
return -1
endi
print ============== TD-5998
sql_error select _block_dist() from (select * from $nt)
sql_error select _block_dist() from (select * from $mt)
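# TD-5998: _block_dist() is not supported on subqueries, so both statements above must fail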
print =============== clear
sql drop database $db
sql show databases
@ -91,4 +95,4 @@ if $rows != 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1149,9 +1149,11 @@ endi
sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s));
sql create table smeters (ts timestamp, current float, voltage int);
sql insert into smeters values ('2021-08-08 10:10:10', 10, 1);
sql insert into smeters values ('2021-08-08 10:10:12', 10, 2);
sql create table smeters (ts timestamp, current float, voltage int) tags (t1 int);
sql create table smeter1 using smeters tags (1);
sql insert into smeter1 values ('2021-08-08 10:10:10', 10, 2);
sql insert into smeter1 values ('2021-08-08 10:10:12', 10, 2);
sql insert into smeter1 values ('2021-08-08 10:10:14', 20, 1);
sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a);
if $rows != 2 then
@ -1160,9 +1162,21 @@ endi
if $data00 != @21-08-08 10:10:10.000@ then
return -1
endi
if $data01 != 0.000000000 then
return -1
endi
if $data10 != @21-08-08 10:10:12.000@ then
return -1
endi
if $data11 != 0.000000000 then
return -1
endi
sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10;
if $rows != 1 then
return -1
endi
if $data00 != 0.000000000 then
return -1
endi

View File

@ -930,8 +930,254 @@ if $data44 != @18-11-25 19:06:00.000@ then
endi
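# interp(c1) with fill(linear): windows that cannot be interpolated from surrounding
# data points remain NULL, only the 20:40 window returns a value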
sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear);
if $rows != 8 then
return -1
endi
if $data00 != @18-09-17 20:35:00.000@ then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data10 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data11 != NULL then
return -1
endi
if $data20 != @18-09-17 20:37:00.000@ then
return -1
endi
if $data21 != NULL then
return -1
endi
if $data30 != @18-09-17 20:38:00.000@ then
return -1
endi
if $data31 != NULL then
return -1
endi
if $data40 != @18-09-17 20:39:00.000@ then
return -1
endi
if $data41 != NULL then
return -1
endi
if $data50 != @18-09-17 20:40:00.000@ then
return -1
endi
if $data51 != 0 then
return -1
endi
if $data60 != @18-09-17 20:41:00.000@ then
return -1
endi
if $data61 != NULL then
return -1
endi
if $data70 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data71 != NULL then
return -1
endi
sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear) order by ts desc;
if $rows != 8 then
return -1
endi
if $data00 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data10 != @18-09-17 20:41:00.000@ then
return -1
endi
if $data11 != NULL then
return -1
endi
if $data20 != @18-09-17 20:40:00.000@ then
return -1
endi
if $data21 != 0 then
return -1
endi
if $data30 != @18-09-17 20:39:00.000@ then
return -1
endi
if $data31 != NULL then
return -1
endi
if $data40 != @18-09-17 20:38:00.000@ then
return -1
endi
if $data41 != NULL then
return -1
endi
if $data50 != @18-09-17 20:37:00.000@ then
return -1
endi
if $data51 != NULL then
return -1
endi
if $data60 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data61 != NULL then
return -1
endi
if $data70 != @18-09-17 20:35:00.000@ then
return -1
endi
if $data71 != NULL then
return -1
endi
sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(2m) fill(linear) order by ts;
if $rows != 9 then
return -1
endi
if $data00 != @18-09-17 20:34:00.000@ then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data10 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data11 != NULL then
return -1
endi
if $data20 != @18-09-17 20:38:00.000@ then
return -1
endi
if $data21 != NULL then
return -1
endi
if $data30 != @18-09-17 20:40:00.000@ then
return -1
endi
if $data31 != 0.00000 then
return -1
endi
if $data40 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data41 != 0.20000 then
return -1
endi
if $data50 != @18-09-17 20:44:00.000@ then
return -1
endi
if $data51 != 0.40000 then
return -1
endi
if $data60 != @18-09-17 20:46:00.000@ then
return -1
endi
if $data61 != 0.60000 then
return -1
endi
if $data70 != @18-09-17 20:48:00.000@ then
return -1
endi
if $data71 != 0.80000 then
return -1
endi
if $data80 != @18-09-17 20:50:00.000@ then
return -1
endi
if $data81 != 1.00000 then
return -1
endi
sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts;
if $rows != 6 then
return -1
endi
if $data00 != @18-09-17 20:33:00.000@ then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data10 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data11 != NULL then
return -1
endi
if $data20 != @18-09-17 20:39:00.000@ then
return -1
endi
if $data21 != NULL then
return -1
endi
if $data30 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data31 != 0.20000 then
return -1
endi
if $data40 != @18-09-17 20:45:00.000@ then
return -1
endi
if $data41 != 0.50000 then
return -1
endi
if $data50 != @18-09-17 20:48:00.000@ then
return -1
endi
if $data51 != 0.80000 then
return -1
endi
sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts desc;
if $rows != 6 then
return -1
endi
if $data00 != @18-09-17 20:48:00.000@ then
return -1
endi
if $data01 != 0.80000 then
return -1
endi
if $data10 != @18-09-17 20:45:00.000@ then
return -1
endi
if $data11 != 0.50000 then
return -1
endi
if $data20 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data21 != 0.20000 then
return -1
endi
if $data30 != @18-09-17 20:39:00.000@ then
return -1
endi
if $data31 != NULL then
return -1
endi
if $data40 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data41 != NULL then
return -1
endi
if $data50 != @18-09-17 20:33:00.000@ then
return -1
endi
if $data51 != NULL then
return -1
endi

View File

@ -75,4 +75,9 @@ sleep 100
run general/parser/limit_tb.sim
run general/parser/limit_stb.sim
print ========> TD-6017
sql use $db
sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 order by ts desc limit 3 offset 1)
sql select * from (select ts, top(c1, 5) from $stb where ts >= $ts0 order by ts desc limit 3 offset 1)
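# TD-6017: top() inside a subquery combined with order by ts desc and limit/offset
# should execute without error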
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -355,6 +355,10 @@ sql select top(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
if $rows != 0 then
return -1
endi
print ========> TD-6017
sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1)
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print $data00 $data01

View File

@ -90,6 +90,14 @@ cd ../../../debug; make
./test.sh -f general/parser/function.sim
./test.sh -f unique/cluster/vgroup100.sim
./test.sh -f unique/http/admin.sim
./test.sh -f unique/http/opentsdb.sim
./test.sh -f unique/import/replica2.sim
./test.sh -f unique/import/replica3.sim
./test.sh -f general/alter/cached_schema_after_alter.sim
#======================b1-end===============
#======================b2-start===============
@ -198,13 +206,7 @@ cd ../../../debug; make
#======================b3-end===============
#======================b4-start===============
./test.sh -f unique/http/admin.sim
./test.sh -f unique/http/opentsdb.sim
./test.sh -f unique/import/replica2.sim
./test.sh -f unique/import/replica3.sim
./test.sh -f general/alter/cached_schema_after_alter.sim
./test.sh -f general/alter/count.sim
./test.sh -f general/alter/dnode.sim
./test.sh -f general/alter/import.sim

View File

@ -12,7 +12,7 @@ IN_TDINTERNAL="community"
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail '
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
@ -24,9 +24,9 @@ function stopTaosd {
function dohavecore(){
corefile=`find $corepath -mmin 1`
core_file=`echo $corefile|cut -d " " -f2`
proc=`echo $corefile|cut -d "_" -f3`
if [ -n "$corefile" ];then
core_file=`echo $corefile|cut -d " " -f2`
proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,`
echo 'taosd or taos has generated core'
rm case.log
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then
@ -46,7 +46,7 @@ function dohavecore(){
fi
fi
if [[ $1 == 1 ]];then
echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit
echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit
exit 8
fi
fi
@ -179,6 +179,9 @@ function runPyCaseOneByOnefq() {
start_time=`date +%s`
date +%F\ %T | tee -a pytest-out.log
echo -n $case
if [[ $1 =~ full ]] ; then
line=$line" -s"
fi
$line > case.log 2>&1 && \
echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \
echo -e "${RED} failed${NC}" | tee -a pytest-out.log