Merge branch 'develop' of https://github.com/taosdata/TDengine into dev

yihaoDeng 2021-04-09 13:19:51 +08:00
commit 053fa298b3
48 changed files with 6944 additions and 1392 deletions

Jenkinsfile

@ -43,16 +43,48 @@ def pre_test(){
killall -9 gdb || echo "no gdb running"
cd ${WKC}
git reset --hard HEAD~10 >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WKC}
git checkout master
'''
}
else {
sh '''
cd ${WKC}
git checkout develop
'''
}
}
sh'''
cd ${WKC}
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git clean -dfx
cd ${WK}
git reset --hard HEAD~10
git checkout develop
git pull >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WK}
git checkout master
'''
}
else {
sh '''
cd ${WK}
git checkout develop
'''
}
}
sh '''
cd ${WK}
git pull >/dev/null
export TZ=Asia/Harbin
date
git clean -dfx
@ -93,6 +125,7 @@ pipeline {
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
'''
script{
env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
}
@ -185,14 +218,12 @@ pipeline {
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
timeout(time: 45, unit: 'MINUTES'){
sh '''
date


@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.18.0")
SET(TD_VER_NUMBER "2.0.19.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)


@ -212,7 +212,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
Note: ● means verified by official testing; ○ means verified by unofficial testing.


@ -14,7 +14,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
| **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |
Here, ● means verified by official testing; ○ means verified by unofficial testing.
@ -23,7 +23,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
* To use a connector (other than RESTful) to access a TDengine database on a system where the TDengine server software is not installed, the client package of the matching version must be installed so that the application driver (libtaos.so on Linux, taos.dll on Windows) is present on the system; otherwise an error is raised that the corresponding library file cannot be found.
* All APIs that execute SQL statements (for example `taos_query`, `taos_query_a`, and `taos_subscribe` in the C/C++ Connector, and their counterparts in other languages) can execute only one SQL statement per call; if the actual argument contains multiple statements, the behavior is undefined.
* Users who upgrade to TDengine 2.0.8.0 must update their JDBC connection: taos-jdbcdriver must be upgraded to 2.0.12 or later.
* Users who upgrade to TDengine 2.0.8.0 must update their JDBC connection: taos-jdbcdriver must be upgraded to 2.0.12 or later. For the detailed version compatibility requirements, see the [taos-jdbcdriver documentation](https://www.taosdata.com/cn/documentation/connector/java#version).
* Whichever language's connector is used, for TDengine 2.0 and later it is recommended that each thread of a database application establish its own connection, or that a connection pool be built on a per-thread basis, so that the per-connection "USE statement" state is not disturbed by other threads (the query and write operations on a connection are themselves thread safe). A minimal sketch follows below.
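The two notes above are easiest to see in code. The following is a minimal C sketch, not an official example: the host, credentials, database, and table names are placeholders, and it assumes only the standard C connector entry points (`taos_connect`, `taos_query`, `taos_errno`, `taos_errstr`, `taos_free_result`, `taos_close`). It issues exactly one SQL statement per `taos_query()` call, from a single connection owned by one thread.

```c
#include <stdio.h>
#include <taos.h>   // TDengine C connector header

int main(void) {
  // Placeholder connection parameters; each application thread should own
  // its own TAOS* connection (or draw one from a per-thread pool) so the
  // per-connection "USE" state is not changed underneath other threads.
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }

  // One SQL statement per taos_query() call; concatenating several
  // statements into a single call has undefined behavior.
  const char *stmts[] = {
      "create database if not exists demo",
      "use demo",
      "create table if not exists t1 (ts timestamp, v int)",
  };
  for (size_t i = 0; i < sizeof(stmts) / sizeof(stmts[0]); ++i) {
    TAOS_RES *res = taos_query(conn, stmts[i]);
    if (res == NULL || taos_errno(res) != 0) {
      fprintf(stderr, "'%s' failed: %s\n", stmts[i],
              res != NULL ? taos_errstr(res) : "no result");
    }
    if (res != NULL) {
      taos_free_result(res);
    }
  }

  taos_close(conn);
  return 0;
}
```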
## <a class="anchor" id="driver"></a>Steps to install the connector driver


@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.18.0'
version: '2.0.19.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.18.0
- usr/lib/libtaos.so.2.0.19.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so


@ -96,6 +96,25 @@ typedef struct STableMetaInfo {
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
typedef struct SColumnIndex {
int16_t tableIndex;
int16_t columnIndex;
} SColumnIndex;
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SColumn {
SColumnIndex colIndex;
int32_t numOfFilters;
SColumnFilterInfo *filterInfo;
} SColumn;
/* the structure for sql function in select clause */
typedef struct SSqlExpr {
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
@ -109,32 +128,24 @@ typedef struct SSqlExpr {
tVariant param[3]; // parameters are not more than 3
int32_t offset; // sub result column value of arithmetic expression.
int16_t resColId; // result column id
SColumn *pFilter; // expr filter
} SSqlExpr;
typedef struct SColumnIndex {
int16_t tableIndex;
int16_t columnIndex;
} SColumnIndex;
typedef struct SExprFilter {
tSqlExpr *pExpr; //used for having parse
SSqlExpr *pSqlExpr;
SArray *fp;
SColumn *pFilters; //having filter info
}SExprFilter;
typedef struct SInternalField {
TAOS_FIELD field;
bool visible;
SExprInfo *pArithExprInfo;
SSqlExpr *pSqlExpr;
SExprFilter *pFieldFilters;
} SInternalField;
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SColumn {
SColumnIndex colIndex;
int32_t numOfFilters;
SColumnFilterInfo *filterInfo;
} SColumn;
typedef struct SCond {
uint64_t uid;
int32_t len; // length of tag query condition data
@ -243,6 +254,7 @@ typedef struct SQueryInfo {
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;
int32_t havingFieldNum;
} SQueryInfo;
typedef struct {


@ -22,6 +22,7 @@
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "qUtil.h"
typedef struct SCompareParam {
SLocalDataSource **pLocalData;
@ -1243,6 +1244,76 @@ static bool saveGroupResultInfo(SSqlObj *pSql) {
return false;
}
bool doFilterFieldData(char *input, SExprFilter* pFieldFilters, int16_t type, bool* notSkipped) {
bool qualified = false;
for(int32_t k = 0; k < pFieldFilters->pFilters->numOfFilters; ++k) {
__filter_func_t fp = taosArrayGetP(pFieldFilters->fp, k);
SColumnFilterElem filterElem = {.filterInfo = pFieldFilters->pFilters->filterInfo[k]};
bool isnull = isNull(input, type);
if (isnull) {
if (fp == isNullOperator) {
qualified = true;
break;
} else {
continue;
}
} else {
if (fp == notNullOperator) {
qualified = true;
break;
} else if (fp == isNullOperator) {
continue;
}
}
if (fp(&filterElem, input, input, type)) {
qualified = true;
break;
}
}
*notSkipped = qualified;
return TSDB_CODE_SUCCESS;
}
int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkipped) {
*notSkipped = true;
if (pQueryInfo->havingFieldNum <= 0) {
return TSDB_CODE_SUCCESS;
}
//int32_t exprNum = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
size_t numOfOutput = tscNumOfFields(pQueryInfo);
for(int32_t i = 0; i < numOfOutput; ++i) {
SInternalField* pInterField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
SExprFilter* pFieldFilters = pInterField->pFieldFilters;
if (pFieldFilters == NULL) {
continue;
}
int32_t type = pInterField->field.type;
char* pInput = pOutput->data + pOutput->num* pFieldFilters->pSqlExpr->offset;
doFilterFieldData(pInput, pFieldFilters, type, notSkipped);
if (*notSkipped == false) {
return TSDB_CODE_SUCCESS;
}
}
return TSDB_CODE_SUCCESS;
}
/**
*
* @param pSql
@ -1283,6 +1354,22 @@ bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurren
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize);
}
bool notSkipped = true;
doHavingFilter(pQueryInfo, pResBuf, &notSkipped);
if (!notSkipped) {
pRes->numOfRows = 0;
pLocalMerge->discard = !noMoreCurrentGroupRes;
if (pLocalMerge->discard) {
SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel;
tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1);
}
return notSkipped;
}
// no interval query, no fill operation
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo);


@ -34,6 +34,7 @@
#include "tstoken.h"
#include "tstrbuild.h"
#include "ttokendef.h"
#include "qUtil.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@ -1097,6 +1098,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
return true;
}
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) {
assert(pTagsList != NULL);
@ -1676,18 +1678,6 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
/*
* transfer sql functions that need secondary merge into another format
* in dealing with super table queries such as: count/first/last
*/
if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
return TSDB_CODE_SUCCESS;
}
@ -3065,6 +3055,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
return TSDB_CODE_SUCCESS;
}
static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
if (pColumn == NULL) {
return NULL;
@ -3088,15 +3079,11 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
}
static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter,
SColumnIndex* columnIndex, tSqlExpr* pExpr) {
int16_t colType, tSqlExpr* pExpr) {
const char* msg = "not supported filter condition";
tSqlExpr* pRight = pExpr->pRight;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, columnIndex->tableIndex);
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex->columnIndex);
int16_t colType = pSchema->type;
if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) {
colType = TSDB_DATA_TYPE_BIGINT;
} else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
@ -3301,7 +3288,10 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC
}
pColumn->colIndex = *pIndex;
return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr);
int16_t colType = pSchema->type;
return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, colType, pExpr);
}
static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) {
@ -6030,7 +6020,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName));
tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName));
@ -6039,7 +6029,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
// NOTE: tag column does not add to source column list
SColumnList ids = getColumnList(1, 0, pColIndex->colIndex);
insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr);
insertResultField(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, &ids, bytes, (int8_t)type, name, pExpr);
} else {
// if this query is "group by" normal column, time window query is not allowed
if (isTimeWindowQuery(pQueryInfo)) {
@ -6166,7 +6156,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}
// projection query on super table does not compatible with "group by" syntax
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (tscIsProjectionQuery(pQueryInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@ -6769,6 +6759,313 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** interField) {
tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL, .distinct = false};
int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
// ADD TRUE FOR TEST
if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
++pQueryInfo->havingFieldNum;
size_t n = tscSqlExprNumOfExprs(pQueryInfo);
SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, (int32_t)n - 1);
int32_t slot = tscNumOfFields(pQueryInfo) - 1;
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot);
pInfo->visible = false;
if (pInfo->pFieldFilters == NULL) {
SExprFilter* pFieldFilters = calloc(1, sizeof(SExprFilter));
if (pFieldFilters == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SColumn* pFilters = calloc(1, sizeof(SColumn));
if (pFilters == NULL) {
tfree(pFieldFilters);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pFieldFilters->pFilters = pFilters;
pFieldFilters->pSqlExpr = pSqlExpr;
pSqlExpr->pFilter = pFilters;
pInfo->pFieldFilters = pFieldFilters;
}
pInfo->pFieldFilters->pExpr = pExpr;
*interField = pInfo;
return TSDB_CODE_SUCCESS;
}
int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** pField) {
SInternalField* pInfo = NULL;
for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) {
pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutput - 1 - i);
if (pInfo->pFieldFilters && 0 == tSqlExprCompare(pInfo->pFieldFilters->pExpr, pExpr)) {
*pField = pInfo;
return TSDB_CODE_SUCCESS;
}
}
int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr, &pInfo);
if (ret) {
return ret;
}
*pField = pInfo;
return TSDB_CODE_SUCCESS;
}
static int32_t genExprFilter(SExprFilter * exprFilter) {
exprFilter->fp = taosArrayInit(4, sizeof(__filter_func_t));
if (exprFilter->fp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < exprFilter->pFilters->numOfFilters; ++i) {
SColumnFilterInfo *filterInfo = &exprFilter->pFilters->filterInfo[i];
int32_t lower = filterInfo->lowerRelOptr;
int32_t upper = filterInfo->upperRelOptr;
if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
tscError("invalid rel optr");
return TSDB_CODE_TSC_APP_ERROR;
}
__filter_func_t ffp = getFilterOperator(lower, upper);
if (ffp == NULL) {
tscError("invalid filter info");
return TSDB_CODE_TSC_APP_ERROR;
}
taosArrayPush(exprFilter->fp, &ffp);
}
return TSDB_CODE_SUCCESS;
}
static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t sqlOptr) {
const char* msg1 = "non binary column not support like operator";
const char* msg2 = "invalid operator for binary column in having clause";
const char* msg3 = "invalid operator for bool column in having clause";
SColumn* pColumn = NULL;
SColumnFilterInfo* pColFilter = NULL;
SInternalField* pInfo = NULL;
/*
* in case of TK_AND filter condition, we first find the corresponding column and build the query condition together
* the already existed condition.
*/
if (sqlOptr == TK_AND) {
int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
if (ret) {
return ret;
}
pColumn = pInfo->pFieldFilters->pFilters;
// this is a new filter condition on this column
if (pColumn->numOfFilters == 0) {
pColFilter = addColumnFilterInfo(pColumn);
} else { // update the existed column filter information, find the filter info here
pColFilter = &pColumn->filterInfo[0];
}
if (pColFilter == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
} else if (sqlOptr == TK_OR) {
int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
if (ret) {
return ret;
}
pColumn = pInfo->pFieldFilters->pFilters;
// TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2"
pColFilter = addColumnFilterInfo(pColumn);
if (pColFilter == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
} else { // error;
return TSDB_CODE_TSC_INVALID_SQL;
}
pColFilter->filterstr =
((pInfo->field.type == TSDB_DATA_TYPE_BINARY || pInfo->field.type == TSDB_DATA_TYPE_NCHAR) ? 1 : 0);
if (pColFilter->filterstr) {
if (pExpr->tokenId != TK_EQ
&& pExpr->tokenId != TK_NE
&& pExpr->tokenId != TK_ISNULL
&& pExpr->tokenId != TK_NOTNULL
&& pExpr->tokenId != TK_LIKE
) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
if (pExpr->tokenId == TK_LIKE) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pInfo->field.type == TSDB_DATA_TYPE_BOOL) {
if (pExpr->tokenId != TK_EQ && pExpr->tokenId != TK_NE) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
}
int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr);
if (ret) {
return ret;
}
return genExprFilter(pInfo->pFieldFilters);
}
int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t parentOptr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
const char* msg1 = "invalid having clause";
tSqlExpr* pLeft = pExpr->pLeft;
tSqlExpr* pRight = pExpr->pRight;
if (pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) {
int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId);
}
if (pLeft == NULL || pRight == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pLeft->type == pRight->type) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
exchangeExpr(pExpr);
pLeft = pExpr->pLeft;
pRight = pExpr->pRight;
if (pLeft->type != SQL_NODE_SQLFUNCTION) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pRight->type != SQL_NODE_VALUE) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pExpr->tokenId >= TK_BITAND) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
//if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) {
// return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
//}
if (pLeft->pParam) {
size_t size = taosArrayGetSize(pLeft->pParam);
for (int32_t i = 0; i < size; i++) {
tSqlExprItem* pParamElem = taosArrayGet(pLeft->pParam, i);
if (pParamElem->pNode->tokenId != TK_ALL &&
pParamElem->pNode->tokenId != TK_ID &&
pParamElem->pNode->tokenId != TK_STRING &&
pParamElem->pNode->tokenId != TK_INTEGER &&
pParamElem->pNode->tokenId != TK_FLOAT) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pParamElem->pNode->tokenId == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pParamElem->pNode->tokenId == TK_ID) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (index.columnIndex <= 0 ||
index.columnIndex >= tscGetNumOfColumns(pTableMeta)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
}
}
pLeft->functionId = isValidFunction(pLeft->operand.z, pLeft->operand.n);
if (pLeft->functionId < 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
return handleExprInHavingClause(pCmd, pQueryInfo, pExpr, parentOptr);
}
int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t timeWindowQuery) {
const char* msg1 = "having only works with group by";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "invalid expression in having clause";
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->groupbyExpr.numOfGroupCols <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pExpr->pLeft == NULL || pExpr->pRight == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
int32_t ret = 0;
if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) {
return ret;
}
//REDO function check
if (!functionCompatibleCheck(pQueryInfo, joinQuery, timeWindowQuery)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
}
int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t index) {
assert(pQuerySqlNode != NULL && (pQuerySqlNode->from == NULL || taosArrayGetSize(pQuerySqlNode->from->tableList) > 0));
@ -6934,6 +7231,23 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
}
}
// parse the having clause in the first place
if (parseHavingClause(pQueryInfo, pQuerySqlNode->pHaving, pCmd, isSTable, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
/*
* transfer sql functions that need secondary merge into another format
* in dealing with super table queries such as: count/first/last
*/
if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -7125,3 +7439,10 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
return false;
}
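The additions above (parseHavingClause, getHavingExpr, and the per-expression filters) introduce HAVING-clause support on aggregated results. For orientation only, here is a hedged C sketch of the kind of query this enables from the client side; the database, table, and column names are invented, and the calls used are the standard TDengine C client API.

```c
#include <stdio.h>
#include <taos.h>   // TDengine C client

int main(void) {
  // Hypothetical connection parameters and schema; adjust to your setup.
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return 1;
  }

  // A GROUP BY query whose groups are filtered by the newly supported HAVING clause.
  const char *sql = "select avg(f1), count(*) from test.meters "
                    "group by location having avg(f1) > 10";

  TAOS_RES *res = taos_query(conn, sql);
  if (res == NULL || taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n",
            res != NULL ? taos_errstr(res) : "no result");
  } else {
    int groups = 0;
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      groups++;  // each row is one group that satisfied the HAVING condition
    }
    printf("%d group(s) passed the HAVING filter\n", groups);
  }
  if (res != NULL) {
    taos_free_result(res);
  }
  taos_close(conn);
  return 0;
}
```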


@ -862,8 +862,44 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pSqlFuncExpr->functionId = htons(pExpr->functionId);
pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams);
pSqlFuncExpr->resColId = htons(pExpr->resColId);
if (pTableMeta->tableType != TSDB_SUPER_TABLE && pExpr->pFilter && pExpr->pFilter->numOfFilters > 0) {
pSqlFuncExpr->filterNum = htonl(pExpr->pFilter->numOfFilters);
} else {
pSqlFuncExpr->filterNum = 0;
}
pMsg += sizeof(SSqlFuncMsg);
if (pSqlFuncExpr->filterNum) {
pMsg += sizeof(SColumnFilterInfo) * pExpr->pFilter->numOfFilters;
// append the filter information after the basic column information
for (int32_t f = 0; f < pExpr->pFilter->numOfFilters; ++f) {
SColumnFilterInfo *pColFilter = &pExpr->pFilter->filterInfo[f];
SColumnFilterInfo *pFilterMsg = &pSqlFuncExpr->filterInfo[f];
pFilterMsg->filterstr = htons(pColFilter->filterstr);
if (pColFilter->filterstr) {
pFilterMsg->len = htobe64(pColFilter->len);
memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1));
pMsg += (pColFilter->len + 1); // append the additional filter binary info
} else {
pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi);
pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi);
}
pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr);
pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr);
if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) {
tscError("invalid filter info");
return TSDB_CODE_TSC_INVALID_SQL;
}
}
}
for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log
pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
pSqlFuncExpr->arg[j].argBytes = htons(pExpr->param[j].nLen);


@ -1045,6 +1045,7 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
.pSqlExpr = NULL,
.pArithExprInfo = NULL,
.visible = true,
.pFieldFilters = NULL,
};
info.field = *pField;
@ -1057,6 +1058,7 @@ SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_F
.pSqlExpr = NULL,
.pArithExprInfo = NULL,
.visible = true,
.pFieldFilters = NULL,
};
info.field = *field;
@ -1130,6 +1132,22 @@ int32_t tscGetResRowLength(SArray* pExprList) {
return size;
}
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
tfree(pFilterInfo[i].pz);
}
}
tfree(pFilterInfo);
}
static void tscColumnDestroy(SColumn* pCol) {
destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters);
free(pCol);
}
void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
if (pFieldInfo == NULL) {
return;
@ -1150,6 +1168,11 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
tfree(pInfo->pArithExprInfo);
}
if (pInfo->pFieldFilters != NULL) {
tscColumnDestroy(pInfo->pFieldFilters->pFilters);
tfree(pInfo->pFieldFilters);
}
}
taosArrayDestroy(pFieldInfo->internalField);
@ -1411,15 +1434,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
return taosArrayGetP(pColumnList, i);
}
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
tfree(pFilterInfo[i].pz);
}
}
tfree(pFilterInfo);
}
SColumn* tscColumnClone(const SColumn* src) {
assert(src != NULL);
@ -1436,10 +1451,6 @@ SColumn* tscColumnClone(const SColumn* src) {
return dst;
}
static void tscColumnDestroy(SColumn* pCol) {
destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters);
free(pCol);
}
void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex) {
assert(src != NULL && dst != NULL);


@ -88,7 +88,7 @@ void* qOpenQueryMgmt(int32_t vgId);
void qQueryMgmtNotifyClosed(void* pExecutor);
void qQueryMgmtReOpen(void *pExecutor);
void qCleanupQueryMgmt(void* pExecutor);
void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo);
void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo);
void** qAcquireQInfo(void* pMgmt, uint64_t key);
void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle);
bool checkQIdEqual(void *qHandle, uint64_t qId);


@ -399,36 +399,6 @@ typedef struct SColIndex {
char name[TSDB_COL_NAME_LEN]; // TODO remove it
} SColIndex;
/* sql function msg, to describe the message to vnode about sql function
* operations in select clause */
typedef struct SSqlFuncMsg {
int16_t functionId;
int16_t numOfParams;
int16_t resColId; // result column id, id of the current output column
int16_t colType;
int16_t colBytes;
SColIndex colInfo;
struct ArgElem {
int16_t argType;
int16_t argBytes;
union {
double d;
int64_t i64;
char * pz;
} argValue;
} arg[3];
} SSqlFuncMsg;
typedef struct SExprInfo {
SSqlFuncMsg base;
struct tExprNode* pExpr;
int16_t bytes;
int16_t type;
int32_t interBytes;
int64_t uid;
} SExprInfo;
typedef struct SColumnFilterInfo {
int16_t lowerRelOptr;
@ -451,6 +421,42 @@ typedef struct SColumnFilterInfo {
};
} SColumnFilterInfo;
/* sql function msg, to describe the message to vnode about sql function
* operations in select clause */
typedef struct SSqlFuncMsg {
int16_t functionId;
int16_t numOfParams;
int16_t resColId; // result column id, id of the current output column
int16_t colType;
int16_t colBytes;
SColIndex colInfo;
struct ArgElem {
int16_t argType;
int16_t argBytes;
union {
double d;
int64_t i64;
char * pz;
} argValue;
} arg[3];
int32_t filterNum;
SColumnFilterInfo filterInfo[];
} SSqlFuncMsg;
typedef struct SExprInfo {
SColumnFilterInfo * pFilter;
struct tExprNode* pExpr;
int16_t bytes;
int16_t type;
int32_t interBytes;
int64_t uid;
SSqlFuncMsg base;
} SExprInfo;
/*
* for client side struct, we only need the column id, type, bytes are not necessary
* But for data in vnode side, we need all the following information.


@ -205,6 +205,11 @@
#define TK_VALUES 186
#define TK_SPACE 300
#define TK_COMMENT 301
#define TK_ILLEGAL 302


@ -27,7 +27,7 @@
#define MAX_IP_SIZE 20
#define MAX_PASSWORD_SIZE 20
#define MAX_HISTORY_SIZE 1000
#define MAX_COMMAND_SIZE 65536
#define MAX_COMMAND_SIZE 1048586
#define HISTORY_FILE ".taos_history"
#define DEFAULT_RES_SHOW_NUM 100


@ -238,7 +238,7 @@ void resetCommand(Command *cmd, const char s[]) {
clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
memset(cmd->buffer, 0, MAX_COMMAND_SIZE);
memset(cmd->command, 0, MAX_COMMAND_SIZE);
strcpy(cmd->command, s);
strncpy(cmd->command, s, MAX_COMMAND_SIZE);
int size = 0;
int width = 0;
getMbSizeInfo(s, &size, &width);


@ -9,19 +9,18 @@ IF (GIT_FOUND)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1)
STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
IF (TD_LINUX)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
ENDIF (TD_LINUX)
MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS})
ELSE()
MESSAGE("Git not found")
@ -29,9 +28,9 @@ ELSE()
SET(TAOSDEMO_STATUS "unknown")
ENDIF (GIT_FOUND)
STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1)
STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1)
MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1})
STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS)
STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS)
IF (TAOSDEMO_STATUS MATCHES "M")
SET(TAOSDEMO_STATUS "modified")

File diff suppressed because it is too large.


@ -483,13 +483,15 @@ static int queryDbImpl(TAOS *taos, char *command) {
static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-E") == 0) {
if (argv[i+1]) {
char *tmp = argv[++i];
char *tmp = strdup(argv[++i]);
if (tmp) {
int64_t tmpEpoch;
if (strchr(tmp, ':') && strchr(tmp, '-')) {
if (TSDB_CODE_SUCCESS != taosParseTime(
tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) {
fprintf(stderr, "Input end time error!\n");
free(tmp);
return;
}
} else {
@ -499,9 +501,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
sprintf(argv[i], "%"PRId64"", tmpEpoch);
debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
__func__, __LINE__, tmp, i, argv[i]);
free(tmp);
} else {
fprintf(stderr, "Input end time error!\n");
return;
errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__);
exit(-1);
}
} else if (strcmp(argv[i], "-g") == 0) {
arguments->debug_print = true;


@ -315,6 +315,10 @@ void sdbUpdateAsync() {
taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr);
}
static int node_cmp(const void *l, const void *r) {
return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId;
}
int32_t sdbUpdateSync(void *pMnodes) {
SMInfos *pMinfos = pMnodes;
if (!mnodeIsRunning()) {
@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) {
return TSDB_CODE_SUCCESS;
}
qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp);
sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica);
for (int32_t i = 0; i < syncCfg.replica; ++i) {
sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn,


@ -190,6 +190,8 @@ typedef struct SQuery {
bool stabledev; // super table stddev query
int32_t interBufSize; // intermediate buffer sizse
int32_t havingNum; // having expr number
SOrderVal order;
int16_t numOfCols;
int16_t numOfTags;
@ -285,6 +287,7 @@ enum OPERATOR_TYPE_E {
OP_Fill = 13,
OP_MultiTableAggregate = 14,
OP_MultiTableTimeInterval = 15,
OP_Having = 16,
};
typedef struct SOperatorInfo {
@ -402,6 +405,11 @@ typedef struct SOffsetOperatorInfo {
int64_t offset;
} SOffsetOperatorInfo;
typedef struct SHavingOperatorInfo {
SArray* fp;
} SHavingOperatorInfo;
typedef struct SFillOperatorInfo {
SFillInfo *pFillInfo;
SSDataBlock *pRes;


@ -98,6 +98,7 @@ typedef struct SQuerySqlNode {
SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional]
SStrToken sqlstr; // sql string in select clause
struct tSqlExpr *pHaving; // having clause [optional]
} SQuerySqlNode;
typedef struct STableNamePair {
@ -253,6 +254,11 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder);
SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index);
SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder);
tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType);
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);
tSqlExpr *tSqlExprClone(tSqlExpr *pSrc);
SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias);
SFromInfo *setSubquery(SFromInfo* pFromInfo, SQuerySqlNode *pSqlNode);
void *destroyFromInfo(SFromInfo* pFromInfo);
@ -272,7 +278,7 @@ void tSqlExprListDestroy(SArray *pList);
SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps,
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit);
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type);


@ -52,10 +52,19 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int
return pResultRowInfo->pResult[slot];
}
static FORCE_INLINE char *getPosInResultPage(SQuery *pQuery, tFilePage* page, int32_t rowOffset, int16_t offset) {
assert(rowOffset >= 0 && pQuery != NULL);
static FORCE_INLINE char* getPosInResultPage(SQueryRuntimeEnv* pRuntimeEnv, tFilePage* page, int32_t rowOffset,
int16_t offset, int32_t size) {
assert(rowOffset >= 0 && pRuntimeEnv != NULL);
SQuery* pQuery = pRuntimeEnv->pQuery;
int64_t pageSize = pRuntimeEnv->pResultBuf->pageSize;
int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery);
// buffer overflow check
int64_t bufEnd = (rowOffset + offset * numOfRows + size);
assert(page->num <= pageSize && bufEnd <= page->num);
return ((char*)page->data) + rowOffset + offset * numOfRows;
}


@ -453,7 +453,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
%type select {SQuerySqlNode*}
%destructor select {destroyQuerySqlNode($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G);
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N);
}
select(A) ::= LP select(B) RP. {A = B;}
@ -471,7 +471,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
// select client_version()
// select server_state()
select(A) ::= SELECT(T) selcollist(W). {
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
// selcollist is a list of expressions that are to become the return


@ -2771,14 +2771,16 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
pInfo->stage += 1;
// all data are null, set it completed
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
return;
} else {
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
}
pInfo->stage += 1;
}
// the first stage, only acquire the min/max value
@ -2857,14 +2859,16 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
pInfo->stage += 1;
// all data are null, set it completed
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
return;
} else {
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
}
pInfo->stage += 1;
}
if (pInfo->stage == 0) {


@ -181,6 +181,7 @@ static SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntime
static SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
static SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput);
static SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
static SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
@ -1819,6 +1820,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
}
}
if (pQuery->havingNum > 0) {
pRuntimeEnv->proot = createHavingOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr1, pQuery->numOfOutput);
}
if (pQuery->limit.offset > 0) {
pRuntimeEnv->proot = createOffsetOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot);
}
@ -3243,7 +3248,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
continue;
}
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, bufPage, pResult->offset, offset);
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, bufPage, pResult->offset, offset, pCtx[i].outputBytes);
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
@ -3301,7 +3306,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
int16_t offset = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, page, pResult->offset, offset);
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, page, pResult->offset, offset, pCtx[i].outputBytes);
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
@ -3509,8 +3514,6 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) {
*/
static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock) {
SQuery *pQuery = pRuntimeEnv->pQuery;
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t numOfResult = pBlock->info.rows; // there are already exists result rows
@ -3545,7 +3548,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
int32_t bytes = pColInfoData->info.bytes;
char *out = pColInfoData->pData + numOfResult * bytes;
char *in = getPosInResultPage(pQuery, page, pRow->offset, offset);
char *in = getPosInResultPage(pRuntimeEnv, page, pRow->offset, offset, bytes);
memcpy(out, in, bytes * numOfRowsToCopy);
offset += bytes;
@ -4015,7 +4018,7 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in
return pFillCol;
}
int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *tsdb, int32_t vgId, bool isSTableQuery) {
int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
@ -4026,8 +4029,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts
pQuery->timeWindowInterpo = timeWindowInterpoRequired(pQuery);
pQuery->stabledev = isStabledev(pQuery);
pRuntimeEnv->prevResult = prevResult;
setScanLimitationByResultBuffer(pQuery);
int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
@ -4669,6 +4670,111 @@ static SSDataBlock* doOffset(void* param) {
}
}
bool doFilterData(SColumnInfoData* p, int32_t rid, SColumnFilterElem *filterElem, __filter_func_t fp) {
char* input = p->pData + p->info.bytes * rid;
bool isnull = isNull(input, p->info.type);
if (isnull) {
return (fp == isNullOperator) ? true : false;
} else {
if (fp == notNullOperator) {
return true;
} else if (fp == isNullOperator) {
return false;
}
}
if (fp(filterElem, input, input, p->info.type)) {
return true;
}
return false;
}
void doHavingImpl(SOperatorInfo *pOperator, SSDataBlock *pBlock) {
SHavingOperatorInfo* pInfo = pOperator->info;
int32_t f = 0;
int32_t allQualified = 1;
int32_t exprQualified = 0;
for (int32_t r = 0; r < pBlock->info.rows; ++r) {
allQualified = 1;
for (int32_t i = 0; i < pOperator->numOfOutput; ++i) {
SExprInfo* pExprInfo = &(pOperator->pExpr[i]);
if (pExprInfo->pFilter == NULL) {
continue;
}
SArray* es = taosArrayGetP(pInfo->fp, i);
assert(es);
size_t fpNum = taosArrayGetSize(es);
exprQualified = 0;
for (int32_t m = 0; m < fpNum; ++m) {
__filter_func_t fp = taosArrayGetP(es, m);
assert(fp);
//SColIndex* colIdx = &pExprInfo->base.colInfo;
SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i);
SColumnFilterElem filterElem = {.filterInfo = pExprInfo->pFilter[m]};
if (doFilterData(p, r, &filterElem, fp)) {
exprQualified = 1;
break;
}
}
if (exprQualified == 0) {
allQualified = 0;
break;
}
}
if (allQualified == 0) {
continue;
}
for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
int16_t bytes = pColInfoData->info.bytes;
memmove(pColInfoData->pData + f * bytes, pColInfoData->pData + bytes * r, bytes);
}
++f;
}
pBlock->info.rows = f;
}
static SSDataBlock* doHaving(void* param) {
SOperatorInfo *pOperator = (SOperatorInfo *)param;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
while (1) {
SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream);
if (pBlock == NULL) {
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
}
doHavingImpl(pOperator, pBlock);
return pBlock;
}
}
static SSDataBlock* doIntervalAgg(void* param) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@ -5019,6 +5125,13 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
}
static void destroyHavingOperatorInfo(void* param, int32_t numOfOutput) {
SHavingOperatorInfo* pInfo = (SHavingOperatorInfo*) param;
if (pInfo->fp) {
taosArrayDestroy(pInfo->fp);
}
}
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo));
@ -5075,6 +5188,83 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
return pOperator;
}
int32_t initFilterFp(SExprInfo* pExpr, int32_t numOfOutput, SArray** fps) {
__filter_func_t fp = NULL;
*fps = taosArrayInit(numOfOutput, sizeof(SArray*));
if (*fps == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pExprInfo = &(pExpr[i]);
SColIndex* colIdx = &pExprInfo->base.colInfo;
if (pExprInfo->pFilter == NULL || !TSDB_COL_IS_NORMAL_COL(colIdx->flag)) {
taosArrayPush(*fps, &fp);
continue;
}
int32_t filterNum = pExprInfo->base.filterNum;
SColumnFilterInfo *filterInfo = pExprInfo->pFilter;
SArray* es = taosArrayInit(filterNum, sizeof(__filter_func_t));
for (int32_t j = 0; j < filterNum; ++j) {
int32_t lower = filterInfo->lowerRelOptr;
int32_t upper = filterInfo->upperRelOptr;
if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
qError("invalid rel optr");
taosArrayDestroy(es);
return TSDB_CODE_QRY_APP_ERROR;
}
__filter_func_t ffp = getFilterOperator(lower, upper);
if (ffp == NULL) {
qError("invalid filter info");
taosArrayDestroy(es);
return TSDB_CODE_QRY_APP_ERROR;
}
taosArrayPush(es, &ffp);
filterInfo += 1;
}
taosArrayPush(*fps, &es);
}
return TSDB_CODE_SUCCESS;
}
SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SHavingOperatorInfo* pInfo = calloc(1, sizeof(SHavingOperatorInfo));
initFilterFp(pExpr, numOfOutput, &pInfo->fp);
assert(pInfo->fp);
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "HavingOperator";
pOperator->operatorType = OP_Having;
pOperator->blockingOptr = false;
pOperator->status = OP_IN_EXECUTING;
pOperator->numOfOutput = numOfOutput;
pOperator->pExpr = pExpr;
pOperator->upstream = upstream;
pOperator->exec = doHaving;
pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->cleanup = destroyHavingOperatorInfo;
return pOperator;
}
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) {
SLimitOperatorInfo* pInfo = calloc(1, sizeof(SLimitOperatorInfo));
pInfo->limit = pRuntimeEnv->pQuery->limit.limit;
@ -5646,9 +5836,35 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pExprMsg->functionId = htons(pExprMsg->functionId);
pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
pExprMsg->resColId = htons(pExprMsg->resColId);
pExprMsg->filterNum = htonl(pExprMsg->filterNum);
pMsg += sizeof(SSqlFuncMsg);
SColumnFilterInfo* pExprFilterInfo = pExprMsg->filterInfo;
pMsg += sizeof(SColumnFilterInfo) * pExprMsg->filterNum;
for (int32_t f = 0; f < pExprMsg->filterNum; ++f) {
SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pExprFilterInfo;
pFilterMsg->filterstr = htons(pFilterMsg->filterstr);
if (pFilterMsg->filterstr) {
pFilterMsg->len = htobe64(pFilterMsg->len);
pFilterMsg->pz = (int64_t)pMsg;
pMsg += (pFilterMsg->len + 1);
} else {
pFilterMsg->lowerBndi = htobe64(pFilterMsg->lowerBndi);
pFilterMsg->upperBndi = htobe64(pFilterMsg->upperBndi);
}
pFilterMsg->lowerRelOptr = htons(pFilterMsg->lowerRelOptr);
pFilterMsg->upperRelOptr = htons(pFilterMsg->upperRelOptr);
pExprFilterInfo++;
}
for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType);
pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes);
@ -5833,6 +6049,42 @@ _cleanup:
return code;
}
int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
if (filterNum <= 0) {
return TSDB_CODE_SUCCESS;
}
*dst = calloc(filterNum, sizeof(*src));
if (*dst == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
memcpy(*dst, src, sizeof(*src) * filterNum);
for (int32_t i = 0; i < filterNum; i++) {
if ((*dst)[i].filterstr && dst[i]->len > 0) {
void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
if (pz == NULL) {
if (i == 0) {
free(*dst);
} else {
freeColumnFilterInfo(*dst, i);
}
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
memcpy(pz, (void *)src->pz, (size_t)src->len + 1);
(*dst)[i].pz = (int64_t)pz;
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) {
qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg);
@ -5946,6 +6198,13 @@ int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutpu
type = s->type;
bytes = s->bytes;
}
if (pExprs[i].base.filterNum > 0) {
int32_t ret = cloneExprFilterInfo(&pExprs[i].pFilter, pExprMsg[i]->filterInfo, pExprMsg[i]->filterNum);
if (ret) {
return ret;
}
}
}
int32_t param = (int32_t)pExprs[i].base.arg[0].argValue.i64;
@ -6235,6 +6494,10 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) {
pQuery->tagLen += pExprs[col].bytes;
}
if (pExprs[col].pFilter) {
++pQuery->havingNum;
}
}
doUpdateExprColumnIndex(pQuery);
@ -6338,6 +6601,10 @@ _cleanup_qinfo:
tExprTreeDestroy(pExprInfo->pExpr, NULL);
pExprInfo->pExpr = NULL;
}
if (pExprInfo->pFilter) {
freeColumnFilterInfo(pExprInfo->pFilter, pExprInfo->base.filterNum);
}
}
tfree(pExprs);
@ -6383,6 +6650,8 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
SArray* prevResult = NULL;
if (pQueryMsg->prevResultLen > 0) {
prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen);
pRuntimeEnv->prevResult = prevResult;
}
pQuery->precision = tsdbGetCfg(tsdb)->precision;
@ -6404,7 +6673,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
}
// filter the qualified
if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
if ((code = doInitQInfo(pQInfo, pTsBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
goto _error;
}
@ -6422,7 +6691,7 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
}
for (int32_t i = 0; i < numOfFilters; i++) {
if (pFilter[i].filterstr) {
if (pFilter[i].filterstr && pFilter[i].pz) {
free((void*)(pFilter[i].pz));
}
}
@ -6464,6 +6733,10 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) {
if (pExprInfo[i].pExpr != NULL) {
tExprTreeDestroy(pExprInfo[i].pExpr, NULL);
}
if (pExprInfo[i].pFilter) {
freeColumnFilterInfo(pExprInfo[i].pFilter, pExprInfo[i].base.filterNum);
}
}
tfree(pExprInfo);


@ -310,6 +310,77 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
return pExpr;
}
static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) {
return (left->type == right->type && left->n == right->n && strncasecmp(left->z, right->z, left->n) == 0) ? 0 : 1;
}
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
if ((left == NULL && right) || (left && right == NULL)) {
return 1;
}
if (left->type != right->type) {
return 1;
}
if (left->tokenId != right->tokenId) {
return 1;
}
if (left->functionId != right->functionId) {
return 1;
}
if ((left->pLeft && right->pLeft == NULL)
|| (left->pLeft == NULL && right->pLeft)
|| (left->pRight && right->pRight == NULL)
|| (left->pRight == NULL && right->pRight)
|| (left->pParam && right->pParam == NULL)
|| (left->pParam == NULL && right->pParam)) {
return 1;
}
if (tVariantCompare(&left->value, &right->value)) {
return 1;
}
if (tStrTokenCompare(&left->colInfo, &right->colInfo)) {
return 1;
}
if (right->pParam && left->pParam) {
size_t size = taosArrayGetSize(right->pParam);
if (left->pParam && taosArrayGetSize(left->pParam) != size) {
return 1;
}
for (int32_t i = 0; i < size; i++) {
tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i);
tSqlExpr* pSubLeft = pLeftElem->pNode;
tSqlExprItem* pRightElem = taosArrayGet(left->pParam, i);
tSqlExpr* pSubRight = pRightElem->pNode;
if (tSqlExprCompare(pSubLeft, pSubRight)) {
return 1;
}
}
}
if (left->pLeft && tSqlExprCompare(left->pLeft, right->pLeft)) {
return 1;
}
if (left->pRight && tSqlExprCompare(left->pRight, right->pRight)) {
return 1;
}
return 0;
}
tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) {
tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));
@ -640,7 +711,7 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
SSessionWindowVal *pSession, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit,
SLimitVal *psLimit) {
SLimitVal *psLimit, tSqlExpr *pHaving) {
assert(pSelectList != NULL);
SQuerySqlNode *pSqlNode = calloc(1, sizeof(SQuerySqlNode));
@ -655,6 +726,7 @@ SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SF
pSqlNode->pSortOrder = pSortOrder;
pSqlNode->pWhere = pWhere;
pSqlNode->fillType = pFill;
pSqlNode->pHaving = pHaving;
if (pLimit != NULL) {
pSqlNode->limit = *pLimit;
@ -718,6 +790,9 @@ void destroyQuerySqlNode(SQuerySqlNode *pQuerySql) {
tSqlExprDestroy(pQuerySql->pWhere);
pQuerySql->pWhere = NULL;
tSqlExprDestroy(pQuerySql->pHaving);
pQuerySql->pHaving = NULL;
taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant);
pQuerySql->pSortOrder = NULL;


@ -140,7 +140,7 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i];
int16_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes;
char * s = getPosInResultPage(pRuntimeEnv->pQuery, page, pResultRow->offset, offset);
char * s = getPosInResultPage(pRuntimeEnv, page, pResultRow->offset, offset, size);
memset(s, 0, size);
offset += size;


@ -475,7 +475,7 @@ void qCleanupQueryMgmt(void* pQMgmt) {
qDebug("vgId:%d, queryMgmt cleanup completed", vgId);
}
void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) {
void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) {
if (pMgmt == NULL) {
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
return NULL;
@ -516,8 +516,7 @@ void** qAcquireQInfo(void* pMgmt, uint64_t _key) {
return NULL;
}
TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key;
void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE));
void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &_key, sizeof(_key));
if (handle == NULL || *handle == NULL) {
terrno = TSDB_CODE_QRY_INVALID_QHANDLE;
return NULL;

File diff suppressed because it is too large.


@ -551,7 +551,10 @@ static void syncClosePeerConn(SSyncPeer *pPeer) {
if (pPeer->peerFd >= 0) {
pPeer->peerFd = -1;
void *pConn = pPeer->pConn;
if (pConn != NULL) syncFreeTcpConn(pPeer->pConn);
if (pConn != NULL) {
syncFreeTcpConn(pPeer->pConn);
pPeer->pConn = NULL;
}
}
}


@ -239,7 +239,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
// current connect is broken
if (code == TSDB_CODE_SUCCESS) {
handle = qRegisterQInfo(pVnode->qMgmt, qId, (uint64_t)pQInfo);
handle = qRegisterQInfo(pVnode->qMgmt, qId, pQInfo);
if (handle == NULL) { // failed to register qhandle
pRsp->code = terrno;
terrno = 0;


@ -19,6 +19,10 @@ Run lua sample:
lua test.lua
```
## Run performance test:
```
time lua benchmark.lua
```
## OpenResty Dependencies
- OpenResty:
```


@ -0,0 +1,67 @@
local driver = require "luaconnector"
local config = {
password = "taosdata",
host = "127.0.0.1",
port = 6030,
database = "",
user = "root",
max_packet_size = 1024 * 1024
}
local conn
local res = driver.connect(config)
if res.code ~=0 then
print("connect--- failed: "..res.error)
return
else
conn = res.conn
print("connect--- pass.")
end
local res = driver.query(conn,"drop database if exists demo")
res = driver.query(conn,"create database demo")
if res.code ~=0 then
print("create db--- failed: "..res.error)
return
else
print("create db--- pass.")
end
res = driver.query(conn,"use demo")
if res.code ~=0 then
print("select db--- failed: "..res.error)
return
else
print("select db--- pass.")
end
res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))")
if res.code ~=0 then
print("create table---failed: "..res.error)
return
else
print("create table--- pass.")
end
local base = 1617330000000
local index =0
local count = 100000
local t
while( index < count )
do
t = base + index
local q=string.format([[insert into m1 values (%d,0,'robotspace')]],t)
res = driver.query(conn,q)
if res.code ~=0 then
print("insert records failed: "..res.error)
return
else
end
index = index+1
end
print(string.format([["Done. %d records has been stored."]],count))
driver.close(conn)


@ -1,2 +1,2 @@
gcc lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos
gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos


@ -1,2 +1,2 @@
gcc lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos
gcc -std=c99 lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos


@ -23,7 +23,7 @@ static int l_connect(lua_State *L){
luaL_checktype(L, 1, LUA_TTABLE);
lua_getfield(L,-1,"host");
lua_getfield(L,1,"host");
if (lua_isstring(L,-1)){
host = lua_tostring(L, -1);
// printf("host = %s\n", host);


@ -17247,3 +17247,88 @@
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:PyEval_EvalCode
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:PyInit__openssl
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}

View File

@ -22,6 +22,7 @@ python3 ./test.py -f insert/alterTableAndInsert.py
python3 ./test.py -f insert/insertIntoTwoTables.py
python3 ./test.py -f insert/before_1970.py
python3 bug2265.py
python3 ./test.py -f insert/bug3654.py
#table
python3 ./test.py -f table/alter_wal0.py
@ -216,8 +217,8 @@ python3 ./test.py -f query/floatCompare.py
python3 ./test.py -f query/query1970YearsAf.py
python3 ./test.py -f query/bug3351.py
python3 ./test.py -f query/bug3375.py
python3 ./test.py -f query/queryJoin10tables.py
python3 ./test.py -f query/queryStddevWithGroupby.py
#stream
python3 ./test.py -f stream/metric_1.py

View File

@ -36,12 +36,7 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log |
do
defiMemError=(${defiMemError//,/})
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then
cat valgrind.err
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
elif [ "$defiMemError" -gt 1013 ];then #add for azure
if [ "$defiMemError" -gt 0 ]; then
cat valgrind.err
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"

View File

@ -21,7 +21,7 @@ rm -rf /var/lib/taos/*
nohup valgrind --leak-check=yes $TAOSD_DIR > $TDIR/$VALGRIND_OUT 2> $TDIR/$VALGRIND_ERR &
sleep 20
cd -
./crash_gen.sh -p -t 10 -s 200
./crash_gen.sh -p -t 10 -s 1000
ps -ef |grep valgrind|grep -v grep|awk '{print $2}'|xargs kill -term
while true
do
@ -53,12 +53,7 @@ for defiMemError in `grep 'definitely lost:' taosd-definitely-lost-out.log | awk
do
defiMemError=(${defiMemError//,/})
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then
cat $VALGRIND_ERR
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
elif [ "$defiMemError" -gt 1013 ];then #add for azure
if [ "$defiMemError" -gt 0 ]; then
cat $VALGRIND_ERR
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
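
Both `handle_crash_gen_val_log.sh` and `handle_taosd_val_log.sh` previously tolerated up to 1013 "definitely lost" bytes (an allowance added for an Azure environment); the hunks above remove that carve-out, so any definitely-lost memory reported by valgrind now fails the run. As a rough illustration only (the helper name and log handling are assumptions, not part of this commit), the same parse-and-threshold step could be expressed like this:

```
# Sketch of the tightened check: fail on any "definitely lost" bytes in a valgrind summary.
import re

def definitely_lost_bytes(valgrind_log_text: str) -> int:
    """Return the 'definitely lost' byte count from a valgrind leak summary, or 0 if absent."""
    m = re.search(r"definitely lost:\s*([\d,]+) bytes", valgrind_log_text)
    return int(m.group(1).replace(",", "")) if m else 0

# With the Azure allowance removed, any non-zero value is treated as a failure.
if definitely_lost_bytes(open("valgrind.err").read()) > 0:
    raise SystemExit(8)
```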

View File

@ -0,0 +1,53 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import sys
import datetime  # needed for datetime.datetime below

from util.log import *
from util.sql import *
from util.cases import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        tdSql.execute("drop database if exists db")
        tdSql.execute("create database db keep 36500,36500,36500")
        tdSql.execute("use db")
        tdSql.execute("create stable if not exists stb1 (ts timestamp, c1 int) tags(t11 int)")
        tdSql.execute("create table t10 using stb1 tags(1)")

        gapdate = (datetime.datetime.now() - datetime.datetime(1970, 1, 1, 8, 0, 0)).days

        tdLog.printNoPrefix("==========step2:insert data")
        tdSql.execute(f"insert into t10 values (now-{gapdate}d, 1)")
        tdSql.execute(f"insert into t10 values (now-{gapdate + 1}d, 2)")

        tdLog.printNoPrefix("==========step3:query")
        tdSql.query("select * from t10 where c1=1")
        tdSql.checkRows(1)
        tdSql.query("select * from t10 where c1=2")
        tdSql.checkRows(1)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
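
The test above hinges on the `gapdate` arithmetic: `now-{gapdate}d` falls on the calendar day of the Unix epoch and `now-{gapdate + 1}d` on the day before it, so the two inserted rows straddle timestamp 0. A standalone sketch of that computation follows; the 08:00:00 reference assumes a UTC+8 local timezone on the test host, which is an assumption rather than something stated in the test itself.

```
# Illustration only: how the day offsets used by the test map back to calendar dates.
# The 1970-01-01 08:00:00 reference assumes the test host runs in a UTC+8 timezone.
import datetime

gapdate = (datetime.datetime.now() - datetime.datetime(1970, 1, 1, 8, 0, 0)).days
print(f"now-{gapdate}d     -> the day of the Unix epoch (1970-01-01 local time)")
print(f"now-{gapdate + 1}d -> the day before the epoch (1969-12-31)")
```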

View File

@ -0,0 +1,201 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import sys

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def createtable(self):
        # create stables
        tdSql.execute("create table if not exists stb1 (ts timestamp, c1 int) tags(t11 int, t12 int)")
        tdSql.execute("create table if not exists stb2 (ts timestamp, c2 int) tags(t21 int, t22 int)")
        tdSql.execute("create table if not exists stb3 (ts timestamp, c3 int) tags(t31 int, t32 int)")
        tdSql.execute("create table if not exists stb4 (ts timestamp, c4 int) tags(t41 int, t42 int)")
        tdSql.execute("create table if not exists stb5 (ts timestamp, c5 int) tags(t51 int, t52 int)")
        tdSql.execute("create table if not exists stb6 (ts timestamp, c6 int) tags(t61 int, t62 int)")
        tdSql.execute("create table if not exists stb7 (ts timestamp, c7 int) tags(t71 int, t72 int)")
        tdSql.execute("create table if not exists stb8 (ts timestamp, c8 int) tags(t81 int, t82 int)")
        tdSql.execute("create table if not exists stb9 (ts timestamp, c9 int) tags(t91 int, t92 int)")
        tdSql.execute("create table if not exists stb10 (ts timestamp, c10 int) tags(t101 int, t102 int)")
        tdSql.execute("create table if not exists stb11 (ts timestamp, c11 int) tags(t111 int, t112 int)")

        # create normal tables
        tdSql.execute("create table t10 using stb1 tags(0, 9)")
        tdSql.execute("create table t11 using stb1 tags(1, 8)")
        tdSql.execute("create table t12 using stb1 tags(2, 7)")
        tdSql.execute("create table t13 using stb1 tags(3, 6)")
        tdSql.execute("create table t14 using stb1 tags(4, 5)")
        tdSql.execute("create table t15 using stb1 tags(5, 4)")
        tdSql.execute("create table t16 using stb1 tags(6, 3)")
        tdSql.execute("create table t17 using stb1 tags(7, 2)")
        tdSql.execute("create table t18 using stb1 tags(8, 1)")
        tdSql.execute("create table t19 using stb1 tags(9, 0)")
        tdSql.execute("create table t110 using stb1 tags(10, 10)")
        tdSql.execute("create table t20 using stb2 tags(0, 9)")
        tdSql.execute("create table t21 using stb2 tags(1, 8)")
        tdSql.execute("create table t22 using stb2 tags(2, 7)")
        tdSql.execute("create table t30 using stb3 tags(0, 9)")
        tdSql.execute("create table t31 using stb3 tags(1, 8)")
        tdSql.execute("create table t32 using stb3 tags(2, 7)")

    def inserttable(self):
        for i in range(100):
            if i<60:
                tdSql.execute(f"insert into t20 values('2020-10-01 00:00:{i}.000', {i})")
                tdSql.execute(f"insert into t21 values('2020-10-01 00:00:{i}.000', {i})")
                tdSql.execute(f"insert into t22 values('2020-10-01 00:00:{i}.000', {i})")
                tdSql.execute(f"insert into t30 values('2020-10-01 00:00:{i}.000', {i})")
                tdSql.execute(f"insert into t31 values('2020-10-01 00:00:{i}.000', {i})")
                tdSql.execute(f"insert into t32 values('2020-10-01 00:00:{i}.000', {i})")
            else:
                tdSql.execute(f"insert into t20 values('2020-10-01 00:01:{i-60}.000', {i})")
                tdSql.execute(f"insert into t21 values('2020-10-01 00:01:{i-60}.000', {i})")
                tdSql.execute(f"insert into t22 values('2020-10-01 00:01:{i-60}.000', {i})")
                tdSql.execute(f"insert into t30 values('2020-10-01 00:01:{i-60}.000', {i})")
                tdSql.execute(f"insert into t31 values('2020-10-01 00:01:{i-60}.000', {i})")
                tdSql.execute(f"insert into t32 values('2020-10-01 00:01:{i-60}.000', {i})")
            for j in range(11):
                if i<60:
                    tdSql.execute(f"insert into t1{j} values('2020-10-01 00:00:{i}.000', {i})")
                else:
                    tdSql.execute(f"insert into t1{j} values('2020-10-01 00:01:{i-60}.000', {i})")

    def queryjointable(self):
        tdSql.error(
            '''select from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19
            where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
            and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts'''
        )
        tdSql.error("select * from t10 where t10.ts=t11.ts")
        tdSql.error("select * from where t10.ts=t11.ts")
        tdSql.error("select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19")
        tdSql.error("select * from stb1, stb2, stb3 where stb1.ts=stb2.ts and stb1.ts=stb3.ts")
        tdSql.error("select * from stb1, stb2, stb3 where stb1.t11=stb2.t21 and stb1.t11=stb3.t31")
        tdSql.error("select * from stb1, stb2, stb3")
        tdSql.error(
            '''select * from stb1
            join stb2 on stb1.ts=stb2.ts and stb1.t11=stb2.t21
            join stb3 on stb1.ts=stb3.ts and stb1.t11=stb3.t31'''
        )
        tdSql.error("select * from t10 join t11 on t10.ts=t11.ts join t12 on t11.ts=t12.ts")
        tdSql.query(
            '''select * from stb1,stb2,stb3
            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11 =stb3.t31'''
        )
        tdSql.checkRows(300)
        tdSql.query("select * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts")
        tdSql.checkRows(100)
        tdSql.error("selec * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts")
        tdSql.error("select * form t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts <> t12.ts and t11.ts=t13.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts != t12.ts and t11.ts=t13.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts or t11.ts=t13.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts=t13.ts")
        tdSql.error("select * from t11,t12,t13 when t11.c1=t12.c2 and t11.c1=t13.c3")
        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.c3 and t11.c1=t13.ts")
        tdSql.error("select ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts=ts and t11.ts=t13.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and ts>100")
        tdSql.error("select * from t11,t12,stb1 when t11.ts=t12.ts and t11.ts=stb1.ts")
        tdSql.error("select t14.ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts1")
        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t14.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts")
        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and t11.c1=t13.c3")
        tdSql.error(
            '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20
            where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
            and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.ts=t20.ts'''
        )
        tdSql.error(
            '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20
            where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
            and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts'''
        )
        tdSql.error(
            '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19
            where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
            and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.c1=t19.c1'''
        )
        tdSql.error(
            '''select * from stb1,stb2,stb3
            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21'''
        )
        tdSql.error(
            '''select * from stb1,stb2,stb3
            where stb1.ts=stb2.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31'''
        )
        tdSql.error(
            '''select * from stb1,stb2,stb3
            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31
            and stb1.t12=stb3=t32'''
        )
        tdSql.error(
            '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10,stb11
            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts
            and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.ts=stb11.ts
            and stb1.t11=stb2.t21 and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51
            and stb1.t11=stb6.t61 and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91
            and stb1.t11=stb10.t101 and stb1.t11=stb11.t111'''
        )
        tdSql.error(
            '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10
            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts
            and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.t11=stb2.t21
            and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51 and stb1.t11=stb6.t61
            and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91 and stb1.t11=stb10.t101
            and stb1.t12=stb11.t102'''
        )

    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.createtable()

        tdLog.printNoPrefix("==========step2:insert data")
        self.inserttable()

        tdLog.printNoPrefix("==========step3:query timestamp type")
        self.queryjointable()

        # after wal and sync, check again
        tdSql.query("show dnodes")
        index = tdSql.getData(0, 0)
        tdDnodes.stop(index)
        tdDnodes.start(index)

        tdLog.printNoPrefix("==========step4:query again after wal")
        self.queryjointable()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
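
The positive cases above accept plain comma joins in which every table is tied back to the first one on its timestamp column (plus matching tag columns when super tables are involved), and the file name suggests such joins are capped at ten tables. As a small sketch, not part of the test and with illustrative variable names only, a query in the accepted shape can be assembled like this:

```
# Build an N-way comma join of the form the test's positive cases accept:
# every table joined to the first one on its timestamp column.
tables = [f"t1{i}" for i in range(3)]                    # t10, t11, t12
conds = [f"{tables[0]}.ts={t}.ts" for t in tables[1:]]
sql = f"select * from {','.join(tables)} where {' and '.join(conds)}"
print(sql)
# select * from t10,t11,t12 where t10.ts=t11.ts and t10.ts=t12.ts
```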

View File

@ -0,0 +1,68 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys

from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def querysqls(self):
        tdSql.query("select stddev(c1) from t10 group by c1")
        tdSql.checkRows(6)
        tdSql.checkData(0, 0, 0)
        tdSql.checkData(1, 0, 0)
        tdSql.checkData(2, 0, 0)
        tdSql.checkData(3, 0, 0)
        tdSql.checkData(4, 0, 0)
        tdSql.checkData(5, 0, 0)
        tdSql.query("select stddev(c2) from t10")
        tdSql.checkData(0, 0, 0.5)

    def run(self):
        tdSql.execute("drop database if exists db")
        tdSql.execute("create database if not exists db keep 36500")
        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step1:create table && insert data")
        tdSql.execute("create stable stb1 (ts timestamp , c1 int ,c2 float) tags(t1 int)")
        tdSql.execute("create table t10 using stb1 tags(1)")
        tdSql.execute("insert into t10 values ('1969-12-31 00:00:00.000', 2,1)")
        tdSql.execute("insert into t10 values ('1970-01-01 00:00:00.000', 3,1)")
        tdSql.execute("insert into t10 values (0, 4,1)")
        tdSql.execute("insert into t10 values (now-18725d, 1,2)")
        tdSql.execute("insert into t10 values ('2021-04-06 00:00:00.000', 5,2)")
        tdSql.execute("insert into t10 values (now+1d,6,2)")

        tdLog.printNoPrefix("==========step2:query and check")
        self.querysqls()

        tdLog.printNoPrefix("==========step3:after wal,check again")
        tdSql.query("show dnodes")
        index = tdSql.getData(0, 0)
        tdDnodes.stop(index)
        tdDnodes.start(index)
        self.querysqls()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
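
The expected values in this test can be checked by hand: each `c1` group contains a single row, so its standard deviation is 0, and the six `c2` values 1,1,1,2,2,2 have mean 1.5 and population standard deviation 0.5 (the fact that the test expects 0.5 rather than roughly 0.548 indicates a population, not sample, standard deviation). A quick verification:

```
# Hand-check of the expected stddev(c2) result used by the test above.
c2 = [1, 1, 1, 2, 2, 2]
mean = sum(c2) / len(c2)                                  # 1.5
variance = sum((x - mean) ** 2 for x in c2) / len(c2)     # population variance = 0.25
print(variance ** 0.5)                                    # 0.5
# Each c1 group holds exactly one row, so stddev(c1) per group is 0.
```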

View File

@ -220,6 +220,7 @@ sql_error select sum(c3), ts, c2 from group_tb0 where c1 < 20 group by c1;
sql_error select sum(c3), first(ts), c2 from group_tb0 where c1 < 20 group by c1;
sql_error select first(c3), ts, c1, c2 from group_tb0 where c1 < 20 group by c1;
sql_error select first(c3), last(c3), ts, c1 from group_tb0 where c1 < 20 group by c1;
sql_error select ts from group_tb0 group by c1;
#===========================interval=====not support======================
sql_error select count(*), c1 from group_tb0 where c1<20 interval(1y) group by c1;

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -80,6 +80,7 @@ function runSimCaseOneByOne {
fi
done < $1
}
function runSimCaseOneByOnefq {
start=`sed -n "/$1-start/=" jenkins/basic.txt`
@ -155,6 +156,7 @@ function runPyCaseOneByOne {
fi
done < $1
}
function runPyCaseOneByOnefq() {
cd $tests_dir/pytest
if [[ $1 =~ full ]] ; then
@ -202,13 +204,37 @@ function runPyCaseOneByOnefq() {
rm -rf ../../sim/case.log
}
######################
# main entry
######################
unameOut="$(uname -s)"
case "${unameOut}" in
Linux*) OS=Linux;;
Darwin*) OS=Darwin;;
CYGWIN*) OS=Windows;;
*) OS=Unknown;;
esac
case "${OS}" in
Linux*) TAOSLIB=libtaos.so;;
Darwin*) TAOSLIB=libtaos.dylib;;
Windows*) TAOSLIB=taos.dll;;
Unknown) TAOSLIB="UNKNOWN:${unameOut}";;
esac
echo TAOSLIB is ${TAOSLIB}
totalFailed=0
totalPyFailed=0
totalJDBCFailed=0
totalUnitFailed=0
totalExampleFailed=0
if [ "${OS}" == "Linux" ]; then
corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
fi
if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then
echo "### run TSIM test case ###"
cd $tests_dir/script
@ -290,11 +316,11 @@ if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$2" != "
fi
TOP_DIR=`pwd`
TAOSLIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1`
TAOSLIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1`
if [[ "$TAOSLIB_DIR" == *"$IN_TDINTERNAL"* ]]; then
LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4,5`
LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4,5`
else
LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4`
LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4`
fi
export LD_LIBRARY_PATH=$TOP_DIR/$LIB_DIR:$LD_LIBRARY_PATH
@ -499,8 +525,10 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" !=
echo -e "\n${RED} ### Total $totalExampleFailed examples failed! ### ${NC}"
fi
if [ "${OS}" == "Linux" ]; then
dohavecore 1
fi
fi
exit $(($totalFailed + $totalPyFailed + $totalJDBCFailed + $totalUnitFailed + $totalExampleFailed))