Merge branch 'develop' of https://github.com/taosdata/TDengine into develop

yihaoDeng 2021-04-09 17:25:19 +08:00
commit 369650757e
71 changed files with 7704 additions and 1963 deletions

Jenkinsfile

@ -42,17 +42,49 @@ def pre_test(){
killall -9 taosd ||echo "no taosd running"
killall -9 gdb || echo "no gdb running"
cd ${WKC}
git reset --hard HEAD~10 >/dev/null
git checkout develop
git reset --hard HEAD~10 >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WKC}
git checkout master
'''
}
else {
sh '''
cd ${WKC}
git checkout develop
'''
}
}
sh'''
cd ${WKC}
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git clean -dfx
cd ${WK}
git reset --hard HEAD~10
git checkout develop
git pull >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WK}
git checkout master
'''
}
else {
sh '''
cd ${WK}
git checkout develop
'''
}
}
sh '''
cd ${WK}
git pull >/dev/null
export TZ=Asia/Harbin
date
git clean -dfx
@ -92,7 +124,8 @@ pipeline {
git pull
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
'''
'''
script{
env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
}
@ -185,14 +218,12 @@ pipeline {
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
timeout(time: 45, unit: 'MINUTES'){
sh '''
date


@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.18.0")
SET(TD_VER_NUMBER "2.0.19.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)


@ -101,7 +101,7 @@ $ taos -h 192.168.0.1 -s "use db; show tables;"
### Running SQL command scripts
A script of SQL commands can be run in the TDengine shell with the `source` command.
```mysql
taos> source <filename>;
@ -109,10 +109,10 @@ taos> source <filename>;
### Shell tips
- Use the up and down arrow keys to browse previously entered commands
- Change a user's password with the `alter user` command in the shell
- Press ctrl+c to abort a query that is still running
- Run `RESET QUERY CACHE` to clear the locally cached table schemas
## <a class="anchor" id="demo"></a>Experience TDengine's ultimate speed
@ -212,7 +212,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
Note: ● means verified by official tests; ○ means verified by unofficial tests.


@ -14,7 +14,7 @@ TDengine提供了丰富的应用程序开发接口其中包括C/C++、Java、
| **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |
Here ● means verified by official tests; ○ means verified by unofficial tests.
@ -23,7 +23,7 @@ TDengine提供了丰富的应用程序开发接口其中包括C/C++、Java、
* On a system without the TDengine server software installed, using a connector (other than RESTful) to access a TDengine database requires installing the client package of the matching version, so that the application driver (libtaos.so on Linux, taos.dll on Windows) is present on the system; otherwise an error is raised because the corresponding library file cannot be found.
* Every API that executes SQL statements, such as `taos_query`, `taos_query_a` and `taos_subscribe` in the C/C++ Connector and their counterparts in other languages, runs only one SQL statement per call; if the argument actually contains multiple statements, the behavior is undefined.
* Users upgrading TDengine to version 2.0.8.0 or later must also update the JDBC connection to TDengine: taos-jdbcdriver must be upgraded to 2.0.12 or above. For the detailed version dependencies, see the [taos-jdbcdriver documentation](https://www.taosdata.com/cn/documentation/connector/java#version).
* Whichever language's connector is chosen, with TDengine 2.0 and later it is recommended that every thread of a database application establish its own connection, or build a thread-based connection pool, so that the per-connection "USE statement" state cannot interfere across threads (the query and write operations on a connection are themselves thread safe); a minimal C sketch follows this list.
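Not part of this commit: a minimal C sketch of the two rules above, one connection per thread and one SQL statement per `taos_query()` call. The host, credentials and the `test.t10` table are placeholders (the table name comes from the demo earlier in this page), and error handling is reduced to a message.
```c
// Minimal sketch (not from this commit): one connection per thread, and one
// SQL statement per taos_query() call.  Host, user and password below are
// placeholders; test.t10 is the demo table used earlier in this page.
#include <stdio.h>
#include <pthread.h>
#include <taos.h>

static void *worker(void *arg) {
  (void)arg;
  // each thread opens its own connection, so the per-connection "USE"
  // state of one thread cannot interfere with another
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return NULL;
  }

  // exactly one statement per call; packing several statements into the
  // string is undefined behavior, as noted above
  TAOS_RES *res = taos_query(conn, "SELECT AVG(f1), MAX(f2), MIN(f3) FROM test.t10");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);

  taos_close(conn);
  return NULL;
}

int main(void) {
  taos_init();  // optional explicit init
  pthread_t tid[4];
  for (int i = 0; i < 4; ++i) pthread_create(&tid[i], NULL, worker, NULL);
  for (int i = 0; i < 4; ++i) pthread_join(tid[i], NULL);
  taos_cleanup();
  return 0;
}
```
Link against the client library when building, e.g. `gcc demo.c -ltaos -lpthread`.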
## <a class="anchor" id="driver"></a>Steps to install the connector driver


@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.18.0'
version: '2.0.19.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.18.0
- usr/lib/libtaos.so.2.0.19.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so


@ -36,19 +36,6 @@ extern "C" {
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
typedef struct SParsedColElem {
int16_t colIndex;
uint16_t offset;
} SParsedColElem;
typedef struct SParsedDataColInfo {
int16_t numOfCols;
int16_t numOfAssignedCols;
SParsedColElem elems[TSDB_MAX_COLUMNS];
bool hasVal[TSDB_MAX_COLUMNS];
} SParsedDataColInfo;
#pragma pack(push,1)
// this struct is transferred as binary, padding two bytes to avoid
// an 'uid' whose low bytes are 0xff being recognized as NULL,
@ -118,6 +105,8 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
uint32_t offset);
@ -140,6 +129,8 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
bool tscIsTopbotQuery(SQueryInfo* pQueryInfo);
int32_t tscGetTopbotQueryParam(SQueryInfo* pQueryInfo);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);


@ -96,6 +96,25 @@ typedef struct STableMetaInfo {
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
typedef struct SColumnIndex {
int16_t tableIndex;
int16_t columnIndex;
} SColumnIndex;
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SColumn {
SColumnIndex colIndex;
int32_t numOfFilters;
SColumnFilterInfo *filterInfo;
} SColumn;
/* the structure for sql function in select clause */
typedef struct SSqlExpr {
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
@ -109,32 +128,24 @@ typedef struct SSqlExpr {
tVariant param[3]; // parameters are not more than 3
int32_t offset; // sub result column value of arithmetic expression.
int16_t resColId; // result column id
SColumn *pFilter; // expr filter
} SSqlExpr;
typedef struct SColumnIndex {
int16_t tableIndex;
int16_t columnIndex;
} SColumnIndex;
typedef struct SExprFilter {
tSqlExpr *pExpr; //used for having parse
SSqlExpr *pSqlExpr;
SArray *fp;
SColumn *pFilters; //having filter info
}SExprFilter;
typedef struct SInternalField {
TAOS_FIELD field;
bool visible;
SExprInfo *pArithExprInfo;
SSqlExpr *pSqlExpr;
SExprFilter *pFieldFilters;
} SInternalField;
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SColumn {
SColumnIndex colIndex;
int32_t numOfFilters;
SColumnFilterInfo *filterInfo;
} SColumn;
typedef struct SCond {
uint64_t uid;
int32_t len; // length of tag query condition data
@ -175,6 +186,19 @@ typedef struct SParamInfo {
uint32_t offset;
} SParamInfo;
typedef struct SBoundColumn {
bool hasVal; // denote if current column has bound or not
int32_t offset; // all column offset value
} SBoundColumn;
typedef struct SParsedDataColInfo {
int16_t numOfCols;
int16_t numOfBound;
int32_t *boundedColumns;
SBoundColumn *cols;
} SParsedDataColInfo;
typedef struct STableDataBlocks {
SName tableName;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
@ -189,6 +213,8 @@ typedef struct STableDataBlocks {
STableMeta *pTableMeta; // the tableMeta of the current table; it will be used during submit, so keep a ref to avoid it being removed from the cache
char *pData;
SParsedDataColInfo boundColumnInfo;
// for parameter ('?') binding
uint32_t numOfAllocedParams;
uint32_t numOfParams;
@ -228,6 +254,7 @@ typedef struct SQueryInfo {
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;
int32_t havingFieldNum;
} SQueryInfo;
typedef struct {
@ -246,10 +273,10 @@ typedef struct {
char * curSql; // current sql, resume position of sql after parsing paused
int8_t parseFinished;
char reserve2[3]; // fix bus error on arm32
char reserve2[3]; // fix bus error on arm32
int16_t numOfCols;
char reserve3[2]; // fix bus error on arm32
char reserve3[2]; // fix bus error on arm32
uint32_t allocSize;
char * payload;
int32_t payloadLen;
@ -259,9 +286,9 @@ typedef struct {
int32_t numOfParams;
int8_t dataSourceType; // load data from file or not
char reserve4[3]; // fix bus error on arm32
char reserve4[3]; // fix bus error on arm32
int8_t submitSchema; // submit block is built with table schema
char reserve5[3]; // fix bus error on arm32
char reserve5[3]; // fix bus error on arm32
STagData tagData; // NOTE: pTagData->data is used as a variant length array
SName **pTableNameList; // all involved tableMeta list of current insert sql statement.
@ -425,6 +452,7 @@ void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void destroyTableNameList(SSqlCmd* pCmd);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
@ -462,6 +490,7 @@ char* tscGetSqlStr(SSqlObj* pSql);
bool tscIsQueryWithLimit(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);


@ -22,6 +22,7 @@
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "qUtil.h"
typedef struct SCompareParam {
SLocalDataSource **pLocalData;
@ -338,11 +339,20 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
pReducer->resColModel->capacity = pReducer->nResultBufSize;
pReducer->finalModel = pFFModel;
int32_t expandFactor = 1;
if (finalmodel->rowSize > 0) {
pReducer->resColModel->capacity /= finalmodel->rowSize;
bool topBotQuery = tscIsTopbotQuery(pQueryInfo);
if (topBotQuery) {
expandFactor = tscGetTopbotQueryParam(pQueryInfo);
pReducer->resColModel->capacity /= (finalmodel->rowSize * expandFactor);
pReducer->resColModel->capacity *= expandFactor;
} else {
pReducer->resColModel->capacity /= finalmodel->rowSize;
}
}
assert(finalmodel->rowSize > 0 && finalmodel->rowSize <= pReducer->rowSize);
pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity);
if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL ||
@ -1150,9 +1160,10 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
memset(buf, 0, (size_t)maxBufSize);
memcpy(buf, pCtx->pOutput, (size_t)pCtx->outputBytes);
char* next = pCtx->pOutput;
for (int32_t i = 0; i < inc; ++i) {
pCtx->pOutput += pCtx->outputBytes;
memcpy(pCtx->pOutput, buf, (size_t)pCtx->outputBytes);
next += pCtx->outputBytes;
memcpy(next, buf, (size_t)pCtx->outputBytes);
}
}
@ -1233,6 +1244,76 @@ static bool saveGroupResultInfo(SSqlObj *pSql) {
return false;
}
bool doFilterFieldData(char *input, SExprFilter* pFieldFilters, int16_t type, bool* notSkipped) {
bool qualified = false;
for(int32_t k = 0; k < pFieldFilters->pFilters->numOfFilters; ++k) {
__filter_func_t fp = taosArrayGetP(pFieldFilters->fp, k);
SColumnFilterElem filterElem = {.filterInfo = pFieldFilters->pFilters->filterInfo[k]};
bool isnull = isNull(input, type);
if (isnull) {
if (fp == isNullOperator) {
qualified = true;
break;
} else {
continue;
}
} else {
if (fp == notNullOperator) {
qualified = true;
break;
} else if (fp == isNullOperator) {
continue;
}
}
if (fp(&filterElem, input, input, type)) {
qualified = true;
break;
}
}
*notSkipped = qualified;
return TSDB_CODE_SUCCESS;
}
int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkipped) {
*notSkipped = true;
if (pQueryInfo->havingFieldNum <= 0) {
return TSDB_CODE_SUCCESS;
}
//int32_t exprNum = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
size_t numOfOutput = tscNumOfFields(pQueryInfo);
for(int32_t i = 0; i < numOfOutput; ++i) {
SInternalField* pInterField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
SExprFilter* pFieldFilters = pInterField->pFieldFilters;
if (pFieldFilters == NULL) {
continue;
}
int32_t type = pInterField->field.type;
char* pInput = pOutput->data + pOutput->num* pFieldFilters->pSqlExpr->offset;
doFilterFieldData(pInput, pFieldFilters, type, notSkipped);
if (*notSkipped == false) {
return TSDB_CODE_SUCCESS;
}
}
return TSDB_CODE_SUCCESS;
}
/**
*
* @param pSql
@ -1273,6 +1354,22 @@ bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurren
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize);
}
bool notSkipped = true;
doHavingFilter(pQueryInfo, pResBuf, &notSkipped);
if (!notSkipped) {
pRes->numOfRows = 0;
pLocalMerge->discard = !noMoreCurrentGroupRes;
if (pLocalMerge->discard) {
SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel;
tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1);
}
return notSkipped;
}
// no interval query, no fill operation
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo);
@ -1440,6 +1537,11 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tFilePage *tmpBuffer = pLocalMerge->pTempBuffer;
int32_t remain = 1;
if (tscIsTopbotQuery(pQueryInfo)) {
remain = tscGetTopbotQueryParam(pQueryInfo);
}
if (doHandleLastRemainData(pSql)) {
return TSDB_CODE_SUCCESS;
}
@ -1528,7 +1630,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
* if the previous group does NOT generate any result (pResBuf->num == 0),
* continue to process results instead of return results.
*/
if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalMerge->resColModel->capacity)) {
if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num + remain >= pLocalMerge->resColModel->capacity)) {
// does not belong to the same group
bool notSkipped = genFinalResults(pSql, pLocalMerge, !sameGroup);


@ -40,6 +40,7 @@ enum {
};
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema, char* str, char** end);
static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
errno = 0;
@ -94,12 +95,12 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
*/
SStrToken valueToken;
index = 0;
sToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
sToken = tStrGetToken(pTokenEnd, &index, false);
pTokenEnd += index;
if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) {
index = 0;
valueToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
valueToken = tStrGetToken(pTokenEnd, &index, false);
pTokenEnd += index;
if (valueToken.n < 2) {
@ -117,7 +118,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
if (sToken.type == TK_PLUS) {
useconds += interval;
} else {
useconds = (useconds >= interval) ? useconds - interval : 0;
useconds = useconds - interval;
}
*next = pTokenEnd;
@ -127,13 +128,12 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return TSDB_CODE_SUCCESS;
}
// todo extract the null value check
static bool isNullStr(SStrToken* pToken) {
return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
}
int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
int16_t timePrec) {
int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
int16_t timePrec) {
int64_t iv;
int32_t ret;
char *endptr = NULL;
@ -417,29 +417,32 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
return TSDB_CODE_SUCCESS;
}
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, SSqlCmd* pCmd,
int16_t timePrec, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
SStrToken sToken = {0};
char * payload = pDataBlocks->pData + pDataBlocks->size;
int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int16_t timePrec, int32_t *len,
char *tmpTokenBuf) {
int32_t index = 0;
SStrToken sToken = {0};
char *payload = pDataBlocks->pData + pDataBlocks->size;
SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo;
SSchema *schema = tscGetTableSchema(pDataBlocks->pTableMeta);
// 1. set the parsed value from sql string
int32_t rowSize = 0;
for (int i = 0; i < spd->numOfAssignedCols; ++i) {
for (int i = 0; i < spd->numOfBound; ++i) {
// the start position in data block buffer of current value in sql
char * start = payload + spd->elems[i].offset;
int16_t colIndex = spd->elems[i].colIndex;
SSchema *pSchema = schema + colIndex;
int32_t colIndex = spd->boundedColumns[i];
char *start = payload + spd->cols[colIndex].offset;
SSchema *pSchema = &schema[colIndex];
rowSize += pSchema->bytes;
index = 0;
sToken = tStrGetToken(*str, &index, true, 0, NULL);
sToken = tStrGetToken(*str, &index, true);
*str += index;
if (sToken.type == TK_QUESTION) {
if (pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
*code = tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
return -1;
return tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
}
uint32_t offset = (uint32_t)(start - pDataBlocks->pData);
@ -448,15 +451,13 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
}
strcpy(pCmd->payload, "client out of memory");
*code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return -1;
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
int16_t type = sToken.type;
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
*code = tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
return -1;
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
}
// Remove quotation marks
@ -485,26 +486,23 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
if (ret != TSDB_CODE_SUCCESS) {
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1; // NOTE: here 0 mean error!
return ret;
}
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z);
*code = TSDB_CODE_TSC_INVALID_TIME_STAMP;
return -1;
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
}
// 2. set the null value for the columns that do not assign values
if (spd->numOfAssignedCols < spd->numOfCols) {
if (spd->numOfBound < spd->numOfCols) {
char *ptr = payload;
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (!spd->hasVal[i]) { // current column do not have any value to insert, set it to null
if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null
if (schema[i].type == TSDB_DATA_TYPE_BINARY) {
varDataSetLen(ptr, sizeof(int8_t));
*(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL;
@ -522,7 +520,8 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
rowSize = (int32_t)(ptr - payload);
}
return rowSize;
*len = rowSize;
return TSDB_CODE_SUCCESS;
}
static int32_t rowDataCompar(const void *lhs, const void *rhs) {
@ -536,80 +535,79 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
}
}
int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMeta, int maxRows,
SParsedDataColInfo *spd, SSqlCmd* pCmd, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSqlCmd* pCmd, int32_t* numOfRows, char *tmpTokenBuf) {
int32_t index = 0;
int32_t code = 0;
(*numOfRows) = 0;
SStrToken sToken;
int32_t numOfRows = 0;
SSchema *pSchema = tscGetTableSchema(pTableMeta);
STableMeta* pTableMeta = pDataBlock->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
int32_t precision = tinfo.precision;
if (spd->hasVal[0] == false) {
*code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", *str);
return -1;
}
while (1) {
index = 0;
sToken = tStrGetToken(*str, &index, false, 0, NULL);
sToken = tStrGetToken(*str, &index, false);
if (sToken.n == 0 || sToken.type != TK_LP) break;
*str += index;
if (numOfRows >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
if ((*numOfRows) >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
int32_t tSize;
*code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
if (*code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
if (code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
strcpy(pCmd->payload, "client out of memory");
return -1;
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
ASSERT(tSize > maxRows);
maxRows = tSize;
}
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, pCmd, precision, code, tmpTokenBuf);
if (len <= 0) { // error message has been set in tsParseOneRowData
return -1;
int32_t len = 0;
code = tsParseOneRow(str, pDataBlock, pCmd, precision, &len, tmpTokenBuf);
if (code != TSDB_CODE_SUCCESS) { // error message has been set in tsParseOneRow, return directly
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
pDataBlock->size += len;
index = 0;
sToken = tStrGetToken(*str, &index, false, 0, NULL);
sToken = tStrGetToken(*str, &index, false);
*str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
}
numOfRows++;
(*numOfRows)++;
}
if (numOfRows <= 0) {
if ((*numOfRows) <= 0) {
strcpy(pCmd->payload, "no any data points");
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} else {
return numOfRows;
return TSDB_CODE_SUCCESS;
}
}
static void tscSetAssignedColumnInfo(SParsedDataColInfo *spd, SSchema *pSchema, int32_t numOfCols) {
spd->numOfCols = numOfCols;
spd->numOfAssignedCols = numOfCols;
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) {
pColInfo->numOfCols = numOfCols;
pColInfo->numOfBound = numOfCols;
for (int32_t i = 0; i < numOfCols; ++i) {
spd->hasVal[i] = true;
spd->elems[i].colIndex = i;
pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t));
pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn));
for (int32_t i = 0; i < pColInfo->numOfCols; ++i) {
if (i > 0) {
spd->elems[i].offset = spd->elems[i - 1].offset + pSchema[i - 1].bytes;
pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset;
}
pColInfo->cols[i].hasVal = true;
pColInfo->boundedColumns[i] = i;
}
}
@ -697,33 +695,26 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
}
}
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColInfo *spd, int32_t *totalNum) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableMeta *pTableMeta = pTableMetaInfo->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta);
int32_t maxNumOfRows;
ret = tscAllocateMemIfNeed(dataBuf, tinfo.rowSize, &maxNumOfRows);
if (TSDB_CODE_SUCCESS != ret) {
int32_t code = tscAllocateMemIfNeed(dataBuf, tinfo.rowSize, &maxNumOfRows);
if (TSDB_CODE_SUCCESS != code) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
int32_t code = TSDB_CODE_TSC_INVALID_SQL;
char * tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
code = TSDB_CODE_TSC_INVALID_SQL;
char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
int32_t numOfRows = tsParseValues(str, dataBuf, pTableMeta, maxNumOfRows, spd, pCmd, &code, tmpTokenBuf);
int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
free(tmpTokenBuf);
if (numOfRows <= 0) {
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -736,25 +727,23 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColI
}
SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData);
code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
code = tsSetBlockInfo(pBlocks, dataBuf->pTableMeta, numOfRows);
if (code != TSDB_CODE_SUCCESS) {
tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", *str);
return code;
}
dataBuf->vgId = pTableMeta->vgId;
dataBuf->numOfTables = 1;
*totalNum += numOfRows;
return TSDB_CODE_SUCCESS;
}
static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundColumn) {
int32_t index = 0;
SStrToken sToken = {0};
SStrToken tableToken = {0};
int32_t code = TSDB_CODE_SUCCESS;
const int32_t TABLE_INDEX = 0;
const int32_t STABLE_INDEX = 1;
@ -767,38 +756,37 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
// get the token of specified table
index = 0;
tableToken = tStrGetToken(sql, &index, false, 0, NULL);
tableToken = tStrGetToken(sql, &index, false);
sql += index;
char *cstart = NULL;
char *cend = NULL;
// skip possibly exists column list
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sToken = tStrGetToken(sql, &index, false);
sql += index;
int32_t numOfColList = 0;
bool createTable = false;
// Bind table columns list in string, skip it and continue
if (sToken.type == TK_LP) {
cstart = &sToken.z[0];
index = 0;
*boundColumn = &sToken.z[0];
while (1) {
sToken = tStrGetToken(sql, &index, false, 0, NULL);
index = 0;
sToken = tStrGetToken(sql, &index, false);
if (sToken.type == TK_RP) {
cend = &sToken.z[0];
break;
}
sql += index;
++numOfColList;
}
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sToken = tStrGetToken(sql, &index, false);
sql += index;
}
if (numOfColList == 0 && cstart != NULL) {
if (numOfColList == 0 && (*boundColumn) != NULL) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -806,7 +794,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
if (sToken.type == TK_USING) { // create table if not exists according to the super table
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sToken = tStrGetToken(sql, &index, false);
sql += index;
//the source super table is moved to the secondary position of the pTableMetaInfo list
@ -835,82 +823,42 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pSTableMetaInfo->pTableMeta);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
SParsedDataColInfo spd = {0};
uint8_t numOfTags = tscGetNumOfTags(pSTableMetaInfo->pTableMeta);
spd.numOfCols = numOfTags;
tscSetBoundColumnInfo(&spd, pTagSchema, tscGetNumOfTags(pSTableMetaInfo->pTableMeta));
// if specify some tags column
if (sToken.type != TK_LP) {
tscSetAssignedColumnInfo(&spd, pTagSchema, numOfTags);
} else {
/* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen)
* tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn); */
int16_t offset[TSDB_MAX_COLUMNS] = {0};
for (int32_t t = 1; t < numOfTags; ++t) {
offset[t] = offset[t - 1] + pTagSchema[t - 1].bytes;
}
while (1) {
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (TK_STRING == sToken.type) {
strdequote(sToken.z);
sToken.n = (uint32_t)strtrim(sToken.z);
}
if (sToken.type == TK_RP) {
break;
}
bool findColumnIndex = false;
// todo speedup by using hash list
for (int32_t t = 0; t < numOfTags; ++t) {
if (strncmp(sToken.z, pTagSchema[t].name, sToken.n) == 0 && strlen(pTagSchema[t].name) == sToken.n) {
SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++];
pElem->offset = offset[t];
pElem->colIndex = t;
if (spd.hasVal[t] == true) {
return tscInvalidSQLErrMsg(pCmd->payload, "duplicated tag name", sToken.z);
}
spd.hasVal[t] = true;
findColumnIndex = true;
break;
}
}
if (!findColumnIndex) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken.z);
}
}
if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > numOfTags) {
return tscInvalidSQLErrMsg(pCmd->payload, "tag name expected", sToken.z);
}
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
}
if (sToken.type != TK_TAGS) {
index = 0;
sToken = tStrGetToken(sql, &index, false);
if (sToken.type != TK_TAGS && sToken.type != TK_LP) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
}
// parse the bound tags column
if (sToken.type == TK_LP) {
/*
* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen)
* tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn);
*/
char* end = NULL;
code = parseBoundColumns(pCmd, &spd, pTagSchema, sql, &end);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
sql = end;
index = 0; // keywords of "TAGS"
sToken = tStrGetToken(sql, &index, false);
sql += index;
} else {
sql += index;
}
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sToken = tStrGetToken(sql, &index, false);
sql += index;
if (sToken.type != TK_LP) {
return tscInvalidSQLErrMsg(pCmd->payload, NULL, sToken.z);
return tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
}
SKVRowBuilder kvRowBuilder = {0};
@ -918,13 +866,11 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
uint32_t ignoreTokenTypes = TK_LP;
uint32_t numOfIgnoreToken = 1;
for (int i = 0; i < spd.numOfAssignedCols; ++i) {
SSchema* pSchema = pTagSchema + spd.elems[i].colIndex;
for (int i = 0; i < spd.numOfBound; ++i) {
SSchema* pSchema = &pTagSchema[spd.boundedColumns[i]];
index = 0;
sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes);
sToken = tStrGetToken(sql, &index, true);
sql += index;
if (TK_ILLEGAL == sToken.type) {
@ -943,7 +889,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
}
char tagVal[TSDB_MAX_TAGS_LEN];
code = tsParseOneColumnData(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
code = tsParseOneColumn(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
if (code != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
return code;
@ -952,6 +898,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
}
tscDestroyBoundColumnInfo(&spd);
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
tdDestroyKVRowBuilder(&kvRowBuilder);
if (row == NULL) {
@ -974,7 +922,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
pCmd->tagData.data = pTag;
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sToken = tStrGetToken(sql, &index, false);
sql += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
@ -989,33 +937,21 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return ret;
}
createTable = true;
code = tscGetTableMetaEx(pSql, pTableMetaInfo, true);
if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) {
return code;
}
} else {
if (cstart != NULL) {
sql = cstart;
} else {
sql = sToken.z;
}
sql = sToken.z;
code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);
if (pCmd->curSql == NULL) {
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS);
}
}
int32_t len = (int32_t)(cend - cstart + 1);
if (cstart != NULL && createTable == true) {
/* move the column list to start position of the next accessed points */
memmove(sql - len, cstart, len);
*sqlstr = sql - len;
} else {
*sqlstr = sql;
}
*sqlstr = sql;
if (*sqlstr == NULL) {
code = TSDB_CODE_TSC_INVALID_SQL;
@ -1043,6 +979,76 @@ static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) {
return TSDB_CODE_SUCCESS;
}
static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema,
char* str, char **end) {
pColInfo->numOfBound = 0;
memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * pColInfo->numOfCols);
for(int32_t i = 0; i < pColInfo->numOfCols; ++i) {
pColInfo->cols[i].hasVal = false;
}
int32_t code = TSDB_CODE_SUCCESS;
int32_t index = 0;
SStrToken sToken = tStrGetToken(str, &index, false);
str += index;
if (sToken.type != TK_LP) {
code = tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
goto _clean;
}
while (1) {
index = 0;
sToken = tStrGetToken(str, &index, false);
str += index;
if (TK_STRING == sToken.type) {
tscDequoteAndTrimToken(&sToken);
}
if (sToken.type == TK_RP) {
if (end != NULL) { // set the end position
*end = str;
}
break;
}
bool findColumnIndex = false;
// todo speedup by using hash list
for (int32_t t = 0; t < pColInfo->numOfCols; ++t) {
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
if (pColInfo->cols[t].hasVal == true) {
code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z);
goto _clean;
}
pColInfo->cols[t].hasVal = true;
pColInfo->boundedColumns[pColInfo->numOfBound] = t;
pColInfo->numOfBound += 1;
findColumnIndex = true;
break;
}
}
if (!findColumnIndex) {
code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column/tag name", sToken.z);
goto _clean;
}
}
memset(&pColInfo->boundedColumns[pColInfo->numOfBound], 0 , sizeof(int32_t) * (pColInfo->numOfCols - pColInfo->numOfBound));
return TSDB_CODE_SUCCESS;
_clean:
pCmd->curSql = NULL;
pCmd->parseFinished = 1;
return code;
}
/**
* parse insert sql
* @param pSql
@ -1083,7 +1089,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
while (1) {
int32_t index = 0;
SStrToken sToken = tStrGetToken(str, &index, false, 0, NULL);
SStrToken sToken = tStrGetToken(str, &index, false);
// no data in the sql string anymore.
if (sToken.n == 0) {
@ -1108,7 +1114,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
}
pCmd->curSql = sToken.z;
char buf[TSDB_TABLE_FNAME_LEN];
char buf[TSDB_TABLE_FNAME_LEN];
SStrToken sTblToken;
sTblToken.z = buf;
// Check if the table name available or not
@ -1121,7 +1127,8 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
}
if ((code = tscCheckIfCreateTable(&str, pSql)) != TSDB_CODE_SUCCESS) {
char *bindedColumns = NULL;
if ((code = tscCheckIfCreateTable(&str, pSql, &bindedColumns)) != TSDB_CODE_SUCCESS) {
/*
* After retrieving the table meta from server, the sql string will be parsed from the paused position.
* And during the getTableMetaCallback function, the sql string will be parsed from the paused position.
@ -1129,7 +1136,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) {
return code;
}
tscError("%p async insert parse error, code:%s", pSql, tstrerror(code));
pCmd->curSql = NULL;
goto _clean;
@ -1141,41 +1148,22 @@ int tsParseInsertSql(SSqlObj *pSql) {
}
index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL);
sToken = tStrGetToken(str, &index, false);
str += index;
if (sToken.n == 0) {
if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z);
goto _clean;
}
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
if (sToken.type == TK_VALUES) {
SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean;
}
/*
* app here insert data in different vnodes, so we need to set the following
* data in another submit procedure using async insert routines
*/
code = doParseInsertStatement(pCmd, &str, &spd, &totalNum);
if (code != TSDB_CODE_SUCCESS) {
goto _clean;
}
} else if (sToken.type == TK_FILE) {
if (sToken.type == TK_FILE) {
if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean;
}
index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL);
sToken = tStrGetToken(str, &index, false);
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
goto _clean;
@ -1199,83 +1187,63 @@ int tsParseInsertSql(SSqlObj *pSql) {
tstrncpy(pCmd->payload, full_path.we_wordv[0], pCmd->allocSize);
wordfree(&full_path);
} else if (sToken.type == TK_LP) {
/* insert into tablename(col1, col2,..., coln) values(v1, v2,... vn); */
STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;
SSchema * pSchema = tscGetTableSchema(pTableMeta);
} else {
if (bindedColumns == NULL) {
STableMeta *pTableMeta = pTableMetaInfo->pTableMeta;
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean;
}
SParsedDataColInfo spd = {0};
spd.numOfCols = tinfo.numOfColumns;
int16_t offset[TSDB_MAX_COLUMNS] = {0};
for (int32_t t = 1; t < tinfo.numOfColumns; ++t) {
offset[t] = offset[t - 1] + pSchema[t - 1].bytes;
}
while (1) {
index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL);
str += index;
if (TK_STRING == sToken.type) {
tscDequoteAndTrimToken(&sToken);
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean;
}
if (sToken.type == TK_RP) {
break;
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
&dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) {
goto _clean;
}
bool findColumnIndex = false;
code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
if (code != TSDB_CODE_SUCCESS) {
goto _clean;
}
} else { // bindedColumns != NULL
// insert into tablename(col1, col2,..., coln) values(v1, v2,... vn);
STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;
// todo speedup by using hash list
for (int32_t t = 0; t < tinfo.numOfColumns; ++t) {
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++];
pElem->offset = offset[t];
pElem->colIndex = t;
if (spd.hasVal[t] == true) {
code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z);
goto _clean;
}
spd.hasVal[t] = true;
findColumnIndex = true;
break;
}
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean;
}
if (!findColumnIndex) {
code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column name", sToken.z);
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
&dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) {
goto _clean;
}
SSchema *pSchema = tscGetTableSchema(pTableMeta);
code = parseBoundColumns(pCmd, &dataBuf->boundColumnInfo, pSchema, bindedColumns, NULL);
if (code != TSDB_CODE_SUCCESS) {
goto _clean;
}
if (dataBuf->boundColumnInfo.cols[0].hasVal == false) {
code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", NULL);
goto _clean;
}
if (sToken.type != TK_VALUES) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z);
goto _clean;
}
code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
if (code != TSDB_CODE_SUCCESS) {
goto _clean;
}
}
if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > tinfo.numOfColumns) {
code = tscInvalidSQLErrMsg(pCmd->payload, "column name expected", sToken.z);
goto _clean;
}
index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL);
str += index;
if (sToken.type != TK_VALUES) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z);
goto _clean;
}
code = doParseInsertStatement(pCmd, &str, &spd, &totalNum);
if (code != TSDB_CODE_SUCCESS) {
goto _clean;
}
} else {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z);
goto _clean;
}
}
@ -1294,7 +1262,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
_clean:
pCmd->curSql = NULL;
pCmd->curSql = NULL;
pCmd->parseFinished = 1;
return code;
}
@ -1307,7 +1275,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
int32_t index = 0;
SSqlCmd *pCmd = &pSql->cmd;
SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false);
assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);
pCmd->count = 0;
@ -1317,7 +1285,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT | pCmd->insertType);
sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
sToken = tStrGetToken(pSql->sqlstr, &index, false);
if (sToken.type != TK_INTO) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z);
}
@ -1450,13 +1418,10 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
SSchema * pSchema = tscGetTableSchema(pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
destroyTableNameList(pCmd);
tfree(pCmd->pTableNameList);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (pCmd->pTableBlockHashList == NULL) {
@ -1495,8 +1460,9 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
char *lineptr = line;
strtolower(line, line);
int32_t len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd, tinfo.precision, &code, tokenBuf);
if (len <= 0 || pTableDataBlock->numOfParams > 0) {
int32_t len = 0;
code = tsParseOneRow(&lineptr, pTableDataBlock, pCmd, tinfo.precision, &len, tokenBuf);
if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) {
pSql->res.code = code;
break;
}


@ -34,6 +34,7 @@
#include "tstoken.h"
#include "tstrbuild.h"
#include "ttokendef.h"
#include "qUtil.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@ -1097,6 +1098,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
return true;
}
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) {
assert(pTagsList != NULL);
@ -1676,18 +1678,6 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
/*
* transfer sql functions that need secondary merge into another format
* in dealing with super table queries such as: count/first/last
*/
if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
return TSDB_CODE_SUCCESS;
}
@ -3065,6 +3055,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
return TSDB_CODE_SUCCESS;
}
static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
if (pColumn == NULL) {
return NULL;
@ -3088,15 +3079,11 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
}
static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter,
SColumnIndex* columnIndex, tSqlExpr* pExpr) {
int16_t colType, tSqlExpr* pExpr) {
const char* msg = "not supported filter condition";
tSqlExpr* pRight = pExpr->pRight;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, columnIndex->tableIndex);
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex->columnIndex);
int16_t colType = pSchema->type;
if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) {
colType = TSDB_DATA_TYPE_BIGINT;
} else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
@ -3301,7 +3288,10 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC
}
pColumn->colIndex = *pIndex;
return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr);
int16_t colType = pSchema->type;
return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, colType, pExpr);
}
static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) {
@ -6030,7 +6020,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName));
tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName));
@ -6039,7 +6029,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
// NOTE: tag column does not add to source column list
SColumnList ids = getColumnList(1, 0, pColIndex->colIndex);
insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr);
insertResultField(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, &ids, bytes, (int8_t)type, name, pExpr);
} else {
// if this query is "group by" normal column, time window query is not allowed
if (isTimeWindowQuery(pQueryInfo)) {
@ -6166,7 +6156,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}
// projection query on super table does not compatible with "group by" syntax
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (tscIsProjectionQuery(pQueryInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@ -6769,6 +6759,313 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** interField) {
tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL, .distinct = false};
int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
// ADD TRUE FOR TEST
if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
++pQueryInfo->havingFieldNum;
size_t n = tscSqlExprNumOfExprs(pQueryInfo);
SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, (int32_t)n - 1);
int32_t slot = tscNumOfFields(pQueryInfo) - 1;
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot);
pInfo->visible = false;
if (pInfo->pFieldFilters == NULL) {
SExprFilter* pFieldFilters = calloc(1, sizeof(SExprFilter));
if (pFieldFilters == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SColumn* pFilters = calloc(1, sizeof(SColumn));
if (pFilters == NULL) {
tfree(pFieldFilters);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pFieldFilters->pFilters = pFilters;
pFieldFilters->pSqlExpr = pSqlExpr;
pSqlExpr->pFilter = pFilters;
pInfo->pFieldFilters = pFieldFilters;
}
pInfo->pFieldFilters->pExpr = pExpr;
*interField = pInfo;
return TSDB_CODE_SUCCESS;
}
int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** pField) {
SInternalField* pInfo = NULL;
for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) {
pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutput - 1 - i);
if (pInfo->pFieldFilters && 0 == tSqlExprCompare(pInfo->pFieldFilters->pExpr, pExpr)) {
*pField = pInfo;
return TSDB_CODE_SUCCESS;
}
}
int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr, &pInfo);
if (ret) {
return ret;
}
*pField = pInfo;
return TSDB_CODE_SUCCESS;
}
static int32_t genExprFilter(SExprFilter * exprFilter) {
exprFilter->fp = taosArrayInit(4, sizeof(__filter_func_t));
if (exprFilter->fp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < exprFilter->pFilters->numOfFilters; ++i) {
SColumnFilterInfo *filterInfo = &exprFilter->pFilters->filterInfo[i];
int32_t lower = filterInfo->lowerRelOptr;
int32_t upper = filterInfo->upperRelOptr;
if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
tscError("invalid rel optr");
return TSDB_CODE_TSC_APP_ERROR;
}
__filter_func_t ffp = getFilterOperator(lower, upper);
if (ffp == NULL) {
tscError("invalid filter info");
return TSDB_CODE_TSC_APP_ERROR;
}
taosArrayPush(exprFilter->fp, &ffp);
}
return TSDB_CODE_SUCCESS;
}
static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t sqlOptr) {
const char* msg1 = "non binary column not support like operator";
const char* msg2 = "invalid operator for binary column in having clause";
const char* msg3 = "invalid operator for bool column in having clause";
SColumn* pColumn = NULL;
SColumnFilterInfo* pColFilter = NULL;
SInternalField* pInfo = NULL;
/*
* in case of a TK_AND filter condition, we first find the corresponding column and build the query condition together
* with the already existing condition.
*/
if (sqlOptr == TK_AND) {
int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
if (ret) {
return ret;
}
pColumn = pInfo->pFieldFilters->pFilters;
// this is a new filter condition on this column
if (pColumn->numOfFilters == 0) {
pColFilter = addColumnFilterInfo(pColumn);
} else { // update the existed column filter information, find the filter info here
pColFilter = &pColumn->filterInfo[0];
}
if (pColFilter == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
} else if (sqlOptr == TK_OR) {
int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
if (ret) {
return ret;
}
pColumn = pInfo->pFieldFilters->pFilters;
// TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2"
pColFilter = addColumnFilterInfo(pColumn);
if (pColFilter == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
} else { // error;
return TSDB_CODE_TSC_INVALID_SQL;
}
pColFilter->filterstr =
((pInfo->field.type == TSDB_DATA_TYPE_BINARY || pInfo->field.type == TSDB_DATA_TYPE_NCHAR) ? 1 : 0);
if (pColFilter->filterstr) {
if (pExpr->tokenId != TK_EQ
&& pExpr->tokenId != TK_NE
&& pExpr->tokenId != TK_ISNULL
&& pExpr->tokenId != TK_NOTNULL
&& pExpr->tokenId != TK_LIKE
) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
if (pExpr->tokenId == TK_LIKE) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pInfo->field.type == TSDB_DATA_TYPE_BOOL) {
if (pExpr->tokenId != TK_EQ && pExpr->tokenId != TK_NE) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
}
int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr);
if (ret) {
return ret;
}
return genExprFilter(pInfo->pFieldFilters);
}
int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t parentOptr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
const char* msg1 = "invalid having clause";
tSqlExpr* pLeft = pExpr->pLeft;
tSqlExpr* pRight = pExpr->pRight;
if (pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) {
int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId);
}
if (pLeft == NULL || pRight == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pLeft->type == pRight->type) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
exchangeExpr(pExpr);
pLeft = pExpr->pLeft;
pRight = pExpr->pRight;
if (pLeft->type != SQL_NODE_SQLFUNCTION) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pRight->type != SQL_NODE_VALUE) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pExpr->tokenId >= TK_BITAND) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
//if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) {
// return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
//}
if (pLeft->pParam) {
size_t size = taosArrayGetSize(pLeft->pParam);
for (int32_t i = 0; i < size; i++) {
tSqlExprItem* pParamElem = taosArrayGet(pLeft->pParam, i);
if (pParamElem->pNode->tokenId != TK_ALL &&
pParamElem->pNode->tokenId != TK_ID &&
pParamElem->pNode->tokenId != TK_STRING &&
pParamElem->pNode->tokenId != TK_INTEGER &&
pParamElem->pNode->tokenId != TK_FLOAT) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pParamElem->pNode->tokenId == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pParamElem->pNode->tokenId == TK_ID) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (index.columnIndex <= 0 ||
index.columnIndex >= tscGetNumOfColumns(pTableMeta)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
}
}
pLeft->functionId = isValidFunction(pLeft->operand.z, pLeft->operand.n);
if (pLeft->functionId < 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
return handleExprInHavingClause(pCmd, pQueryInfo, pExpr, parentOptr);
}
int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t timeWindowQuery) {
const char* msg1 = "having only works with group by";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "invalid expression in having clause";
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->groupbyExpr.numOfGroupCols <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pExpr->pLeft == NULL || pExpr->pRight == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
int32_t ret = 0;
if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) {
return ret;
}
//REDO function check
if (!functionCompatibleCheck(pQueryInfo, joinQuery, timeWindowQuery)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
}
int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t index) {
assert(pQuerySqlNode != NULL && (pQuerySqlNode->from == NULL || taosArrayGetSize(pQuerySqlNode->from->tableList) > 0));
@ -6934,6 +7231,23 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
}
}
// parse the having clause in the first place
if (parseHavingClause(pQueryInfo, pQuerySqlNode->pHaving, pCmd, isSTable, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
/*
* transfer sql functions that need secondary merge into another format
* in dealing with super table queries such as: count/first/last
*/
if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -7125,3 +7439,10 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
return false;
}


@ -520,7 +520,7 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qhandle:%" PRIX64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId);
} else {
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
assert(vgIndex >= 0 && vgIndex < numOfVgroups);
@ -862,8 +862,44 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pSqlFuncExpr->functionId = htons(pExpr->functionId);
pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams);
pSqlFuncExpr->resColId = htons(pExpr->resColId);
if (pTableMeta->tableType != TSDB_SUPER_TABLE && pExpr->pFilter && pExpr->pFilter->numOfFilters > 0) {
pSqlFuncExpr->filterNum = htonl(pExpr->pFilter->numOfFilters);
} else {
pSqlFuncExpr->filterNum = 0;
}
pMsg += sizeof(SSqlFuncMsg);
if (pSqlFuncExpr->filterNum) {
pMsg += sizeof(SColumnFilterInfo) * pExpr->pFilter->numOfFilters;
// append the filter information after the basic column information
for (int32_t f = 0; f < pExpr->pFilter->numOfFilters; ++f) {
SColumnFilterInfo *pColFilter = &pExpr->pFilter->filterInfo[f];
SColumnFilterInfo *pFilterMsg = &pSqlFuncExpr->filterInfo[f];
pFilterMsg->filterstr = htons(pColFilter->filterstr);
if (pColFilter->filterstr) {
pFilterMsg->len = htobe64(pColFilter->len);
memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1));
pMsg += (pColFilter->len + 1); // append the additional filter binary info
} else {
pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi);
pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi);
}
pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr);
pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr);
if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) {
tscError("invalid filter info");
return TSDB_CODE_TSC_INVALID_SQL;
}
}
}
for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log
pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
pSqlFuncExpr->arg[j].argBytes = htons(pExpr->param[j].nLen);


@ -2986,7 +2986,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
}
tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
pVgroup->epAddr[pSql->epSet.inUse].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
if (pSql->res.qId == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
tscRetrieveFromDnodeCallBack(param, pSql, 0);


@ -271,6 +271,41 @@ bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
return false;
}
bool tscIsTopbotQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr == NULL) {
continue;
}
int32_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
return true;
}
}
return false;
}
int32_t tscGetTopbotQueryParam(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr == NULL) {
continue;
}
int32_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
return (int32_t) pExpr->param[0].i64;
}
}
return 0;
}
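tscIsTopbotQuery and tscGetTopbotQueryParam let callers detect a top()/bottom() query and recover its row parameter. A small sketch of one way they might be used together (the wrapper name resultRowsPerGroup is hypothetical):

// Hypothetical caller: how many result rows to reserve per group.
static int32_t resultRowsPerGroup(SQueryInfo* pQueryInfo) {
  if (tscIsTopbotQuery(pQueryInfo)) {
    return tscGetTopbotQueryParam(pQueryInfo);  // the N in top(col, N) / bottom(col, N)
  }
  return 1;  // ordinary aggregates emit a single row per group
}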
void tscClearInterpInfo(SQueryInfo* pQueryInfo) {
if (!tscIsPointInterpQuery(pQueryInfo)) {
return;
@ -415,6 +450,20 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
tfree(pCmd->pQueryInfo);
}
void destroyTableNameList(SSqlCmd* pCmd) {
if (pCmd->numOfTables == 0) {
assert(pCmd->pTableNameList == NULL);
return;
}
for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
tfree(pCmd->pTableNameList[i]);
}
pCmd->numOfTables = 0;
tfree(pCmd->pTableNameList);
}
void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
pCmd->command = 0;
pCmd->numOfCols = 0;
@ -424,14 +473,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
pCmd->parseFinished = 0;
pCmd->autoCreated = 0;
for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
tfree(pCmd->pTableNameList[i]);
}
}
pCmd->numOfTables = 0;
tfree(pCmd->pTableNameList);
destroyTableNameList(pCmd);
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, removeMeta);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
@ -548,6 +590,11 @@ void tscFreeSqlObj(SSqlObj* pSql) {
free(pSql);
}
void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo) {
tfree(pColInfo->boundedColumns);
tfree(pColInfo->cols);
}
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
if (pDataBlock == NULL) {
return;
@ -568,6 +615,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
tscDestroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
tfree(pDataBlock);
}
@ -678,7 +726,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
* @param dataBlocks
* @return
*/
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name,
int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOffset, SName* name,
STableMeta* pTableMeta, STableDataBlocks** dataBlocks) {
STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
if (dataBuf == NULL) {
@ -686,10 +734,12 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
dataBuf->nAllocSize = (uint32_t)initialSize;
dataBuf->headerSize = startOffset; // the header size will always be the startOffset value, reserved for the submit block header
dataBuf->nAllocSize = (uint32_t)defaultSize;
dataBuf->headerSize = startOffset;
// the header size will always be the startOffset value, reserved for the submit block header
if (dataBuf->nAllocSize <= dataBuf->headerSize) {
dataBuf->nAllocSize = dataBuf->headerSize*2;
dataBuf->nAllocSize = dataBuf->headerSize * 2;
}
dataBuf->pData = calloc(1, dataBuf->nAllocSize);
@ -699,25 +749,31 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
dataBuf->ordered = true;
dataBuf->prevTS = INT64_MIN;
// Here we keep the tableMeta to avoid it being removed by other threads.
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
dataBuf->rowSize = rowSize;
dataBuf->size = startOffset;
SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
SSchema* pSchema = tscGetTableSchema(dataBuf->pTableMeta);
tscSetBoundColumnInfo(pColInfo, pSchema, dataBuf->pTableMeta->tableInfo.numOfColumns);
dataBuf->ordered = true;
dataBuf->prevTS = INT64_MIN;
dataBuf->rowSize = rowSize;
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
dataBuf->vgId = dataBuf->pTableMeta->vgId;
tNameAssign(&dataBuf->tableName, name);
// Here we keep the tableMeta to avoid it being removed by other threads.
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize,
SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks, SArray* pBlockList) {
SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks,
SArray* pBlockList) {
*dataBlocks = NULL;
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id));
if (t1 != NULL) {
@ -826,6 +882,8 @@ static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
tfree(pCmd->pTableNameList[i]);
pCmd->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
}
@ -942,7 +1000,7 @@ bool tscIsInsertData(char* sqlstr) {
int32_t index = 0;
do {
SStrToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL);
SStrToken t0 = tStrGetToken(sqlstr, &index, false);
if (t0.type != TK_LP) {
return t0.type == TK_INSERT || t0.type == TK_IMPORT;
}
@ -987,6 +1045,7 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
.pSqlExpr = NULL,
.pArithExprInfo = NULL,
.visible = true,
.pFieldFilters = NULL,
};
info.field = *pField;
@ -999,6 +1058,7 @@ SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_F
.pSqlExpr = NULL,
.pArithExprInfo = NULL,
.visible = true,
.pFieldFilters = NULL,
};
info.field = *field;
@ -1072,6 +1132,22 @@ int32_t tscGetResRowLength(SArray* pExprList) {
return size;
}
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
tfree(pFilterInfo[i].pz);
}
}
tfree(pFilterInfo);
}
static void tscColumnDestroy(SColumn* pCol) {
destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters);
free(pCol);
}
void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
if (pFieldInfo == NULL) {
return;
@ -1092,6 +1168,11 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
tfree(pInfo->pArithExprInfo);
}
if (pInfo->pFieldFilters != NULL) {
tscColumnDestroy(pInfo->pFieldFilters->pFilters);
tfree(pInfo->pFieldFilters);
}
}
taosArrayDestroy(pFieldInfo->internalField);
@ -1353,15 +1434,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
return taosArrayGetP(pColumnList, i);
}
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
tfree(pFilterInfo[i].pz);
}
}
tfree(pFilterInfo);
}
SColumn* tscColumnClone(const SColumn* src) {
assert(src != NULL);
@ -1378,10 +1451,6 @@ SColumn* tscColumnClone(const SColumn* src) {
return dst;
}
static void tscColumnDestroy(SColumn* pCol) {
destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters);
free(pCol);
}
void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex) {
assert(src != NULL && dst != NULL);


@ -294,7 +294,7 @@ void cqStop(void *handle) {
pthread_mutex_unlock(&pContext->mutex);
}
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema) {
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start) {
if (tsEnableStream == 0) {
return NULL;
}
@ -326,7 +326,11 @@ void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, ch
pObj->rid = taosAddRef(cqObjRef, pObj);
cqCreateStream(pContext, pObj);
if(start && pContext->master) {
cqCreateStream(pContext, pObj);
} else {
pObj->pContext = pContext;
}
rid = pObj->rid;


@ -70,7 +70,7 @@ int main(int argc, char *argv[]) {
tdDestroyTSchemaBuilder(&schemaBuilder);
for (int sid =1; sid<10; ++sid) {
cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema);
cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema, 1);
}
tdFreeSchema(pSchema);


@ -88,7 +88,7 @@ void* qOpenQueryMgmt(int32_t vgId);
void qQueryMgmtNotifyClosed(void* pExecutor);
void qQueryMgmtReOpen(void *pExecutor);
void qCleanupQueryMgmt(void* pExecutor);
void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo);
void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo);
void** qAcquireQInfo(void* pMgmt, uint64_t key);
void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle);
bool checkQIdEqual(void *qHandle, uint64_t qId);


@ -399,36 +399,6 @@ typedef struct SColIndex {
char name[TSDB_COL_NAME_LEN]; // TODO remove it
} SColIndex;
/* sql function msg, to describe the message to vnode about sql function
* operations in select clause */
typedef struct SSqlFuncMsg {
int16_t functionId;
int16_t numOfParams;
int16_t resColId; // result column id, id of the current output column
int16_t colType;
int16_t colBytes;
SColIndex colInfo;
struct ArgElem {
int16_t argType;
int16_t argBytes;
union {
double d;
int64_t i64;
char * pz;
} argValue;
} arg[3];
} SSqlFuncMsg;
typedef struct SExprInfo {
SSqlFuncMsg base;
struct tExprNode* pExpr;
int16_t bytes;
int16_t type;
int32_t interBytes;
int64_t uid;
} SExprInfo;
typedef struct SColumnFilterInfo {
int16_t lowerRelOptr;
@ -451,6 +421,42 @@ typedef struct SColumnFilterInfo {
};
} SColumnFilterInfo;
/* sql function msg, to describe the message to vnode about sql function
* operations in select clause */
typedef struct SSqlFuncMsg {
int16_t functionId;
int16_t numOfParams;
int16_t resColId; // result column id, id of the current output column
int16_t colType;
int16_t colBytes;
SColIndex colInfo;
struct ArgElem {
int16_t argType;
int16_t argBytes;
union {
double d;
int64_t i64;
char * pz;
} argValue;
} arg[3];
int32_t filterNum;
SColumnFilterInfo filterInfo[];
} SSqlFuncMsg;
typedef struct SExprInfo {
SColumnFilterInfo * pFilter;
struct tExprNode* pExpr;
int16_t bytes;
int16_t type;
int32_t interBytes;
int64_t uid;
SSqlFuncMsg base;
} SExprInfo;
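With the new filterNum field and the trailing filterInfo[] flexible array member, SSqlFuncMsg is no longer a fixed-size message: each one is followed by filterNum SColumnFilterInfo entries plus a (len + 1)-byte payload for every string filter, which is exactly how tscBuildQueryMsg advances pMsg above. A sketch of that size computation (the helper name is hypothetical, and the fields are assumed to still be in host byte order, i.e. before the hton* conversions):

// Hypothetical helper mirroring the pMsg advances in tscBuildQueryMsg:
// fixed header + one SColumnFilterInfo per filter + string filter payloads.
static size_t sqlFuncMsgSerializedSize(int32_t filterNum, const SColumnFilterInfo *pFilters) {
  size_t size = sizeof(SSqlFuncMsg) + (size_t)filterNum * sizeof(SColumnFilterInfo);
  for (int32_t f = 0; f < filterNum; ++f) {
    if (pFilters[f].filterstr) {
      size += (size_t)pFilters[f].len + 1;  // string filter bytes, NUL terminator included
    }
  }
  return size;
}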
/*
* for client side struct, we only need the column id, type, bytes are not necessary
* But for data in vnode side, we need all the following information.


@ -42,7 +42,7 @@ void cqStart(void *handle);
void cqStop(void *handle);
// cqCreate is called by TSDB to start an instance of CQ
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema);
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start);
// cqDrop is called by TSDB to stop an instance of CQ, handle is the return value of cqCreate
void cqDrop(void *handle);


@ -51,7 +51,7 @@ typedef struct {
void *cqH;
int (*notifyStatus)(void *, int status, int eno);
int (*eventCallBack)(void *);
void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema);
void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema, int start);
void (*cqDropFunc)(void *handle);
} STsdbAppH;


@ -205,6 +205,11 @@
#define TK_VALUES 186
#define TK_SPACE 300
#define TK_COMMENT 301
#define TK_ILLEGAL 302


@ -27,7 +27,7 @@
#define MAX_IP_SIZE 20
#define MAX_PASSWORD_SIZE 20
#define MAX_HISTORY_SIZE 1000
#define MAX_COMMAND_SIZE 65536
#define MAX_COMMAND_SIZE 1048586
#define HISTORY_FILE ".taos_history"
#define DEFAULT_RES_SHOW_NUM 100


@ -238,7 +238,7 @@ void resetCommand(Command *cmd, const char s[]) {
clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
memset(cmd->buffer, 0, MAX_COMMAND_SIZE);
memset(cmd->command, 0, MAX_COMMAND_SIZE);
strcpy(cmd->command, s);
strncpy(cmd->command, s, MAX_COMMAND_SIZE);
int size = 0;
int width = 0;
getMbSizeInfo(s, &size, &width);


@ -9,19 +9,18 @@ IF (GIT_FOUND)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1)
STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
EXECUTE_PROCESS(
IF (TD_LINUX)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
ENDIF (TD_LINUX)
MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS})
ELSE()
MESSAGE("Git not found")
@ -29,9 +28,9 @@ ELSE()
SET(TAOSDEMO_STATUS "unknown")
ENDIF (GIT_FOUND)
STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1)
STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1)
MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1})
STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS)
STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS)
IF (TAOSDEMO_STATUS MATCHES "M")
SET(TAOSDEMO_STATUS "modified")

(File diff suppressed because it is too large.)


@ -39,6 +39,22 @@ typedef struct {
int8_t type;
} SOColInfo;
#define debugPrint(fmt, ...) \
do { if (g_args.debug_print || g_args.verbose_print) \
fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0)
#define verbosePrint(fmt, ...) \
do { if (g_args.verbose_print) \
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
#define performancePrint(fmt, ...) \
do { if (g_args.performance_print) \
fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
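These printf-style wrappers gate taosdump's diagnostics on the corresponding g_args flags, so they print nothing unless the matching command-line switch is enabled. Two hedged usage lines (the messages and file names are made up):

debugPrint("%s() LN%d, opening dump file %s\n", __func__, __LINE__, "demo.sql");  // shown only when debug/verbose printing is on
errorPrint("failed to open %s\n", "dump_result.txt");                             // always written to stderr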
// -------------------------- SHOW DATABASE INTERFACE-----------------------
enum _show_db_index {
TSDB_SHOW_DB_NAME_INDEX,
@ -46,7 +62,7 @@ enum _show_db_index {
TSDB_SHOW_DB_NTABLES_INDEX,
TSDB_SHOW_DB_VGROUPS_INDEX,
TSDB_SHOW_DB_REPLICA_INDEX,
TSDB_SHOW_DB_QUORUM_INDEX,
TSDB_SHOW_DB_QUORUM_INDEX,
TSDB_SHOW_DB_DAYS_INDEX,
TSDB_SHOW_DB_KEEP_INDEX,
TSDB_SHOW_DB_CACHE_INDEX,
@ -101,10 +117,10 @@ typedef struct {
char name[TSDB_DB_NAME_LEN + 1];
char create_time[32];
int32_t ntables;
int32_t vgroups;
int32_t vgroups;
int16_t replica;
int16_t quorum;
int16_t days;
int16_t days;
char keeplist[32];
//int16_t daysToKeep;
//int16_t daysToKeep1;
@ -172,48 +188,50 @@ static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-dat
/* The options we understand. */
static struct argp_option options[] = {
// connection option
{"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0},
{"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0},
{"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0},
{"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0},
#ifdef _TD_POWER_
{"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0},
{"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0},
#else
{"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0},
{"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0},
#endif
{"port", 'P', "PORT", 0, "Port to connect", 0},
{"cversion", 'v', "CVERSION", 0, "client version", 0},
{"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0},
{"port", 'P', "PORT", 0, "Port to connect", 0},
{"cversion", 'v', "CVERSION", 0, "client version", 0},
{"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0},
// input/output file
{"outpath", 'o', "OUTPATH", 0, "Output file path.", 1},
{"inpath", 'i', "INPATH", 0, "Input file path.", 1},
{"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1},
{"outpath", 'o', "OUTPATH", 0, "Output file path.", 1},
{"inpath", 'i', "INPATH", 0, "Input file path.", 1},
{"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1},
#ifdef _TD_POWER_
{"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
{"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
#else
{"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
{"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
#endif
{"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
{"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
// dump unit options
{"all-databases", 'A', 0, 0, "Dump all databases.", 2},
{"databases", 'B', 0, 0, "Dump assigned databases", 2},
{"all-databases", 'A', 0, 0, "Dump all databases.", 2},
{"databases", 'B', 0, 0, "Dump assigned databases", 2},
// dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 3},
{"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump.", 3},
{"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
{"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3},
{"schemaonly", 's', 0, 0, "Only dump schema.", 3},
{"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Epoch or ISO8601/RFC3339 format is acceptable. For example: 2017-10-01T18:00:00+0800", 3},
{"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
{"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3},
{"debug", 'g', 0, 0, "Print debug info.", 1},
{"verbose", 'v', 0, 0, "Print verbose debug info.", 1},
{0}};
/* Used by main to communicate with parse_opt. */
struct arguments {
typedef struct arguments {
// connection option
char *host;
char *user;
char *password;
uint16_t port;
uint16_t port;
char cversion[12];
uint16_t mysqlFlag;
// output file
@ -238,9 +256,12 @@ struct arguments {
int32_t thread_num;
int abort;
char **arg_list;
int arg_list_len;
bool isDumpIn;
};
int arg_list_len;
bool isDumpIn;
bool debug_print;
bool verbose_print;
bool performance_print;
} SArguments;
/* Parse a single option. */
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
@ -286,6 +307,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN);
wordfree(&full_path);
break;
case 'g':
arguments->debug_print = true;
break;
case 'i':
arguments->isDumpIn = true;
if (wordexp(arg, &full_path, 0) != 0) {
@ -387,7 +411,7 @@ int taosCheckParam(struct arguments *arguments);
void taosFreeDbInfos();
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);
struct arguments tsArguments = {
struct arguments g_args = {
// connection option
NULL,
"root",
@ -400,18 +424,18 @@ struct arguments tsArguments = {
"",
0,
// outpath and inpath
"",
"",
"",
"./dump_result.txt",
NULL,
// dump unit option
false,
false,
false,
// dump format option
false,
false,
0,
INT64_MAX,
false,
false,
0,
INT64_MAX,
1,
TSDB_MAX_SQL_LEN,
1,
@ -419,11 +443,14 @@ struct arguments tsArguments = {
// other options
5,
0,
NULL,
0,
false
NULL,
0,
false,
false, // debug_print
false, // verbose_print
false // performance_print
};
static int queryDbImpl(TAOS *taos, char *command) {
int i;
TAOS_RES *res = NULL;
@ -434,7 +461,7 @@ static int queryDbImpl(TAOS *taos, char *command) {
taos_free_result(res);
res = NULL;
}
res = taos_query(taos, command);
code = taos_errno(res);
if (0 == code) {
@ -453,13 +480,48 @@ static int queryDbImpl(TAOS *taos, char *command) {
return 0;
}
static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-E") == 0) {
char *tmp = strdup(argv[++i]);
if (tmp) {
int64_t tmpEpoch;
if (strchr(tmp, ':') && strchr(tmp, '-')) {
if (TSDB_CODE_SUCCESS != taosParseTime(
tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) {
fprintf(stderr, "Input end time error!\n");
free(tmp);
return;
}
} else {
tmpEpoch = atoll(tmp);
}
sprintf(argv[i], "%"PRId64"", tmpEpoch);
debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
__func__, __LINE__, tmp, i, argv[i]);
free(tmp);
} else {
errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__);
exit(-1);
}
} else if (strcmp(argv[i], "-g") == 0) {
arguments->debug_print = true;
}
}
}
int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
argp_parse(&argp, argc, argv, 0, 0, &tsArguments);
parse_args(argc, argv, &g_args);
if (tsArguments.abort) {
argp_parse(&argp, argc, argv, 0, 0, &g_args);
if (g_args.abort) {
#ifndef _ALPINE
error(10, 0, "ABORTED");
#else
@ -469,81 +531,82 @@ int main(int argc, char *argv[]) {
printf("====== arguments config ======\n");
{
printf("host: %s\n", tsArguments.host);
printf("user: %s\n", tsArguments.user);
printf("password: %s\n", tsArguments.password);
printf("port: %u\n", tsArguments.port);
printf("cversion: %s\n", tsArguments.cversion);
printf("mysqlFlag: %d\n", tsArguments.mysqlFlag);
printf("outpath: %s\n", tsArguments.outpath);
printf("inpath: %s\n", tsArguments.inpath);
printf("resultFile: %s\n", tsArguments.resultFile);
printf("encode: %s\n", tsArguments.encode);
printf("all_databases: %d\n", tsArguments.all_databases);
printf("databases: %d\n", tsArguments.databases);
printf("schemaonly: %d\n", tsArguments.schemaonly);
printf("with_property: %d\n", tsArguments.with_property);
printf("start_time: %" PRId64 "\n", tsArguments.start_time);
printf("end_time: %" PRId64 "\n", tsArguments.end_time);
printf("data_batch: %d\n", tsArguments.data_batch);
printf("max_sql_len: %d\n", tsArguments.max_sql_len);
printf("table_batch: %d\n", tsArguments.table_batch);
printf("thread_num: %d\n", tsArguments.thread_num);
printf("allow_sys: %d\n", tsArguments.allow_sys);
printf("abort: %d\n", tsArguments.abort);
printf("isDumpIn: %d\n", tsArguments.isDumpIn);
printf("arg_list_len: %d\n", tsArguments.arg_list_len);
printf("host: %s\n", g_args.host);
printf("user: %s\n", g_args.user);
printf("password: %s\n", g_args.password);
printf("port: %u\n", g_args.port);
printf("cversion: %s\n", g_args.cversion);
printf("mysqlFlag: %d\n", g_args.mysqlFlag);
printf("outpath: %s\n", g_args.outpath);
printf("inpath: %s\n", g_args.inpath);
printf("resultFile: %s\n", g_args.resultFile);
printf("encode: %s\n", g_args.encode);
printf("all_databases: %d\n", g_args.all_databases);
printf("databases: %d\n", g_args.databases);
printf("schemaonly: %d\n", g_args.schemaonly);
printf("with_property: %d\n", g_args.with_property);
printf("start_time: %" PRId64 "\n", g_args.start_time);
printf("end_time: %" PRId64 "\n", g_args.end_time);
printf("data_batch: %d\n", g_args.data_batch);
printf("max_sql_len: %d\n", g_args.max_sql_len);
printf("table_batch: %d\n", g_args.table_batch);
printf("thread_num: %d\n", g_args.thread_num);
printf("allow_sys: %d\n", g_args.allow_sys);
printf("abort: %d\n", g_args.abort);
printf("isDumpIn: %d\n", g_args.isDumpIn);
printf("arg_list_len: %d\n", g_args.arg_list_len);
printf("debug_print: %d\n", g_args.debug_print);
for (int32_t i = 0; i < tsArguments.arg_list_len; i++) {
printf("arg_list[%d]: %s\n", i, tsArguments.arg_list[i]);
for (int32_t i = 0; i < g_args.arg_list_len; i++) {
printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]);
}
}
}
printf("==============================\n");
if (tsArguments.cversion[0] != 0){
tstrncpy(version, tsArguments.cversion, 11);
if (g_args.cversion[0] != 0){
tstrncpy(version, g_args.cversion, 11);
}
if (taosCheckParam(&tsArguments) < 0) {
if (taosCheckParam(&g_args) < 0) {
exit(EXIT_FAILURE);
}
g_fpOfResult = fopen(tsArguments.resultFile, "a");
g_fpOfResult = fopen(g_args.resultFile, "a");
if (NULL == g_fpOfResult) {
fprintf(stderr, "Failed to open %s for save result\n", tsArguments.resultFile);
fprintf(stderr, "Failed to open %s for save result\n", g_args.resultFile);
return 1;
};
fprintf(g_fpOfResult, "#############################################################################\n");
fprintf(g_fpOfResult, "============================== arguments config =============================\n");
{
fprintf(g_fpOfResult, "host: %s\n", tsArguments.host);
fprintf(g_fpOfResult, "user: %s\n", tsArguments.user);
fprintf(g_fpOfResult, "password: %s\n", tsArguments.password);
fprintf(g_fpOfResult, "port: %u\n", tsArguments.port);
fprintf(g_fpOfResult, "cversion: %s\n", tsArguments.cversion);
fprintf(g_fpOfResult, "mysqlFlag: %d\n", tsArguments.mysqlFlag);
fprintf(g_fpOfResult, "outpath: %s\n", tsArguments.outpath);
fprintf(g_fpOfResult, "inpath: %s\n", tsArguments.inpath);
fprintf(g_fpOfResult, "resultFile: %s\n", tsArguments.resultFile);
fprintf(g_fpOfResult, "encode: %s\n", tsArguments.encode);
fprintf(g_fpOfResult, "all_databases: %d\n", tsArguments.all_databases);
fprintf(g_fpOfResult, "databases: %d\n", tsArguments.databases);
fprintf(g_fpOfResult, "schemaonly: %d\n", tsArguments.schemaonly);
fprintf(g_fpOfResult, "with_property: %d\n", tsArguments.with_property);
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", tsArguments.start_time);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", tsArguments.end_time);
fprintf(g_fpOfResult, "data_batch: %d\n", tsArguments.data_batch);
fprintf(g_fpOfResult, "max_sql_len: %d\n", tsArguments.max_sql_len);
fprintf(g_fpOfResult, "table_batch: %d\n", tsArguments.table_batch);
fprintf(g_fpOfResult, "thread_num: %d\n", tsArguments.thread_num);
fprintf(g_fpOfResult, "allow_sys: %d\n", tsArguments.allow_sys);
fprintf(g_fpOfResult, "abort: %d\n", tsArguments.abort);
fprintf(g_fpOfResult, "isDumpIn: %d\n", tsArguments.isDumpIn);
fprintf(g_fpOfResult, "arg_list_len: %d\n", tsArguments.arg_list_len);
fprintf(g_fpOfResult, "host: %s\n", g_args.host);
fprintf(g_fpOfResult, "user: %s\n", g_args.user);
fprintf(g_fpOfResult, "password: %s\n", g_args.password);
fprintf(g_fpOfResult, "port: %u\n", g_args.port);
fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile);
fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
fprintf(g_fpOfResult, "all_databases: %d\n", g_args.all_databases);
fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
fprintf(g_fpOfResult, "schemaonly: %d\n", g_args.schemaonly);
fprintf(g_fpOfResult, "with_property: %d\n", g_args.with_property);
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch);
fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num);
fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys);
fprintf(g_fpOfResult, "abort: %d\n", g_args.abort);
fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn);
fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len);
for (int32_t i = 0; i < tsArguments.arg_list_len; i++) {
fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, tsArguments.arg_list[i]);
for (int32_t i = 0; i < g_args.arg_list_len; i++) {
fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]);
}
}
@ -552,11 +615,11 @@ int main(int argc, char *argv[]) {
time_t tTime = time(NULL);
struct tm tm = *localtime(&tTime);
if (tsArguments.isDumpIn) {
if (g_args.isDumpIn) {
fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n");
fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (taosDumpIn(&tsArguments) < 0) {
if (taosDumpIn(&g_args) < 0) {
fprintf(g_fpOfResult, "\n");
fclose(g_fpOfResult);
return -1;
@ -565,7 +628,7 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n");
fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (taosDumpOut(&tsArguments) < 0) {
if (taosDumpOut(&g_args) < 0) {
fprintf(g_fpOfResult, "\n");
fclose(g_fpOfResult);
return -1;
@ -573,9 +636,9 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n");
fprintf(g_fpOfResult, "# total database count: %d\n", g_resultStatistics.totalDatabasesOfDumpOut);
fprintf(g_fpOfResult, "# total super table count: %d\n", g_resultStatistics.totalSuperTblsOfDumpOut);
fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", g_resultStatistics.totalChildTblsOfDumpOut);
fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", g_resultStatistics.totalRowsOfDumpOut);
fprintf(g_fpOfResult, "# total super table count: %d\n", g_resultStatistics.totalSuperTblsOfDumpOut);
fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", g_resultStatistics.totalChildTblsOfDumpOut);
fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", g_resultStatistics.totalRowsOfDumpOut);
}
fprintf(g_fpOfResult, "\n");
@ -1236,8 +1299,8 @@ void* taosDumpOutWorkThreadFp(void *arg)
FILE *fp = NULL;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (tsArguments.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex);
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
}
@ -1270,7 +1333,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon, pThread->dbName);
int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName);
if (ret >= 0) {
// TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++;
@ -1282,13 +1345,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
}
tablesInOneFile++;
if (tablesInOneFile >= tsArguments.table_batch) {
if (tablesInOneFile >= g_args.table_batch) {
fclose(fp);
tablesInOneFile = 0;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (tsArguments.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
}
@ -1491,14 +1554,14 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
taos_free_result(res);
lseek(fd, 0, SEEK_SET);
int maxThreads = tsArguments.thread_num;
int maxThreads = g_args.thread_num;
int tableOfPerFile ;
if (numOfTable <= tsArguments.thread_num) {
if (numOfTable <= g_args.thread_num) {
tableOfPerFile = 1;
maxThreads = numOfTable;
} else {
tableOfPerFile = numOfTable / tsArguments.thread_num;
if (0 != numOfTable % tsArguments.thread_num) {
tableOfPerFile = numOfTable / g_args.thread_num;
if (0 != numOfTable % g_args.thread_num) {
tableOfPerFile += 1;
}
}
@ -1806,9 +1869,9 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
//}
}
fprintf(fp, "\n");
fprintf(fp, "\n");
atomic_add_fetch_64(&totalDumpOutRows, totalRows);
taos_free_result(tmpResult);
free(tmpBuffer);
return totalRows;
@ -1824,7 +1887,7 @@ int taosCheckParam(struct arguments *arguments) {
fprintf(stderr, "start time is larger than end time\n");
return -1;
}
if (arguments->arg_list_len == 0) {
if ((!arguments->all_databases) && (!arguments->isDumpIn)) {
fprintf(stderr, "taosdump requires parameters\n");
@ -2214,7 +2277,7 @@ void* taosDumpInWorkThreadFp(void *arg)
continue;
}
fprintf(stderr, "Success Open input file: %s\n", SQLFileName);
taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, tsArguments.encode, SQLFileName);
taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, g_args.encode, SQLFileName);
}
}


@ -628,6 +628,11 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
bnNotify();
}
if (!tsEnableBalance) {
int32_t numOfMnodes = mnodeGetMnodesNum();
if (numOfMnodes < tsNumOfMnodes) bnNotify();
}
if (openVnodes != pDnode->openVnodes) {
mnodeCheckUnCreatedVgroup(pDnode, pStatus->load, openVnodes);
}


@ -381,6 +381,8 @@ static bool mnodeAllOnline() {
void *pIter = NULL;
bool allOnline = true;
sdbUpdateMnodeRoles();
while (1) {
SMnodeObj *pMnode = NULL;
pIter = mnodeGetNextMnode(pIter, &pMnode);


@ -315,6 +315,10 @@ void sdbUpdateAsync() {
taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr);
}
static int node_cmp(const void *l, const void *r) {
return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId;
}
int32_t sdbUpdateSync(void *pMnodes) {
SMInfos *pMinfos = pMnodes;
if (!mnodeIsRunning()) {
@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) {
return TSDB_CODE_SUCCESS;
}
qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp);
sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica);
for (int32_t i = 0; i < syncCfg.replica; ++i) {
sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn,
@ -1131,4 +1137,4 @@ static void *sdbWorkerFp(void *pWorker) {
int32_t sdbGetReplicaNum() {
return tsSdbMgmt.cfg.replica;
}
}


@ -190,6 +190,8 @@ typedef struct SQuery {
bool stabledev; // super table stddev query
int32_t interBufSize; // intermediate buffer sizse
int32_t havingNum; // having expr number
SOrderVal order;
int16_t numOfCols;
int16_t numOfTags;
@ -285,6 +287,7 @@ enum OPERATOR_TYPE_E {
OP_Fill = 13,
OP_MultiTableAggregate = 14,
OP_MultiTableTimeInterval = 15,
OP_Having = 16,
};
typedef struct SOperatorInfo {
@ -402,6 +405,11 @@ typedef struct SOffsetOperatorInfo {
int64_t offset;
} SOffsetOperatorInfo;
typedef struct SHavingOperatorInfo {
SArray* fp;
} SHavingOperatorInfo;
typedef struct SFillOperatorInfo {
SFillInfo *pFillInfo;
SSDataBlock *pRes;


@ -98,6 +98,7 @@ typedef struct SQuerySqlNode {
SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional]
SStrToken sqlstr; // sql string in select clause
struct tSqlExpr *pHaving; // having clause [optional]
} SQuerySqlNode;
typedef struct STableNamePair {
@ -253,6 +254,11 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder);
SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index);
SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder);
tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType);
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);
tSqlExpr *tSqlExprClone(tSqlExpr *pSrc);
SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias);
SFromInfo *setSubquery(SFromInfo* pFromInfo, SQuerySqlNode *pSqlNode);
void *destroyFromInfo(SFromInfo* pFromInfo);
@ -272,7 +278,7 @@ void tSqlExprListDestroy(SArray *pList);
SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps,
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit);
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type);


@ -52,11 +52,20 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int
return pResultRowInfo->pResult[slot];
}
static FORCE_INLINE char *getPosInResultPage(SQuery *pQuery, tFilePage* page, int32_t rowOffset, int16_t offset) {
assert(rowOffset >= 0 && pQuery != NULL);
static FORCE_INLINE char* getPosInResultPage(SQueryRuntimeEnv* pRuntimeEnv, tFilePage* page, int32_t rowOffset,
int16_t offset, int32_t size) {
assert(rowOffset >= 0 && pRuntimeEnv != NULL);
SQuery* pQuery = pRuntimeEnv->pQuery;
int64_t pageSize = pRuntimeEnv->pResultBuf->pageSize;
int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery);
return ((char *)page->data) + rowOffset + offset * numOfRows;
// buffer overflow check
int64_t bufEnd = (rowOffset + offset * numOfRows + size);
assert(page->num <= pageSize && bufEnd <= page->num);
return ((char*)page->data) + rowOffset + offset * numOfRows;
}
bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type);


@ -453,7 +453,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
%type select {SQuerySqlNode*}
%destructor select {destroyQuerySqlNode($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G);
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N);
}
select(A) ::= LP select(B) RP. {A = B;}
@ -471,7 +471,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
// select client_version()
// select server_state()
select(A) ::= SELECT(T) selcollist(W). {
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
// selcollist is a list of expressions that are to become the return
@ -842,4 +842,4 @@ cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); s
%fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD
LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL
NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT TBNAME JOIN STABLE NULL INSERT INTO VALUES.
NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT TBNAME JOIN STABLE NULL INSERT INTO VALUES.


@ -2771,14 +2771,16 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
pInfo->stage += 1;
// all data are null, set it completed
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
return;
} else {
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
}
pInfo->stage += 1;
}
// the first stage, only acquire the min/max value
@ -2857,14 +2859,16 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
pInfo->stage += 1;
// all data are null, set it completed
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
return;
} else {
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
}
pInfo->stage += 1;
}
if (pInfo->stage == 0) {


@ -181,6 +181,7 @@ static SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntime
static SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
static SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput);
static SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
static SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
@ -1819,6 +1820,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
}
}
if (pQuery->havingNum > 0) {
pRuntimeEnv->proot = createHavingOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr1, pQuery->numOfOutput);
}
if (pQuery->limit.offset > 0) {
pRuntimeEnv->proot = createOffsetOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot);
}
@ -2631,6 +2636,21 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
tsdbRetrieveDataBlockStatisInfo(pTableScanInfo->pQueryHandle, &pBlock->pBlockStatis);
if (pQuery->topBotQuery && pBlock->pBlockStatis != NULL) {
{ // set previous window
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
SResultRow* pResult = NULL;
bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey;
STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQuery);
if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId,
pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
}
}
bool load = false;
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
int32_t functionId = pTableScanInfo->pCtx[i].functionId;
@ -3228,7 +3248,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
continue;
}
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, bufPage, pResult->offset, offset);
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, bufPage, pResult->offset, offset, pCtx[i].outputBytes);
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
@ -3286,7 +3306,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
int16_t offset = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, page, pResult->offset, offset);
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, page, pResult->offset, offset, pCtx[i].outputBytes);
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
@ -3494,8 +3514,6 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) {
*/
static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock) {
SQuery *pQuery = pRuntimeEnv->pQuery;
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t numOfResult = pBlock->info.rows; // there are already exists result rows
@ -3530,7 +3548,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
int32_t bytes = pColInfoData->info.bytes;
char *out = pColInfoData->pData + numOfResult * bytes;
char *in = getPosInResultPage(pQuery, page, pRow->offset, offset);
char *in = getPosInResultPage(pRuntimeEnv, page, pRow->offset, offset, bytes);
memcpy(out, in, bytes * numOfRowsToCopy);
offset += bytes;
@ -4000,7 +4018,7 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in
return pFillCol;
}
int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *tsdb, int32_t vgId, bool isSTableQuery) {
int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
@ -4011,8 +4029,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts
pQuery->timeWindowInterpo = timeWindowInterpoRequired(pQuery);
pQuery->stabledev = isStabledev(pQuery);
pRuntimeEnv->prevResult = prevResult;
setScanLimitationByResultBuffer(pQuery);
int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
@ -4654,6 +4670,111 @@ static SSDataBlock* doOffset(void* param) {
}
}
bool doFilterData(SColumnInfoData* p, int32_t rid, SColumnFilterElem *filterElem, __filter_func_t fp) {
char* input = p->pData + p->info.bytes * rid;
bool isnull = isNull(input, p->info.type);
if (isnull) {
return (fp == isNullOperator) ? true : false;
} else {
if (fp == notNullOperator) {
return true;
} else if (fp == isNullOperator) {
return false;
}
}
if (fp(filterElem, input, input, p->info.type)) {
return true;
}
return false;
}
void doHavingImpl(SOperatorInfo *pOperator, SSDataBlock *pBlock) {
SHavingOperatorInfo* pInfo = pOperator->info;
int32_t f = 0;
int32_t allQualified = 1;
int32_t exprQualified = 0;
for (int32_t r = 0; r < pBlock->info.rows; ++r) {
allQualified = 1;
for (int32_t i = 0; i < pOperator->numOfOutput; ++i) {
SExprInfo* pExprInfo = &(pOperator->pExpr[i]);
if (pExprInfo->pFilter == NULL) {
continue;
}
SArray* es = taosArrayGetP(pInfo->fp, i);
assert(es);
size_t fpNum = taosArrayGetSize(es);
exprQualified = 0;
for (int32_t m = 0; m < fpNum; ++m) {
__filter_func_t fp = taosArrayGetP(es, m);
assert(fp);
//SColIndex* colIdx = &pExprInfo->base.colInfo;
SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i);
SColumnFilterElem filterElem = {.filterInfo = pExprInfo->pFilter[m]};
if (doFilterData(p, r, &filterElem, fp)) {
exprQualified = 1;
break;
}
}
if (exprQualified == 0) {
allQualified = 0;
break;
}
}
if (allQualified == 0) {
continue;
}
for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
int16_t bytes = pColInfoData->info.bytes;
memmove(pColInfoData->pData + f * bytes, pColInfoData->pData + bytes * r, bytes);
}
++f;
}
pBlock->info.rows = f;
}
static SSDataBlock* doHaving(void* param) {
SOperatorInfo *pOperator = (SOperatorInfo *)param;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
while (1) {
SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream);
if (pBlock == NULL) {
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
}
doHavingImpl(pOperator, pBlock);
return pBlock;
}
}
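doHavingImpl keeps only those rows for which every filtered expression satisfies at least one of its filter functions, compacting the data block in place and shrinking pBlock->info.rows. The same compaction idiom in isolation, as a self-contained sketch with a hypothetical predicate callback:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

// Keep the rows for which keep() returns true, packing them to the front of a
// fixed-width column buffer; returns the new row count (cf. pBlock->info.rows = f).
static int32_t compactColumnRows(char *col, int16_t bytes, int32_t rows,
                                 bool (*keep)(const char *row, void *arg), void *arg) {
  int32_t written = 0;
  for (int32_t r = 0; r < rows; ++r) {
    if (!keep(col + (size_t)r * bytes, arg)) {
      continue;
    }
    memmove(col + (size_t)written * bytes, col + (size_t)r * bytes, bytes);
    ++written;
  }
  return written;
}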
static SSDataBlock* doIntervalAgg(void* param) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@ -5004,6 +5125,13 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
}
static void destroyHavingOperatorInfo(void* param, int32_t numOfOutput) {
SHavingOperatorInfo* pInfo = (SHavingOperatorInfo*) param;
if (pInfo->fp) {
taosArrayDestroy(pInfo->fp);
}
}
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo));
@ -5060,6 +5188,83 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
return pOperator;
}
int32_t initFilterFp(SExprInfo* pExpr, int32_t numOfOutput, SArray** fps) {
__filter_func_t fp = NULL;
*fps = taosArrayInit(numOfOutput, sizeof(SArray*));
if (*fps == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pExprInfo = &(pExpr[i]);
SColIndex* colIdx = &pExprInfo->base.colInfo;
if (pExprInfo->pFilter == NULL || !TSDB_COL_IS_NORMAL_COL(colIdx->flag)) {
taosArrayPush(*fps, &fp);
continue;
}
int32_t filterNum = pExprInfo->base.filterNum;
SColumnFilterInfo *filterInfo = pExprInfo->pFilter;
SArray* es = taosArrayInit(filterNum, sizeof(__filter_func_t));
for (int32_t j = 0; j < filterNum; ++j) {
int32_t lower = filterInfo->lowerRelOptr;
int32_t upper = filterInfo->upperRelOptr;
if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
qError("invalid rel optr");
taosArrayDestroy(es);
return TSDB_CODE_QRY_APP_ERROR;
}
__filter_func_t ffp = getFilterOperator(lower, upper);
if (ffp == NULL) {
qError("invalid filter info");
taosArrayDestroy(es);
return TSDB_CODE_QRY_APP_ERROR;
}
taosArrayPush(es, &ffp);
filterInfo += 1;
}
taosArrayPush(*fps, &es);
}
return TSDB_CODE_SUCCESS;
}
SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SHavingOperatorInfo* pInfo = calloc(1, sizeof(SHavingOperatorInfo));
initFilterFp(pExpr, numOfOutput, &pInfo->fp);
assert(pInfo->fp);
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "HavingOperator";
pOperator->operatorType = OP_Having;
pOperator->blockingOptr = false;
pOperator->status = OP_IN_EXECUTING;
pOperator->numOfOutput = numOfOutput;
pOperator->pExpr = pExpr;
pOperator->upstream = upstream;
pOperator->exec = doHaving;
pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->cleanup = destroyHavingOperatorInfo;
return pOperator;
}
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) {
SLimitOperatorInfo* pInfo = calloc(1, sizeof(SLimitOperatorInfo));
pInfo->limit = pRuntimeEnv->pQuery->limit.limit;
@ -5631,9 +5836,35 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pExprMsg->functionId = htons(pExprMsg->functionId);
pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
pExprMsg->resColId = htons(pExprMsg->resColId);
pExprMsg->filterNum = htonl(pExprMsg->filterNum);
pMsg += sizeof(SSqlFuncMsg);
SColumnFilterInfo* pExprFilterInfo = pExprMsg->filterInfo;
pMsg += sizeof(SColumnFilterInfo) * pExprMsg->filterNum;
for (int32_t f = 0; f < pExprMsg->filterNum; ++f) {
SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pExprFilterInfo;
pFilterMsg->filterstr = htons(pFilterMsg->filterstr);
if (pFilterMsg->filterstr) {
pFilterMsg->len = htobe64(pFilterMsg->len);
pFilterMsg->pz = (int64_t)pMsg;
pMsg += (pFilterMsg->len + 1);
} else {
pFilterMsg->lowerBndi = htobe64(pFilterMsg->lowerBndi);
pFilterMsg->upperBndi = htobe64(pFilterMsg->upperBndi);
}
pFilterMsg->lowerRelOptr = htons(pFilterMsg->lowerRelOptr);
pFilterMsg->upperRelOptr = htons(pFilterMsg->upperRelOptr);
pExprFilterInfo++;
}
for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType);
pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes);
@ -5818,6 +6049,42 @@ _cleanup:
return code;
}
int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
if (filterNum <= 0) {
return TSDB_CODE_SUCCESS;
}
*dst = calloc(filterNum, sizeof(*src));
if (*dst == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
memcpy(*dst, src, sizeof(*src) * filterNum);
for (int32_t i = 0; i < filterNum; i++) {
if ((*dst)[i].filterstr && (*dst)[i].len > 0) {
void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
if (pz == NULL) {
if (i == 0) {
free(*dst);
} else {
freeColumnFilterInfo(*dst, i);
}
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
memcpy(pz, (void *)src[i].pz, (size_t)src[i].len + 1);
(*dst)[i].pz = (int64_t)pz;
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) {
qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg);
@ -5931,6 +6198,13 @@ int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutpu
type = s->type;
bytes = s->bytes;
}
if (pExprs[i].base.filterNum > 0) {
int32_t ret = cloneExprFilterInfo(&pExprs[i].pFilter, pExprMsg[i]->filterInfo, pExprMsg[i]->filterNum);
if (ret) {
return ret;
}
}
}
int32_t param = (int32_t)pExprs[i].base.arg[0].argValue.i64;
@ -6220,6 +6494,10 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) {
pQuery->tagLen += pExprs[col].bytes;
}
if (pExprs[col].pFilter) {
++pQuery->havingNum;
}
}
doUpdateExprColumnIndex(pQuery);
@ -6323,6 +6601,10 @@ _cleanup_qinfo:
tExprTreeDestroy(pExprInfo->pExpr, NULL);
pExprInfo->pExpr = NULL;
}
if (pExprInfo->pFilter) {
freeColumnFilterInfo(pExprInfo->pFilter, pExprInfo->base.filterNum);
}
}
tfree(pExprs);
@ -6368,6 +6650,8 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
SArray* prevResult = NULL;
if (pQueryMsg->prevResultLen > 0) {
prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen);
pRuntimeEnv->prevResult = prevResult;
}
pQuery->precision = tsdbGetCfg(tsdb)->precision;
@ -6389,7 +6673,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
}
// filter the qualified
if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
if ((code = doInitQInfo(pQInfo, pTsBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
goto _error;
}
@ -6407,7 +6691,7 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
}
for (int32_t i = 0; i < numOfFilters; i++) {
if (pFilter[i].filterstr) {
if (pFilter[i].filterstr && pFilter[i].pz) {
free((void*)(pFilter[i].pz));
}
}
@ -6449,6 +6733,10 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) {
if (pExprInfo[i].pExpr != NULL) {
tExprTreeDestroy(pExprInfo[i].pExpr, NULL);
}
if (pExprInfo[i].pFilter) {
freeColumnFilterInfo(pExprInfo[i].pFilter, pExprInfo[i].base.filterNum);
}
}
tfree(pExprInfo);
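
The cloneExprFilterInfo / freeColumnFilterInfo pair added above deep-copies the per-expression column filters that back the new HAVING handling: numeric bounds travel with the plain memcpy of the array, while string filters need their pz buffers duplicated and later released one by one. A minimal stand-alone sketch of that copy-then-free pattern, using a made-up SimpleFilter struct rather than TDengine's real SColumnFilterInfo:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for a column filter: either an inline numeric range
 * or a heap-allocated pattern string, selected by the filterstr flag.
 * Illustrative only; this is not TDengine's SColumnFilterInfo. */
typedef struct {
  int16_t filterstr;  /* non-zero: pz/len are used; zero: lower/upper are used */
  int64_t len;
  char   *pz;
  int64_t lower;
  int64_t upper;
} SimpleFilter;

/* Deep-copy an array of filters; every string filter gets its own buffer. */
static int cloneFilters(SimpleFilter **dst, const SimpleFilter *src, int32_t num) {
  if (num <= 0) return 0;

  *dst = calloc(num, sizeof(*src));
  if (*dst == NULL) return -1;
  memcpy(*dst, src, sizeof(*src) * num);

  for (int32_t i = 0; i < num; i++) {
    if (!src[i].filterstr) continue;

    (*dst)[i].pz = NULL;               /* never share the source buffer */
    if (src[i].len <= 0) continue;

    (*dst)[i].pz = calloc(1, (size_t)src[i].len + 1);
    if ((*dst)[i].pz == NULL) {
      for (int32_t j = 0; j < i; j++) {
        if ((*dst)[j].filterstr) free((*dst)[j].pz);  /* free(NULL) is a no-op */
      }
      free(*dst);
      *dst = NULL;
      return -1;
    }
    memcpy((*dst)[i].pz, src[i].pz, (size_t)src[i].len + 1);
  }
  return 0;
}

/* Mirror of the clean-up path: release string buffers, then the array. */
static void freeFilters(SimpleFilter *f, int32_t num) {
  if (f == NULL) return;
  for (int32_t i = 0; i < num; i++) {
    if (f[i].filterstr) free(f[i].pz);
  }
  free(f);
}

int main(void) {
  char pattern[] = "abc";
  SimpleFilter src[2] = {
    { .filterstr = 0, .lower = 10, .upper = 20 },
    { .filterstr = 1, .len = 3, .pz = pattern },
  };

  SimpleFilter *copy = NULL;
  if (cloneFilters(&copy, src, 2) == 0) {
    printf("cloned: [%lld,%lld] and \"%s\"\n",
           (long long)copy[0].lower, (long long)copy[0].upper, copy[1].pz);
    freeFilters(copy, 2);
  }
  return 0;
}
```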

View File

@ -310,6 +310,77 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
return pExpr;
}
static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) {
return (left->type == right->type && left->n == right->n && strncasecmp(left->z, right->z, left->n) == 0) ? 0 : 1;
}
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
if (left == NULL && right == NULL) {
return 0;
}
if (left == NULL || right == NULL) {
return 1;
}
if (left->type != right->type) {
return 1;
}
if (left->tokenId != right->tokenId) {
return 1;
}
if (left->functionId != right->functionId) {
return 1;
}
if ((left->pLeft && right->pLeft == NULL)
|| (left->pLeft == NULL && right->pLeft)
|| (left->pRight && right->pRight == NULL)
|| (left->pRight == NULL && right->pRight)
|| (left->pParam && right->pParam == NULL)
|| (left->pParam == NULL && right->pParam)) {
return 1;
}
if (tVariantCompare(&left->value, &right->value)) {
return 1;
}
if (tStrTokenCompare(&left->colInfo, &right->colInfo)) {
return 1;
}
if (right->pParam && left->pParam) {
size_t size = taosArrayGetSize(right->pParam);
if (left->pParam && taosArrayGetSize(left->pParam) != size) {
return 1;
}
for (int32_t i = 0; i < size; i++) {
tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i);
tSqlExpr* pSubLeft = pLeftElem->pNode;
tSqlExprItem* pRightElem = taosArrayGet(right->pParam, i);
tSqlExpr* pSubRight = pRightElem->pNode;
if (tSqlExprCompare(pSubLeft, pSubRight)) {
return 1;
}
}
}
if (left->pLeft && tSqlExprCompare(left->pLeft, right->pLeft)) {
return 1;
}
if (left->pRight && tSqlExprCompare(left->pRight, right->pRight)) {
return 1;
}
return 0;
}
tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) {
tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));
@ -640,7 +711,7 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
SSessionWindowVal *pSession, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit,
SLimitVal *psLimit) {
SLimitVal *psLimit, tSqlExpr *pHaving) {
assert(pSelectList != NULL);
SQuerySqlNode *pSqlNode = calloc(1, sizeof(SQuerySqlNode));
@ -655,6 +726,7 @@ SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SF
pSqlNode->pSortOrder = pSortOrder;
pSqlNode->pWhere = pWhere;
pSqlNode->fillType = pFill;
pSqlNode->pHaving = pHaving;
if (pLimit != NULL) {
pSqlNode->limit = *pLimit;
@ -717,6 +789,9 @@ void destroyQuerySqlNode(SQuerySqlNode *pQuerySql) {
tSqlExprDestroy(pQuerySql->pWhere);
pQuerySql->pWhere = NULL;
tSqlExprDestroy(pQuerySql->pHaving);
pQuerySql->pHaving = NULL;
taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant);
pQuerySql->pSortOrder = NULL;
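
tSqlExprCompare above walks two parse trees and returns 0 only when every field matches: type, token and function ids, value, column token, the parameter list, and both children. A self-contained sketch of the same recursive structural-equality pattern on a simplified node type (ExprNode is invented for illustration; it is not the real tSqlExpr):

```c
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Simplified expression node for illustration only; the real tSqlExpr also
 * carries tokens, a value variant and a parameter list. */
typedef struct ExprNode {
  int op;                   /* operator or token id */
  const char *name;         /* column/function/literal text, may be NULL */
  struct ExprNode *left;
  struct ExprNode *right;
} ExprNode;

/* Returns 0 when the two trees are structurally identical, 1 otherwise,
 * following the same "0 = equal, 1 = different" convention as tSqlExprCompare. */
static int exprCompare(const ExprNode *a, const ExprNode *b) {
  if (a == NULL && b == NULL) return 0;
  if (a == NULL || b == NULL) return 1;
  if (a->op != b->op) return 1;

  if ((a->name == NULL) != (b->name == NULL)) return 1;
  if (a->name && strcmp(a->name, b->name) != 0) return 1;

  if (exprCompare(a->left, b->left)) return 1;
  return exprCompare(a->right, b->right);
}

int main(void) {
  ExprNode colA = { .op = 1, .name = "c1" };
  ExprNode valA = { .op = 2, .name = "10" };
  ExprNode gtA  = { .op = 3, .left = &colA, .right = &valA };

  ExprNode colB = { .op = 1, .name = "c1" };
  ExprNode valB = { .op = 2, .name = "10" };
  ExprNode gtB  = { .op = 3, .left = &colB, .right = &valB };

  printf("equal: %s\n", exprCompare(&gtA, &gtB) == 0 ? "yes" : "no");
  return 0;
}
```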

View File

@ -560,7 +560,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenId) {
return 0;
}
SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgnoreToken, uint32_t* ignoreTokenTypes) {
SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr) {
SStrToken t0 = {0};
// here we reach the end of sql string, null-terminated string
@ -585,7 +585,10 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn
}
t0.n = tSQLGetToken(&str[*i], &t0.type);
break;
// user-specified ignore-token lists are not supported
#if 0
bool ignore = false;
for (uint32_t k = 0; k < numOfIgnoreToken; k++) {
if (t0.type == ignoreTokenTypes[k]) {
@ -597,6 +600,7 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn
if (!ignore) {
break;
}
#endif
}
if (t0.type == TK_SEMI) {

View File

@ -140,7 +140,7 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i];
int16_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes;
char * s = getPosInResultPage(pRuntimeEnv->pQuery, page, pResultRow->offset, offset);
char * s = getPosInResultPage(pRuntimeEnv, page, pResultRow->offset, offset, size);
memset(s, 0, size);
offset += size;

View File

@ -475,7 +475,7 @@ void qCleanupQueryMgmt(void* pQMgmt) {
qDebug("vgId:%d, queryMgmt cleanup completed", vgId);
}
void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) {
void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) {
if (pMgmt == NULL) {
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
return NULL;
@ -516,8 +516,7 @@ void** qAcquireQInfo(void* pMgmt, uint64_t _key) {
return NULL;
}
TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key;
void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE));
void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &_key, sizeof(_key));
if (handle == NULL || *handle == NULL) {
terrno = TSDB_CODE_QRY_INVALID_QHANDLE;
return NULL;

File diff suppressed because it is too large

View File

@ -10,7 +10,7 @@ namespace {
// simple test
void simpleTest() {
SDiskbasedResultBuf* pResultBuf = NULL;
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4096, NULL);
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4096, 1);
int32_t pageId = 0;
int32_t groupId = 0;
@ -52,7 +52,7 @@ void simpleTest() {
void writeDownTest() {
SDiskbasedResultBuf* pResultBuf = NULL;
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, 1);
int32_t pageId = 0;
int32_t writePageId = 0;
@ -99,7 +99,7 @@ void writeDownTest() {
void recyclePageTest() {
SDiskbasedResultBuf* pResultBuf = NULL;
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, 1);
int32_t pageId = 0;
int32_t writePageId = 0;

View File

@ -551,7 +551,10 @@ static void syncClosePeerConn(SSyncPeer *pPeer) {
if (pPeer->peerFd >= 0) {
pPeer->peerFd = -1;
void *pConn = pPeer->pConn;
if (pConn != NULL) syncFreeTcpConn(pPeer->pConn);
if (pConn != NULL) {
syncFreeTcpConn(pPeer->pConn);
pPeer->pConn = NULL;
}
}
}
@ -1372,7 +1375,7 @@ static void syncMonitorNodeRole(void *param, void *tmrId) {
if (/*pPeer->role > TAOS_SYNC_ROLE_UNSYNCED && */ nodeRole > TAOS_SYNC_ROLE_UNSYNCED) continue;
if (/*pPeer->sstatus > TAOS_SYNC_STATUS_INIT || */ nodeSStatus > TAOS_SYNC_STATUS_INIT) continue;
sDebug("%s, check roles since self:%s sstatus:%s, peer:%s sstatus:%s", pPeer->id, syncRole[pPeer->role],
sDebug("%s, check roles since peer:%s sstatus:%s, self:%s sstatus:%s", pPeer->id, syncRole[pPeer->role],
syncStatus[pPeer->sstatus], syncRole[nodeRole], syncStatus[nodeSStatus]);
syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_CHECK_ROLE, syncGenTranId());
break;
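
The syncClosePeerConn change above resets pPeer->pConn to NULL right after handing it to syncFreeTcpConn, so any later check sees a cleared pointer instead of a dangling one. A generic sketch of that free-and-null idiom, using invented Peer/closePeerConn names rather than the sync module's real types:

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: a tiny connection wrapper, not the sync module's SSyncPeer. */
typedef struct {
  int   fd;
  void *conn;   /* heap-allocated connection state */
} Peer;

static void freeConn(void *conn) { free(conn); }

/* Close the connection once and leave the struct in a state that is safe
 * to inspect or close again: the pointer is reset to NULL after freeing. */
static void closePeerConn(Peer *peer) {
  peer->fd = -1;
  if (peer->conn != NULL) {
    freeConn(peer->conn);
    peer->conn = NULL;   /* prevents double-free / use-after-free on later calls */
  }
}

int main(void) {
  Peer p = { .fd = 3, .conn = malloc(32) };
  closePeerConn(&p);
  closePeerConn(&p);   /* second call is now a harmless no-op */
  printf("conn is %s\n", p.conn == NULL ? "NULL" : "dangling");
  return 0;
}
```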

View File

@ -526,7 +526,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) {
STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
tsdbGetTableSchemaImpl(pTable, false, false, -1));
tsdbGetTableSchemaImpl(pTable, false, false, -1), 0);
}
}
}
@ -619,4 +619,4 @@ int tsdbRestoreInfo(STsdbRepo *pRepo) {
tsdbDestroyReadH(&readh);
return 0;
}
}

View File

@ -840,7 +840,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1;
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
tsdbGetTableSchemaImpl(pTable, false, false, -1));
tsdbGetTableSchemaImpl(pTable, false, false, -1), 1);
}
tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
@ -1322,4 +1322,4 @@ static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) {
}
return 0;
}
}

View File

@ -51,11 +51,9 @@ uint32_t tSQLGetToken(char *z, uint32_t *tokenType);
* @param str
* @param i
* @param isPrevOptr
* @param numOfIgnoreToken
* @param ignoreTokenTypes
* @return
*/
SStrToken tStrGetToken(char *str, int32_t *i, bool isPrevOptr, uint32_t numOfIgnoreToken, uint32_t *ignoreTokenTypes);
SStrToken tStrGetToken(char *str, int32_t *i, bool isPrevOptr);
/**
* check if it is a keyword or not

View File

@ -239,7 +239,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
// current connect is broken
if (code == TSDB_CODE_SUCCESS) {
handle = qRegisterQInfo(pVnode->qMgmt, qId, (uint64_t)pQInfo);
handle = qRegisterQInfo(pVnode->qMgmt, qId, pQInfo);
if (handle == NULL) { // failed to register qhandle
pRsp->code = terrno;
terrno = 0;

View File

@ -19,6 +19,10 @@ Run lua sample:
lua test.lua
```
## Run performance test:
```
time lua benchmark.lua
```
## OpenResty Dependencies
- OpenResty:
```

View File

@ -0,0 +1,67 @@
local driver = require "luaconnector"
local config = {
password = "taosdata",
host = "127.0.0.1",
port = 6030,
database = "",
user = "root",
max_packet_size = 1024 * 1024
}
local conn
local res = driver.connect(config)
if res.code ~=0 then
print("connect--- failed: "..res.error)
return
else
conn = res.conn
print("connect--- pass.")
end
local res = driver.query(conn,"drop database if exists demo")
res = driver.query(conn,"create database demo")
if res.code ~=0 then
print("create db--- failed: "..res.error)
return
else
print("create db--- pass.")
end
res = driver.query(conn,"use demo")
if res.code ~=0 then
print("select db--- failed: "..res.error)
return
else
print("select db--- pass.")
end
res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))")
if res.code ~=0 then
print("create table---failed: "..res.error)
return
else
print("create table--- pass.")
end
local base = 1617330000000
local index =0
local count = 100000
local t
while( index < count )
do
t = base + index
local q=string.format([[insert into m1 values (%d,0,'robotspace')]],t)
res = driver.query(conn,q)
if res.code ~=0 then
print("insert records failed: "..res.error)
return
else
end
index = index+1
end
print(string.format([["Done. %d records have been stored."]],count))
driver.close(conn)

View File

@ -1,2 +1,2 @@
gcc lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos
gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos

View File

@ -1,2 +1,2 @@
gcc lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos
gcc -std=c99 lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos

View File

@ -23,7 +23,7 @@ static int l_connect(lua_State *L){
luaL_checktype(L, 1, LUA_TTABLE);
lua_getfield(L,-1,"host");
lua_getfield(L,1,"host");
if (lua_isstring(L,-1)){
host = lua_tostring(L, -1);
// printf("host = %s\n", host);

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/bash
# This is the script for us to try to cause the TDengine server or client to crash
#

View File

@ -17247,3 +17247,88 @@
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:PyEval_EvalCode
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:PyInit__openssl
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}

View File

@ -22,6 +22,7 @@ python3 ./test.py -f insert/alterTableAndInsert.py
python3 ./test.py -f insert/insertIntoTwoTables.py
python3 ./test.py -f insert/before_1970.py
python3 bug2265.py
python3 ./test.py -f insert/bug3654.py
#table
python3 ./test.py -f table/alter_wal0.py
@ -178,7 +179,7 @@ python3 ./test.py -f stable/query_after_reset.py
# perfbenchmark
python3 ./test.py -f perfbenchmark/bug3433.py
python3 ./test.py -f perfbenchmark/bug3589.py
#python3 ./test.py -f perfbenchmark/bug3589.py
#query
@ -216,8 +217,8 @@ python3 ./test.py -f query/floatCompare.py
python3 ./test.py -f query/query1970YearsAf.py
python3 ./test.py -f query/bug3351.py
python3 ./test.py -f query/bug3375.py
python3 ./test.py -f query/queryJoin10tables.py
python3 ./test.py -f query/queryStddevWithGroupby.py
#stream
python3 ./test.py -f stream/metric_1.py

View File

@ -36,16 +36,11 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log |
do
defiMemError=(${defiMemError//,/})
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then
if [ "$defiMemError" -gt 0 ]; then
cat valgrind.err
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
elif [ "$defiMemError" -gt 1013 ];then #add for azure
cat valgrind.err
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
fi
fi
done

View File

@ -21,7 +21,7 @@ rm -rf /var/lib/taos/*
nohup valgrind --leak-check=yes $TAOSD_DIR > $TDIR/$VALGRIND_OUT 2> $TDIR/$VALGRIND_ERR &
sleep 20
cd -
./crash_gen.sh -p -t 10 -s 200
./crash_gen.sh -p -t 10 -s 1000
ps -ef |grep valgrind|grep -v grep|awk '{print $2}'|xargs kill -term
while true
do
@ -53,16 +53,11 @@ for defiMemError in `grep 'definitely lost:' taosd-definitely-lost-out.log | awk
do
defiMemError=(${defiMemError//,/})
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then
if [ "$defiMemError" -gt 0 ]; then
cat $VALGRIND_ERR
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
elif [ "$defiMemError" -gt 1013 ];then #add for azure
cat $VALGRIND_ERR
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
fi
fi
done

View File

@ -0,0 +1,53 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import sys
import datetime
from util.log import *
from util.sql import *
from util.cases import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db keep 36500,36500,36500")
tdSql.execute("use db")
tdSql.execute("create stable if not exists stb1 (ts timestamp, c1 int) tags(t11 int)")
tdSql.execute("create table t10 using stb1 tags(1)")
gapdate = (datetime.datetime.now() - datetime.datetime(1970, 1, 1, 8, 0, 0)).days
tdLog.printNoPrefix("==========step2:insert data")
tdSql.execute(f"insert into t10 values (now-{gapdate}d, 1)")
tdSql.execute(f"insert into t10 values (now-{gapdate + 1}d, 2)")
tdLog.printNoPrefix("==========step3:query")
tdSql.query("select * from t10 where c1=1")
tdSql.checkRows(1)
tdSql.query("select * from t10 where c1=2")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -11,13 +11,13 @@
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
from multiprocessing import Process
from multiprocessing import Process
import subprocess
class TDTestCase:
def init(self, conn, logSql):
@ -40,27 +40,22 @@ class TDTestCase:
print("alter table done")
def deleteTableAndRecreate(self):
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
self.config = tdDnodes.getSimCfgPath()
self.conn = taos.connect(host = self.host, user = self.user, password = self.password, config = self.config)
self.cursor = self.conn.cursor()
self.cursor.execute("use test")
print("drop table stb")
self.cursor.execute("drop table stb")
print("create table stb")
self.cursor.execute("create table if not exists stb (ts timestamp, col1 int) tags(areaid int, city nchar(20))")
print("insert data")
sqlCmds = "use test; drop table stb;"
sqlCmds += "create table if not exists stb (ts timestamp, col1 int) tags(areaid int, city nchar(20));"
for i in range(self.tables):
city = "beijing" if i % 2 == 0 else "shanghai"
self.cursor.execute("create table tb%d using stb tags(%d, '%s')" % (i, i, city))
for j in range(self.rows):
self.cursor.execute("insert into tb%d values(%d, %d)" % (i, self.ts + j, j * 100000))
sqlCmds += "create table tb%d using stb tags(%d, '%s');" % (i, i, city)
for j in range(5):
sqlCmds += "insert into tb%d values(%d, %d);" % (i, self.ts + j, j * 100000)
command = ["taos", "-c", self.config, "-s", sqlCmds]
print("drop stb, recreate stb and insert data ")
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if result.returncode == 0:
print("success:", result)
else:
print("error:", result)
def run(self):
tdSql.prepare()
@ -100,19 +95,17 @@ class TDTestCase:
tdSql.query("select count(*) from stb")
tdSql.checkData(0, 0, 10000)
tdSql.query("select count(*) from tb1")
tdSql.query("select count(*) from tb0")
tdSql.checkData(0, 0, 1000)
p = Process(target=self.deleteTableAndRecreate, args=())
p.start()
p.join()
p.terminate()
# drop stable in subprocess
self.deleteTableAndRecreate()
tdSql.query("select count(*) from stb")
tdSql.checkData(0, 0, 10000)
tdSql.checkData(0, 0, 5 * self.tables)
tdSql.query("select count(*) from tb1")
tdSql.checkData(0, 0, 1000)
tdSql.query("select count(*) from tb0")
tdSql.checkData(0, 0, 5)
def stop(self):
tdSql.close()

View File

@ -0,0 +1,201 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import sys
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def createtable(self):
# create stables
tdSql.execute("create table if not exists stb1 (ts timestamp, c1 int) tags(t11 int, t12 int)")
tdSql.execute("create table if not exists stb2 (ts timestamp, c2 int) tags(t21 int, t22 int)")
tdSql.execute("create table if not exists stb3 (ts timestamp, c3 int) tags(t31 int, t32 int)")
tdSql.execute("create table if not exists stb4 (ts timestamp, c4 int) tags(t41 int, t42 int)")
tdSql.execute("create table if not exists stb5 (ts timestamp, c5 int) tags(t51 int, t52 int)")
tdSql.execute("create table if not exists stb6 (ts timestamp, c6 int) tags(t61 int, t62 int)")
tdSql.execute("create table if not exists stb7 (ts timestamp, c7 int) tags(t71 int, t72 int)")
tdSql.execute("create table if not exists stb8 (ts timestamp, c8 int) tags(t81 int, t82 int)")
tdSql.execute("create table if not exists stb9 (ts timestamp, c9 int) tags(t91 int, t92 int)")
tdSql.execute("create table if not exists stb10 (ts timestamp, c10 int) tags(t101 int, t102 int)")
tdSql.execute("create table if not exists stb11 (ts timestamp, c11 int) tags(t111 int, t112 int)")
# create normal tables
tdSql.execute("create table t10 using stb1 tags(0, 9)")
tdSql.execute("create table t11 using stb1 tags(1, 8)")
tdSql.execute("create table t12 using stb1 tags(2, 7)")
tdSql.execute("create table t13 using stb1 tags(3, 6)")
tdSql.execute("create table t14 using stb1 tags(4, 5)")
tdSql.execute("create table t15 using stb1 tags(5, 4)")
tdSql.execute("create table t16 using stb1 tags(6, 3)")
tdSql.execute("create table t17 using stb1 tags(7, 2)")
tdSql.execute("create table t18 using stb1 tags(8, 1)")
tdSql.execute("create table t19 using stb1 tags(9, 0)")
tdSql.execute("create table t110 using stb1 tags(10, 10)")
tdSql.execute("create table t20 using stb2 tags(0, 9)")
tdSql.execute("create table t21 using stb2 tags(1, 8)")
tdSql.execute("create table t22 using stb2 tags(2, 7)")
tdSql.execute("create table t30 using stb3 tags(0, 9)")
tdSql.execute("create table t31 using stb3 tags(1, 8)")
tdSql.execute("create table t32 using stb3 tags(2, 7)")
def inserttable(self):
for i in range(100):
if i<60:
tdSql.execute(f"insert into t20 values('2020-10-01 00:00:{i}.000', {i})")
tdSql.execute(f"insert into t21 values('2020-10-01 00:00:{i}.000', {i})")
tdSql.execute(f"insert into t22 values('2020-10-01 00:00:{i}.000', {i})")
tdSql.execute(f"insert into t30 values('2020-10-01 00:00:{i}.000', {i})")
tdSql.execute(f"insert into t31 values('2020-10-01 00:00:{i}.000', {i})")
tdSql.execute(f"insert into t32 values('2020-10-01 00:00:{i}.000', {i})")
else:
tdSql.execute(f"insert into t20 values('2020-10-01 00:01:{i-60}.000', {i})")
tdSql.execute(f"insert into t21 values('2020-10-01 00:01:{i-60}.000', {i})")
tdSql.execute(f"insert into t22 values('2020-10-01 00:01:{i-60}.000', {i})")
tdSql.execute(f"insert into t30 values('2020-10-01 00:01:{i-60}.000', {i})")
tdSql.execute(f"insert into t31 values('2020-10-01 00:01:{i-60}.000', {i})")
tdSql.execute(f"insert into t32 values('2020-10-01 00:01:{i-60}.000', {i})")
for j in range(11):
if i<60:
tdSql.execute(f"insert into t1{j} values('2020-10-01 00:00:{i}.000', {i})")
else:
tdSql.execute(f"insert into t1{j} values('2020-10-01 00:01:{i-60}.000', {i})")
def queryjointable(self):
tdSql.error(
'''select from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19
where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts'''
)
tdSql.error("select * from t10 where t10.ts=t11.ts")
tdSql.error("select * from where t10.ts=t11.ts")
tdSql.error("select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19")
tdSql.error("select * from stb1, stb2, stb3 where stb1.ts=stb2.ts and stb1.ts=stb3.ts")
tdSql.error("select * from stb1, stb2, stb3 where stb1.t11=stb2.t21 and stb1.t11=stb3.t31")
tdSql.error("select * from stb1, stb2, stb3")
tdSql.error(
'''select * from stb1
join stb2 on stb1.ts=stb2.ts and stb1.t11=stb2.t21
join stb3 on stb1.ts=stb3.ts and stb1.t11=stb3.t31'''
)
tdSql.error("select * from t10 join t11 on t10.ts=t11.ts join t12 on t11.ts=t12.ts")
tdSql.query(
'''select * from stb1,stb2,stb3
where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11 =stb3.t31'''
)
tdSql.checkRows(300)
tdSql.query("select * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts")
tdSql.checkRows(100)
tdSql.error("selec * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts")
tdSql.error("select * form t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts <> t12.ts and t11.ts=t13.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts != t12.ts and t11.ts=t13.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts or t11.ts=t13.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts=t13.ts")
tdSql.error("select * from t11,t12,t13 when t11.c1=t12.c2 and t11.c1=t13.c3")
tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.c3 and t11.c1=t13.ts")
tdSql.error("select ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts=ts and t11.ts=t13.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and ts>100")
tdSql.error("select * from t11,t12,stb1 when t11.ts=t12.ts and t11.ts=stb1.ts")
tdSql.error("select t14.ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts1")
tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t14.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts")
tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and t11.c1=t13.c3")
tdSql.error(
'''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20
where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.ts=t20.ts'''
)
tdSql.error(
'''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20
where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts'''
)
tdSql.error(
'''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19
where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.c1=t19.c1'''
)
tdSql.error(
'''select * from stb1,stb2,stb3
where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21'''
)
tdSql.error(
'''select * from stb1,stb2,stb3
where stb1.ts=stb2.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31'''
)
tdSql.error(
'''select * from stb1,stb2,stb3
where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31
and stb1.t12=stb3=t32'''
)
tdSql.error(
'''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10,stb11
where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts
and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.ts=stb11.ts
and stb1.t11=stb2.t21 and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51
and stb1.t11=stb6.t61 and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91
and stb1.t11=stb10.t101 and stb1.t11=stb11.t111'''
)
tdSql.error(
'''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10
where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts
and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.t11=stb2.t21
and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51 and stb1.t11=stb6.t61
and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91 and stb1.t11=stb10.t101
and stb1.t12=stb11.t102'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.createtable()
tdLog.printNoPrefix("==========step2:insert data")
self.inserttable()
tdLog.printNoPrefix("==========step3:query timestamp type")
self.queryjointable()
# after wal and sync, check again
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
tdDnodes.stop(index)
tdDnodes.start(index)
tdLog.printNoPrefix("==========step4:query again after wal")
self.queryjointable()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,68 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def querysqls(self):
tdSql.query("select stddev(c1) from t10 group by c1")
tdSql.checkRows(6)
tdSql.checkData(0, 0, 0)
tdSql.checkData(1, 0, 0)
tdSql.checkData(2, 0, 0)
tdSql.checkData(3, 0, 0)
tdSql.checkData(4, 0, 0)
tdSql.checkData(5, 0, 0)
tdSql.query("select stddev(c2) from t10")
tdSql.checkData(0, 0, 0.5)
def run(self):
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 36500")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step1:create table && insert data")
tdSql.execute("create stable stb1 (ts timestamp , c1 int ,c2 float) tags(t1 int)")
tdSql.execute("create table t10 using stb1 tags(1)")
tdSql.execute("insert into t10 values ('1969-12-31 00:00:00.000', 2,1)")
tdSql.execute("insert into t10 values ('1970-01-01 00:00:00.000', 3,1)")
tdSql.execute("insert into t10 values (0, 4,1)")
tdSql.execute("insert into t10 values (now-18725d, 1,2)")
tdSql.execute("insert into t10 values ('2021-04-06 00:00:00.000', 5,2)")
tdSql.execute("insert into t10 values (now+1d,6,2)")
tdLog.printNoPrefix("==========step2:query and check")
self.querysqls()
tdLog.printNoPrefix("==========step3:after wal,check again")
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
tdDnodes.stop(index)
tdDnodes.start(index)
self.querysqls()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -204,7 +204,13 @@ if $data03 != NULL then
return -1
endi
sql reset query cache
print ============================>TD-3366 TD-3486
sql insert into td_3366(ts, c3, c1) using mt(t1) tags(911) values('2018-1-1 11:11:11', 'new1', 12);
sql insert into td_3486(ts, c3, c1) using mt(t1) tags(-12) values('2018-1-1 11:11:11', 'new1', 12);
sql insert into ttxu(ts, c3, c1) using mt(t1) tags('-121') values('2018-1-1 11:11:11', 'new1', 12);
sql insert into tb(ts, c1, c3) using mt(t1) tags(123) values('2018-11-01 16:29:58.000', 2, 'port')
sql insert into tb values ('2018-11-01 16:29:58.000', 2, 'import', 3)
sql import into tb values ('2018-11-01 16:29:58.000', 2, 'import', 3)
sql import into tb values ('2018-11-01 16:39:58.000', 2, 'import', 3)
@ -212,6 +218,7 @@ sql select * from tb order by ts desc
if $rows != 4 then
return -1
endi
if $data03 != 3 then
return -1
endi
@ -233,10 +240,10 @@ sql_error alter table mt add column c1 int
# drop non-existing columns
sql_error alter table mt drop column c9
sql drop database $db
sql show databases
if $rows != 0 then
return -1
endi
#sql drop database $db
#sql show databases
#if $rows != 0 then
# return -1
#endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -0,0 +1,6 @@
#!/bin/bash
Cur_Dir=$(pwd)
echo $Cur_Dir
echo "'2020-1-1 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql

View File

@ -220,6 +220,7 @@ sql_error select sum(c3), ts, c2 from group_tb0 where c1 < 20 group by c1;
sql_error select sum(c3), first(ts), c2 from group_tb0 where c1 < 20 group by c1;
sql_error select first(c3), ts, c1, c2 from group_tb0 where c1 < 20 group by c1;
sql_error select first(c3), last(c3), ts, c1 from group_tb0 where c1 < 20 group by c1;
sql_error select ts from group_tb0 group by c1;
#===========================interval=====not support======================
sql_error select count(*), c1 from group_tb0 where c1<20 interval(1y) group by c1;

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -8,32 +8,28 @@ sql connect
sleep 500
sql drop database if exists indb
sql create database if not exists indb
sql use indb
$inFileName = '~/data.csv'
$numOfRows = 10000
#system sh/gendata.sh $inFileName $numOfRows # input file invalid
system sh/gendata.sh ~/data.csv $numOfRows
system general/parser/gendata.sh
sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2))
print ====== create tables success, starting import data
sql import into tbx file $inFileName
sql import into tbx file '~/data.sql'
sql select count(*) from tbx
if $rows != 1 then
return -1
endi
if $data00 != $numOfRows then
print "expect: $numOfRows, act: $data00"
return -1
endi
#if $data00 != $numOfRows then
# print "expect: $numOfRows, act: $data00"
# return -1
#endi
#system rm -f $inFileName # invalid shell
system rm -f ~/data.csv
system rm -f ~/data.sql
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -159,6 +159,15 @@ if $data03 != @abc15@ then
return -1
endi
sql select top(c6, 3) from select_tags_mt0 interval(10a)
sql select top(c3,10) from select_tags_mt0 interval(10a) group by tbname
sql select top(c6, 3) from select_tags_mt0 interval(10a) group by tbname;
sql select top(c6, 10) from select_tags_mt0 interval(10a);
if $rows != 12800 then
return -1
endi
sql select top(c1, 100), tbname, t1, t2 from select_tags_mt0;
if $rows != 100 then
return -1

View File

@ -80,6 +80,7 @@ function runSimCaseOneByOne {
fi
done < $1
}
function runSimCaseOneByOnefq {
start=`sed -n "/$1-start/=" jenkins/basic.txt`
@ -155,6 +156,7 @@ function runPyCaseOneByOne {
fi
done < $1
}
function runPyCaseOneByOnefq() {
cd $tests_dir/pytest
if [[ $1 =~ full ]] ; then
@ -202,13 +204,37 @@ function runPyCaseOneByOnefq() {
rm -rf ../../sim/case.log
}
######################
# main entry
######################
unameOut="$(uname -s)"
case "${unameOut}" in
Linux*) OS=Linux;;
Darwin*) OS=Darwin;;
CYGWIN*) OS=Windows;;
*) OS=Unknown;;
esac
case "${OS}" in
Linux*) TAOSLIB=libtaos.so;;
Darwin*) TAOSLIB=libtaos.dylib;;
Windows*) TAOSLIB=taos.dll;;
Unknown) TAOSLIB="UNKNOWN:${unameOut}";;
esac
echo TAOSLIB is ${TAOSLIB}
totalFailed=0
totalPyFailed=0
totalJDBCFailed=0
totalUnitFailed=0
totalExampleFailed=0
corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
if [ "${OS}" == "Linux" ]; then
corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
fi
if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then
echo "### run TSIM test case ###"
cd $tests_dir/script
@ -290,11 +316,11 @@ if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$2" != "
fi
TOP_DIR=`pwd`
TAOSLIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1`
TAOSLIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1`
if [[ "$TAOSLIB_DIR" == *"$IN_TDINTERNAL"* ]]; then
LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4,5`
LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4,5`
else
LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4`
LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4`
fi
export LD_LIBRARY_PATH=$TOP_DIR/$LIB_DIR:$LD_LIBRARY_PATH
@ -499,7 +525,9 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" !=
echo -e "\n${RED} ### Total $totalExampleFailed examples failed! ### ${NC}"
fi
dohavecore 1
if [ "${OS}" == "Linux" ]; then
dohavecore 1
fi
fi

View File

@ -149,6 +149,7 @@ extern int32_t simScriptSucced;
extern int32_t simDebugFlag;
extern char tsScriptDir[];
extern bool simAsyncQuery;
extern bool abortExecution;
SScript *simParseScript(char *fileName);
SScript *simProcessCallOver(SScript *script);

View File

@ -645,8 +645,12 @@ bool simCreateRestFulConnect(SScript *script, char *user, char *pass) {
bool simCreateNativeConnect(SScript *script, char *user, char *pass) {
simCloseTaosdConnect(script);
void *taos = NULL;
taosMsleep(2000);
for (int32_t attempt = 0; attempt < 10; ++attempt) {
if (abortExecution) {
script->killed = true;
return false;
}
taos = taos_connect(NULL, user, pass, NULL, tsDnodeShellPort);
if (taos == NULL) {
simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL),
@ -697,6 +701,11 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
TAOS_RES *pSql = NULL;
for (int32_t attempt = 0; attempt < 10; ++attempt) {
if (abortExecution) {
script->killed = true;
return false;
}
simLogSql(rest, false);
pSql = taos_query(script->taos, rest);
ret = taos_errno(pSql);

View File

@ -21,10 +21,13 @@
bool simAsyncQuery = false;
bool simExecSuccess = false;
bool abortExecution = false;
void simHandleSignal(int32_t signo, void *sigInfo, void *context) {
simSystemCleanUp();
exit(1);
abortExecution = true;
// runningScript->killed = true;
// exit(1);
}
int32_t main(int32_t argc, char *argv[]) {
@ -60,6 +63,11 @@ int32_t main(int32_t argc, char *argv[]) {
return -1;
}
if (abortExecution) {
simError("execute abort");
return -1;
}
simScriptList[++simScriptPos] = script;
simExecuteScript(script);

View File

@ -159,9 +159,17 @@ void *simExecuteScript(void *inputScript) {
script = simScriptList[simScriptPos];
}
if (abortExecution) {
script->killed = true;
}
if (script->killed || script->linePos >= script->numOfLines) {
printf("killed ---------------------->\n");
script = simProcessCallOver(script);
if (script == NULL) break;
if (script == NULL) {
printf("abort now!\n");
break;
}
} else {
SCmdLine *line = &script->lines[script->linePos];
char * option = script->optionBuffer + line->optionOffset;