Merge branch 'develop' into fix/lhhuo/TD-4872-O3-core

tickduan 2021-06-24 18:05:28 +08:00
commit f4931b4dd6
48 changed files with 1663 additions and 1348 deletions

View File

@ -39,7 +39,7 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}")
# Set c++ compiler options
SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")

View File

@ -42,7 +42,7 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data
* [Data Writing](/taos-sql#insert): supports writing one or multiple records into one or multiple tables, as well as writing historical data
* [Data Querying](/taos-sql#select): supports filtering by time range and value, sorting, manual paging of query results, etc.
* [SQL Functions](/taos-sql#functions): supports various aggregation, selection, and computation functions such as avg, min, diff, etc.
* [Time-Dimension Aggregation](/taos-sql#aggregation): slices table data by time range, then aggregates it, for down-sampling
* [Window-Based Aggregation](/taos-sql#aggregation): slices table data by time range or other window criteria, then aggregates it, for down-sampling
* [Boundary Limits](/taos-sql#limitation): boundary limits on databases, tables, SQL, etc.
* [Error Codes](/taos-sql/error-code): TDengine 2.0 error codes and their corresponding decimal codes

View File

@ -476,9 +476,10 @@ Query OK, 1 row(s) in set (0.001091s)
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL (interval_val [, interval_offset])]
[SLIDING sliding_val]
[FILL fill_val]
[SESSION(ts_col, tol_val)]
[STATE_WINDOW(col)]
[INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]]
[FILL(fill_mod_and_val)]
[GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[SLIMIT limit_val [SOFFSET offset_val]]
@ -1284,39 +1285,45 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
Query OK, 3 row(s) in set (0.001046s)
```
## <a class="anchor" id="aggregation"></a>时间维度聚合
## <a class="anchor" id="aggregation"></a>按窗口切分聚合
TDengine支持按时间段进行聚合,可以将表中数据按照时间段进行切割后聚合生成结果,比如温度传感器每秒采集一次数据,但需查询每隔10分钟的温度平均值。这个聚合适合于降维(down sample)操作, 语法如下:
TDengine 支持按时间段等窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这类聚合适合于降维down sample操作语法如下:
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval [, offset])
[SLIDING sliding]
[FILL ({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
[SESSION(ts_col, tol_val)]
[STATE_WINDOW(col)]
[INTERVAL(interval [, offset]) [SLIDING sliding]]
[FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval [, offset])
[SLIDING sliding]
[FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]
[SESSION(ts_col, tol_val)]
[STATE_WINDOW(col)]
[INTERVAL(interval [, offset]) [SLIDING sliding]]
[FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
[GROUP BY tags]
```
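For instance, a minimal sketch of the 10-minute down-sampling scenario described above; the table sensor and its temperature column are illustrative only, not part of this commit:
```mysql
SELECT AVG(temperature) FROM sensor
  WHERE ts >= '2021-06-24 00:00:00.000' AND ts <= '2021-06-24 23:59:59.999'
  INTERVAL(10m);
```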
- The width of the aggregation window is specified by the keyword INTERVAL; the minimum interval is 10 milliseconds (10a), and an offset is supported, which must be smaller than the interval. Only aggregation and selection functions with a single output may be used in the aggregation query (count, avg, sum, stddev, leastsquares, percentile, min, max, first, last); functions with multi-row output (e.g. top, bottom, diff) and arithmetic expressions are not allowed.
- The WHERE clause can specify the start and end time of the query as well as other filter conditions.
- The SLIDING clause specifies the forward increment of the aggregation window.
- The FILL clause specifies the fill mode to use when data is missing in a time range. The fill modes are:
  1. No fill: NONE (the default fill mode).
  2. VALUE fill: fill with a fixed value, which must be specified, e.g. FILL(VALUE, 1.23).
  3. NULL fill: fill the data with NULL, e.g. FILL(NULL).
  4. PREV fill: fill the data with the previous non-NULL value, e.g. FILL(PREV).
  5. NEXT fill: fill the data with the next non-NULL value, e.g. FILL(NEXT).
- In an aggregation query, the function_list position accepts aggregation and selection functions that produce a single output (COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST); functions with multi-row output (e.g. TOP, BOTTOM, DIFF) and arithmetic expressions are not allowed.
- Filtering, aggregation, and the other operations are carried out independently within each window. Aggregation queries currently support three ways of dividing windows (concrete examples follow at the end of this section):
  1. Time window: the window width is specified by the keyword INTERVAL; the minimum interval is 10 milliseconds (10a), and an offset is supported (it must be smaller than the interval), i.e. the offset of the window division relative to "UTC instant 0". The SLIDING clause specifies the forward increment of the window, i.e. how far the window slides forward each step. When SLIDING equals INTERVAL, the sliding window becomes a tumbling window.
  2. State window: an integer (or boolean) or string value identifies the state of the device when a record was produced; consecutive records with the same state value belong to the same state window, and the window closes once the value changes. The column holding the state value is given as the parameter of the STATE_WINDOW clause.
  3. Session window: the ts_col parameter of the SESSION clause names the timestamp column. Whether two adjacent records belong to the same session is determined by the difference of their timestamps: if it is within tol_val, they belong to the same window; once the gap exceeds tol_val, a new window is opened automatically.
- The WHERE clause can specify the start and end time of the query as well as other filter conditions.
- The FILL clause specifies the fill mode to use when data is missing in a window. The fill modes are:
  1. No fill: NONE (the default fill mode).
  2. VALUE fill: fill with a fixed value, which must be specified, e.g. FILL(VALUE, 1.23).
  3. PREV fill: fill the data with the previous non-NULL value, e.g. FILL(PREV).
  4. NULL fill: fill the data with NULL, e.g. FILL(NULL).
  5. LINEAR fill: fill by linear interpolation based on the nearest non-NULL values before and after, e.g. FILL(LINEAR).
  6. NEXT fill: fill the data with the next non-NULL value, e.g. FILL(NEXT).
Notes:
  1. Using FILL may generate a large amount of filled output; be sure to specify the time range of the query. For each query, the system returns at most 10 million interpolated rows.
  2. In time-dimension aggregation, the returned results are strictly monotonically increasing along the time axis.
  3. If the query target is a super table, the aggregation functions are applied to the data of all tables under that super table that satisfy the value filters. If the query does not use GROUP BY, the returned results are strictly monotonically increasing along the time axis; if the query groups with GROUP BY, the results within each GROUP are not strictly monotonically increasing along the time axis.
Time aggregation is also frequently used with continuous queries; see [Continuous Query](https://www.taosdata.com/cn/documentation/advanced-features#continuous-query).
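To make the other two window types concrete, here are minimal sketches; the table dev(ts TIMESTAMP, status INT) is hypothetical, while the clause forms follow the grammar above:
```mysql
-- state window: one result row per run of identical status values
SELECT COUNT(*), FIRST(ts), status FROM dev STATE_WINDOW(status);
-- session window: rows whose timestamps differ by at most 30s fall into one session
SELECT COUNT(*), FIRST(ts) FROM dev SESSION(ts, 30s);
```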
@ -1326,7 +1333,7 @@ SELECT function_list FROM stb_name
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
For the data collected by smart meters, compute, in 10-minute stages over the past 24 hours, the average, maximum, and median of the current, plus the fitted line of the current trend over time. If a value is missing, fill it with the previous non-NULL value. The query is:
```mysql
SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters

View File

@ -210,7 +210,8 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta);
SColumn* tscColumnClone(const SColumn* src);
bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid);
void tscColumnCopy(SColumn* pDest, const SColumn* pSrc);
int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid);
SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid, SSchema* pSchema);
void tscColumnListDestroy(SArray* pColList);
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid);

View File

@ -1156,27 +1156,6 @@ static void insertBatchClean(STscStmt* pStmt) {
tfree(pCmd->insertParam.pTableNameList);
/*
STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
STableDataBlocks* pOneTableBlock = *p;
while (1) {
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
pOneTableBlock->size = sizeof(SSubmitBlk);
pBlocks->numOfRows = 0;
p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p);
if (p == NULL) {
break;
}
pOneTableBlock = *p;
}
*/
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
pCmd->insertParam.numOfTables = 0;
@ -1499,7 +1478,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sql);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true;
@ -1604,7 +1583,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
SHashObj* hashList = pCmd->insertParam.pTableBlockHashList;
pCmd->insertParam.pTableBlockHashList = NULL;
tscResetSqlCmd(pCmd, true);
tscResetSqlCmd(pCmd, false);
pCmd->insertParam.pTableBlockHashList = hashList;
}
@ -1663,7 +1642,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
} else {
if (pStmt->multiTbInsert) {
taosHashCleanup(pStmt->mtb.pTableHash);
pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true);
pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, false);
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
taosArrayDestroy(pStmt->mtb.tags);

View File

@ -65,7 +65,6 @@ static char* getAccountId(SSqlObj* pSql);
static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision);
static bool serializeExprListToVariant(SArray* pList, tVariant **dest, int16_t colType, uint8_t precision);
static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static char* cloneCurrentDBName(SSqlObj* pSql);
@ -156,78 +155,76 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType,
return ret;
}
tSqlExprItem* item = (tSqlExprItem *)taosArrayGet(pList, 0);
int32_t firstTokenType = item->pNode->token.type;
int32_t type = firstTokenType;
tSqlExpr* item = ((tSqlExprItem*)(taosArrayGet(pList, 0)))->pNode;
int32_t firstVarType = item->value.nType;
//nchar to binary and other xxint to bigint
toTSDBType(type);
if (colType != TSDB_DATA_TYPE_TIMESTAMP && !IS_UNSIGNED_NUMERIC_TYPE(colType)) {
if (type != colType && (type != TSDB_DATA_TYPE_BINARY || colType != TSDB_DATA_TYPE_NCHAR)) {
return false;
}
}
type = colType;
SBufferWriter bw = tbufInitWriter( NULL, false);
tbufEnsureCapacity(&bw, 512);
if (colType == TSDB_DATA_TYPE_TIMESTAMP) {
tbufWriteUint32(&bw, TSDB_DATA_TYPE_BIGINT);
} else {
tbufWriteUint32(&bw, colType);
}
tbufWriteInt32(&bw, (int32_t)(pList->size));
int32_t size = (int32_t)(pList->size);
tbufWriteUint32(&bw, type);
tbufWriteInt32(&bw, size);
for (int32_t i = 0; i < size; i++) {
for (int32_t i = 0; i < (int32_t)pList->size; i++) {
tSqlExpr* pSub = ((tSqlExprItem*)(taosArrayGet(pList, i)))->pNode;
tVariant* var = &pSub->value;
// check all the token type in expr list same or not
if (firstTokenType != pSub->token.type) {
if (firstVarType != var->nType) {
break;
}
toTSDBType(pSub->token.type);
tVariant var;
tVariantCreate(&var, &pSub->token);
if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type)) {
tbufWriteInt64(&bw, var.i64);
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
// ugly code, refactor later
if (IS_UNSIGNED_NUMERIC_TYPE(pSub->token.type) || IS_SIGNED_NUMERIC_TYPE(pSub->token.type)) {
tbufWriteUint64(&bw, var.i64);
if ((colType == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(colType))) {
if (var->nType != TSDB_DATA_TYPE_BOOL && !IS_SIGNED_NUMERIC_TYPE(var->nType)) {
break;
}
tbufWriteInt64(&bw, var->i64);
} else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) {
if (IS_SIGNED_NUMERIC_TYPE(var->nType) && IS_UNSIGNED_NUMERIC_TYPE(var->nType)) {
break;
}
tbufWriteUint64(&bw, var->u64);
} else if (colType == TSDB_DATA_TYPE_DOUBLE || colType == TSDB_DATA_TYPE_FLOAT) {
if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) {
tbufWriteDouble(&bw, (double)(var->i64));
} else if (var->nType == TSDB_DATA_TYPE_DOUBLE || var->nType == TSDB_DATA_TYPE_FLOAT){
tbufWriteDouble(&bw, var->dKey);
} else {
tVariantDestroy(&var);
break;
}
}
else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
tbufWriteDouble(&bw, var.dKey);
} else if (type == TSDB_DATA_TYPE_BINARY){
tbufWriteBinary(&bw, var.pz, var.nLen);
} else if (type == TSDB_DATA_TYPE_NCHAR) {
char *buf = (char *)calloc(1, (var.nLen + 1)*TSDB_NCHAR_SIZE);
if (tVariantDump(&var, buf, type, false) != TSDB_CODE_SUCCESS) {
} else if (colType == TSDB_DATA_TYPE_BINARY) {
if (var->nType != TSDB_DATA_TYPE_BINARY) {
break;
}
tbufWriteBinary(&bw, var->pz, var->nLen);
} else if (colType == TSDB_DATA_TYPE_NCHAR) {
if (var->nType != TSDB_DATA_TYPE_BINARY) {
break;
}
char *buf = (char *)calloc(1, (var->nLen + 1)*TSDB_NCHAR_SIZE);
if (tVariantDump(var, buf, colType, false) != TSDB_CODE_SUCCESS) {
free(buf);
tVariantDestroy(&var);
break;
}
tbufWriteBinary(&bw, buf, twcslen((wchar_t *)buf) * TSDB_NCHAR_SIZE);
free(buf);
} else if (type == TSDB_DATA_TYPE_TIMESTAMP) {
if (var.nType == TSDB_DATA_TYPE_BINARY) {
if (convertTimestampStrToInt64(&var, precision) < 0) {
tVariantDestroy(&var);
} else if (colType == TSDB_DATA_TYPE_TIMESTAMP) {
if (var->nType == TSDB_DATA_TYPE_BINARY) {
if (convertTimestampStrToInt64(var, precision) < 0) {
break;
}
tbufWriteInt64(&bw, var.i64);
} else if (var.nType == TSDB_DATA_TYPE_BIGINT) {
tbufWriteInt64(&bw, var.i64);
tbufWriteInt64(&bw, var->i64);
} else if (var->nType == TSDB_DATA_TYPE_BIGINT) {
tbufWriteInt64(&bw, var->i64);
} else {
break;
}
} else {
break;
}
tVariantDestroy(&var);
if (i == size - 1) { ret = true;}
}
if (i == (int32_t)(pList->size - 1)) { ret = true;}
}
if (ret == true) {
if ((*dst = calloc(1, sizeof(tVariant))) != NULL) {
tVariantCreateFromBinary(*dst, tbufGetData(&bw, false), tbufTell(&bw), TSDB_DATA_TYPE_BINARY);
@ -239,13 +236,6 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType,
return ret;
}
static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType) {
if (pVar->nType != TSDB_DATA_TYPE_BINARY) {
return -1;
}
SBufferReader br = tbufInitReader(pVar->pz, pVar->nLen, false);
return colType == TSDB_DATA_TYPE_NCHAR ? 0 : (tbufReadUint32(&br) == colType ? 0: -1);
}
static uint8_t convertOptr(SStrToken *pToken) {
switch (pToken->type) {
@ -1699,7 +1689,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32
// arithmetic expression always return result in the format of double float
pExprInfo->base.resBytes = sizeof(double);
pExprInfo->base.interBytes = sizeof(double);
pExprInfo->base.interBytes = 0;
pExprInfo->base.resType = TSDB_DATA_TYPE_DOUBLE;
pExprInfo->base.functionId = TSDB_FUNC_ARITHM;
@ -1934,14 +1924,14 @@ SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tab
index.columnIndex = colIndex;
}
return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, pSchema->bytes,
return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, 0,
(functionId == TSDB_FUNC_TAGPRJ));
}
SExprInfo* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag, int16_t colId) {
SExprInfo* pExpr = tscExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type,
pColSchema->bytes, colId, pColSchema->bytes, TSDB_COL_IS_TAG(flag));
pColSchema->bytes, colId, 0, TSDB_COL_IS_TAG(flag));
tstrncpy(pExpr->base.aliasName, pColSchema->name, sizeof(pExpr->base.aliasName));
tstrncpy(pExpr->base.token, pColSchema->name, sizeof(pExpr->base.token));
@ -2096,7 +2086,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
type = pSchema->type;
bytes = pSchema->bytes;
}
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pCmd), bytes, false);
tstrncpy(pExpr->base.aliasName, name, tListLen(pExpr->base.aliasName));
@ -2168,6 +2158,17 @@ static void updateLastScanOrderIfNeeded(SQueryInfo* pQueryInfo) {
}
}
static UNUSED_FUNC void updateFunctionInterBuf(SQueryInfo* pQueryInfo, bool superTable) {
size_t numOfExpr = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExpr; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t param = (int32_t)pExpr->base.param[0].i64;
getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, pExpr->base.functionId, param, &pExpr->base.resType, &pExpr->base.resBytes,
&pExpr->base.interBytes, 0, superTable);
}
}
int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult) {
STableMetaInfo* pTableMetaInfo = NULL;
int32_t functionId = pItem->pNode->functionId;
@ -2277,10 +2278,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_LEASTSQR: {
// 1. valid the number of parameters
int32_t numOfParams = (pItem->pNode->pParam == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->pParam);
// no parameters or more than one parameter for function
if (pItem->pNode->pParam == NULL ||
(functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && numOfParams != 1) ||
((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3)) {
/* no parameters or more than one parameter for function */
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@ -2294,14 +2296,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
// functions can not be applied to tags
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// 2. check if sql function can be applied on this column data type
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
if (!IS_NUMERIC_TYPE(pSchema->type)) {
@ -2330,11 +2333,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
}
// functions can not be applied to tags
if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false);
if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters
@ -2363,9 +2361,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
if (info.precision == TSDB_TIME_PRECISION_MILLI) {
tickPerSec /= 1000000;
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
tickPerSec /= 1000;
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
}
if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
@ -2598,7 +2596,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// set the first column ts for top/bottom query
SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd),
TSDB_KEYSIZE, false);
0, false);
tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName));
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
@ -3113,15 +3111,10 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return true;
}
if (pQueryInfo->groupbyExpr.numOfGroupCols != 1) {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return true;
} else {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return true;
}
}
} else if (tscIsSessionWindowQuery(pQueryInfo)) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
@ -3371,11 +3364,6 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->pParam, &pVal, colType, timePrecision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
if (validateParamOfRelationIn(pVal, colType) != TSDB_CODE_SUCCESS) {
tVariantDestroy(pVal);
free(pVal);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen + 1);
pColumnFilter->len = pVal->nLen;
pColumnFilter->filterstr = 1;
@ -3675,7 +3663,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid)) {
if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema1->colId, pTableMetaInfo->pTableMeta->id.uid) < 0) {
tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
@ -3707,7 +3695,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMeta);
if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid)) {
if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema2->colId, pTableMeta->id.uid) < 0) {
tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
@ -4831,6 +4819,12 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
if (taosArrayGetSize(pQueryInfo->pUpstream) > 0 ) {
if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pTimewindow, TK_AND)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
}
// 6. join condition
if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
@ -7869,11 +7863,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (pTableMeta->tableInfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->window.skey = pQueryInfo->window.skey / 1000;
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
}
}
// validate the interval info
@ -7905,6 +7894,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return code;
}
// updateFunctionInterBuf(pQueryInfo, false);
updateLastScanOrderIfNeeded(pQueryInfo);
} else {
pQueryInfo->command = TSDB_SQL_SELECT;
@ -8033,6 +8023,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
updateLastScanOrderIfNeeded(pQueryInfo);
tscFieldInfoUpdateOffset(pQueryInfo);
// updateFunctionInterBuf(pQueryInfo, isSTable);
if ((code = validateFillNode(pCmd, pQueryInfo, pSqlNode)) != TSDB_CODE_SUCCESS) {
return code;
@ -8172,7 +8163,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
int32_t colType = -1;
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (pCols != NULL && taosArrayGetSize(pCols) > 0) {
SColIndex* idx = taosArrayGet(pCols, 0);
SColIndex* idx = taosArrayGet(pCols, taosArrayGetSize(pCols) - 1);
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
if (pSchema != NULL) {
colType = pSchema->type;

View File

@ -795,6 +795,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
pSqlExpr->colBytes = htons(pExpr->colBytes);
pSqlExpr->resType = htons(pExpr->resType);
pSqlExpr->resBytes = htons(pExpr->resBytes);
pSqlExpr->interBytes = htonl(pExpr->interBytes);
pSqlExpr->functionId = htons(pExpr->functionId);
pSqlExpr->numOfParams = htons(pExpr->numOfParams);
pSqlExpr->resColId = htons(pExpr->resColId);
@ -1495,7 +1496,9 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg = (char *)pSchema;
pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen);
memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
if (pAlterInfo->tagData.dataLen > 0) {
memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
}
pMsg += pAlterInfo->tagData.dataLen;
msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg);

View File

@ -512,6 +512,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
pSub->pSql = pSql;
pSql->pSubscription = pSub;
pSub->lastSyncTime = 0;
// no table list now, force to update it
tscDebug("begin table synchronization");

View File

@ -103,13 +103,6 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
pthread_mutex_lock(&subState->mutex);
// bool done = allSubqueryDone(pParentSql);
// if (done) {
// tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d all subs already done", pParentSql->self, pSql->self, idx);
// pthread_mutex_unlock(&subState->mutex);
// return false;
// }
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index:%d state set to 1", pParentSql->self, pSql->self, idx);
subState->states[idx] = 1;
@ -2389,8 +2382,14 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SColumn *pCol = taosArrayGetP(pColList, i);
if (pCol->info.flist.numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered.
SColumn *p = tscColumnClone(pCol);
taosArrayPush(pNewQueryInfo->colList, &p);
int32_t index1 = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
if (index1 >= 0) {
SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1);
tscColumnCopy(x, pCol);
} else {
SColumn *p = tscColumnClone(pCol);
taosArrayPush(pNewQueryInfo->colList, &p);
}
}
}

View File

@ -1332,7 +1332,7 @@ void tscFreeSubobj(SSqlObj* pSql) {
tscDebug("0x%"PRIx64" start to free sub SqlObj, numOfSub:%d", pSql->self, pSql->subState.numOfSub);
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
tscDebug("0x%"PRIx64" free sub SqlObj:%p, index:%d", pSql->self, pSql->pSubs[i], i);
tscDebug("0x%"PRIx64" free sub SqlObj:0x%"PRIx64", index:%d", pSql->self, pSql->pSubs[i]->self, i);
taos_free_result(pSql->pSubs[i]);
pSql->pSubs[i] = NULL;
}
@ -1784,7 +1784,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName),
tscDebug("0x%"PRIx64" name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
@ -2270,18 +2270,14 @@ int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy) {
return 0;
}
bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) {
// ignore the tbname columnIndex to be inserted into source list
if (columnIndex < 0) {
return false;
}
// ignore the tbname columnIndex to be inserted into source list
int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid) {
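// note: now returns the index of the matching column in pColumnList, or -1 if no column with this colId and uid exists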
size_t numOfCols = taosArrayGetSize(pColumnList);
int32_t i = 0;
while (i < numOfCols) {
SColumn* pCol = taosArrayGetP(pColumnList, i);
if ((pCol->columnIndex != columnIndex) || (pCol->tableUid != uid)) {
if ((pCol->info.colId != columnId) || (pCol->tableUid != uid)) {
++i;
continue;
} else {
@ -2290,10 +2286,10 @@ bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) {
}
if (i >= numOfCols || numOfCols == 0) {
return false;
return -1;
}
return true;
return i;
}
void tscExprAssign(SExprInfo* dst, const SExprInfo* src) {
@ -2379,13 +2375,7 @@ SColumn* tscColumnClone(const SColumn* src) {
return NULL;
}
dst->columnIndex = src->columnIndex;
dst->tableUid = src->tableUid;
dst->info.flist.numOfFilters = src->info.flist.numOfFilters;
dst->info.flist.filterInfo = tFilterInfoDup(src->info.flist.filterInfo, src->info.flist.numOfFilters);
dst->info.type = src->info.type;
dst->info.colId = src->info.colId;
dst->info.bytes = src->info.bytes;
tscColumnCopy(dst, src);
return dst;
}
@ -2394,6 +2384,18 @@ static void tscColumnDestroy(SColumn* pCol) {
free(pCol);
}
void tscColumnCopy(SColumn* pDest, const SColumn* pSrc) {
destroyFilterInfo(&pDest->info.flist);
pDest->columnIndex = pSrc->columnIndex;
pDest->tableUid = pSrc->tableUid;
pDest->info.flist.numOfFilters = pSrc->info.flist.numOfFilters;
pDest->info.flist.filterInfo = tFilterInfoDup(pSrc->info.flist.filterInfo, pSrc->info.flist.numOfFilters);
pDest->info.type = pSrc->info.type;
pDest->info.colId = pSrc->info.colId;
pDest->info.bytes = pSrc->info.bytes;
}
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid) {
assert(src != NULL && dst != NULL);

View File

@ -98,7 +98,7 @@ TEST(testCase, parse_time) {
taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999);
// int64_t k = timezone;
char t42[] = "1997-1-1T0:0:0.999999999Z";
taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND);

View File

@ -303,6 +303,8 @@ static int32_t dnodeInitStorage() {
dnodeCheckDataDirOpenned(tsDnodeDir);
taosGetDisk();
taosPrintDiskInfo();
dInfo("dnode storage is initialized at %s", tsDnodeDir);
return 0;
}

View File

@ -100,7 +100,7 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_TIME_PRECISION_MICRO_STR "us"
#define TSDB_TIME_PRECISION_NANO_STR "ns"
#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))
#define TSDB_TICK_PER_SECOND(precision) ((int64_t)((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L)))
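/* note: 1e3L, 1e6L and 1e9L are long-double literals, so without the int64_t cast this macro evaluates to a floating-point value; the cast presumably keeps tick arithmetic integral */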
#define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#define T_APPEND_MEMBER(dst, ptr, type, member) \

View File

@ -90,7 +90,7 @@ enum TEST_MODE {
#define OPT_ABORT 1 /* abort */
#define STRING_LEN 60000
#define MAX_PREPARED_RAND 1000000
#define MAX_FILE_NAME_LEN 256
#define MAX_FILE_NAME_LEN 128
#define MAX_SAMPLES_ONCE_FROM_FILE 10000
#define MAX_NUM_DATATYPE 10

View File

@ -101,6 +101,8 @@ static int32_t mnodeDnodeActionInsert(SSdbRow *pRow) {
pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED;
}
pDnode->customScore = 0;
dnodeUpdateEp(pDnode->dnodeId, pDnode->dnodeEp, pDnode->dnodeFqdn, &pDnode->dnodePort);
mnodeUpdateDnodeEps();
@ -1296,4 +1298,4 @@ int32_t mnodeCompactDnodes() {
mInfo("end to compact dnodes table...");
return 0;
}

View File

@ -1068,7 +1068,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
pStable->info.tableId = strdup(pCreate->tableName);
pStable->info.type = TSDB_SUPER_TABLE;
pStable->createdTime = taosGetTimestampMs();
pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
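// note (assumption): masking the time value to its low 40 bits keeps the following 24-bit left shift within the 64-bit uid, which the unmasked shift above could overflow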
int64_t x = (us&0x000000FFFFFFFFFF);
x = x<<24;
pStable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
pStable->sversion = 0;
pStable->tversion = 0;
pStable->numOfColumns = numOfColumns;
@ -1740,16 +1742,22 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
static int32_t calculateVgroupMsgLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) {
static int32_t doGetVgroupInfoLength(char* name) {
SSTableObj *pTable = mnodeGetSuperTable(name);
int32_t len = 0;
if (pTable != NULL && pTable->vgHash != NULL) {
len = (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg));
}
mnodeDecTableRef(pTable);
return len;
}
static int32_t getVgroupInfoLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) {
int32_t contLen = sizeof(SSTableVgroupRspMsg) + 32 * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg);
for (int32_t i = 0; i < numOfTable; ++i) {
char *stableName = (char *)pInfo + sizeof(SSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i;
SSTableObj *pTable = mnodeGetSuperTable(stableName);
if (pTable != NULL && pTable->vgHash != NULL) {
contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg));
}
mnodeDecTableRef(pTable);
contLen += doGetVgroupInfoLength(stableName);
}
return contLen;
@ -1820,7 +1828,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
int32_t numOfTable = htonl(pInfo->numOfTables);
// calculate the required space.
int32_t contLen = calculateVgroupMsgLength(pInfo, numOfTable);
int32_t contLen = getVgroupInfoLength(pInfo, numOfTable);
SSTableVgroupRspMsg *pRsp = rpcMallocCont(contLen);
if (pRsp == NULL) {
return TSDB_CODE_MND_OUT_OF_MEMORY;
@ -2860,6 +2868,27 @@ static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg) {
}
}
static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray* pList, int32_t* totalMallocLen, int32_t numOfVgroupList) {
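// note: doubles totalMallocLen until the extra vgroup info fits, then reallocates the RPC response buffer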
int32_t len = 0;
for (int32_t i = 0; i < numOfVgroupList; ++i) {
char *name = taosArrayGetP(pList, i);
len += doGetVgroupInfoLength(name);
}
if (len + pMultiMeta->contLen > (*totalMallocLen)) {
while (len + pMultiMeta->contLen > (*totalMallocLen)) {
(*totalMallocLen) *= 2;
}
pMultiMeta = rpcReallocCont(pMultiMeta, *totalMallocLen);
if (pMultiMeta == NULL) {
return NULL;
}
}
return pMultiMeta;
}
static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
SMultiTableInfoMsg *pInfo = pMsg->rpcMsg.pCont;
@ -2950,8 +2979,6 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
}
}
char* msg = (char*) pMultiMeta + pMultiMeta->contLen;
// add the additional super table names that need the vgroup info
for(;t < num; ++t) {
taosArrayPush(pList, &nameList[t]);
@ -2961,6 +2988,13 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
int32_t numOfVgroupList = (int32_t) taosArrayGetSize(pList);
pMultiMeta->numOfVgroup = htonl(numOfVgroupList);
pMultiMeta = ensureMsgBufferSpace(pMultiMeta, pList, &totalMallocLen, numOfVgroupList);
if (pMultiMeta == NULL) {
code = TSDB_CODE_MND_OUT_OF_MEMORY;
goto _end;
}
char* msg = (char*) pMultiMeta + pMultiMeta->contLen;
for(int32_t i = 0; i < numOfVgroupList; ++i) {
char* name = taosArrayGetP(pList, i);

View File

@ -27,18 +27,20 @@ typedef struct {
} SysDiskSize;
int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize);
void taosGetSystemInfo();
bool taosGetProcIO(float *readKB, float *writeKB);
bool taosGetBandSpeed(float *bandSpeedKb);
void taosGetDisk();
bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage);
bool taosGetProcMemory(float *memoryUsedMB);
bool taosGetSysMemory(float *memoryUsedMB);
void taosPrintOsInfo();
int taosSystem(const char *cmd);
void taosKillSystem();
bool taosGetSystemUid(char *uid);
char * taosGetCmdlineByPID(int pid);
void taosGetSystemInfo();
bool taosGetProcIO(float *readKB, float *writeKB);
bool taosGetBandSpeed(float *bandSpeedKb);
void taosGetDisk();
bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ;
bool taosGetProcMemory(float *memoryUsedMB) ;
bool taosGetSysMemory(float *memoryUsedMB);
void taosPrintOsInfo();
void taosPrintDiskInfo();
int taosSystem(const char * cmd) ;
void taosKillSystem();
bool taosGetSystemUid(char *uid);
char *taosGetCmdlineByPID(int pid);
void taosSetCoreDump();

View File

@ -136,9 +136,6 @@ void taosPrintOsInfo() {
// uInfo(" os openMax: %" PRId64, tsOpenMax);
// uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
@ -154,6 +151,14 @@ void taosPrintOsInfo() {
uInfo("==================================");
}
void taosPrintDiskInfo() {
uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo("==================================");
}
void taosKillSystem() {
uError("function taosKillSystem, exit!");
exit(0);

View File

@ -506,9 +506,6 @@ void taosPrintOsInfo() {
uInfo(" os openMax: %" PRId64, tsOpenMax);
uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
@ -523,6 +520,14 @@ void taosPrintOsInfo() {
uInfo(" os machine: %s", buf.machine);
}
void taosPrintDiskInfo() {
uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo("==================================");
}
void taosKillSystem() {
// SIGINT
uInfo("taosd will shut down soon");

View File

@ -205,10 +205,15 @@ void taosGetSystemInfo() {
void taosPrintOsInfo() {
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
uInfo("==================================");
}
void taosPrintDiskInfo() {
uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
uInfo("==================================");
}

View File

@ -204,7 +204,7 @@ typedef struct SAggFunctionInfo {
bool (*init)(SQLFunctionCtx *pCtx); // setup the execute environment
void (*xFunction)(SQLFunctionCtx *pCtx); // blocks version function
void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function
// void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function
// finalizer must be called after all xFunction have been executed to generate the final result.
void (*xFinalize)(SQLFunctionCtx *pCtx);

View File

@ -295,7 +295,7 @@ enum OPERATOR_TYPE_E {
OP_MultiTableAggregate = 14,
OP_MultiTableTimeInterval = 15,
OP_DummyInput = 16, //TODO remove it after fully refactor.
OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream.
OP_GlobalAggregate = 18, // global merge for the multi-way data sources.
OP_Filter = 19,
OP_Distinct = 20,

View File

@ -470,7 +470,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
//////////////////////// The SELECT statement /////////////////////////////////
%type select {SSqlNode*}
%destructor select {destroySqlNode($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N);
}

File diff suppressed because it is too large

View File

@ -161,7 +161,7 @@ static void setResultOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResul
int32_t numOfCols, int32_t* rowCellInfoOffset);
void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset);
static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId);
static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx);
static void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColIndex* pColIndex);
@ -309,7 +309,7 @@ static bool isProjQuery(SQueryAttr *pQueryAttr) {
return true;
}
static bool hasNullRv(SColIndex* pColIndex, SDataStatis *pStatis) {
static bool hasNull(SColIndex* pColIndex, SDataStatis *pStatis) {
if (TSDB_COL_IS_TAG(pColIndex->flag) || TSDB_COL_IS_UD_COL(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return false;
}
@ -708,12 +708,13 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc
static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset,
int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) {
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
bool hasPrev = pCtx[0].preAggVals.isSet;
bool hasAggregates = pCtx[0].preAggVals.isSet;
for (int32_t k = 0; k < numOfOutput; ++k) {
pCtx[k].size = forwardStep;
pCtx[k].startTs = pWin->skey;
// keep it temporarily
char* start = pCtx[k].pInput;
int32_t pos = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? offset : offset - (forwardStep - 1);
@ -725,20 +726,18 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
pCtx[k].ptsList = &tsCol[pos];
}
int32_t functionId = pCtx[k].functionId;
// not a whole block involved in query processing, statistics data can not be used
// NOTE: the original value of isSet have been changed here
if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
pCtx[k].preAggVals.isSet = false;
}
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
aAggs[functionId].xFunction(&pCtx[k]);
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) {
aAggs[pCtx[k].functionId].xFunction(&pCtx[k]);
}
// restore it
pCtx[k].preAggVals.isSet = hasPrev;
pCtx[k].preAggVals.isSet = hasAggregates;
pCtx[k].pInput = start;
}
}
@ -847,9 +846,6 @@ static void setNotInterpoWindowKey(SQLFunctionCtx* pCtx, int32_t numOfOutput, in
}
}
// window start key interpolation
static void saveDataBlockLastRow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pDataBlockInfo, SArray* pDataBlock,
int32_t rowIndex) {
if (pDataBlock == NULL) {
@ -975,10 +971,9 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
for (int32_t k = 0; k < pOperator->numOfOutput; ++k) {
int32_t functionId = pCtx[k].functionId;
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) {
pCtx[k].startTs = startTs;// this can be set during create the struct
aAggs[functionId].xFunction(&pCtx[k]);
aAggs[pCtx[k].functionId].xFunction(&pCtx[k]);
}
}
}
@ -1287,6 +1282,15 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
return;
}
int64_t* tsList = NULL;
SColumnInfoData* pFirstColData = taosArrayGet(pSDataBlock->pDataBlock, 0);
if (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
tsList = (int64_t*) pFirstColData->pData;
}
STimeWindow w = TSWINDOW_INITIALIZER;
int32_t num = 0;
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
char* val = ((char*)pColInfoData->pData) + bytes * j;
if (isNull(val, type)) {
@ -1294,33 +1298,59 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
}
// Compare with the previous row of this column, and do not set the output buffer again if they are identical.
if (pInfo->prevData == NULL || (memcmp(pInfo->prevData, val, bytes) != 0)) {
if (pInfo->prevData == NULL) {
pInfo->prevData = malloc(bytes);
}
if (pInfo->prevData == NULL) {
pInfo->prevData = malloc(bytes);
memcpy(pInfo->prevData, val, bytes);
num++;
continue;
}
if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes);
if (IS_VAR_DATA_TYPE(type)) {
int32_t len = varDataLen(val);
if(len == varDataLen(pInfo->prevData) && memcmp(varDataVal(pInfo->prevData), varDataVal(val), len) == 0) {
num++;
continue;
}
int32_t ret =
setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
} else {
if (memcmp(pInfo->prevData, val, bytes) == 0) {
num++;
continue;
}
}
// todo opt perf
for (int32_t k = 0; k < pOperator->numOfOutput; ++k) {
pInfo->binfo.pCtx[k].size = 1;
int32_t functionId = pInfo->binfo.pCtx[k].functionId;
if (functionNeedToExecute(pRuntimeEnv, &pInfo->binfo.pCtx[k], functionId)) {
aAggs[functionId].xFunctionF(&pInfo->binfo.pCtx[k], j);
}
if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData,
bytes);
}
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes,
item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, j - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
num = 1;
memcpy(pInfo->prevData, val, bytes);
}
if (num > 0) {
char* val = ((char*)pColInfoData->pData) + bytes * (pSDataBlock->info.rows - num);
memcpy(pInfo->prevData, val, bytes);
if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val,
bytes);
}
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes,
item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
}
}
@ -1394,9 +1424,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
}
static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
int64_t v = -1;
GET_TYPED_DATA(v, int64_t, type, pData);
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
if (IS_VAR_DATA_TYPE(type)) {
if (pResultRow->key == NULL) {
pResultRow->key = malloc(varDataTLen(pData));
varDataCopy(pResultRow->key, pData);
@ -1404,6 +1432,9 @@ static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
assert(memcmp(pResultRow->key, pData, varDataTLen(pData)) == 0);
}
} else {
int64_t v = -1;
GET_TYPED_DATA(v, int64_t, type, pData);
pResultRow->win.skey = v;
pResultRow->win.ekey = v;
}
@ -1419,7 +1450,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasic
// not assign result buffer yet, add new result buffer, TODO remove it
char* d = pData;
int16_t len = bytes;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
if (IS_VAR_DATA_TYPE(type)) {
d = varDataVal(pData);
len = varDataLen(pData);
}
@ -1461,11 +1492,12 @@ static int32_t getGroupbyColumnIndex(SGroupbyExpr *pGroupbyExpr, SSDataBlock* pD
return -1;
}
static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) {
static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
// in case of timestamp column, always generate results.
int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TS) {
return true;
}
@ -1505,7 +1537,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde
pCtx->preAggVals.isSet = false;
}
pCtx->hasNull = hasNullRv(pColIndex, pStatis);
pCtx->hasNull = hasNull(pColIndex, pStatis);
// set the statistics data for primary time stamp column
if (pCtx->functionId == TSDB_FUNC_SPREAD && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
@ -3478,6 +3510,7 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag,
return 0;
}
// TODO refactor: this function should be merged with the setParamForStableStddevByColData function.
void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExprInfo) {
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
@ -4683,8 +4716,7 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo));
pInfo->resultRowFactor =
(int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery,
false));
(int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false));
pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx
@ -5256,6 +5288,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
doApplyFunctions(pRuntimeEnv, pBInfo->pCtx, &pInfo->curWindow, pInfo->start, pInfo->numOfRows, tsList,
pSDataBlock->info.rows, pOperator->numOfOutput);
}
static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@ -6268,7 +6301,7 @@ static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) {
return true;
}
static UNUSED_FUNC bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput,
static bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput,
SColumnInfo* pTagCols, void* pMsg) {
int32_t numOfTotal = pTableInfo->numOfCols + pTableInfo->numOfTags;
if (pTableInfo->numOfCols < 0 || pTableInfo->numOfTags < 0 || numOfTotal > TSDB_MAX_COLUMNS) {
@ -6453,6 +6486,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pExprMsg->resType = htons(pExprMsg->resType);
pExprMsg->resBytes = htons(pExprMsg->resBytes);
pExprMsg->interBytes = htonl(pExprMsg->interBytes);
pExprMsg->functionId = htons(pExprMsg->functionId);
pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
@ -6660,41 +6694,41 @@ _cleanup:
return code;
}
int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
if (filterNum <= 0) {
return TSDB_CODE_SUCCESS;
}
*dst = calloc(filterNum, sizeof(*src));
if (*dst == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
memcpy(*dst, src, sizeof(*src) * filterNum);
for (int32_t i = 0; i < filterNum; i++) {
if ((*dst)[i].filterstr && (*dst)[i].len > 0) { // note: index via (*dst)[i]; dst[i] would read past the single out-pointer
void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
if (pz == NULL) {
if (i == 0) {
free(*dst);
} else {
freeColumnFilterInfo(*dst, i);
}
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
memcpy(pz, (void *)src->pz, (size_t)src->len + 1);
(*dst)[i].pz = (int64_t)pz;
}
}
return TSDB_CODE_SUCCESS;
}
int32_t buildArithmeticExprFromMsg(SExprInfo *pExprInfo, void *pQueryMsg) {
qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg);
@ -6753,8 +6787,8 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
for (int32_t i = 0; i < numOfOutput; ++i) {
pExprs[i].base = *pExprMsg[i];
memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param));
for (int32_t j = 0; j < pExprMsg[i]->numOfParams; ++j) {
tVariantAssign(&pExprs[i].base.param[j], &pExprMsg[i]->param[j]);
}
@ -6829,6 +6863,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
return TSDB_CODE_QRY_INVALID_MSG;
}
// todo remove it
if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes,
&pExprs[i].base.interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) {
tfree(pExprs);
@ -7295,11 +7330,16 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo*
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
STSBuf *pTsBuf = NULL;
if (pTsBufInfo->tsLen > 0) { // open new file to save the result
char* tsBlock = start + pTsBufInfo->tsOffset;
pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pTsBufInfo->tsNumOfBlocks, pTsBufInfo->tsLen, pTsBufInfo->tsOrder,
pQueryAttr->vgId);
if (pTsBuf == NULL) {
code = TSDB_CODE_QRY_NO_DISKSPACE;
goto _error;
}
tsBufResetPos(pTsBuf);
bool ret = tsBufNextPos(pTsBuf);
UNUSED(ret);

View File

@ -2,6 +2,7 @@
#include "taoserror.h"
#include "tscompression.h"
#include "tutil.h"
#include "queryLog.h"
static int32_t getDataStartOffset();
static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo);
@ -633,10 +634,15 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) {
int32_t r = fseek(pTSBuf->f, 0, SEEK_SET);
if (r != 0) {
qError("fseek failed, errno:%d", errno);
return -1;
}
size_t ws = fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
if (ws != 1) {
qError("ts update header fwrite failed, size:%d, expected size:%d", (int32_t)ws, (int32_t)sizeof(STSBufFileHeader));
return -1;
}
fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
return 0;
}
@ -853,9 +859,17 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, pBlockInfo);
int32_t ret = fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET);
UNUSED(ret);
if (ret == -1) {
qError("fseek failed, errno:%d", errno);
tsBufDestroy(pTSBuf);
return NULL;
}
size_t sz = fwrite((void*)pData, 1, len, pTSBuf->f);
UNUSED(sz);
if (sz != len) {
qError("ts data fwrite failed, write size:%d, expected size:%d", (int32_t)sz, len);
tsBufDestroy(pTSBuf);
return NULL;
}
pTSBuf->fileSize += len;
pTSBuf->tsOrder = order;
@ -863,9 +877,16 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
STSBufFileHeader header = {
.magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder};
STSBufUpdateHeader(pTSBuf, &header);
if (STSBufUpdateHeader(pTSBuf, &header) < 0) {
tsBufDestroy(pTSBuf);
return NULL;
}
taosFsync(fileno(pTSBuf->f));
if (taosFsync(fileno(pTSBuf->f)) == -1) {
qError("fsync failed, errno:%d", errno);
tsBufDestroy(pTSBuf);
return NULL;
}
return pTSBuf;
}

View File

@ -10,6 +10,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wwrite-strings"
#pragma GCC diagnostic ignored "-Wunused-function"
typedef struct ResultObj {
int32_t numOfResult;

View File

@ -5,6 +5,10 @@
#include "taos.h"
#include "qHistogram.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
namespace {
void doHistogramAddTest() {
SHistogramInfo* pHisto = NULL;

View File

@ -6,6 +6,9 @@
#include "qAggMain.h"
#include "tcompare.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
TEST(testCase, patternMatchTest) {
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;

View File

@ -7,6 +7,9 @@
#include "qPercentile.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
namespace {
tMemBucket *createBigIntDataBucket(int32_t start, int32_t end) {
tMemBucket *pBucket = tMemBucketCreate(sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, start, end);

View File

@ -6,6 +6,9 @@
#include "taos.h"
#include "tsdb.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
namespace {
// simple test
void simpleTest() {

View File

@ -9,6 +9,10 @@
#include "ttoken.h"
#include "tutil.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
namespace {
/**
*

View File

@ -6,14 +6,17 @@
#include "taos.h"
#include "tsdb.h"
#pragma GCC diagnostic ignored "-Wwrite-strings"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wsign-compare"
#include "../../client/inc/tscUtil.h"
#include "tutil.h"
#include "tvariant.h"
#include "ttokendef.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wwrite-strings"
namespace {
int32_t testValidateName(char* name) {
SStrToken token = {0};

View File

@ -3364,7 +3364,7 @@ static bool tableFilterFp(const void* pNode, void* param) {
GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val);
return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
}
else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_DOUBLE) {
else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
double v;
GET_TYPED_DATA(v, double, pInfo->sch.type, val);
return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));

View File

@ -235,6 +235,8 @@ python3 ./test.py -f query/queryTscomputWithNow.py
python3 ./test.py -f query/computeErrorinWhere.py
python3 ./test.py -f query/queryTsisNull.py
python3 ./test.py -f query/subqueryFilter.py
python3 ./test.py -f query/nestedQuery/queryInterval.py
python3 ./test.py -f query/queryStateWindow.py
#stream
@ -325,6 +327,7 @@ python3 ./test.py -f query/queryGroupbySort.py
python3 ./test.py -f functions/queryTestCases.py
python3 ./test.py -f functions/function_stateWindow.py
python3 ./test.py -f functions/function_derivative.py
python3 ./test.py -f functions/function_irate.py
python3 ./test.py -f insert/unsignedInt.py
python3 ./test.py -f insert/unsignedBigint.py

View File

@ -0,0 +1,228 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.rowNum = 100
self.ts = 1537146000000
self.ts1 = 1537146000000000
def run(self):
# db precision ms
tdSql.prepare()
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20), tag1 int)''')
tdSql.execute("create table test1 using test tags('beijing', 10)")
tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")
for i in range(self.rowNum):
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
tdSql.execute("insert into gtest1 values(1537146000000,0);")
tdSql.execute("insert into gtest1 values(1537146001100,1.2);")
tdSql.execute("insert into gtest2 values(1537146001001,1);")
tdSql.execute("insert into gtest2 values(1537146001101,2);")
tdSql.execute("insert into gtest3 values(1537146001101,2);")
tdSql.execute("insert into gtest4(ts) values(1537146001101);")
tdSql.execute("insert into gtest5 values(1537146001002,4);")
tdSql.execute("insert into gtest5 values(1537146002202,4);")
tdSql.execute("insert into gtest6 values(1537146000000,5);")
tdSql.execute("insert into gtest6 values(1537146001000,2);")
tdSql.execute("insert into gtest7 values(1537146001000,1);")
tdSql.execute("insert into gtest7 values(1537146008000,2);")
tdSql.execute("insert into gtest7 values(1537146009000,6);")
tdSql.execute("insert into gtest7 values(1537146012000,3);")
tdSql.execute("insert into gtest7 values(1537146015000,3);")
tdSql.execute("insert into gtest7 values(1537146017000,1);")
tdSql.execute("insert into gtest7 values(1537146019000,3);")
tdSql.execute("insert into gtest8 values(1537146000002,4);")
tdSql.execute("insert into gtest8 values(1537146002202,4);")
# irate verification
tdSql.query("select irate(col1) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col1) from test1 interval(10s);")
tdSql.checkData(0, 1, 1)
tdSql.query("select irate(col1) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col2) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col3) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col4) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col5) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col6) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col11) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col12) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col13) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col14) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col2) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col2) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col1) from gtest1;")
tdSql.checkData(0, 0, 1.2/1.1)
tdSql.query("select irate(col1) from gtest2;")
tdSql.checkData(0, 0, 10)
tdSql.query("select irate(col1) from gtest3;")
tdSql.checkData(0, 0, 0)
tdSql.query("select irate(col1) from gtest4;")
tdSql.checkRows(0)
tdSql.query("select irate(col1) from gtest5;")
tdSql.checkData(0, 0, 0)
tdSql.query("select irate(col1) from gtest6;")
tdSql.checkData(0, 0, 2)
tdSql.query("select irate(col1) from gtest7;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
tdSql.checkData(1, 1, 4)
tdSql.checkData(2, 1, 0)
tdSql.checkData(3, 1, 1)
tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
tdSql.checkData(1, 1, 0)
tdSql.checkData(2, 1, 4)
tdSql.checkData(3, 1, 0)
#error
tdSql.error("select irate(col1) from test")
tdSql.error("select irate(ts) from test1")
tdSql.error("select irate(col7) from test1")
tdSql.error("select irate(col8) from test1")
tdSql.error("select irate(col9) from test1")
tdSql.error("select irate(loc) from test1")
tdSql.error("select irate(tag1) from test1")
# use db1 precision us
tdSql.execute("create database db1 precision 'us' keep 3650 UPDATE 1")
tdSql.execute("use db1 ")
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute("create table test1 using test tags('beijing')")
tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest9 (ts timestamp, col1 tinyint)")
for i in range(self.rowNum):
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts1 + i*1000000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
tdSql.execute("insert into gtest1 values(1537146000000000,0);")
tdSql.execute("insert into gtest1 values(1537146001100000,1.2);")
tdSql.execute("insert into gtest2 values(1537146001001000,1);")
tdSql.execute("insert into gtest2 values(1537146001101000,2);")
tdSql.execute("insert into gtest3 values(1537146001101000,2);")
tdSql.execute("insert into gtest4(ts) values(1537146001101000);")
tdSql.execute("insert into gtest5 values(1537146001002000,4);")
tdSql.execute("insert into gtest5 values(1537146002202000,4);")
tdSql.execute("insert into gtest6 values(1537146000000000,5);")
tdSql.execute("insert into gtest6 values(1537146001000000,2);")
tdSql.execute("insert into gtest7 values(1537146001000000,1);")
tdSql.execute("insert into gtest7 values(1537146008000000,2);")
tdSql.execute("insert into gtest7 values(1537146009000000,6);")
tdSql.execute("insert into gtest7 values(1537146012000000,3);")
tdSql.execute("insert into gtest7 values(1537146015000000,3);")
tdSql.execute("insert into gtest7 values(1537146017000000,1);")
tdSql.execute("insert into gtest7 values(1537146019000000,3);")
tdSql.execute("insert into gtest8 values(1537146000002000,3);")
tdSql.execute("insert into gtest8 values(1537146001003000,4);")
tdSql.execute("insert into gtest9 values(1537146000000000,4);")
tdSql.execute("insert into gtest9 values(1537146000000001,5);")
# irate verification
tdSql.query("select irate(col1) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col1) from test1 interval(10s);")
tdSql.checkData(0, 1, 1)
tdSql.query("select irate(col1) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col1) from gtest1;")
tdSql.checkData(0, 0, 1.2/1.1)
tdSql.query("select irate(col1) from gtest2;")
tdSql.checkData(0, 0, 10)
tdSql.query("select irate(col1) from gtest3;")
tdSql.checkData(0, 0, 0)
tdSql.query("select irate(col1) from gtest4;")
tdSql.checkRows(0)
tdSql.query("select irate(col1) from gtest5;")
tdSql.checkData(0, 0, 0)
tdSql.query("select irate(col1) from gtest6;")
tdSql.checkData(0, 0, 2)
tdSql.query("select irate(col1) from gtest7;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
tdSql.checkData(1, 1, 4)
tdSql.checkData(2, 1, 0)
tdSql.checkData(3, 1, 1)
tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
tdSql.checkData(1, 1, 0)
tdSql.checkData(2, 1, 4)
tdSql.checkData(3, 1, 0)
tdSql.query("select irate(col1) from gtest8;")
tdSql.checkData(0, 0, 1/1.001)
tdSql.query("select irate(col1) from gtest9;")
tdSql.checkData(0, 0, 1000000)
#error
tdSql.error("select irate(col1) from test")
tdSql.error("select irate(ts) from test1")
tdSql.error("select irate(col7) from test1")
tdSql.error("select irate(col8) from test1")
tdSql.error("select irate(col9) from test1")
tdSql.error("select irate(loc) from test1")
tdSql.error("select irate(tag1) from test1")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
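The expected values asserted above follow from irate's last-two-samples rule. A minimal illustrative sketch of that rule in Python (not TDengine source; the behavior on a decreasing value is inferred from the gtest6 case, where 5 -> 2 over one second yields 2):

def expected_irate(rows):
    # rows: list of (ts_seconds, value), ascending by timestamp
    if len(rows) < 2:
        return 0  # a single sample yields no rate (see the gtest3 check)
    (t0, v0), (t1, v1) = rows[-2], rows[-1]
    dv = v1 - v0 if v1 >= v0 else v1  # on decrease, treat as a counter reset
    return dv / (t1 - t0)

assert abs(expected_irate([(0, 0), (1.1, 1.2)]) - 1.2 / 1.1) < 1e-9   # gtest1
assert abs(expected_irate([(1.001, 1), (1.101, 2)]) - 10) < 1e-9      # gtest2
assert expected_irate([(0, 5), (1, 2)]) == 2                          # gtest6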

View File

@ -698,24 +698,24 @@ class TDTestCase:
tdSql.execute(cmd1)
cmd2 = 'select * from normal_in_float_double_1 where in_float in (\'888\');'
tdLog.info(cmd2)
tdSql.error(cmd2)
try:
tdSql.execute(cmd2)
tdLog.exit("invalid operation: not supported filter condition")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("invalid operation: not supported filter condition")
cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');'
tdLog.info(cmd3)
tdSql.error(cmd3)
try:
tdSql.execute(cmd3)
tdLog.exit("invalid operation: not supported filter condition")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("invalid operation: not supported filter condition")
#tdLog.info(cmd2)
#tdSql.error(cmd2)
#try:
# tdSql.execute(cmd2)
# tdLog.exit("invalid operation: not supported filter condition")
#except Exception as e:
# tdLog.info(repr(e))
# tdLog.info("invalid operation: not supported filter condition")
#
#cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');'
#tdLog.info(cmd3)
#tdSql.error(cmd3)
#try:
# tdSql.execute(cmd3)
# tdLog.exit("invalid operation: not supported filter condition")
#except Exception as e:
# tdLog.info(repr(e))
# tdLog.info("invalid operation: not supported filter condition")
def stop(self):
tdSql.close()

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file":"./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1,
"timestamp_step": 1000,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/sample.csv",
"tags_file": "",
"columns": [{"type": "INT", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BOOL"}],
"tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}

View File

@ -0,0 +1,81 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# insert: create one or multiple tables per sql and insert multiple rows per sql
os.system("%staosdemo -f query/nestedQuery/insertData.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 1000)
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 1000)
tdSql.query("select count(*) from stb00_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 100000)
tdSql.query("select count(*) from stb01_1")
tdSql.checkData(0, 0, 200)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 200000)
testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf query/nestedQuery/%s.sql" % testcaseFilename )
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,106 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
import random
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts1 = 1593548685000
self.ts2 = 1593548785000
def run(self):
# tdSql.execute("drop database db ")
tdSql.prepare()
tdSql.execute("create table st (ts timestamp, num int, value int , t_instance int) tags (loc nchar(30))")
node = 5
number = 10
for n in range(node):
for m in range(number):
dt = m*300000 + n*60000 # rows within a table are 5 minutes apart; tables are offset by 1 minute
args1=(n,n,self.ts1+dt,n,100+2*m+2*n,10+m+n)
# args2=(n,self.ts2+dt,n,120+n,15+n)
tdSql.execute("insert into t%d using st tags('beijing%d') values(%d, %d, %d, %d)" % args1)
# tdSql.execute("insert into t1 using st tags('shanghai') values(%d, %d, %d, %d)" % args2)
# interval function
tdSql.query("select avg(value) from st interval(10m)")
# print(tdSql.queryResult)
tdSql.checkRows(6)
tdSql.checkData(0, 0, "2020-07-01 04:20:00")
tdSql.checkData(1, 1, 107.4)
# subquery with interval
tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
tdSql.checkData(0, 0, 109.0)
# subquery with interval and select two Column in parent query
tdSql.error("select ts,avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
# subquery with interval and sliding
tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(30s) limit 1;")
tdSql.checkData(0, 0, "2020-07-01 04:17:00")
tdSql.checkData(0, 1, 100)
tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s));")
tdSql.checkData(0, 0, 111)
# subquery with interval and offset
tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m);")
tdSql.checkData(0, 0, "2020-07-01 04:21:00")
tdSql.checkData(0, 1, 100)
tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m) group by loc);")
tdSql.checkData(0, 0, 109)
# subquery with interval,sliding and group by ; parent query with interval
tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(1m) group by loc limit 1 offset 52 ;")
tdSql.checkData(0, 0, "2020-07-01 05:09:00")
tdSql.checkData(0, 1, 118)
tdSql.query("select avg(avg_val) as ncst from(select avg(value) as avg_val from st where loc!='beijing0' interval(8m) sliding(1m) group by loc ) interval(5m);")
tdSql.checkData(1, 1, 105)
# # subquery and parent query with interval and sliding
tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(5m)) interval(10m) sliding(2m);")
tdSql.checkData(29, 0, "2020-07-01 05:10:00.000")
# subquery and parent query with top and bottom
tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val desc;")
tdSql.checkData(0, 1, 117)
tdSql.query("select bottom(avg_val,3) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val asc;")
tdSql.checkData(0, 1, 111)
#
tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(3m));")
tdSql.checkData(0, 1, 120)
# clear env
testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf wal/%s.sql" % testcaseFilename )
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
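The 109.0 expected by the first subquery check above is plain arithmetic over the generated data; a small illustrative sketch (not part of the test):

# loc='beijing0' is table t0: rows m = 0..9 with value = 100 + 2*m,
# spaced 5 minutes apart, so interval(10m) groups them into 5 windows
values = [100 + 2 * m for m in range(10)]
window_avgs = [sum(values[i:i + 2]) / 2 for i in range(0, 10, 2)]
assert window_avgs == [101.0, 105.0, 109.0, 113.0, 117.0]
assert sum(window_avgs) / len(window_avgs) == 109.0  # avg(avg_val)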

View File

@ -0,0 +1,111 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.rowNum = 100000
self.ts = 1537146000000
def run(self):
tdSql.prepare()
print("==============step1")
tdSql.execute(
"create table if not exists st (ts timestamp, t1 int, t2 timestamp, t3 bigint, t4 float, t5 double, t6 binary(10), t7 smallint, t8 tinyint, t9 bool, t10 nchar(10), t11 int unsigned, t12 bigint unsigned, t13 smallint unsigned, t14 tinyint unsigned ,t15 int) tags(dev nchar(50), tag2 binary(16))")
tdSql.execute(
'CREATE TABLE if not exists dev_001 using st tags("dev_01", "tag_01")')
tdSql.execute(
'CREATE TABLE if not exists dev_002 using st tags("dev_02", "tag_02")')
print("==============step2")
tdSql.execute(
"INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254, 1)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253, 5)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, false, '测试', 15, 10, 65534, 253, 10)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254, 15)")
for i in range(self.rowNum):
tdSql.execute("insert into dev_002 (ts,t1) values(%d, %d,)" % (self.ts + i, i + 1))
tdSql.query("select count(ts) from dev_001 state_window(t1)")
tdSql.checkRows(3)
tdSql.checkData(0, 0, 2)
tdSql.query("select count(ts) from dev_001 state_window(t3)")
tdSql.checkRows(2)
tdSql.checkData(1, 0, 2)
tdSql.query("select count(ts) from dev_001 state_window(t7)")
tdSql.checkRows(3)
tdSql.checkData(1, 0, 1)
tdSql.query("select count(ts) from dev_001 state_window(t8)")
tdSql.checkRows(3)
tdSql.checkData(2, 0, 1)
tdSql.query("select count(ts) from dev_001 state_window(t11)")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 3)
tdSql.query("select count(ts) from dev_001 state_window(t12)")
tdSql.checkRows(2)
tdSql.checkData(1, 0, 1)
tdSql.query("select count(ts) from dev_001 state_window(t13)")
tdSql.checkRows(2)
tdSql.checkData(1, 0, 1)
tdSql.query("select count(ts) from dev_001 state_window(t14)")
tdSql.checkRows(3)
tdSql.checkData(1, 0, 2)
tdSql.query("select count(ts) from dev_002 state_window(t1)")
tdSql.checkRows(100000)
# with all aggregate function
tdSql.query("select count(*),sum(t1),avg(t1),twa(t1),stddev(t15),leastsquares(t15,1,1),first(t15),last(t15),spread(t15),percentile(t15,90),t9 from dev_001 state_window(t9);")
tdSql.checkRows(3)
tdSql.checkData(0, 0, 2)
tdSql.checkData(1, 1, 10)
tdSql.checkData(0, 2, 1)
# tdSql.checkData(0, 3, 1)
tdSql.checkData(0, 4, np.std([1,5]))
# tdSql.checkData(0, 5, 1)
tdSql.checkData(0, 6, 1)
tdSql.checkData(0, 7, 5)
tdSql.checkData(0, 8, 4)
tdSql.checkData(0, 9, 4.6)
tdSql.checkData(0, 10, 'True')
# with where
tdSql.query("select avg(t15),t9 from dev_001 where t9='true' state_window(t9);")
tdSql.checkData(0, 0, 7)
tdSql.checkData(0, 1, 'True')
# error
tdSql.error("select count(*) from dev_001 state_window(t2)")
tdSql.error("select count(*) from st state_window(t3)")
tdSql.error("select count(*) from dev_001 state_window(t4)")
tdSql.error("select count(*) from dev_001 state_window(t5)")
tdSql.error("select count(*) from dev_001 state_window(t6)")
tdSql.error("select count(*) from dev_001 state_window(t10)")
tdSql.error("select count(*) from dev_001 state_window(tag2)")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
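The row counts asserted above come straight from STATE_WINDOW's splitting rule: a window closes whenever the state column changes value, so count(ts) per window is the length of each run of consecutive equal values. An illustrative sketch (not TDengine source):

from itertools import groupby

def state_window_counts(column_values):
    # one window per run of consecutive equal values
    return [len(list(run)) for _, run in groupby(column_values)]

# dev_001's t1 column is [1, 1, 10, 1] -> 3 windows, the first holding 2 rows
assert state_window_counts([1, 1, 10, 1]) == [2, 1, 1]
# dev_001's t3 column is [10, 10, 11, 11] -> 2 windows of 2 rows each
assert state_window_counts([10, 10, 11, 11]) == [2, 2]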

View File

@ -0,0 +1,450 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
import datetime
import inspect
import psutil
import shutil
import json
from util.log import *
from multiprocessing import cpu_count
# TODO: fully test the function. Handle exceptions.
# Handle json format not accepted by taosdemo
class TDTaosdemoCfg:
def __init__(self):
self.insert_cfg = {
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": cpu_count(),
"thread_count_create_tbl": cpu_count(),
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 32766,
"max_sql_len": 32766,
"databases": None
}
self.db = {
"name": 'db',
"drop": 'yes',
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 6,
"precision": "ms",
"keep": 3650,
"minRows": 100,
"maxRows": 4096,
"comp": 2,
"walLevel": 1,
"cachelast": 0,
"quorum": 1,
"fsync": 3000,
"update": 0
}
self.query_cfg = {
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db",
"query_times": 2,
"query_mode": "taosc",
"specified_table_query": None,
"super_table_query": None
}
self.table_query = {
"query_interval": 1,
"concurrent": 3,
"sqls": None
}
self.stable_query = {
"stblname": "stb",
"query_interval": 1,
"threads": 3,
"sqls": None
}
self.sub_cfg = {
"filetype": "subscribe",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "db",
"confirm_parameter_prompt": "no",
"specified_table_query": None,
"super_table_query": None
}
self.table_sub = {
"concurrent": 1,
"mode": "sync",
"interval": 10000,
"restart": "yes",
"keepProgress": "yes",
"sqls": None
}
self.stable_sub = {
"stblname": "stb",
"threads": 1,
"mode": "sync",
"interval": 10000,
"restart": "yes",
"keepProgress": "yes",
"sqls": None
}
self.stbs = []
self.stb_template = {
"name": "stb",
"child_table_exists": "no",
"childtable_count": 100,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_limit": 10,
"childtable_offset": 0,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 32766,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT", "count": 1}],
"tags": [{"type": "BIGINT", "count": 1}]
}
self.tb_query_sql = []
self.tb_query_sql_template = {
"sql": "select last_row(*) from stb_0 ",
"result": "temp/query_res0.txt"
}
self.stb_query_sql = []
self.stb_query_sql_template = {
"sql": "select last_row(ts) from xxxx",
"result": "temp/query_res2.txt"
}
self.tb_sub_sql = []
self.tb_sub_sql_template = {
"sql": "select * from stb_0 ;",
"result": "temp/subscribe_res0.txt"
}
self.stb_sub_sql = []
self.stb_sub_sql_template = {
"sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
"result": "temp/subscribe_res1.txt"
}
# The following functions are import functions for different dicts and lists
# except import_sql, all other import functions take a dict and overwrite the original dict
# dict_in: the dict used to overwrite the target
def import_insert_cfg(self, dict_in):
self.insert_cfg = dict_in
def import_db(self, dict_in):
self.db = dict_in
def import_stbs(self, dict_in):
self.stbs = dict_in
def import_query_cfg(self, dict_in):
self.query_cfg = dict_in
def import_table_query(self, dict_in):
self.table_query = dict_in
def import_stable_query(self, dict_in):
self.stable_query = dict_in
def import_sub_cfg(self, dict_in):
self.sub_cfg = dict_in
def import_table_sub(self, dict_in):
self.table_sub = dict_in
def import_stable_sub(self, dict_in):
self.stable_sub = dict_in
def import_sql(self, Sql_in, mode):
"""used for importing the sql later used
Args:
Sql_in (dict): the imported sql dict
mode (str): the sql storing location within TDTaosdemoCfg
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
"""
if mode == 'query_table':
self.tb_query_sql = Sql_in
elif mode == 'query_stable':
self.stb_query_sql = Sql_in
elif mode == 'sub_table':
self.tb_sub_sql = Sql_in
elif mode == 'sub_stable':
self.stb_sub_sql = Sql_in
# import functions end
# The following functions are alter functions for different dicts
# Args:
# key: the key that is going to be modified
# value: the value of the key that is going to be modified
# if key = 'databases' | "specified_table_query" | "super_table_query"|"sqls"
# value will not be used
def alter_insert_cfg(self, key, value):
if key == 'databases':
self.insert_cfg[key] = [
{
'dbinfo': self.db,
'super_tables': self.stbs
}
]
else:
self.insert_cfg[key] = value
def alter_db(self, key, value):
self.db[key] = value
def alter_query_tb(self, key, value):
if key == "sqls":
self.table_query[key] = self.tb_query_sql
else:
self.table_query[key] = value
def alter_query_stb(self, key, value):
if key == "sqls":
self.stable_query[key] = self.stb_query_sql
else:
self.stable_query[key] = value
def alter_query_cfg(self, key, value):
if key == "specified_table_query":
self.query_cfg["specified_table_query"] = self.table_query
elif key == "super_table_query":
self.query_cfg["super_table_query"] = self.stable_query
else:
self.table_query[key] = value
def alter_sub_cfg(self, key, value):
if key == "specified_table_query":
self.sub_cfg["specified_table_query"] = self.table_sub
elif key == "super_table_query":
self.sub_cfg["super_table_query"] = self.stable_sub
else:
self.table_query[key] = value
def alter_sub_stb(self, key, value):
if key == "sqls":
self.stable_sub[key] = self.stb_sub_sql
else:
self.stable_sub[key] = value
def alter_sub_tb(self, key, value):
if key == "sqls":
self.table_sub[key] = self.tb_sub_sql
else:
self.table_sub[key] = value
# alter function ends
# the following functions are for handling the sql lists
def append_sql_stb(self, target, value):
"""for appending sql dict into specific sql list
Args:
target (str): the target append list
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
unique: 'insert_stbs'
value (dict): the sql dict going to be appended
"""
if target == 'insert_stbs':
self.stbs.append(value)
elif target == 'query_table':
self.tb_query_sql.append(value)
elif target == 'query_stable':
self.stb_query_sql.append(value)
elif target == 'sub_table':
self.tb_sub_sql.append(value)
elif target == 'sub_stable':
self.stb_sub_sql.append(value)
def pop_sql_stb(self, target, index):
"""for poping a sql dict from specific sql list
Args:
target (str): the target append list
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
unique: 'insert_stbs'
index (int): the sql dict that is going to be popped
"""
if target == 'insert_stbs':
self.stbs.pop(index)
elif target == 'query_table':
self.tb_query_sql.pop(index)
elif target == 'query_stable':
self.stb_query_sql.pop(index)
elif target == 'sub_table':
self.tb_sub_sql.pop(index)
elif target == 'sub_stable':
self.stb_sub_sql.pop(index)
# sql list modification function end
# The following functions are get functions for different dicts
def get_db(self):
return self.db
def get_stb(self):
return self.stbs
def get_insert_cfg(self):
return self.insert_cfg
def get_query_cfg(self):
return self.query_cfg
def get_tb_query(self):
return self.table_query
def get_stb_query(self):
return self.stable_query
def get_sub_cfg(self):
return self.sub_cfg
def get_tb_sub(self):
return self.table_sub
def get_stb_sub(self):
return self.stable_sub
def get_sql(self, target):
"""general get function for all sql lists
Args:
target (str): the sql list to get
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
unique: 'insert_stbs'
"""
if target == 'query_table':
return self.tb_query_sql
elif target == 'query_stable':
return self.stb_query_sql
elif target == 'sub_table':
return self.tb_sub_sql
elif target == 'sub_stable':
return self.stb_sub_sql
def get_template(self, target):
"""general get function for the default sql template
Args:
target (str): the sql list to get
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
unique: 'insert_stbs'
"""
if target == 'insert_stbs':
return self.stb_template
elif target == 'query_table':
return self.tb_query_sql_template
elif target == 'query_stable':
return self.stb_query_sql_template
elif target == 'sub_table':
return self.tb_sub_sql_template
elif target == 'sub_stable':
return self.stb_sub_sql_template
else:
print(f'did not find {target}')
# the following are the file generation functions
"""defalut document:
generator functio for generating taosdemo json file
will assemble the dicts and dump the final json
Args:
pathName (str): the directory wanting the json file to be
fileName (str): the name suffix of the json file
Returns:
str: [pathName]/[filetype]_[filName].json
"""
def generate_insert_cfg(self, pathName, fileName):
cfgFileName = f'{pathName}/insert_{fileName}.json'
self.alter_insert_cfg('databases', None)
with open(cfgFileName, 'w') as file:
json.dump(self.insert_cfg, file)
return cfgFileName
def generate_query_cfg(self, pathName, fileName):
cfgFileName = f'{pathName}/query_{fileName}.json'
self.alter_query_tb('sqls', None)
self.alter_query_stb('sqls', None)
self.alter_query_cfg('specified_table_query', None)
self.alter_query_cfg('super_table_query', None)
with open(cfgFileName, 'w') as file:
json.dump(self.query_cfg, file)
return cfgFileName
def generate_subscribe_cfg(self, pathName, fileName):
cfgFileName = f'{pathName}/subscribe_{fileName}.json'
self.alter_sub_tb('sqls', None)
self.alter_sub_stb('sqls', None)
self.alter_sub_cfg('specified_table_query', None)
self.alter_sub_cfg('super_table_query', None)
with open(cfgFileName, 'w') as file:
json.dump(self.sub_cfg, file)
return cfgFileName
# file generation functions end
def drop_cfg_file(self, fileName):
os.remove(f'{fileName}')
taosdemoCfg = TDTaosdemoCfg()
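A short hypothetical usage sketch for this helper (assuming taosdemoCfg is imported from this module; the database and file names below are placeholders, and the taosdemo invocation mirrors the tests above):

import os

taosdemoCfg.alter_db('name', 'demo_db')  # tweak the dbinfo dict
taosdemoCfg.append_sql_stb('insert_stbs', taosdemoCfg.get_template('insert_stbs'))
cfgFile = taosdemoCfg.generate_insert_cfg('.', 'demo')  # -> ./insert_demo.json
os.system('taosdemo -f %s -y' % cfgFile)
taosdemoCfg.drop_cfg_file(cfgFile)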

View File

@ -654,53 +654,91 @@ if $data31 != @20-03-27 05:10:19.000@ then
return -1
endi
#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2;
#if $rows != 40 then
# return -1
#endi
#
#if $data01 != 1.000000000 then
# return -1
#endi
#if $data02 != t1 then
# return -1
#endi
#if $data03 != 1 then
# return -1
#endi
#if $data04 != 1 then
# return -1
#endi
#
#if $data11 != 1.000000000 then
# return -1
#endi
#if $data12 != t1 then
# return -1
#endi
#if $data13 != 1 then
# return -1
#endi
#if $data14 != 1 then
# return -1
#endi
#
#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1;
#if $rows != 2 then
# return -1
#endi
#
#if $data11 != 1.000000000 then
# return -1
#endi
#if $data12 != t2 then
# return -1
#endi
#if $data13 != 1 then
# return -1
#endi
#if $data14 != 2 then
# return -1
#endi
print ===============>
sql select stddev(c),c from st where t2=1 or t2=2 group by c;
if $rows != 4 then
return -1
endi
if $data00 != 0.000000000 then
return -1
endi
if $data01 != 1 then
return -1
endi
if $data10 != 0.000000000 then
return -1
endi
if $data11 != 2 then
return -1
endi
if $data20 != 0.000000000 then
return -1
endi
if $data21 != 3 then
return -1
endi
if $data30 != 0.000000000 then
return -1
endi
if $data31 != 4 then
return -1
endi
sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2;
if $rows != 40 then
return -1
endi
if $data01 != 1.000000000 then
return -1
endi
if $data02 != t1 then
return -1
endi
if $data03 != 1 then
return -1
endi
if $data04 != 1 then
return -1
endi
if $data11 != 1.000000000 then
return -1
endi
if $data12 != t1 then
return -1
endi
if $data13 != 1 then
return -1
endi
if $data14 != 1 then
return -1
endi
sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1;
if $rows != 2 then
return -1
endi
if $data11 != 1.000000000 then
return -1
endi
if $data12 != t2 then
return -1
endi
if $data13 != 1 then
return -1
endi
if $data14 != 2 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,6 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
system sh/exec.sh -n dnode1 -s start

View File

@ -68,6 +68,27 @@ endw
sleep 100
#======================= only check first table tag, TD-4827
sql select count(*) from $mt where t1 in (0)
if $rows != 1 then
return -1
endi
if $data00 != $rowNum then
return -1;
endi
$secTag = ' . abc
$secTag = $secTag . 0
$secTag = $secTag . '
sql select count(*) from $mt where t2 =$secTag and t1 in (0)
if $rows != 1 then
return -1
endi
if $data00 != $rowNum then
return -1;
endi
#================================
sql select ts from select_tags_mt0
print $rows

View File

@ -63,4 +63,3 @@ run general/parser/between_and.sim
run general/parser/last_cache.sim
run general/parser/nestquery.sim
run general/parser/precision_ns.sim

View File

@ -139,18 +139,18 @@ sql_error select * from $mt where c1 like 1
sql create table wh_mt1 (ts timestamp, c1 smallint, c2 int, c3 bigint, c4 float, c5 double, c6 tinyint, c7 binary(10), c8 nchar(10), c9 bool, c10 timestamp) tags (t1 binary(10), t2 smallint, t3 int, t4 bigint, t5 float, t6 double)
sql create table wh_mt1_tb1 using wh_mt1 tags ('tb11', 1, 1, 1, 1, 1)
sql insert into wh_mt1_tb1 values (now, 1, 1, 1, 1, 1, 1, 'binary', 'nchar', true, '2019-01-01 00:00:00.000')
sql_error select last(*) from wh_mt1 where c1 in ('1')
sql_error select last(*) from wh_mt1_tb1 where c1 in ('1')
sql_error select last(*) from wh_mt1 where c2 in ('1')
sql_error select last(*) from wh_mt1_tb1 where c2 in ('1')
sql_error select last(*) from wh_mt1 where c3 in ('1')
sql_error select last(*) from wh_mt1_tb1 where c3 in ('1')
sql_error select last(*) from wh_mt1 where c4 in ('1')
sql_error select last(*) from wh_mt1_tb1 where c4 in ('1')
sql_error select last(*) from wh_mt1 where c5 in ('1')
sql_error select last(*) from wh_mt1_tb1 where c5 in ('1')
sql_error select last(*) from wh_mt1 where c6 in ('1')
sql_error select last(*) from wh_mt1_tb1 where c6 in ('1')
#sql_error select last(*) from wh_mt1 where c1 in ('1')
#sql_error select last(*) from wh_mt1_tb1 where c1 in ('1')
#sql_error select last(*) from wh_mt1 where c2 in ('1')
#sql_error select last(*) from wh_mt1_tb1 where c2 in ('1')
#sql_error select last(*) from wh_mt1 where c3 in ('1')
#sql_error select last(*) from wh_mt1_tb1 where c3 in ('1')
#sql_error select last(*) from wh_mt1 where c4 in ('1')
#sql_error select last(*) from wh_mt1_tb1 where c4 in ('1')
#sql_error select last(*) from wh_mt1 where c5 in ('1')
#sql_error select last(*) from wh_mt1_tb1 where c5 in ('1')
#sql_error select last(*) from wh_mt1 where c6 in ('1')
#sql_error select last(*) from wh_mt1_tb1 where c6 in ('1')
#sql_error select last(*) from wh_mt1 where c7 in ('binary')
#sql_error select last(*) from wh_mt1_tb1 where c7 in ('binary')
#sql_error select last(*) from wh_mt1 where c8 in ('nchar')
@ -352,5 +352,18 @@ if $rows != 0 then
return -1
endi
print ==========================> td-4783
sql create table where_ts(ts timestamp, f int)
sql insert into where_ts values('2021-06-19 16:22:00', 1);
sql insert into where_ts values('2021-06-19 16:23:00', 2);
sql insert into where_ts values('2021-06-19 16:24:00', 3);
sql insert into where_ts values('2021-06-19 16:25:00', 1);
sql select * from (select * from where_ts) where ts<'2021-06-19 16:25:00' and ts>'2021-06-19 16:22:00'
if $row != 2 then
return -1
endi
print $data00, $data01
if $data01 != 2 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -35,7 +35,7 @@ int32_t main(int32_t argc, char *argv[]) {
for (int32_t i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0 && i < argc - 1) {
tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
tstrncpy(configDir, argv[++i], 128);
} else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) {
strcpy(scriptFile, argv[++i]);
} else if (strcmp(argv[i], "-a") == 0) {
@ -75,4 +75,4 @@ int32_t main(int32_t argc, char *argv[]) {
simInfo("execute result %d", ret);
return ret;
}
}