Merge remote-tracking branch 'origin/develop' into feature/crash_gen

This commit is contained in:
Steven Li 2020-07-08 08:32:55 +00:00
commit 11f5653be8
89 changed files with 3289 additions and 1764 deletions

View File

@ -480,9 +480,9 @@ TDengine supports aggregation queries over data. The following aggregate and selection functions are supported:
- **LEASTSQUARES**
```mysql
SELECT LEASTSQUARES(field_name) FROM tb_name [WHERE clause]
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
```
Function: computes the fitted straight-line (least squares) equation of the values in a column against the primary key (timestamp).
Function: computes the fitted straight-line (least squares) equation of the values in a column against the primary key (timestamp). start_val is the initial value of the independent variable, and step_val is its step size.
Return Data Type: a string expression in the form (slope, intercept).
Applicable Data Types: cannot be applied to timestamp, binary, nchar, or bool columns.
Note: the independent variable is the timestamp, and the dependent variable is the value of that column.
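As a minimal illustration of the revised syntax (the table d1001 and the column current are hypothetical), a fit that starts the independent variable at 1 with a step of 1 could be requested as:
```mysql
SELECT LEASTSQUARES(current, 1, 1) FROM d1001;
```
The result is a string expression of the form (slope, intercept), as described above.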

View File

@ -412,7 +412,7 @@ TDengine supports aggregations over numerical values, they are listed below:
SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the value of the specified column below which `P` percent of the data points fall.
Return Data Type: the same data type.
Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: The range of `P` is `[0, 100]`. When `P=0`, `PERCENTILE` returns the same value as `MIN`; when `P=100`, it returns the same value as `MAX`.
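For example (d1001 and current are hypothetical names), P=50 yields the median of the column, returned as a double per the revised return type:
```mysql
SELECT PERCENTILE(current, 50) FROM d1001;
```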
@ -446,7 +446,7 @@ TDengine supports aggregations over numerical values, they are listed below:
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: return the difference between the maximum and the minimum value.
Return Data Type: the same data type.
Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: `SPREAD` gives the range of data variation in a table/STable; it is equivalent to `MAX()` - `MIN()`.
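As a sketch (meters and voltage are hypothetical names), the following returns the value range of the column and is equivalent to MAX(voltage) - MIN(voltage):
```mysql
SELECT SPREAD(voltage) FROM meters;
```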

View File

@ -31,9 +31,7 @@ extern int32_t tscEmbedded;
#define tscInfo(...) { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC INFO ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }}
#define tscDebug(...) { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }}
#define tscTrace(...) { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC TRACE ", cDebugFlag, __VA_ARGS__); }}
#define tscDebugDump(...) { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }}
#define tscTraceDump(...) { if (cDebugFlag & DEBUG_TRACE) { taosPrintLongString("TSC TRACE ", cDebugFlag, __VA_ARGS__); }}
#define tscDebugL(...){ if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }}
#ifdef __cplusplus
}

View File

@ -110,8 +110,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
//todo tags value as well as the table id structure needs refactor
char *tsGetTagsValue(STableMeta *pMeta);
void extractTableNameFromToken(SSQLToken *pToken, SSQLToken* pTable);
#ifdef __cplusplus
}
#endif

View File

@ -29,9 +29,6 @@
#define jniDebug(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLog("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }}
#define jniTrace(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLog("JNI TRACE ", jniDebugFlag, __VA_ARGS__); }}
#define jniDebugDump(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLongString("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }}
#define jniTraceDump(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLongString("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }}
int __init = 0;
JavaVM *g_vm = NULL;

View File

@ -55,7 +55,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
strtolower(pSql->sqlstr, sqlstr);
tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
pSql->cmd.curSql = pSql->sqlstr;
int32_t code = tsParseSql(pSql, true);
@ -471,7 +471,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
}
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
// 1. the table Id(tid & uid) may have been updated, the submit block needs to be updated
// 1. the table Id(tid & uid) may have been updated, the submit block needs to be updated accordingly.
// 2. vnode may need the schema information along with submit block to update its local table schema.
if (pCmd->command == TSDB_SQL_INSERT) {
tscDebug("%p redo parse sql string to build submit block", pSql);

View File

@ -14,7 +14,6 @@
*/
#include "os.h"
#include "qast.h"
#include "qextbuffer.h"
#include "qfill.h"
#include "qhistogram.h"
@ -23,6 +22,7 @@
#include "qtsbuf.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "qast.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tscompression.h"

View File

@ -364,7 +364,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
TSKEY stime = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
@ -831,7 +831,7 @@ void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo
if (pFillInfo != NULL) {
int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
taosResetFillInfo(pFillInfo, revisedSTime);
}
@ -1301,9 +1301,7 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime =
taosGetIntervalStartTimestamp(skey, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
// taosResetFillInfo(pLocalReducer->pFillInfo, pQueryInfo->order.order, newTime,
// pQueryInfo->groupbyExpr.numOfGroupCols, 4096, 0, NULL, pLocalReducer->rowSize);
taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
}
}

View File

@ -538,7 +538,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sql);
tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true;

View File

@ -18,19 +18,19 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include "qast.h"
#include "taos.h"
#include "taosmsg.h"
#include "tstoken.h"
#include "tstrbuild.h"
#include "ttime.h"
#include "qast.h"
#include "tcompare.h"
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "tstoken.h"
#include "tstrbuild.h"
#include "ttime.h"
#include "ttokendef.h"
#include "tname.h"
#include "tcompare.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@ -4487,10 +4487,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload;
pUpdateMsg->head.vgId = htonl(pTableMeta->vgroupInfo.vgId);
pUpdateMsg->tid = htonl(pTableMeta->sid);
pUpdateMsg->uid = htobe64(pTableMeta->uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
pUpdateMsg->tversion = htons(pTableMeta->tversion);
pUpdateMsg->tid = htonl(pTableMeta->sid);
pUpdateMsg->uid = htobe64(pTableMeta->uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
pUpdateMsg->type = pTagsSchema->type;
pUpdateMsg->bytes = htons(pTagsSchema->bytes);
pUpdateMsg->tversion = htons(pTableMeta->tversion);
pUpdateMsg->numOfTags = htons(numOfTags);
pUpdateMsg->schemaLen = htonl(schemaLen);

View File

@ -215,25 +215,3 @@ __attribute__ ((unused)) static FORCE_INLINE size_t copy(char* dst, const char*
return len;
}
/*
* tablePrefix.columnName
* extract table name and save it in pTable, with only column name in pToken
*/
void extractTableNameFromToken(SSQLToken* pToken, SSQLToken* pTable) {
const char sep = TS_PATH_DELIMITER[0];
if (pToken == pTable || pToken == NULL || pTable == NULL) {
return;
}
char* r = strnchr(pToken->z, sep, pToken->n, false);
if (r != NULL) { // record the table name token
pTable->n = r - pToken->z;
pTable->z = pToken->z;
r += 1;
pToken->n -= (r - pToken->z);
pToken->z = r;
}
}

View File

@ -247,7 +247,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
} else {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
if (pCmd->command == TSDB_SQL_CONNECT) {
rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
rpcFreeCont(rpcMsg->pCont);
@ -260,7 +260,12 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
// get table meta query will not retry, do nothing
} else {
tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry);
// set the flag to denote that sql string needs to be re-parsed and build submit block with table schema
if (rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
pSql->cmd.submitSchema = 1;
}
pSql->res.code = rpcMsg->code; // keep the previous error code
if (pSql->retry > pSql->maxRetry) {
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
@ -433,8 +438,9 @@ void tscKillSTableQuery(SSqlObj *pSql) {
* here, we cannot set the command = TSDB_SQL_KILL_QUERY. Otherwise, it may cause
* sub-queries not correctly released and master sql object of super table query reaches an abnormal state.
*/
pSql->pSubs[i]->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
rpcCancelRequest(pSql->pSubs[i]->pRpcCtx);
rpcCancelRequest(pSub->pRpcCtx);
pSub->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
tscQueueAsyncRes(pSub);
}
/*

View File

@ -617,19 +617,18 @@ void taos_stop_query(TAOS_RES *res) {
if (pSql->signature != pSql) return;
tscDebug("%p start to cancel query", res);
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
tscKillSTableQuery(pSql);
return;
}
if (pSql->cmd.command >= TSDB_SQL_LOCAL) {
return;
if (pSql->cmd.command < TSDB_SQL_LOCAL) {
rpcCancelRequest(pSql->pRpcCtx);
}
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
tscQueueAsyncRes(pSql);
rpcCancelRequest(pSql->pRpcCtx);
tscDebug("%p query is cancelled", res);
}

View File

@ -503,7 +503,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
}
strtolower(pSql->sqlstr, sqlstr);
tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
tsem_init(&pSql->rspSem, 0, 0);
int32_t code = tsParseSql(pSql, true);

View File

@ -14,12 +14,12 @@
*/
#include "os.h"
#include "tscSubquery.h"
#include "qtsbuf.h"
#include "qast.h"
#include "tcompare.h"
#include "tschemautil.h"
#include "qtsbuf.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tschemautil.h"
#include "tsclient.h"
typedef struct SInsertSupporter {
@ -57,10 +57,15 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
pSubQueryInfo1->tsBuf = output1;
pSubQueryInfo2->tsBuf = output2;
// no result generated, return directly
if (pSupporter1->pTSBuf == NULL || pSupporter2->pTSBuf == NULL) {
tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql);
return 0;
}
tsBufResetPos(pSupporter1->pTSBuf);
tsBufResetPos(pSupporter2->pTSBuf);
// TODO add more detailed information
if (!tsBufNextPos(pSupporter1->pTSBuf)) {
tsBufFlush(output1);
tsBufFlush(output2);
@ -210,6 +215,7 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) {
pSupporter->f = NULL;
}
tfree(pSupporter->pIdTagList);
tscTagCondRelease(&pSupporter->tagCond);
free(pSupporter);
}
@ -420,43 +426,6 @@ static void updateQueryTimeRange(SQueryInfo* pQueryInfo, STimeWindow* win) {
pQueryInfo->window = *win;
}
static UNUSED_FUNC void tSIntersectionAndLaunchSecQuery(SJoinSupporter* pSupporter, SSqlObj* pSql) {
SSqlObj* pParentSql = pSupporter->pObj;
SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex);
// if (tscNonOrderedProjectionQueryOnSTable(pParentQueryInfo, 0)) {
// STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// assert(pQueryInfo->numOfTables == 1);
//
// // for projection query, need to try next vnode
//// int32_t totalVnode = pTableMetaInfo->pMetricMeta->numOfVnodes;
// int32_t totalVnode = 0;
// if ((++pTableMetaInfo->vgroupIndex) < totalVnode) {
// tscDebug("%p current vnode:%d exhausted, try next:%d. total vnode:%d. current numOfRes:%d", pSql,
// pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVnode, pRes->numOfTotal);
//
// pSql->cmd.command = TSDB_SQL_SELECT;
// pSql->fp = tscJoinQueryCallback;
// tscProcessSql(pSql);
//
// return;
// }
// }
SJoinSupporter* p1 = pParentSql->pSubs[0]->param;
SJoinSupporter* p2 = pParentSql->pSubs[1]->param;
STimeWindow win = TSWINDOW_INITIALIZER;
int64_t num = doTSBlockIntersect(pParentSql, p1, p2, &win);
if (num <= 0) { // no result during ts intersect
tscDebug("%p free all sub SqlObj and quit", pParentSql);
freeJoinSubqueryObj(pParentSql);
} else {
updateQueryTimeRange(pParentQueryInfo, &win);
tscLaunchRealSubqueries(pParentSql);
}
}
int32_t tscCompareTidTags(const void* p1, const void* p2) {
const STidTags* t1 = (const STidTags*) varDataVal(p1);
const STidTags* t2 = (const STidTags*) varDataVal(p2);
@ -713,9 +682,12 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
SArray *s1 = NULL, *s2 = NULL;
getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) { // no results,return.
tscDebug("%p free all sub SqlObj and quit", pParentSql);
tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql);
freeJoinSubqueryObj(pParentSql);
// set no result command
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
(*pParentSql->fp)(pParentSql->param, pParentSql, 0);
} else {
// proceed to the ts_comp query
SSqlCmd* pSubCmd1 = &pParentSql->pSubs[0]->cmd;
@ -846,7 +818,10 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
if (num <= 0) { // no result during ts intersect
tscDebug("%p no results generated in ts intersection, free all sub SqlObj and quit", pParentSql);
freeJoinSubqueryObj(pParentSql);
// set no result command
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
(*pParentSql->fp)(pParentSql->param, pParentSql, 0);
return;
}

View File

@ -14,21 +14,21 @@
*/
#include "os.h"
#include "qast.h"
#include "hash.h"
#include "tscUtil.h"
#include "taosmsg.h"
#include "qast.h"
#include "tcache.h"
#include "tkey.h"
#include "tmd5.h"
#include "tscProfile.h"
#include "tscLocalMerge.h"
#include "tscLog.h"
#include "tscProfile.h"
#include "tscSubquery.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "ttimer.h"
#include "ttokendef.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "hash.h"
static void freeQueryInfoImpl(SQueryInfo* pQueryInfo);
static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache);
@ -579,9 +579,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta);
for(int32_t j = 0; j < numOfCols; ++j) {
STColumn* pCol = (STColumn*) pDataBlock;
pCol->colId = pSchema[j].colId;
pCol->colId = htons(pSchema[j].colId);
pCol->type = pSchema[j].type;
pCol->bytes = pSchema[j].bytes;
pCol->bytes = htons(pSchema[j].bytes);
pCol->offset = 0;
pDataBlock += sizeof(STColumn);
@ -663,7 +663,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
}
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize;
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
if (dataBuf->nAllocSize < destSize) {
while (dataBuf->nAllocSize < destSize) {
@ -691,7 +691,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
tscDebug("%p tableId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableId,
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize);
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
pBlocks->tid = htonl(pBlocks->tid);
pBlocks->uid = htobe64(pBlocks->uid);

View File

@ -3,6 +3,7 @@
#include "os.h"
#include "taosmsg.h"
#include "tstoken.h"
typedef struct SDataStatis {
int16_t colId;
@ -23,10 +24,14 @@ void extractTableName(const char *tableId, char *name);
char* extractDBName(const char *tableId, char *name);
void extractTableNameFromToken(SSQLToken *pToken, SSQLToken* pTable);
SSchema tGetTableNameColumnSchema();
bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H

View File

@ -32,9 +32,6 @@ extern int32_t tscEmbedded;
#define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL DEBUG ", uDebugFlag, __VA_ARGS__); }}
#define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL TRACE ", uDebugFlag, __VA_ARGS__); }}
#define uDebugDump(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLongString("UTL DEBUG ", uDebugFlag, __VA_ARGS__); }}
#define uTraceDump(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLongString("UTL TRACE ", uDebugFlag, __VA_ARGS__); }}
#define pError(...) { taosPrintLog("APP ERROR ", 255, __VA_ARGS__); }
#define pPrint(...) { taosPrintLog("APP INFO ", 255, __VA_ARGS__); }

View File

@ -1210,7 +1210,7 @@ void taosInitGlobalCfg() {
}
bool taosCheckGlobalCfg() {
if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG) {
if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) {
taosSetAllDebugFlag();
}

View File

@ -75,3 +75,56 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
}
int64_t start = ((startTime - slidingTime) / slidingTime + 1) * slidingTime;
if (!(timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) {
/*
* here we revised the start time of day according to the local time zone,
* but in case of DST, the start time of one day needs to be decided dynamically.
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
start += timezone * t;
}
int64_t end = start + intervalTime - 1;
if (end < startTime) {
start += slidingTime;
}
return start;
}
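For context, this helper aligns the start timestamp of each time window in interval queries. A minimal sketch of the kind of query whose windows it aligns (the table d1001 is hypothetical) is:
```mysql
SELECT COUNT(*) FROM d1001 WHERE ts > now - 1h INTERVAL(10m) SLIDING(5m);
```
Here intervalTime roughly corresponds to INTERVAL(10m) and slidingTime to SLIDING(5m).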
/*
* tablePrefix.columnName
* extract table name and save it in pTable, with only column name in pToken
*/
void extractTableNameFromToken(SSQLToken* pToken, SSQLToken* pTable) {
const char sep = TS_PATH_DELIMITER[0];
if (pToken == pTable || pToken == NULL || pTable == NULL) {
return;
}
char* r = strnchr(pToken->z, sep, pToken->n, false);
if (r != NULL) { // record the table name token
pTable->n = r - pToken->z;
pTable->z = pToken->z;
r += 1;
pToken->n -= (r - pToken->z);
pToken->z = r;
}
}

View File

@ -131,8 +131,8 @@ static void dnodeFreeMnodeWriteMsg(SMnodeMsg *pWrite) {
taosFreeQitem(pWrite);
}
void dnodeSendRpcMnodeWriteRsp(void *pRaw, int32_t code) {
SMnodeMsg *pWrite = pRaw;
void dnodeSendRpcMnodeWriteRsp(void *pMsg, int32_t code) {
SMnodeMsg *pWrite = pMsg;
if (pWrite == NULL) return;
if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return;
if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) {

View File

@ -106,6 +106,12 @@ int32_t dnodeInitMgmt() {
}
}
int32_t code = vnodeInitResources();
if (code != TSDB_CODE_SUCCESS) {
dnodeCleanupMgmt();
return -1;
}
// create the queue and thread to handle the message
tsMgmtQset = taosOpenQset();
if (tsMgmtQset == NULL) {
@ -127,7 +133,7 @@ int32_t dnodeInitMgmt() {
pthread_attr_init(&thAttr);
pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
int32_t code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL);
code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL);
pthread_attr_destroy(&thAttr);
if (code != 0) {
dError("failed to create thread to process mgmt queue, reason:%s", strerror(errno));
@ -282,13 +288,12 @@ static void *dnodeOpenVnode(void *param) {
}
static int32_t dnodeOpenVnodes() {
int32_t *vnodeList = calloc(TSDB_MAX_VNODES, sizeof(int32_t));
int32_t vnodeList[TSDB_MAX_VNODES] = {0};
int32_t numOfVnodes = 0;
int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
free(vnodeList);
return status;
}
@ -334,7 +339,6 @@ static int32_t dnodeOpenVnodes() {
free(pThread->vnodeList);
}
free(vnodeList);
free(threads);
dInfo("there are total vnodes:%d, openned:%d failed:%d", numOfVnodes, openVnodes, failedVnodes);
@ -342,7 +346,7 @@ static int32_t dnodeOpenVnodes() {
}
void dnodeStartStream() {
int32_t vnodeList[TSDB_MAX_VNODES];
int32_t vnodeList[TSDB_MAX_VNODES] = {0};
int32_t numOfVnodes = 0;
int32_t status = vnodeGetVnodeList(vnodeList, &numOfVnodes);
@ -359,7 +363,7 @@ void dnodeStartStream() {
}
static void dnodeCloseVnodes() {
int32_t vnodeList[TSDB_MAX_VNODES];
int32_t vnodeList[TSDB_MAX_VNODES]= {0};
int32_t numOfVnodes = 0;
int32_t status;

View File

@ -210,6 +210,7 @@ static void *dnodeProcessWriteQueue(void *param) {
int32_t numOfMsgs;
int type;
void *pVnode, *item;
SRspRet *pRspRet;
dDebug("write worker:%d is running", pWorker->workerId);
@ -222,9 +223,11 @@ static void *dnodeProcessWriteQueue(void *param) {
for (int32_t i = 0; i < numOfMsgs; ++i) {
pWrite = NULL;
pRspRet = NULL;
taosGetQitem(pWorker->qall, &type, &item);
if (type == TAOS_QTYPE_RPC) {
pWrite = (SWriteMsg *)item;
pRspRet = &pWrite->rspRet;
pHead = (SWalHead *)(pWrite->pCont - sizeof(SWalHead));
pHead->msgType = pWrite->rpcMsg.msgType;
pHead->version = 0;
@ -234,7 +237,7 @@ static void *dnodeProcessWriteQueue(void *param) {
pHead = (SWalHead *)item;
}
int32_t code = vnodeProcessWrite(pVnode, type, pHead, item);
int32_t code = vnodeProcessWrite(pVnode, type, pHead, pRspRet);
if (pWrite) pWrite->rpcMsg.code = code;
}
@ -247,6 +250,11 @@ static void *dnodeProcessWriteQueue(void *param) {
if (type == TAOS_QTYPE_RPC) {
pWrite = (SWriteMsg *)item;
dnodeSendRpcVnodeWriteRsp(pVnode, item, pWrite->rpcMsg.code);
} else if (type == TAOS_QTYPE_FWD) {
pHead = (SWalHead *)item;
vnodeConfirmForward(pVnode, pHead->version, 0);
taosFreeQitem(item);
vnodeRelease(pVnode);
} else {
taosFreeQitem(item);
vnodeRelease(pVnode);

View File

@ -180,7 +180,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISK_PERMISSIONS, 0, 0x0506, "vnode no d
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "vnode no such file or directory")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "vnode out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "vnode app error")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0214, "vnode no write auth")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_STATUS, 0, 0x0510, "vnode not in ready state")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "vnode not in synced state")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "vnode no write auth")
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, 0, 0x0600, "tsdb invalid table id")
@ -200,6 +202,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_ACTION, 0, 0x060D, "tsdb inval
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CREATE_TB_MSG, 0, 0x060E, "tsdb invalid create table message")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, 0, 0x060F, "tsdb no table data in memory skiplist")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, 0, 0x0610, "tsdb file already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, 0, 0x0611, "tsdb need to reconfigure table")
// query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, 0, 0x0700, "query invalid handle")

View File

@ -203,8 +203,7 @@ typedef struct SSubmitBlk {
typedef struct SSubmitMsg {
SMsgHead header;
int32_t length;
int32_t compressed : 2;
int32_t numOfBlocks : 30;
int32_t numOfBlocks;
SSubmitBlk blocks[];
} SSubmitMsg;
@ -285,6 +284,8 @@ typedef struct {
int32_t tid;
int16_t tversion;
int16_t colId;
int8_t type;
int16_t bytes;
int32_t tagValLen;
int16_t numOfTags;
int32_t schemaLen;

View File

@ -108,12 +108,14 @@ void tsdbClearTableCfg(STableCfg *config);
void* tsdbGetTableTagVal(const void* pTable, int32_t colId, int16_t type, int16_t bytes);
char* tsdbGetTableName(void *pTable);
STableId tsdbGetTableId(void *pTable);
#define TSDB_TABLEID(_table) ((STableId*) (_table))
STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg);
int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg);
int tsdbDropTable(TSDB_REPO_T *pRepo, STableId tableId);
int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg);
int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg);
TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid);
void tsdbStartStream(TSDB_REPO_T *repo);
@ -233,9 +235,10 @@ bool tsdbNextDataBlock(TsdbQueryHandleT *pQueryHandle);
* Get current data block information
*
* @param pQueryHandle
* @param pBlockInfo
* @return
*/
SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT *pQueryHandle);
void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT *pQueryHandle, SDataBlockInfo* pBlockInfo);
/**
*

View File

@ -60,7 +60,10 @@ void* vnodeGetWal(void *pVnode);
int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item);
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
void vnodeBuildStatusMsg(void *param);
void vnodeConfirmForward(void *param, uint64_t version, int32_t code);
void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes);
int32_t vnodeInitResources();
void vnodeCleanupResources();
int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg);

View File

@ -206,9 +206,10 @@ static void shellSourceFile(TAOS *con, char *fptr) {
if (code != 0) {
fprintf(stderr, "DB error: %s: %s (%d)\n", taos_errstr(con), fname, lineNo);
/* free local resource: allocated memory/metric-meta refcnt */
taos_free_result(pSql);
}
/* free local resource: allocated memory/metric-meta refcnt */
taos_free_result(pSql);
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd_len = 0;

View File

@ -520,9 +520,8 @@ int main(int argc, char *argv[]) {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
queryDB(taos, command);
printf("meters created!\n");
taos_close(taos);
}
taos_close(taos);
/* Wait for table to create */
multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass);
@ -792,9 +791,6 @@ void * createTable(void *sarg)
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
queryDB(winfo->taos, command);
}
taos_close(winfo->taos);
} else {
/* Create all the tables; */
printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
@ -812,7 +808,6 @@ void * createTable(void *sarg)
}
queryDB(winfo->taos, command);
}
taos_close(winfo->taos);
}
return NULL;
@ -875,6 +870,11 @@ void *readTable(void *sarg) {
int64_t sTime = rinfo->start_time;
char *tb_prefix = rinfo->tb_prefix;
FILE *fp = fopen(rinfo->fp, "a");
if (NULL == fp) {
printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
return NULL;
}
int num_of_DPT = rinfo->nrecords_per_table;
int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
int totalData = num_of_DPT * num_of_tables;
@ -930,6 +930,11 @@ void *readMetric(void *sarg) {
TAOS *taos = rinfo->taos;
char command[BUFFER_SIZE] = "\0";
FILE *fp = fopen(rinfo->fp, "a");
if (NULL == fp) {
printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
return NULL;
}
int num_of_DPT = rinfo->nrecords_per_table;
int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
int totalData = num_of_DPT * num_of_tables;

File diff suppressed because it is too large

View File

@ -51,6 +51,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
break;
case 'f':
arguments->fqdn = arg;
break;
case 'g':
arguments->dnodeGroups = arg;
break;

View File

@ -96,6 +96,7 @@ void walModWalFile(char* walfile) {
if (wfd < 0) {
printf("wal:%s, failed to open(%s)\n", newWalFile, strerror(errno));
free(buffer);
close(rfd);
return ;
}
@ -116,6 +117,11 @@ void walModWalFile(char* walfile) {
break;
}
if (pHead->len >= 1024000 - sizeof(SWalHead)) {
printf("wal:%s, SWalHead.len(%d) overflow, skip the rest of file\n", walfile, pHead->len);
break;
}
ret = read(rfd, pHead->cont, pHead->len);
if ( ret != pHead->len) {
printf("wal:%s, failed to read body, skip, len:%d ret:%d\n", walfile, pHead->len, ret);

View File

@ -99,6 +99,8 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
goto PARSE_OVER;
}
content[maxLen] = (char)0;
root = cJSON_Parse(content);
if (root == NULL) {
printf("failed to json parse %s, invalid json format\n", cfgFile);

View File

@ -53,6 +53,7 @@ typedef struct {
void * rowData;
int32_t rowSize;
int32_t retCode; // for callback in sdb queue
int32_t processedCount; // for sync fwd callback
int32_t (*cb)(struct SMnodeMsg *pMsg, int32_t code);
struct SMnodeMsg *pMsg;
} SSdbOper;

View File

@ -88,13 +88,13 @@ static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) {
}
static int32_t mnodeDnodeActionUpdate(SSdbOper *pOper) {
SDnodeObj *pDnode = pOper->pObj;
SDnodeObj *pSaved = mnodeGetDnode(pDnode->dnodeId);
if (pSaved != NULL && pDnode != pSaved) {
memcpy(pSaved, pDnode, pOper->rowSize);
free(pDnode);
mnodeDecDnodeRef(pSaved);
SDnodeObj *pNew = pOper->pObj;
SDnodeObj *pDnode = mnodeGetDnode(pNew->dnodeId);
if (pDnode != NULL && pNew != pDnode) {
memcpy(pDnode, pNew, pOper->rowSize);
free(pNew);
}
mnodeDecDnodeRef(pDnode);
return TSDB_CODE_SUCCESS;
}
@ -334,7 +334,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
if (pStatus->dnodeId == 0) {
mDebug("dnode:%d %s, first access", pDnode->dnodeId, pDnode->dnodeEp);
} else {
//mDebug("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess);
mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess);
}
int32_t openVnodes = htons(pStatus->openVnodes);

View File

@ -72,8 +72,6 @@ typedef struct {
void * sync;
void * wal;
SSyncCfg cfg;
sem_t sem;
int32_t code;
int32_t numOfTables;
SSdbTable *tableList[SDB_TABLE_MAX];
pthread_mutex_t mutex;
@ -244,27 +242,36 @@ static void sdbNotifyRole(void *ahandle, int8_t role) {
sdbUpdateMnodeRoles();
}
FORCE_INLINE
static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
tsSdbObj.code = code;
sem_post(&tsSdbObj.sem);
sdbDebug("forward request confirmed, version:%" PRIu64 ", result:%s", (int64_t)param, tstrerror(code));
}
assert(param);
SSdbOper * pOper = param;
SMnodeMsg *pMsg = pOper->pMsg;
if (code <= 0) pOper->retCode = code;
static int32_t sdbForwardToPeer(SWalHead *pHead) {
if (tsSdbObj.sync == NULL) return TSDB_CODE_SUCCESS;
int32_t processedCount = atomic_add_fetch_32(&pOper->processedCount, 1);
if (processedCount <= 1) {
if (pMsg != NULL) {
sdbDebug("app:%p:%p, waiting for confirm this operation, count:%d", pMsg->rpcMsg.ahandle, pMsg, processedCount);
}
return;
}
int32_t code = syncForwardToPeer(tsSdbObj.sync, pHead, (void*)pHead->version, TAOS_QTYPE_RPC);
if (code > 0) {
sdbDebug("forward request is sent, version:%" PRIu64 ", code:%d", pHead->version, code);
sem_wait(&tsSdbObj.sem);
return tsSdbObj.code;
}
return code;
if (pMsg != NULL) {
sdbDebug("app:%p:%p, is confirmed and will do callback func", pMsg->rpcMsg.ahandle, pMsg);
}
if (pOper->cb != NULL) {
pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode);
}
dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode);
taosFreeQitem(pOper);
}
void sdbUpdateSync() {
SSyncCfg syncCfg = {0};
int32_t index = 0;
int32_t index = 0;
SDMMnodeInfos *mnodes = dnodeGetMnodeInfos();
for (int32_t i = 0; i < mnodes->nodeNum; ++i) {
@ -298,7 +305,7 @@ void sdbUpdateSync() {
}
syncCfg.replica = index;
syncCfg.quorum = (syncCfg.replica == 1) ? 1:2;
syncCfg.quorum = (syncCfg.replica == 1) ? 1 : 2;
bool hasThisDnode = false;
for (int32_t i = 0; i < syncCfg.replica; ++i) {
@ -325,10 +332,10 @@ void sdbUpdateSync() {
syncInfo.getWalInfo = sdbGetWalInfo;
syncInfo.getFileInfo = sdbGetFileInfo;
syncInfo.writeToCache = sdbWriteToQueue;
syncInfo.confirmForward = sdbConfirmForward;
syncInfo.confirmForward = sdbConfirmForward;
syncInfo.notifyRole = sdbNotifyRole;
tsSdbObj.cfg = syncCfg;
if (tsSdbObj.sync) {
syncReconfig(tsSdbObj.sync, &syncCfg);
} else {
@ -339,7 +346,6 @@ void sdbUpdateSync() {
int32_t sdbInit() {
pthread_mutex_init(&tsSdbObj.mutex, NULL);
sem_init(&tsSdbObj.sem, 0, 0);
if (sdbInitWriteWorker() != 0) {
return -1;
@ -379,7 +385,6 @@ void sdbCleanUp() {
tsSdbObj.wal = NULL;
}
sem_destroy(&tsSdbObj.sem);
pthread_mutex_destroy(&tsSdbObj.mutex);
}
@ -513,24 +518,22 @@ static int sdbWrite(void *param, void *data, int type) {
assert(pTable != NULL);
pthread_mutex_lock(&tsSdbObj.mutex);
if (pHead->version == 0) {
// assign version
// assign version
tsSdbObj.version++;
pHead->version = tsSdbObj.version;
} else {
// for data from WAL or forward, version may be smaller
if (pHead->version <= tsSdbObj.version) {
pthread_mutex_unlock(&tsSdbObj.mutex);
if (type == TAOS_QTYPE_FWD && tsSdbObj.sync != NULL) {
sdbDebug("forward request is received, version:%" PRIu64 " confirm it", pHead->version);
syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS);
}
sdbDebug("table:%s, failed to restore %s record:%s from source(%d), version:%" PRId64 " too large, sdb version:%" PRId64,
pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version);
return TSDB_CODE_SUCCESS;
} else if (pHead->version != tsSdbObj.version + 1) {
pthread_mutex_unlock(&tsSdbObj.mutex);
sdbError("table:%s, failed to restore %s record:%s from wal, version:%" PRId64 " too large, sdb version:%" PRId64,
pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version,
tsSdbObj.version);
sdbError("table:%s, failed to restore %s record:%s from source(%d), version:%" PRId64 " too large, sdb version:%" PRId64,
pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version);
return TSDB_CODE_MND_APP_ERROR;
} else {
tsSdbObj.version = pHead->version;
@ -542,28 +545,36 @@ static int sdbWrite(void *param, void *data, int type) {
pthread_mutex_unlock(&tsSdbObj.mutex);
return code;
}
code = sdbForwardToPeer(pHead);
pthread_mutex_unlock(&tsSdbObj.mutex);
// from app, oper is created
if (pOper != NULL) {
sdbTrace("record from app is disposed, table:%s action:%s record:%s version:%" PRIu64 " result:%s",
pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version,
tstrerror(code));
return code;
// forward to peers
pOper->processedCount = 0;
int32_t syncCode = syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC);
if (syncCode <= 0) pOper->processedCount = 1;
if (syncCode < 0) {
sdbError("table:%s, failed to forward request, result:%s action:%s record:%s version:%" PRId64, pTable->tableName,
tstrerror(syncCode), sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
} else if (syncCode > 0) {
sdbDebug("table:%s, forward request is sent, action:%s record:%s version:%" PRId64, pTable->tableName,
sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
} else {
sdbTrace("table:%s, no need to send fwd request, action:%s record:%s version:%" PRId64, pTable->tableName,
sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
}
return syncCode;
}
sdbDebug("table:%s, record from wal/fwd is disposed, action:%s record:%s version:%" PRId64, pTable->tableName,
sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
// even if it is WAL/FWD, it shall be called to update the version in sync
syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC);
// from wal or forward msg, oper not created, should add into hash
if (tsSdbObj.sync != NULL) {
sdbTrace("record from wal forward is disposed, table:%s action:%s record:%s version:%" PRIu64 " confirm it",
pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
syncConfirmForward(tsSdbObj.sync, pHead->version, code);
} else {
sdbTrace("record from wal restore is disposed, table:%s action:%s record:%s version:%" PRIu64, pTable->tableName,
sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
}
if (action == SDB_ACTION_INSERT) {
SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable};
code = (*pTable->decodeFp)(&oper);
@ -627,7 +638,7 @@ int32_t sdbInsertRow(SSdbOper *pOper) {
memcpy(pNewOper, pOper, sizeof(SSdbOper));
if (pNewOper->pMsg != NULL) {
sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle,
sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle,
pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
}
@ -677,7 +688,7 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
memcpy(pNewOper, pOper, sizeof(SSdbOper));
if (pNewOper->pMsg != NULL) {
sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle,
sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle,
pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
}
@ -727,7 +738,7 @@ int32_t sdbUpdateRow(SSdbOper *pOper) {
memcpy(pNewOper, pOper, sizeof(SSdbOper));
if (pNewOper->pMsg != NULL) {
sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle,
sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle,
pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
}
@ -943,20 +954,20 @@ static void *sdbWorkerFp(void *param) {
taosGetQitem(tsSdbWriteQall, &type, &item);
if (type == TAOS_QTYPE_RPC) {
pOper = (SSdbOper *)item;
pOper->processedCount = 1;
pHead = (void *)pOper + sizeof(SSdbOper) + SDB_SYNC_HACK;
if (pOper->pMsg != NULL) {
sdbDebug("app:%p:%p, table:%s record:%p:%s version:%" PRIu64 ", will be processed in sdb queue",
pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj,
sdbGetKeyStr(pOper->table, pHead->cont), pHead->version);
}
} else {
pHead = (SWalHead *)item;
pOper = NULL;
}
if (pOper != NULL && pOper->pMsg != NULL) {
sdbDebug("app:%p:%p, table:%s record:%p:%s version:%" PRIu64 ", will be processed in sdb queue",
pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj,
sdbGetKeyStr(pOper->table, pHead->cont), pHead->version);
}
int32_t code = sdbWrite(pOper, pHead, type);
if (pOper) pOper->retCode = code;
if (pOper && code <= 0) pOper->retCode = code;
}
walFsync(tsSdbObj.wal);
@ -965,25 +976,17 @@ static void *sdbWorkerFp(void *param) {
taosResetQitems(tsSdbWriteQall);
for (int32_t i = 0; i < numOfMsgs; ++i) {
taosGetQitem(tsSdbWriteQall, &type, &item);
if (type == TAOS_QTYPE_RPC) {
pOper = (SSdbOper *)item;
if (pOper != NULL && pOper->cb != NULL) {
sdbTrace("app:%p:%p, will do callback func, index:%d", pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, i);
pOper->retCode = (*pOper->cb)(pOper->pMsg, pOper->retCode);
}
if (pOper != NULL && pOper->pMsg != NULL) {
sdbTrace("app:%p:%p, msg is processed, result:%s", pOper->pMsg->rpcMsg.ahandle, pOper->pMsg,
tstrerror(pOper->retCode));
}
if (pOper != NULL) {
sdbDecRef(pOper->table, pOper->pObj);
}
dnodeSendRpcMnodeWriteRsp(pOper->pMsg, pOper->retCode);
sdbDecRef(pOper->table, pOper->pObj);
sdbConfirmForward(NULL, pOper, pOper->retCode);
} else if (type == TAOS_QTYPE_FWD) {
syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS);
taosFreeQitem(item);
} else {
taosFreeQitem(item);
}
taosFreeQitem(item);
}
}

View File

@ -783,9 +783,15 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {
static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
if (pTable != NULL) {
mLInfo("app:%p:%p, stable:%s, is created in sdb, result:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
tstrerror(code));
assert(pTable);
if (code == TSDB_CODE_SUCCESS) {
mLInfo("stable:%s, is created in sdb", pTable->info.tableId);
} else {
mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
tstrerror(code));
SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsSuperTableSdb};
sdbDeleteRow(&desc);
}
return code;
@ -1561,10 +1567,16 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
assert(pTable);
mDebug("app:%p:%p, table:%s, create table in id:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle, pMsg,
pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
if (code != TSDB_CODE_SUCCESS) return code;
if (code == TSDB_CODE_SUCCESS) {
mDebug("app:%p:%p, table:%s, create table in sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
pTable->sid, pTable->uid);
} else {
mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg,
pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
return code;
}
SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont;
SMDCreateTableMsg *pMDCreate = mnodeBuildCreateChildTableMsg(pCreate, pTable);

View File

@ -165,10 +165,18 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) {
}
mnodeDecDnodeRef(pDnode);
}
free(pNew);
}
mnodeVgroupUpdateIdPool(pVgroup);
// reset vgid status on vgroup changed
mDebug("vgId:%d, reset sync status to unsynced", pVgroup->vgId);
for (int32_t v = 0; v < pVgroup->numOfVnodes; ++v) {
pVgroup->vnodeGid[v].role = TAOS_SYNC_ROLE_UNSYNCED;
}
mnodeDecVgroupRef(pVgroup);
mDebug("vgId:%d, is updated, numOfVnode:%d", pVgroup->vgId, pVgroup->numOfVnodes);
@ -300,6 +308,7 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
if (pVgid->pDnode == pDnode) {
mTrace("dnode:%d, receive status from dnode, vgId:%d status is %d", pVgroup->vgId, pDnode->dnodeId, pVgid->role);
pVgid->role = pVload->role;
if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
pVgroup->inUse = i;
@ -339,17 +348,23 @@ void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) {
}
static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
SVgObj *pVgroup = pMsg->pVgroup;
SDbObj *pDb = pMsg->pDb;
assert(pVgroup);
if (code != TSDB_CODE_SUCCESS) {
pMsg->pVgroup = NULL;
mError("app:%p:%p, vgId:%d, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
tstrerror(code));
SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb};
sdbDeleteRow(&desc);
return code;
}
SVgObj *pVgroup = pMsg->pVgroup;
SDbObj *pDb = pMsg->pDb;
mInfo("vgId:%d, is created in mnode, db:%s replica:%d", pVgroup->vgId, pDb->name, pVgroup->numOfVnodes);
mInfo("app:%p:%p, vgId:%d, is created in mnode, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
pDb->name, pVgroup->numOfVnodes);
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
mInfo("vgId:%d, index:%d, dnode:%d", pVgroup->vgId, i, pVgroup->vnodeGid[i].dnodeId);
mInfo("app:%p:%p, vgId:%d, index:%d, dnode:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, i,
pVgroup->vnodeGid[i].dnodeId);
}
mnodeIncVgroupRef(pVgroup);

View File

@ -26,8 +26,6 @@ extern int32_t httpDebugFlag;
#define httpInfo(...) { if (httpDebugFlag & DEBUG_INFO) { taosPrintLog("HTP INFO ", 255, __VA_ARGS__); }}
#define httpDebug(...) { if (httpDebugFlag & DEBUG_DEBUG) { taosPrintLog("HTP DEBUG ", httpDebugFlag, __VA_ARGS__); }}
#define httpTrace(...) { if (httpDebugFlag & DEBUG_TRACE) { taosPrintLog("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }}
#define httpDebugDump(...) { if (httpDebugFlag & DEBUG_DEBUG) { taosPrintLongString("HTP DEBUG ", httpDebugFlag, __VA_ARGS__); }}
#define httpTraceDump(...) { if (httpDebugFlag & DEBUG_TRACE) { taosPrintLongString("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }}
#define httpTraceL(...){ if (httpDebugFlag & DEBUG_TRACE) { taosPrintLongString("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }}
#endif

View File

@ -313,9 +313,9 @@ bool httpParseRequest(HttpContext* pContext) {
return true;
}
httpTraceDump("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize,
pContext->parser.buffer);
httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize,
pContext->parser.buffer);
if (!httpGetHttpMethod(pContext)) {
return false;

View File

@ -108,7 +108,7 @@ bool httpReadDataImp(HttpContext *pContext) {
static bool httpDecompressData(HttpContext *pContext) {
if (pContext->contentEncoding != HTTP_COMPRESS_GZIP) {
httpTraceDump("context:%p, fd:%d, ip:%s, content:%s", pContext, pContext->fd, pContext->ipstr, pContext->parser.data.pos);
httpTraceL("context:%p, fd:%d, ip:%s, content:%s", pContext, pContext->fd, pContext->ipstr, pContext->parser.data.pos);
return true;
}
@ -124,8 +124,8 @@ static bool httpDecompressData(HttpContext *pContext) {
if (ret == 0) {
memcpy(pContext->parser.data.pos, decompressBuf, decompressBufLen);
pContext->parser.data.pos[decompressBufLen] = 0;
httpTraceDump("context:%p, fd:%d, ip:%s, rawSize:%d, decompressSize:%d, content:%s",
pContext, pContext->fd, pContext->ipstr, pContext->parser.data.len, decompressBufLen, decompressBuf);
httpTraceL("context:%p, fd:%d, ip:%s, rawSize:%d, decompressSize:%d, content:%s", pContext, pContext->fd,
pContext->ipstr, pContext->parser.data.len, decompressBufLen, decompressBuf);
pContext->parser.data.len = decompressBufLen;
} else {
httpError("context:%p, fd:%d, ip:%s, failed to decompress data, rawSize:%d, error:%d",

View File

@ -166,8 +166,8 @@ void httpProcessMultiSql(HttpContext *pContext) {
HttpSqlCmd *cmd = multiCmds->cmds + multiCmds->pos;
char *sql = httpGetCmdsString(pContext, cmd->sql);
httpTraceDump("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd,
pContext->ipstr, pContext->user, multiCmds->pos, sql);
httpTraceL("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd,
pContext->ipstr, pContext->user, multiCmds->pos, sql);
taosNotePrintHttp(sql);
taos_query_a(pContext->session->taos, sql, httpProcessMultiSqlCallBack, (void *)pContext);
}
@ -306,8 +306,8 @@ void httpProcessSingleSqlCmd(HttpContext *pContext) {
return;
}
httpTraceDump("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr,
pContext->user, sql);
httpTraceL("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr,
pContext->user, sql);
taosNotePrintHttp(sql);
taos_query_a(pSession->taos, sql, httpProcessSingleSqlCallBack, (void *)pContext);
}

View File

@ -35,9 +35,6 @@
#define monitorDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON DEBUG ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorTrace(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON TRACE ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorDebugDump(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLongString("MON DEBUG ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorTraceDump(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLongString("MON TRACE ", monitorDebugFlag, __VA_ARGS__); }}
#define SQL_LENGTH 1024
#define LOG_LEN_STR 100
#define IP_LEN_STR 18

View File

@ -27,7 +27,4 @@ extern int32_t mqttDebugFlag;
#define mqttDebug(...) { if (mqttDebugFlag & DEBUG_DEBUG) { taosPrintLog("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }}
#define mqttTrace(...) { if (mqttDebugFlag & DEBUG_TRACE) { taosPrintLog("MQT TRACE ", mqttDebugFlag, __VA_ARGS__); }}
#define mqttDebugDump(...) { if (mqttDebugFlag & DEBUG_DEBUG) { taosPrintLongString("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }}
#define mqttTraceDump(...) { if (mqttDebugFlag & DEBUG_TRACE) { taosPrintLongString("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }}
#endif

View File

@ -154,6 +154,7 @@ typedef struct SQuery {
} SQuery;
typedef struct SQueryRuntimeEnv {
jmp_buf env;
SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo
SQuery* pQuery;
SQLFunctionCtx* pCtx;
@ -169,6 +170,8 @@ typedef struct SQueryRuntimeEnv {
void* pSecQueryHandle; // another thread for
bool stableQuery; // super table query or not
bool topBotQuery; // false
bool groupbyNormalCol; // denote if this is a groupby normal column query
bool hasTagResults; // if there are tag values in final result or not
int32_t prevGroupId; // previous executed group id
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
} SQueryRuntimeEnv;
@ -197,8 +200,10 @@ typedef struct SQInfo {
*/
int32_t tableIndex;
int32_t numOfGroupResultPages;
_qinfo_free_fn_t freeFn;
jmp_buf env;
_qinfo_free_fn_t freeFn; //todo remove it
void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables;
} SQInfo;
#endif // TDENGINE_QUERYEXECUTOR_H

View File

@ -16,16 +16,16 @@
#ifndef TDENGINE_TAST_H
#define TDENGINE_TAST_H
#include <tbuffer.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <tskiplist.h>
#include "os.h"
#include "taosmsg.h"
#include "taosdef.h"
#include "tskiplist.h"
#include "tbuffer.h"
#include "tvariant.h"
struct tExprNode;
@ -75,10 +75,6 @@ typedef struct tExprNode {
};
} tExprNode;
void tSQLBinaryExprFromString(tExprNode **pExpr, SSchema *pSchema, int32_t numOfCols, char *src, int32_t len);
void tSQLBinaryExprToString(tExprNode *pExpr, char *dst, int32_t *len);
void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*));
void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param);
@ -86,12 +82,9 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S
void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
// todo refactor: remove it
void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res);
uint8_t getBinaryExprOptr(SSQLToken *pToken);
void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *));
void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *));
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
tExprNode* exprTreeFromBinary(const void* data, size_t size);

View File

@ -60,8 +60,6 @@ typedef struct SPoint {
void * val;
} SPoint;
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, char timeUnit, int16_t precision);
SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType,
SFillColInfo* pFillCol);

View File

@ -18,18 +18,18 @@
#include "qfill.h"
#include "taosmsg.h"
#include "exception.h"
#include "hash.h"
#include "qExecutor.h"
#include "qUtil.h"
#include "qast.h"
#include "qresultBuf.h"
#include "query.h"
#include "queryLog.h"
#include "qast.h"
#include "tfile.h"
#include "tlosertree.h"
#include "exception.h"
#include "tscompression.h"
#include "ttime.h"
#include "tfile.h"
/**
* check if the primary column is loaded by default, otherwise, the program will
@ -49,6 +49,8 @@
#define GET_COL_DATA_POS(query, index, step) ((query)->pos + (index) * (step))
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
#define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0}
/* get the qinfo struct address from the query struct address */
#define GET_COLUMN_BYTES(query, colidx) \
((query)->colList[(query)->pSelectExpr[colidx].base.colInfo.colIndex].bytes)
@ -100,6 +102,16 @@ static UNUSED_FUNC void *u_malloc (size_t __size) {
}
}
static UNUSED_FUNC void* u_calloc(size_t num, size_t __size) {
uint32_t v = rand();
if (v % 5 <= 1) {
return NULL;
} else {
return calloc(num, __size);
}
}
#define calloc u_calloc
#define malloc u_malloc
#endif
@ -109,7 +121,7 @@ static UNUSED_FUNC void *u_malloc (size_t __size) {
static void setQueryStatus(SQuery *pQuery, int8_t status);
static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; }
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0)
// todo move to utility
static int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *group);
@ -314,6 +326,24 @@ static bool isTopBottomQuery(SQuery *pQuery) {
return false;
}
static bool hasTagValOutput(SQuery* pQuery) {
SExprInfo *pExprInfo = &pQuery->pSelectExpr[0];
if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) {
return true;
} else { // set tag value, by which the results are aggregated.
for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) {
SExprInfo *pLocalExprInfo = &pQuery->pSelectExpr[idx];
// ts_comp column required the tag value for join filter
if (TSDB_COL_IS_TAG(pLocalExprInfo->base.colInfo.flag)) {
return true;
}
}
}
return false;
}
static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, int32_t numOfCols, int32_t index) {
// for a tag column, no corresponding field info
SColIndex *pColIndex = &pQuery->pSelectExpr[index].base.colInfo;
@ -368,34 +398,38 @@ static bool hasNullValue(SQuery *pQuery, int32_t col, int32_t numOfCols, SDataSt
}
static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, char *pData,
int16_t bytes) {
int16_t bytes, bool masterscan) {
SQuery *pQuery = pRuntimeEnv->pQuery;
int32_t *p1 = (int32_t *) taosHashGet(pWindowResInfo->hashList, pData, bytes);
if (p1 != NULL) {
pWindowResInfo->curIndex = *p1;
} else { // more than the capacity, reallocate the resources
if (pWindowResInfo->size >= pWindowResInfo->capacity) {
int64_t newCap = pWindowResInfo->capacity * 2;
} else {
if (masterscan) { // more than the capacity, reallocate the resources
if (pWindowResInfo->size >= pWindowResInfo->capacity) {
int64_t newCap = pWindowResInfo->capacity * 2;
char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult));
if (t != NULL) {
pWindowResInfo->pResult = (SWindowResult *)t;
memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * pWindowResInfo->capacity);
} else {
// todo
char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult));
if (t != NULL) {
pWindowResInfo->pResult = (SWindowResult *)t;
memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * pWindowResInfo->capacity);
} else {
// todo
}
for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) {
SPosInfo pos = {-1, -1};
createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &pos);
}
pWindowResInfo->capacity = newCap;
}
for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) {
SPosInfo pos = {-1, -1};
createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &pos);
}
pWindowResInfo->capacity = newCap;
// add a new result set for a new group
pWindowResInfo->curIndex = pWindowResInfo->size++;
taosHashPut(pWindowResInfo->hashList, pData, bytes, (char *)&pWindowResInfo->curIndex, sizeof(int32_t));
} else {
return NULL;
}
// add a new result set for a new group
pWindowResInfo->curIndex = pWindowResInfo->size++;
taosHashPut(pWindowResInfo->hashList, pData, bytes, (char *)&pWindowResInfo->curIndex, sizeof(int32_t));
}
return getWindowResult(pWindowResInfo, pWindowResInfo->curIndex);
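
A simplified, self-contained stand-in for `doSetTimeWindowFromKey`: linear search replaces the real hash table, but the capacity-doubling growth and the new `masterscan` gate (repeat scans never create windows) follow the change above.

```c
// Simplified stand-in for doSetTimeWindowFromKey: linear search instead of a
// hash table; growth by doubling; repeat scans only reuse existing windows.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int64_t *keys;     // window start keys
  int32_t *results;  // one slot per window, stands in for SWindowResult
  int32_t  size;
  int32_t  capacity;
} WindowResInfo;

static int32_t *setWindowByKey(WindowResInfo *info, int64_t key, int masterscan) {
  for (int32_t i = 0; i < info->size; ++i) {
    if (info->keys[i] == key) return &info->results[i];   // existing window
  }
  if (!masterscan) return NULL;   // repeat scans never create new windows

  if (info->size >= info->capacity) {            // grow by doubling
    int32_t  newCap  = (info->capacity == 0) ? 4 : info->capacity * 2;
    int64_t *keys    = realloc(info->keys, newCap * sizeof(int64_t));
    int32_t *results = realloc(info->results, newCap * sizeof(int32_t));
    if (keys == NULL || results == NULL) return NULL;  // error handling simplified
    info->keys     = keys;
    info->results  = results;
    info->capacity = newCap;
  }
  info->keys[info->size]    = key;
  info->results[info->size] = 0;
  return &info->results[info->size++];
}

int main(void) {
  WindowResInfo info = {0};
  for (int64_t k = 0; k < 10; ++k) {                    // master scan: create
    int32_t *slot = setWindowByKey(&info, k * 1000, 1);
    if (slot != NULL) *slot += 1;
  }
  printf("windows after master scan: %d\n", info.size);
  printf("unknown key on repeat scan: %s\n",
         setWindowByKey(&info, 99000, 0) == NULL ? "not created" : "created");
  free(info.keys);
  free(info.results);
  return 0;
}
```
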
@ -482,15 +516,19 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult
}
static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, int32_t sid,
STimeWindow *win) {
STimeWindow *win, bool masterscan, bool* newWind) {
assert(win->skey <= win->ekey);
SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&win->skey, TSDB_KEYSIZE);
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&win->skey,
TSDB_KEYSIZE, masterscan);
if (pWindowRes == NULL) {
return -1;
*newWind = false;
return masterscan? -1:0;
}
*newWind = true;
// not assign result buffer yet, add new result buffer
if (pWindowRes->pos.pageId == -1) {
int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, sid, pRuntimeEnv->numOfRowsPerPage);
@ -534,7 +572,7 @@ static int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t sea
*/
static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SWindowResInfo *pWindowResInfo) {
SQuery *pQuery = pRuntimeEnv->pQuery;
if (pRuntimeEnv->scanFlag != MASTER_SCAN || (!isIntervalQuery(pQuery))) {
if (pRuntimeEnv->scanFlag != MASTER_SCAN || (!QUERY_IS_INTERVAL_QUERY(pQuery))) {
return pWindowResInfo->size;
}
@ -673,7 +711,7 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStat
if (forwardStep != numOfTotal) {
pCtx[k].preAggVals.isSet = false;
}
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
aAggs[functionId].xFunction(&pCtx[k]);
}
@ -787,8 +825,8 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas
return NULL;
}
char *dataBlock = NULL;
SQuery *pQuery = pRuntimeEnv->pQuery;
SQuery *pQuery = pRuntimeEnv->pQuery;
SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
int32_t functionId = pQuery->pSelectExpr[col].base.functionId;
@ -807,6 +845,10 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas
sas->numOfCols = pQuery->numOfCols;
sas->data = calloc(pQuery->numOfCols, POINTER_BYTES);
if (sas->data == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
// here the pQuery->colList and sas->colList are identical
int32_t numOfCols = taosArrayGetSize(pDataBlock);
for (int32_t i = 0; i < pQuery->numOfCols; ++i) {
@ -851,7 +893,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
SDataBlockInfo *pDataBlockInfo, SWindowResInfo *pWindowResInfo,
__block_search_fn_t searchFn, SArray *pDataBlock) {
SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
SQuery *pQuery = pRuntimeEnv->pQuery;
TSKEY *tsCols = NULL;
if (pDataBlock != NULL) {
@ -860,6 +903,9 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
}
SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport));
if (sasArray == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock);
@ -867,22 +913,25 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
}
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (isIntervalQuery(pQuery) && tsCols != NULL) {
if (QUERY_IS_INTERVAL_QUERY(pQuery) && tsCols != NULL) {
int32_t offset = GET_COL_DATA_POS(pQuery, 0, step);
TSKEY ts = tsCols[offset];
bool hasTimeWindow = false;
STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery);
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win) != TSDB_CODE_SUCCESS) {
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) {
tfree(sasArray);
return;
}
TSKEY ekey = reviseWindowEkey(pQuery, &win);
int32_t forwardStep =
getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, pQuery->pos, ekey, searchFn, true);
if (hasTimeWindow) {
TSKEY ekey = reviseWindowEkey(pQuery, &win);
int32_t forwardStep =
getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, pQuery->pos, ekey, searchFn, true);
SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &win, pQuery->pos, forwardStep, tsCols, pDataBlockInfo->rows);
SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &win, pQuery->pos, forwardStep, tsCols, pDataBlockInfo->rows);
}
int32_t index = pWindowResInfo->curIndex;
STimeWindow nextWin = win;
@ -894,14 +943,19 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
}
// null data, failed to allocate more memory buffer
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin) != TSDB_CODE_SUCCESS) {
bool hasTimeWindow = false;
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) {
break;
}
ekey = reviseWindowEkey(pQuery, &nextWin);
forwardStep = getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, startPos, ekey, searchFn, true);
if (!hasTimeWindow) {
continue;
}
pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
TSKEY ekey = reviseWindowEkey(pQuery, &nextWin);
int32_t forwardStep = getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, startPos, ekey, searchFn, true);
SWindowStatus* pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, startPos, forwardStep, tsCols, pDataBlockInfo->rows);
}
@ -951,7 +1005,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat
}
// assert(pRuntimeEnv->windowResInfo.hashList->size <= 2);
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes);
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes, true);
if (pWindowRes == NULL) {
return -1;
}
@ -1051,6 +1105,11 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) {
static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
SQuery* pQuery = pRuntimeEnv->pQuery;
// in case of a timestamp column, always generate results.
if (functionId == TSDB_FUNC_TS) {
return true;
}
if (pResInfo->complete || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
return false;
@ -1063,7 +1122,6 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
// todo add comments
if ((functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST)) {
return pCtx->param[0].i64Key == pQuery->order.order;
// return !QUERY_IS_ASC_QUERY(pQuery);
}
// in the supplementary scan, only the following functions need to be executed
@ -1077,6 +1135,7 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pStatis, SDataBlockInfo *pDataBlockInfo,
SWindowResInfo *pWindowResInfo, SArray *pDataBlock) {
SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
SQuery *pQuery = pRuntimeEnv->pQuery;
STableQueryInfo* item = pQuery->current;
@ -1084,8 +1143,12 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
SColumnInfoData* pColumnInfoData = (SColumnInfoData *)taosArrayGet(pDataBlock, 0);
TSKEY *tsCols = (pColumnInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP)? (TSKEY*) pColumnInfoData->pData:NULL;
bool groupbyColumnValue = isGroupbyNormalCol(pQuery->pGroupbyExpr);
bool groupbyColumnValue = pRuntimeEnv->groupbyNormalCol;
SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport));
if (sasArray == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
int16_t type = 0;
int16_t bytes = 0;
@ -1139,16 +1202,21 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
}
// interval window query
if (isIntervalQuery(pQuery)) {
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
// decide the time window according to the primary timestamp
int64_t ts = tsCols[offset];
STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery);
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win);
bool hasTimeWindow = false;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win, masterScan, &hasTimeWindow);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
continue;
}
if (!hasTimeWindow) {
continue;
}
SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &win, offset);
@ -1168,12 +1236,15 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
}
// null data, failed to allocate more memory buffer
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin) != TSDB_CODE_SUCCESS) {
bool hasTimeWindow = false;
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) {
break;
}
pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, offset);
if (hasTimeWindow) {
pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, offset);
}
}
pWindowResInfo->curIndex = index;
@ -1231,7 +1302,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
STableQueryInfo* pTableQInfo = pQuery->current;
SWindowResInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || pRuntimeEnv->groupbyNormalCol) {
rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock);
} else {
blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock);
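
A small illustration of why this dispatch matters (the types and numbers are made up): without a row filter or a group-by on a normal column, a block can be aggregated straight from its pre-computed statistics, while a filter forces a row-by-row scan.

```c
// Blockwise vs. rowwise aggregation: the pre-aggregated statistics answer the
// unfiltered case in O(1); a filter requires visiting every row in the block.
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t sum;        // pre-aggregated per-block statistics (cf. SDataStatis)
  int32_t numOfRows;
} BlockStatis;

static int64_t blockwiseSum(const BlockStatis *statis) {
  return statis->sum;                       // O(1): reuse block statistics
}

static int64_t rowwiseSum(const int32_t *rows, int32_t n, int32_t minVal) {
  int64_t sum = 0;
  for (int32_t i = 0; i < n; ++i) {         // O(n): the filter forces a row scan
    if (rows[i] >= minVal) sum += rows[i];
  }
  return sum;
}

int main(void) {
  int32_t rows[] = {1, 5, 7, 2, 9};
  BlockStatis statis = {.sum = 24, .numOfRows = 5};
  printf("no filter, blockwise: %lld\n", (long long)blockwiseSum(&statis));
  printf("filter >= 5, rowwise: %lld\n", (long long)rowwiseSum(rows, 5, 5));
  return 0;
}
```
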
@ -1243,7 +1314,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
// interval query with limit applied
int32_t numOfRes = 0;
if (isIntervalQuery(pQuery)) {
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
numOfRes = doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo);
} else {
numOfRes = getNumOfResult(pRuntimeEnv);
@ -1352,14 +1423,16 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY
}
// set the output buffer for the selectivity + tag query
static void setCtxTagColumnInfo(SQuery *pQuery, SQLFunctionCtx *pCtx) {
static void setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx) {
SQuery* pQuery = pRuntimeEnv->pQuery;
if (isSelectivityWithTagsQuery(pQuery)) {
int32_t num = 0;
int16_t tagLen = 0;
SQLFunctionCtx *p = NULL;
SQLFunctionCtx **pTagCtx = calloc(pQuery->numOfOutput, POINTER_BYTES);
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SSqlFuncMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].base;
@ -1386,7 +1459,7 @@ static void setCtxTagColumnInfo(SQuery *pQuery, SQLFunctionCtx *pCtx) {
}
}
static void setWindowResultInfo(SResultInfo *pResultInfo, SQuery *pQuery, bool isStableQuery) {
static FORCE_INLINE void setWindowResultInfo(SResultInfo *pResultInfo, SQuery *pQuery, bool isStableQuery) {
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
assert(pQuery->pSelectExpr[i].interBytes <= DEFAULT_INTERN_BUF_PAGE_SIZE);
@ -1477,7 +1550,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
resetCtxOutputBuf(pRuntimeEnv);
}
setCtxTagColumnInfo(pQuery, pRuntimeEnv->pCtx);
setCtxTagColumnInfo(pRuntimeEnv, pRuntimeEnv->pCtx);
return TSDB_CODE_SUCCESS;
_clean:
@ -1640,8 +1713,7 @@ static bool onlyQueryTags(SQuery* pQuery) {
void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *realWin, STimeWindow *win) {
assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime);
win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->slidingTimeUnit, pQuery->precision);
win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->intervalTime, pQuery->slidingTimeUnit, pQuery->precision);
if (keyFirst > (INT64_MAX - pQuery->intervalTime)) {
/*
* if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between
@ -1828,7 +1900,7 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) {
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
num = 128;
} else if (isIntervalQuery(pQuery)) { // time window query, allocate one page for each table
} else if (QUERY_IS_INTERVAL_QUERY(pQuery)) { // time window query, allocate one page for each table
size_t s = pQInfo->tableqinfoGroupInfo.numOfTables;
num = MAX(s, INITIAL_RESULT_ROWS_VALUE);
} else { // for super table query, one page for each subset
@ -1978,7 +2050,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
r |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pQuery->window.skey, pQuery->window.ekey, colId);
}
if (pRuntimeEnv->pTSBuf > 0 || isIntervalQuery(pQuery)) {
if (pRuntimeEnv->pTSBuf > 0 || QUERY_IS_INTERVAL_QUERY(pQuery)) {
r |= BLK_DATA_ALL_NEEDED;
}
}
@ -2117,7 +2189,7 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa
static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) {
// in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block
SQuery* pQuery = pRuntimeEnv->pQuery;
if (!isIntervalQuery(pQuery) && !isGroupbyNormalCol(pQuery->pGroupbyExpr) && !isFixedOutputQuery(pQuery)) {
if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pQuery)) {
SResultRec *pRec = &pQuery->rec;
if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) {
@ -2132,6 +2204,7 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB
if (tmp == NULL) { // todo handle the oom
assert(0);
} else {
memset(tmp + sizeof(tFilePage) + bytes * pRec->rows, 0, (newSize - pRec->rows) * bytes);
pQuery->sdata[i] = (tFilePage *)tmp;
}
@ -2162,16 +2235,18 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
pQuery->order.order);
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pQueryHandle)) {
summary->totalBlocks += 1;
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
return 0;
}
SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle);
tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo);
// todo extract methods
if (isIntervalQuery(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) {
if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) {
STimeWindow realWin = TSWINDOW_INITIALIZER, w = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
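
A sketch of the iteration pattern introduced here, with stand-in types rather than the real tsdb handles: the caller owns a single block-info struct and the retrieve call fills it in place on each pass, instead of returning a struct by value.

```c
// Out-parameter iteration: one BlockInfo instance is reused for every block.
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int32_t rows;
  int64_t skey;
  int64_t ekey;
} BlockInfo;

typedef struct {
  int32_t current;
  int32_t total;
} BlockIter;

static int nextDataBlock(BlockIter *it) { return it->current < it->total; }

static void retrieveDataBlockInfo(BlockIter *it, BlockInfo *info) {
  info->rows = 100;                          // filled in place, no copy returned
  info->skey = (int64_t)it->current * 1000;
  info->ekey = info->skey + 999;
  it->current += 1;
}

int main(void) {
  BlockIter it = {.current = 0, .total = 3};
  BlockInfo blockInfo = {0};                 // single instance reused per block
  while (nextDataBlock(&it)) {
    retrieveDataBlockInfo(&it, &blockInfo);
    printf("block rows=%d window=[%lld, %lld]\n", blockInfo.rows,
           (long long)blockInfo.skey, (long long)blockInfo.ekey);
  }
  return 0;
}
```
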
@ -2217,7 +2292,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
setQueryStatus(pQuery, QUERY_COMPLETED);
}
if (isIntervalQuery(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) {
if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) {
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP;
@ -2638,7 +2713,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
tfree(pTableList);
qError("QInfo:%p failed alloc memory", pQInfo);
longjmp(pQInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
// todo opt for the case of one table per group
@ -2646,7 +2721,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
for (int32_t i = 0; i < size; ++i) {
STableQueryInfo *item = taosArrayGetP(pGroup, i);
SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, tsdbGetTableId(item->pTable).tid);
SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, TSDB_TABLEID(item->pTable)->tid);
if (list.size > 0 && item->windowResInfo.size > 0) {
pTableList[numOfTables] = item;
numOfTables += 1;
@ -2669,6 +2744,10 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn);
SResultInfo *pResultInfo = calloc(pQuery->numOfOutput, sizeof(SResultInfo));
if (pResultInfo == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
setWindowResultInfo(pResultInfo, pQuery, pRuntimeEnv->stableQuery);
resetMergeResultBuf(pQuery, pRuntimeEnv->pCtx, pResultInfo);
@ -2869,7 +2948,7 @@ void disableFuncInReverseScan(SQInfo *pQInfo) {
// group by normal columns and interval query on normal table
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) {
if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) {
disableFuncInReverseScanImpl(pQInfo, pWindowResInfo, order);
} else { // for simple result of table query,
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { // todo refactor
@ -3044,7 +3123,7 @@ bool needScanDataBlocksAgain(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
bool toContinue = false;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) {
if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) {
// for each group result, call the finalize function for each column
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
@ -3236,10 +3315,10 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) {
if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) {
// for each group result, call the finalize function for each column
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
if (pRuntimeEnv->groupbyNormalCol) {
closeAllTimeWindow(pWindowResInfo);
}
@ -3281,10 +3360,10 @@ static bool hasMainOutput(SQuery *pQuery) {
return false;
}
static STableQueryInfo *createTableQueryInfo( SQueryRuntimeEnv *pRuntimeEnv, void* pTable, STimeWindow win) {
SQuery* pQuery = pRuntimeEnv->pQuery;
static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void* pTable, STimeWindow win, void* buf) {
SQuery *pQuery = pRuntimeEnv->pQuery;
STableQueryInfo *pTableQueryInfo = calloc(1, sizeof(STableQueryInfo));
STableQueryInfo *pTableQueryInfo = buf;
pTableQueryInfo->win = win;
pTableQueryInfo->lastKey = win.skey;
@ -3292,15 +3371,14 @@ static STableQueryInfo *createTableQueryInfo( SQueryRuntimeEnv *pRuntimeEnv, voi
pTableQueryInfo->pTable = pTable;
pTableQueryInfo->cur.vgroupIndex = -1;
int32_t initialSize = 1;
int32_t initialThreshold = 1;
if (isIntervalQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
initialSize = 20;
initialThreshold = 100;
// use a larger initial size for interval/groupby queries
if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) {
int32_t initialSize = 20;
int32_t initialThreshold = 100;
initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT);
} else { // in other aggregate query, do not initialize the windowResInfo
}
initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT);
return pTableQueryInfo;
}
@ -3310,7 +3388,6 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols)
}
cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo, numOfCols);
free(pTableQueryInfo);
}
#define SET_CURRENT_QUERY_TABLE_INFO(_runtime, _tableInfo) \
@ -3323,7 +3400,6 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols)
/**
* set output buffer for different group
* TODO opt performance if current group is identical to previous group
* @param pRuntimeEnv
* @param pDataBlockInfo
*/
@ -3334,14 +3410,18 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) {
// lastKey needs to be updated
pTableQueryInfo->lastKey = nextKey;
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) {
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
}
if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == groupIndex) {
return;
}
int32_t GROUPRESULTID = 1;
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex, sizeof(groupIndex));
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex,
sizeof(groupIndex), true);
if (pWindowRes == NULL) {
return;
}
@ -3523,7 +3603,7 @@ static int32_t getNumOfSubset(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
int32_t totalSubset = 0;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (isIntervalQuery(pQuery))) {
if (pQInfo->runtimeEnv.groupbyNormalCol || (QUERY_IS_INTERVAL_QUERY(pQuery))) {
totalSubset = numOfClosedTimeWindow(&pQInfo->runtimeEnv.windowResInfo);
} else {
totalSubset = GET_NUM_OF_TABLEGROUP(pQInfo);
@ -3617,11 +3697,11 @@ void copyFromWindowResToSData(SQInfo *pQInfo, SWindowResult *result) {
assert(pQuery->rec.rows <= pQuery->rec.capacity);
}
static UNUSED_FUNC void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
SQuery *pQuery = pRuntimeEnv->pQuery;
// update the number of results for each window; only the number of rows of the corresponding window result is updated.
if (pQuery->intervalTime == 0) {
if (!QUERY_IS_INTERVAL_QUERY(pQuery)) {
for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) {
SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i];
@ -3635,14 +3715,6 @@ static UNUSED_FUNC void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, S
pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
}
}
// int32_t g = pTableQueryInfo->groupIndex;
// assert(pRuntimeEnv->windowResInfo.size > 0);
//
// SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&g, sizeof(g));
// if (pWindowRes->numOfRows == 0) {
// pWindowRes->numOfRows = getNumOfResult(pRuntimeEnv);
// }
}
}
@ -3654,7 +3726,7 @@ void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *
SWindowResInfo * pWindowResInfo = &pTableQueryInfo->windowResInfo;
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1;
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || pRuntimeEnv->groupbyNormalCol) {
rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock);
} else {
blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock);
@ -3696,7 +3768,7 @@ bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) {
} else {
// there are results waiting for returned to client.
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) &&
(isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) &&
(pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) &&
(pRuntimeEnv->windowResInfo.size > 0)) {
return true;
}
@ -3879,12 +3951,13 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
STableQueryInfo* pTableQueryInfo = pQuery->current;
TsdbQueryHandleT pQueryHandle = pRuntimeEnv->pQueryHandle;
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pQueryHandle)) {
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
return;
}
SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle);
tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo);
if (pQuery->limit.offset > blockInfo.rows) {
pQuery->limit.offset -= blockInfo.rows;
@ -3921,8 +3994,9 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
STableQueryInfo *pTableQueryInfo = pQuery->current;
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) {
SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle);
tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle, &blockInfo);
if (QUERY_IS_ASC_QUERY(pQuery)) {
if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) {
@ -4028,7 +4102,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
return;
}
if (isSTableQuery && (!isIntervalQuery(pQuery)) && (!isFixedOutputQuery(pQuery))) {
if (isSTableQuery && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isFixedOutputQuery(pQuery))) {
return;
}
@ -4042,7 +4116,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
if (!isSTableQuery
&& (pQInfo->tableqinfoGroupInfo.numOfTables == 1)
&& (cond.order == TSDB_ORDER_ASC)
&& (!isIntervalQuery(pQuery))
&& (!QUERY_IS_INTERVAL_QUERY(pQuery))
&& (!isGroupbyNormalCol(pQuery->pGroupbyExpr))
&& (!isFixedOutputQuery(pQuery))
) {
@ -4101,6 +4175,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
pRuntimeEnv->cur.vgroupIndex = -1;
pRuntimeEnv->stableQuery = isSTableQuery;
pRuntimeEnv->prevGroupId = INT32_MIN;
pRuntimeEnv->groupbyNormalCol = isGroupbyNormalCol(pQuery->pGroupbyExpr);
if (pTsBuf != NULL) {
int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
@ -4125,7 +4200,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
if (pQuery->intervalTime == 0) {
int16_t type = TSDB_DATA_TYPE_NULL;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group by columns not tags;
if (pRuntimeEnv->groupbyNormalCol) { // group by columns not tags;
type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr);
} else {
type = TSDB_DATA_TYPE_INT; // group id
@ -4134,7 +4209,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 512, 4096, type);
}
} else if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) {
} else if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) {
int32_t rows = getInitialPageNum(pQInfo);
code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, rows, pQuery->rowSize, pQInfo);
if (code != TSDB_CODE_SUCCESS) {
@ -4142,7 +4217,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
}
int16_t type = TSDB_DATA_TYPE_NULL;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
if (pRuntimeEnv->groupbyNormalCol) {
type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr);
} else {
type = TSDB_DATA_TYPE_TIMESTAMP;
@ -4160,8 +4235,9 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
// todo refactor
pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
pRuntimeEnv->hasTagResults = hasTagValOutput(pQuery);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
return TSDB_CODE_SUCCESS;
}
@ -4184,13 +4260,15 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
int64_t st = taosGetTimestampMs();
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pQueryHandle)) {
summary->totalBlocks += 1;
if (isQueryKilled(pQInfo)) {
break;
}
SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle);
tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo);
STableQueryInfo **pTableQueryInfo = (STableQueryInfo**) taosHashGet(pQInfo->tableqinfoGroupInfo.map, &blockInfo.tid, sizeof(blockInfo.tid));
if(pTableQueryInfo == NULL) {
break;
@ -4202,14 +4280,17 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
if (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
if (!isIntervalQuery(pQuery)) {
if (!pRuntimeEnv->groupbyNormalCol) {
if (!QUERY_IS_INTERVAL_QUERY(pQuery)) {
int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1;
setExecutionContext(pQInfo, (*pTableQueryInfo)->groupIndex, blockInfo.window.ekey + step);
} else { // interval query
TSKEY nextKey = blockInfo.window.skey;
setIntervalQueryRange(pQInfo, nextKey);
/*int32_t ret = */setAdditionalInfo(pQInfo, (*pTableQueryInfo)->pTable, *pTableQueryInfo);
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) {
setAdditionalInfo(pQInfo, (*pTableQueryInfo)->pTable, *pTableQueryInfo);
}
}
}
@ -4234,9 +4315,9 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) {
setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb);
STableId id = tsdbGetTableId(pCheckInfo->pTable);
STableId* id = TSDB_TABLEID(pCheckInfo->pTable);
qDebug("QInfo:%p query on (%d): uid:%" PRIu64 ", tid:%d, qrange:%" PRId64 "-%" PRId64, pQInfo, index,
id.uid, id.tid, pCheckInfo->lastKey, pCheckInfo->win.ekey);
id->uid, id->tid, pCheckInfo->lastKey, pCheckInfo->win.ekey);
STsdbQueryCond cond = {
.twindow = {pCheckInfo->lastKey, pCheckInfo->win.ekey},
@ -4365,7 +4446,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
break;
}
}
} else if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group-by on normal columns query
} else if (pRuntimeEnv->groupbyNormalCol) { // group-by on normal columns query
while (pQInfo->groupIndex < numOfGroups) {
SArray* group = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, pQInfo->groupIndex);
@ -4503,11 +4584,11 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
*/
pQInfo->tableIndex++;
STableIdInfo tidInfo;
STableId id = tsdbGetTableId(pQuery->current->pTable);
STableIdInfo tidInfo = {0};
tidInfo.uid = id.uid;
tidInfo.tid = id.tid;
STableId* id = TSDB_TABLEID(pQuery->current->pTable);
tidInfo.uid = id->uid;
tidInfo.tid = id->tid;
tidInfo.key = pQuery->current->lastKey;
taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo);
@ -4609,7 +4690,9 @@ static void doRestoreContext(SQInfo *pQInfo) {
static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
if (isIntervalQuery(pQuery)) {
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
size_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo);
for (int32_t i = 0; i < numOfGroup; ++i) {
SArray *group = GET_TABLEGROUP(pQInfo, i);
@ -4618,6 +4701,7 @@ static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) {
for (int32_t j = 0; j < num; ++j) {
STableQueryInfo* item = taosArrayGetP(group, j);
closeAllTimeWindow(&item->windowResInfo);
removeRedundantWindow(&item->windowResInfo, item->lastKey - step, step);
}
}
} else { // close results for group result
@ -4634,7 +4718,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
 * if the groupIndex > 0, the query process must have been completed already; we only need to
* copy the data into output buffer
*/
if (isIntervalQuery(pQuery)) {
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
copyResToQueryResultBuf(pQInfo, pQuery);
#ifdef _DEBUG_VIEW
displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num);
@ -4669,6 +4753,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
el = scanMultiTableDataBlocks(pQInfo);
qDebug("QInfo:%p reversed scan completed, elapsed time: %" PRId64 "ms", pQInfo, el);
doCloseAllTimeWindowAfterScan(pQInfo);
doRestoreContext(pQInfo);
} else {
qDebug("QInfo:%p no need to do reversed scan, query completed", pQInfo);
@ -4681,7 +4766,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
return;
}
if (isIntervalQuery(pQuery) || isSumAvgRateQuery(pQuery)) {
if (QUERY_IS_INTERVAL_QUERY(pQuery) || isSumAvgRateQuery(pQuery)) {
if (mergeIntoGroupResult(pQInfo) == TSDB_CODE_SUCCESS) {
copyResToQueryResultBuf(pQInfo, pQuery);
@ -4778,10 +4863,10 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
pQuery->current->lastKey, pQuery->window.ekey);
} else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
STableIdInfo tidInfo;
STableId id = tsdbGetTableId(pQuery->current);
STableId* id = TSDB_TABLEID(pQuery->current);
tidInfo.uid = id.uid;
tidInfo.tid = id.tid;
tidInfo.uid = id->uid;
tidInfo.tid = id->tid;
tidInfo.key = pQuery->current->lastKey;
taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo);
}
@ -4842,7 +4927,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
while (1) {
tableIntervalProcessImpl(pRuntimeEnv, newStartKey);
if (isIntervalQuery(pQuery)) {
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
pQInfo->groupIndex = 0; // always start from 0
pQuery->rec.rows = 0;
copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult);
@ -4871,7 +4956,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
}
// all data scanned; the group-by-on-normal-column query can now return
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // todo refactor with merge interval time result
if (pRuntimeEnv->groupbyNormalCol) { // todo refactor with merge interval time result
pQInfo->groupIndex = 0;
pQuery->rec.rows = 0;
copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult);
@ -4932,7 +5017,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
STableQueryInfo* item = taosArrayGetP(g, 0);
// group by normal column, sliding window query, interval query are handled by interval query processor
if (isIntervalQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // interval (down sampling operation)
if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { // interval (down sampling operation)
tableIntervalProcess(pQInfo, item);
} else if (isFixedOutputQuery(pQuery)) {
tableFixedOutputProcess(pQInfo, item);
@ -4947,18 +5032,19 @@ static void tableQueryImpl(SQInfo *pQInfo) {
}
static void stableQueryImpl(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pRuntimeEnv->pQuery;
pQuery->rec.rows = 0;
int64_t st = taosGetTimestampUs();
if (isIntervalQuery(pQuery) ||
(isFixedOutputQuery(pQuery) && (!isPointInterpoQuery(pQuery)) && !isGroupbyNormalCol(pQuery->pGroupbyExpr) &&
if (QUERY_IS_INTERVAL_QUERY(pQuery) ||
(isFixedOutputQuery(pQuery) && (!isPointInterpoQuery(pQuery)) && !pRuntimeEnv->groupbyNormalCol &&
!isFirstLastRowQuery(pQuery))) {
multiTableQueryProcess(pQInfo);
} else {
assert((pQuery->checkBuffer == 1 && pQuery->intervalTime == 0) || isPointInterpoQuery(pQuery) ||
isFirstLastRowQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr));
isFirstLastRowQuery(pQuery) || pRuntimeEnv->groupbyNormalCol);
sequentialTableProcess(pQInfo);
}
@ -5653,28 +5739,33 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
STimeWindow window = pQueryMsg->window;
taosArraySort(pTableIdList, compareTableIdInfo);
// TODO optimize the STableQueryInfo malloc strategy
pQInfo->pBuf = calloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo));
int32_t index = 0;
for(int32_t i = 0; i < numOfGroups; ++i) {
SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, i);
size_t s = taosArrayGetSize(pa);
size_t s = taosArrayGetSize(pa);
SArray* p1 = taosArrayInit(s, POINTER_BYTES);
for(int32_t j = 0; j < s; ++j) {
void* pTable = taosArrayGetP(pa, j);
STableId* id = TSDB_TABLEID(pTable);
// NOTE: compare STableIdInfo with STableId
STableId id = tsdbGetTableId(pTable);
STableIdInfo* pTableId = taosArraySearch(pTableIdList, &id, compareTableIdInfo);
STableIdInfo* pTableId = taosArraySearch(pTableIdList, id, compareTableIdInfo);
if (pTableId != NULL ) {
window.skey = pTableId->key;
} else {
window.skey = pQueryMsg->window.skey;
}
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window);
void* buf = pQInfo->pBuf + index * sizeof(STableQueryInfo);
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window, buf);
item->groupIndex = i;
taosArrayPush(p1, &item);
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES);
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES);
index += 1;
}
taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1);
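
A minimal sketch of the pooled-allocation strategy noted in the TODO above (the record type is a stand-in for `STableQueryInfo`): one `calloc` covers every table, each table gets a slot by index, and cleanup is a single `free`.

```c
// Pooled allocation: all per-table records live in one buffer.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int32_t tid;
  int64_t lastKey;
} TableQueryInfo;

int main(void) {
  int32_t numOfTables = 4;

  // one allocation for every table, released with a single free() at the end
  char *pool = calloc(numOfTables, sizeof(TableQueryInfo));
  if (pool == NULL) return 1;

  for (int32_t i = 0; i < numOfTables; ++i) {
    // slot i lives at pool + i * sizeof(TableQueryInfo); no per-table malloc
    TableQueryInfo *item = (TableQueryInfo *)(pool + i * sizeof(TableQueryInfo));
    item->tid = i + 1;
    item->lastKey = 0;
  }

  TableQueryInfo *third = (TableQueryInfo *)(pool + 2 * sizeof(TableQueryInfo));
  printf("tid of third slot: %d\n", third->tid);

  free(pool);  // mirrors the single tfree(pQInfo->pBuf) in the cleanup path
  return 0;
}
```
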
@ -5818,6 +5909,7 @@ static void freeQInfo(SQInfo *pQInfo) {
taosArrayDestroy(p);
}
tfree(pQInfo->pBuf);
taosArrayDestroy(pQInfo->tableqinfoGroupInfo.pGroupList);
taosHashCleanup(pQInfo->tableqinfoGroupInfo.map);
tsdbDestoryTableGroup(&pQInfo->tableGroupInfo);
@ -6094,7 +6186,8 @@ void qTableQuery(qinfo_t qinfo) {
return;
}
int32_t ret = setjmp(pQInfo->env);
int32_t ret = setjmp(pQInfo->runtimeEnv.env);
// error occurs, record the error code and return to client
if (ret != TSDB_CODE_SUCCESS) {
pQInfo->code = ret;
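
A minimal, self-contained sketch of the `setjmp`/`longjmp` error propagation used here, with a made-up error code standing in for `TSDB_CODE_QRY_OUT_OF_MEMORY`: the query entry point records a jump target, and deep allocation failures jump back with an error code instead of unwinding manually.

```c
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>

#define CODE_OUT_OF_MEMORY 1   // stand-in for TSDB_CODE_QRY_OUT_OF_MEMORY

static jmp_buf env;

static void deepAllocation(size_t bytes) {
  void *p = malloc(bytes);
  if (p == NULL) {
    longjmp(env, CODE_OUT_OF_MEMORY);  // jump straight back to the entry point
  }
  free(p);
}

int main(void) {
  int code = setjmp(env);     // 0 on the initial call, error code after longjmp
  if (code != 0) {
    printf("query aborted, code=%d\n", code);
    return code;
  }
  deepAllocation(64);         // a failing allocation lands in the branch above
  printf("query finished\n");
  return 0;
}
```
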
@ -6277,13 +6370,13 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
varDataSetLen(output, rsize - VARSTR_HEADER_SIZE);
output = varDataVal(output);
STableId id = tsdbGetTableId(item->pTable);
STableId* id = TSDB_TABLEID(item->pTable);
*(int64_t *)output = id.uid; // memory align problem, todo serialize
output += sizeof(id.uid);
*(int64_t *)output = id->uid; // memory align problem, todo serialize
output += sizeof(id->uid);
*(int32_t *)output = id.tid;
output += sizeof(id.tid);
*(int32_t *)output = id->tid;
output += sizeof(id->tid);
*(int32_t *)output = pQInfo->vgId;
output += sizeof(pQInfo->vgId);

View File

@ -32,7 +32,6 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
pWindowResInfo->threshold = threshold;
pWindowResInfo->type = type;
_hash_fn_t fn = taosGetDefaultHashFunction(type);
pWindowResInfo->hashList = taosHashInit(threshold, fn, false);
@ -54,7 +53,8 @@ void destroyTimeWindowRes(SWindowResult *pWindowRes, int32_t nOutputCols) {
if (pWindowRes == NULL) {
return;
}
// TODO opt malloc strategy
for (int32_t i = 0; i < nOutputCols; ++i) {
free(pWindowRes->resultInfo[i].interResultBuf);
}
@ -180,19 +180,33 @@ void closeAllTimeWindow(SWindowResInfo *pWindowResInfo) {
/*
* remove the results that are not the FIRST time window that spreads beyond the
* the last qualified time stamp in case of sliding query, which the sliding time is not equalled to the interval time
 * last qualified timestamp, in the case of a sliding query whose sliding time is not equal to the interval time.
 * NOTE: remove redundant windows only when the result set order equals the traversal order
*/
void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order) {
assert(pWindowResInfo->size >= 0 && pWindowResInfo->capacity >= pWindowResInfo->size);
int32_t i = 0;
while (i < pWindowResInfo->size &&
((pWindowResInfo->pResult[i].window.ekey < lastKey && order == QUERY_ASC_FORWARD_STEP) ||
(pWindowResInfo->pResult[i].window.skey > lastKey && order == QUERY_DESC_FORWARD_STEP))) {
++i;
if (pWindowResInfo->size <= 1) {
return;
}
// assert(i < pWindowResInfo->size);
// get the result order
int32_t resultOrder = (pWindowResInfo->pResult[0].window.skey < pWindowResInfo->pResult[1].window.skey)? 1:-1;
if (order != resultOrder) {
return;
}
int32_t i = 0;
if (order == QUERY_ASC_FORWARD_STEP) {
while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].window.ekey < lastKey)) {
++i;
}
} else if (order == QUERY_DESC_FORWARD_STEP) {
while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].window.skey > lastKey)) {
++i;
}
}
if (i < pWindowResInfo->size) {
pWindowResInfo->size = (i + 1);
}
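
A standalone sketch of the trimming rule implemented above, ascending order only and with simplified types: keep every window that ends before `lastKey` plus the first window that reaches it, and drop the rest.

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t skey, ekey; } TimeWindow;

static int32_t trimWindowsAsc(TimeWindow *win, int32_t size, int64_t lastKey) {
  int32_t i = 0;
  while (i < size && win[i].ekey < lastKey) {
    ++i;                               // windows entirely before lastKey stay
  }
  return (i < size) ? i + 1 : size;    // keep the first window reaching lastKey
}

int main(void) {
  TimeWindow win[] = {{0, 999}, {1000, 1999}, {2000, 2999}, {3000, 3999}};
  int32_t newSize = trimWindowsAsc(win, 4, 2100);
  printf("kept %d of 4 windows\n", newSize);  // expected: 3
  return 0;
}
```
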

View File

@ -13,25 +13,26 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h"
#include "tulog.h"
#include "tutil.h"
#include "tbuffer.h"
#include "tname.h"
#include "qast.h"
#include "tcompare.h"
#include "tsdb.h"
#include "exception.h"
#include "qsqlparser.h"
#include "qsyntaxtreefunction.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tarray.h"
#include "tbuffer.h"
#include "tcompare.h"
#include "tskiplist.h"
#include "tsqlfunction.h"
#include "tstoken.h"
#include "ttokendef.h"
#include "tschemautil.h"
#include "tarray.h"
#include "tskiplist.h"
#include "queryLog.h"
#include "tsdbMain.h"
#include "exception.h"
#include "tulog.h"
#include "tutil.h"
/*
*
@ -327,104 +328,6 @@ static tExprNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *st
}
}
void tSQLBinaryExprFromString(tExprNode **pExpr, SSchema *pSchema, int32_t numOfCols, char *src, int32_t len) {
*pExpr = NULL;
if (len <= 0 || src == NULL || pSchema == NULL || numOfCols <= 0) {
return;
}
int32_t pos = 0;
*pExpr = createSyntaxTree(pSchema, numOfCols, src, &pos);
if (*pExpr != NULL) {
assert((*pExpr)->nodeType == TSQL_NODE_EXPR);
}
}
int32_t tSQLBinaryExprToStringImpl(tExprNode *pNode, char *dst, uint8_t type) {
int32_t len = 0;
if (type == TSQL_NODE_EXPR) {
*dst = '(';
tSQLBinaryExprToString(pNode, dst + 1, &len);
len += 2;
*(dst + len - 1) = ')';
} else if (type == TSQL_NODE_COL) {
len = sprintf(dst, "%s", pNode->pSchema->name);
} else {
len = tVariantToString(pNode->pVal, dst);
}
return len;
}
// TODO REFACTOR WITH SQL PARSER
static char *tSQLOptrToString(uint8_t optr, char *dst) {
switch (optr) {
case TSDB_RELATION_LESS: {
*dst = '<';
dst += 1;
break;
}
case TSDB_RELATION_LESS_EQUAL: {
*dst = '<';
*(dst + 1) = '=';
dst += 2;
break;
}
case TSDB_RELATION_EQUAL: {
*dst = '=';
dst += 1;
break;
}
case TSDB_RELATION_GREATER: {
*dst = '>';
dst += 1;
break;
}
case TSDB_RELATION_GREATER_EQUAL: {
*dst = '>';
*(dst + 1) = '=';
dst += 2;
break;
}
case TSDB_RELATION_NOT_EQUAL: {
*dst = '<';
*(dst + 1) = '>';
dst += 2;
break;
}
case TSDB_RELATION_OR: {
memcpy(dst, "or", 2);
dst += 2;
break;
}
case TSDB_RELATION_AND: {
memcpy(dst, "and", 3);
dst += 3;
break;
}
default:;
}
return dst;
}
void tSQLBinaryExprToString(tExprNode *pExpr, char *dst, int32_t *len) {
if (pExpr == NULL) {
*dst = 0;
*len = 0;
return;
}
int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->_node.pLeft, dst, pExpr->_node.pLeft->nodeType);
dst += lhs;
*len = lhs;
char *start = tSQLOptrToString(pExpr->_node.optr, dst);
*len += (start - dst);
*len += tSQLBinaryExprToStringImpl(pExpr->_node.pRight, start, pExpr->_node.pRight->nodeType);
}
static void UNUSED_FUNC destroySyntaxTree(tExprNode *pNode) { tExprNodeDestroy(pNode, NULL); }
void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) {
@ -773,8 +676,7 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo,
SSkipListNode *pNode = tSkipListIterGet(iter);
char * pData = SL_GET_NODE_DATA(pNode);
// todo refactor:
tstr *name = (*(STable **)pData)->name;
tstr *name = (tstr*) tsdbGetTableName(*(void**) pData);
// todo speed up by using hash
if (pQueryInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
if (pQueryInfo->optr == TSDB_RELATION_IN) {
@ -976,27 +878,27 @@ void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput,
free(pRightOutput);
}
void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res) {
if (pExprs == NULL) {
return;
}
tExprNode *pLeft = pExprs->_node.pLeft;
tExprNode *pRight = pExprs->_node.pRight;
// recursive traverse left child branch
if (pLeft->nodeType == TSQL_NODE_EXPR) {
tSQLBinaryExprTrv(pLeft, res);
} else if (pLeft->nodeType == TSQL_NODE_COL) {
taosArrayPush(res, &pLeft->pSchema->colId);
}
if (pRight->nodeType == TSQL_NODE_EXPR) {
tSQLBinaryExprTrv(pRight, res);
} else if (pRight->nodeType == TSQL_NODE_COL) {
taosArrayPush(res, &pRight->pSchema->colId);
}
}
//void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res) {
// if (pExprs == NULL) {
// return;
// }
//
// tExprNode *pLeft = pExprs->_node.pLeft;
// tExprNode *pRight = pExprs->_node.pRight;
//
// // recursive traverse left child branch
// if (pLeft->nodeType == TSQL_NODE_EXPR) {
// tSQLBinaryExprTrv(pLeft, res);
// } else if (pLeft->nodeType == TSQL_NODE_COL) {
// taosArrayPush(res, &pLeft->pSchema->colId);
// }
//
// if (pRight->nodeType == TSQL_NODE_EXPR) {
// tSQLBinaryExprTrv(pRight, res);
// } else if (pRight->nodeType == TSQL_NODE_COL) {
// taosArrayPush(res, &pRight->pSchema->colId);
// }
//}
static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) {
tbufWriteUint8(bw, expr->nodeType);

View File

@ -118,7 +118,7 @@ static bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) {
* To flush data to disk to accommodate more data
*/
if (pMemBuffer->numOfInMemPages > 0 && pMemBuffer->numOfInMemPages == pMemBuffer->inMemCapacity) {
if (!tExtMemBufferFlush(pMemBuffer)) {
if (tExtMemBufferFlush(pMemBuffer) != 0) {
return false;
}
}
@ -268,6 +268,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) {
size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file);
if (retVal <= 0) { // failed to write to buffer, possibly not enough space
ret = TAOS_SYSTEM_ERROR(errno);
return ret;
}
pMemBuffer->fileMeta.numOfElemsInFile += first->item.num;

View File

@ -22,41 +22,6 @@
#define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC)
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
}
if (timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h') {
return (startTime / slidingTime) * slidingTime;
} else {
/*
* here we revised the start time of day according to the local time zone,
* but in case of DST, the start time of one day need to be dynamically decided.
*
* TODO dynamically decide the start time of a day, move to common module
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
int64_t revStartime = (startTime / slidingTime) * slidingTime + timezone * t;
int64_t revEndtime = revStartime + slidingTime - 1;
if (revEndtime < startTime) {
revStartime += slidingTime;
}
return revStartime;
}
}
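
A worked example of the basic alignment rule the removed helper implemented for sub-day units (the day-level timezone/DST adjustment and the new `intervalTime` parameter are left out of this sketch): a timestamp is snapped to the start of its sliding window by integer division.

```c
#include <stdint.h>
#include <stdio.h>

// assumes non-negative timestamps (integer division truncates toward zero)
static int64_t alignToWindowStart(int64_t ts, int64_t slidingTime) {
  if (slidingTime == 0) return ts;
  return (ts / slidingTime) * slidingTime;
}

int main(void) {
  int64_t tenMinutesMs = 10 * 60 * 1000;     // 10-minute sliding windows
  int64_t ts = 1594000123456;                // some millisecond timestamp
  printf("ts    = %lld\n", (long long)ts);
  printf("start = %lld\n", (long long)alignToWindowStart(ts, tenMinutesMs));
  return 0;
}
```
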
SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol) {
if (fillType == TSDB_FILL_NONE) {
@ -128,7 +93,7 @@ static TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterva
if (order == TSDB_ORDER_ASC) {
return ekey;
} else {
return taosGetIntervalStartTimestamp(ekey, timeInterval, slidingTimeUnit, precision);
return taosGetIntervalStartTimestamp(ekey, timeInterval, timeInterval, slidingTimeUnit, precision);
}
}

View File

@ -1,11 +1,10 @@
#include <gtest/gtest.h>
#include <qast.h>
#include <sys/time.h>
#include <cassert>
#include <iostream>
#include "qast.h"
#include "taosmsg.h"
#include "qast.h"
#include "tsdb.h"
#include "tskiplist.h"
@ -24,8 +23,6 @@ static void initSchema_binary(SSchema *schema, int32_t numOfCols);
static SSkipList *createSkipList(SSchema *pSchema, int32_t numOfTags);
static SSkipList *createSkipList_binary(SSchema *pSchema, int32_t numOfTags);
static void testQueryStr(SSchema *schema, int32_t numOfCols, char *sql, SSkipList *pSkipList, ResultObj *expectedVal);
static void dropMeter(SSkipList *pSkipList);
static void Right2LeftTest(SSchema *schema, int32_t numOfCols, SSkipList *pSkipList);
@ -239,44 +236,45 @@ static void initSchema(SSchema *schema, int32_t numOfCols) {
// return pSkipList;
//}
static void testQueryStr(SSchema *schema, int32_t numOfCols, char *sql, SSkipList *pSkipList, ResultObj *pResult) {
tExprNode *pExpr = NULL;
tSQLBinaryExprFromString(&pExpr, schema, numOfCols, sql, strlen(sql));
char str[512] = {0};
int32_t len = 0;
if (pExpr == NULL) {
printf("-----error in parse syntax:%s\n\n", sql);
assert(pResult == NULL);
return;
}
tSQLBinaryExprToString(pExpr, str, &len);
printf("expr is: %s\n", str);
SArray *result = NULL;
// tExprTreeTraverse(pExpr, pSkipList, result, SSkipListNodeFilterCallback, &result);
// printf("the result is:%lld\n", result.num);
//
// bool findResult = false;
// for (int32_t i = 0; i < result.num; ++i) {
// STabObj *pm = (STabObj *)result.pRes[i];
// printf("meterid:%s,\t", pm->meterId);
//
// for (int32_t j = 0; j < pResult->numOfResult; ++j) {
// if (strcmp(pm->meterId, pResult->resultName[j]) == 0) {
// findResult = true;
// break;
// }
// }
// assert(findResult == true);
// findResult = false;
// }
printf("\n\n");
tExprTreeDestroy(&pExpr, NULL);
}
//static void testQueryStr(SSchema *schema, int32_t numOfCols, char *sql, SSkipList *pSkipList, ResultObj *pResult) {
// tExprNode *pExpr = NULL;
// tSQLBinaryExprFromString(&pExpr, schema, numOfCols, sql, strlen(sql));
//
// char str[512] = {0};
// int32_t len = 0;
// if (pExpr == NULL) {
// printf("-----error in parse syntax:%s\n\n", sql);
// assert(pResult == NULL);
// return;
// }
//
// tSQLBinaryExprToString(pExpr, str, &len);
// printf("expr is: %s\n", str);
//
// SArray *result = NULL;
// // tExprTreeTraverse(pExpr, pSkipList, result, SSkipListNodeFilterCallback, &result);
// // printf("the result is:%lld\n", result.num);
// //
// // bool findResult = false;
// // for (int32_t i = 0; i < result.num; ++i) {
// // STabObj *pm = (STabObj *)result.pRes[i];
// // printf("meterid:%s,\t", pm->meterId);
// //
// // for (int32_t j = 0; j < pResult->numOfResult; ++j) {
// // if (strcmp(pm->meterId, pResult->resultName[j]) == 0) {
// // findResult = true;
// // break;
// // }
// // }
// // assert(findResult == true);
// // findResult = false;
// // }
//
// printf("\n\n");
// tExprTreeDestroy(&pExpr, NULL);
//}
#if 0
static void Left2RightTest(SSchema *schema, int32_t numOfCols, SSkipList *pSkipList) {
char str[256] = {0};
@ -632,4 +630,5 @@ void exprSerializeTest2() {
} // namespace
TEST(testCase, astTest) {
// exprSerializeTest2();
}
}
#endif

View File

@ -31,9 +31,7 @@ extern int32_t tscEmbedded;
#define tInfo(...) { if (rpcDebugFlag & DEBUG_INFO) { taosPrintLog("RPC INFO ", tscEmbedded ? 255 : rpcDebugFlag, __VA_ARGS__); }}
#define tDebug(...) { if (rpcDebugFlag & DEBUG_DEBUG) { taosPrintLog("RPC DEBUG ", rpcDebugFlag, __VA_ARGS__); }}
#define tTrace(...) { if (rpcDebugFlag & DEBUG_TRACE) { taosPrintLog("RPC TRACE ", rpcDebugFlag, __VA_ARGS__); }}
#define tDebugDump(x, y) { if (rpcDebugFlag & DEBUG_DEBUG) { taosDumpData((unsigned char *)x, y); }}
#define tTraceDump(x, y) { if (rpcDebugFlag & DEBUG_TRACE) { taosDumpData((unsigned char *)x, y); }}
#define tDump(x, y) { if (rpcDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)x, y); }}
#ifdef __cplusplus
}

View File

@ -538,6 +538,7 @@ void rpcCancelRequest(void *handle) {
if (pContext->pConn) {
tDebug("%s, app trys to cancel request", pContext->pConn->info);
pContext->pConn->pReqMsg = NULL;
rpcCloseConn(pContext->pConn);
pContext->pConn = NULL;
rpcFreeCont(pContext->pCont);
@ -602,8 +603,13 @@ static void rpcReleaseConn(SRpcConn *pConn) {
rpcFreeMsg(pConn->pRspMsg); // it may have a response msg saved, but not request msg
pConn->pRspMsg = NULL;
if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg);
}
// if server has ever reported progress, free content
if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg); // do not use rpcFreeMsg
} else {
// if there is an outgoing message, free it
if (pConn->outType && pConn->pReqMsg)
rpcFreeMsg(pConn->pReqMsg);
}
// memset could not be used, since lockedBy can not be reset
pConn->inType = 0;
@ -959,6 +965,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) {
if (pConn->outType) {
SRpcReqContext *pContext = pConn->pContext;
pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
pConn->pReqMsg = NULL;
taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl);
}
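
A generic illustration of the ownership rule behind these `pReqMsg = NULL` assignments (types and names are simplified stand-ins, not the rpc module's structures): once the buffer has been handed to another owner, the connection's pointer is cleared first so a later cleanup cannot free it twice.

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  char *pReqMsg;   // stands in for the request message owned by the connection
} Conn;

static void releaseConn(Conn *conn) {
  if (conn->pReqMsg != NULL) {
    free(conn->pReqMsg);     // cleanup only frees what it still owns
    conn->pReqMsg = NULL;
  }
}

int main(void) {
  Conn conn = {.pReqMsg = malloc(32)};

  // the message is passed to another owner (e.g. queued for the app layer):
  char *handedOver = conn.pReqMsg;
  conn.pReqMsg = NULL;       // relinquish ownership before releasing the conn

  releaseConn(&conn);        // no double free: pReqMsg is already NULL
  free(handedOver);          // the new owner frees it exactly once
  printf("released without double free\n");
  return 0;
}
```
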
@ -973,7 +980,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
SRpcInfo *pRpc = (SRpcInfo *)pRecv->shandle;
SRpcConn *pConn = (SRpcConn *)pRecv->thandle;
tTraceDump(pRecv->msg, pRecv->msgLen);
tDump(pRecv->msg, pRecv->msgLen);
// underlying UDP layer does not know it is server or client
pRecv->connType = pRecv->connType | pRpc->connType;
@ -1061,6 +1068,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
SRpcReqContext *pContext = pConn->pContext;
rpcMsg.handle = pContext;
pConn->pContext = NULL;
pConn->pReqMsg = NULL;
// for UDP, port may be changed by server, the port in ipSet shall be used for cache
if (pHead->code != TSDB_CODE_RPC_TOO_SLOW) {
@ -1247,7 +1255,7 @@ static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) {
tError("%s, failed to send, msgLen:%d written:%d, reason:%s", pConn->info, msgLen, writtenLen, strerror(errno));
}
tTraceDump(msg, msgLen);
tDump(msg, msgLen);
}
static void rpcProcessConnError(void *param, void *id) {
@ -1297,6 +1305,7 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) {
tDebug("%s, failed to send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort);
if (pConn->pContext) {
pConn->pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
pConn->pReqMsg = NULL;
taosTmrStart(rpcProcessConnError, 0, pConn->pContext, pRpc->tmrCtrl);
rpcReleaseConn(pConn);
}

View File

@ -156,6 +156,7 @@ int main(int argc, char *argv[]) {
}
tInfo("client is initialized");
tInfo("threads:%d msgSize:%d requests:%d", appThreads, msgSize, numOfReqs);
gettimeofday(&systemTime, NULL);
startTime = systemTime.tv_sec*1000000 + systemTime.tv_usec;

View File

@ -24,23 +24,21 @@ int msgSize = 128;
int commit = 0;
int dataFd = -1;
void *qhandle = NULL;
void *qset = NULL;
void processShellMsg() {
static int num = 0;
taos_qall qall;
SRpcMsg *pRpcMsg, rpcMsg;
int type;
void *pvnode;
qall = taosAllocateQall();
while (1) {
int numOfMsgs = taosReadAllQitems(qhandle, qall);
if (numOfMsgs <= 0) {
usleep(100);
continue;
}
int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &pvnode);
tDebug("%d shell msgs are received", numOfMsgs);
if (numOfMsgs <= 0) break;
for (int i=0; i<numOfMsgs; ++i) {
taosGetQitem(qall, &type, (void **)&pRpcMsg);
@ -82,15 +80,6 @@ void processShellMsg() {
}
taosFreeQall(qall);
/*
SRpcIpSet ipSet;
ipSet.numOfIps = 1;
ipSet.index = 0;
ipSet.port = 7000;
ipSet.ip[0] = inet_addr("192.168.0.2");
rpcSendRedirectRsp(ahandle, &ipSet);
*/
}
@ -189,6 +178,8 @@ int main(int argc, char *argv[]) {
}
qhandle = taosOpenQueue(sizeof(SRpcMsg));
qset = taosOpenQset();
taosAddIntoQset(qset, qhandle, NULL);
processShellMsg();

View File

@ -47,9 +47,9 @@ extern int tsdbDebugFlag;
// Definitions
// ------------------ tsdbMeta.c
typedef struct STable {
STableId tableId;
ETableType type;
tstr* name; // NOTE: there is a flexible string here
STableId tableId;
uint64_t suid;
struct STable* pSuper; // super table pointer
uint8_t numOfSchemas;
@ -195,7 +195,6 @@ typedef struct {
typedef struct {
uint32_t len;
uint32_t offset;
// uint32_t padding;
uint32_t hasLast : 2;
uint32_t numOfBlocks : 30;
uint64_t uid;
@ -224,7 +223,7 @@ typedef struct {
typedef struct {
int16_t colId;
int16_t len;
int32_t len;
int32_t type : 8;
int32_t offset : 24;
int64_t sum;
@ -298,16 +297,71 @@ STsdbMeta* tsdbNewMeta(STsdbCfg* pCfg);
void tsdbFreeMeta(STsdbMeta* pMeta);
int tsdbOpenMeta(STsdbRepo* pRepo);
int tsdbCloseMeta(STsdbRepo* pRepo);
STSchema* tsdbGetTableSchema(STable* pTable);
STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid);
STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t version);
STSchema* tsdbGetTableTagSchema(STable* pTable);
int tsdbUpdateTable(STsdbRepo* pRepo, STable* pTable, STableCfg* pCfg);
int tsdbWLockRepoMeta(STsdbRepo* pRepo);
int tsdbRLockRepoMeta(STsdbRepo* pRepo);
int tsdbUnlockRepoMeta(STsdbRepo* pRepo);
void tsdbRefTable(STable* pTable);
void tsdbUnRefTable(STable* pTable);
void tsdbUpdateTableSchema(STsdbRepo* pRepo, STable* pTable, STSchema* pSchema, bool insertAct);
static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *key2) {
if (*(int16_t *)key1 < schemaVersion(*(STSchema **)key2)) {
return -1;
} else if (*(int16_t *)key1 > schemaVersion(*(STSchema **)key2)) {
return 1;
} else {
return 0;
}
}
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t version) {
STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
STSchema* pSchema = NULL;
STSchema* pTSchema = NULL;
if (lock) taosRLockLatch(&(pDTable->latch));
if (version < 0) { // get the latest version of schema
pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
} else { // get the schema with version
void* ptr = taosbsearch(&version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
goto _exit;
}
pTSchema = *(STSchema**)ptr;
}
ASSERT(pTSchema != NULL);
if (copy) {
if ((pSchema = tdDupSchema(pTSchema)) == NULL) terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
} else {
pSchema = pTSchema;
}
_exit:
if (lock) taosRUnLockLatch(&(pDTable->latch));
return pSchema;
}
static FORCE_INLINE STSchema* tsdbGetTableSchema(STable* pTable) {
return tsdbGetTableSchemaImpl(pTable, false, false, -1);
}
static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) {
if (pTable->type == TSDB_CHILD_TABLE) { // check child table first
STable *pSuper = pTable->pSuper;
if (pSuper == NULL) return NULL;
return pSuper->tagSchema;
} else if (pTable->type == TSDB_SUPER_TABLE) {
return pTable->tagSchema;
} else {
return NULL;
}
}
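/*
 * Illustrative sketch (not part of the patch): how the lock/copy/version
 * parameters of tsdbGetTableSchemaImpl() combine. A negative version selects
 * the newest schema, lock takes the table read latch around the lookup, and
 * copy returns a duplicate the caller owns. The wrapper name below is
 * hypothetical.
 */
static void exampleSchemaLookup(STable *pTable) {
  // newest schema, no latch, shared pointer -- must not be freed by the caller
  STSchema *pLatest = tsdbGetTableSchemaImpl(pTable, false, false, -1);
  // schema of a specific version, under the read latch, returned as a copy
  STSchema *pCopy = tsdbGetTableSchemaImpl(pTable, true, true, 1);
  if (pCopy != NULL) tdFreeSchema(pCopy);
  (void)pLatest;
}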
// ------------------ tsdbBuffer.c
STsdbBufPool* tsdbNewBufPool();
@ -383,8 +437,9 @@ int tsdbLoadCompIdx(SRWHelper* pHelper, void* target);
int tsdbLoadCompInfo(SRWHelper* pHelper, void* target);
int tsdbLoadCompData(SRWHelper* phelper, SCompBlock* pcompblock, void* target);
void tsdbGetDataStatis(SRWHelper* pHelper, SDataStatis* pStatis, int numOfCols);
int tsdbLoadBlockDataCols(SRWHelper* pHelper, SCompBlock* pCompBlock, int16_t* colIds, int numOfColIds);
int tsdbLoadBlockData(SRWHelper* pHelper, SCompBlock* pCompBlock);
int tsdbLoadBlockDataCols(SRWHelper* pHelper, SCompBlock* pCompBlock, SCompInfo* pCompInfo, int16_t* colIds,
int numOfColIds);
int tsdbLoadBlockData(SRWHelper* pHelper, SCompBlock* pCompBlock, SCompInfo* pCompInfo);
// ------------------ tsdbMain.c
#define REPO_ID(r) (r)->config.tsdbId

View File

@ -41,9 +41,9 @@ typedef struct {
} SSubmitBlkIter;
typedef struct {
int32_t totalLen;
int32_t len;
SSubmitBlk *pBlock;
int32_t totalLen;
int32_t len;
void * pMsg;
} SSubmitMsgIter;
static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg);
@ -56,7 +56,7 @@ static STsdbRepo * tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg);
static void tsdbFreeRepo(STsdbRepo *pRepo);
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter);
static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows);
static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter);
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock);
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
static int tsdbRestoreInfo(STsdbRepo *pRepo);
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
@ -68,6 +68,7 @@ static int keyFGroupCompFunc(const void *key, const void *fgroup);
static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg);
static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg);
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable);
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg);
// Function declaration
int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) {
@ -164,6 +165,13 @@ int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *
STsdbRepo * pRepo = (STsdbRepo *)repo;
SSubmitMsgIter msgIter = {0};
if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) {
if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) {
tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
}
return -1;
}
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) {
tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
@ -173,12 +181,14 @@ int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *
int32_t affectedrows = 0;
TSKEY now = taosGetTimestamp(pRepo->config.precision);
while ((pBlock = tsdbGetSubmitMsgNext(&msgIter)) != NULL) {
while (true) {
tsdbGetSubmitMsgNext(&msgIter, &pBlock);
if (pBlock == NULL) break;
if (tsdbInsertDataToTable(pRepo, pBlock, now, &affectedrows) < 0) {
return -1;
}
}
if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows);
return 0;
}
@ -263,7 +273,7 @@ void tsdbStartStream(TSDB_REPO_T *repo) {
STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql,
tsdbGetTableSchema(pTable));
tsdbGetTableSchemaImpl(pTable, false, false, -1));
}
}
}
@ -694,17 +704,12 @@ static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) {
return -1;
}
pMsg->length = htonl(pMsg->length);
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
pMsg->compressed = htonl(pMsg->compressed);
pIter->totalLen = pMsg->length;
pIter->len = TSDB_SUBMIT_MSG_HEAD_SIZE;
pIter->len = 0;
pIter->pMsg = pMsg;
if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
return -1;
} else {
pIter->pBlock = pMsg->blocks;
}
return 0;
@ -714,26 +719,8 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY
STsdbMeta *pMeta = pRepo->tsdbMeta;
int64_t points = 0;
STable *pTable = tsdbGetTableByUid(pMeta, pBlock->uid);
if (pTable == NULL || TABLE_TID(pTable) != pBlock->tid) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
// Check schema version and update schema if needed
if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) {
tsdbError("vgId:%d failed to insert data to table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
STable *pTable = pMeta->tables[pBlock->tid];
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
SSubmitBlkIter blkIter = {0};
SDataRow row = NULL;
@ -764,27 +751,23 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY
return 0;
}
static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter) {
SSubmitBlk *pBlock = pIter->pBlock;
if (pBlock == NULL) return NULL;
pBlock->dataLen = htonl(pBlock->dataLen);
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
pBlock->uid = htobe64(pBlock->uid);
pBlock->tid = htonl(pBlock->tid);
pBlock->sversion = htonl(pBlock->sversion);
pBlock->padding = htonl(pBlock->padding);
pIter->len = pIter->len + sizeof(SSubmitBlk) + pBlock->dataLen;
if (pIter->len >= pIter->totalLen) {
pIter->pBlock = NULL;
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) {
if (pIter->len == 0) {
pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE;
} else {
pIter->pBlock = (SSubmitBlk *)((char *)pBlock + pBlock->dataLen + sizeof(SSubmitBlk));
SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen);
}
return pBlock;
if (pIter->len > pIter->totalLen) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
*pPBlock = NULL;
return -1;
}
*pPBlock = (pIter->len == pIter->totalLen) ? NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
return 0;
}
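/*
 * Illustrative sketch (not part of the patch): the new iteration contract.
 * tsdbGetSubmitMsgNext() reports a malformed message through its return value
 * and hands the next block back via an out parameter; a NULL block with a zero
 * return value marks the end of the message. The wrapper name below is
 * hypothetical.
 */
static int exampleWalkSubmitMsg(SSubmitMsg *pMsg) {
  SSubmitMsgIter iter = {0};
  SSubmitBlk *   pBlock = NULL;
  if (tsdbInitSubmitMsgIter(pMsg, &iter) < 0) return -1;
  while (true) {
    if (tsdbGetSubmitMsgNext(&iter, &pBlock) < 0) return -1;  // message is corrupted
    if (pBlock == NULL) break;                                // all blocks consumed
    // ... process one SSubmitBlk ...
  }
  return 0;
}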
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) {
@ -969,42 +952,64 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) {
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) {
ASSERT(pTable != NULL);
STSchema *pSchema = tsdbGetTableSchema(pTable);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
int sversion = schemaVersion(pSchema);
if (pBlock->sversion == sversion) return 0;
if (pBlock->sversion > sversion) { // need to config
tsdbDebug("vgId:%d table %s tid %d has version %d smaller than client version %d, try to config", REPO_ID(pRepo),
TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), sversion, pBlock->sversion);
if (pRepo->appH.configFunc) {
void *msg = (*pRepo->appH.configFunc)(REPO_ID(pRepo), TABLE_TID(pTable));
if (msg == NULL) {
tsdbError("vgId:%d failed to config table %s tid %d since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), tstrerror(terrno));
return -1;
}
STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg);
if (pTableCfg == NULL) {
rpcFreeCont(msg);
return -1;
}
if (tsdbUpdateTable(pRepo, (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable, pTableCfg) < 0) {
tsdbError("vgId:%d failed to update table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
tsdbClearTableCfg(pTableCfg);
rpcFreeCont(msg);
return -1;
}
tsdbClearTableCfg(pTableCfg);
rpcFreeCont(msg);
} else {
if (pBlock->sversion == sversion) {
return 0;
} else {
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
return -1;
}
}
if (pBlock->sversion > sversion) { // may need to update table schema
if (pBlock->schemaLen > 0) {
tsdbDebug(
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0);
int numOfCols = pBlock->schemaLen / sizeof(STColumn);
STColumn *pTCol = (STColumn *)pBlock->data;
STSchemaBuilder schemaBuilder = {0};
if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
for (int i = 0; i < numOfCols; i++) {
if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
tdDestroyTSchemaBuilder(&schemaBuilder);
return -1;
}
}
STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder);
if (pNSchema == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tdDestroyTSchemaBuilder(&schemaBuilder);
return -1;
}
tdDestroyTSchemaBuilder(&schemaBuilder);
tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true);
} else {
tsdbDebug(
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE;
return -1;
}
} else {
if (tsdbGetTableSchemaByVersion(pTable, pBlock->sversion) == NULL) {
ASSERT(pBlock->sversion >= 0);
if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) {
tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo),
pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable));
}
@ -1013,7 +1018,64 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT
}
return 0;
}
}
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
ASSERT(pMsg != NULL);
STsdbMeta * pMeta = pRepo->tsdbMeta;
SSubmitMsgIter msgIter = {0};
SSubmitBlk * pBlock = NULL;
terrno = TSDB_CODE_SUCCESS;
pMsg->length = htonl(pMsg->length);
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1;
while (true) {
if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
if (pBlock == NULL) break;
pBlock->uid = htobe64(pBlock->uid);
pBlock->tid = htonl(pBlock->tid);
pBlock->sversion = htonl(pBlock->sversion);
pBlock->dataLen = htonl(pBlock->dataLen);
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
if (pBlock->tid <= 0 || pBlock->tid >= pRepo->config.maxTables) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
STable *pTable = pMeta->tables[pBlock->tid];
if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
// Check schema version and update schema if needed
if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) {
if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
continue;
} else {
return -1;
}
}
}
if (terrno != TSDB_CODE_SUCCESS) return -1;
return 0;
}
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) {
// TODO

View File

@ -538,10 +538,12 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
SCommitIter *pIter = iters + tid;
if (pIter->pTable == NULL) continue;
taosRLockLatch(&(pIter->pTable->latch));
tsdbSetHelperTable(pHelper, pIter->pTable, pRepo);
if (pIter->pIter != NULL) {
tdInitDataCols(pDataCols, tsdbGetTableSchema(pIter->pTable));
tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1));
int maxRowsToRead = pCfg->maxRowsPerFileBlock * 4 / 5;
int nLoop = 0;
@ -557,6 +559,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
int rowsWritten = tsdbWriteDataBlock(pHelper, pDataCols);
ASSERT(rowsWritten != 0);
if (rowsWritten < 0) {
taosRUnLockLatch(&(pIter->pTable->latch));
tsdbError("vgId:%d failed to write data block to table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo),
TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable),
tstrerror(terrno));
@ -571,6 +574,8 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
ASSERT(pDataCols->numOfRows == 0);
}
taosRUnLockLatch(&(pIter->pTable->latch));
// Move the last block to the new .l file if necessary
if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno));
@ -680,10 +685,10 @@ static int tsdbReadRowsFromCache(STsdbMeta *pMeta, STable *pTable, SSkipListIter
if (dataRowKey(row) > maxKey) break;
if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) {
pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row));
pSchema = tsdbGetTableSchemaImpl(pTable, true, false, dataRowVersion(row));
if (pSchema == NULL) {
// TODO: deal with the error here
ASSERT(false);
ASSERT(0);
}
}

View File

@ -29,10 +29,9 @@ static void tsdbOrgMeta(void *pHandle);
static char * getTagIndexKey(const void *pData);
static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper);
static void tsdbFreeTable(STable *pTable);
static int tsdbUpdateTableTagSchema(STable *pTable, STSchema *newSchema);
static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx);
static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, bool lock);
static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFromIdx, bool lock);
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable);
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper);
static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable);
static int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, int32_t tid);
static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup);
@ -76,7 +75,7 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
// TODO
if (super->type != TSDB_SUPER_TABLE) return -1;
if (super->tableId.uid != pCfg->superUid) return -1;
tsdbUpdateTable(pRepo, super, pCfg);
// tsdbUpdateTable(pRepo, super, pCfg);
}
}
@ -84,10 +83,18 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
if (table == NULL) goto _err;
// Register to meta
tsdbWLockRepoMeta(pRepo);
if (newSuper) {
if (tsdbAddTableToMeta(pRepo, super, true) < 0) goto _err;
if (tsdbAddTableToMeta(pRepo, super, true, false) < 0) {
tsdbUnlockRepoMeta(pRepo);
goto _err;
}
}
if (tsdbAddTableToMeta(pRepo, table, true) < 0) goto _err;
if (tsdbAddTableToMeta(pRepo, table, true, false) < 0) {
tsdbUnlockRepoMeta(pRepo);
goto _err;
}
tsdbUnlockRepoMeta(pRepo);
// Write to memtable action
int tlen1 = (newSuper) ? tsdbGetTableEncodeSize(TSDB_UPDATE_META, super) : 0;
@ -185,11 +192,6 @@ char *tsdbGetTableName(void* pTable) {
}
}
STableId tsdbGetTableId(void *pTable) {
assert(pTable);
return ((STable*)pTable)->tableId;
}
STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg) {
if (pMsg == NULL) return NULL;
@ -255,7 +257,7 @@ _err:
return NULL;
}
static int32_t colIdCompar(const void* left, const void* right) {
static UNUSED_FUNC int32_t colIdCompar(const void* left, const void* right) {
int16_t colId = *(int16_t*) left;
STColumn* p2 = (STColumn*) right;
@ -266,89 +268,118 @@ static int32_t colIdCompar(const void* left, const void* right) {
return (colId < p2->colId)? -1:1;
}
int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) {
int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) {
STsdbRepo *pRepo = (STsdbRepo *)repo;
STsdbMeta *pMeta = pRepo->tsdbMeta;
STSchema * pNewSchema = NULL;
pMsg->uid = htobe64(pMsg->uid);
pMsg->tid = htonl(pMsg->tid);
pMsg->tversion = htons(pMsg->tversion);
pMsg->colId = htons(pMsg->colId);
pMsg->bytes = htons(pMsg->bytes);
pMsg->tagValLen = htonl(pMsg->tagValLen);
pMsg->numOfTags = htons(pMsg->numOfTags);
pMsg->schemaLen = htonl(pMsg->schemaLen);
assert(pMsg->schemaLen == sizeof(STColumn) * pMsg->numOfTags);
char* d = pMsg->data;
for(int32_t i = 0; i < pMsg->numOfTags; ++i) {
STColumn* pCol = (STColumn*) d;
pCol->colId = htons(pCol->colId);
pCol->bytes = htons(pCol->bytes);
pCol->offset = 0;
d += sizeof(STColumn);
for (int i = 0; i < pMsg->numOfTags; i++) {
STColumn *pTCol = (STColumn *)pMsg->data + i;
pTCol->bytes = htons(pTCol->bytes);
pTCol->colId = htons(pTCol->colId);
}
STable *pTable = tsdbGetTableByUid(pMeta, pMsg->uid);
if (pTable == NULL) {
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TID(pTable) != pMsg->tid) {
if (pTable == NULL || TABLE_TID(pTable) != pMsg->tid) {
tsdbError("vgId:%d failed to update table tag value since invalid table id %d uid %" PRIu64, REPO_ID(pRepo),
pMsg->tid, pMsg->uid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
tsdbError("vgId:%d failed to update tag value of table %s since its type is %d", REPO_ID(pRepo),
TABLE_CHAR_NAME(pTable), TABLE_TYPE(pTable));
tsdbError("vgId:%d try to update tag value of a non-child table, invalid action", REPO_ID(pRepo));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
if (schemaVersion(tsdbGetTableTagSchema(pTable)) < pMsg->tversion) {
tsdbDebug("vgId:%d server tag version %d is older than client tag version %d, try to config", REPO_ID(pRepo),
schemaVersion(tsdbGetTableTagSchema(pTable)), pMsg->tversion);
void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, pMsg->tid);
if (msg == NULL) return -1;
// Deal with error her
STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg);
STable * super = tsdbGetTableByUid(pMeta, pTableCfg->superUid);
ASSERT(super != NULL);
int32_t code = tsdbUpdateTable(pRepo, super, pTableCfg);
if (code != TSDB_CODE_SUCCESS) {
tsdbClearTableCfg(pTableCfg);
return code;
}
tsdbClearTableCfg(pTableCfg);
rpcFreeCont(msg);
}
STSchema *pTagSchema = tsdbGetTableTagSchema(pTable);
if (schemaVersion(pTagSchema) > pMsg->tversion) {
if (schemaVersion(pTable->pSuper->tagSchema) > pMsg->tversion) {
tsdbError(
"vgId:%d failed to update tag value of table %s since version out of date, client tag version %d server tag "
"version %d",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->tagSchema));
return TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE;
terrno = TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE;
return -1;
}
if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) {
if (schemaVersion(pTable->pSuper->tagSchema) < pMsg->tversion) { // tag schema is out of date
tsdbDebug("vgId:%d need to update tag schema of table %s tid %d uid %" PRIu64
" since out of date, current version %d new version %d",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable),
schemaVersion(pTable->pSuper->tagSchema), pMsg->tversion);
STSchemaBuilder schemaBuilder = {0};
STColumn *pTCol = (STColumn *)pMsg->data;
ASSERT(pMsg->schemaLen % sizeof(STColumn) == 0 && pTCol[0].colId == colColId(schemaColAt(pTable->pSuper->tagSchema, 0)));
if (tdInitTSchemaBuilder(&schemaBuilder, pMsg->tversion) < 0) {
tsdbDebug("vgId:%d failed to update tag schema of table %s tid %d uid %" PRIu64 " since out of memory",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable));
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
for (int i = 0; i < (pMsg->schemaLen / sizeof(STColumn)); i++) {
if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, pTCol[i].colId, pTCol[i].bytes) < 0) {
tdDestroyTSchemaBuilder(&schemaBuilder);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
}
pNewSchema = tdGetSchemaFromBuilder(&schemaBuilder);
if (pNewSchema == NULL) {
tdDestroyTSchemaBuilder(&schemaBuilder);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
tdDestroyTSchemaBuilder(&schemaBuilder);
}
// Change in memory
if (pNewSchema != NULL) { // change super table tag schema
taosWLockLatch(&(pTable->pSuper->latch));
STSchema *pOldSchema = pTable->pSuper->tagSchema;
pTable->pSuper->tagSchema = pNewSchema;
tdFreeSchema(pOldSchema);
taosWUnLockLatch(&(pTable->pSuper->latch));
}
bool isChangeIndexCol = (pMsg->colId == colColId(schemaColAt(pTable->pSuper->tagSchema, 0)));
// STColumn *pCol = bsearch(&(pMsg->colId), pMsg->data, pMsg->numOfTags, sizeof(STColumn), colIdCompar);
// ASSERT(pCol != NULL);
if (isChangeIndexCol) {
tsdbWLockRepoMeta(pRepo);
tsdbRemoveTableFromIndex(pMeta, pTable);
}
// TODO: remove table from index if it is the first column of tag
// TODO: convert the tag schema from client, and then extract the type and bytes from schema according to colId
STColumn* res = bsearch(&pMsg->colId, pMsg->data, pMsg->numOfTags, sizeof(STColumn), colIdCompar);
assert(res != NULL);
tdSetKVRowDataOfCol(&pTable->tagVal, pMsg->colId, res->type, pMsg->data + pMsg->schemaLen);
if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) {
tsdbAddTableIntoIndex(pMeta, pTable);
taosWLockLatch(&(pTable->latch));
tdSetKVRowDataOfCol(&(pTable->tagVal), pMsg->colId, pMsg->type, POINTER_SHIFT(pMsg->data, pMsg->schemaLen));
taosWUnLockLatch(&(pTable->latch));
if (isChangeIndexCol) {
tsdbAddTableIntoIndex(pMeta, pTable, false);
tsdbUnlockRepoMeta(pRepo);
}
return TSDB_CODE_SUCCESS;
// Update on file
int tlen1 = (pNewSchema) ? tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable->pSuper) : 0;
int tlen2 = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable);
void *buf = tsdbAllocBytes(pRepo, tlen1+tlen2);
ASSERT(buf != NULL);
if (pNewSchema) {
void *pBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable->pSuper);
ASSERT(POINTER_DISTANCE(pBuf, buf) == tlen1);
buf = pBuf;
}
tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable);
return 0;
}
// ------------------ INTERNAL FUNCTIONS ------------------
@ -449,18 +480,6 @@ int tsdbCloseMeta(STsdbRepo *pRepo) {
return 0;
}
STSchema *tsdbGetTableSchema(STable *pTable) {
if (pTable->type == TSDB_NORMAL_TABLE || pTable->type == TSDB_SUPER_TABLE || pTable->type == TSDB_STREAM_TABLE) {
return pTable->schema[pTable->numOfSchemas - 1];
} else if (pTable->type == TSDB_CHILD_TABLE) {
STable *pSuper = pTable->pSuper;
if (pSuper == NULL) return NULL;
return pSuper->schema[pSuper->numOfSchemas - 1];
} else {
return NULL;
}
}
STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) {
void *ptr = taosHashGet(pMeta->uidMap, (char *)(&uid), sizeof(uid));
@ -470,68 +489,7 @@ STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) {
}
STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t version) {
STable *pSearchTable = (pTable->type == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
if (pSearchTable == NULL) return NULL;
void *ptr = taosbsearch(&version, pSearchTable->schema, pSearchTable->numOfSchemas, sizeof(STSchema *),
tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) return NULL;
return *(STSchema **)ptr;
}
STSchema *tsdbGetTableTagSchema(STable *pTable) {
if (pTable->type == TSDB_SUPER_TABLE) {
return pTable->tagSchema;
} else if (pTable->type == TSDB_CHILD_TABLE) {
STable *pSuper = pTable->pSuper;
if (pSuper == NULL) return NULL;
return pSuper->tagSchema;
} else {
return NULL;
}
}
int tsdbUpdateTable(STsdbRepo *pRepo, STable *pTable, STableCfg *pCfg) {
// TODO: this function can only be called when there is no query and commit on this table
ASSERT(TABLE_TYPE(pTable) != TSDB_CHILD_TABLE);
bool changed = false;
STsdbMeta *pMeta = pRepo->tsdbMeta;
if ((pTable->type == TSDB_SUPER_TABLE) && (schemaVersion(pTable->tagSchema) < schemaVersion(pCfg->tagSchema))) {
if (tsdbUpdateTableTagSchema(pTable, pCfg->tagSchema) < 0) {
tsdbError("vgId:%d failed to update table %s tag schema since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
changed = true;
}
STSchema *pTSchema = tsdbGetTableSchema(pTable);
if (schemaVersion(pTSchema) < schemaVersion(pCfg->schema)) {
if (pTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
pTable->schema[pTable->numOfSchemas++] = tdDupSchema(pCfg->schema);
} else {
ASSERT(pTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
STSchema *tSchema = tdDupSchema(pCfg->schema);
tdFreeSchema(pTable->schema[0]);
memmove(pTable->schema, pTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
pTable->schema[pTable->numOfSchemas - 1] = tSchema;
}
pMeta->maxRowBytes = MAX(pMeta->maxRowBytes, dataRowMaxBytesFromSchema(pCfg->schema));
pMeta->maxCols = MAX(pMeta->maxCols, schemaNCols(pCfg->schema));
changed = true;
}
if (changed) {
int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable);
void *buf = tsdbAllocBytes(pRepo, tlen);
tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable);
}
return 0;
return tsdbGetTableSchemaImpl(pTable, true, false, version);
}
int tsdbWLockRepoMeta(STsdbRepo *pRepo) {
@ -575,7 +533,7 @@ void tsdbRefTable(STable *pTable) {
void tsdbUnRefTable(STable *pTable) {
int32_t ref = T_REF_DEC(pTable);
tsdbTrace("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref);
tsdbDebug("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref);
if (ref == 0) {
// tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable));
@ -587,17 +545,36 @@ void tsdbUnRefTable(STable *pTable) {
}
}
// ------------------ LOCAL FUNCTIONS ------------------
static int tsdbCompareSchemaVersion(const void *key1, const void *key2) {
if (*(int16_t *)key1 < schemaVersion(*(STSchema **)key2)) {
return -1;
} else if (*(int16_t *)key1 > schemaVersion(*(STSchema **)key2)) {
return 1;
void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema, bool insertAct) {
ASSERT(TABLE_TYPE(pTable) != TSDB_STREAM_TABLE && TABLE_TYPE(pTable) != TSDB_SUPER_TABLE);
STsdbMeta *pMeta = pRepo->tsdbMeta;
STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
ASSERT(schemaVersion(pSchema) > schemaVersion(pCTable->schema[pCTable->numOfSchemas - 1]));
taosWLockLatch(&(pCTable->latch));
if (pCTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
pCTable->schema[pCTable->numOfSchemas++] = pSchema;
} else {
return 0;
ASSERT(pCTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
tdFreeSchema(pCTable->schema[0]);
memmove(pCTable->schema, pCTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
pCTable->schema[pCTable->numOfSchemas - 1] = pSchema;
}
if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema);
if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema);
taosWUnLockLatch(&(pCTable->latch));
if (insertAct) {
int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pCTable);
void *buf = tsdbAllocBytes(pRepo, tlen);
ASSERT(buf != NULL);
tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable);
}
}
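/*
 * Illustrative sketch (not part of the patch): building a new schema with the
 * schema builder and installing it through tsdbUpdateTableSchema(). The
 * function stores the STSchema pointer directly, so ownership passes to the
 * meta and the caller must not free it. Column ids, types and the wrapper name
 * are hypothetical.
 */
static int exampleBumpSchema(STsdbRepo *pRepo, STable *pTable, int newVersion) {
  STSchemaBuilder builder = {0};
  if (tdInitTSchemaBuilder(&builder, newVersion) < 0) return -1;
  if (tdAddColToSchema(&builder, TSDB_DATA_TYPE_TIMESTAMP, 0, 8) < 0 ||
      tdAddColToSchema(&builder, TSDB_DATA_TYPE_INT, 1, 4) < 0) {
    tdDestroyTSchemaBuilder(&builder);
    return -1;
  }
  STSchema *pNSchema = tdGetSchemaFromBuilder(&builder);
  tdDestroyTSchemaBuilder(&builder);
  if (pNSchema == NULL) return -1;
  tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true);  // true: also queue a TSDB_UPDATE_META action
  return 0;
}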
// ------------------ LOCAL FUNCTIONS ------------------
static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
STsdbRepo *pRepo = (STsdbRepo *)pHandle;
STable * pTable = NULL;
@ -609,7 +586,7 @@ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
tsdbDecodeTable(cont, &pTable);
if (tsdbAddTableToMeta(pRepo, pTable, false) < 0) {
if (tsdbAddTableToMeta(pRepo, pTable, false, false) < 0) {
tsdbFreeTable(pTable);
return -1;
}
@ -627,7 +604,7 @@ static void tsdbOrgMeta(void *pHandle) {
for (int i = 1; i < pCfg->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable != NULL && pTable->type == TSDB_CHILD_TABLE) {
tsdbAddTableIntoIndex(pMeta, pTable);
tsdbAddTableIntoIndex(pMeta, pTable, true);
}
}
}
@ -737,7 +714,7 @@ _err:
static void tsdbFreeTable(STable *pTable) {
if (pTable) {
tsdbDebug("table %s is destroyed", TABLE_CHAR_NAME(pTable));
if (pTable->name != NULL) tsdbDebug("table %s is destroyed", TABLE_CHAR_NAME(pTable));
tfree(TABLE_NAME(pTable));
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) {
@ -757,25 +734,10 @@ static void tsdbFreeTable(STable *pTable) {
}
}
static int tsdbUpdateTableTagSchema(STable *pTable, STSchema *newSchema) {
ASSERT(pTable->type == TSDB_SUPER_TABLE);
ASSERT(schemaVersion(pTable->tagSchema) < schemaVersion(newSchema));
STSchema *pOldSchema = pTable->tagSchema;
STSchema *pNewSchema = tdDupSchema(newSchema);
if (pNewSchema == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
pTable->tagSchema = pNewSchema;
tdFreeSchema(pOldSchema);
return 0;
}
static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, bool lock) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
if (addIdx && tsdbWLockRepoMeta(pRepo) < 0) {
if (lock && tsdbWLockRepoMeta(pRepo) < 0) {
tsdbError("vgId:%d failed to add table %s to meta since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
@ -790,7 +752,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
}
} else {
if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE && addIdx) { // add STABLE to the index
if (tsdbAddTableIntoIndex(pMeta, pTable) < 0) {
if (tsdbAddTableIntoIndex(pMeta, pTable, true) < 0) {
tsdbDebug("vgId:%d failed to add table %s to meta while add table to index since %s", REPO_ID(pRepo),
TABLE_CHAR_NAME(pTable), tstrerror(terrno));
goto _err;
@ -809,14 +771,15 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
}
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
STSchema *pSchema = tsdbGetTableSchema(pTable);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema);
if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema);
}
if (addIdx && tsdbUnlockRepoMeta(pRepo) < 0) return -1;
if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1;
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, tsdbGetTableSchema(pTable));
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql,
tsdbGetTableSchemaImpl(pTable, false, false, -1));
}
tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
@ -825,7 +788,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
_err:
tsdbRemoveTableFromMeta(pRepo, pTable, false, false);
if (addIdx) tsdbUnlockRepoMeta(pRepo);
if (lock) tsdbUnlockRepoMeta(pRepo);
return -1;
}
@ -836,7 +799,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
STable * tTable = NULL;
STsdbCfg * pCfg = &(pRepo->config);
STSchema *pSchema = tsdbGetTableSchema(pTable);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
int maxCols = schemaNCols(pSchema);
int maxRowBytes = schemaTLen(pSchema);
@ -870,7 +833,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
for (int i = 0; i < pCfg->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable != NULL) {
pSchema = tsdbGetTableSchema(pTable);
pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
maxCols = MAX(maxCols, schemaNCols(pSchema));
maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema));
}
@ -882,7 +845,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
tsdbUnRefTable(pTable);
}
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) {
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper) {
ASSERT(pTable != NULL && pTable->type == TSDB_CHILD_TABLE);
STable *pSTable = tsdbGetTableByUid(pMeta, TABLE_SUID(pTable));
ASSERT(pSTable != NULL);
@ -906,7 +869,7 @@ static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) {
memcpy(SL_GET_NODE_DATA(pNode), &pTable, sizeof(STable *));
tSkipListPut(pSTable->pIndex, pNode);
T_REF_INC(pSTable);
if (refSuper) T_REF_INC(pSTable);
return 0;
}
@ -1274,4 +1237,4 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) {
}
return 0;
}
}

View File

@ -218,7 +218,7 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
pHelper->tableInfo.tid = pTable->tableId.tid;
pHelper->tableInfo.uid = pTable->tableId.uid;
STSchema *pSchema = tsdbGetTableSchema(pTable);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
pHelper->tableInfo.sversion = schemaVersion(pSchema);
tdInitDataCols(pHelper->pDataCols[0], pSchema);
@ -318,7 +318,7 @@ int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) {
ASSERT(pCompBlock->last);
if (pCompBlock->numOfSubBlocks > 1) {
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, pIdx->numOfBlocks - 1)) < 0) return -1;
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, pIdx->numOfBlocks - 1), NULL) < 0) return -1;
ASSERT(pHelper->pDataCols[0]->numOfRows > 0 && pHelper->pDataCols[0]->numOfRows < pCfg->minRowsPerFileBlock);
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.nLastF), pHelper->pDataCols[0],
pHelper->pDataCols[0]->numOfRows, &compBlock, true, true) < 0)
@ -577,11 +577,12 @@ void tsdbGetDataStatis(SRWHelper *pHelper, SDataStatis *pStatis, int numOfCols)
}
}
int tsdbLoadBlockDataCols(SRWHelper *pHelper, SCompBlock *pCompBlock, int16_t *colIds, int numOfColIds) {
int tsdbLoadBlockDataCols(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo, int16_t *colIds, int numOfColIds) {
ASSERT(pCompBlock->numOfSubBlocks >= 1); // Must be super block
int numOfSubBlocks = pCompBlock->numOfSubBlocks;
if (numOfSubBlocks > 1) pCompBlock = (SCompBlock *)POINTER_SHIFT(pHelper->pCompInfo, pCompBlock->offset);
if (numOfSubBlocks > 1)
pCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? pHelper->pCompInfo : pCompInfo, pCompBlock->offset);
tdResetDataCols(pHelper->pDataCols[0]);
if (tsdbLoadBlockDataColsImpl(pHelper, pCompBlock, pHelper->pDataCols[0], colIds, numOfColIds) < 0) goto _err;
@ -598,10 +599,10 @@ _err:
return -1;
}
int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock) {
int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo) {
int numOfSubBlock = pCompBlock->numOfSubBlocks;
if (numOfSubBlock > 1) pCompBlock = (SCompBlock *)POINTER_SHIFT(pHelper->pCompInfo, pCompBlock->offset);
if (numOfSubBlock > 1)
pCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? pHelper->pCompInfo : pCompInfo, pCompBlock->offset);
tdResetDataCols(pHelper->pDataCols[0]);
if (tsdbLoadBlockDataImpl(pHelper, pCompBlock, pHelper->pDataCols[0]) < 0) goto _err;
@ -703,6 +704,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
}
// Add checksum
ASSERT(pCompCol->len > 0);
pCompCol->len += sizeof(TSCKSUM);
taosCalcChecksumAppend(0, (uint8_t *)tptr, pCompCol->len);
@ -792,7 +794,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err;
} else {
// Load
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx)) < 0) goto _err;
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err;
ASSERT(pHelper->pDataCols[0]->numOfRows <= blockAtIdx(pHelper, blkIdx)->numOfRows);
// Merge
if (tdMergeDataCols(pHelper->pDataCols[0], pDataCols, rowsWritten) < 0) goto _err;
@ -848,7 +850,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err;
} else { // Load-Merge-Write
// Load
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx)) < 0) goto _err;
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err;
if (blockAtIdx(pHelper, blkIdx)->last) pHelper->hasOldLastBlock = false;
rowsWritten = rows3;

View File

@ -22,7 +22,7 @@
#include "exception.h"
#include "../../../query/inc/qast.h" // todo move to common module
#include "../../../query/inc/tlosertree.h" // todo move to util module
#include "tlosertree.h"
#include "tsdb.h"
#include "tsdbMain.h"
@ -122,7 +122,7 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
SArray* sa);
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win,
STsdbQueryHandle* pQueryHandle);
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
@ -249,8 +249,6 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
pCheckInfo->initBuf = true;
int32_t order = pHandle->order;
// tsdbTakeMemSnapshot(pHandle->pTsdb, &pCheckInfo->mem, &pCheckInfo->imem);
// no data in buffer, abort
if (pHandle->mem == NULL && pHandle->imem == NULL) {
return false;
@ -392,8 +390,11 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
STable* pTable = pCheckInfo->pTableObj;
assert(pTable != NULL);
initTableMemIterator(pHandle, pCheckInfo);
if (!pCheckInfo->initBuf) {
initTableMemIterator(pHandle, pCheckInfo);
}
SDataRow row = getSDataRowInTableMem(pCheckInfo);
if (row == NULL) {
return false;
@ -411,8 +412,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 1:-1;
STimeWindow* win = &pHandle->cur.win;
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo, pHandle->window.ekey,
pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo, pHandle->window.ekey, pHandle->outputCapacity, win, pHandle);
// update the last key value
pCheckInfo->lastKey = win->ekey + step;
@ -576,6 +576,8 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS
static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) {
STsdbRepo *pRepo = pQueryHandle->pTsdb;
// TODO refactor
SCompData* data = calloc(1, sizeof(SCompData) + sizeof(SCompCol) * pBlock->numOfCols);
data->numOfCols = pBlock->numOfCols;
@ -592,9 +594,12 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
}
tdInitDataCols(pCheckInfo->pDataCols, tsdbGetTableSchema(pCheckInfo->pTableObj));
STSchema* pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj);
tdInitDataCols(pCheckInfo->pDataCols, pSchema);
tdInitDataCols(pQueryHandle->rhelper.pDataCols[0], pSchema);
tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema);
if (tsdbLoadBlockData(&(pQueryHandle->rhelper), pBlock) == 0) {
if (tsdbLoadBlockData(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo) == 0) {
SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo;
pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup;
@ -605,8 +610,9 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
}
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
assert(pCols->numOfRows != 0);
assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows);
pBlock->numOfRows = pCols->numOfRows;
taosArrayDestroy(sa);
tfree(data);
@ -636,7 +642,7 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1;
cur->rows = tsdbReadRowsFromCache(pCheckInfo, binfo.window.skey - step,
pQueryHandle->outputCapacity, &cur->win.skey, &cur->win.ekey, pQueryHandle);
pQueryHandle->outputCapacity, &cur->win, pQueryHandle);
pQueryHandle->realNumOfRows = cur->rows;
// update the last key value
@ -1237,7 +1243,6 @@ static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void*
// assert(pLeftBlockInfoEx->compBlock->offset != pRightBlockInfoEx->compBlock->offset);
if (pLeftBlockInfoEx->compBlock->offset == pRightBlockInfoEx->compBlock->offset &&
pLeftBlockInfoEx->compBlock->last == pRightBlockInfoEx->compBlock->last) {
// todo add more information
tsdbError("error in header file, two block with same offset:%" PRId64, (int64_t)pLeftBlockInfoEx->compBlock->offset);
}
@ -1477,7 +1482,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
assert(numOfTables > 0);
SDataBlockInfo blockInfo = {{0}, 0};
if (pQueryHandle->type == TSDB_QUERY_TYPE_EXTERNAL) {
pQueryHandle->type = TSDB_QUERY_TYPE_ALL;
pQueryHandle->order = TSDB_ORDER_DESC;
@ -1487,7 +1493,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
}
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
/*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle);
/*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle, &blockInfo);
/*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, sa);
if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) {
@ -1558,7 +1564,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
bool ret = tsdbNextDataBlock((void*) pSecQueryHandle);
assert(ret);
/*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle);
/*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo);
/*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pSecQueryHandle, sa);
for (int32_t i = 0; i < numOfCols; ++i) {
@ -1693,11 +1699,11 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) {
pQueryHandle->window = (STimeWindow) {info.lastKey, TSKEY_INITIAL_VAL};
}
static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win,
STsdbQueryHandle* pQueryHandle) {
int numOfRows = 0;
int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns);
*skey = TSKEY_INITIAL_VAL;
win->skey = TSKEY_INITIAL_VAL;
int64_t st = taosGetTimestampUs();
STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb);
@ -1717,11 +1723,11 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
break;
}
if (*skey == INT64_MIN) {
*skey = key;
if (win->skey == INT64_MIN) {
win->skey = key;
}
*ekey = key;
win->ekey = key;
copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, pMeta, numOfCols, pTable);
if (++numOfRows >= maxRowsToRead) {
@ -1750,7 +1756,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
return numOfRows;
}
SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) {
void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle, SDataBlockInfo* pDataBlockInfo) {
STsdbQueryHandle* pHandle = (STsdbQueryHandle*)pQueryHandle;
SQueryFilePos* cur = &pHandle->cur;
STable* pTable = NULL;
@ -1763,16 +1769,12 @@ SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) {
STableCheckInfo* pCheckInfo = taosArrayGet(pHandle->pTableCheckInfo, pHandle->activeIndex);
pTable = pCheckInfo->pTableObj;
}
SDataBlockInfo blockInfo = {
.uid = pTable->tableId.uid,
.tid = pTable->tableId.tid,
.rows = cur->rows,
.window = cur->win,
.numOfCols = QH_GET_NUM_OF_COLS(pHandle),
};
return blockInfo;
pDataBlockInfo->uid = pTable->tableId.uid;
pDataBlockInfo->tid = pTable->tableId.tid;
pDataBlockInfo->rows = cur->rows;
pDataBlockInfo->window = cur->win;
pDataBlockInfo->numOfCols = QH_GET_NUM_OF_COLS(pHandle);
}
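/*
 * Illustrative sketch (not part of the patch): tsdbRetrieveDataBlockInfo() now
 * fills a caller-provided SDataBlockInfo instead of returning one by value, so
 * the caller supplies the storage and can reuse it across blocks. The wrapper
 * name below is hypothetical.
 */
static void exampleRetrieveBlockInfo(TsdbQueryHandleT *pQueryHandle) {
  SDataBlockInfo blockInfo = {{0}, 0};
  tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo);
  // blockInfo.window, blockInfo.rows and blockInfo.numOfCols are now populated
}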
/*
@ -1974,9 +1976,9 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) {
int32_t type = 0;
int32_t bytes = 0;
if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor extract method , to queryExecutor to generate tags values
f1 = (char*) pTable1->name;
f2 = (char*) pTable2->name;
if (colIndex == TSDB_TBNAME_COLUMN_INDEX) {
f1 = (char*) TABLE_NAME(pTable1);
f2 = (char*) TABLE_NAME(pTable2);
type = TSDB_DATA_TYPE_BINARY;
bytes = tGetTableNameColumnSchema().bytes;
} else {
@ -2085,13 +2087,17 @@ bool indexedNodeFilterFp(const void* pNode, void* param) {
char* val = NULL;
if (pInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
val = (char*) pTable->name;
val = (char*) TABLE_NAME(pTable);
} else {
val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
}
//todo :the val is possible to be null, so check it out carefully
int32_t ret = pInfo->compare(val, pInfo->q);
int32_t ret = 0;
if (val == NULL) { //the val is possible to be null, so check it out carefully
ret = -1; // val is missing in the table's tag value pairs
} else {
ret = pInfo->compare(val, pInfo->q);
}
switch (pInfo->optr) {
case TSDB_RELATION_EQUAL: {

View File

@ -26,6 +26,7 @@ extern "C" {
#define DEBUG_INFO DEBUG_WARN
#define DEBUG_DEBUG 4U
#define DEBUG_TRACE 8U
#define DEBUG_DUMP 16U
#define DEBUG_SCREEN 64U
#define DEBUG_FILE 128U

View File

@ -13,10 +13,10 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tlosertree.h"
#include "os.h"
#include "taosmsg.h"
#include "tlosertree.h"
#include "queryLog.h"
#include "tulog.h"
// set initial value for loser tree
void tLoserTreeInit(SLoserTreeInfo* pTree) {
@ -45,7 +45,7 @@ uint32_t tLoserTreeCreate(SLoserTreeInfo** pTree, int32_t numOfEntries, void* pa
*pTree = (SLoserTreeInfo*)calloc(1, sizeof(SLoserTreeInfo) + sizeof(SLoserTreeNode) * totalEntries);
if ((*pTree) == NULL) {
qError("allocate memory for loser-tree failed. reason:%s", strerror(errno));
uError("allocate memory for loser-tree failed. reason:%s", strerror(errno));
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}

View File

@ -34,8 +34,7 @@
#define TSDB_VNODE_VERSION_CONTENT_LEN 31
static int32_t tsOpennedVnodes;
static void *tsDnodeVnodesHash;
static SHashObj*tsDnodeVnodesHash;
static void vnodeCleanUp(SVnodeObj *pVnode);
static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg);
static int32_t vnodeReadCfg(SVnodeObj *pVnode);
@ -47,8 +46,6 @@ static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
static void vnodeNotifyRole(void *ahandle, int8_t role);
static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion);
static pthread_once_t vnodeModuleInit = PTHREAD_ONCE_INIT;
#ifndef _SYNC
tsync_h syncStart(const SSyncInfo *info) { return NULL; }
int32_t syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle, int qtype) { return 0; }
@ -58,25 +55,28 @@ int syncGetNodesRole(tsync_h shandle, SNodesRole * cfg) { return 0; }
void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {}
#endif
static void vnodeInit() {
int32_t vnodeInitResources() {
vnodeInitWriteFp();
vnodeInitReadFp();
tsDnodeVnodesHash = taosHashInit(TSDB_MAX_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true);
if (tsDnodeVnodesHash == NULL) {
vError("failed to init vnode list");
return TSDB_CODE_VND_OUT_OF_MEMORY;
}
return TSDB_CODE_SUCCESS;
}
void vnodeCleanupResources() {
taosHashCleanup(tsDnodeVnodesHash);
vnodeModuleInit = PTHREAD_ONCE_INIT;
tsDnodeVnodesHash = NULL;
if (tsDnodeVnodesHash != NULL) {
taosHashCleanup(tsDnodeVnodesHash);
tsDnodeVnodesHash = NULL;
}
}
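/*
 * Illustrative sketch (not part of the patch): with the pthread_once-based
 * lazy init removed, module startup and shutdown are expected to call the
 * init/cleanup pair explicitly. Error handling is simplified and the wrapper
 * name is hypothetical.
 */
static int32_t exampleVnodeModuleLifecycle(void) {
  int32_t code = vnodeInitResources();
  if (code != TSDB_CODE_SUCCESS) return code;
  // ... open vnodes and serve requests ...
  vnodeCleanupResources();
  return TSDB_CODE_SUCCESS;
}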
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
int32_t code;
pthread_once(&vnodeModuleInit, vnodeInit);
SVnodeObj *pTemp = (SVnodeObj *)taosHashGet(tsDnodeVnodesHash, (const char *)&pVnodeCfg->cfg.vgId, sizeof(int32_t));
if (pTemp != NULL) {
@ -144,11 +144,6 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
}
int32_t vnodeDrop(int32_t vgId) {
if (tsDnodeVnodesHash == NULL) {
vDebug("vgId:%d, failed to drop, vgId not exist", vgId);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
}
SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t));
if (ppVnode == NULL || *ppVnode == NULL) {
vDebug("vgId:%d, failed to drop, vgId not find", vgId);
@ -165,6 +160,13 @@ int32_t vnodeDrop(int32_t vgId) {
int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
SVnodeObj *pVnode = param;
if (pVnode->status != TAOS_VN_STATUS_READY)
return TSDB_CODE_VND_INVALID_STATUS;
if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED)
return TSDB_CODE_VND_NOT_SYNCED;
pVnode->status = TAOS_VN_STATUS_UPDATING;
int32_t code = vnodeSaveCfg(pVnodeCfg);
@ -187,7 +189,6 @@ int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
int32_t vnodeOpen(int32_t vnode, char *rootDir) {
char temp[TSDB_FILENAME_LEN];
pthread_once(&vnodeModuleInit, vnodeInit);
SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1);
if (pVnode == NULL) {
@ -195,7 +196,6 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
return TAOS_SYSTEM_ERROR(errno);
}
atomic_add_fetch_32(&tsOpennedVnodes, 1);
atomic_add_fetch_32(&pVnode->refCount, 1);
pVnode->vgId = vnode;
@ -366,13 +366,11 @@ void vnodeRelease(void *pVnodeRaw) {
free(pVnode);
int32_t count = atomic_sub_fetch_32(&tsOpennedVnodes, 1);
int32_t count = taosHashGetSize(tsDnodeVnodesHash);
vDebug("vgId:%d, vnode is released, vnodes:%d", vgId, count);
}
void *vnodeGetVnode(int32_t vgId) {
if (tsDnodeVnodesHash == NULL) return NULL;
SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t));
if (ppVnode == NULL || *ppVnode == NULL) {
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
@ -417,10 +415,19 @@ void *vnodeGetWal(void *pVnode) {
}
static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) {
if (pVnode->status == TAOS_VN_STATUS_DELETING) return;
int64_t totalStorage = 0;
int64_t compStorage = 0;
int64_t pointsWritten = 0;
if (pVnode->status != TAOS_VN_STATUS_READY) return;
if (pStatus->openVnodes >= TSDB_MAX_VNODES) return;
int64_t totalStorage, compStorage, pointsWritten = 0;
tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage);
// still need to report status when unsynced
if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED) {
} else if (pVnode->tsdb == NULL) {
} else {
tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage);
}
SVnodeLoad *pLoad = &pStatus->load[pStatus->openVnodes++];
pLoad->vgId = htonl(pVnode->vgId);
@ -434,8 +441,6 @@ static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) {
}
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
if (tsDnodeVnodesHash == NULL) return TSDB_CODE_SUCCESS;
SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash);
while (taosHashIterNext(pIter)) {
SVnodeObj **pVnode = taosHashIterGet(pIter);

View File

@ -46,9 +46,9 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) {
return TSDB_CODE_VND_MSG_NOT_PROCESSED;
}
if (pVnode->status == TAOS_VN_STATUS_DELETING || pVnode->status == TAOS_VN_STATUS_CLOSING) {
if (pVnode->status != TAOS_VN_STATUS_READY) {
vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[msgType], pVnode->status);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
return TSDB_CODE_VND_INVALID_STATUS;
}
// TODO: Later, let slave to support query
@ -94,10 +94,10 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
}
int32_t code = TSDB_CODE_SUCCESS;
qinfo_t pQInfo = NULL;
void** handle = NULL;
if (contLen != 0) {
qinfo_t pQInfo = NULL;
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, NULL, &pQInfo);
SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp));
@ -110,7 +110,6 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
// current connect is broken
if (code == TSDB_CODE_SUCCESS) {
// add lock here
handle = qRegisterQInfo(pVnode->qMgmt, pQInfo);
if (handle == NULL) { // failed to register qhandle
pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE;
@ -122,36 +121,38 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
pRsp->qhandle = htobe64((uint64_t) (handle));
}
pQInfo = NULL;
if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, pQInfo, pReadMsg->rpcMsg.handle);
vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle);
pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
// NOTE: there two refcount, needs to kill twice
// query has not been put into qhandle pool, kill it directly.
qKillQuery(pQInfo);
qKillQuery(*handle);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
return pRsp->code;
}
} else {
assert(pQInfo == NULL);
}
if (handle != NULL) {
dnodePutItemIntoReadQueue(pVnode, handle);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
}
vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo);
} else {
assert(pCont != NULL);
pQInfo = *(void**)(pCont);
handle = pCont;
code = TSDB_CODE_VND_ACTION_IN_PROGRESS;
vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, pQInfo);
}
if (pQInfo != NULL) {
qTableQuery(pQInfo); // do execute query
assert(handle != NULL);
handle = qAcquireQInfo(pVnode->qMgmt, (void**) pCont);
if (handle == NULL) {
vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", *(void**) pCont, pReadMsg->rpcMsg.handle);
code = TSDB_CODE_QRY_INVALID_QHANDLE;
} else {
vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, *(void**) pCont);
code = TSDB_CODE_VND_ACTION_IN_PROGRESS;
qTableQuery(*handle); // do execute query
}
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
}
return code;
}
@ -160,57 +161,64 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
SRspRet *pRet = &pReadMsg->rspRet;
SRetrieveTableMsg *pRetrieve = pCont;
void **pQInfo = (void*) htobe64(pRetrieve->qhandle);
pRetrieve->qhandle = htobe64(pRetrieve->qhandle);
pRetrieve->free = htons(pRetrieve->free);
vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *pQInfo);
vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *(void**) pRetrieve->qhandle);
memset(pRet, 0, sizeof(SRspRet));
int32_t ret = 0;
void** handle = qAcquireQInfo(pVnode->qMgmt, pQInfo);
if (handle == NULL || handle != pQInfo) {
ret = TSDB_CODE_QRY_INVALID_QHANDLE;
int32_t code = TSDB_CODE_SUCCESS;
void** handle = qAcquireQInfo(pVnode->qMgmt, (void**) pRetrieve->qhandle);
if (handle == NULL || handle != (void**) pRetrieve->qhandle) {
code = TSDB_CODE_QRY_INVALID_QHANDLE;
vDebug("vgId:%d, invalid qhandle in fetch result, QInfo:%p", pVnode->vgId, *(void**) pRetrieve->qhandle);
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
pRet->len = sizeof(SRetrieveTableRsp);
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
SRetrieveTableRsp* pRsp = pRet->rsp;
pRsp->numOfRows = 0;
pRsp->completed = true;
pRsp->useconds = 0;
return code;
}
if (pRetrieve->free == 1) {
if (ret == TSDB_CODE_SUCCESS) {
vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
pRet->len = sizeof(SRetrieveTableRsp);
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
pRet->len = sizeof(SRetrieveTableRsp);
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
SRetrieveTableRsp* pRsp = pRet->rsp;
pRsp->numOfRows = 0;
pRsp->completed = true;
pRsp->useconds = 0;
} else { // todo handle error
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
}
return ret;
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
SRetrieveTableRsp* pRsp = pRet->rsp;
pRsp->numOfRows = 0;
pRsp->completed = true;
pRsp->useconds = 0;
return code;
}
int32_t code = qRetrieveQueryResultInfo(*pQInfo);
if (code != TSDB_CODE_SUCCESS || ret != TSDB_CODE_SUCCESS) {
//TODO
bool freeHandle = true;
code = qRetrieveQueryResultInfo(*handle);
if (code != TSDB_CODE_SUCCESS) {
//TODO handle malloc failure
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
} else {
// todo check code and handle error in build result set
code = qDumpRetrieveResult(*pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len);
if (qHasMoreResultsToRetrieve(*handle)) {
dnodePutItemIntoReadQueue(pVnode, handle);
pRet->qhandle = handle;
code = TSDB_CODE_SUCCESS;
} else { // no further execution invoked, release the ref to vnode
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
} else { // if failed to dump result, free qhandle immediately
if ((code = qDumpRetrieveResult(*handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len)) == TSDB_CODE_SUCCESS) {
if (qHasMoreResultsToRetrieve(*handle)) {
dnodePutItemIntoReadQueue(pVnode, handle);
pRet->qhandle = handle;
freeHandle = false;
}
}
}
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle);
return code;
}
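
The rewritten fetch path funnels every outcome into a single qReleaseQInfo call at the end: freeHandle stays true unless the result was dumped successfully and qHasMoreResultsToRetrieve reports pending rows, in which case the handle is re-queued and kept alive. A small self-contained sketch of that single-release-point pattern (the helpers here are stubs, not the real query-engine calls):

```c
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real retrieve helpers. */
static int  retrieve_result(void)      { return 0; }        /* cf. qRetrieveQueryResultInfo  */
static int  dump_result(void)          { return 0; }        /* cf. qDumpRetrieveResult       */
static bool has_more_results(int step) { return step < 2; } /* cf. qHasMoreResultsToRetrieve */
static void requeue(void)              { printf("re-queued for next fetch\n"); }
static void release(bool freeHandle)   { printf("release(freeHandle=%d)\n", (int)freeHandle); }

/* One fetch round: decide whether the qhandle survives this call. */
static int process_fetch(int step) {
  bool freeHandle = true;              /* default: free on failure or completion */
  int  code = retrieve_result();
  if (code == 0) {
    code = dump_result();
    if (code == 0 && has_more_results(step)) {
      requeue();
      freeHandle = false;              /* keep the handle alive for the next fetch */
    }
  }
  release(freeHandle);                 /* exactly one release on every path */
  return code;
}

int main(void) {
  process_fetch(1);   /* more rows pending: handle kept */
  process_fetch(2);   /* last batch: handle freed       */
  return 0;
}
```
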
@ -225,4 +233,4 @@ int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) {
vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle);
return rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg));
}
}

View File

@ -58,7 +58,7 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
if (pHead->version == 0) { // from client or CQ
if (pVnode->status != TAOS_VN_STATUS_READY)
return TSDB_CODE_VND_INVALID_VGROUP_ID; // it may be in deleting or closing state
return TSDB_CODE_VND_INVALID_STATUS; // it may be in deleting or closing state
if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER)
return TSDB_CODE_RPC_NOT_READY;
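
This hunk only swaps the error code: a write carrying version 0 (from a client or CQ) is now rejected with TSDB_CODE_VND_INVALID_STATUS when the vnode is not READY, and it still requires the MASTER role when replica > 1. A standalone sketch of that gating logic, with hypothetical enums standing in for the constants named in the diff:

```c
#include <stdio.h>

/* Hypothetical mirrors of the constants used in the diff. */
enum { TAOS_VN_STATUS_READY = 1, TAOS_VN_STATUS_CLOSING = 2 };
enum { TAOS_SYNC_ROLE_SLAVE = 1, TAOS_SYNC_ROLE_MASTER = 2 };
enum { OK = 0, VND_INVALID_STATUS = 1, RPC_NOT_READY = 2 };

/* Gate a client/CQ write (version == 0) before it is forwarded or applied. */
static int check_client_write(int status, int replica, int role) {
  if (status != TAOS_VN_STATUS_READY)
    return VND_INVALID_STATUS;         /* vnode may be closing or being dropped */
  if (replica > 1 && role != TAOS_SYNC_ROLE_MASTER)
    return RPC_NOT_READY;              /* only the master accepts client writes */
  return OK;
}

int main(void) {
  printf("%d\n", check_client_write(TAOS_VN_STATUS_READY,   3, TAOS_SYNC_ROLE_SLAVE));  /* 2 */
  printf("%d\n", check_client_write(TAOS_VN_STATUS_CLOSING, 1, TAOS_SYNC_ROLE_MASTER)); /* 1 */
  printf("%d\n", check_client_write(TAOS_VN_STATUS_READY,   1, TAOS_SYNC_ROLE_MASTER)); /* 0 */
  return 0;
}
```
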
@ -89,21 +89,25 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
return syncCode;
}
void vnodeConfirmForward(void *param, uint64_t version, int32_t code) {
SVnodeObj *pVnode = (SVnodeObj *)param;
syncConfirmForward(pVnode->sync, version, code);
}
static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) {
int32_t code = TSDB_CODE_SUCCESS;
// save insert result into item
vTrace("vgId:%d, submit msg is processed", pVnode->vgId);
pRet->len = sizeof(SShellSubmitRspMsg);
pRet->rsp = rpcMallocCont(pRet->len);
SShellSubmitRspMsg *pRsp = pRet->rsp;
// save insert result into item
SShellSubmitRspMsg *pRsp = NULL;
if (pRet) {
pRet->len = sizeof(SShellSubmitRspMsg);
pRet->rsp = rpcMallocCont(pRet->len);
pRsp = pRet->rsp;
}
if (tsdbInsertData(pVnode->tsdb, pCont, pRsp) < 0) code = terrno;
pRsp->numOfFailedBlocks = 0; //TODO
//pRet->len += pRsp->numOfFailedBlocks * sizeof(SShellSubmitRspBlock); //TODO
pRsp->code = 0;
pRsp->numOfRows = htonl(1);
return code;
}
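
vnodeProcessSubmitMsg now treats pRet as optional: the SShellSubmitRspMsg is allocated and handed to tsdbInsertData only when a response slot was supplied, so writes that need no reply skip the allocation entirely. A minimal sketch of that null-safe out-parameter pattern, using hypothetical simplified types:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical simplified response types. */
typedef struct { int code; int numOfRows; } SubmitRsp;
typedef struct { int len; void *rsp; }      RspRet;

static int tsdb_insert(const char *rows, SubmitRsp *rsp) {
  /* pretend to insert; report back only if a response struct was given */
  if (rsp != NULL) rsp->numOfRows = 1;
  printf("inserted: %s\n", rows);
  return 0;
}

/* pRet may be NULL (e.g. a replayed write): allocate the reply only when asked. */
static int process_submit(const char *rows, RspRet *pRet) {
  SubmitRsp *pRsp = NULL;
  if (pRet != NULL) {
    pRet->len = sizeof(SubmitRsp);
    pRet->rsp = calloc(1, pRet->len);
    pRsp = pRet->rsp;
  }
  return tsdb_insert(rows, pRsp);      /* pRsp == NULL is accepted downstream */
}

int main(void) {
  RspRet ret = {0};
  process_submit("client write", &ret);  /* response allocated and filled */
  process_submit("replayed write", NULL);/* no response needed            */
  free(ret.rsp);
  return 0;
}
```
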
@ -158,7 +162,7 @@ static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
}
static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) {
if (tsdbUpdateTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont) < 0) {
if (tsdbUpdateTableTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont) < 0) {
return terrno;
}
return TSDB_CODE_SUCCESS;

View File

@ -78,5 +78,6 @@ class TDTestCase:
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -40,6 +40,9 @@ class TDTestCase:
ret = tdSql.query('select server_status() as result')
tdSql.checkData(0, 0, 1)
ret = tdSql.execute('alter dnode 127.0.0.1 debugFlag 135')
tdLog.info("alter dnode ret: %d" % ret)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -1,7 +1,6 @@
#!/bin/bash
ulimit -c unlimited
python3 ./test.py -f client/client.py
python3 ./test.py -f insert/basic.py
python3 ./test.py -f insert/int.py
python3 ./test.py -f insert/float.py
@ -151,3 +150,6 @@ python3 ./test.py -f stream/stream2.py
#alter table
python3 ./test.py -f alter/alter_table_crash.py
# client
python3 ./test.py -f client/client.py

View File

@ -67,7 +67,7 @@ class DBWriteNonStop:
self.cursor.execute(
"select first(ts), last(ts), min(speed), max(speed), avg(speed), count(*) from st")
data = self.cursor.fetchall()
end = datetime.now()
end = datetime.now()
self.writeDataToCSVFile(data, (end - start).seconds)
time.sleep(.001)
@ -75,8 +75,9 @@ class DBWriteNonStop:
self.cursor.close()
self.conn.close()
test = DBWriteNonStop()
test.connectDB()
test.createTable()
test.insertData()
test.closeConn()
test.closeConn()

View File

@ -89,17 +89,25 @@ class TDTestCase:
tdSql.checkRows(101)
# range for int type on column
tdSql.query("select * from st%s where num > 50 and num < 100" % curType)
tdSql.checkRows(49)
tdSql.query(
"select * from st%s where num > 50 and num < 100" %
curType)
tdSql.checkRows(49)
tdSql.query("select * from st%s where num >= 50 and num < 100" % curType)
tdSql.checkRows(50)
tdSql.query(
"select * from st%s where num >= 50 and num < 100" %
curType)
tdSql.checkRows(50)
tdSql.query("select * from st%s where num > 50 and num <= 100" % curType)
tdSql.checkRows(50)
tdSql.query(
"select * from st%s where num > 50 and num <= 100" %
curType)
tdSql.checkRows(50)
tdSql.query("select * from st%s where num >= 50 and num <= 100" % curType)
tdSql.checkRows(51)
tdSql.query(
"select * from st%s where num >= 50 and num <= 100" %
curType)
tdSql.checkRows(51)
# > for int type on tag
tdSql.query("select * from st%s where id > 5" % curType)
@ -135,16 +143,22 @@ class TDTestCase:
# range for int type on tag
tdSql.query("select * from st%s where id > 5 and id < 7" % curType)
tdSql.checkRows(10)
tdSql.checkRows(10)
tdSql.query("select * from st%s where id >= 5 and id < 7" % curType)
tdSql.checkRows(20)
tdSql.query(
"select * from st%s where id >= 5 and id < 7" %
curType)
tdSql.checkRows(20)
tdSql.query("select * from st%s where id > 5 and id <= 7" % curType)
tdSql.checkRows(20)
tdSql.query(
"select * from st%s where id > 5 and id <= 7" %
curType)
tdSql.checkRows(20)
tdSql.query("select * from st%s where id >= 5 and id <= 7" % curType)
tdSql.checkRows(30)
tdSql.query(
"select * from st%s where id >= 5 and id <= 7" %
curType)
tdSql.checkRows(30)
print(
"======= Verify filter for %s type finished =========" %

View File

@ -52,8 +52,7 @@ class TDTestCase:
# illegal condition
tdSql.error(
"select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
tdSql.error("select * from db.st where tagtype <> 1 OR tagtype < 2")
"select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
def stop(self):
tdSql.close()

View File

@ -77,7 +77,7 @@ class TDTestCase:
# join queries
tdSql.query(
"select * from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")
tdSql.checkRows(6)
tdSql.checkRows(6)
tdSql.error(
"select ts, pressure, temperature, id, dscrption from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")
@ -108,7 +108,7 @@ class TDTestCase:
tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")
tdSql.checkRows(6)
tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")
tdSql.checkRows(6)

View File

@ -55,9 +55,9 @@ class MetadataQuery:
def createTablesAndInsertData(self, threadID):
cursor = self.connectDB()
cursor.execute("use test")
cursor.execute("use test")
tablesPerThread = int (self.tables / self.numOfTherads)
tablesPerThread = int(self.tables / self.numOfTherads)
base = threadID * tablesPerThread
for i in range(tablesPerThread):
cursor.execute(
@ -68,24 +68,72 @@ class MetadataQuery:
%d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
%d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
%d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' %
(base + i + 1,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100))
(base + i + 1, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100))
cursor.execute(
"insert into t%d values(%d, 1) (%d, 2) (%d, 3) (%d, 4) (%d, 5)" %
(base + i + 1, self.ts + 1, self.ts + 2, self.ts + 3, self.ts + 4, self.ts + 5))
cursor.close()
cursor.close()
def queryData(self, query):
cursor = self.connectDB()
cursor.execute("use test")
print("================= query tag data =================")
print("================= query tag data =================")
startTime = datetime.now()
cursor.execute(query)
cursor.fetchall()
@ -107,15 +155,15 @@ if __name__ == '__main__':
print(
"================= Create %d tables and insert %d records into each table =================" %
(t.tables, t.records))
startTime = datetime.now()
startTime = datetime.now()
threads = []
for i in range(t.numOfTherads):
thread = threading.Thread(
target=t.createTablesAndInsertData, args=(i,))
thread.start()
threads.append(thread)
for th in threads:
for th in threads:
th.join()
endTime = datetime.now()

View File

@ -19,6 +19,7 @@ import time
from datetime import datetime
import numpy as np
class MyThread(threading.Thread):
def __init__(self, func, args=()):
@ -35,17 +36,23 @@ class MyThread(threading.Thread):
except Exception:
return None
class MetadataQuery:
def initConnection(self):
self.tables = 100
self.records = 10
self.numOfTherads =5
self.numOfTherads = 5
self.ts = 1537146000000
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
self.config = "/etc/taos"
self.conn = taos.connect( self.host, self.user, self.password, self.config)
self.conn = taos.connect(
self.host,
self.user,
self.password,
self.config)
def connectDB(self):
return self.conn.cursor()
@ -69,7 +76,7 @@ class MetadataQuery:
cursor.execute("use test")
base = threadID * self.tables
tablesPerThread = int (self.tables / self.numOfTherads)
tablesPerThread = int(self.tables / self.numOfTherads)
for i in range(tablesPerThread):
cursor.execute(
'''create table t%d using meters tags(
@ -79,20 +86,69 @@ class MetadataQuery:
%d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
%d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
%d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' %
(base + i + 1,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100))
(base + i + 1, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100, (base + i) %
100, (base + i) %
10000, (base + i) %
1000000, (base + i) %
100000000, (base + i) %
100 * 1.1, (base + i) %
100 * 2.3, (base + i) %
2, (base + i) %
100, (base + i) %
100))
for j in range(self.records):
cursor.execute(
"insert into t%d values(%d, %d)" %
(base + i + 1, self.ts + j, j))
cursor.close()
def queryWithTagId(self, threadId, tagId, queryNum):
print("---------thread%d start-----------"%threadId)
def queryWithTagId(self, threadId, tagId, queryNum):
print("---------thread%d start-----------" % threadId)
query = '''select tgcol1, tgcol2, tgcol3, tgcol4, tgcol5, tgcol6, tgcol7, tgcol8, tgcol9,
tgcol10, tgcol11, tgcol12, tgcol13, tgcol14, tgcol15, tgcol16, tgcol17, tgcol18,
tgcol19, tgcol20, tgcol21, tgcol22, tgcol23, tgcol24, tgcol25, tgcol26, tgcol27,
@ -103,18 +159,19 @@ class MetadataQuery:
latancy = []
cursor = self.connectDB()
cursor.execute("use test")
for i in range(queryNum):
for i in range(queryNum):
startTime = time.time()
cursor.execute(query.format(id = tagId, condition = i))
cursor.execute(query.format(id=tagId, condition=i))
cursor.fetchall()
latancy.append((time.time() - startTime))
print("---------thread%d end-----------"%threadId)
latancy.append((time.time() - startTime))
print("---------thread%d end-----------" % threadId)
return latancy
def queryData(self, query):
cursor = self.connectDB()
cursor.execute("use test")
print("================= query tag data =================")
print("================= query tag data =================")
startTime = datetime.now()
cursor.execute(query)
cursor.fetchall()
@ -124,7 +181,7 @@ class MetadataQuery:
(endTime - startTime).seconds)
cursor.close()
#self.conn.close()
# self.conn.close()
if __name__ == '__main__':
@ -132,18 +189,33 @@ if __name__ == '__main__':
t = MetadataQuery()
t.initConnection()
latancys = []
threads = []
latancys = []
threads = []
tagId = 1
queryNum = 1000
queryNum = 1000
for i in range(t.numOfTherads):
thread = MyThread(t.queryWithTagId, args = (i, tagId, queryNum))
threads.append(thread)
thread = MyThread(t.queryWithTagId, args=(i, tagId, queryNum))
threads.append(thread)
thread.start()
for i in range(t.numOfTherads):
for i in range(t.numOfTherads):
threads[i].join()
latancys.extend(threads[i].get_result())
print("Total query: %d"%(queryNum * t.numOfTherads))
print("statistic(s): mean= %f, P50 = %f, P75 = %f, P95 = %f, P99 = %f"
%(sum(latancys)/(queryNum * t.numOfTherads), np.percentile(latancys, 50), np.percentile(latancys, 75), np.percentile(latancys, 95), np.percentile(latancys, 99)))
latancys.extend(threads[i].get_result())
print("Total query: %d" % (queryNum * t.numOfTherads))
print(
"statistic(s): mean= %f, P50 = %f, P75 = %f, P95 = %f, P99 = %f" %
(sum(latancys) /
(
queryNum *
t.numOfTherads),
np.percentile(
latancys,
50),
np.percentile(
latancys,
75),
np.percentile(
latancys,
95),
np.percentile(
latancys,
99)))

View File

@ -36,18 +36,17 @@ class TDTestCase:
"insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)")
# inner join --- bug
tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts")
tdSql.checkRows(1)
tdSql.error("select * from tb1 a, tb2 b where a.ts = b.ts")
# join 3 tables -- bug exists
tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
# query show stable
tdSql.query("show stables")
tdSql.checkRows(1)
# query show tables
tdSql.query("show table")
tdSql.query("show tables")
tdSql.checkRows(2)
# query count
@ -71,16 +70,13 @@ class TDTestCase:
tdSql.checkRows(2)
# query first ... as
tdSql.query("select first(*) as begin from stb1")
tdSql.checkData(0, 1, 1)
tdSql.error("select first(*) as begin from stb1")
# query last ... as
tdSql.query("select last(*) as end from stb1")
tdSql.checkData(0, 1, 4)
tdSql.error("select last(*) as end from stb1")
# query last_row ... as
tdSql.query("select last_row(*) as end from stb1")
tdSql.checkData(0, 1, 4)
tdSql.error("select last_row(*) as end from stb1")
# query group .. by
tdSql.query("select sum(c1), t2 from stb1 group by t2")
@ -95,8 +91,7 @@ class TDTestCase:
tdSql.checkRows(1)
# query ... alias for table ---- bug
tdSql.query("select t.ts from tb1 t")
tdSql.checkRows(2)
tdSql.error("select t.ts from tb1 t")
# query ... tbname
tdSql.query("select tbname from stb1")
@ -104,7 +99,7 @@ class TDTestCase:
# query ... tbname count ---- bug
tdSql.query("select count(tbname) from stb1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 2)
# query ... select database ---- bug
tdSql.query("SELECT database()")

View File

@ -23,7 +23,6 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.rowNum = 5000
self.ts = 1537146000000
@ -36,15 +35,13 @@ class TDTestCase:
"create table t1 using st tags('dev_001')")
for i in range(self.rowNum):
tdSql.execute("insert into t1 values(%d, 'taosdata%d', %d)" % (self.ts + i, i + 1, i + 1))
tdSql.execute(
"insert into t1 values(%d, 'taosdata%d', %d)" %
(self.ts + i, i + 1, i + 1))
tdSql.query("select last(*) from st")
tdSql.checkRows(1)
print(
"======= Verify filter for %s type finished =========" %
curType)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -330,7 +330,6 @@ class Test (Thread):
self.q.put(-1)
tdLog.exit("second thread failed, first thread exit too")
elif (self.threadId == 2):
while True:
self.dbEvent.wait()

View File

@ -1,7 +1,6 @@
#!/bin/bash
ulimit -c unlimited
python3 ./test.py -f client/client.py
python3 ./test.py -f insert/basic.py
python3 ./test.py -f insert/int.py
python3 ./test.py -f insert/float.py
@ -138,6 +137,9 @@ python3 ./test.py -f query/filterOtherTypes.py
python3 ./test.py -f query/queryError.py
python3 ./test.py -f query/querySort.py
python3 ./test.py -f query/queryJoin.py
python3 ./test.py -f query/filterCombo.py
python3 ./test.py -f query/queryNormal.py
python3 ./test.py -f query/select_last_crash.py
#stream
python3 ./test.py -f stream/stream1.py
@ -146,4 +148,5 @@ python3 ./test.py -f stream/stream2.py
#alter table
python3 ./test.py -f alter/alter_table_crash.py
# client
python3 ./test.py -f client/client.py

View File

@ -1,10 +1,6 @@
#!/bin/bash
ulimit -c unlimited
# client
python3 ./test.py $1 -f client/client.py
python3 ./test.py $1 -s && sleep 1
# insert
python3 ./test.py $1 -f insert/basic.py
python3 ./test.py $1 -s && sleep 1
@ -35,3 +31,7 @@ python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f query/filter.py
python3 ./test.py $1 -s && sleep 1
# client
python3 ./test.py $1 -f client/client.py
python3 ./test.py $1 -s && sleep 1

View File

@ -198,7 +198,7 @@ class TDSql:
"%s failed: sql:%s, affectedRows:%d != expect:%d" %
(callerFilename, self.sql, self.affectedRows, expectAffectedRows))
tdLog.info("sql:%s, affectedRows:%d == expect:%d" %
(self.sql, self.affectedRows, expectAffectedRows))
(self.sql, self.affectedRows, expectAffectedRows))
tdSql = TDSql()

View File

@ -447,4 +447,15 @@ sql insert into um2 using m2 tags(9) values(1000001, 10)(2000000, 20);
sql_error select count(*) from m1,m2 where m1.a=m2.a and m1.ts=m2.ts;
#empty table join test, add for no result join test
sql create database ux1;
sql use ux1;
sql create table m1(ts timestamp, k int) tags(a binary(12), b int);
sql create table tm0 using m1 tags('abc', 1);
sql create table m2(ts timestamp, k int) tags(a int, b binary(12));
sql create table tm2 using m2 tags(2, 'abc');
sql select count(*) from tm0, tm2 where tm0.ts=tm2.ts;
sql select count(*) from m1, m2 where m1.ts=m2.ts and m1.b=m2.a
sql drop database ux1;
system sh/exec.sh -n dnode1 -s stop -x SIGINT
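
The new block above exercises a join where both tables are empty (tm0, tm2) and a tag-column join on m1.b = m2.a. For reference, a hedged C-client sketch of issuing the same count query, assuming the standard TDengine 2.x client header taos.h (taos_connect, taos_query, taos_errno, taos_errstr); the connection parameters are placeholders:

```c
#include <inttypes.h>
#include <stdio.h>
#include <taos.h>

int main(void) {
  /* placeholder connection parameters */
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "ux1", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return 1;
  }

  const char *sql = "select count(*) from tm0, tm2 where tm0.ts = tm2.ts";
  TAOS_RES *res = taos_query(conn, sql);
  if (res == NULL || taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", res ? taos_errstr(res) : "no result");
  } else {
    TAOS_ROW row = taos_fetch_row(res);
    /* a join over empty tables legitimately returns no rows at all */
    if (row == NULL) printf("no result rows (empty join)\n");
    else             printf("count(*) = %" PRId64 "\n", *(int64_t *)row[0]);
  }

  if (res != NULL) taos_free_result(res);
  taos_close(conn);
  return 0;
}
```
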

View File

@ -0,0 +1,16 @@
sql connect
$db = db1
$stb = stb1
print =============== client1_0:
sql use $db
$tblNum = 1000
$i = 1
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags ($i, 'abcd')
$i = $i + 1
endw

View File

@ -0,0 +1,494 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode2 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode4 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/cfg.sh -n dnode3 -c alternativeRole -v 0
system sh/cfg.sh -n dnode4 -c alternativeRole -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1/dnode2/dnode3
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 3000
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000
print ============== step2: create db1 with replica 3
$db = db1
print create database $db replica 3
#sql create database $db replica 3 maxTables $totalTableNum
sql create database $db replica 3
sql use $db
print ============== step3: create stable stb1
$stb = stb1
sql create table $stb (ts timestamp, c1 int, c2 int) tags(t1 int, t2 binary(8))
print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5
run_back unique/cluster/client1_0.sim
#run_back unique/cluster/client1_1.sim
#run_back unique/big_cluster/client1_2.sim
#run_back unique/big_cluster/client1_3.sim
#run_back unique/big_cluster/client1_4.sim
#run_back unique/big_cluster/client1_5.sim
#run_back unique/big_cluster/client1_6.sim
#run_back unique/big_cluster/client1_7.sim
#run_back unique/big_cluster/client1_8.sim
#run_back unique/big_cluster/client1_9.sim
print wait for a while to let clients start inserting data
print wait for a while to let clients start inserting data
sleep 5000
$loop_cnt = 0
loop_cluster_do:
print **** **** **** START loop cluster do **** **** **** ****
print ============== step5: start dnode4 and add into cluster, then wait dnode4 ready
system sh/exec.sh -n dnode4 -s start
sql create dnode $hostname4
wait_dnode4_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode4_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do 2**** **** **** ****
return
endi
if $dnode4Status != ready then
sleep 2000
goto wait_dnode4_ready_0
endi
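
Every wait step in this script uses the same bounded poll: up to 10 rounds of sql show dnodes, sleeping 2000 ms between attempts, and bailing out with return -1 when the budget is exhausted. The same idea as a standalone C helper (check_ready is a hypothetical stub in place of the SQL status check):

```c
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>   /* sleep() */

/* Hypothetical stand-in for "sql show dnodes" plus the status check. */
static bool check_ready(int attempt) { return attempt >= 3; }

/* Poll up to maxTries times, waiting intervalSec between attempts. */
static int wait_until_ready(int maxTries, unsigned intervalSec) {
  for (int i = 1; i <= maxTries; i++) {
    if (check_ready(i)) {
      printf("ready after %d attempt(s)\n", i);
      return 0;
    }
    sleep(intervalSec);
  }
  return -1;          /* cf. "if $cnt == 10 then return -1" in the script */
}

int main(void) {
  return wait_until_ready(10, 2) == 0 ? 0 : 1;
}
```
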
print ============== step6: stop and drop dnode1, then remove data dir of dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
$cnt = 0
wait_dnode1_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode1_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode1Status = $data4_1
elif $loop_cnt == 1 then
$dnode1Status = $data4_5
elif $loop_cnt == 2 then
$dnode1Status = $data4_7
elif $loop_cnt == 3 then
$dnode1Status = $data4_9
else then
print **** **** **** END loop cluster do 1**** **** **** ****
return
endi
if $dnode1Status != offline then
sleep 2000
goto wait_dnode1_offline_0
endi
sql drop dnode $hostname1
system rm -rf ../../../sim/dnode1
print ============== step7: stop dnode2, because mnodes < 50%, the cluster doesn't provide services
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sql show dnodes -x wait_dnode2_offline_0
if $rows != 3 then
sleep 2000
goto wait_dnode2_offline_0
endi
wait_dnode2_offline_0:
#$cnt = 0
#wait_dnode2_offline_0:
#$cnt = $cnt + 1
#if $cnt == 10 then
# return -1
#endi
#sql show dnodes -x wait_dnode2_offline_0
#if $rows != 3 then
# sleep 2000
# goto wait_dnode2_offline_0
#endi
#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#$dnode1Status = $data4_1
#$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#
#if $dnode2Status != offline then
# sleep 2000
# goto wait_dnode1_offline_0
#endi
print ============== step8: restart dnode2, then wait sync end
system sh/exec.sh -n dnode2 -s start
$cnt = 0
wait_dnode2_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready_0
endi
print ============== step9: stop dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s stop -x SIGINT
$cnt = 0
wait_dnode3_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode3_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $dnode3Status != offline then
sleep 2000
goto wait_dnode3_offline_0
endi
print ============== step10: restart dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s start
$cnt = 0
wait_dnode3_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode3_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $dnode3Status != ready then
sleep 2000
goto wait_dnode3_ready_0
endi
print ============== step11: stop dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s stop -x SIGINT
$cnt = 0
wait_dnode4_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode4_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do 2**** **** **** ****
return
endi
if $dnode4Status != offline then
sleep 2000
goto wait_dnode4_offline_0
endi
print ============== step12: restart dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s start
$cnt = 0
wait_dnode4_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode4_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do 2**** **** **** ****
return
endi
if $dnode4Status != ready then
sleep 2000
goto wait_dnode4_ready_0
endi
print ============== step13: alter replica 2
sql alter database $db replica 2
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
if $data0_5 != 2 then
print replica is not modified to 2, error!!!!!!
return
endi
print ============== step14: stop and drop dnode4, then remove data dir of dnode4
system sh/exec.sh -n dnode4 -s stop -x SIGINT
$cnt = 0
wait_dnode4_offline_1:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode4_offline_1
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do 2**** **** **** ****
return
endi
if $dnode4Status != offline then
sleep 2000
goto wait_dnode4_offline_1
endi
sql drop dnode $hostname4
system rm -rf ../../../sim/dnode4
print ============== step15: alter replica 1
sql alter database $db replica 1
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
if $data0_5 != 1 then
print replica is not modified to 1, error!!!!!!
return
endi
print ============== step16: alter replica 2
sql alter database $db replica 2
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
if $data0_5 != 2 then
print replica is not modified to 2, error!!!!!!
return
endi
print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready
system sh/exec.sh -n dnode1 -s start
sql create dnode $hostname1
wait_dnode1_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode1_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode1Status = $data4_1
elif $loop_cnt == 1 then
$dnode1Status = $data4_5
elif $loop_cnt == 2 then
$dnode1Status = $data4_7
elif $loop_cnt == 3 then
$dnode1Status = $data4_9
else then
print **** **** **** END loop cluster do 3**** **** **** ****
return
endi
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready_0
endi
print ============== step18: alter replica 3
sql alter database $db replica 3
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
if $data0_5 != 3 then
print replica is not modified to 3, error!!!!!!
return
endi
$loop_cnt = $loop_cnt + 1
goto loop_cluster_do