Merge branch 'develop' into jdbcfixes

This commit is contained in:
Ping Xiao 2020-07-07 14:38:53 +08:00
commit 21c7773867
79 changed files with 2479 additions and 722 deletions

View File

@@ -24,3 +24,11 @@ ENDIF ()
IF (TD_MEM_CHECK)
ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
ENDIF ()
IF (TD_RANDOM_FILE_FAIL)
ADD_DEFINITIONS(-DTAOS_RANDOM_FILE_FAIL)
ENDIF ()
IF (TD_RANDOM_NETWORK_FAIL)
ADD_DEFINITIONS(-DTAOS_RANDOM_NETWORK_FAIL)
ENDIF ()

View File

@@ -30,4 +30,14 @@ ENDIF ()
IF (${MEM_CHECK} MATCHES "true")
SET(TD_MEM_CHECK TRUE)
MESSAGE(STATUS "build with memory check")
ENDIF ()
IF (${RANDOM_FILE_FAIL} MATCHES "true")
SET(TD_RANDOM_FILE_FAIL TRUE)
MESSAGE(STATUS "build with random-file-fail enabled")
ENDIF ()
IF (${RANDOM_NETWORK_FAIL} MATCHES "true")
SET(TD_RANDOM_NETWORK_FAIL TRUE)
MESSAGE(STATUS "build with random-network-fail enabled")
ENDIF ()

View File

@@ -480,9 +480,9 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions are:
- **LEASTSQUARES**
```mysql
- SELECT LEASTSQUARES(field_name) FROM tb_name [WHERE clause]
+ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
```
- Function: the least-squares straight-line fit of a column's values against the primary key (timestamp).
+ Function: the least-squares straight-line fit of a column's values against the primary key (timestamp). start_val is the initial value of the independent variable and step_val is the step of the independent variable.
Return data type: a string expression "(slope, intercept)".
Applicable fields: cannot be applied to fields of type timestamp, binary, nchar, or bool.
Note: the independent variable is the timestamp and the dependent variable is the value of that column.
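For reference, a minimal sketch of calling the revised three-argument form through the C client is shown below. It is illustrative only: the connection parameters, database `power`, table `d1001`, and column `current` are assumptions, not part of this change, and the result format follows the description above.

```c
#include <stdio.h>
#include "taos.h"   // TDengine C client header

int main(void) {
  // assumed connection parameters; 0 selects the default port
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "power", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return 1;
  }

  // start_val = 0, step_val = 1 parameterize the independent variable as described above
  TAOS_RES *res = taos_query(conn, "SELECT LEASTSQUARES(current, 0, 1) FROM d1001");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row = taos_fetch_row(res);
    if (row != NULL) {
      // the result arrives as a string describing slope and intercept (see the doc text above)
      printf("fit: %s\n", (const char *)row[0]);
    }
  }

  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```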

View File

@@ -412,7 +412,7 @@ TDengine supports aggregations over numerical values, they are listed below:
SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the value of the specified column below which `P` percent of the data points fall.
- Return Data Type: the same data type.
+ Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: The range of `P` is `[0, 100]`. When `P=0`, `PERCENTILE` returns the same value as `MIN`; when `P=100`, it returns the same value as `MAX`.
@@ -446,7 +446,7 @@ TDengine supports aggregations over numerical values, they are listed below:
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: return the difference between the maximum and the minimum value.
- Return Data Type: the same data type.
+ Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: spread gives the range of data variation in a table/supertable; it is equivalent to `MAX()` - `MIN()`.
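Because both functions now report `double` as the return type, a caller reading the result set treats the columns as doubles. A hedged sketch using the C client (the table `d1001` and column `current` are assumed for illustration):

```c
#include <stdio.h>
#include "taos.h"

// assumes `conn` is an already-opened connection (see taos_connect) and d1001 exists
static void printSpreadAndP90(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "SELECT SPREAD(current), PERCENTILE(current, 90) FROM d1001");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row = taos_fetch_row(res);
    if (row != NULL) {
      // both values come back as double regardless of the input column type
      printf("spread=%f p90=%f\n", *(double *)row[0], *(double *)row[1]);
    }
  }
  taos_free_result(res);
}
```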

View File

@@ -31,9 +31,7 @@ extern int32_t tscEmbedded;
#define tscInfo(...) { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC INFO ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }}
#define tscDebug(...) { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }}
#define tscTrace(...) { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC TRACE ", cDebugFlag, __VA_ARGS__); }}
+ #define tscDebugL(...){ if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }}
- #define tscDebugDump(...) { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }}
- #define tscTraceDump(...) { if (cDebugFlag & DEBUG_TRACE) { taosPrintLongString("TSC TRACE ", cDebugFlag, __VA_ARGS__); }}
#ifdef __cplusplus
}

View File

@@ -29,9 +29,6 @@
#define jniDebug(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLog("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }}
#define jniTrace(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLog("JNI TRACE ", jniDebugFlag, __VA_ARGS__); }}
- #define jniDebugDump(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLongString("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }}
- #define jniTraceDump(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLongString("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }}
int __init = 0;
JavaVM *g_vm = NULL;

View File

@@ -55,7 +55,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
strtolower(pSql->sqlstr, sqlstr);
- tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr);
+ tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
pSql->cmd.curSql = pSql->sqlstr;
int32_t code = tsParseSql(pSql, true);
@@ -471,7 +471,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
}
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
- // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated
+ // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly.
// 2. vnode may need the schema information along with submit block to update its local table schema.
if (pCmd->command == TSDB_SQL_INSERT) {
tscDebug("%p redo parse sql string to build submit block", pSql);

View File

@@ -406,7 +406,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
pSql->res.qhandle = 0x1;
pSql->res.numOfRows = 0;
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscCacheHandle, false);
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
tscProcessServerVer(pSql);
} else if (pCmd->command == TSDB_SQL_CLI_VERSION) {

View File

@@ -364,7 +364,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
TSKEY stime = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t revisedSTime =
- taosGetIntervalStartTimestamp(stime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
+ taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
@@ -831,7 +831,7 @@ void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo
if (pFillInfo != NULL) {
int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
- taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
+ taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
taosResetFillInfo(pFillInfo, revisedSTime);
}
@@ -1301,9 +1301,7 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime =
- taosGetIntervalStartTimestamp(skey, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
+ taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
- // taosResetFillInfo(pLocalReducer->pFillInfo, pQueryInfo->order.order, newTime,
- // pQueryInfo->groupbyExpr.numOfGroupCols, 4096, 0, NULL, pLocalReducer->rowSize);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
}
}

View File

@@ -538,7 +538,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sql);
- tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr);
+ tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true;

View File

@@ -4487,10 +4487,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload;
pUpdateMsg->head.vgId = htonl(pTableMeta->vgroupInfo.vgId);
pUpdateMsg->tid = htonl(pTableMeta->sid);
pUpdateMsg->uid = htobe64(pTableMeta->uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
+ pUpdateMsg->type = pTagsSchema->type;
+ pUpdateMsg->bytes = htons(pTagsSchema->bytes);
pUpdateMsg->tversion = htons(pTableMeta->tversion);
pUpdateMsg->numOfTags = htons(numOfTags);
pUpdateMsg->schemaLen = htonl(schemaLen);

View File

@@ -247,7 +247,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
} else {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
- rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+ rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
if (pCmd->command == TSDB_SQL_CONNECT) {
rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
rpcFreeCont(rpcMsg->pCont);
@@ -260,7 +260,12 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
// get table meta query will not retry, do nothing
} else {
tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry);
// set the flag to denote that sql string needs to be re-parsed and build submit block with table schema
if (rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
pSql->cmd.submitSchema = 1;
}
pSql->res.code = rpcMsg->code; // keep the previous error code
if (pSql->retry > pSql->maxRetry) {
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
@@ -1950,7 +1955,7 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
}
int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) {
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscCacheHandle, false);
return 0;
}
@@ -1996,7 +2001,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
if (isSuperTable) { // if it is a super table, reset whole query cache
tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscCacheHandle, false);
}
}

View File

@@ -503,7 +503,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
}
strtolower(pSql->sqlstr, sqlstr);
- tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr);
+ tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
tsem_init(&pSql->rspSem, 0, 0);
int32_t code = tsParseSql(pSql, true);

View File

@@ -579,9 +579,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta);
for(int32_t j = 0; j < numOfCols; ++j) {
STColumn* pCol = (STColumn*) pDataBlock;
- pCol->colId = pSchema[j].colId;
+ pCol->colId = htons(pSchema[j].colId);
pCol->type = pSchema[j].type;
- pCol->bytes = pSchema[j].bytes;
+ pCol->bytes = htons(pSchema[j].bytes);
pCol->offset = 0;
pDataBlock += sizeof(STColumn);
@@ -663,7 +663,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
}
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
- int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize;
+ int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
if (dataBuf->nAllocSize < destSize) {
while (dataBuf->nAllocSize < destSize) {
@@ -691,7 +691,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
tscDebug("%p tableId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableId,
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
- int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize);
+ int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
pBlocks->tid = htonl(pBlocks->tid);
pBlocks->uid = htobe64(pBlocks->uid);
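The added `htons()`/`htonl()` calls keep the column descriptors and block header in network byte order while they travel to the vnode. Below is a standalone sketch of that convention; the struct is a stand-in that only mirrors the three STColumn fields touched here, not the real definition.

```c
#include <arpa/inet.h>  // htons, ntohs
#include <stdint.h>
#include <stdio.h>

// illustrative stand-in for the on-wire column descriptor
typedef struct {
  int16_t colId;
  int8_t  type;
  int16_t bytes;
} DemoColumn;

int main(void) {
  DemoColumn host = { .colId = 3, .type = 6, .bytes = 4 };   // values in host byte order

  // sender: convert multi-byte fields before the block leaves the client
  DemoColumn wire = { .colId = (int16_t)htons((uint16_t)host.colId),
                      .type  = host.type,                    // single byte, no conversion needed
                      .bytes = (int16_t)htons((uint16_t)host.bytes) };

  // receiver: convert back before interpreting the schema
  printf("decoded colId=%d bytes=%d\n", ntohs((uint16_t)wire.colId), ntohs((uint16_t)wire.bytes));
  return 0;
}
```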

View File

@@ -29,4 +29,6 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
+ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H

View File

@@ -32,9 +32,6 @@ extern int32_t tscEmbedded;
#define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL DEBUG ", uDebugFlag, __VA_ARGS__); }}
#define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL TRACE ", uDebugFlag, __VA_ARGS__); }}
- #define uDebugDump(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLongString("UTL DEBUG ", uDebugFlag, __VA_ARGS__); }}
- #define uTraceDump(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLongString("UTL TRACE ", uDebugFlag, __VA_ARGS__); }}
#define pError(...) { taosPrintLog("APP ERROR ", 255, __VA_ARGS__); }
#define pPrint(...) { taosPrintLog("APP INFO ", 255, __VA_ARGS__); }

View File

@@ -1210,7 +1210,7 @@ void taosInitGlobalCfg() {
}
bool taosCheckGlobalCfg() {
- if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG) {
+ if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) {
taosSetAllDebugFlag();
}

View File

@@ -75,3 +75,33 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
}
int64_t start = ((startTime - intervalTime) / slidingTime + 1) * slidingTime;
if (!(timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) {
/*
* here we revised the start time of day according to the local time zone,
* but in case of DST, the start time of one day need to be dynamically decided.
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
start += timezone * t;
}
int64_t end = start + intervalTime - 1;
if (end < startTime) {
start += slidingTime;
}
return start;
}
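For intuition, the alignment arithmetic above can be checked with a small standalone sketch; it strips out the precision and time-zone handling and uses made-up millisecond values, so it is not a drop-in replacement for the function.

```c
#include <stdint.h>
#include <stdio.h>

// simplified alignment: same core arithmetic as taosGetIntervalStartTimestamp,
// without the time-zone adjustment and precision handling
static int64_t alignedStart(int64_t startTime, int64_t slidingTime, int64_t intervalTime) {
  if (slidingTime == 0) return startTime;
  int64_t start = ((startTime - intervalTime) / slidingTime + 1) * slidingTime;
  if (start + intervalTime - 1 < startTime) start += slidingTime;
  return start;
}

int main(void) {
  // a point at t = 12345 ms with a 10000 ms interval sliding every 5000 ms:
  // the earliest window that still covers the point starts at 5000 ms
  int64_t t = 12345, interval = 10000, sliding = 5000;
  printf("start = %lld\n", (long long)alignedStart(t, sliding, interval));
  return 0;
}
```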

View File

@@ -106,6 +106,12 @@ int32_t dnodeInitMgmt() {
}
}
int32_t code = vnodeInitResources();
if (code != TSDB_CODE_SUCCESS) {
dnodeCleanupMgmt();
return -1;
}
// create the queue and thread to handle the message
tsMgmtQset = taosOpenQset();
if (tsMgmtQset == NULL) {
@@ -127,7 +133,7 @@ int32_t dnodeInitMgmt() {
pthread_attr_init(&thAttr);
pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
- int32_t code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL);
+ code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL);
pthread_attr_destroy(&thAttr);
if (code != 0) {
dError("failed to create thread to process mgmt queue, reason:%s", strerror(errno));
@@ -176,6 +182,7 @@ void dnodeCleanupMgmt() {
tsMgmtQset = NULL;
tsMgmtQueue = NULL;
vnodeCleanupResources();
}
void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) {
@@ -242,8 +249,14 @@ static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
int32_t vnode = atoi(de->d_name + 5);
if (vnode == 0) continue;
- vnodeList[*numOfVnodes] = vnode;
(*numOfVnodes)++;
if (*numOfVnodes >= TSDB_MAX_VNODES) {
dError("vgId:%d, too many vnode directory in disk, exist:%d max:%d", vnode, *numOfVnodes, TSDB_MAX_VNODES);
continue;
} else {
vnodeList[*numOfVnodes - 1] = vnode;
}
}
}
closedir(dir);
@@ -275,13 +288,12 @@ static void *dnodeOpenVnode(void *param) {
}
static int32_t dnodeOpenVnodes() {
- int32_t *vnodeList = calloc(TSDB_MAX_VNODES, sizeof(int32_t));
+ int32_t vnodeList[TSDB_MAX_VNODES] = {0};
- int32_t numOfVnodes;
+ int32_t numOfVnodes = 0;
int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
- free(vnodeList);
return status;
}
@@ -327,7 +339,6 @@ static int32_t dnodeOpenVnodes() {
free(pThread->vnodeList);
}
- free(vnodeList);
free(threads);
dInfo("there are total vnodes:%d, openned:%d failed:%d", numOfVnodes, openVnodes, failedVnodes);
@@ -335,9 +346,9 @@ static int32_t dnodeOpenVnodes() {
}
void dnodeStartStream() {
- int32_t vnodeList[TSDB_MAX_VNODES];
+ int32_t vnodeList[TSDB_MAX_VNODES] = {0};
int32_t numOfVnodes = 0;
- int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
+ int32_t status = vnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
@@ -352,15 +363,14 @@ void dnodeStartStream() {
}
static void dnodeCloseVnodes() {
- int32_t *vnodeList = (int32_t *)malloc(sizeof(int32_t) * TSDB_MAX_VNODES);
+ int32_t vnodeList[TSDB_MAX_VNODES] = {0};
- int32_t numOfVnodes;
+ int32_t numOfVnodes = 0;
int32_t status;
- status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
+ status = vnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
- free(vnodeList);
return;
}
@@ -368,7 +378,6 @@ static void dnodeCloseVnodes() {
vnodeClose(vnodeList[i]);
}
- free(vnodeList);
dInfo("total vnodes:%d are all closed", numOfVnodes);
}
@@ -391,7 +400,7 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId);
}
- void *pVnode = vnodeAccquireVnode(pCreate->cfg.vgId);
+ void *pVnode = vnodeAcquireVnode(pCreate->cfg.vgId);
if (pVnode != NULL) {
int32_t code = vnodeAlter(pVnode, pCreate);
vnodeRelease(pVnode);

View File

@@ -98,11 +98,7 @@ void dnodeDispatchToVnodeReadQueue(SRpcMsg *pMsg) {
pHead->vgId = htonl(pHead->vgId);
pHead->contLen = htonl(pHead->contLen);
- if (pMsg->msgType == TSDB_MSG_TYPE_FETCH) {
- pVnode = vnodeGetVnode(pHead->vgId);
- } else {
- pVnode = vnodeAccquireVnode(pHead->vgId);
- }
+ pVnode = vnodeAcquireVnode(pHead->vgId);
if (pVnode == NULL) {
leftLen -= pHead->contLen;
@@ -179,24 +175,19 @@ void dnodeFreeVnodeRqueue(void *rqueue) {
// dynamically adjust the number of threads
}
- static void dnodeContinueExecuteQuery(void* pVnode, void* qhandle, SReadMsg *pMsg) {
+ void dnodePutItemIntoReadQueue(void *pVnode, void *qhandle) {
SReadMsg *pRead = (SReadMsg *)taosAllocateQitem(sizeof(SReadMsg));
- pRead->rpcMsg = pMsg->rpcMsg;
- pRead->pCont = qhandle;
- pRead->contLen = 0;
pRead->rpcMsg.msgType = TSDB_MSG_TYPE_QUERY;
+ pRead->pCont = qhandle;
+ pRead->contLen = 0;
- taos_queue queue = vnodeGetRqueue(pVnode);
- taosWriteQitem(queue, TAOS_QTYPE_RPC, pRead);
+ assert(pVnode != NULL);
+ taos_queue queue = vnodeAcquireRqueue(pVnode);
+ taosWriteQitem(queue, TAOS_QTYPE_QUERY, pRead);
}
void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) {
- if (code == TSDB_CODE_VND_ACTION_IN_PROGRESS) return;
- if (code == TSDB_CODE_VND_ACTION_NEED_REPROCESSED) {
- dnodeContinueExecuteQuery(pVnode, pRead->rspRet.qhandle, pRead);
- code = TSDB_CODE_SUCCESS;
- }
SRpcMsg rpcRsp = {
.handle = pRead->rpcMsg.handle,
.pCont = pRead->rspRet.rsp,
@@ -206,6 +197,12 @@ void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) {
rpcSendResponse(&rpcRsp);
rpcFreeCont(pRead->rpcMsg.pCont);
vnodeRelease(pVnode);
}
void dnodeDispatchNonRspMsg(void *pVnode, SReadMsg *pRead, int32_t code) {
vnodeRelease(pVnode);
return;
}
static void *dnodeProcessReadQueue(void *param) {
@@ -219,9 +216,16 @@ static void *dnodeProcessReadQueue(void *param) {
break;
}
- dDebug("%p, msg:%s will be processed in vread queue", pReadMsg->rpcMsg.ahandle, taosMsg[pReadMsg->rpcMsg.msgType]);
+ dDebug("%p, msg:%s will be processed in vread queue, qtype:%d", pReadMsg->rpcMsg.ahandle,
+ taosMsg[pReadMsg->rpcMsg.msgType], type);
int32_t code = vnodeProcessRead(pVnode, pReadMsg);
- dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
if (type == TAOS_QTYPE_RPC) {
dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
} else {
dnodeDispatchNonRspMsg(pVnode, pReadMsg, code);
}
taosFreeQitem(pReadMsg);
}

View File

@@ -210,6 +210,7 @@ static void *dnodeProcessWriteQueue(void *param) {
int32_t numOfMsgs;
int type;
void *pVnode, *item;
+ SRspRet *pRspRet;
dDebug("write worker:%d is running", pWorker->workerId);
@@ -222,9 +223,11 @@ static void *dnodeProcessWriteQueue(void *param) {
for (int32_t i = 0; i < numOfMsgs; ++i) {
pWrite = NULL;
+ pRspRet = NULL;
taosGetQitem(pWorker->qall, &type, &item);
if (type == TAOS_QTYPE_RPC) {
pWrite = (SWriteMsg *)item;
+ pRspRet = &pWrite->rspRet;
pHead = (SWalHead *)(pWrite->pCont - sizeof(SWalHead));
pHead->msgType = pWrite->rpcMsg.msgType;
pHead->version = 0;
@@ -234,7 +237,7 @@ static void *dnodeProcessWriteQueue(void *param) {
pHead = (SWalHead *)item;
}
- int32_t code = vnodeProcessWrite(pVnode, type, pHead, item);
+ int32_t code = vnodeProcessWrite(pVnode, type, pHead, pRspRet);
if (pWrite) pWrite->rpcMsg.code = code;
}
@@ -247,6 +250,11 @@ static void *dnodeProcessWriteQueue(void *param) {
if (type == TAOS_QTYPE_RPC) {
pWrite = (SWriteMsg *)item;
dnodeSendRpcVnodeWriteRsp(pVnode, item, pWrite->rpcMsg.code);
+ } else if (type == TAOS_QTYPE_FWD) {
+ pHead = (SWalHead *)item;
+ vnodeConfirmForward(pVnode, pHead->version, 0);
+ taosFreeQitem(item);
+ vnodeRelease(pVnode);
} else {
taosFreeQitem(item);
vnodeRelease(pVnode);

View File

@@ -53,6 +53,7 @@ void *dnodeAllocateVnodeWqueue(void *pVnode);
void dnodeFreeVnodeWqueue(void *queue);
void *dnodeAllocateVnodeRqueue(void *pVnode);
void dnodeFreeVnodeRqueue(void *rqueue);
+ void dnodePutItemIntoReadQueue(void *pVnode, void *qhandle);
void dnodeSendRpcVnodeWriteRsp(void *pVnode, void *param, int32_t code);
int32_t dnodeAllocateMnodePqueue();

View File

@@ -84,6 +84,13 @@ bool qHasMoreResultsToRetrieve(qinfo_t qinfo);
 */
int32_t qKillQuery(qinfo_t qinfo);
+ void* qOpenQueryMgmt(int32_t vgId);
+ void qSetQueryMgmtClosed(void* pExecutor);
+ void qCleanupQueryMgmt(void* pExecutor);
+ void** qRegisterQInfo(void* pMgmt, void* qInfo);
+ void** qAcquireQInfo(void* pMgmt, void** key);
+ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool needFree);
#ifdef __cplusplus
}
#endif

View File

@@ -365,6 +365,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TAOS_QTYPE_FWD 1
#define TAOS_QTYPE_WAL 2
#define TAOS_QTYPE_CQ 3
+ #define TAOS_QTYPE_QUERY 4
typedef enum {
TSDB_SUPER_TABLE = 0, // super table

View File

@@ -180,7 +180,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISK_PERMISSIONS, 0, 0x0506, "vnode no d
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "vnode no such file or directory")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "vnode out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "vnode app error")
- TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0214, "vnode no write auth")
+ TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_STATUS, 0, 0x0510, "vnode not in ready state")
+ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "vnode not in synced state")
+ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "vnode no write auth")
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, 0, 0x0600, "tsdb invalid table id")
@@ -200,6 +202,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_ACTION, 0, 0x060D, "tsdb inval
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CREATE_TB_MSG, 0, 0x060E, "tsdb invalid create table message")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, 0, 0x060F, "tsdb no table data in memory skiplist")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, 0, 0x0610, "tsdb file already exists")
+ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, 0, 0x0611, "tsdb need to reconfigure table")
// query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, 0, 0x0700, "query invalid handle")

View File

@@ -203,8 +203,7 @@ typedef struct SSubmitBlk {
typedef struct SSubmitMsg {
SMsgHead header;
int32_t length;
- int32_t compressed : 2;
- int32_t numOfBlocks : 30;
+ int32_t numOfBlocks;
SSubmitBlk blocks[];
} SSubmitMsg;
@@ -285,6 +284,8 @@ typedef struct {
int32_t tid;
int16_t tversion;
int16_t colId;
+ int8_t type;
+ int16_t bytes;
int32_t tagValLen;
int16_t numOfTags;
int32_t schemaLen;

View File

@@ -108,12 +108,14 @@ void tsdbClearTableCfg(STableCfg *config);
void* tsdbGetTableTagVal(const void* pTable, int32_t colId, int16_t type, int16_t bytes);
char* tsdbGetTableName(void *pTable);
+ STableId tsdbGetTableId(void *pTable);
+ #define TSDB_TABLEID(_table) ((STableId*) (_table))
STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg);
int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg);
int tsdbDropTable(TSDB_REPO_T *pRepo, STableId tableId);
- int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg);
+ int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg);
TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid);
void tsdbStartStream(TSDB_REPO_T *repo);

View File

@@ -49,17 +49,23 @@ int32_t vnodeAlter(void *pVnode, SMDCreateVnodeMsg *pVnodeCfg);
int32_t vnodeClose(int32_t vgId);
void vnodeRelease(void *pVnode);
- void* vnodeAccquireVnode(int32_t vgId); // add refcount
+ void* vnodeAcquireVnode(int32_t vgId); // add refcount
void* vnodeGetVnode(int32_t vgId); // keep refcount unchanged
+ void* vnodeAcquireRqueue(void *);
void* vnodeGetRqueue(void *);
void* vnodeGetWqueue(int32_t vgId);
void* vnodeGetWal(void *pVnode);
int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item);
+ int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
- void vnodeBuildStatusMsg(void * param);
+ void vnodeBuildStatusMsg(void *param);
+ void vnodeConfirmForward(void *param, uint64_t version, int32_t code);
void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes);
+ int32_t vnodeInitResources();
+ void vnodeCleanupResources();
int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg);
#ifdef __cplusplus

View File

@@ -32,6 +32,7 @@
#include <time.h>
#include <unistd.h>
#include <wordexp.h>
#include <regex.h>
#include "taos.h" #include "taos.h"
#include "tutil.h" #include "tutil.h"
@ -54,6 +55,7 @@ static struct argp_option options[] = {
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3}, {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3}, {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3}, {0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
{0, 's', "sql file", 0, "The select sql file.", 3},
{0, 'M', 0, 0, "Use metric flag.", 13},
{0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 14},
{0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 6},
@@ -79,6 +81,7 @@ typedef struct DemoArguments {
char *password;
char *database;
char *tb_prefix;
char *sqlFile;
bool use_metric;
bool insert_only;
char *output_file;
@@ -120,6 +123,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'o':
arguments->output_file = arg;
break;
case 's':
arguments->sqlFile = arg;
break;
case 'q':
arguments->mode = atoi(arg);
break;
@@ -179,10 +185,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->tb_prefix = arg;
break;
case 'M':
- arguments->use_metric = true;
+ arguments->use_metric = false;
break;
case 'x':
- arguments->insert_only = true;
+ arguments->insert_only = false;
break;
case 'c':
if (wordexp(arg, &full_path, 0) != 0) {
@@ -253,6 +259,9 @@ typedef struct {
int data_of_rate;
int64_t start_time;
bool do_aggreFunc;
char* cols;
bool use_metric;
sem_t mutex_sem;
int notFinished;
@@ -305,6 +314,8 @@ void rand_string(char *str, int size);
double getCurrentTime();
void callBack(void *param, TAOS_RES *res, int code);
void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass);
void querySqlFile(TAOS* taos, char* sqlFile);
int main(int argc, char *argv[]) {
SDemoArguments arguments = { NULL, // host
@@ -313,6 +324,7 @@ int main(int argc, char *argv[]) {
"taosdata", // password
"test", // database
"t", // tb_prefix
NULL,
false, // use_metric
false, // insert_only
"./output.txt", // output_file
@@ -361,7 +373,7 @@ int main(int argc, char *argv[]) {
abort();
#endif
}
enum MODE query_mode = arguments.mode;
char *ip_addr = arguments.host;
uint16_t port = arguments.port;
@@ -385,6 +397,13 @@ int main(int argc, char *argv[]) {
char dataString[STRING_LEN];
bool do_aggreFunc = true;
if (NULL != arguments.sqlFile) {
TAOS* qtaos = taos_connect(ip_addr, user, pass, db_name, port);
querySqlFile(qtaos, arguments.sqlFile);
taos_close(qtaos);
return 0;
}
memset(dataString, 0, STRING_LEN);
int len = 0;
@@ -495,47 +514,19 @@ int main(int argc, char *argv[]) {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d))", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
}
- if (!use_metric) {
+ if (use_metric) {
- /* Create all the tables; */
- printf("Creating %d table(s)......\n", ntables);
- for (int i = 0; i < ntables; i++) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", db_name, tb_prefix, i, cols);
- queryDB(taos, command);
- }
- printf("Table(s) created!\n");
- taos_close(taos);
- } else {
/* Create metric table */
printf("Creating meters super table...\n");
snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
queryDB(taos, command);
printf("meters created!\n");
- /* Create all the tables; */
- printf("Creating %d table(s)......\n", ntables);
- for (int i = 0; i < ntables; i++) {
- int j;
- if (i % 10 == 0) {
- j = 10;
- } else {
- j = i % 10;
- }
- if (j % 2 == 0) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "shanghai");
- } else {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "beijing");
- }
- queryDB(taos, command);
- }
- printf("Table(s) created!\n");
taos_close(taos);
}
/* Wait for table to create */
+ multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass);
/* Insert data */
double ts = getCurrentTime();
printf("Inserting data......\n");
@@ -685,6 +676,198 @@ int main(int argc, char *argv[]) {
return 0;
}
#define MAX_SQL_SIZE 65536
void selectSql(TAOS* taos, char* sqlcmd)
{
TAOS_RES *pSql = taos_query(taos, sqlcmd);
int32_t code = taos_errno(pSql);
if (code != 0) {
printf("Failed to sqlcmd:%s, reason:%s\n", sqlcmd, taos_errstr(pSql));
taos_free_result(pSql);
exit(1);
}
int count = 0;
while (taos_fetch_row(pSql) != NULL) {
count++;
}
taos_free_result(pSql);
return;
}
/* Function to do regular expression check */
static int regexMatch(const char *s, const char *reg, int cflags) {
regex_t regex;
char msgbuf[100] = {0};
/* Compile regular expression */
if (regcomp(&regex, reg, cflags) != 0) {
printf("Fail to compile regex\n");
exit(-1);
}
/* Execute regular expression */
int reti = regexec(&regex, s, 0, NULL, 0);
if (!reti) {
regfree(&regex);
return 1;
} else if (reti == REG_NOMATCH) {
regfree(&regex);
return 0;
} else {
regerror(reti, &regex, msgbuf, sizeof(msgbuf));
printf("Regex match failed: %s\n", msgbuf);
regfree(&regex);
exit(-1);
}
return 0;
}
static int isCommentLine(char *line) {
if (line == NULL) return 1;
return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
}
void querySqlFile(TAOS* taos, char* sqlFile)
{
FILE *fp = fopen(sqlFile, "r");
if (fp == NULL) {
printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
exit(-1);
}
int read_len = 0;
char * cmd = calloc(1, MAX_SQL_SIZE);
size_t cmd_len = 0;
char * line = NULL;
size_t line_len = 0;
double t = getCurrentTime();
while ((read_len = getline(&line, &line_len, fp)) != -1) {
if (read_len >= MAX_SQL_SIZE) continue;
line[--read_len] = '\0';
if (read_len == 0 || isCommentLine(line)) { // line starts with #
continue;
}
if (line[read_len - 1] == '\\') {
line[read_len - 1] = ' ';
memcpy(cmd + cmd_len, line, read_len);
cmd_len += read_len;
continue;
}
memcpy(cmd + cmd_len, line, read_len);
selectSql(taos, cmd);
memset(cmd, 0, MAX_SQL_SIZE);
cmd_len = 0;
}
t = getCurrentTime() - t;
printf("run %s took %.6f second(s)\n\n", sqlFile, t);
free(cmd);
if (line) free(line);
fclose(fp);
return;
}
void * createTable(void *sarg)
{
char command[BUFFER_SIZE] = "\0";
info *winfo = (info *)sarg;
if (!winfo->use_metric) {
/* Create all the tables; */
printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
queryDB(winfo->taos, command);
}
taos_close(winfo->taos);
} else {
/* Create all the tables; */
printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
int j;
if (i % 10 == 0) {
j = 10;
} else {
j = i % 10;
}
if (j % 2 == 0) {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "shanghai");
} else {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "beijing");
}
queryDB(winfo->taos, command);
}
taos_close(winfo->taos);
}
return NULL;
}
void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass) {
double ts = getCurrentTime();
printf("create table......\n");
pthread_t *pids = malloc(threads * sizeof(pthread_t));
info *infos = malloc(threads * sizeof(info));
int a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
int b = 0;
if (threads != 0)
b = ntables % threads;
int last = 0;
for (int i = 0; i < threads; i++) {
info *t_info = infos + i;
t_info->threadID = i;
tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE);
t_info->taos = taos_connect(ip_addr, user, pass, db_name, port);
t_info->start_table_id = last;
t_info->end_table_id = i < b ? last + a : last + a - 1;
last = t_info->end_table_id + 1;
t_info->use_metric = use_metric;
t_info->cols = cols;
pthread_create(pids + i, NULL, createTable, t_info);
}
for (int i = 0; i < threads; i++) {
pthread_join(pids[i], NULL);
}
double t = getCurrentTime() - ts;
printf("Spent %.4f seconds to create %d tables with %d connections\n", t, ntables, threads);
for (int i = 0; i < threads; i++) {
info *t_info = infos + i;
taos_close(t_info->taos);
sem_destroy(&(t_info->mutex_sem));
sem_destroy(&(t_info->lock_sem));
}
free(pids);
free(infos);
return ;
}
void *readTable(void *sarg) {
info *rinfo = (info *)sarg;
TAOS *taos = rinfo->taos;

View File

@@ -13,4 +13,6 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
ADD_EXECUTABLE(taosmigrate ${SRC})
TARGET_LINK_LIBRARIES(taosmigrate common tutil cJson)
ENDIF ()
SET_SOURCE_FILES_PROPERTIES(./taosmigrate.c PROPERTIES COMPILE_FLAGS -w)

View File

@@ -68,7 +68,7 @@ int32_t mnodeInitProfile() {
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_STREAM, mnodeProcessKillStreamMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_CONN, mnodeProcessKillConnectionMsg);
- tsMnodeConnCache = taosCacheInitWithCb(TSDB_DATA_TYPE_INT, CONN_CHECK_TIME, false, mnodeFreeConn, "conn");
+ tsMnodeConnCache = taosCacheInit(TSDB_DATA_TYPE_INT, CONN_CHECK_TIME, false, mnodeFreeConn, "conn");
return 0;
}

View File

@@ -65,7 +65,7 @@ int32_t mnodeInitShow() {
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_CONNECT, mnodeProcessConnectMsg);
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_USE_DB, mnodeProcessUseMsg);
- tsMnodeShowCache = taosCacheInitWithCb(TSDB_DATA_TYPE_INT, 5, false, mnodeFreeShowObj, "show");
+ tsMnodeShowCache = taosCacheInit(TSDB_DATA_TYPE_INT, 5, false, mnodeFreeShowObj, "show");
return 0;
}

View File

@@ -165,6 +165,8 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) {
}
mnodeDecDnodeRef(pDnode);
}
+ free(pNew);
}
mnodeVgroupUpdateIdPool(pVgroup);

View File

@@ -86,9 +86,28 @@ extern "C" {
} \
}
#ifdef TAOS_RANDOM_NETWORK_FAIL
ssize_t taos_send_random_fail(int sockfd, const void *buf, size_t len, int flags);
ssize_t taos_sendto_random_fail(int sockfd, const void *buf, size_t len, int flags,
const struct sockaddr *dest_addr, socklen_t addrlen);
ssize_t taos_read_random_fail(int fd, void *buf, size_t count);
ssize_t taos_write_random_fail(int fd, const void *buf, size_t count);
#define send(sockfd, buf, len, flags) taos_send_random_fail(sockfd, buf, len, flags)
#define sendto(sockfd, buf, len, flags, dest_addr, addrlen) \
taos_sendto_random_fail(sockfd, buf, len, flags, dest_addr, addrlen)
#define taosWriteSocket(fd, buf, len) taos_write_random_fail(fd, buf, len)
#define taosReadSocket(fd, buf, len) taos_read_random_fail(fd, buf, len)
#else
#define taosWriteSocket(fd, buf, len) write(fd, buf, len)
#define taosReadSocket(fd, buf, len) read(fd, buf, len)
+ #endif /* TAOS_RANDOM_NETWORK_FAIL */
#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)

View File

@@ -270,3 +270,49 @@ int tSystem(const char * cmd)
}
}
#ifdef TAOS_RANDOM_NETWORK_FAIL
#define RANDOM_NETWORK_FAIL_FACTOR 20
ssize_t taos_send_random_fail(int sockfd, const void *buf, size_t len, int flags)
{
if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
errno = ECONNRESET;
return -1;
}
return send(sockfd, buf, len, flags);
}
ssize_t taos_sendto_random_fail(int sockfd, const void *buf, size_t len, int flags,
const struct sockaddr *dest_addr, socklen_t addrlen)
{
if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
errno = ECONNRESET;
return -1;
}
return sendto(sockfd, buf, len, flags, dest_addr, addrlen);
}
ssize_t taos_read_random_fail(int fd, void *buf, size_t count)
{
if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
errno = ECONNRESET;
return -1;
}
return read(fd, buf, count);
}
ssize_t taos_write_random_fail(int fd, const void *buf, size_t count)
{
if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
errno = EINTR;
return -1;
}
return write(fd, buf, count);
}
#endif /* TAOS_RANDOM_NETWORK_FAIL */
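The pattern here is a compile-time switch that swaps the normal socket calls for wrappers which fail at random, so network-error paths get exercised without a real outage. A self-contained sketch of the same pattern follows; the names and failure factor are invented for the demo, not TDengine's.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define DEMO_FAIL_FACTOR 5   // roughly one call in five fails

// wrapper that randomly reports a network-style error instead of writing
static ssize_t demo_write_random_fail(int fd, const void *buf, size_t count) {
  if (rand() % DEMO_FAIL_FACTOR == 0) {
    errno = ECONNRESET;
    return -1;
  }
  return write(fd, buf, count);
}

// call sites keep using demoWriteSocket(); the macro decides whether faults are injected
#define demoWriteSocket(fd, buf, len) demo_write_random_fail(fd, buf, len)

int main(void) {
  const char msg[] = "payload\n";
  for (int i = 0; i < 10; ++i) {
    if (demoWriteSocket(STDOUT_FILENO, msg, sizeof(msg) - 1) < 0) {
      fprintf(stderr, "write failed (injected): %s\n", strerror(errno));
    }
  }
  return 0;
}
```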

View File

@@ -160,7 +160,7 @@ static void taosGetSystemTimezone() {
/* load time zone string from /etc/timezone */
FILE *f = fopen("/etc/timezone", "r");
- char buf[65] = {0};
+ char buf[68] = {0};
if (f != NULL) {
int len = fread(buf, 64, 1, f);
if(len < 64 && ferror(f)) {
@@ -170,18 +170,17 @@
}
fclose(f);
- }
char *lineEnd = strstr(buf, "\n");
if (lineEnd != NULL) {
*lineEnd = 0;
}
// for CentOS system, /etc/timezone does not exist. Ignore the TZ environment variables
if (strlen(buf) > 0) {
setenv("TZ", buf, 1);
+ }
}
// get and set default timezone
tzset();

View File

@@ -26,8 +26,6 @@ extern int32_t httpDebugFlag;
#define httpInfo(...) { if (httpDebugFlag & DEBUG_INFO) { taosPrintLog("HTP INFO ", 255, __VA_ARGS__); }}
#define httpDebug(...) { if (httpDebugFlag & DEBUG_DEBUG) { taosPrintLog("HTP DEBUG ", httpDebugFlag, __VA_ARGS__); }}
#define httpTrace(...) { if (httpDebugFlag & DEBUG_TRACE) { taosPrintLog("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }}
+ #define httpTraceL(...){ if (httpDebugFlag & DEBUG_TRACE) { taosPrintLongString("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }}
- #define httpDebugDump(...) { if (httpDebugFlag & DEBUG_DEBUG) { taosPrintLongString("HTP DEBUG ", httpDebugFlag, __VA_ARGS__); }}
- #define httpTraceDump(...) { if (httpDebugFlag & DEBUG_TRACE) { taosPrintLongString("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }}
#endif

View File

@@ -58,7 +58,7 @@ static void httpDestroyContext(void *data) {
}
bool httpInitContexts() {
- tsHttpServer.contextCache = taosCacheInitWithCb(TSDB_DATA_TYPE_BIGINT, 2, false, httpDestroyContext, "restc");
+ tsHttpServer.contextCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, 2, false, httpDestroyContext, "restc");
if (tsHttpServer.contextCache == NULL) {
httpError("failed to init context cache");
return false;

View File

@@ -313,9 +313,9 @@ bool httpParseRequest(HttpContext* pContext) {
return true;
}
- httpTraceDump("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
+ httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize,
pContext->parser.buffer);
if (!httpGetHttpMethod(pContext)) {
return false;

View File

@@ -108,7 +108,7 @@ bool httpReadDataImp(HttpContext *pContext) {
static bool httpDecompressData(HttpContext *pContext) {
if (pContext->contentEncoding != HTTP_COMPRESS_GZIP) {
- httpTraceDump("context:%p, fd:%d, ip:%s, content:%s", pContext, pContext->fd, pContext->ipstr, pContext->parser.data.pos);
+ httpTraceL("context:%p, fd:%d, ip:%s, content:%s", pContext, pContext->fd, pContext->ipstr, pContext->parser.data.pos);
return true;
}
@@ -124,8 +124,8 @@ static bool httpDecompressData(HttpContext *pContext) {
if (ret == 0) {
memcpy(pContext->parser.data.pos, decompressBuf, decompressBufLen);
pContext->parser.data.pos[decompressBufLen] = 0;
- httpTraceDump("context:%p, fd:%d, ip:%s, rawSize:%d, decompressSize:%d, content:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->parser.data.len, decompressBufLen, decompressBuf);
+ httpTraceL("context:%p, fd:%d, ip:%s, rawSize:%d, decompressSize:%d, content:%s", pContext, pContext->fd,
+ pContext->ipstr, pContext->parser.data.len, decompressBufLen, decompressBuf);
pContext->parser.data.len = decompressBufLen;
} else {
httpError("context:%p, fd:%d, ip:%s, failed to decompress data, rawSize:%d, error:%d",

View File

@@ -115,7 +115,7 @@ void httpCleanUpSessions() {
}
bool httpInitSessions() {
- tsHttpServer.sessionCache = taosCacheInitWithCb(TSDB_DATA_TYPE_BINARY, 5, false, httpDestroySession, "rests");
+ tsHttpServer.sessionCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, httpDestroySession, "rests");
if (tsHttpServer.sessionCache == NULL) {
httpError("failed to init session cache");
return false;

View File

@ -166,8 +166,8 @@ void httpProcessMultiSql(HttpContext *pContext) {
HttpSqlCmd *cmd = multiCmds->cmds + multiCmds->pos; HttpSqlCmd *cmd = multiCmds->cmds + multiCmds->pos;
char *sql = httpGetCmdsString(pContext, cmd->sql); char *sql = httpGetCmdsString(pContext, cmd->sql);
httpTraceDump("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd, httpTraceL("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd,
pContext->ipstr, pContext->user, multiCmds->pos, sql); pContext->ipstr, pContext->user, multiCmds->pos, sql);
taosNotePrintHttp(sql); taosNotePrintHttp(sql);
taos_query_a(pContext->session->taos, sql, httpProcessMultiSqlCallBack, (void *)pContext); taos_query_a(pContext->session->taos, sql, httpProcessMultiSqlCallBack, (void *)pContext);
} }
@ -306,8 +306,8 @@ void httpProcessSingleSqlCmd(HttpContext *pContext) {
return; return;
} }
httpTraceDump("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr, httpTraceL("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr,
pContext->user, sql); pContext->user, sql);
taosNotePrintHttp(sql); taosNotePrintHttp(sql);
taos_query_a(pSession->taos, sql, httpProcessSingleSqlCallBack, (void *)pContext); taos_query_a(pSession->taos, sql, httpProcessSingleSqlCallBack, (void *)pContext);
} }
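Note on the pattern above: the REST handlers log the statement with the long-form trace macro and then hand it to the asynchronous client call, passing the HttpContext through as the callback argument so the reply can be assembled later. A minimal sketch of that submit-and-return flow follows; it assumes the TDengine C client header (taos.h), default credentials, and a callback prototype of (param, result, code), none of which are shown in this commit.

/* Assumes the TDengine C client; the callback signature is an assumption, not taken from this diff. */
#include <stdio.h>
#include <stdlib.h>
#include <taos.h>

typedef struct DemoRequest {
  int requestId;
} DemoRequest;

/* assumed async callback prototype: (param, result set, error/row code) */
static void demoCallback(void *param, TAOS_RES *result, int code) {
  DemoRequest *req = param;
  printf("request %d completed, code:%d\n", req->requestId, code);
  taos_free_result(result);
  free(req);                                   /* the request context rode along with the call */
}

static void demoSubmit(TAOS *conn, const char *sql, int requestId) {
  DemoRequest *req = calloc(1, sizeof(DemoRequest));
  req->requestId = requestId;
  taos_query_a(conn, sql, demoCallback, req);  /* returns immediately, no blocking */
}

int main(void) {
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);  /* default credentials assumed */
  if (conn == NULL) {
    fprintf(stderr, "connect failed\n");
    return 1;
  }
  demoSubmit(conn, "show databases", 1);
  getchar();                                   /* crude wait so the callback has a chance to fire */
  taos_close(conn);
  return 0;
}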


@ -35,9 +35,6 @@
#define monitorDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON DEBUG ", monitorDebugFlag, __VA_ARGS__); }} #define monitorDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON DEBUG ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorTrace(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON TRACE ", monitorDebugFlag, __VA_ARGS__); }} #define monitorTrace(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON TRACE ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorDebugDump(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLongString("MON DEBUG ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorTraceDump(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLongString("MON TRACE ", monitorDebugFlag, __VA_ARGS__); }}
#define SQL_LENGTH 1024 #define SQL_LENGTH 1024
#define LOG_LEN_STR 100 #define LOG_LEN_STR 100
#define IP_LEN_STR 18 #define IP_LEN_STR 18


@ -27,7 +27,4 @@ extern int32_t mqttDebugFlag;
#define mqttDebug(...) { if (mqttDebugFlag & DEBUG_DEBUG) { taosPrintLog("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }} #define mqttDebug(...) { if (mqttDebugFlag & DEBUG_DEBUG) { taosPrintLog("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }}
#define mqttTrace(...) { if (mqttDebugFlag & DEBUG_TRACE) { taosPrintLog("MQT TRACE ", mqttDebugFlag, __VA_ARGS__); }} #define mqttTrace(...) { if (mqttDebugFlag & DEBUG_TRACE) { taosPrintLog("MQT TRACE ", mqttDebugFlag, __VA_ARGS__); }}
#define mqttDebugDump(...) { if (mqttDebugFlag & DEBUG_DEBUG) { taosPrintLongString("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }}
#define mqttTraceDump(...) { if (mqttDebugFlag & DEBUG_TRACE) { taosPrintLongString("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }}
#endif #endif


@ -111,7 +111,7 @@ void mqttStopSystem() {
} }
void mqttCleanUpSystem() { void mqttCleanUpSystem() {
mqttInfo("starting to clean up mqtt"); mqttInfo("starting to cleanup mqtt");
free(recntStatus.user_name); free(recntStatus.user_name);
free(recntStatus.password); free(recntStatus.password);
free(recntStatus.hostname); free(recntStatus.hostname);


@ -154,6 +154,7 @@ typedef struct SQuery {
} SQuery; } SQuery;
typedef struct SQueryRuntimeEnv { typedef struct SQueryRuntimeEnv {
jmp_buf env;
SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo
SQuery* pQuery; SQuery* pQuery;
SQLFunctionCtx* pCtx; SQLFunctionCtx* pCtx;
@ -169,6 +170,8 @@ typedef struct SQueryRuntimeEnv {
void* pSecQueryHandle; // another thread for void* pSecQueryHandle; // another thread for
bool stableQuery; // super table query or not bool stableQuery; // super table query or not
bool topBotQuery; // false bool topBotQuery; // false
bool groupbyNormalCol; // denote if this is a groupby normal column query
bool hasTagResults; // if there are tag values in final result or not
int32_t prevGroupId; // previous executed group id int32_t prevGroupId; // previous executed group id
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
} SQueryRuntimeEnv; } SQueryRuntimeEnv;
@ -197,8 +200,10 @@ typedef struct SQInfo {
*/ */
int32_t tableIndex; int32_t tableIndex;
int32_t numOfGroupResultPages; int32_t numOfGroupResultPages;
_qinfo_free_fn_t freeFn; _qinfo_free_fn_t freeFn; //todo remove it
jmp_buf env;
void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables;
} SQInfo; } SQInfo;
#endif // TDENGINE_QUERYEXECUTOR_H #endif // TDENGINE_QUERYEXECUTOR_H
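The jmp_buf now lives in SQueryRuntimeEnv rather than SQInfo, so any helper that runs inside the scan can abort with longjmp(pRuntimeEnv->env, code) and land back at the setjmp point in qTableQuery further down in this commit. A standalone sketch of that unwind path, with invented Demo* names and error codes, assuming nothing beyond standard C:

#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_CODE_SUCCESS        0
#define DEMO_CODE_OUT_OF_MEMORY  1   /* stands in for TSDB_CODE_QRY_OUT_OF_MEMORY */

typedef struct DemoRuntimeEnv {
  jmp_buf env;                       /* jump target owned by the runtime environment */
  int     code;
} DemoRuntimeEnv;

/* deep helper: no error-code plumbing, it unwinds directly on failure */
static void *demoAlloc(DemoRuntimeEnv *pEnv, size_t size) {
  void *p = calloc(1, size);
  if (p == NULL) {
    longjmp(pEnv->env, DEMO_CODE_OUT_OF_MEMORY);
  }
  return p;
}

static void demoScanBlocks(DemoRuntimeEnv *pEnv) {
  void *buf = demoAlloc(pEnv, (size_t)-1);     /* absurd request to force the failure path */
  free(buf);
}

/* top-level entry, analogous to qTableQuery() */
static int demoTableQuery(DemoRuntimeEnv *pEnv) {
  int ret = setjmp(pEnv->env);
  if (ret != DEMO_CODE_SUCCESS) {              /* control returns here after a longjmp */
    pEnv->code = ret;
    return ret;
  }
  demoScanBlocks(pEnv);
  return DEMO_CODE_SUCCESS;
}

int main(void) {
  DemoRuntimeEnv env = {.code = 0};
  printf("query returned code %d\n", demoTableQuery(&env));
  return 0;
}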


@ -60,8 +60,6 @@ typedef struct SPoint {
void * val; void * val;
} SPoint; } SPoint;
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, char timeUnit, int16_t precision);
SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType,
SFillColInfo* pFillCol); SFillColInfo* pFillCol);


@ -13,8 +13,10 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include "os.h" #include "os.h"
#include "taosmsg.h" #include "tcache.h"
#include "tglobal.h"
#include "qfill.h" #include "qfill.h"
#include "taosmsg.h"
#include "hash.h" #include "hash.h"
#include "qExecutor.h" #include "qExecutor.h"
@ -27,6 +29,7 @@
#include "exception.h" #include "exception.h"
#include "tscompression.h" #include "tscompression.h"
#include "ttime.h" #include "ttime.h"
#include "tfile.h"
/** /**
* check if the primary column is load by default, otherwise, the program will * check if the primary column is load by default, otherwise, the program will
@ -87,16 +90,28 @@ typedef struct {
STSCursor cur; STSCursor cur;
} SQueryStatusInfo; } SQueryStatusInfo;
#if 0
static UNUSED_FUNC void *u_malloc (size_t __size) { static UNUSED_FUNC void *u_malloc (size_t __size) {
// uint32_t v = rand(); uint32_t v = rand();
// if (v % 5 <= 1) { if (v % 5 <= 1) {
// return NULL; return NULL;
// } else { } else {
return malloc(__size); return malloc(__size);
// } }
} }
static UNUSED_FUNC void* u_calloc(size_t num, size_t __size) {
uint32_t v = rand();
if (v % 5 <= 1) {
return NULL;
} else {
return calloc(num, __size);
}
}
#define calloc u_calloc
#define malloc u_malloc #define malloc u_malloc
#endif
#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) #define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) #define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
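The u_malloc/u_calloc wrappers kept behind #if 0 above are fault injectors: they randomly return NULL so the new out-of-memory longjmp paths can be exercised, and flipping the guard to #if 1 redirects malloc/calloc in this translation unit through them. A self-contained sketch of the same idea, with invented names and the same roughly 2-in-5 failure rate:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* fail about 2 of every 5 allocations, mirroring the v % 5 <= 1 test above */
static void *faulty_malloc(size_t size) {
  if (rand() % 5 <= 1) {
    return NULL;
  }
  return malloc(size);                 /* still the real allocator underneath */
}

#define malloc faulty_malloc           /* redirect this translation unit only */

int main(void) {
  srand((unsigned)time(NULL));
  int failures = 0;
  for (int i = 0; i < 100; ++i) {
    void *p = malloc(64);
    if (p == NULL) {
      ++failures;                      /* the caller must survive the injected failure */
    } else {
      free(p);
    }
  }
  printf("%d of 100 allocations were failed on purpose\n", failures);
  return 0;
}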
@ -104,6 +119,7 @@ static UNUSED_FUNC void *u_malloc (size_t __size) {
static void setQueryStatus(SQuery *pQuery, int8_t status); static void setQueryStatus(SQuery *pQuery, int8_t status);
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0)
static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; } static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; }
// todo move to utility // todo move to utility
@ -309,6 +325,24 @@ static bool isTopBottomQuery(SQuery *pQuery) {
return false; return false;
} }
static bool hasTagValOutput(SQuery* pQuery) {
SExprInfo *pExprInfo = &pQuery->pSelectExpr[0];
if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) {
return true;
} else { // set tag value, by which the results are aggregated.
for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) {
SExprInfo *pLocalExprInfo = &pQuery->pSelectExpr[idx];
// ts_comp column required the tag value for join filter
if (TSDB_COL_IS_TAG(pLocalExprInfo->base.colInfo.flag)) {
return true;
}
}
}
return false;
}
static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, int32_t numOfCols, int32_t index) { static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, int32_t numOfCols, int32_t index) {
// for a tag column, no corresponding field info // for a tag column, no corresponding field info
SColIndex *pColIndex = &pQuery->pSelectExpr[index].base.colInfo; SColIndex *pColIndex = &pQuery->pSelectExpr[index].base.colInfo;
@ -652,26 +686,24 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStat
SQuery * pQuery = pRuntimeEnv->pQuery; SQuery * pQuery = pRuntimeEnv->pQuery;
SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
if (IS_MASTER_SCAN(pRuntimeEnv) || pStatus->closed) { for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
pCtx[k].nStartQueryTimestamp = pWin->skey; pCtx[k].nStartQueryTimestamp = pWin->skey;
pCtx[k].size = forwardStep; pCtx[k].size = forwardStep;
pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1); pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1);
if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
pCtx[k].ptsList = &tsBuf[offset]; pCtx[k].ptsList = &tsBuf[offset];
} }
// not a whole block involved in query processing, statistics data can not be used // not a whole block involved in query processing, statistics data can not be used
if (forwardStep != numOfTotal) { if (forwardStep != numOfTotal) {
pCtx[k].preAggVals.isSet = false; pCtx[k].preAggVals.isSet = false;
} }
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
aAggs[functionId].xFunction(&pCtx[k]); aAggs[functionId].xFunction(&pCtx[k]);
}
} }
} }
} }
@ -681,14 +713,12 @@ static void doRowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStatus
SQuery * pQuery = pRuntimeEnv->pQuery; SQuery * pQuery = pRuntimeEnv->pQuery;
SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
if (IS_MASTER_SCAN(pRuntimeEnv) || pStatus->closed) { for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { pCtx[k].nStartQueryTimestamp = pWin->skey;
pCtx[k].nStartQueryTimestamp = pWin->skey;
int32_t functionId = pQuery->pSelectExpr[k].base.functionId; int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
aAggs[functionId].xFunctionF(&pCtx[k], offset); aAggs[functionId].xFunctionF(&pCtx[k], offset);
}
} }
} }
} }
@ -782,8 +812,8 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas
return NULL; return NULL;
} }
char *dataBlock = NULL; char *dataBlock = NULL;
SQuery *pQuery = pRuntimeEnv->pQuery;
SQuery *pQuery = pRuntimeEnv->pQuery;
SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
int32_t functionId = pQuery->pSelectExpr[col].base.functionId; int32_t functionId = pQuery->pSelectExpr[col].base.functionId;
@ -802,6 +832,10 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas
sas->numOfCols = pQuery->numOfCols; sas->numOfCols = pQuery->numOfCols;
sas->data = calloc(pQuery->numOfCols, POINTER_BYTES); sas->data = calloc(pQuery->numOfCols, POINTER_BYTES);
if (sas->data == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
// here the pQuery->colList and sas->colList are identical // here the pQuery->colList and sas->colList are identical
int32_t numOfCols = taosArrayGetSize(pDataBlock); int32_t numOfCols = taosArrayGetSize(pDataBlock);
for (int32_t i = 0; i < pQuery->numOfCols; ++i) { for (int32_t i = 0; i < pQuery->numOfCols; ++i) {
@ -855,6 +889,9 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
} }
SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport));
if (sasArray == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock);
@ -862,7 +899,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
} }
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (isIntervalQuery(pQuery) && tsCols != NULL) { if (QUERY_IS_INTERVAL_QUERY(pQuery) && tsCols != NULL) {
int32_t offset = GET_COL_DATA_POS(pQuery, 0, step); int32_t offset = GET_COL_DATA_POS(pQuery, 0, step);
TSKEY ts = tsCols[offset]; TSKEY ts = tsCols[offset];
@ -1046,6 +1083,11 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) {
static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) { static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx); SResultInfo *pResInfo = GET_RES_INFO(pCtx);
SQuery* pQuery = pRuntimeEnv->pQuery; SQuery* pQuery = pRuntimeEnv->pQuery;
// in case of timestamp column, always generated results.
if (functionId == TSDB_FUNC_TS) {
return true;
}
if (pResInfo->complete || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { if (pResInfo->complete || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
return false; return false;
@ -1058,7 +1100,6 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
// todo add comments // todo add comments
if ((functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST)) { if ((functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST)) {
return pCtx->param[0].i64Key == pQuery->order.order; return pCtx->param[0].i64Key == pQuery->order.order;
// return !QUERY_IS_ASC_QUERY(pQuery);
} }
// in the supplementary scan, only the following functions need to be executed // in the supplementary scan, only the following functions need to be executed
@ -1079,8 +1120,12 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
SColumnInfoData* pColumnInfoData = (SColumnInfoData *)taosArrayGet(pDataBlock, 0); SColumnInfoData* pColumnInfoData = (SColumnInfoData *)taosArrayGet(pDataBlock, 0);
TSKEY *tsCols = (pColumnInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP)? (TSKEY*) pColumnInfoData->pData:NULL; TSKEY *tsCols = (pColumnInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP)? (TSKEY*) pColumnInfoData->pData:NULL;
bool groupbyColumnValue = isGroupbyNormalCol(pQuery->pGroupbyExpr); bool groupbyColumnValue = pRuntimeEnv->groupbyNormalCol;
SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport));
if (sasArray == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
int16_t type = 0; int16_t type = 0;
int16_t bytes = 0; int16_t bytes = 0;
@ -1226,7 +1271,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
STableQueryInfo* pTableQInfo = pQuery->current; STableQueryInfo* pTableQInfo = pQuery->current;
SWindowResInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo; SWindowResInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || pRuntimeEnv->groupbyNormalCol) {
rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock); rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock);
} else { } else {
blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock); blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock);
@ -1347,14 +1392,16 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY
} }
// set the output buffer for the selectivity + tag query // set the output buffer for the selectivity + tag query
static void setCtxTagColumnInfo(SQuery *pQuery, SQLFunctionCtx *pCtx) { static void setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx) {
SQuery* pQuery = pRuntimeEnv->pQuery;
if (isSelectivityWithTagsQuery(pQuery)) { if (isSelectivityWithTagsQuery(pQuery)) {
int32_t num = 0; int32_t num = 0;
int16_t tagLen = 0; int16_t tagLen = 0;
SQLFunctionCtx *p = NULL; SQLFunctionCtx *p = NULL;
SQLFunctionCtx **pTagCtx = calloc(pQuery->numOfOutput, POINTER_BYTES); SQLFunctionCtx **pTagCtx = calloc(pQuery->numOfOutput, POINTER_BYTES);
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SSqlFuncMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].base; SSqlFuncMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].base;
@ -1381,7 +1428,7 @@ static void setCtxTagColumnInfo(SQuery *pQuery, SQLFunctionCtx *pCtx) {
} }
} }
static void setWindowResultInfo(SResultInfo *pResultInfo, SQuery *pQuery, bool isStableQuery) { static FORCE_INLINE void setWindowResultInfo(SResultInfo *pResultInfo, SQuery *pQuery, bool isStableQuery) {
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
assert(pQuery->pSelectExpr[i].interBytes <= DEFAULT_INTERN_BUF_PAGE_SIZE); assert(pQuery->pSelectExpr[i].interBytes <= DEFAULT_INTERN_BUF_PAGE_SIZE);
@ -1472,7 +1519,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
resetCtxOutputBuf(pRuntimeEnv); resetCtxOutputBuf(pRuntimeEnv);
} }
setCtxTagColumnInfo(pQuery, pRuntimeEnv->pCtx); setCtxTagColumnInfo(pRuntimeEnv, pRuntimeEnv->pCtx);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
_clean: _clean:
@ -1520,7 +1567,6 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
} }
static bool isQueryKilled(SQInfo *pQInfo) { static bool isQueryKilled(SQInfo *pQInfo) {
return false;
return (pQInfo->code == TSDB_CODE_TSC_QUERY_CANCELLED); return (pQInfo->code == TSDB_CODE_TSC_QUERY_CANCELLED);
} }
@ -1636,8 +1682,7 @@ static bool onlyQueryTags(SQuery* pQuery) {
void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *realWin, STimeWindow *win) { void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *realWin, STimeWindow *win) {
assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime); assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime);
win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->slidingTimeUnit, pQuery->precision); win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->intervalTime, pQuery->slidingTimeUnit, pQuery->precision);
if (keyFirst > (INT64_MAX - pQuery->intervalTime)) { if (keyFirst > (INT64_MAX - pQuery->intervalTime)) {
/* /*
* if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between * if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between
@ -2113,7 +2158,7 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa
static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) { static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) {
// in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block
SQuery* pQuery = pRuntimeEnv->pQuery; SQuery* pQuery = pRuntimeEnv->pQuery;
if (!isIntervalQuery(pQuery) && !isGroupbyNormalCol(pQuery->pGroupbyExpr) && !isFixedOutputQuery(pQuery)) { if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pQuery)) {
SResultRec *pRec = &pQuery->rec; SResultRec *pRec = &pQuery->rec;
if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) { if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) {
@ -2167,7 +2212,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle); SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle);
// todo extract methods // todo extract methods
if (isIntervalQuery(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) { if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) {
STimeWindow realWin = TSWINDOW_INITIALIZER, w = TSWINDOW_INITIALIZER; STimeWindow realWin = TSWINDOW_INITIALIZER, w = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
@ -2213,7 +2258,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
setQueryStatus(pQuery, QUERY_COMPLETED); setQueryStatus(pQuery, QUERY_COMPLETED);
} }
if (isIntervalQuery(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) { if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) {
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP; int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP;
@ -2634,7 +2679,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
tfree(pTableList); tfree(pTableList);
qError("QInfo:%p failed alloc memory", pQInfo); qError("QInfo:%p failed alloc memory", pQInfo);
longjmp(pQInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
} }
// todo opt for the case of one table per group // todo opt for the case of one table per group
@ -2642,7 +2687,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
for (int32_t i = 0; i < size; ++i) { for (int32_t i = 0; i < size; ++i) {
STableQueryInfo *item = taosArrayGetP(pGroup, i); STableQueryInfo *item = taosArrayGetP(pGroup, i);
SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, tsdbGetTableId(item->pTable).tid); SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, TSDB_TABLEID(item->pTable)->tid);
if (list.size > 0 && item->windowResInfo.size > 0) { if (list.size > 0 && item->windowResInfo.size > 0) {
pTableList[numOfTables] = item; pTableList[numOfTables] = item;
numOfTables += 1; numOfTables += 1;
@ -2665,6 +2710,10 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn); tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn);
SResultInfo *pResultInfo = calloc(pQuery->numOfOutput, sizeof(SResultInfo)); SResultInfo *pResultInfo = calloc(pQuery->numOfOutput, sizeof(SResultInfo));
if (pResultInfo == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
setWindowResultInfo(pResultInfo, pQuery, pRuntimeEnv->stableQuery); setWindowResultInfo(pResultInfo, pQuery, pRuntimeEnv->stableQuery);
resetMergeResultBuf(pQuery, pRuntimeEnv->pCtx, pResultInfo); resetMergeResultBuf(pQuery, pRuntimeEnv->pCtx, pResultInfo);
@ -2865,7 +2914,7 @@ void disableFuncInReverseScan(SQInfo *pQInfo) {
// group by normal columns and interval query on normal table // group by normal columns and interval query on normal table
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) { if (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) {
disableFuncInReverseScanImpl(pQInfo, pWindowResInfo, order); disableFuncInReverseScanImpl(pQInfo, pWindowResInfo, order);
} else { // for simple result of table query, } else { // for simple result of table query,
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { // todo refactor for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { // todo refactor
@ -3040,7 +3089,7 @@ bool needScanDataBlocksAgain(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
bool toContinue = false; bool toContinue = false;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) { if (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) {
// for each group result, call the finalize function for each column // for each group result, call the finalize function for each column
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
@ -3232,10 +3281,10 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) { void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) { if (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) {
// for each group result, call the finalize function for each column // for each group result, call the finalize function for each column
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { if (pRuntimeEnv->groupbyNormalCol) {
closeAllTimeWindow(pWindowResInfo); closeAllTimeWindow(pWindowResInfo);
} }
@ -3277,10 +3326,10 @@ static bool hasMainOutput(SQuery *pQuery) {
return false; return false;
} }
static STableQueryInfo *createTableQueryInfo( SQueryRuntimeEnv *pRuntimeEnv, void* pTable, STimeWindow win) { static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void* pTable, STimeWindow win, void* buf) {
SQuery* pQuery = pRuntimeEnv->pQuery; SQuery* pQuery = pRuntimeEnv->pQuery;
STableQueryInfo *pTableQueryInfo = calloc(1, sizeof(STableQueryInfo)); STableQueryInfo *pTableQueryInfo = buf;//calloc(1, sizeof(STableQueryInfo));
pTableQueryInfo->win = win; pTableQueryInfo->win = win;
pTableQueryInfo->lastKey = win.skey; pTableQueryInfo->lastKey = win.skey;
@ -3291,7 +3340,8 @@ static STableQueryInfo *createTableQueryInfo( SQueryRuntimeEnv *pRuntimeEnv, voi
int32_t initialSize = 1; int32_t initialSize = 1;
int32_t initialThreshold = 1; int32_t initialThreshold = 1;
if (isIntervalQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // set more initial size of interval/groupby query
if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) {
initialSize = 20; initialSize = 20;
initialThreshold = 100; initialThreshold = 100;
} }
@ -3306,7 +3356,6 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols)
} }
cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo, numOfCols); cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo, numOfCols);
free(pTableQueryInfo);
} }
#define SET_CURRENT_QUERY_TABLE_INFO(_runtime, _tableInfo) \ #define SET_CURRENT_QUERY_TABLE_INFO(_runtime, _tableInfo) \
@ -3319,7 +3368,6 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols)
/** /**
* set output buffer for different group * set output buffer for different group
* TODO opt performance if current group is identical to previous group
* @param pRuntimeEnv * @param pRuntimeEnv
* @param pDataBlockInfo * @param pDataBlockInfo
*/ */
@ -3330,7 +3378,10 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) {
// lastKey needs to be updated // lastKey needs to be updated
pTableQueryInfo->lastKey = nextKey; pTableQueryInfo->lastKey = nextKey;
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) {
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
}
if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == groupIndex) { if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == groupIndex) {
return; return;
@ -3519,7 +3570,7 @@ static int32_t getNumOfSubset(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery; SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
int32_t totalSubset = 0; int32_t totalSubset = 0;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (isIntervalQuery(pQuery))) { if (pQInfo->runtimeEnv.groupbyNormalCol || (isIntervalQuery(pQuery))) {
totalSubset = numOfClosedTimeWindow(&pQInfo->runtimeEnv.windowResInfo); totalSubset = numOfClosedTimeWindow(&pQInfo->runtimeEnv.windowResInfo);
} else { } else {
totalSubset = GET_NUM_OF_TABLEGROUP(pQInfo); totalSubset = GET_NUM_OF_TABLEGROUP(pQInfo);
@ -3613,11 +3664,11 @@ void copyFromWindowResToSData(SQInfo *pQInfo, SWindowResult *result) {
assert(pQuery->rec.rows <= pQuery->rec.capacity); assert(pQuery->rec.rows <= pQuery->rec.capacity);
} }
static UNUSED_FUNC void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) { static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
SQuery *pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
// update the number of result for each, only update the number of rows for the corresponding window result. // update the number of result for each, only update the number of rows for the corresponding window result.
if (pQuery->intervalTime == 0) { if (!QUERY_IS_INTERVAL_QUERY(pQuery)) {
for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) { for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) {
SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i]; SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i];
@ -3631,14 +3682,6 @@ static UNUSED_FUNC void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, S
pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes); pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
} }
} }
// int32_t g = pTableQueryInfo->groupIndex;
// assert(pRuntimeEnv->windowResInfo.size > 0);
//
// SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&g, sizeof(g));
// if (pWindowRes->numOfRows == 0) {
// pWindowRes->numOfRows = getNumOfResult(pRuntimeEnv);
// }
} }
} }
@ -3650,7 +3693,7 @@ void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *
SWindowResInfo * pWindowResInfo = &pTableQueryInfo->windowResInfo; SWindowResInfo * pWindowResInfo = &pTableQueryInfo->windowResInfo;
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1; pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1;
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || pRuntimeEnv->groupbyNormalCol) {
rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock); rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock);
} else { } else {
blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock); blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock);
@ -3692,7 +3735,7 @@ bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) {
} else { } else {
// there are results waiting for returned to client. // there are results waiting for returned to client.
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) && if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) &&
(isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) && (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) &&
(pRuntimeEnv->windowResInfo.size > 0)) { (pRuntimeEnv->windowResInfo.size > 0)) {
return true; return true;
} }
@ -4097,6 +4140,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
pRuntimeEnv->cur.vgroupIndex = -1; pRuntimeEnv->cur.vgroupIndex = -1;
pRuntimeEnv->stableQuery = isSTableQuery; pRuntimeEnv->stableQuery = isSTableQuery;
pRuntimeEnv->prevGroupId = INT32_MIN; pRuntimeEnv->prevGroupId = INT32_MIN;
pRuntimeEnv->groupbyNormalCol = isGroupbyNormalCol(pQuery->pGroupbyExpr);
if (pTsBuf != NULL) { if (pTsBuf != NULL) {
int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC; int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
@ -4121,7 +4165,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
if (pQuery->intervalTime == 0) { if (pQuery->intervalTime == 0) {
int16_t type = TSDB_DATA_TYPE_NULL; int16_t type = TSDB_DATA_TYPE_NULL;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group by columns not tags; if (pRuntimeEnv->groupbyNormalCol) { // group by columns not tags;
type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr); type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr);
} else { } else {
type = TSDB_DATA_TYPE_INT; // group id type = TSDB_DATA_TYPE_INT; // group id
@ -4130,7 +4174,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 512, 4096, type); initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 512, 4096, type);
} }
} else if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) { } else if (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) {
int32_t rows = getInitialPageNum(pQInfo); int32_t rows = getInitialPageNum(pQInfo);
code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, rows, pQuery->rowSize, pQInfo); code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, rows, pQuery->rowSize, pQInfo);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
@ -4138,7 +4182,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
} }
int16_t type = TSDB_DATA_TYPE_NULL; int16_t type = TSDB_DATA_TYPE_NULL;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { if (pRuntimeEnv->groupbyNormalCol) {
type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr); type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr);
} else { } else {
type = TSDB_DATA_TYPE_TIMESTAMP; type = TSDB_DATA_TYPE_TIMESTAMP;
@ -4156,8 +4200,9 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
// todo refactor // todo refactor
pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery); pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED); pRuntimeEnv->hasTagResults = hasTagValOutput(pQuery);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -4198,14 +4243,17 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
SDataStatis *pStatis = NULL; SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis); SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
if (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) { if (!pRuntimeEnv->groupbyNormalCol) {
if (!isIntervalQuery(pQuery)) { if (!isIntervalQuery(pQuery)) {
int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1; int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1;
setExecutionContext(pQInfo, (*pTableQueryInfo)->groupIndex, blockInfo.window.ekey + step); setExecutionContext(pQInfo, (*pTableQueryInfo)->groupIndex, blockInfo.window.ekey + step);
} else { // interval query } else { // interval query
TSKEY nextKey = blockInfo.window.skey; TSKEY nextKey = blockInfo.window.skey;
setIntervalQueryRange(pQInfo, nextKey); setIntervalQueryRange(pQInfo, nextKey);
/*int32_t ret = */setAdditionalInfo(pQInfo, (*pTableQueryInfo)->pTable, *pTableQueryInfo);
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) {
setAdditionalInfo(pQInfo, (*pTableQueryInfo)->pTable, *pTableQueryInfo);
}
} }
} }
@ -4230,9 +4278,9 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) {
setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb); setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb);
STableId id = tsdbGetTableId(pCheckInfo->pTable); STableId* id = TSDB_TABLEID(pCheckInfo->pTable);
qDebug("QInfo:%p query on (%d): uid:%" PRIu64 ", tid:%d, qrange:%" PRId64 "-%" PRId64, pQInfo, index, qDebug("QInfo:%p query on (%d): uid:%" PRIu64 ", tid:%d, qrange:%" PRId64 "-%" PRId64, pQInfo, index,
id.uid, id.tid, pCheckInfo->lastKey, pCheckInfo->win.ekey); id->uid, id->tid, pCheckInfo->lastKey, pCheckInfo->win.ekey);
STsdbQueryCond cond = { STsdbQueryCond cond = {
.twindow = {pCheckInfo->lastKey, pCheckInfo->win.ekey}, .twindow = {pCheckInfo->lastKey, pCheckInfo->win.ekey},
@ -4361,7 +4409,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
break; break;
} }
} }
} else if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group-by on normal columns query } else if (pRuntimeEnv->groupbyNormalCol) { // group-by on normal columns query
while (pQInfo->groupIndex < numOfGroups) { while (pQInfo->groupIndex < numOfGroups) {
SArray* group = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, pQInfo->groupIndex); SArray* group = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, pQInfo->groupIndex);
@ -4499,11 +4547,11 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
*/ */
pQInfo->tableIndex++; pQInfo->tableIndex++;
STableIdInfo tidInfo; STableIdInfo tidInfo = {0};
STableId id = tsdbGetTableId(pQuery->current->pTable);
tidInfo.uid = id.uid; STableId* id = TSDB_TABLEID(pQuery->current->pTable);
tidInfo.tid = id.tid; tidInfo.uid = id->uid;
tidInfo.tid = id->tid;
tidInfo.key = pQuery->current->lastKey; tidInfo.key = pQuery->current->lastKey;
taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo); taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo);
@ -4605,6 +4653,8 @@ static void doRestoreContext(SQInfo *pQInfo) {
static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) { static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery; SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (isIntervalQuery(pQuery)) { if (isIntervalQuery(pQuery)) {
size_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo); size_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo);
for (int32_t i = 0; i < numOfGroup; ++i) { for (int32_t i = 0; i < numOfGroup; ++i) {
@ -4614,6 +4664,7 @@ static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) {
for (int32_t j = 0; j < num; ++j) { for (int32_t j = 0; j < num; ++j) {
STableQueryInfo* item = taosArrayGetP(group, j); STableQueryInfo* item = taosArrayGetP(group, j);
closeAllTimeWindow(&item->windowResInfo); closeAllTimeWindow(&item->windowResInfo);
removeRedundantWindow(&item->windowResInfo, item->lastKey - step, step);
} }
} }
} else { // close results for group result } else { // close results for group result
@ -4665,6 +4716,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
el = scanMultiTableDataBlocks(pQInfo); el = scanMultiTableDataBlocks(pQInfo);
qDebug("QInfo:%p reversed scan completed, elapsed time: %" PRId64 "ms", pQInfo, el); qDebug("QInfo:%p reversed scan completed, elapsed time: %" PRId64 "ms", pQInfo, el);
doCloseAllTimeWindowAfterScan(pQInfo);
doRestoreContext(pQInfo); doRestoreContext(pQInfo);
} else { } else {
qDebug("QInfo:%p no need to do reversed scan, query completed", pQInfo); qDebug("QInfo:%p no need to do reversed scan, query completed", pQInfo);
@ -4677,7 +4729,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
return; return;
} }
if (isIntervalQuery(pQuery) || isSumAvgRateQuery(pQuery)) { if (QUERY_IS_INTERVAL_QUERY(pQuery) || isSumAvgRateQuery(pQuery)) {
if (mergeIntoGroupResult(pQInfo) == TSDB_CODE_SUCCESS) { if (mergeIntoGroupResult(pQInfo) == TSDB_CODE_SUCCESS) {
copyResToQueryResultBuf(pQInfo, pQuery); copyResToQueryResultBuf(pQInfo, pQuery);
@ -4774,10 +4826,10 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
pQuery->current->lastKey, pQuery->window.ekey); pQuery->current->lastKey, pQuery->window.ekey);
} else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { } else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
STableIdInfo tidInfo; STableIdInfo tidInfo;
STableId id = tsdbGetTableId(pQuery->current); STableId* id = TSDB_TABLEID(pQuery->current);
tidInfo.uid = id.uid; tidInfo.uid = id->uid;
tidInfo.tid = id.tid; tidInfo.tid = id->tid;
tidInfo.key = pQuery->current->lastKey; tidInfo.key = pQuery->current->lastKey;
taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo); taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo);
} }
@ -4867,7 +4919,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
} }
// all data scanned, the group by normal column can return // all data scanned, the group by normal column can return
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // todo refactor with merge interval time result if (pRuntimeEnv->groupbyNormalCol) { // todo refactor with merge interval time result
pQInfo->groupIndex = 0; pQInfo->groupIndex = 0;
pQuery->rec.rows = 0; pQuery->rec.rows = 0;
copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult); copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult);
@ -4928,7 +4980,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
STableQueryInfo* item = taosArrayGetP(g, 0); STableQueryInfo* item = taosArrayGetP(g, 0);
// group by normal column, sliding window query, interval query are handled by interval query processor // group by normal column, sliding window query, interval query are handled by interval query processor
if (isIntervalQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // interval (down sampling operation) if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { // interval (down sampling operation)
tableIntervalProcess(pQInfo, item); tableIntervalProcess(pQInfo, item);
} else if (isFixedOutputQuery(pQuery)) { } else if (isFixedOutputQuery(pQuery)) {
tableFixedOutputProcess(pQInfo, item); tableFixedOutputProcess(pQInfo, item);
@ -4943,18 +4995,19 @@ static void tableQueryImpl(SQInfo *pQInfo) {
} }
static void stableQueryImpl(SQInfo *pQInfo) { static void stableQueryImpl(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery; SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pRuntimeEnv->pQuery;
pQuery->rec.rows = 0; pQuery->rec.rows = 0;
int64_t st = taosGetTimestampUs(); int64_t st = taosGetTimestampUs();
if (isIntervalQuery(pQuery) || if (QUERY_IS_INTERVAL_QUERY(pQuery) ||
(isFixedOutputQuery(pQuery) && (!isPointInterpoQuery(pQuery)) && !isGroupbyNormalCol(pQuery->pGroupbyExpr) && (isFixedOutputQuery(pQuery) && (!isPointInterpoQuery(pQuery)) && !pRuntimeEnv->groupbyNormalCol &&
!isFirstLastRowQuery(pQuery))) { !isFirstLastRowQuery(pQuery))) {
multiTableQueryProcess(pQInfo); multiTableQueryProcess(pQInfo);
} else { } else {
assert((pQuery->checkBuffer == 1 && pQuery->intervalTime == 0) || isPointInterpoQuery(pQuery) || assert((pQuery->checkBuffer == 1 && pQuery->intervalTime == 0) || isPointInterpoQuery(pQuery) ||
isFirstLastRowQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)); isFirstLastRowQuery(pQuery) || pRuntimeEnv->groupbyNormalCol);
sequentialTableProcess(pQInfo); sequentialTableProcess(pQInfo);
} }
@ -5649,28 +5702,33 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
STimeWindow window = pQueryMsg->window; STimeWindow window = pQueryMsg->window;
taosArraySort(pTableIdList, compareTableIdInfo); taosArraySort(pTableIdList, compareTableIdInfo);
// TODO optimize the STableQueryInfo malloc strategy
pQInfo->pBuf = calloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo));
int32_t index = 0;
for(int32_t i = 0; i < numOfGroups; ++i) { for(int32_t i = 0; i < numOfGroups; ++i) {
SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, i); SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, i);
size_t s = taosArrayGetSize(pa);
size_t s = taosArrayGetSize(pa);
SArray* p1 = taosArrayInit(s, POINTER_BYTES); SArray* p1 = taosArrayInit(s, POINTER_BYTES);
for(int32_t j = 0; j < s; ++j) { for(int32_t j = 0; j < s; ++j) {
void* pTable = taosArrayGetP(pa, j); void* pTable = taosArrayGetP(pa, j);
STableId* id = TSDB_TABLEID(pTable);
// NOTE: compare STableIdInfo with STableId STableIdInfo* pTableId = taosArraySearch(pTableIdList, id, compareTableIdInfo);
STableId id = tsdbGetTableId(pTable);
STableIdInfo* pTableId = taosArraySearch(pTableIdList, &id, compareTableIdInfo);
if (pTableId != NULL ) { if (pTableId != NULL ) {
window.skey = pTableId->key; window.skey = pTableId->key;
} else { } else {
window.skey = pQueryMsg->window.skey; window.skey = pQueryMsg->window.skey;
} }
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window); void* buf = pQInfo->pBuf + index * sizeof(STableQueryInfo);
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window, buf);
item->groupIndex = i; item->groupIndex = i;
taosArrayPush(p1, &item); taosArrayPush(p1, &item);
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES); taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES);
index += 1;
} }
taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1); taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1);
@ -5814,6 +5872,7 @@ static void freeQInfo(SQInfo *pQInfo) {
taosArrayDestroy(p); taosArrayDestroy(p);
} }
tfree(pQInfo->pBuf);
taosArrayDestroy(pQInfo->tableqinfoGroupInfo.pGroupList); taosArrayDestroy(pQInfo->tableqinfoGroupInfo.pGroupList);
taosHashCleanup(pQInfo->tableqinfoGroupInfo.map); taosHashCleanup(pQInfo->tableqinfoGroupInfo.map);
tsdbDestoryTableGroup(&pQInfo->tableGroupInfo); tsdbDestoryTableGroup(&pQInfo->tableGroupInfo);
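STableQueryInfo entries are no longer calloc'ed one per table: createQInfoImpl reserves a single pQInfo->pBuf slab sized for numOfTables entries, each table query info is placed at its own offset inside it, and freeQInfo releases the whole slab with one tfree (the per-entry free in destroyTableQueryInfo was dropped accordingly). A small sketch of that slab placement, using invented Demo* types:

#include <stdio.h>
#include <stdlib.h>

typedef struct DemoTableInfo {
  int     tid;
  long long lastKey;
} DemoTableInfo;

typedef struct DemoQInfo {
  void *pBuf;                          /* one slab holding every DemoTableInfo */
  int   numOfTables;
} DemoQInfo;

static DemoTableInfo *demoInitTableInfo(void *slot, int tid) {
  DemoTableInfo *info = slot;          /* placement into the pre-reserved slot */
  info->tid = tid;
  info->lastKey = 0;
  return info;
}

int main(void) {
  DemoQInfo qinfo = {.numOfTables = 4};
  qinfo.pBuf = calloc(qinfo.numOfTables, sizeof(DemoTableInfo));

  for (int i = 0; i < qinfo.numOfTables; ++i) {
    void *slot = (char *)qinfo.pBuf + i * sizeof(DemoTableInfo);
    DemoTableInfo *info = demoInitTableInfo(slot, 1000 + i);
    printf("table %d placed at %p\n", info->tid, (void *)info);
  }

  free(qinfo.pBuf);                    /* one release instead of numOfTables frees */
  return 0;
}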
@ -5910,9 +5969,16 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
typedef struct SQueryMgmt {
SCacheObj *qinfoPool; // query handle pool
int32_t vgId;
bool closed;
pthread_mutex_t lock;
} SQueryMgmt;
int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, void* param, _qinfo_free_fn_t fn, int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, void* param, _qinfo_free_fn_t fn,
qinfo_t* pQInfo) { qinfo_t* pQInfo) {
assert(pQueryMsg != NULL); assert(pQueryMsg != NULL && tsdb != NULL);
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -6083,7 +6149,8 @@ void qTableQuery(qinfo_t qinfo) {
return; return;
} }
int32_t ret = setjmp(pQInfo->env); int32_t ret = setjmp(pQInfo->runtimeEnv.env);
// error occurs, record the error code and return to client // error occurs, record the error code and return to client
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
pQInfo->code = ret; pQInfo->code = ret;
@ -6266,13 +6333,13 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
varDataSetLen(output, rsize - VARSTR_HEADER_SIZE); varDataSetLen(output, rsize - VARSTR_HEADER_SIZE);
output = varDataVal(output); output = varDataVal(output);
STableId id = tsdbGetTableId(item->pTable); STableId* id = TSDB_TABLEID(item->pTable);
*(int64_t *)output = id.uid; // memory align problem, todo serialize *(int64_t *)output = id->uid; // memory align problem, todo serialize
output += sizeof(id.uid); output += sizeof(id->uid);
*(int32_t *)output = id.tid; *(int32_t *)output = id->tid;
output += sizeof(id.tid); output += sizeof(id->tid);
*(int32_t *)output = pQInfo->vgId; *(int32_t *)output = pQInfo->vgId;
output += sizeof(pQInfo->vgId); output += sizeof(pQInfo->vgId);
@ -6356,3 +6423,112 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
setQueryStatus(pQuery, QUERY_COMPLETED); setQueryStatus(pQuery, QUERY_COMPLETED);
} }
void freeqinfoFn(void *qhandle) {
void** handle = qhandle;
if (handle == NULL || *handle == NULL) {
return;
}
qKillQuery(*handle);
}
void* qOpenQueryMgmt(int32_t vgId) {
const int32_t REFRESH_HANDLE_INTERVAL = 2; // every 2 seconds, refresh handle pool
char cacheName[128] = {0};
sprintf(cacheName, "qhandle_%d", vgId);
SQueryMgmt* pQueryHandle = calloc(1, sizeof(SQueryMgmt));
pQueryHandle->qinfoPool = taosCacheInit(TSDB_DATA_TYPE_BIGINT, REFRESH_HANDLE_INTERVAL, true, freeqinfoFn, cacheName);
pQueryHandle->closed = false;
pthread_mutex_init(&pQueryHandle->lock, NULL);
qDebug("vgId:%d, open querymgmt success", vgId);
return pQueryHandle;
}
void qSetQueryMgmtClosed(void* pQMgmt) {
if (pQMgmt == NULL) {
return;
}
SQueryMgmt* pQueryMgmt = pQMgmt;
qDebug("vgId:%d, set querymgmt closed, wait for all queries cancelled", pQueryMgmt->vgId);
pthread_mutex_lock(&pQueryMgmt->lock);
pQueryMgmt->closed = true;
pthread_mutex_unlock(&pQueryMgmt->lock);
taosCacheEmpty(pQueryMgmt->qinfoPool, true);
}
void qCleanupQueryMgmt(void* pQMgmt) {
if (pQMgmt == NULL) {
return;
}
SQueryMgmt* pQueryMgmt = pQMgmt;
int32_t vgId = pQueryMgmt->vgId;
assert(pQueryMgmt->closed);
SCacheObj* pqinfoPool = pQueryMgmt->qinfoPool;
pQueryMgmt->qinfoPool = NULL;
taosCacheCleanup(pqinfoPool);
pthread_mutex_destroy(&pQueryMgmt->lock);
tfree(pQueryMgmt);
qDebug("vgId:%d querymgmt cleanup completed", vgId);
}
void** qRegisterQInfo(void* pMgmt, void* qInfo) {
if (pMgmt == NULL) {
return NULL;
}
SQueryMgmt *pQueryMgmt = pMgmt;
if (pQueryMgmt->qinfoPool == NULL) {
return NULL;
}
pthread_mutex_lock(&pQueryMgmt->lock);
if (pQueryMgmt->closed) {
pthread_mutex_unlock(&pQueryMgmt->lock);
return NULL;
} else {
void** handle = taosCachePut(pQueryMgmt->qinfoPool, qInfo, POINTER_BYTES, &qInfo, POINTER_BYTES, tsShellActivityTimer*2);
pthread_mutex_unlock(&pQueryMgmt->lock);
return handle;
}
}
void** qAcquireQInfo(void* pMgmt, void** key) {
SQueryMgmt *pQueryMgmt = pMgmt;
if (pQueryMgmt->qinfoPool == NULL || pQueryMgmt->closed) {
return NULL;
}
void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, key, POINTER_BYTES);
if (handle == NULL || *handle == NULL) {
return NULL;
} else {
return handle;
}
}
void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool needFree) {
SQueryMgmt *pQueryMgmt = pMgmt;
if (pQueryMgmt->qinfoPool == NULL) {
return NULL;
}
taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, needFree);
return 0;
}
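The new query management block registers every qinfo handle in a ref-counted cache guarded by a mutex and a closed flag: qRegisterQInfo refuses new handles once qSetQueryMgmtClosed has run, qAcquireQInfo/qReleaseQInfo bracket each use, and qCleanupQueryMgmt tears the pool down. The sketch below reduces that gate to its essentials (a fixed-size array instead of the cache, invented demo* names); it illustrates the closed-flag protocol only, not the real pool. Registering under the lock and checking the flag inside the same critical section is what keeps a query from slipping in after shutdown has begun.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* toy registry mirroring the closed-flag idea: a guarded pool plus a shutdown marker */
typedef struct DemoMgmt {
  void           *pool[8];
  int             count;
  int             closed;
  pthread_mutex_t lock;
} DemoMgmt;

static DemoMgmt *demoOpenMgmt(void) {
  DemoMgmt *mgmt = calloc(1, sizeof(DemoMgmt));
  pthread_mutex_init(&mgmt->lock, NULL);
  return mgmt;
}

/* refuse new registrations once the owner has started shutting down */
static void **demoRegister(DemoMgmt *mgmt, void *handle) {
  void **slot = NULL;
  pthread_mutex_lock(&mgmt->lock);
  if (!mgmt->closed && mgmt->count < 8) {
    mgmt->pool[mgmt->count] = handle;
    slot = &mgmt->pool[mgmt->count++];
  }
  pthread_mutex_unlock(&mgmt->lock);
  return slot;
}

static void demoSetClosed(DemoMgmt *mgmt) {
  pthread_mutex_lock(&mgmt->lock);
  mgmt->closed = 1;                    /* new queries are rejected from now on */
  pthread_mutex_unlock(&mgmt->lock);
}

static void demoCleanup(DemoMgmt *mgmt) {
  pthread_mutex_destroy(&mgmt->lock);
  free(mgmt);
}

int main(void) {
  DemoMgmt *mgmt = demoOpenMgmt();
  int query = 42;
  void **h = demoRegister(mgmt, &query);
  printf("registered: %s\n", h != NULL ? "yes" : "no");
  demoSetClosed(mgmt);
  printf("late registration accepted: %s\n", demoRegister(mgmt, &query) ? "yes" : "no");
  demoCleanup(mgmt);
  return 0;
}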


@ -32,7 +32,6 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
pWindowResInfo->threshold = threshold; pWindowResInfo->threshold = threshold;
pWindowResInfo->type = type; pWindowResInfo->type = type;
_hash_fn_t fn = taosGetDefaultHashFunction(type); _hash_fn_t fn = taosGetDefaultHashFunction(type);
pWindowResInfo->hashList = taosHashInit(threshold, fn, false); pWindowResInfo->hashList = taosHashInit(threshold, fn, false);
@ -54,7 +53,8 @@ void destroyTimeWindowRes(SWindowResult *pWindowRes, int32_t nOutputCols) {
if (pWindowRes == NULL) { if (pWindowRes == NULL) {
return; return;
} }
// TODO opt malloc strategy
for (int32_t i = 0; i < nOutputCols; ++i) { for (int32_t i = 0; i < nOutputCols; ++i) {
free(pWindowRes->resultInfo[i].interResultBuf); free(pWindowRes->resultInfo[i].interResultBuf);
} }
@ -180,19 +180,34 @@ void closeAllTimeWindow(SWindowResInfo *pWindowResInfo) {
/* /*
* remove the results that are not the FIRST time window that spreads beyond the * remove the results that are not the FIRST time window that spreads beyond the
* the last qualified time stamp in case of sliding query, which the sliding time is not equalled to the interval time * the last qualified time stamp in case of sliding query, which the sliding time is not equalled to the interval time.
* NOTE: remove redundant, only when the result set order equals to traverse order
*/ */
void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order) { void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order) {
assert(pWindowResInfo->size >= 0 && pWindowResInfo->capacity >= pWindowResInfo->size); assert(pWindowResInfo->size >= 0 && pWindowResInfo->capacity >= pWindowResInfo->size);
if (pWindowResInfo->size <= 1) {
int32_t i = 0; return;
while (i < pWindowResInfo->size &&
((pWindowResInfo->pResult[i].window.ekey < lastKey && order == QUERY_ASC_FORWARD_STEP) ||
(pWindowResInfo->pResult[i].window.skey > lastKey && order == QUERY_DESC_FORWARD_STEP))) {
++i;
} }
// assert(i < pWindowResInfo->size); // get the result order
int32_t resultOrder = (pWindowResInfo->pResult[0].window.skey < pWindowResInfo->pResult[1].window.skey)?
TSDB_ORDER_ASC:TSDB_ORDER_DESC;
if (order != resultOrder) {
return;
}
int32_t i = 0;
if (order == QUERY_ASC_FORWARD_STEP) {
while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].window.ekey < lastKey)) {
++i;
}
} else if (order == QUERY_DESC_FORWARD_STEP) {
while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].window.skey > lastKey)) {
++i;
}
}
if (i < pWindowResInfo->size) { if (i < pWindowResInfo->size) {
pWindowResInfo->size = (i + 1); pWindowResInfo->size = (i + 1);
} }
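The rewritten removeRedundantWindow now bails out for a single window, derives the result order from the first two windows, prunes only when that order matches the traversal order, and keeps everything up to and including the first window that reaches the last qualified timestamp. A tiny worked example of the ascending case, using invented types and standard C only:

#include <stdio.h>

typedef struct DemoWindow { long skey, ekey; } DemoWindow;

/* keep everything up to (and including) the first window that reaches lastKey */
static int pruneAscending(const DemoWindow *win, int size, long lastKey) {
  if (size <= 1) return size;
  int i = 0;
  while (i < size && win[i].ekey < lastKey) ++i;
  return (i < size) ? i + 1 : size;
}

int main(void) {
  DemoWindow win[] = {{0, 9}, {10, 19}, {20, 29}};
  int kept = pruneAscending(win, 3, 15);
  printf("windows kept: %d\n", kept);  /* prints 2: [0,9] and [10,19]; [20,29] is dropped */
  return 0;
}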


@ -118,7 +118,7 @@ static bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) {
* To flush data to disk to accommodate more data * To flush data to disk to accommodate more data
*/ */
if (pMemBuffer->numOfInMemPages > 0 && pMemBuffer->numOfInMemPages == pMemBuffer->inMemCapacity) { if (pMemBuffer->numOfInMemPages > 0 && pMemBuffer->numOfInMemPages == pMemBuffer->inMemCapacity) {
if (!tExtMemBufferFlush(pMemBuffer)) { if (tExtMemBufferFlush(pMemBuffer) != 0) {
return false; return false;
} }
} }
@ -268,6 +268,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) {
size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file); size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file);
if (retVal <= 0) { // failed to write to buffer, may be not enough space if (retVal <= 0) { // failed to write to buffer, may be not enough space
ret = TAOS_SYSTEM_ERROR(errno); ret = TAOS_SYSTEM_ERROR(errno);
return ret;
} }
pMemBuffer->fileMeta.numOfElemsInFile += first->item.num; pMemBuffer->fileMeta.numOfElemsInFile += first->item.num;
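tExtMemBufferFlush is now treated as returning 0 on success and a non-zero error code on failure, hence the caller's != 0 test replacing the old boolean negation and the added early return after a short fwrite. A brief sketch of that return convention (invented demoFlush name, standard C only):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* returns 0 on success, a non-zero error code on failure */
static int demoFlush(FILE *fp, const char *page, size_t pageSize) {
  size_t written = fwrite(page, pageSize, 1, fp);
  if (written != 1) {
    return errno != 0 ? errno : EIO;   /* report the failure instead of pressing on */
  }
  return 0;
}

int main(void) {
  FILE *fp = tmpfile();
  if (fp == NULL) return 1;
  char page[512];
  memset(page, 0, sizeof(page));
  int ret = demoFlush(fp, page, sizeof(page));
  if (ret != 0) {
    fprintf(stderr, "flush failed: %s\n", strerror(ret));
  } else {
    printf("flush ok\n");
  }
  fclose(fp);
  return 0;
}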


@ -22,41 +22,6 @@
#define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC) #define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC)
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
}
if (timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h') {
return (startTime / slidingTime) * slidingTime;
} else {
/*
* here we revised the start time of day according to the local time zone,
* but in case of DST, the start time of one day need to be dynamically decided.
*
* TODO dynamically decide the start time of a day, move to common module
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
int64_t revStartime = (startTime / slidingTime) * slidingTime + timezone * t;
int64_t revEndtime = revStartime + slidingTime - 1;
if (revEndtime < startTime) {
revStartime += slidingTime;
}
return revStartime;
}
}
SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol) { int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol) {
if (fillType == TSDB_FILL_NONE) { if (fillType == TSDB_FILL_NONE) {
@ -128,7 +93,7 @@ static TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterva
if (order == TSDB_ORDER_ASC) { if (order == TSDB_ORDER_ASC) {
return ekey; return ekey;
} else { } else {
return taosGetIntervalStartTimestamp(ekey, timeInterval, slidingTimeUnit, precision); return taosGetIntervalStartTimestamp(ekey, timeInterval, timeInterval, slidingTimeUnit, precision);
} }
} }
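
Note: the removed block above is the old taosGetIntervalStartTimestamp, and the call-site change shows the function now takes the interval and sliding time as separate arguments. A self-contained sketch of the alignment rule the removed code implemented, assuming millisecond precision and an explicitly passed timezone offset (names here are illustrative, not the real API):

#include <stdint.h>
#include <stdio.h>

/* Align startMs to the window that contains it. Sub-day units ('a','m','s','h')
 * align by plain integer division; day-or-larger units are additionally shifted
 * by the local timezone so that windows start at local midnight. */
static int64_t alignWindowStart(int64_t startMs, int64_t slidingMs, char unit, int64_t tzOffsetSec) {
  if (slidingMs == 0) return startMs;
  if (unit == 'a' || unit == 'm' || unit == 's' || unit == 'h') {
    return (startMs / slidingMs) * slidingMs;
  }
  int64_t revStart = (startMs / slidingMs) * slidingMs + tzOffsetSec * 1000;
  if (revStart + slidingMs - 1 < startMs) revStart += slidingMs;  /* window ended before startMs */
  return revStart;
}

int main(void) {
  /* 2020-07-01T03:25:00.000Z aligned to 1-hour windows -> 1593572400000 (03:00:00.000Z) */
  printf("%lld\n", (long long)alignWindowStart(1593573900000LL, 3600000LL, 'h', 0));
  return 0;
}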

View File

@ -31,9 +31,7 @@ extern int32_t tscEmbedded;
#define tInfo(...) { if (rpcDebugFlag & DEBUG_INFO) { taosPrintLog("RPC INFO ", tscEmbedded ? 255 : rpcDebugFlag, __VA_ARGS__); }} #define tInfo(...) { if (rpcDebugFlag & DEBUG_INFO) { taosPrintLog("RPC INFO ", tscEmbedded ? 255 : rpcDebugFlag, __VA_ARGS__); }}
#define tDebug(...) { if (rpcDebugFlag & DEBUG_DEBUG) { taosPrintLog("RPC DEBUG ", rpcDebugFlag, __VA_ARGS__); }} #define tDebug(...) { if (rpcDebugFlag & DEBUG_DEBUG) { taosPrintLog("RPC DEBUG ", rpcDebugFlag, __VA_ARGS__); }}
#define tTrace(...) { if (rpcDebugFlag & DEBUG_TRACE) { taosPrintLog("RPC TRACE ", rpcDebugFlag, __VA_ARGS__); }} #define tTrace(...) { if (rpcDebugFlag & DEBUG_TRACE) { taosPrintLog("RPC TRACE ", rpcDebugFlag, __VA_ARGS__); }}
#define tDump(x, y) { if (rpcDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)x, y); }}
#define tDebugDump(x, y) { if (rpcDebugFlag & DEBUG_DEBUG) { taosDumpData((unsigned char *)x, y); }}
#define tTraceDump(x, y) { if (rpcDebugFlag & DEBUG_TRACE) { taosDumpData((unsigned char *)x, y); }}
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -73,6 +73,7 @@ typedef struct {
SRpcInfo *pRpc; // associated SRpcInfo SRpcInfo *pRpc; // associated SRpcInfo
SRpcIpSet ipSet; // ip list provided by app SRpcIpSet ipSet; // ip list provided by app
void *ahandle; // handle provided by app void *ahandle; // handle provided by app
void *signature; // for validation
struct SRpcConn *pConn; // pConn allocated struct SRpcConn *pConn; // pConn allocated
char msgType; // message type char msgType; // message type
uint8_t *pCont; // content provided by app uint8_t *pCont; // content provided by app
@ -361,6 +362,7 @@ void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg) {
int contLen = rpcCompressRpcMsg(pMsg->pCont, pMsg->contLen); int contLen = rpcCompressRpcMsg(pMsg->pCont, pMsg->contLen);
pContext = (SRpcReqContext *) (pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext)); pContext = (SRpcReqContext *) (pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext));
pContext->ahandle = pMsg->ahandle; pContext->ahandle = pMsg->ahandle;
pContext->signature = pContext;
pContext->pRpc = (SRpcInfo *)shandle; pContext->pRpc = (SRpcInfo *)shandle;
pContext->ipSet = *pIpSet; pContext->ipSet = *pIpSet;
pContext->contLen = contLen; pContext->contLen = contLen;
@ -527,13 +529,16 @@ int rpcReportProgress(void *handle, char *pCont, int contLen) {
return code; return code;
} }
/* todo: cancel process may have race condition, pContext may have been released
just before app calls the rpcCancelRequest */
void rpcCancelRequest(void *handle) { void rpcCancelRequest(void *handle) {
SRpcReqContext *pContext = handle; SRpcReqContext *pContext = handle;
// signature is used to check if pContext is freed.
// pContext may have been released just before app calls the rpcCancelRequest
if (pContext->signature != pContext) return;
if (pContext->pConn) { if (pContext->pConn) {
tDebug("%s, app trys to cancel request", pContext->pConn->info); tDebug("%s, app trys to cancel request", pContext->pConn->info);
pContext->pConn->pReqMsg = NULL;
rpcCloseConn(pContext->pConn); rpcCloseConn(pContext->pConn);
pContext->pConn = NULL; pContext->pConn = NULL;
rpcFreeCont(pContext->pCont); rpcFreeCont(pContext->pCont);
@ -598,8 +603,13 @@ static void rpcReleaseConn(SRpcConn *pConn) {
rpcFreeMsg(pConn->pRspMsg); // it may have a response msg saved, but not request msg rpcFreeMsg(pConn->pRspMsg); // it may have a response msg saved, but not request msg
pConn->pRspMsg = NULL; pConn->pRspMsg = NULL;
if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg); // if server has ever reported progress, free content
} if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg); // do not use rpcFreeMsg
} else {
// if there is an outgoing message, free it
if (pConn->outType && pConn->pReqMsg)
rpcFreeMsg(pConn->pReqMsg);
}
// memset could not be used, since lockeBy can not be reset // memset could not be used, since lockeBy can not be reset
pConn->inType = 0; pConn->inType = 0;
@ -955,6 +965,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) {
if (pConn->outType) { if (pConn->outType) {
SRpcReqContext *pContext = pConn->pContext; SRpcReqContext *pContext = pConn->pContext;
pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
pConn->pReqMsg = NULL;
taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl); taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl);
} }
@ -969,7 +980,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
SRpcInfo *pRpc = (SRpcInfo *)pRecv->shandle; SRpcInfo *pRpc = (SRpcInfo *)pRecv->shandle;
SRpcConn *pConn = (SRpcConn *)pRecv->thandle; SRpcConn *pConn = (SRpcConn *)pRecv->thandle;
tTraceDump(pRecv->msg, pRecv->msgLen); tDump(pRecv->msg, pRecv->msgLen);
// underlying UDP layer does not know it is server or client // underlying UDP layer does not know it is server or client
pRecv->connType = pRecv->connType | pRpc->connType; pRecv->connType = pRecv->connType | pRpc->connType;
@ -1005,6 +1016,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) { static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
SRpcInfo *pRpc = pContext->pRpc; SRpcInfo *pRpc = pContext->pRpc;
pContext->signature = NULL;
pContext->pConn = NULL; pContext->pConn = NULL;
if (pContext->pRsp) { if (pContext->pRsp) {
// for synchronous API // for synchronous API
@ -1056,6 +1068,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
SRpcReqContext *pContext = pConn->pContext; SRpcReqContext *pContext = pConn->pContext;
rpcMsg.handle = pContext; rpcMsg.handle = pContext;
pConn->pContext = NULL; pConn->pContext = NULL;
pConn->pReqMsg = NULL;
// for UDP, port may be changed by server, the port in ipSet shall be used for cache // for UDP, port may be changed by server, the port in ipSet shall be used for cache
if (pHead->code != TSDB_CODE_RPC_TOO_SLOW) { if (pHead->code != TSDB_CODE_RPC_TOO_SLOW) {
@ -1242,7 +1255,7 @@ static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) {
tError("%s, failed to send, msgLen:%d written:%d, reason:%s", pConn->info, msgLen, writtenLen, strerror(errno)); tError("%s, failed to send, msgLen:%d written:%d, reason:%s", pConn->info, msgLen, writtenLen, strerror(errno));
} }
tTraceDump(msg, msgLen); tDump(msg, msgLen);
} }
static void rpcProcessConnError(void *param, void *id) { static void rpcProcessConnError(void *param, void *id) {
@ -1292,6 +1305,7 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) {
tDebug("%s, failed to send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort); tDebug("%s, failed to send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort);
if (pConn->pContext) { if (pConn->pContext) {
pConn->pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; pConn->pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
pConn->pReqMsg = NULL;
taosTmrStart(rpcProcessConnError, 0, pConn->pContext, pRpc->tmrCtrl); taosTmrStart(rpcProcessConnError, 0, pConn->pContext, pRpc->tmrCtrl);
rpcReleaseConn(pConn); rpcReleaseConn(pConn);
} }
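
Note: several hunks above add a signature field to the request context: rpcSendRequest stores the context's own address, rpcNotifyClient clears it, and rpcCancelRequest bails out when the field no longer matches. As the removed todo comment notes, this is only a best-effort guard against cancelling a released context. A simplified standalone sketch of that pattern, with stand-in types rather than the real SRpcReqContext:

#include <stdbool.h>
#include <stddef.h>

typedef struct ReqCtx {
  void *signature;            /* points to itself while the request is in flight */
  /* ... connection, content, callback ... */
} ReqCtx;

static void sendRequest(ReqCtx *ctx)  { ctx->signature = ctx; }      /* as in rpcSendRequest */
static void notifyClient(ReqCtx *ctx) { ctx->signature = NULL; }     /* as in rpcNotifyClient */

static bool cancelRequest(ReqCtx *ctx) {
  if (ctx->signature != ctx) return false;  /* already notified or freed: ignore the cancel */
  /* ... close the connection and free the pending content ... */
  return true;
}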

View File

@ -47,9 +47,9 @@ extern int tsdbDebugFlag;
// Definitions // Definitions
// ------------------ tsdbMeta.c // ------------------ tsdbMeta.c
typedef struct STable { typedef struct STable {
STableId tableId;
ETableType type; ETableType type;
tstr* name; // NOTE: there a flexible string here tstr* name; // NOTE: there a flexible string here
STableId tableId;
uint64_t suid; uint64_t suid;
struct STable* pSuper; // super table pointer struct STable* pSuper; // super table pointer
uint8_t numOfSchemas; uint8_t numOfSchemas;
@ -298,16 +298,71 @@ STsdbMeta* tsdbNewMeta(STsdbCfg* pCfg);
void tsdbFreeMeta(STsdbMeta* pMeta); void tsdbFreeMeta(STsdbMeta* pMeta);
int tsdbOpenMeta(STsdbRepo* pRepo); int tsdbOpenMeta(STsdbRepo* pRepo);
int tsdbCloseMeta(STsdbRepo* pRepo); int tsdbCloseMeta(STsdbRepo* pRepo);
STSchema* tsdbGetTableSchema(STable* pTable);
STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid); STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid);
STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t version); STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t version);
STSchema* tsdbGetTableTagSchema(STable* pTable);
int tsdbUpdateTable(STsdbRepo* pRepo, STable* pTable, STableCfg* pCfg);
int tsdbWLockRepoMeta(STsdbRepo* pRepo); int tsdbWLockRepoMeta(STsdbRepo* pRepo);
int tsdbRLockRepoMeta(STsdbRepo* pRepo); int tsdbRLockRepoMeta(STsdbRepo* pRepo);
int tsdbUnlockRepoMeta(STsdbRepo* pRepo); int tsdbUnlockRepoMeta(STsdbRepo* pRepo);
void tsdbRefTable(STable* pTable); void tsdbRefTable(STable* pTable);
void tsdbUnRefTable(STable* pTable); void tsdbUnRefTable(STable* pTable);
void tsdbUpdateTableSchema(STsdbRepo* pRepo, STable* pTable, STSchema* pSchema, bool insertAct);
static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *key2) {
if (*(int16_t *)key1 < schemaVersion(*(STSchema **)key2)) {
return -1;
} else if (*(int16_t *)key1 > schemaVersion(*(STSchema **)key2)) {
return 1;
} else {
return 0;
}
}
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t version) {
STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
STSchema* pSchema = NULL;
STSchema* pTSchema = NULL;
if (lock) taosRLockLatch(&(pDTable->latch));
if (version < 0) { // get the latest version of schema
pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
} else { // get the schema with version
void* ptr = taosbsearch(&version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
goto _exit;
}
pTSchema = *(STSchema**)ptr;
}
ASSERT(pTSchema != NULL);
if (copy) {
if ((pSchema = tdDupSchema(pTSchema)) == NULL) terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
} else {
pSchema = pTSchema;
}
_exit:
if (lock) taosRUnLockLatch(&(pDTable->latch));
return pSchema;
}
static FORCE_INLINE STSchema* tsdbGetTableSchema(STable* pTable) {
return tsdbGetTableSchemaImpl(pTable, false, false, -1);
}
static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) {
if (pTable->type == TSDB_CHILD_TABLE) { // check child table first
STable *pSuper = pTable->pSuper;
if (pSuper == NULL) return NULL;
return pSuper->tagSchema;
} else if (pTable->type == TSDB_SUPER_TABLE) {
return pTable->tagSchema;
} else {
return NULL;
}
}
// ------------------ tsdbBuffer.c // ------------------ tsdbBuffer.c
STsdbBufPool* tsdbNewBufPool(); STsdbBufPool* tsdbNewBufPool();
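
Note: tsdbGetTableSchemaImpl above resolves a table's schema either to the latest version or to an exact version via taosbsearch over an array of schema pointers ordered by version, optionally under a read latch and optionally returning a copy. A minimal standalone sketch of the version lookup itself, using the C library bsearch and simplified stand-in types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int16_t version; } Schema;

/* Same shape as tsdbCompareSchemaVersion: the key is an int16_t version,
 * each array element is a Schema pointer. */
static int cmpVersion(const void *key, const void *elem) {
  int16_t v = *(const int16_t *)key;
  const Schema *s = *(const Schema *const *)elem;
  if (v < s->version) return -1;
  if (v > s->version) return 1;
  return 0;
}

int main(void) {
  Schema s1 = {1}, s2 = {3}, s3 = {7};
  Schema *schemas[] = {&s1, &s2, &s3};          /* ascending by version, like pDTable->schema[] */
  int16_t want = 3;
  Schema **hit = bsearch(&want, schemas, 3, sizeof(Schema *), cmpVersion);
  printf("requested %d -> %s\n", want, hit ? "found" : "not found");
  printf("latest version: %d\n", schemas[2]->version);  /* the "version < 0" path just takes the last entry */
  return 0;
}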

View File

@ -28,6 +28,7 @@
#include "tsdbMain.h" #include "tsdbMain.h"
#include "tutil.h" #include "tutil.h"
#include "ttime.h" #include "ttime.h"
#include "tfile.h"
const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"}; const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"};

View File

@ -41,9 +41,9 @@ typedef struct {
} SSubmitBlkIter; } SSubmitBlkIter;
typedef struct { typedef struct {
int32_t totalLen; int32_t totalLen;
int32_t len; int32_t len;
SSubmitBlk *pBlock; void * pMsg;
} SSubmitMsgIter; } SSubmitMsgIter;
static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg); static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg);
@ -56,7 +56,7 @@ static STsdbRepo * tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg);
static void tsdbFreeRepo(STsdbRepo *pRepo); static void tsdbFreeRepo(STsdbRepo *pRepo);
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter);
static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows); static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows);
static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter); static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock);
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
static int tsdbRestoreInfo(STsdbRepo *pRepo); static int tsdbRestoreInfo(STsdbRepo *pRepo);
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
@ -68,6 +68,7 @@ static int keyFGroupCompFunc(const void *key, const void *fgroup);
static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg); static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg);
static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg); static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg);
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable); static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable);
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg);
// Function declaration // Function declaration
int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) { int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) {
@ -164,6 +165,13 @@ int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *
STsdbRepo * pRepo = (STsdbRepo *)repo; STsdbRepo * pRepo = (STsdbRepo *)repo;
SSubmitMsgIter msgIter = {0}; SSubmitMsgIter msgIter = {0};
if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) {
if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) {
tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
}
return -1;
}
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) { if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) {
tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno)); tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1; return -1;
@ -173,12 +181,14 @@ int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *
int32_t affectedrows = 0; int32_t affectedrows = 0;
TSKEY now = taosGetTimestamp(pRepo->config.precision); TSKEY now = taosGetTimestamp(pRepo->config.precision);
while (true) {
while ((pBlock = tsdbGetSubmitMsgNext(&msgIter)) != NULL) { tsdbGetSubmitMsgNext(&msgIter, &pBlock);
if (pBlock == NULL) break;
if (tsdbInsertDataToTable(pRepo, pBlock, now, &affectedrows) < 0) { if (tsdbInsertDataToTable(pRepo, pBlock, now, &affectedrows) < 0) {
return -1; return -1;
} }
} }
if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows); if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows);
return 0; return 0;
} }
@ -263,7 +273,7 @@ void tsdbStartStream(TSDB_REPO_T *repo) {
STable *pTable = pMeta->tables[i]; STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) { if (pTable && pTable->type == TSDB_STREAM_TABLE) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql,
tsdbGetTableSchema(pTable)); tsdbGetTableSchemaImpl(pTable, false, false, -1));
} }
} }
} }
@ -694,17 +704,12 @@ static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) {
return -1; return -1;
} }
pMsg->length = htonl(pMsg->length);
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
pMsg->compressed = htonl(pMsg->compressed);
pIter->totalLen = pMsg->length; pIter->totalLen = pMsg->length;
pIter->len = TSDB_SUBMIT_MSG_HEAD_SIZE; pIter->len = 0;
pIter->pMsg = pMsg;
if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) { if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
return -1; return -1;
} else {
pIter->pBlock = pMsg->blocks;
} }
return 0; return 0;
@ -714,26 +719,8 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY
STsdbMeta *pMeta = pRepo->tsdbMeta; STsdbMeta *pMeta = pRepo->tsdbMeta;
int64_t points = 0; int64_t points = 0;
STable *pTable = tsdbGetTableByUid(pMeta, pBlock->uid); STable *pTable = pMeta->tables[pBlock->tid];
if (pTable == NULL || TABLE_TID(pTable) != pBlock->tid) { ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
// Check schema version and update schema if needed
if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) {
tsdbError("vgId:%d failed to insert data to table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
SSubmitBlkIter blkIter = {0}; SSubmitBlkIter blkIter = {0};
SDataRow row = NULL; SDataRow row = NULL;
@ -764,27 +751,23 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY
return 0; return 0;
} }
static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter) { static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) {
SSubmitBlk *pBlock = pIter->pBlock; if (pIter->len == 0) {
if (pBlock == NULL) return NULL; pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE;
pBlock->dataLen = htonl(pBlock->dataLen);
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
pBlock->uid = htobe64(pBlock->uid);
pBlock->tid = htonl(pBlock->tid);
pBlock->sversion = htonl(pBlock->sversion);
pBlock->padding = htonl(pBlock->padding);
pIter->len = pIter->len + sizeof(SSubmitBlk) + pBlock->dataLen;
if (pIter->len >= pIter->totalLen) {
pIter->pBlock = NULL;
} else { } else {
pIter->pBlock = (SSubmitBlk *)((char *)pBlock + pBlock->dataLen + sizeof(SSubmitBlk)); SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen);
} }
return pBlock; if (pIter->len > pIter->totalLen) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
*pPBlock = NULL;
return -1;
}
*pPBlock = (pIter->len == pIter->totalLen) ? NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
return 0;
} }
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) {
@ -969,42 +952,64 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) {
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) { static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) {
ASSERT(pTable != NULL); ASSERT(pTable != NULL);
STSchema *pSchema = tsdbGetTableSchema(pTable); STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
int sversion = schemaVersion(pSchema); int sversion = schemaVersion(pSchema);
if (pBlock->sversion == sversion) return 0; if (pBlock->sversion == sversion) {
if (pBlock->sversion > sversion) { // need to config return 0;
tsdbDebug("vgId:%d table %s tid %d has version %d smaller than client version %d, try to config", REPO_ID(pRepo), } else {
TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), sversion, pBlock->sversion); if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema
if (pRepo->appH.configFunc) {
void *msg = (*pRepo->appH.configFunc)(REPO_ID(pRepo), TABLE_TID(pTable));
if (msg == NULL) {
tsdbError("vgId:%d failed to config table %s tid %d since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), tstrerror(terrno));
return -1;
}
STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg);
if (pTableCfg == NULL) {
rpcFreeCont(msg);
return -1;
}
if (tsdbUpdateTable(pRepo, (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable, pTableCfg) < 0) {
tsdbError("vgId:%d failed to update table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
tsdbClearTableCfg(pTableCfg);
rpcFreeCont(msg);
return -1;
}
tsdbClearTableCfg(pTableCfg);
rpcFreeCont(msg);
} else {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
return -1; return -1;
} }
}
if (pBlock->sversion > sversion) { // may need to update table schema
if (pBlock->schemaLen > 0) {
tsdbDebug(
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0);
int numOfCols = pBlock->schemaLen / sizeof(STColumn);
STColumn *pTCol = (STColumn *)pBlock->data;
STSchemaBuilder schemaBuilder = {0};
if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
for (int i = 0; i < numOfCols; i++) {
if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
tdDestroyTSchemaBuilder(&schemaBuilder);
return -1;
}
}
STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder);
if (pNSchema == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tdDestroyTSchemaBuilder(&schemaBuilder);
return -1;
}
tdDestroyTSchemaBuilder(&schemaBuilder);
tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true);
} else {
tsdbDebug(
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE;
return -1;
}
} else { } else {
if (tsdbGetTableSchemaByVersion(pTable, pBlock->sversion) == NULL) { ASSERT(pBlock->sversion >= 0);
if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) {
tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo), tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo),
pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable)); pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable));
} }
@ -1013,7 +1018,64 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT
} }
return 0; return 0;
} }
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
ASSERT(pMsg != NULL);
STsdbMeta * pMeta = pRepo->tsdbMeta;
SSubmitMsgIter msgIter = {0};
SSubmitBlk * pBlock = NULL;
terrno = TSDB_CODE_SUCCESS;
pMsg->length = htonl(pMsg->length);
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1;
while (true) {
if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
if (pBlock == NULL) break;
pBlock->uid = htobe64(pBlock->uid);
pBlock->tid = htonl(pBlock->tid);
pBlock->sversion = htonl(pBlock->sversion);
pBlock->dataLen = htonl(pBlock->dataLen);
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
if (pBlock->tid <= 0 || pBlock->tid >= pRepo->config.maxTables) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
STable *pTable = pMeta->tables[pBlock->tid];
if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
// Check schema version and update schema if needed
if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) {
if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
continue;
} else {
return -1;
}
}
}
if (terrno != TSDB_CODE_SUCCESS) return -1;
return 0;
}
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) { static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) {
// TODO // TODO
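
Note: in the hunks above, tsdbGetSubmitMsgNext now reports malformed messages through its return value and hands blocks back via an out-parameter, while tsdbScanAndConvertSubmitMsg performs the htonl/htobe64 conversions once before insertion. A standalone sketch of the iterator contract, with simplified stand-in structures rather than the real SSubmitMsgIter/SSubmitBlk:

#include <stdint.h>

enum { MSG_HEAD_SIZE = 8 };                    /* stand-in for TSDB_SUBMIT_MSG_HEAD_SIZE */

typedef struct { int32_t dataLen; int32_t schemaLen; } Blk;
typedef struct { int32_t totalLen; int32_t len; void *msg; } Iter;

/* Returns 0 and sets *out (NULL means end of message); returns -1 when the
 * lengths do not add up, i.e. the message is messed up. */
static int iterNext(Iter *it, Blk **out) {
  if (it->len == 0) {
    it->len = MSG_HEAD_SIZE;                               /* skip the message header first */
  } else {
    Blk *cur = (Blk *)((char *)it->msg + it->len);
    it->len += (int32_t)sizeof(Blk) + cur->dataLen + cur->schemaLen;
  }
  if (it->len > it->totalLen) { *out = NULL; return -1; }
  *out = (it->len == it->totalLen) ? NULL : (Blk *)((char *)it->msg + it->len);
  return 0;
}

/* Caller pattern, as in tsdbInsertData after the change. */
static int consumeAll(Iter *it) {
  Blk *blk = NULL;
  while (1) {
    if (iterNext(it, &blk) < 0) return -1;   /* propagate the format error */
    if (blk == NULL) break;                   /* clean end of message */
    /* ... insert the rows of this block ... */
  }
  return 0;
}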

View File

@ -538,10 +538,12 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
SCommitIter *pIter = iters + tid; SCommitIter *pIter = iters + tid;
if (pIter->pTable == NULL) continue; if (pIter->pTable == NULL) continue;
taosRLockLatch(&(pIter->pTable->latch));
tsdbSetHelperTable(pHelper, pIter->pTable, pRepo); tsdbSetHelperTable(pHelper, pIter->pTable, pRepo);
if (pIter->pIter != NULL) { if (pIter->pIter != NULL) {
tdInitDataCols(pDataCols, tsdbGetTableSchema(pIter->pTable)); tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1));
int maxRowsToRead = pCfg->maxRowsPerFileBlock * 4 / 5; int maxRowsToRead = pCfg->maxRowsPerFileBlock * 4 / 5;
int nLoop = 0; int nLoop = 0;
@ -557,6 +559,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
int rowsWritten = tsdbWriteDataBlock(pHelper, pDataCols); int rowsWritten = tsdbWriteDataBlock(pHelper, pDataCols);
ASSERT(rowsWritten != 0); ASSERT(rowsWritten != 0);
if (rowsWritten < 0) { if (rowsWritten < 0) {
taosRUnLockLatch(&(pIter->pTable->latch));
tsdbError("vgId:%d failed to write data block to table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo), tsdbError("vgId:%d failed to write data block to table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo),
TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable), TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable),
tstrerror(terrno)); tstrerror(terrno));
@ -571,6 +574,8 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
ASSERT(pDataCols->numOfRows == 0); ASSERT(pDataCols->numOfRows == 0);
} }
taosRUnLockLatch(&(pIter->pTable->latch));
// Move the last block to the new .l file if neccessary // Move the last block to the new .l file if neccessary
if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) { if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno)); tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno));
@ -680,10 +685,10 @@ static int tsdbReadRowsFromCache(STsdbMeta *pMeta, STable *pTable, SSkipListIter
if (dataRowKey(row) > maxKey) break; if (dataRowKey(row) > maxKey) break;
if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) {
pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); pSchema = tsdbGetTableSchemaImpl(pTable, true, false, dataRowVersion(row));
if (pSchema == NULL) { if (pSchema == NULL) {
// TODO: deal with the error here // TODO: deal with the error here
ASSERT(false); ASSERT(0);
} }
} }

View File

@ -29,10 +29,9 @@ static void tsdbOrgMeta(void *pHandle);
static char * getTagIndexKey(const void *pData); static char * getTagIndexKey(const void *pData);
static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper); static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper);
static void tsdbFreeTable(STable *pTable); static void tsdbFreeTable(STable *pTable);
static int tsdbUpdateTableTagSchema(STable *pTable, STSchema *newSchema); static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, bool lock);
static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx);
static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFromIdx, bool lock); static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFromIdx, bool lock);
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable); static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper);
static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable); static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable);
static int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, int32_t tid); static int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, int32_t tid);
static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup); static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup);
@ -76,7 +75,7 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
// TODO // TODO
if (super->type != TSDB_SUPER_TABLE) return -1; if (super->type != TSDB_SUPER_TABLE) return -1;
if (super->tableId.uid != pCfg->superUid) return -1; if (super->tableId.uid != pCfg->superUid) return -1;
tsdbUpdateTable(pRepo, super, pCfg); // tsdbUpdateTable(pRepo, super, pCfg);
} }
} }
@ -84,10 +83,18 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
if (table == NULL) goto _err; if (table == NULL) goto _err;
// Register to meta // Register to meta
tsdbWLockRepoMeta(pRepo);
if (newSuper) { if (newSuper) {
if (tsdbAddTableToMeta(pRepo, super, true) < 0) goto _err; if (tsdbAddTableToMeta(pRepo, super, true, false) < 0) {
tsdbUnlockRepoMeta(pRepo);
goto _err;
}
} }
if (tsdbAddTableToMeta(pRepo, table, true) < 0) goto _err; if (tsdbAddTableToMeta(pRepo, table, true, false) < 0) {
tsdbUnlockRepoMeta(pRepo);
goto _err;
}
tsdbUnlockRepoMeta(pRepo);
// Write to memtable action // Write to memtable action
int tlen1 = (newSuper) ? tsdbGetTableEncodeSize(TSDB_UPDATE_META, super) : 0; int tlen1 = (newSuper) ? tsdbGetTableEncodeSize(TSDB_UPDATE_META, super) : 0;
@ -255,7 +262,7 @@ _err:
return NULL; return NULL;
} }
static int32_t colIdCompar(const void* left, const void* right) { static UNUSED_FUNC int32_t colIdCompar(const void* left, const void* right) {
int16_t colId = *(int16_t*) left; int16_t colId = *(int16_t*) left;
STColumn* p2 = (STColumn*) right; STColumn* p2 = (STColumn*) right;
@ -266,89 +273,118 @@ static int32_t colIdCompar(const void* left, const void* right) {
return (colId < p2->colId)? -1:1; return (colId < p2->colId)? -1:1;
} }
int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) { int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) {
STsdbRepo *pRepo = (STsdbRepo *)repo; STsdbRepo *pRepo = (STsdbRepo *)repo;
STsdbMeta *pMeta = pRepo->tsdbMeta; STsdbMeta *pMeta = pRepo->tsdbMeta;
STSchema * pNewSchema = NULL;
pMsg->uid = htobe64(pMsg->uid); pMsg->uid = htobe64(pMsg->uid);
pMsg->tid = htonl(pMsg->tid); pMsg->tid = htonl(pMsg->tid);
pMsg->tversion = htons(pMsg->tversion); pMsg->tversion = htons(pMsg->tversion);
pMsg->colId = htons(pMsg->colId); pMsg->colId = htons(pMsg->colId);
pMsg->bytes = htons(pMsg->bytes);
pMsg->tagValLen = htonl(pMsg->tagValLen); pMsg->tagValLen = htonl(pMsg->tagValLen);
pMsg->numOfTags = htons(pMsg->numOfTags); pMsg->numOfTags = htons(pMsg->numOfTags);
pMsg->schemaLen = htonl(pMsg->schemaLen); pMsg->schemaLen = htonl(pMsg->schemaLen);
assert(pMsg->schemaLen == sizeof(STColumn) * pMsg->numOfTags); for (int i = 0; i < pMsg->numOfTags; i++) {
STColumn *pTCol = (STColumn *)pMsg->data + i;
char* d = pMsg->data; pTCol->bytes = htons(pTCol->bytes);
for(int32_t i = 0; i < pMsg->numOfTags; ++i) { pTCol->colId = htons(pTCol->colId);
STColumn* pCol = (STColumn*) d;
pCol->colId = htons(pCol->colId);
pCol->bytes = htons(pCol->bytes);
pCol->offset = 0;
d += sizeof(STColumn);
} }
STable *pTable = tsdbGetTableByUid(pMeta, pMsg->uid); STable *pTable = tsdbGetTableByUid(pMeta, pMsg->uid);
if (pTable == NULL) { if (pTable == NULL || TABLE_TID(pTable) != pMsg->tid) {
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; tsdbError("vgId:%d failed to update table tag value since invalid table id %d uid %" PRIu64, REPO_ID(pRepo),
return -1; pMsg->tid, pMsg->uid);
}
if (TABLE_TID(pTable) != pMsg->tid) {
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1; return -1;
} }
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
tsdbError("vgId:%d failed to update tag value of table %s since its type is %d", REPO_ID(pRepo), tsdbError("vgId:%d try to update tag value of a non-child table, invalid action", REPO_ID(pRepo));
TABLE_CHAR_NAME(pTable), TABLE_TYPE(pTable));
terrno = TSDB_CODE_TDB_INVALID_ACTION; terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1; return -1;
} }
if (schemaVersion(tsdbGetTableTagSchema(pTable)) < pMsg->tversion) { if (schemaVersion(pTable->pSuper->tagSchema) > pMsg->tversion) {
tsdbDebug("vgId:%d server tag version %d is older than client tag version %d, try to config", REPO_ID(pRepo),
schemaVersion(tsdbGetTableTagSchema(pTable)), pMsg->tversion);
void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, pMsg->tid);
if (msg == NULL) return -1;
// Deal with error her
STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg);
STable * super = tsdbGetTableByUid(pMeta, pTableCfg->superUid);
ASSERT(super != NULL);
int32_t code = tsdbUpdateTable(pRepo, super, pTableCfg);
if (code != TSDB_CODE_SUCCESS) {
tsdbClearTableCfg(pTableCfg);
return code;
}
tsdbClearTableCfg(pTableCfg);
rpcFreeCont(msg);
}
STSchema *pTagSchema = tsdbGetTableTagSchema(pTable);
if (schemaVersion(pTagSchema) > pMsg->tversion) {
tsdbError( tsdbError(
"vgId:%d failed to update tag value of table %s since version out of date, client tag version %d server tag " "vgId:%d failed to update tag value of table %s since version out of date, client tag version %d server tag "
"version %d", "version %d",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->tagSchema)); REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->tagSchema));
return TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE; terrno = TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE;
return -1;
} }
if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) {
if (schemaVersion(pTable->pSuper->tagSchema) < pMsg->tversion) { // tag schema out of data,
tsdbDebug("vgId:%d need to update tag schema of table %s tid %d uid %" PRIu64
" since out of date, current version %d new version %d",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable),
schemaVersion(pTable->pSuper->tagSchema), pMsg->tversion);
STSchemaBuilder schemaBuilder = {0};
STColumn *pTCol = (STColumn *)pMsg->data;
ASSERT(pMsg->schemaLen % sizeof(STColumn) == 0 && pTCol[0].colId == colColId(schemaColAt(pTable->pSuper->tagSchema, 0)));
if (tdInitTSchemaBuilder(&schemaBuilder, pMsg->tversion) < 0) {
tsdbDebug("vgId:%d failed to update tag schema of table %s tid %d uid %" PRIu64 " since out of memory",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable));
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
for (int i = 0; i < (pMsg->schemaLen / sizeof(STColumn)); i++) {
if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, pTCol[i].colId, pTCol[i].bytes) < 0) {
tdDestroyTSchemaBuilder(&schemaBuilder);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
}
pNewSchema = tdGetSchemaFromBuilder(&schemaBuilder);
if (pNewSchema == NULL) {
tdDestroyTSchemaBuilder(&schemaBuilder);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
tdDestroyTSchemaBuilder(&schemaBuilder);
}
// Chage in memory
if (pNewSchema != NULL) { // change super table tag schema
taosWLockLatch(&(pTable->pSuper->latch));
STSchema *pOldSchema = pTable->pSuper->tagSchema;
pTable->pSuper->tagSchema = pNewSchema;
tdFreeSchema(pOldSchema);
taosWUnLockLatch(&(pTable->pSuper->latch));
}
bool isChangeIndexCol = (pMsg->colId == colColId(schemaColAt(pTable->pSuper->tagSchema, 0)));
// STColumn *pCol = bsearch(&(pMsg->colId), pMsg->data, pMsg->numOfTags, sizeof(STColumn), colIdCompar);
// ASSERT(pCol != NULL);
if (isChangeIndexCol) {
tsdbWLockRepoMeta(pRepo);
tsdbRemoveTableFromIndex(pMeta, pTable); tsdbRemoveTableFromIndex(pMeta, pTable);
} }
// TODO: remove table from index if it is the first column of tag taosWLockLatch(&(pTable->latch));
tdSetKVRowDataOfCol(&(pTable->tagVal), pMsg->colId, pMsg->type, POINTER_SHIFT(pMsg->data, pMsg->schemaLen));
// TODO: convert the tag schema from client, and then extract the type and bytes from schema according to colId taosWUnLockLatch(&(pTable->latch));
STColumn* res = bsearch(&pMsg->colId, pMsg->data, pMsg->numOfTags, sizeof(STColumn), colIdCompar); if (isChangeIndexCol) {
assert(res != NULL); tsdbAddTableIntoIndex(pMeta, pTable, false);
tsdbUnlockRepoMeta(pRepo);
tdSetKVRowDataOfCol(&pTable->tagVal, pMsg->colId, res->type, pMsg->data + pMsg->schemaLen);
if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) {
tsdbAddTableIntoIndex(pMeta, pTable);
} }
return TSDB_CODE_SUCCESS;
// Update on file
int tlen1 = (pNewSchema) ? tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable->pSuper) : 0;
int tlen2 = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable);
void *buf = tsdbAllocBytes(pRepo, tlen1+tlen2);
ASSERT(buf != NULL);
if (pNewSchema) {
void *pBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable->pSuper);
ASSERT(POINTER_DISTANCE(pBuf, buf) == tlen1);
buf = pBuf;
}
tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable);
return 0;
} }
// ------------------ INTERNAL FUNCTIONS ------------------ // ------------------ INTERNAL FUNCTIONS ------------------
@ -449,18 +485,6 @@ int tsdbCloseMeta(STsdbRepo *pRepo) {
return 0; return 0;
} }
STSchema *tsdbGetTableSchema(STable *pTable) {
if (pTable->type == TSDB_NORMAL_TABLE || pTable->type == TSDB_SUPER_TABLE || pTable->type == TSDB_STREAM_TABLE) {
return pTable->schema[pTable->numOfSchemas - 1];
} else if (pTable->type == TSDB_CHILD_TABLE) {
STable *pSuper = pTable->pSuper;
if (pSuper == NULL) return NULL;
return pSuper->schema[pSuper->numOfSchemas - 1];
} else {
return NULL;
}
}
STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) { STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) {
void *ptr = taosHashGet(pMeta->uidMap, (char *)(&uid), sizeof(uid)); void *ptr = taosHashGet(pMeta->uidMap, (char *)(&uid), sizeof(uid));
@ -470,68 +494,7 @@ STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) {
} }
STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t version) { STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t version) {
STable *pSearchTable = (pTable->type == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable; return tsdbGetTableSchemaImpl(pTable, true, false, version);
if (pSearchTable == NULL) return NULL;
void *ptr = taosbsearch(&version, pSearchTable->schema, pSearchTable->numOfSchemas, sizeof(STSchema *),
tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) return NULL;
return *(STSchema **)ptr;
}
STSchema *tsdbGetTableTagSchema(STable *pTable) {
if (pTable->type == TSDB_SUPER_TABLE) {
return pTable->tagSchema;
} else if (pTable->type == TSDB_CHILD_TABLE) {
STable *pSuper = pTable->pSuper;
if (pSuper == NULL) return NULL;
return pSuper->tagSchema;
} else {
return NULL;
}
}
int tsdbUpdateTable(STsdbRepo *pRepo, STable *pTable, STableCfg *pCfg) {
// TODO: this function can only be called when there is no query and commit on this table
ASSERT(TABLE_TYPE(pTable) != TSDB_CHILD_TABLE);
bool changed = false;
STsdbMeta *pMeta = pRepo->tsdbMeta;
if ((pTable->type == TSDB_SUPER_TABLE) && (schemaVersion(pTable->tagSchema) < schemaVersion(pCfg->tagSchema))) {
if (tsdbUpdateTableTagSchema(pTable, pCfg->tagSchema) < 0) {
tsdbError("vgId:%d failed to update table %s tag schema since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
changed = true;
}
STSchema *pTSchema = tsdbGetTableSchema(pTable);
if (schemaVersion(pTSchema) < schemaVersion(pCfg->schema)) {
if (pTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
pTable->schema[pTable->numOfSchemas++] = tdDupSchema(pCfg->schema);
} else {
ASSERT(pTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
STSchema *tSchema = tdDupSchema(pCfg->schema);
tdFreeSchema(pTable->schema[0]);
memmove(pTable->schema, pTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
pTable->schema[pTable->numOfSchemas - 1] = tSchema;
}
pMeta->maxRowBytes = MAX(pMeta->maxRowBytes, dataRowMaxBytesFromSchema(pCfg->schema));
pMeta->maxCols = MAX(pMeta->maxCols, schemaNCols(pCfg->schema));
changed = true;
}
if (changed) {
int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable);
void *buf = tsdbAllocBytes(pRepo, tlen);
tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable);
}
return 0;
} }
int tsdbWLockRepoMeta(STsdbRepo *pRepo) { int tsdbWLockRepoMeta(STsdbRepo *pRepo) {
@ -575,7 +538,7 @@ void tsdbRefTable(STable *pTable) {
void tsdbUnRefTable(STable *pTable) { void tsdbUnRefTable(STable *pTable) {
int32_t ref = T_REF_DEC(pTable); int32_t ref = T_REF_DEC(pTable);
tsdbTrace("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref); tsdbDebug("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref);
if (ref == 0) { if (ref == 0) {
// tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable)); // tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable));
@ -587,17 +550,36 @@ void tsdbUnRefTable(STable *pTable) {
} }
} }
// ------------------ LOCAL FUNCTIONS ------------------ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema, bool insertAct) {
static int tsdbCompareSchemaVersion(const void *key1, const void *key2) { ASSERT(TABLE_TYPE(pTable) != TSDB_STREAM_TABLE && TABLE_TYPE(pTable) != TSDB_SUPER_TABLE);
if (*(int16_t *)key1 < schemaVersion(*(STSchema **)key2)) { STsdbMeta *pMeta = pRepo->tsdbMeta;
return -1;
} else if (*(int16_t *)key1 > schemaVersion(*(STSchema **)key2)) { STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
return 1; ASSERT(schemaVersion(pSchema) > schemaVersion(pCTable->schema[pCTable->numOfSchemas - 1]));
taosWLockLatch(&(pCTable->latch));
if (pCTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
pCTable->schema[pCTable->numOfSchemas++] = pSchema;
} else { } else {
return 0; ASSERT(pCTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
tdFreeSchema(pCTable->schema[0]);
memmove(pCTable->schema, pCTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
pCTable->schema[pCTable->numOfSchemas - 1] = pSchema;
}
if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema);
if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema);
taosWUnLockLatch(&(pCTable->latch));
if (insertAct) {
int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pCTable);
void *buf = tsdbAllocBytes(pRepo, tlen);
ASSERT(buf != NULL);
tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable);
} }
} }
// ------------------ LOCAL FUNCTIONS ------------------
static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) { static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
STsdbRepo *pRepo = (STsdbRepo *)pHandle; STsdbRepo *pRepo = (STsdbRepo *)pHandle;
STable * pTable = NULL; STable * pTable = NULL;
@ -609,7 +591,7 @@ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
tsdbDecodeTable(cont, &pTable); tsdbDecodeTable(cont, &pTable);
if (tsdbAddTableToMeta(pRepo, pTable, false) < 0) { if (tsdbAddTableToMeta(pRepo, pTable, false, false) < 0) {
tsdbFreeTable(pTable); tsdbFreeTable(pTable);
return -1; return -1;
} }
@ -627,7 +609,7 @@ static void tsdbOrgMeta(void *pHandle) {
for (int i = 1; i < pCfg->maxTables; i++) { for (int i = 1; i < pCfg->maxTables; i++) {
STable *pTable = pMeta->tables[i]; STable *pTable = pMeta->tables[i];
if (pTable != NULL && pTable->type == TSDB_CHILD_TABLE) { if (pTable != NULL && pTable->type == TSDB_CHILD_TABLE) {
tsdbAddTableIntoIndex(pMeta, pTable); tsdbAddTableIntoIndex(pMeta, pTable, true);
} }
} }
} }
@ -737,7 +719,7 @@ _err:
static void tsdbFreeTable(STable *pTable) { static void tsdbFreeTable(STable *pTable) {
if (pTable) { if (pTable) {
tsdbDebug("table %s is destroyed", TABLE_CHAR_NAME(pTable)); if (pTable->name != NULL) tsdbDebug("table %s is destroyed", TABLE_CHAR_NAME(pTable));
tfree(TABLE_NAME(pTable)); tfree(TABLE_NAME(pTable));
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) { for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) {
@ -757,25 +739,10 @@ static void tsdbFreeTable(STable *pTable) {
} }
} }
static int tsdbUpdateTableTagSchema(STable *pTable, STSchema *newSchema) { static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, bool lock) {
ASSERT(pTable->type == TSDB_SUPER_TABLE);
ASSERT(schemaVersion(pTable->tagSchema) < schemaVersion(newSchema));
STSchema *pOldSchema = pTable->tagSchema;
STSchema *pNewSchema = tdDupSchema(newSchema);
if (pNewSchema == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
pTable->tagSchema = pNewSchema;
tdFreeSchema(pOldSchema);
return 0;
}
static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
STsdbMeta *pMeta = pRepo->tsdbMeta; STsdbMeta *pMeta = pRepo->tsdbMeta;
if (addIdx && tsdbWLockRepoMeta(pRepo) < 0) { if (lock && tsdbWLockRepoMeta(pRepo) < 0) {
tsdbError("vgId:%d failed to add table %s to meta since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tsdbError("vgId:%d failed to add table %s to meta since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno)); tstrerror(terrno));
return -1; return -1;
@ -790,7 +757,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
} }
} else { } else {
if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE && addIdx) { // add STABLE to the index if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE && addIdx) { // add STABLE to the index
if (tsdbAddTableIntoIndex(pMeta, pTable) < 0) { if (tsdbAddTableIntoIndex(pMeta, pTable, true) < 0) {
tsdbDebug("vgId:%d failed to add table %s to meta while add table to index since %s", REPO_ID(pRepo), tsdbDebug("vgId:%d failed to add table %s to meta while add table to index since %s", REPO_ID(pRepo),
TABLE_CHAR_NAME(pTable), tstrerror(terrno)); TABLE_CHAR_NAME(pTable), tstrerror(terrno));
goto _err; goto _err;
@ -809,14 +776,15 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
} }
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
STSchema *pSchema = tsdbGetTableSchema(pTable); STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema); if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema);
if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema); if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema);
} }
if (addIdx && tsdbUnlockRepoMeta(pRepo) < 0) return -1; if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1;
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) { if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, tsdbGetTableSchema(pTable)); pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql,
tsdbGetTableSchemaImpl(pTable, false, false, -1));
} }
tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
@ -825,7 +793,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
_err: _err:
tsdbRemoveTableFromMeta(pRepo, pTable, false, false); tsdbRemoveTableFromMeta(pRepo, pTable, false, false);
if (addIdx) tsdbUnlockRepoMeta(pRepo); if (lock) tsdbUnlockRepoMeta(pRepo);
return -1; return -1;
} }
@ -836,7 +804,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
STable * tTable = NULL; STable * tTable = NULL;
STsdbCfg * pCfg = &(pRepo->config); STsdbCfg * pCfg = &(pRepo->config);
STSchema *pSchema = tsdbGetTableSchema(pTable); STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
int maxCols = schemaNCols(pSchema); int maxCols = schemaNCols(pSchema);
int maxRowBytes = schemaTLen(pSchema); int maxRowBytes = schemaTLen(pSchema);
@ -870,7 +838,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
for (int i = 0; i < pCfg->maxTables; i++) { for (int i = 0; i < pCfg->maxTables; i++) {
STable *pTable = pMeta->tables[i]; STable *pTable = pMeta->tables[i];
if (pTable != NULL) { if (pTable != NULL) {
pSchema = tsdbGetTableSchema(pTable); pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
maxCols = MAX(maxCols, schemaNCols(pSchema)); maxCols = MAX(maxCols, schemaNCols(pSchema));
maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema)); maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema));
} }
@ -882,7 +850,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
tsdbUnRefTable(pTable); tsdbUnRefTable(pTable);
} }
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) { static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper) {
ASSERT(pTable->type == TSDB_CHILD_TABLE && pTable != NULL); ASSERT(pTable->type == TSDB_CHILD_TABLE && pTable != NULL);
STable *pSTable = tsdbGetTableByUid(pMeta, TABLE_SUID(pTable)); STable *pSTable = tsdbGetTableByUid(pMeta, TABLE_SUID(pTable));
ASSERT(pSTable != NULL); ASSERT(pSTable != NULL);
@ -906,7 +874,7 @@ static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) {
memcpy(SL_GET_NODE_DATA(pNode), &pTable, sizeof(STable *)); memcpy(SL_GET_NODE_DATA(pNode), &pTable, sizeof(STable *));
tSkipListPut(pSTable->pIndex, pNode); tSkipListPut(pSTable->pIndex, pNode);
T_REF_INC(pSTable); if (refSuper) T_REF_INC(pSTable);
return 0; return 0;
} }
@ -1274,4 +1242,4 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) {
} }
return 0; return 0;
} }
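
Note: tsdbUpdateTableSchema above appends a new schema version under the table's write latch and, when the history is full, evicts the oldest entry. A simplified sketch of that bounded-history update, modelling the latch with a pthread rwlock; stand-in types, and unlike the real code it does not free the evicted schema or persist an update action:

#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define MAX_SCHEMAS 16                     /* stand-in for TSDB_MAX_TABLE_SCHEMAS */

typedef struct { int16_t version; } Schema;

typedef struct {
  pthread_rwlock_t latch;
  Schema          *schemas[MAX_SCHEMAS];   /* ascending by version */
  int              numOfSchemas;
} Table;

static void updateSchema(Table *t, Schema *s) {
  pthread_rwlock_wrlock(&t->latch);
  if (t->numOfSchemas < MAX_SCHEMAS) {
    t->schemas[t->numOfSchemas++] = s;
  } else {
    /* history full: drop the oldest version, shift the rest down, append the new one */
    memmove(t->schemas, t->schemas + 1, sizeof(Schema *) * (MAX_SCHEMAS - 1));
    t->schemas[MAX_SCHEMAS - 1] = s;
  }
  pthread_rwlock_unlock(&t->latch);
}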

View File

@ -19,6 +19,7 @@
#include "tcoding.h" #include "tcoding.h"
#include "tscompression.h" #include "tscompression.h"
#include "tsdbMain.h" #include "tsdbMain.h"
#include "tfile.h"
#define TSDB_GET_COMPCOL_LEN(nCols) (sizeof(SCompData) + sizeof(SCompCol) * (nCols) + sizeof(TSCKSUM)) #define TSDB_GET_COMPCOL_LEN(nCols) (sizeof(SCompData) + sizeof(SCompCol) * (nCols) + sizeof(TSCKSUM))
@ -217,7 +218,7 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
pHelper->tableInfo.tid = pTable->tableId.tid; pHelper->tableInfo.tid = pTable->tableId.tid;
pHelper->tableInfo.uid = pTable->tableId.uid; pHelper->tableInfo.uid = pTable->tableId.uid;
STSchema *pSchema = tsdbGetTableSchema(pTable); STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
pHelper->tableInfo.sversion = schemaVersion(pSchema); pHelper->tableInfo.sversion = schemaVersion(pSchema);
tdInitDataCols(pHelper->pDataCols[0], pSchema); tdInitDataCols(pHelper->pDataCols[0], pSchema);

View File

@ -65,7 +65,7 @@ typedef struct {
int64_t totalSize; // total allocated buffer in this hash table, SCacheObj is not included. int64_t totalSize; // total allocated buffer in this hash table, SCacheObj is not included.
int64_t refreshTime; int64_t refreshTime;
STrashElem * pTrash; STrashElem * pTrash;
const char * cacheName; char* name;
// void * tmrCtrl; // void * tmrCtrl;
// void * pTimer; // void * pTimer;
SCacheStatis statistics; SCacheStatis statistics;
@ -163,8 +163,9 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove);
/** /**
* move all data node into trash, clear node in trash can if it is not referenced by any clients * move all data node into trash, clear node in trash can if it is not referenced by any clients
* @param handle * @param handle
* @param _remove remove the data or not if refcount is greater than 0
*/ */
void taosCacheEmpty(SCacheObj *pCacheObj); void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove);
/** /**
* release all allocated memory and destroy the cache object. * release all allocated memory and destroy the cache object.

src/util/inc/tfile.h (new file, 31 lines)
View File

@ -0,0 +1,31 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TFILE_H
#define TDENGINE_TFILE_H
#ifdef TAOS_RANDOM_FILE_FAIL
ssize_t taos_tread(int fd, void *buf, size_t count);
ssize_t taos_twrite(int fd, void *buf, size_t count);
off_t taos_lseek(int fd, off_t offset, int whence);
#define tread(fd, buf, count) taos_tread(fd, buf, count)
#define twrite(fd, buf, count) taos_twrite(fd, buf, count)
#define lseek(fd, offset, whence) taos_lseek(fd, offset, whence)
#endif // TAOS_RANDOM_FILE_FAIL
#endif // TDENGINE_TFILE_H
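
Note: tfile.h above only declares the taos_tread/taos_twrite/taos_lseek wrappers and redirects the standard names to them when TAOS_RANDOM_FILE_FAIL is defined; the implementation is not part of this excerpt. Purely as an assumption, a fault-injection wrapper of this kind could look roughly like the sketch below:

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

/* Illustrative only: fail roughly 1% of writes with EIO, otherwise delegate
 * to the real write(2). The actual taos_twrite in the commit may differ. */
ssize_t taos_twrite_sketch(int fd, void *buf, size_t count) {
  if (rand() % 100 == 0) {
    errno = EIO;
    return -1;
  }
  return write(fd, buf, count);
}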

View File

@ -26,6 +26,7 @@ extern "C" {
#define DEBUG_INFO DEBUG_WARN #define DEBUG_INFO DEBUG_WARN
#define DEBUG_DEBUG 4U #define DEBUG_DEBUG 4U
#define DEBUG_TRACE 8U #define DEBUG_TRACE 8U
#define DEBUG_DUMP 16U
#define DEBUG_SCREEN 64U #define DEBUG_SCREEN 64U
#define DEBUG_FILE 128U #define DEBUG_FILE 128U

View File

@ -119,9 +119,8 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
int32_t size = pNode->size; int32_t size = pNode->size;
taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize);
uDebug("key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes, cacheName:%s", uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes",
pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, size, pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, size);
pCacheObj->cacheName);
if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data); if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data);
free(pNode); free(pNode);
} }
@ -226,7 +225,7 @@ static void doCleanupDataCache(SCacheObj *pCacheObj);
*/ */
static void* taosCacheRefresh(void *handle); static void* taosCacheRefresh(void *handle);
SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) { SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) {
if (refreshTimeInSeconds <= 0) { if (refreshTimeInSeconds <= 0) {
return NULL; return NULL;
} }
@ -238,7 +237,7 @@ SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bo
} }
pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false); pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false);
pCacheObj->cacheName = cacheName; pCacheObj->name = strdup(cacheName);
if (pCacheObj->pHashTable == NULL) { if (pCacheObj->pHashTable == NULL) {
free(pCacheObj); free(pCacheObj);
uError("failed to allocate memory, reason:%s", strerror(errno)); uError("failed to allocate memory, reason:%s", strerror(errno));
@ -268,10 +267,6 @@ SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bo
return pCacheObj; return pCacheObj;
} }
SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) {
return taosCacheInitWithCb(keyType, refreshTimeInSeconds, extendLifespan, fn, cacheName);
}
void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int duration) { void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int duration) {
SCacheDataNode *pNode; SCacheDataNode *pNode;
@ -288,16 +283,16 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
if (NULL != pNode) { if (NULL != pNode) {
pCacheObj->totalSize += pNode->size; pCacheObj->totalSize += pNode->size;
uDebug("key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64 uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64
"bytes size:%" PRId64 "bytes, cacheName:%s", "bytes size:%" PRId64 "bytes",
key, pNode->data, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime), pCacheObj->name, key, pNode->data, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime),
(int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, dataSize, pCacheObj->cacheName); (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, dataSize);
} else { } else {
uError("key:%p, failed to added into cache, out of memory, cacheName:%s", key, pCacheObj->cacheName); uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key);
} }
} else { // old data exists, update the node } else { // old data exists, update the node
pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L); pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L);
uDebug("key:%p, %p exist in cache, updated, cacheName:%s", key, pNode->data, pCacheObj->cacheName); uDebug("cache:%s, key:%p, %p exist in cache, updated", pCacheObj->name, key, pNode->data);
} }
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
@ -332,10 +327,10 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
if (ptNode != NULL) { if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uDebug("key:%p, %p is retrieved from cache, refcnt:%d, cacheName:%s", key, (*ptNode)->data, ref, pCacheObj->cacheName); uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, (*ptNode)->data, ref);
} else { } else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1); atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uDebug("key:%p, not in cache, retrieved failed, cacheName:%s", key, pCacheObj->cacheName); uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
} }
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1); atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
@ -360,11 +355,11 @@ void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t ke
if (ptNode != NULL) { if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uDebug("key:%p, %p expireTime is updated in cache, refcnt:%d, cacheName:%s", key, (*ptNode)->data, uDebug("cache:%s, key:%p, %p expireTime is updated in cache, refcnt:%d", pCacheObj->name, key,
T_REF_VAL_GET(*ptNode), pCacheObj->cacheName); (*ptNode)->data, T_REF_VAL_GET(*ptNode));
} else { } else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1); atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uDebug("key:%p, not in cache, retrieved failed, cacheName:%s", key, pCacheObj->cacheName); uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
} }
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1); atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
@ -383,7 +378,7 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
} }
int32_t ref = T_REF_INC(ptNode); int32_t ref = T_REF_INC(ptNode);
uDebug("%p acquired by data in cache, refcnt:%d, cacheName:%s", ptNode->data, ref, pCacheObj->cacheName); uDebug("cache:%s, data: %p acquired by data in cache, refcnt:%d", pCacheObj->name, ptNode->data, ref);
// if the remained life span is less then the (*ptNode)->lifeSpan, add up one lifespan // if the remained life span is less then the (*ptNode)->lifeSpan, add up one lifespan
if (pCacheObj->extendLifespan) { if (pCacheObj->extendLifespan) {
@ -391,7 +386,7 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
if ((now - ptNode->addedTime) < ptNode->lifespan * ptNode->extendFactor) { if ((now - ptNode->addedTime) < ptNode->lifespan * ptNode->extendFactor) {
ptNode->extendFactor += 1; ptNode->extendFactor += 1;
uDebug("%p extend life time to %" PRId64, ptNode->data, uDebug("cache:%s, %p extend life time to %" PRId64, pCacheObj->name, ptNode->data,
ptNode->lifespan * ptNode->extendFactor + ptNode->addedTime); ptNode->lifespan * ptNode->extendFactor + ptNode->addedTime);
} }
} }
@ -437,7 +432,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
*data = NULL; *data = NULL;
int16_t ref = T_REF_DEC(pNode); int16_t ref = T_REF_DEC(pNode);
uDebug("key:%p, %p is released, refcnt:%d, cacheName:%s", pNode->key, pNode->data, ref, pCacheObj->cacheName); uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
if (_remove && (!pNode->inTrashCan)) { if (_remove && (!pNode->inTrashCan)) {
__cache_wr_lock(pCacheObj); __cache_wr_lock(pCacheObj);
@ -455,7 +450,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
} }
} }
void taosCacheEmpty(SCacheObj *pCacheObj) { void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) {
SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
__cache_wr_lock(pCacheObj); __cache_wr_lock(pCacheObj);
@ -465,12 +460,16 @@ void taosCacheEmpty(SCacheObj *pCacheObj) {
} }
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
taosCacheMoveToTrash(pCacheObj, pNode); if (T_REF_VAL_GET(pNode) == 0 || _remove) {
taosCacheReleaseNode(pCacheObj, pNode);
} else {
taosCacheMoveToTrash(pCacheObj, pNode);
}
} }
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
taosHashDestroyIter(pIter); taosHashDestroyIter(pIter);
taosTrashCanEmpty(pCacheObj, false); taosTrashCanEmpty(pCacheObj, _remove);
} }
void taosCacheCleanup(SCacheObj *pCacheObj) { void taosCacheCleanup(SCacheObj *pCacheObj) {
@ -481,7 +480,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
pCacheObj->deleting = 1; pCacheObj->deleting = 1;
pthread_join(pCacheObj->refreshWorker, NULL); pthread_join(pCacheObj->refreshWorker, NULL);
uInfo("cacheName:%p, will be cleanuped", pCacheObj->cacheName); uInfo("cache:%s will be cleaned up", pCacheObj->name);
doCleanupDataCache(pCacheObj); doCleanupDataCache(pCacheObj);
} }
@ -601,22 +600,25 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
while (taosHashIterNext(pIter)) { while (taosHashIterNext(pIter)) {
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
// if (pNode->expiredTime <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
if (T_REF_VAL_GET(pNode) <= 0) { int32_t c = T_REF_VAL_GET(pNode);
if (c <= 0) {
taosCacheReleaseNode(pCacheObj, pNode); taosCacheReleaseNode(pCacheObj, pNode);
} else { } else {
uDebug("key:%p, %p will not remove from cache, refcnt:%d, cacheName:%s", pNode->key, pNode->data, uDebug("cache:%s key:%p, %p will not remove from cache, refcnt:%d", pCacheObj->name, pNode->key,
T_REF_VAL_GET(pNode), pCacheObj->cacheName); pNode->data, T_REF_VAL_GET(pNode));
} }
} }
taosHashDestroyIter(pIter); taosHashDestroyIter(pIter);
taosHashCleanup(pCacheObj->pHashTable); // todo memory leak if there are object with refcount greater than 0 in hash table?
taosHashCleanup(pCacheObj->pHashTable);
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
taosTrashCanEmpty(pCacheObj, true); taosTrashCanEmpty(pCacheObj, true);
__cache_lock_destroy(pCacheObj); __cache_lock_destroy(pCacheObj);
tfree(pCacheObj->name);
memset(pCacheObj, 0, sizeof(SCacheObj)); memset(pCacheObj, 0, sizeof(SCacheObj));
free(pCacheObj); free(pCacheObj);
} }
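Note on the reworked cache API above: taosCacheInitWithCb is folded into taosCacheInit, the cache name is now strdup'd into pCacheObj->name, and taosCacheEmpty takes an explicit _remove flag. A minimal hedged usage sketch, built only from the signatures visible in these hunks (the key, value and durations are illustrative, not TDengine code):

/* hedged sketch: assumes the tcache.h header is included; values are made up */
void demo_cache_usage(void) {
  SCacheObj *pCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, 3 /* refresh secs */,
                                    true /* extend lifespan */, NULL /* free fn */, "demo-cache");
  if (pCache == NULL) return;

  int64_t key   = 1;
  int32_t value = 42;
  void *item = taosCachePut(pCache, &key, sizeof(key), &value, sizeof(value), 10 /* secs */);
  if (item != NULL) {
    taosCacheRelease(pCache, &item, false);  /* false: keep the node in the cache */
  }

  taosCacheEmpty(pCache, true);              /* true: also reclaim nodes still holding refs */
  taosCacheCleanup(pCache);                  /* frees pCacheObj->name and the object itself */
}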

src/util/src/tfile.c (new file, 65 lines)
View File

@ -0,0 +1,65 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <error.h>
#include <errno.h>
#include <stdarg.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "os.h"
#define RANDOM_FILE_FAIL_FACTOR 5
ssize_t taos_tread(int fd, void *buf, size_t count)
{
#ifdef TAOS_RANDOM_FILE_FAIL
if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
errno = EIO;
return -1;
}
#endif
return tread(fd, buf, count);
}
ssize_t taos_twrite(int fd, void *buf, size_t count)
{
#ifdef TAOS_RANDOM_FILE_FAIL
if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
errno = EIO;
return -1;
}
#endif
return twrite(fd, buf, count);
}
off_t taos_lseek(int fd, off_t offset, int whence)
{
#ifdef TAOS_RANDOM_FILE_FAIL
if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
errno = EIO;
return -1;
}
#endif
return lseek(fd, offset, whence);
}
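The new tfile.c above only injects failures; callers see them as ordinary I/O errors. A short hedged caller sketch (demo_read_file is illustrative and not part of the patch): with TAOS_RANDOM_FILE_FAIL defined, roughly one call in RANDOM_FILE_FAIL_FACTOR returns -1 with errno set to EIO, and the caller must take its normal error path.

/* hedged sketch: assumes taos_tread is declared via tfile.h */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int demo_read_file(const char *path, void *buf, size_t cap) {
  int fd = open(path, O_RDONLY);
  if (fd < 0) return -1;

  ssize_t n = taos_tread(fd, buf, cap);  /* may randomly fail with EIO */
  if (n < 0) {
    int err = errno;                     /* preserve errno across close() */
    close(fd);
    errno = err;
    return -1;                           /* handled exactly like a real disk error */
  }

  close(fd);
  return (int)n;
}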

View File

@ -27,6 +27,7 @@
#include "tcoding.h" #include "tcoding.h"
#include "tkvstore.h" #include "tkvstore.h"
#include "tulog.h" #include "tulog.h"
#include "tfile.h"
#define TD_KVSTORE_HEADER_SIZE 512 #define TD_KVSTORE_HEADER_SIZE 512
#define TD_KVSTORE_MAJOR_VERSION 1 #define TD_KVSTORE_MAJOR_VERSION 1
@ -581,4 +582,4 @@ _err:
taosHashDestroyIter(pIter); taosHashDestroyIter(pIter);
tfree(buf); tfree(buf);
return -1; return -1;
} }

View File

@ -53,7 +53,7 @@ typedef struct {
STsdbCfg tsdbCfg; STsdbCfg tsdbCfg;
SSyncCfg syncCfg; SSyncCfg syncCfg;
SWalCfg walCfg; SWalCfg walCfg;
void *qHandlePool; // query handle pool void *qMgmt;
char *rootDir; char *rootDir;
char db[TSDB_DB_NAME_LEN]; char db[TSDB_DB_NAME_LEN];
} SVnodeObj; } SVnodeObj;

View File

@ -34,8 +34,7 @@
#define TSDB_VNODE_VERSION_CONTENT_LEN 31 #define TSDB_VNODE_VERSION_CONTENT_LEN 31
static int32_t tsOpennedVnodes; static SHashObj*tsDnodeVnodesHash;
static void *tsDnodeVnodesHash;
static void vnodeCleanUp(SVnodeObj *pVnode); static void vnodeCleanUp(SVnodeObj *pVnode);
static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg); static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg);
static int32_t vnodeReadCfg(SVnodeObj *pVnode); static int32_t vnodeReadCfg(SVnodeObj *pVnode);
@ -46,9 +45,6 @@ static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uin
static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index); static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
static void vnodeNotifyRole(void *ahandle, int8_t role); static void vnodeNotifyRole(void *ahandle, int8_t role);
static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion); static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion);
static void vnodeFreeqHandle(void* phandle);
static pthread_once_t vnodeModuleInit = PTHREAD_ONCE_INIT;
#ifndef _SYNC #ifndef _SYNC
tsync_h syncStart(const SSyncInfo *info) { return NULL; } tsync_h syncStart(const SSyncInfo *info) { return NULL; }
@ -59,19 +55,28 @@ int syncGetNodesRole(tsync_h shandle, SNodesRole * cfg) { return 0; }
void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {} void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {}
#endif #endif
static void vnodeInit() { int32_t vnodeInitResources() {
vnodeInitWriteFp(); vnodeInitWriteFp();
vnodeInitReadFp(); vnodeInitReadFp();
tsDnodeVnodesHash = taosHashInit(TSDB_MAX_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true); tsDnodeVnodesHash = taosHashInit(TSDB_MAX_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true);
if (tsDnodeVnodesHash == NULL) { if (tsDnodeVnodesHash == NULL) {
vError("failed to init vnode list"); vError("failed to init vnode list");
return TSDB_CODE_VND_OUT_OF_MEMORY;
}
return TSDB_CODE_SUCCESS;
}
void vnodeCleanupResources() {
if (tsDnodeVnodesHash != NULL) {
taosHashCleanup(tsDnodeVnodesHash);
tsDnodeVnodesHash = NULL;
} }
} }
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
int32_t code; int32_t code;
pthread_once(&vnodeModuleInit, vnodeInit);
SVnodeObj *pTemp = (SVnodeObj *)taosHashGet(tsDnodeVnodesHash, (const char *)&pVnodeCfg->cfg.vgId, sizeof(int32_t)); SVnodeObj *pTemp = (SVnodeObj *)taosHashGet(tsDnodeVnodesHash, (const char *)&pVnodeCfg->cfg.vgId, sizeof(int32_t));
if (pTemp != NULL) { if (pTemp != NULL) {
@ -139,11 +144,6 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
} }
int32_t vnodeDrop(int32_t vgId) { int32_t vnodeDrop(int32_t vgId) {
if (tsDnodeVnodesHash == NULL) {
vDebug("vgId:%d, failed to drop, vgId not exist", vgId);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
}
SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t)); SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t));
if (ppVnode == NULL || *ppVnode == NULL) { if (ppVnode == NULL || *ppVnode == NULL) {
vDebug("vgId:%d, failed to drop, vgId not find", vgId); vDebug("vgId:%d, failed to drop, vgId not find", vgId);
@ -160,6 +160,13 @@ int32_t vnodeDrop(int32_t vgId) {
int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) { int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
SVnodeObj *pVnode = param; SVnodeObj *pVnode = param;
if (pVnode->status != TAOS_VN_STATUS_READY)
return TSDB_CODE_VND_INVALID_STATUS;
if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED)
return TSDB_CODE_VND_NOT_SYNCED;
pVnode->status = TAOS_VN_STATUS_UPDATING; pVnode->status = TAOS_VN_STATUS_UPDATING;
int32_t code = vnodeSaveCfg(pVnodeCfg); int32_t code = vnodeSaveCfg(pVnodeCfg);
@ -182,7 +189,6 @@ int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
int32_t vnodeOpen(int32_t vnode, char *rootDir) { int32_t vnodeOpen(int32_t vnode, char *rootDir) {
char temp[TSDB_FILENAME_LEN]; char temp[TSDB_FILENAME_LEN];
pthread_once(&vnodeModuleInit, vnodeInit);
SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1); SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1);
if (pVnode == NULL) { if (pVnode == NULL) {
@ -190,7 +196,6 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
return TAOS_SYSTEM_ERROR(errno); return TAOS_SYSTEM_ERROR(errno);
} }
atomic_add_fetch_32(&tsOpennedVnodes, 1);
atomic_add_fetch_32(&pVnode->refCount, 1); atomic_add_fetch_32(&pVnode->refCount, 1);
pVnode->vgId = vnode; pVnode->vgId = vnode;
@ -283,9 +288,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
if (pVnode->role == TAOS_SYNC_ROLE_MASTER) if (pVnode->role == TAOS_SYNC_ROLE_MASTER)
cqStart(pVnode->cq); cqStart(pVnode->cq);
const int32_t REFRESH_HANDLE_INTERVAL = 2; // every 2 seconds, rfresh handle pool pVnode->qMgmt = qOpenQueryMgmt(pVnode->vgId);
pVnode->qHandlePool = taosCacheInit(TSDB_DATA_TYPE_BIGINT, REFRESH_HANDLE_INTERVAL, true, vnodeFreeqHandle, "qhandle");
pVnode->events = NULL; pVnode->events = NULL;
pVnode->status = TAOS_VN_STATUS_READY; pVnode->status = TAOS_VN_STATUS_READY;
vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode); vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode);
@ -296,7 +299,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
} }
int32_t vnodeStartStream(int32_t vnode) { int32_t vnodeStartStream(int32_t vnode) {
SVnodeObj* pVnode = vnodeAccquireVnode(vnode); SVnodeObj* pVnode = vnodeAcquireVnode(vnode);
if (pVnode != NULL) { if (pVnode != NULL) {
tsdbStartStream(pVnode->tsdb); tsdbStartStream(pVnode->tsdb);
vnodeRelease(pVnode); vnodeRelease(pVnode);
@ -328,6 +331,9 @@ void vnodeRelease(void *pVnodeRaw) {
return; return;
} }
qCleanupQueryMgmt(pVnode->qMgmt);
pVnode->qMgmt = NULL;
if (pVnode->tsdb) if (pVnode->tsdb)
tsdbCloseRepo(pVnode->tsdb, 1); tsdbCloseRepo(pVnode->tsdb, 1);
pVnode->tsdb = NULL; pVnode->tsdb = NULL;
@ -360,19 +366,11 @@ void vnodeRelease(void *pVnodeRaw) {
free(pVnode); free(pVnode);
int32_t count = atomic_sub_fetch_32(&tsOpennedVnodes, 1); int32_t count = taosHashGetSize(tsDnodeVnodesHash);
vDebug("vgId:%d, vnode is released, vnodes:%d", vgId, count); vDebug("vgId:%d, vnode is released, vnodes:%d", vgId, count);
if (count <= 0) {
taosHashCleanup(tsDnodeVnodesHash);
vnodeModuleInit = PTHREAD_ONCE_INIT;
tsDnodeVnodesHash = NULL;
}
} }
void *vnodeGetVnode(int32_t vgId) { void *vnodeGetVnode(int32_t vgId) {
if (tsDnodeVnodesHash == NULL) return NULL;
SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t)); SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t));
if (ppVnode == NULL || *ppVnode == NULL) { if (ppVnode == NULL || *ppVnode == NULL) {
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
@ -383,7 +381,7 @@ void *vnodeGetVnode(int32_t vgId) {
return *ppVnode; return *ppVnode;
} }
void *vnodeAccquireVnode(int32_t vgId) { void *vnodeAcquireVnode(int32_t vgId) {
SVnodeObj *pVnode = vnodeGetVnode(vgId); SVnodeObj *pVnode = vnodeGetVnode(vgId);
if (pVnode == NULL) return pVnode; if (pVnode == NULL) return pVnode;
@ -393,12 +391,21 @@ void *vnodeAccquireVnode(int32_t vgId) {
return pVnode; return pVnode;
} }
void *vnodeAcquireRqueue(void *param) {
SVnodeObj *pVnode = param;
if (pVnode == NULL) return NULL;
atomic_add_fetch_32(&pVnode->refCount, 1);
vDebug("vgId:%d, get vnode rqueue, refCount:%d", pVnode->vgId, pVnode->refCount);
return ((SVnodeObj *)pVnode)->rqueue;
}
void *vnodeGetRqueue(void *pVnode) { void *vnodeGetRqueue(void *pVnode) {
return ((SVnodeObj *)pVnode)->rqueue; return ((SVnodeObj *)pVnode)->rqueue;
} }
void *vnodeGetWqueue(int32_t vgId) { void *vnodeGetWqueue(int32_t vgId) {
SVnodeObj *pVnode = vnodeAccquireVnode(vgId); SVnodeObj *pVnode = vnodeAcquireVnode(vgId);
if (pVnode == NULL) return NULL; if (pVnode == NULL) return NULL;
return pVnode->wqueue; return pVnode->wqueue;
} }
@ -408,8 +415,11 @@ void *vnodeGetWal(void *pVnode) {
} }
static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) { static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) {
if (pVnode->status == TAOS_VN_STATUS_DELETING) return; if (pVnode->status != TAOS_VN_STATUS_READY) return;
if (pStatus->openVnodes >= TSDB_MAX_VNODES) return; if (pStatus->openVnodes >= TSDB_MAX_VNODES) return;
if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED) return;
if (pVnode->tsdb == NULL) return;
int64_t totalStorage, compStorage, pointsWritten = 0; int64_t totalStorage, compStorage, pointsWritten = 0;
tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage); tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage);
@ -424,6 +434,26 @@ static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) {
pLoad->replica = pVnode->syncCfg.replica; pLoad->replica = pVnode->syncCfg.replica;
} }
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash);
while (taosHashIterNext(pIter)) {
SVnodeObj **pVnode = taosHashIterGet(pIter);
if (pVnode == NULL) continue;
if (*pVnode == NULL) continue;
(*numOfVnodes)++;
if (*numOfVnodes >= TSDB_MAX_VNODES) {
vError("vgId:%d, too many open vnodes, exist:%d max:%d", (*pVnode)->vgId, *numOfVnodes, TSDB_MAX_VNODES);
continue;
} else {
vnodeList[*numOfVnodes - 1] = (*pVnode)->vgId;
}
}
taosHashDestroyIter(pIter);
return TSDB_CODE_SUCCESS;
}
void vnodeBuildStatusMsg(void *param) { void vnodeBuildStatusMsg(void *param) {
SDMStatusMsg *pStatus = param; SDMStatusMsg *pStatus = param;
SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash); SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash);
@ -442,7 +472,7 @@ void vnodeBuildStatusMsg(void *param) {
void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes) { void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes) {
for (int32_t i = 0; i < numOfVnodes; ++i) { for (int32_t i = 0; i < numOfVnodes; ++i) {
pAccess[i].vgId = htonl(pAccess[i].vgId); pAccess[i].vgId = htonl(pAccess[i].vgId);
SVnodeObj *pVnode = vnodeAccquireVnode(pAccess[i].vgId); SVnodeObj *pVnode = vnodeAcquireVnode(pAccess[i].vgId);
if (pVnode != NULL) { if (pVnode != NULL) {
pVnode->accessState = pAccess[i].accessState; pVnode->accessState = pAccess[i].accessState;
if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) { if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) {
@ -466,7 +496,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount); vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount);
// release local resources only after cutting off outside connections // release local resources only after cutting off outside connections
taosCacheCleanup(pVnode->qHandlePool); qSetQueryMgmtClosed(pVnode->qMgmt);
vnodeRelease(pVnode); vnodeRelease(pVnode);
} }
@ -872,12 +902,3 @@ PARSE_OVER:
if(fp) fclose(fp); if(fp) fclose(fp);
return terrno; return terrno;
} }
void vnodeFreeqHandle(void *qHandle) {
void** handle = qHandle;
if (handle == NULL || *handle == NULL) {
return;
}
qKillQuery(*handle);
}
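The hunks above replace the pthread_once based setup with explicit vnodeInitResources/vnodeCleanupResources calls and add vnodeGetVnodeList. A hedged sketch of how the dnode layer might drive them (the real dnode code is not in this diff, so the call order here is an assumption):

/* hedged sketch, not the actual dnode code */
static void demo_vnode_module(void) {
  if (vnodeInitResources() != TSDB_CODE_SUCCESS) return;  /* replaces the old pthread_once init */

  int32_t vnodeList[TSDB_MAX_VNODES] = {0};
  int32_t numOfVnodes = 0;
  vnodeGetVnodeList(vnodeList, &numOfVnodes);             /* collect the vgIds of open vnodes */

  for (int32_t i = 0; i < numOfVnodes; ++i) {
    SVnodeObj *pVnode = vnodeAcquireVnode(vnodeList[i]);  /* bumps refCount */
    if (pVnode != NULL) vnodeRelease(pVnode);             /* drop the reference again */
  }

  vnodeCleanupResources();                                /* frees tsDnodeVnodesHash */
}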

View File

@ -14,6 +14,7 @@
*/ */
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include <dnode.h>
#include "os.h" #include "os.h"
#include "tglobal.h" #include "tglobal.h"
@ -45,9 +46,9 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) {
return TSDB_CODE_VND_MSG_NOT_PROCESSED; return TSDB_CODE_VND_MSG_NOT_PROCESSED;
} }
if (pVnode->status == TAOS_VN_STATUS_DELETING || pVnode->status == TAOS_VN_STATUS_CLOSING) { if (pVnode->status != TAOS_VN_STATUS_READY) {
vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[msgType], pVnode->status); vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[msgType], pVnode->status);
return TSDB_CODE_VND_INVALID_VGROUP_ID; return TSDB_CODE_VND_INVALID_STATUS;
} }
// TODO: Later, let slave to support query // TODO: Later, let slave to support query
@ -73,18 +74,22 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
killQueryMsg->free = htons(killQueryMsg->free); killQueryMsg->free = htons(killQueryMsg->free);
killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle); killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle);
vWarn("QInfo:%p connection %p broken, kill query", (void*)killQueryMsg->qhandle, pReadMsg->rpcMsg.handle); void* handle = NULL;
if ((void**) killQueryMsg->qhandle != NULL) {
handle = *(void**) killQueryMsg->qhandle;
}
vWarn("QInfo:%p connection %p broken, kill query", handle, pReadMsg->rpcMsg.handle);
assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1); assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1);
// this message arrived here by means of the *query* message, so release the vnode is necessary void** qhandle = qAcquireQInfo(pVnode->qMgmt, (void**) killQueryMsg->qhandle);
void** qhandle = taosCacheAcquireByKey(pVnode->qHandlePool, (void*) &killQueryMsg->qhandle, sizeof(killQueryMsg->qhandle));
if (qhandle == NULL || *qhandle == NULL) { if (qhandle == NULL || *qhandle == NULL) {
vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle); vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
} else { } else {
taosCacheRelease(pVnode->qHandlePool, (void**) &qhandle, true); assert(qhandle == (void**) killQueryMsg->qhandle);
qReleaseQInfo(pVnode->qMgmt, (void**) &qhandle, true);
} }
vnodeRelease(pVnode);
return TSDB_CODE_TSC_QUERY_CANCELLED; return TSDB_CODE_TSC_QUERY_CANCELLED;
} }
@ -93,7 +98,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
void** handle = NULL; void** handle = NULL;
if (contLen != 0) { if (contLen != 0) {
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, vnodeRelease, &pQInfo); code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, NULL, &pQInfo);
SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp)); SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp));
pRsp->code = code; pRsp->code = code;
@ -105,25 +110,30 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
// current connect is broken // current connect is broken
if (code == TSDB_CODE_SUCCESS) { if (code == TSDB_CODE_SUCCESS) {
if (vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, pQInfo, pVnode->vgId) != TSDB_CODE_SUCCESS) { handle = qRegisterQInfo(pVnode->qMgmt, pQInfo);
vError("vgId:%d, QInfo:%p, dnode query discarded since link is broken, %p", pVnode->vgId, pQInfo, if (handle == NULL) { // failed to register qhandle
pReadMsg->rpcMsg.handle); pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE;
pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
// NOTE: there two refcount, needs to kill twice, todo refactor
// query has not been put into qhandle pool, kill it directly.
qKillQuery(pQInfo); qKillQuery(pQInfo);
qKillQuery(pQInfo); qKillQuery(pQInfo);
pQInfo = NULL;
return pRsp->code; } else {
assert(*handle == pQInfo);
pRsp->qhandle = htobe64((uint64_t) (handle));
} }
handle = taosCachePut(pVnode->qHandlePool, pQInfo, sizeof(pQInfo), &pQInfo, sizeof(pQInfo), tsShellActivityTimer * 2); if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
assert(*handle == pQInfo); vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, pQInfo, pReadMsg->rpcMsg.handle);
pRsp->qhandle = htobe64((uint64_t) (handle)); pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
// NOTE: there two refcount, needs to kill twice
// query has not been put into qhandle pool, kill it directly.
qKillQuery(pQInfo);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
return pRsp->code;
}
} else { } else {
assert(pQInfo == NULL); assert(pQInfo == NULL);
vnodeRelease(pVnode);
} }
vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo); vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo);
@ -138,9 +148,8 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
if (pQInfo != NULL) { if (pQInfo != NULL) {
qTableQuery(pQInfo); // do execute query qTableQuery(pQInfo); // do execute query
assert(handle != NULL); assert(handle != NULL);
taosCacheRelease(pVnode->qHandlePool, (void**) &handle, false); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
} }
return code; return code;
@ -159,7 +168,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
memset(pRet, 0, sizeof(SRspRet)); memset(pRet, 0, sizeof(SRspRet));
int32_t ret = 0; int32_t ret = 0;
void** handle = taosCacheAcquireByKey(pVnode->qHandlePool, pQInfo, sizeof(pQInfo)); void** handle = qAcquireQInfo(pVnode->qMgmt, pQInfo);
if (handle == NULL || handle != pQInfo) { if (handle == NULL || handle != pQInfo) {
ret = TSDB_CODE_QRY_INVALID_QHANDLE; ret = TSDB_CODE_QRY_INVALID_QHANDLE;
} }
@ -167,8 +176,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
if (pRetrieve->free == 1) { if (pRetrieve->free == 1) {
if (ret == TSDB_CODE_SUCCESS) { if (ret == TSDB_CODE_SUCCESS) {
vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo); vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
taosCacheRelease(pVnode->qHandlePool, (void**) &handle, true);
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
pRet->len = sizeof(SRetrieveTableRsp); pRet->len = sizeof(SRetrieveTableRsp);
@ -178,30 +187,30 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
pRsp->completed = true; pRsp->completed = true;
pRsp->useconds = 0; pRsp->useconds = 0;
} else { // todo handle error } else { // todo handle error
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
} }
return ret; return ret;
} }
vDebug("vgId:%d, QInfo:%p, retrieve msg is received", pVnode->vgId, *pQInfo);
int32_t code = qRetrieveQueryResultInfo(*pQInfo); int32_t code = qRetrieveQueryResultInfo(*pQInfo);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS || ret != TSDB_CODE_SUCCESS) {
//TODO //TODO
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
} else { } else {
// todo check code and handle error in build result set // todo check code and handle error in build result set
code = qDumpRetrieveResult(*pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len); code = qDumpRetrieveResult(*pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len);
if (qHasMoreResultsToRetrieve(*pQInfo)) { if (qHasMoreResultsToRetrieve(*handle)) {
dnodePutItemIntoReadQueue(pVnode, handle);
pRet->qhandle = handle; pRet->qhandle = handle;
code = TSDB_CODE_VND_ACTION_NEED_REPROCESSED; code = TSDB_CODE_SUCCESS;
} else { // no further execution invoked, release the ref to vnode } else { // no further execution invoked, release the ref to vnode
taosCacheRelease(pVnode->qHandlePool, (void**) &handle, true); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
} }
} }
return code; return code;
} }
@ -216,4 +225,4 @@ int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) {
vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle); vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle);
return rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg)); return rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg));
} }
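Across the read-path hunks above, the per-vnode qHandlePool cache is replaced by the qMgmt object. A condensed hedged sketch of the new handle lifecycle, using only functions that appear in this diff (the wrapper function and the way the handle is threaded through are illustrative assumptions):

/* hedged sketch, not part of the patch; pQInfo would come from qCreateQueryInfo() */
static void demo_qmgmt_lifecycle(int32_t vgId, void *pQInfo) {
  void *qMgmt = qOpenQueryMgmt(vgId);                 /* vnodeOpen(): one manager per vnode */

  /* query message path: publish the handle, run the query, keep it registered for fetches */
  void **handle = qRegisterQInfo(qMgmt, pQInfo);
  if (handle != NULL) {
    void **published = handle;                        /* this value is sent back to the client */
    qTableQuery(pQInfo);
    qReleaseQInfo(qMgmt, (void **)&handle, false);    /* false: handle stays registered */

    /* fetch/kill message path: re-acquire by the handle the client sends back */
    void **acquired = qAcquireQInfo(qMgmt, published);
    if (acquired != NULL) {
      qReleaseQInfo(qMgmt, (void **)&acquired, true); /* true: free the query for good */
    }
  }

  qSetQueryMgmtClosed(qMgmt);                         /* vnodeCleanUp(): refuse new queries */
  qCleanupQueryMgmt(qMgmt);                           /* vnodeRelease(): free the manager */
}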

View File

@ -58,7 +58,7 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
if (pHead->version == 0) { // from client or CQ if (pHead->version == 0) { // from client or CQ
if (pVnode->status != TAOS_VN_STATUS_READY) if (pVnode->status != TAOS_VN_STATUS_READY)
return TSDB_CODE_VND_INVALID_VGROUP_ID; // it may be in deleting or closing state return TSDB_CODE_VND_INVALID_STATUS; // it may be in deleting or closing state
if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER) if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER)
return TSDB_CODE_RPC_NOT_READY; return TSDB_CODE_RPC_NOT_READY;
@ -89,21 +89,25 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
return syncCode; return syncCode;
} }
void vnodeConfirmForward(void *param, uint64_t version, int32_t code) {
SVnodeObj *pVnode = (SVnodeObj *)param;
syncConfirmForward(pVnode->sync, version, code);
}
static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) { static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) {
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
// save insert result into item
vTrace("vgId:%d, submit msg is processed", pVnode->vgId); vTrace("vgId:%d, submit msg is processed", pVnode->vgId);
pRet->len = sizeof(SShellSubmitRspMsg); // save insert result into item
pRet->rsp = rpcMallocCont(pRet->len); SShellSubmitRspMsg *pRsp = NULL;
SShellSubmitRspMsg *pRsp = pRet->rsp; if (pRet) {
pRet->len = sizeof(SShellSubmitRspMsg);
pRet->rsp = rpcMallocCont(pRet->len);
pRsp = pRet->rsp;
}
if (tsdbInsertData(pVnode->tsdb, pCont, pRsp) < 0) code = terrno; if (tsdbInsertData(pVnode->tsdb, pCont, pRsp) < 0) code = terrno;
pRsp->numOfFailedBlocks = 0; //TODO
//pRet->len += pRsp->numOfFailedBlocks * sizeof(SShellSubmitRspBlock); //TODO
pRsp->code = 0;
pRsp->numOfRows = htonl(1);
return code; return code;
} }
@ -158,7 +162,7 @@ static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
} }
static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) { static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) {
if (tsdbUpdateTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont) < 0) { if (tsdbUpdateTableTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont) < 0) {
return terrno; return terrno;
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;

View File

@ -94,7 +94,7 @@
<dependency> <dependency>
<groupId>com.google.guava</groupId> <groupId>com.google.guava</groupId>
<artifactId>guava</artifactId> <artifactId>guava</artifactId>
<version>18.0</version> <version>24.1.1</version>
</dependency> </dependency>
<dependency> <dependency>

View File

@ -52,8 +52,7 @@ class TDTestCase:
# illegal condition # illegal condition
tdSql.error( tdSql.error(
"select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2") "select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
tdSql.error("select * from db.st where tagtype <> 1 OR tagtype < 2")
def stop(self): def stop(self):
tdSql.close() tdSql.close()

View File

@ -36,18 +36,17 @@ class TDTestCase:
"insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)") "insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)")
# inner join --- bug # inner join --- bug
tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts") tdSql.error("select * from tb1 a, tb2 b where a.ts = b.ts")
tdSql.checkRows(1)
# join 3 tables -- bug exists # join 3 tables -- bug exists
tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id") tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
# query show stable # query show stable
tdSql.query("show stables") tdSql.query("show stables")
tdSql.checkRows(1) tdSql.checkRows(1)
# query show tables # query show tables
tdSql.query("show table") tdSql.query("show tables")
tdSql.checkRows(2) tdSql.checkRows(2)
# query count # query count
@ -71,16 +70,13 @@ class TDTestCase:
tdSql.checkRows(2) tdSql.checkRows(2)
# query first ... as # query first ... as
tdSql.query("select first(*) as begin from stb1") tdSql.error("select first(*) as begin from stb1")
tdSql.checkData(0, 1, 1)
# query last ... as # query last ... as
tdSql.query("select last(*) as end from stb1") tdSql.error("select last(*) as end from stb1")
tdSql.checkData(0, 1, 4)
# query last_row ... as # query last_row ... as
tdSql.query("select last_row(*) as end from stb1") tdSql.error("select last_row(*) as end from stb1")
tdSql.checkData(0, 1, 4)
# query group .. by # query group .. by
tdSql.query("select sum(c1), t2 from stb1 group by t2") tdSql.query("select sum(c1), t2 from stb1 group by t2")
@ -95,8 +91,7 @@ class TDTestCase:
tdSql.checkRows(1) tdSql.checkRows(1)
# query ... alias for table ---- bug # query ... alias for table ---- bug
tdSql.query("select t.ts from tb1 t") tdSql.error("select t.ts from tb1 t")
tdSql.checkRows(2)
# query ... tbname # query ... tbname
tdSql.query("select tbname from stb1") tdSql.query("select tbname from stb1")
@ -104,7 +99,7 @@ class TDTestCase:
# query ... tbname count ---- bug # query ... tbname count ---- bug
tdSql.query("select count(tbname) from stb1") tdSql.query("select count(tbname) from stb1")
tdSql.checkRows(2) tdSql.checkData(0, 0, 2)
# query ... select database ---- bug # query ... select database ---- bug
tdSql.query("SELECT database()") tdSql.query("SELECT database()")

View File

@ -40,11 +40,7 @@ class TDTestCase:
tdSql.query("select last(*) from st") tdSql.query("select last(*) from st")
tdSql.checkRows(1) tdSql.checkRows(1)
print(
"======= Verify filter for %s type finished =========" %
curType)
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)

View File

@ -365,3 +365,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim ./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim ./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim

View File

@ -133,3 +133,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim ./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim ./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim

View File

@ -0,0 +1,272 @@
# Test case description: dnode1/dnode2 carry both mnode and vnode roles
# step 1: start dnode1/dnode2 and add them into the cluster
# step 2: create db (replica = 2) and tables, insert data
# step 4: stop dnode1, remove its mnode dir, and copy the mnode dir of dnode2 to dnode1
# step 5: restart dnode1, wait until sync ends
# step 6: stop dnode2, reset query cache, and query
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
#system sh/deploy.sh -n dnode3 -i 3
#system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
#system sh/cfg.sh -n dnode3 -c walLevel -v 2
#system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1/dnode2 and add into cluster
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 1000
sql connect
sleep 1000
sql create dnode $hostname2
sleep 1000
print ============== step2: create database with replica 2, and create table, insert data
$totalTableNum = 10
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
sql use $db
# create table , insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 1200
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00 totalRows:$totalRows
if $rows != 1 then
return -1
endi
if $data00 != $totalRows then
return -1
endi
print ============== step3: insert out-of-range old data (now-20d, now-40d), keeping the row count small so the data stays in cache and is not flushed to disk
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
$loopCnt = 0
wait_dnode1_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode1_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != offline then
sleep 2000
goto wait_dnode1_offline
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_offline
endi
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
#sql show vgroups
#print show vgroups:
#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print ============== step5: remove the mnode dir of dnode1, then copy the mnode dir of dnode2
system_content rm -rf ../../../sim/dnode1/data/mnode
system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
print ============== step6: restart dnode1, waiting sync end
system sh/exec.sh -n dnode1 -s start
sleep 1000
$loopCnt = 0
wait_dnode1_ready:
$loopCnt = $loopCnt + 1
if $loopCnt == 20 then
return -1
endi
sql show dnodes -x wait_dnode1_ready
if $rows != 2 then
sleep 2000
goto wait_dnode1_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
$loopCnt = 0
wait_dnode1_vgroup_slave:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show vgroups
if $rows != 3 then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
$d2v2status = $data4_4
$d2v3status = $data4_2
$d2v4status = $data4_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v3status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v4status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v2status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v3status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v4status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print ============== step7: stop dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
$loopCnt = 0
wait_dnode2_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
sql reset query cache
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi

View File

@ -0,0 +1,274 @@
# Test case description: dnode1/dnode2 carry both mnode and vnode roles
# step 1: start dnode1/dnode2 and add them into the cluster
# step 2: create db (replica = 2) and tables, insert data
# step 4: stop dnode1, remove its mnode and vnode dirs, and copy the mnode and vnode dirs of dnode2 to dnode1
# step 5: restart dnode1, wait until sync ends
# step 6: stop dnode2, reset query cache, and query
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
#system sh/deploy.sh -n dnode3 -i 3
#system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
#system sh/cfg.sh -n dnode3 -c walLevel -v 2
#system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1/dnode2 and add into cluster
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 1000
sql connect
sleep 1000
sql create dnode $hostname2
sleep 1000
print ============== step2: create database with replica 2, and create table, insert data
$totalTableNum = 10
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
sql use $db
# create table , insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 1200
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00 totalRows:$totalRows
if $rows != 1 then
return -1
endi
if $data00 != $totalRows then
return -1
endi
print ============== step3: insert out-of-range old data (now-20d, now-40d), keeping the row count small so the data stays in cache and is not flushed to disk
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
$loopCnt = 0
wait_dnode1_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode1_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != offline then
sleep 2000
goto wait_dnode1_offline
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_offline
endi
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
#sql show vgroups
#print show vgroups:
#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print ============== step5: remove the mnode and vnode dirs of dnode1, then copy the mnode and vnode dirs of dnode2
system_content rm -rf ../../../sim/dnode1/data/vnode
system_content rm -rf ../../../sim/dnode1/data/mnode
system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
print ============== step6: restart dnode1, waiting sync end
system sh/exec.sh -n dnode1 -s start
sleep 1000
$loopCnt = 0
wait_dnode1_ready:
$loopCnt = $loopCnt + 1
if $loopCnt == 20 then
return -1
endi
sql show dnodes -x wait_dnode1_ready
if $rows != 2 then
sleep 2000
goto wait_dnode1_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
$loopCnt = 0
wait_dnode1_vgroup_slave:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show vgroups
if $rows != 3 then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
$d2v2status = $data4_4
$d2v3status = $data4_2
$d2v4status = $data4_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v3status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v4status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v2status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v3status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v4status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print ============== step7: stop dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
$loopCnt = 0
wait_dnode2_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
sql reset query cache
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi

View File

@ -0,0 +1,210 @@
# Test case description: dnode1/dnode2 carry both mnode and vnode roles
# step 1: start dnode1/dnode2 and add them into the cluster
# step 2: create db (replica = 2) and tables, insert data
# step 4: stop dnode1, remove its mnode and vnode dirs, and copy the mnode and vnode dirs of dnode2 to dnode1
# step 5: restart dnode1, wait until sync ends
# step 6: stop dnode2, reset query cache, and query
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
#system sh/deploy.sh -n dnode3 -i 3
#system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
#system sh/cfg.sh -n dnode3 -c walLevel -v 2
#system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1/dnode2 and add into cluster
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 1000
sql connect
sleep 1000
sql create dnode $hostname2
sleep 1000
print ============== step2: create database with replica 2, and create table, insert data
$totalTableNum = 10
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
sql use $db
# create table, insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 1200
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
$i = 0
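# NOTE: each insert statement in the inner loop covers 60 millisecond offsets from $ts; the 25a-34a
# offsets are listed twice and the duplicate timestamps presumably collapse into single rows, so the
# loop counter advances by 60 per statement and each table ends up with $rowNum (1200) counted rows.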
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00 totalRows:$totalRows
if $rows != 1 then
return -1
endi
if $data00 != $totalRows then
return -1
endi
print ============== step3: insert old data (now-20d and now-40d), keeping the row count small so the data stays in cache and is not flushed to disk
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
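# the two extra rows use timestamps far outside the bulk data range, so they presumably land in
# separate time partitions while the overall volume is still small enough to stay in cache.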
print ============== step4: stop dnode1/dnode2
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print ============== step5: remove the mnode and vnode dirs of dnode1, then copy the mnode and vnode dirs of dnode2 to dnode1
system_content rm -rf ../../../sim/dnode1/data/vnode
system_content rm -rf ../../../sim/dnode1/data/mnode
system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
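# dnode1 will now restart from an exact copy of dnode2's mnode and vnode data; the checks below
# expect its replicas to come back as slaves that catch up with dnode2's masters through sync.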
print ============== step6: restart dnode1/dnode2
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 1000
sql connect
sql use $db
$loopCnt = 0
wait_dnode1_ready:
$loopCnt = $loopCnt + 1
if $loopCnt == 20 then
return -1
endi
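# '-x wait_dnode1_ready' presumably makes a failing statement jump back to the label instead of
# aborting the case, so transient errors while the dnodes are restarting simply trigger another retry.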
sql show dnodes -x wait_dnode1_ready
if $rows != 2 then
sleep 2000
goto wait_dnode1_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
$loopCnt = 0
wait_dnode1_vgroup_slave:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show vgroups
if $rows != 3 then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
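# the variable names below assume column 4 holds the replica role on dnode2 and column 7 the role
# on dnode1, with the row suffix selecting the individual vgroup from the rows printed above.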
$d2v2status = $data4_4
$d2v3status = $data4_2
$d2v4status = $data4_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v3status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v4status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v2status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v3status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v4status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
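# both dnodes are online again; 'reset query cache' presumably clears the client's cached table and
# vgroup metadata so the final count is routed against the current (re-synced) vgroup roles.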
sql reset query cache
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi

View File

@ -0,0 +1,272 @@
# Test case description: dnode1/dnode2 include mnode and vnode roles
# step 1: start dnode1/dnode2 and add them into the cluster
# step 2: create db (replica = 2), create tables, insert data
# step 3: insert extra old rows so the data stays in cache and is not flushed to disk
# step 4: stop dnode1, remove its vnode dir, and copy the vnode dir of dnode2 to dnode1
# step 5: restart dnode1 and wait for sync to end
# step 6: stop dnode2, reset query cache, and query
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
#system sh/deploy.sh -n dnode3 -i 3
#system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
#system sh/cfg.sh -n dnode3 -c walLevel -v 2
#system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1/dnode2 and add into cluster
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 1000
sql connect
sleep 1000
sql create dnode $hostname2
sleep 1000
print ============== step2: create database with replica 2, and create table, insert data
$totalTableNum = 10
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
sql use $db
# create table, insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 1200
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
$i = 0
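# NOTE: each insert statement below writes 60 millisecond-offset rows per iteration (the duplicate
# 25a-34a offsets presumably collapse), so $x advances by 60 and each table counts $rowNum rows.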
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00 totalRows:$totalRows
if $rows != 1 then
return -1
endi
if $data00 != $totalRows then
return -1
endi
print ============== step3: insert old data (now-20d and now-40d), keeping the row count small so the data stays in cache and is not flushed to disk
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
$loopCnt = 0
wait_dnode1_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode1_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != offline then
sleep 2000
goto wait_dnode1_offline
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_offline
endi
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
#sql show vgroups
#print show vgroups:
#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print ============== step5: remove the vnode dir of dnode1, then copy the vnode dir of dnode2 to dnode1
system_content rm -rf ../../../sim/dnode1/data/vnode
system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
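# dnode1 keeps its own mnode data but restarts with a copy of dnode2's vnode files; the checks
# below expect these vnode replicas to come back as slaves of dnode2's masters after sync.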
print ============== step6: restart dnode1 and wait for sync to end
system sh/exec.sh -n dnode1 -s start
sleep 1000
$loopCnt = 0
wait_dnode1_ready:
$loopCnt = $loopCnt + 1
if $loopCnt == 20 then
return -1
endi
sql show dnodes -x wait_dnode1_ready
if $rows != 2 then
sleep 2000
goto wait_dnode1_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
$loopCnt = 0
wait_dnode1_vgroup_slave:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show vgroups
if $rows != 3 then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
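# as in the variable names, column 4 is assumed to be the replica role on dnode2 and column 7 the
# role on dnode1; the row suffix picks the individual vgroup from the rows printed above.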
$d2v2status = $data4_4
$d2v3status = $data4_2
$d2v4status = $data4_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v3status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v4status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v2status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v3status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v4status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print ============== step7: stop dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
$loopCnt = 0
wait_dnode2_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
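# with dnode2 stopped, the count below can only be served from dnode1's re-synced vnode replicas;
# matching $totalRows shows the copied data survived the recovery.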
sql reset query cache
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi