diff --git a/.travis.yml b/.travis.yml
index 4d7a809e29..6a4acce451 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -63,7 +63,7 @@ matrix:
pkill -TERM -x taosd
fuser -k -n tcp 6030
sleep 1
- ./crash_gen.sh -a -p -t 4 -s 25|| travis_terminate $?
+ ./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $?
sleep 1
cd ${TRAVIS_BUILD_DIR}/tests/pytest
diff --git a/src/client/inc/tscLocalMerge.h b/src/client/inc/tscLocalMerge.h
index d38885ab2e..397a60d140 100644
--- a/src/client/inc/tscLocalMerge.h
+++ b/src/client/inc/tscLocalMerge.h
@@ -20,8 +20,8 @@
extern "C" {
#endif
-#include "qextbuffer.h"
-#include "qfill.h"
+#include "qExtbuffer.h"
+#include "qFill.h"
#include "taosmsg.h"
#include "tlosertree.h"
#include "tsclient.h"
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 786133a8f3..590f205e1d 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -23,11 +23,11 @@ extern "C" {
/*
* @date 2018/09/30
*/
-#include "os.h"
-#include "tbuffer.h"
#include "exception.h"
-#include "qextbuffer.h"
+#include "os.h"
+#include "qExtbuffer.h"
#include "taosdef.h"
+#include "tbuffer.h"
#include "tscLocalMerge.h"
#include "tsclient.h"
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index b5455ed1fb..17840df4a4 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -31,8 +31,8 @@ extern "C" {
#include "tutil.h"
#include "qExecutor.h"
+#include "qTsbuf.h"
#include "qsqlparser.h"
-#include "qtsbuf.h"
#include "tcmdtype.h"
// forward declaration
diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c
index 262b7ab3f6..72ccd5adc6 100644
--- a/src/client/src/tscFunctionImpl.c
+++ b/src/client/src/tscFunctionImpl.c
@@ -14,15 +14,15 @@
*/
#include "os.h"
-#include "qextbuffer.h"
-#include "qfill.h"
-#include "qhistogram.h"
-#include "qpercentile.h"
-#include "qsyntaxtreefunction.h"
-#include "qtsbuf.h"
+#include "qAst.h"
+#include "qExtbuffer.h"
+#include "qFill.h"
+#include "qHistogram.h"
+#include "qPercentile.h"
+#include "qSyntaxtreefunction.h"
+#include "qTsbuf.h"
#include "taosdef.h"
#include "taosmsg.h"
-#include "qast.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tscompression.h"
@@ -74,7 +74,7 @@ for (int32_t i = 0; i < (ctx)->tagInfo.numOfTagCols; ++i) { \
void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {}
void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {}
-void doFinalizer(SQLFunctionCtx *pCtx) { resetResultInfo(GET_RES_INFO(pCtx)); }
+void doFinalizer(SQLFunctionCtx *pCtx) { RESET_RESULT_INFO(GET_RES_INFO(pCtx)); }
typedef struct tValuePair {
tVariant v;
@@ -330,12 +330,6 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
return TSDB_CODE_SUCCESS;
}
-/**
- * the numOfRes should be kept, since it may be used later
- * and allow the ResultInfo to be re initialized
- */
-void resetResultInfo(SResultInfo *pResInfo) { pResInfo->initialized = false; }
-
void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf) {
assert(pResInfo->interResultBuf == NULL);
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 83700ce0a5..7f336daa91 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -16,14 +16,14 @@
#include "os.h"
#include "taosmsg.h"
-#include "tcache.h"
-#include "tscUtil.h"
-#include "tsclient.h"
+#include "qExtbuffer.h"
#include "taosdef.h"
-#include "tscLog.h"
-#include "qextbuffer.h"
-#include "tschemautil.h"
+#include "tcache.h"
#include "tname.h"
+#include "tscLog.h"
+#include "tscUtil.h"
+#include "tschemautil.h"
+#include "tsclient.h"
static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength);
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index b97e486449..32d44dfb81 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -18,9 +18,9 @@
#define _DEFAULT_SOURCE
#include "os.h"
+#include "qAst.h"
#include "taos.h"
#include "taosmsg.h"
-#include "qast.h"
#include "tcompare.h"
#include "tname.h"
#include "tscLog.h"
@@ -358,7 +358,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_CFG_DNODE: {
- const char* msg2 = "invalid configure options or values";
+ const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:1-dnode:2' / monitor 1 ";
const char* msg3 = "invalid dnode ep";
/* validate the ip address */
@@ -4674,26 +4674,42 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- const int DNODE_DYNAMIC_CFG_OPTIONS_SIZE = 19;
- const SDNodeDynConfOption DNODE_DYNAMIC_CFG_OPTIONS[] = {
- {"resetLog", 8}, {"resetQueryCache", 15}, {"debugFlag", 9}, {"mDebugFlag", 10},
- {"dDebugFlag", 10}, {"sdbDebugFlag", 12}, {"vDebugFlag", 10}, {"cDebugFlag", 10},
- {"httpDebugFlag", 13}, {"monitorDebugFlag", 16}, {"rpcDebugFlag", 12}, {"uDebugFlag", 10},
- {"tmrDebugFlag", 12}, {"qDebugflag", 10}, {"sDebugflag", 10}, {"tsdbDebugFlag", 13},
- {"mqttDebugFlag", 13}, {"wDebugFlag", 10}, {"monitor", 7}};
+ const int tokenLogEnd = 2;
+ const int tokenBalance = 2;
+ const int tokenMonitor = 3;
+ const int tokenDebugFlag = 4;
+ const int tokenDebugFlagEnd = 20;
+ const SDNodeDynConfOption cfgOptions[] = {
+ {"resetLog", 8}, {"resetQueryCache", 15}, {"balance", 7}, {"monitor", 7},
+ {"debugFlag", 9}, {"monitorDebugFlag", 16}, {"vDebugFlag", 10}, {"mDebugFlag", 10},
+ {"cDebugFlag", 10}, {"httpDebugFlag", 13}, {"qDebugflag", 10}, {"sdbDebugFlag", 12},
+ {"uDebugFlag", 10}, {"tsdbDebugFlag", 13}, {"sDebugflag", 10}, {"rpcDebugFlag", 12},
+ {"dDebugFlag", 10}, {"mqttDebugFlag", 13}, {"wDebugFlag", 10}, {"tmrDebugFlag", 12},
+ };
SSQLToken* pOptionToken = &pOptions->a[1];
if (pOptions->nTokens == 2) {
// reset log and reset query cache does not need value
- for (int32_t i = 0; i < 2; ++i) {
- const SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i];
+ for (int32_t i = 0; i < tokenLogEnd; ++i) {
+ const SDNodeDynConfOption* pOption = &cfgOptions[i];
if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) {
return TSDB_CODE_SUCCESS;
}
}
- } else if ((strncasecmp(DNODE_DYNAMIC_CFG_OPTIONS[DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1].name, pOptionToken->z, pOptionToken->n) == 0) &&
- (DNODE_DYNAMIC_CFG_OPTIONS[DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1].len == pOptionToken->n)) {
+ } else if ((strncasecmp(cfgOptions[tokenBalance].name, pOptionToken->z, pOptionToken->n) == 0) &&
+ (cfgOptions[tokenBalance].len == pOptionToken->n)) {
+ SSQLToken* pValToken = &pOptions->a[2];
+ int32_t vnodeIndex = 0;
+ int32_t dnodeIndex = 0;
+ strdequote(pValToken->z);
+ bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeIndex, &dnodeIndex);
+ if (!parseOk) {
+ return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
+ }
+ return TSDB_CODE_SUCCESS;
+ } else if ((strncasecmp(cfgOptions[tokenMonitor].name, pOptionToken->z, pOptionToken->n) == 0) &&
+ (cfgOptions[tokenMonitor].len == pOptionToken->n)) {
SSQLToken* pValToken = &pOptions->a[2];
int32_t val = strtol(pValToken->z, NULL, 10);
if (val != 0 && val != 1) {
@@ -4709,8 +4725,8 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- for (int32_t i = 2; i < DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1; ++i) {
- const SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i];
+ for (int32_t i = tokenDebugFlag; i < tokenDebugFlagEnd; ++i) {
+ const SDNodeDynConfOption* pOption = &cfgOptions[i];
if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) {
/* options is valid */
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index d9922b8718..9a1fa77629 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -294,52 +294,31 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
}
}
- if (rpcMsg->pCont == NULL) {
- rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
- } else {
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- // if (rpcMsg->code != TSDB_CODE_RPC_NETWORK_UNAVAIL) {
- // if (pCmd->command == TSDB_SQL_CONNECT) {
- // rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
- // rpcFreeCont(rpcMsg->pCont);
- // return;
- // }
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- // if (pCmd->command == TSDB_SQL_HB) {
- // rpcMsg->code = TSDB_CODE_RPC_NOT_READY;
- // rpcFreeCont(rpcMsg->pCont);
- // return;
- // }
+ int32_t cmd = pCmd->command;
+ if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_FETCH || cmd == TSDB_SQL_INSERT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
+ (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID ||
+ rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
+ rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL ||
+ rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE)) {
+ tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry);
- // if (pCmd->command == TSDB_SQL_META || pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
- // pCmd->command == TSDB_SQL_STABLEVGROUP || pCmd->command == TSDB_SQL_SHOW ||
- // pCmd->command == TSDB_SQL_RETRIEVE) {
- // // get table meta/vgroup query will not retry, do nothing
- // }
- // }
+ // set the flag to denote that sql string needs to be re-parsed and build submit block with table schema
+ if (rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
+ pSql->cmd.submitSchema = 1;
+ }
- if ((pCmd->command == TSDB_SQL_SELECT || pCmd->command == TSDB_SQL_FETCH || pCmd->command == TSDB_SQL_INSERT ||
- pCmd->command == TSDB_SQL_UPDATE_TAGS_VAL) &&
- (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
- rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE)) {
- tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry);
- // set the flag to denote that sql string needs to be re-parsed and build submit block with table schema
- if (rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
- pSql->cmd.submitSchema = 1;
- }
+ pSql->res.code = rpcMsg->code; // keep the previous error code
+ if (pSql->retry > pSql->maxRetry) {
+ tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
+ } else {
+ rpcMsg->code = tscRenewTableMeta(pSql, pTableMetaInfo->name);
- pSql->res.code = rpcMsg->code; // keep the previous error code
- if (pSql->retry > pSql->maxRetry) {
- tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
- } else {
- rpcMsg->code = tscRenewTableMeta(pSql, pTableMetaInfo->name);
-
- // if there is an error occurring, proceed to the following error handling procedure.
- // todo add test cases
- if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- rpcFreeCont(rpcMsg->pCont);
- return;
- }
+ // if there is an error occurring, proceed to the following error handling procedure.
+ if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ rpcFreeCont(rpcMsg->pCont);
+ return;
}
}
}
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index 5848b7b82f..ff050dbbbf 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -15,7 +15,7 @@
#include "hash.h"
#include "os.h"
-#include "qast.h"
+#include "qAst.h"
#include "tcache.h"
#include "tnote.h"
#include "trpc.h"
@@ -724,6 +724,13 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
return len;
}
+static void asyncCallback(void *param, TAOS_RES *tres, int code) {
+ assert(param != NULL);
+ SSqlObj *pSql = ((SSqlObj *)param);
+ pSql->res.code = code;
+ sem_post(&pSql->rspSem);
+}
+
int taos_validate_sql(TAOS *taos, const char *sql) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
@@ -732,7 +739,8 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
}
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
-
+ pSql->pTscObj = taos;
+ pSql->signature = pSql;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
@@ -766,10 +774,17 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
pCmd->pTableList = NULL;
}
- pRes->code = (uint8_t)tsParseSql(pSql, false);
- int code = pRes->code;
-
- tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
+ pSql->fp = asyncCallback;
+ pSql->fetchFp = asyncCallback;
+ pSql->param = pSql;
+ int code = tsParseSql(pSql, true);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ sem_wait(&pSql->rspSem);
+ code = pSql->res.code;
+ }
+ if (code != TSDB_CODE_SUCCESS) {
+ tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, code, taos_errstr(taos), pObj);
+ }
taos_free_result(pSql);
return code;
@@ -865,6 +880,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
}
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
+ pSql->pTscObj = taos;
+ pSql->signature = pSql;
SSqlRes *pRes = &pSql->res;
pRes->numOfTotal = 0; // the number of getting table meta from server
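
Note (editorial, not part of the patch): the `taos_validate_sql` change above replaces the blocking parse with the async path and waits on the object's semaphore. A minimal sketch of that synchronous-over-async pattern, assuming the field names shown in the hunk (`fp`, `fetchFp`, `param`, `rspSem`, `res.code`); the wrapper name `validateBlocking` is hypothetical and only compiles inside the client source tree.

```c
/* Illustrative sketch of the pattern used in taos_validate_sql above. */
static void asyncCallback(void *param, TAOS_RES *tres, int code) {
  SSqlObj *pSql = (SSqlObj *)param;
  pSql->res.code = code;      /* record the parse/exec outcome */
  sem_post(&pSql->rspSem);    /* wake the caller blocked below */
}

static int validateBlocking(SSqlObj *pSql) {
  pSql->fp      = asyncCallback;  /* both completion paths report here */
  pSql->fetchFp = asyncCallback;
  pSql->param   = pSql;

  int code = tsParseSql(pSql, true);
  if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
    sem_wait(&pSql->rspSem);      /* parked until asyncCallback posts */
    code = pSql->res.code;
  }
  return code;
}
```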
diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c
index 7c188ec969..b07627c87b 100644
--- a/src/client/src/tscStream.c
+++ b/src/client/src/tscStream.c
@@ -122,7 +122,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
pQueryInfo->window.ekey = pStream->etime;
}
} else {
- pQueryInfo->window.skey = pStream->stime - pStream->interval;
+ pQueryInfo->window.skey = pStream->stime;
int64_t etime = taosGetTimestamp(pStream->precision);
// delay to wait all data in last time window
if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
@@ -232,6 +232,9 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
(*pStream->fp)(pStream->param, res, row);
}
+ if (!pStream->isProject) {
+ pStream->stime += pStream->slidingTime;
+ }
// actually only one row is returned. this following is not necessary
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
} else { // numOfRows == 0, all data has been retrieved
@@ -432,6 +435,7 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
+ stime -= pStream->interval;
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
} else {
int64_t newStime = (stime / pStream->interval) * pStream->interval;
diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c
index 2c5035c2ef..526f380475 100644
--- a/src/client/src/tscSub.c
+++ b/src/client/src/tscSub.c
@@ -34,6 +34,7 @@ typedef struct SSubscriptionProgress {
typedef struct SSub {
void * signature;
char topic[32];
+ sem_t sem;
int64_t lastSyncTime;
int64_t lastConsumeTime;
TAOS * taos;
@@ -83,84 +84,108 @@ void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts) {
static void asyncCallback(void *param, TAOS_RES *tres, int code) {
assert(param != NULL);
- SSqlObj *pSql = ((SSqlObj *)param);
-
- pSql->res.code = code;
- sem_post(&pSql->rspSem);
+ SSub *pSub = ((SSub *)param);
+ pSub->pSql->res.code = code;
+ sem_post(&pSub->sem);
}
static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* sql) {
- SSub* pSub = NULL;
+ int code = TSDB_CODE_SUCCESS, line = __LINE__;
+ SSqlObj* pSql = NULL;
- TRY( 8 ) {
- SSqlObj* pSql = calloc_throw(1, sizeof(SSqlObj));
- CLEANUP_PUSH_FREE(true, pSql);
- SSqlCmd *pCmd = &pSql->cmd;
- SSqlRes *pRes = &pSql->res;
+ SSub* pSub = calloc(1, sizeof(SSub));
+ if (pSub == NULL) {
+ line = __LINE__;
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto fail;
+ }
+ pSub->signature = pSub;
+ if (tsem_init(&pSub->sem, 0, 0) == -1) {
+ line = __LINE__;
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto fail;
+ }
+ tstrncpy(pSub->topic, topic, sizeof(pSub->topic));
+ pSub->progress = taosArrayInit(32, sizeof(SSubscriptionProgress));
+ if (pSub->progress == NULL) {
+ line = __LINE__;
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto fail;
+ }
- if (tsem_init(&pSql->rspSem, 0, 0) == -1) {
- THROW(TAOS_SYSTEM_ERROR(errno));
- }
- CLEANUP_PUSH_INT_PTR(true, tsem_destroy, &pSql->rspSem);
+ pSql = calloc(1, sizeof(SSqlObj));
+ if (pSql == NULL) {
+ line = __LINE__;
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto fail;
+ }
+ pSql->signature = pSql;
+ pSql->pTscObj = pObj;
+ pSql->pSubscription = pSub;
+ pSub->pSql = pSql;
- pSql->signature = pSql;
- pSql->param = pSql;
- pSql->pTscObj = pObj;
- pSql->maxRetry = TSDB_MAX_REPLICA;
- pSql->fp = asyncCallback;
+ SSqlCmd* pCmd = &pSql->cmd;
+ SSqlRes* pRes = &pSql->res;
+ if (tsem_init(&pSql->rspSem, 0, 0) == -1) {
+ line = __LINE__;
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto fail;
+ }
- int code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
- if (code != TSDB_CODE_SUCCESS) {
- THROW(code);
- }
- CLEANUP_PUSH_FREE(true, pCmd->payload);
+ pSql->param = pSub;
+ pSql->maxRetry = TSDB_MAX_REPLICA;
+ pSql->fp = asyncCallback;
+ pSql->fetchFp = asyncCallback;
+ pSql->sqlstr = strdup(sql);
+ if (pSql->sqlstr == NULL) {
+ line = __LINE__;
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto fail;
+ }
+ strtolower(pSql->sqlstr, pSql->sqlstr);
+ pRes->qhandle = 0;
+ pRes->numOfRows = 1;
- pRes->qhandle = 0;
- pRes->numOfRows = 1;
+ code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
+ if (code != TSDB_CODE_SUCCESS) {
+ line = __LINE__;
+ goto fail;
+ }
- pSql->sqlstr = strdup_throw(sql);
- CLEANUP_PUSH_FREE(true, pSql->sqlstr);
- strtolower(pSql->sqlstr, pSql->sqlstr);
+ code = tsParseSql(pSql, false);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ sem_wait(&pSub->sem);
+ code = pSql->res.code;
+ }
+ if (code != TSDB_CODE_SUCCESS) {
+ line = __LINE__;
+ goto fail;
+ }
- code = tsParseSql(pSql, false);
- if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- // wait for the callback function to post the semaphore
- sem_wait(&pSql->rspSem);
- code = pSql->res.code;
- }
- if (code != TSDB_CODE_SUCCESS) {
- tscError("failed to parse sql statement: %s, error: %s", pSub->topic, tstrerror(code));
- THROW( code );
- }
-
- if (pSql->cmd.command != TSDB_SQL_SELECT) {
- tscError("only 'select' statement is allowed in subscription: %s", pSub->topic);
- THROW( -1 ); // TODO
- }
-
- pSub = calloc_throw(1, sizeof(SSub));
- CLEANUP_PUSH_FREE(true, pSub);
- pSql->pSubscription = pSub;
- pSub->pSql = pSql;
- pSub->signature = pSub;
- strncpy(pSub->topic, topic, sizeof(pSub->topic));
- pSub->topic[sizeof(pSub->topic) - 1] = 0;
- pSub->progress = taosArrayInit(32, sizeof(SSubscriptionProgress));
- if (pSub->progress == NULL) {
- THROW(TSDB_CODE_TSC_OUT_OF_MEMORY);
- }
-
- CLEANUP_EXECUTE();
-
- } CATCH( code ) {
- tscError("failed to create subscription object: %s", tstrerror(code));
- CLEANUP_EXECUTE();
- pSub = NULL;
-
- } END_TRY
+ if (pSql->cmd.command != TSDB_SQL_SELECT) {
+ line = __LINE__;
+ code = TSDB_CODE_TSC_INVALID_SQL;
+ goto fail;
+ }
return pSub;
+
+fail:
+ tscError("tscCreateSubscription failed at line %d, reason: %s", line, tstrerror(code));
+ if (pSql != NULL) {
+ tscFreeSqlObj(pSql);
+ pSql = NULL;
+ }
+ if (pSub != NULL) {
+ taosArrayDestroy(pSub->progress);
+ tsem_destroy(&pSub->sem);
+ free(pSub);
+ pSub = NULL;
+ }
+
+ terrno = code;
+ return NULL;
}
@@ -405,9 +430,10 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->vgroupIndex = 0;
pSql->fp = asyncCallback;
- pSql->param = pSql;
+ pSql->fetchFp = asyncCallback;
+ pSql->param = pSub;
tscDoQuery(pSql);
- sem_wait(&pSql->rspSem);
+ sem_wait(&pSub->sem);
if (pRes->code != TSDB_CODE_SUCCESS) {
continue;
@@ -437,7 +463,9 @@ void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) {
}
if (keepProgress) {
- tscSaveSubscriptionProgress(pSub);
+ if (pSub->progress != NULL) {
+ tscSaveSubscriptionProgress(pSub);
+ }
} else {
char path[256];
sprintf(path, "%s/subscribe/%s", tsDataDir, pSub->topic);
@@ -448,6 +476,7 @@ void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) {
tscFreeSqlObj(pSub->pSql);
taosArrayDestroy(pSub->progress);
+ tsem_destroy(&pSub->sem);
memset(pSub, 0, sizeof(*pSub));
free(pSub);
}
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 1dbc52efb0..5d26d09fae 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -14,8 +14,8 @@
*/
#include "os.h"
-#include "qtsbuf.h"
-#include "qast.h"
+#include "qAst.h"
+#include "qTsbuf.h"
#include "tcompare.h"
#include "tscLog.h"
#include "tscSubquery.h"
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 957bdeeb7f..17adc0c03d 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -13,11 +13,11 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "os.h"
-#include "hash.h"
#include "tscUtil.h"
+#include "hash.h"
+#include "os.h"
+#include "qAst.h"
#include "taosmsg.h"
-#include "qast.h"
#include "tcache.h"
#include "tkey.h"
#include "tmd5.h"
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 6506707457..7ba96ceb60 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -174,6 +174,7 @@ bool taosCheckGlobalCfg();
void taosSetAllDebugFlag();
bool taosCfgDynamicOptions(char *msg);
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
+bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t *dnodeIndex);
#ifdef __cplusplus
}
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index 719d80aa77..12ea4ad78d 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -318,7 +318,7 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
pCols->maxPoints = maxRows;
pCols->bufSize = maxRowSize * maxRows;
- pCols->buf = calloc(1, pCols->bufSize);
+ pCols->buf = malloc(pCols->bufSize);
if (pCols->buf == NULL) {
free(pCols);
return NULL;
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 56c63ee49d..c79b016b93 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -198,6 +198,7 @@ int32_t tsdbDebugFlag = 131;
int32_t (*monitorStartSystemFp)() = NULL;
void (*monitorStopSystemFp)() = NULL;
+void (*monitorExecuteSQLFp)(char *sql) = NULL;
static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
@@ -252,11 +253,15 @@ bool taosCfgDynamicOptions(char *msg) {
if (monitorStartSystemFp) {
(*monitorStartSystemFp)();
uInfo("monitor is enabled");
+ } else {
+ uError("monitor can't be updated, for monitor not initialized");
}
} else {
if (monitorStopSystemFp) {
(*monitorStopSystemFp)();
uInfo("monitor is disabled");
+ } else {
+ uError("monitor can't be updated, for monitor not initialized");
}
}
return true;
@@ -276,7 +281,12 @@ bool taosCfgDynamicOptions(char *msg) {
}
if (strncasecmp(option, "resetQueryCache", 15) == 0) {
- uError("reset query cache can't be executed, for monitor not initialized");
+ if (monitorExecuteSQLFp) {
+ (*monitorExecuteSQLFp)("resetQueryCache");
+ uInfo("resetquerycache is executed");
+ } else {
+ uError("resetquerycache can't be executed, for monitor not started");
+ }
}
return false;
@@ -1300,3 +1310,32 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
return 0;
}
+
+/*
+ * alter dnode 1 balance "vnode:1-dnode:2"
+ */
+
+bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t *dnodeIndex) {
+ int len = strlen(option);
+ if (strncasecmp(option, "vnode:", 6) != 0) {
+ return false;
+ }
+
+ int pos = 0;
+ for (; pos < len; ++pos) {
+ if (option[pos] == '-') break;
+ }
+
+ if (++pos >= len) return false;
+ if (strncasecmp(option + pos, "dnode:", 6) != 0) {
+ return false;
+ }
+
+ *vnodeIndex = strtol(option + 6, NULL, 10);
+ *dnodeIndex = strtol(option + pos + 6, NULL, 10);
+ if (*vnodeIndex <= 1 || *dnodeIndex <= 0) {
+ return false;
+ }
+
+ return true;
+}
\ No newline at end of file
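
Illustration (not part of the patch): how the new helper is expected to be used. The value string is the dequoted argument of `alter dnode <id> balance "vnode:<v>-dnode:<d>"`; note the parser rejects vnode ids <= 1. The standalone driver below is hypothetical and assumes it links against the common library that provides the function declared in tglobal.h above.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* declared in tglobal.h (added in this patch) */
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t *dnodeIndex);

int main(void) {
  int32_t vnodeIndex = 0, dnodeIndex = 0;
  /* dequoted value of: alter dnode 1 balance "vnode:2-dnode:3" */
  if (taosCheckBalanceCfgOptions("vnode:2-dnode:3", &vnodeIndex, &dnodeIndex)) {
    printf("move vnode %d to dnode %d\n", vnodeIndex, dnodeIndex);  /* -> 2, 3 */
  } else {
    printf("invalid balance option\n");
  }
  return 0;
}
```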
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java
index df25076a95..800265868d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java
@@ -1,6 +1,8 @@
package com.taosdata.jdbc.utils;
+import java.io.BufferedReader;
import java.io.File;
+import java.io.InputStreamReader;
import java.util.*;
import java.util.concurrent.TimeUnit;
@@ -31,6 +33,10 @@ public class TDNode {
this.testCluster = testCluster;
}
+ public void setRunning(int running) {
+ this.running = running;
+ }
+
public void searchTaosd(File dir, ArrayList<String> taosdPath) {
File[] fileList = dir.listFiles();
@@ -102,15 +108,46 @@ public class TDNode {
this.running = 1;
}
- public void stop() {
- String toBeKilled = "taosd";
+ public Integer getTaosdPid() {
+ String cmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'";
+ String[] cmds = {"sh", "-c", cmd};
+ try {
+ Process process = Runtime.getRuntime().exec(cmds);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
+ String line = null;
+ Integer res = null;
+ while((line = reader.readLine()) != null) {
+ if(!line.isEmpty()) {
+ res = Integer.valueOf(line);
+ break;
+ }
+ }
+
+ return res;
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+
+ public void stop() {
if (this.running != 0) {
- String killCmd = "pkill -kill -x " + toBeKilled;
- String[] killCmds = {"sh", "-c", killCmd};
- try {
- Runtime.getRuntime().exec(killCmds).waitFor();
+ Integer pid = null;
+ while((pid = getTaosdPid()) != null) {
+
+ String killCmd = "kill -term " + pid;
+ String[] killCmds = {"sh", "-c", killCmd};
+ try {
+ Runtime.getRuntime().exec(killCmds).waitFor();
+ TimeUnit.SECONDS.sleep(2);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ try {
for(int port = 6030; port < 6041; port ++) {
String fuserCmd = "fuser -k -n tcp " + port;
Runtime.getRuntime().exec(fuserCmd).waitFor();
@@ -120,7 +157,7 @@ public class TDNode {
}
this.running = 0;
- System.out.println("dnode:" + this.index + " is stopped by pkill");
+ System.out.println("dnode:" + this.index + " is stopped by kill -term");
}
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java
index ea15ae9863..efc4c53e28 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java
@@ -14,33 +14,6 @@ public class TDNodes {
}
}
- public void setPath(String path) {
- try {
- String killCmd = "pkill -kill -x taosd";
- String[] killCmds = {"sh", "-c", killCmd};
- Runtime.getRuntime().exec(killCmds).waitFor();
-
- String binPath = System.getProperty("user.dir");
- binPath += "/../../../debug";
- System.out.println("binPath: " + binPath);
-
- File file = new File(path);
- binPath = file.getCanonicalPath();
- System.out.println("binPath real path: " + binPath);
-
- if(path.isEmpty()){
- file = new File(path + "/../../");
- path = file.getCanonicalPath();
- }
-
- for(int i = 0; i < tdNodes.size(); i++) {
- tdNodes.get(i).setPath(path);
- }
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
-
public void setTestCluster(boolean testCluster) {
this.testCluster = testCluster;
}
@@ -70,6 +43,11 @@ public class TDNodes {
check(index);
tdNodes.get(index - 1).setCfgConfig(option, value);
}
+
+ public TDNode getTDNode(int index) {
+ check(index);
+ return tdNodes.get(index - 1);
+ }
public void start(int index) {
check(index);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java
index 5f105fb782..6c3437186f 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java
@@ -1,6 +1,5 @@
package com.taosdata.jdbc;
-import java.io.File;
import com.taosdata.jdbc.utils.TDNodes;
import org.junit.AfterClass;
@@ -8,31 +7,30 @@ import org.junit.BeforeClass;
public class BaseTest {
- private static boolean testCluster = false;
- private static String deployPath = System.getProperty("user.dir");
- private static TDNodes tdNodes = new TDNodes();
+ private static boolean testCluster = false;
+ private static TDNodes nodes = new TDNodes();
@BeforeClass
public static void setupEnv() {
- try{
- File file = new File(deployPath + "/../../../");
- String rootPath = file.getCanonicalPath();
-
- tdNodes.setPath(rootPath);
- tdNodes.setTestCluster(testCluster);
+ try{
+ if (nodes.getTDNode(1).getTaosdPid() != null) {
+ System.out.println("Kill taosd before running JDBC test");
+ nodes.getTDNode(1).setRunning(1);
+ nodes.stop(1);
+ }
- tdNodes.deploy(1);
- tdNodes.start(1);
+ nodes.setTestCluster(testCluster);
+ nodes.deploy(1);
+ nodes.start(1);
} catch (Exception e) {
- e.printStackTrace();
- System.out.println("Base Test Exception");
+ e.printStackTrace();
}
}
@AfterClass
public static void cleanUpEnv() {
- tdNodes.stop(1);
+ nodes.stop(1);
}
}
\ No newline at end of file
diff --git a/src/inc/taos.h b/src/inc/taos.h
index d6f1883572..1d609bc7db 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -67,8 +67,6 @@ DLL_EXPORT void taos_init();
DLL_EXPORT void taos_cleanup();
DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
-DLL_EXPORT TAOS *taos_connect_c(const char *ip, uint8_t ipLen, const char *user, uint8_t userLen,
- const char *pass, uint8_t passLen, const char *db, uint8_t dbLen, uint16_t port);
DLL_EXPORT void taos_close(TAOS *taos);
typedef struct TAOS_BIND {
@@ -90,7 +88,6 @@ TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt);
int taos_stmt_close(TAOS_STMT *stmt);
DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql);
-DLL_EXPORT TAOS_RES *taos_query_c(TAOS *taos, const char *sql, uint32_t sqlLen);
DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res);
DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result
DLL_EXPORT void taos_free_result(TAOS_RES *res);
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index 3503e39d31..59b2c0220b 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -227,6 +227,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_CPU_LIMITED, 0, 0x080B, "grant cpu
// sync
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_CONFIG, 0, 0x0900, "sync invalid configuration")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NOT_ENABLED, 0, 0x0901, "sync module not enabled")
// wal
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, 0, 0x1000, "wal app error")
diff --git a/src/inc/tbalance.h b/src/inc/tbalance.h
index c52f5afaaa..9ee8d73189 100644
--- a/src/inc/tbalance.h
+++ b/src/inc/tbalance.h
@@ -29,6 +29,7 @@ void balanceAsyncNotify();
void balanceSyncNotify();
void balanceReset();
int32_t balanceAllocVnodes(struct SVgObj *pVgroup);
+int32_t balanceCfgDnode(struct SDnodeObj *pDnode, const char *option);
int32_t balanceDropDnode(struct SDnodeObj *pDnode);
#ifdef __cplusplus
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 305302b71a..79cfbe2a37 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -361,7 +361,7 @@ int main(int argc, char *argv[]) {
arguments.num_of_DPT = 100000;
arguments.num_of_RPR = 1000;
arguments.use_metric = true;
- arguments.insert_only = true;
+ arguments.insert_only = false;
// end change
argp_parse(&argp, argc, argv, 0, 0, &arguments);
@@ -954,13 +954,13 @@ void *readMetric(void *sarg) {
for (int i = 1; i <= m; i++) {
if (i == 1) {
- sprintf(tempS, "index = %d", i);
+ sprintf(tempS, "areaid = %d", i);
} else {
- sprintf(tempS, " or index = %d ", i);
+ sprintf(tempS, " or areaid = %d ", i);
}
strcat(condition, tempS);
- sprintf(command, "select %s from m1 where %s", aggreFunc[j], condition);
+ sprintf(command, "select %s from meters where %s", aggreFunc[j], condition);
printf("Where condition: %s\n", condition);
fprintf(fp, "%s\n", command);
diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h
index 3154a441ca..a454f413f0 100644
--- a/src/mnode/inc/mnodeDef.h
+++ b/src/mnode/inc/mnodeDef.h
@@ -50,8 +50,8 @@ typedef struct SDnodeObj {
int8_t alternativeRole; // from dnode status msg, 0-any, 1-mgmt, 2-dnode
int8_t status; // set in balance function
int8_t isMgmt;
- int8_t reserve1[14];
- int8_t updateEnd[1];
+ int8_t reserve1[11];
+ int8_t updateEnd[4];
int32_t refCount;
uint32_t moduleStatus;
uint32_t lastReboot; // time stamp for last reboot
@@ -68,8 +68,8 @@ typedef struct SMnodeObj {
int32_t mnodeId;
int8_t reserved0[4];
int64_t createdTime;
- int8_t reserved1[7];
- int8_t updateEnd[1];
+ int8_t reserved1[4];
+ int8_t updateEnd[4];
int32_t refCount;
int8_t role;
int8_t reserved2[3];
@@ -90,8 +90,7 @@ typedef struct SSuperTableObj {
int32_t tversion;
int32_t numOfColumns;
int32_t numOfTags;
- int8_t reserved1[3];
- int8_t updateEnd[1];
+ int8_t updateEnd[4];
int32_t refCount;
int32_t numOfTables;
SSchema * schema;
@@ -111,8 +110,7 @@ typedef struct {
int32_t sid;
int32_t vgId;
int32_t sqlLen;
- int8_t updateEnd[1];
- int8_t reserved1[1];
+ int8_t updateEnd[4];
int32_t refCount;
char* sql; //used by normal table
SSchema* schema; //used by normal table
@@ -138,8 +136,8 @@ typedef struct SVgObj {
int8_t status;
int8_t reserved0[4];
SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
- int8_t reserved1[7];
- int8_t updateEnd[1];
+ int8_t reserved1[4];
+ int8_t updateEnd[4];
int32_t refCount;
int32_t numOfTables;
int64_t totalStorage;
@@ -176,8 +174,8 @@ typedef struct SDbObj {
int32_t cfgVersion;
SDbCfg cfg;
int8_t status;
- int8_t reserved1[14];
- int8_t updateEnd[1];
+ int8_t reserved1[11];
+ int8_t updateEnd[4];
int32_t refCount;
int32_t numOfVgroups;
int32_t numOfTables;
@@ -196,8 +194,8 @@ typedef struct SUserObj {
int64_t createdTime;
int8_t superAuth;
int8_t writeAuth;
- int8_t reserved[13];
- int8_t updateEnd[1];
+ int8_t reserved[10];
+ int8_t updateEnd[4];
int32_t refCount;
struct SAcctObj * pAcct;
} SUserObj;
@@ -228,11 +226,11 @@ typedef struct SAcctObj {
int64_t createdTime;
int32_t acctId;
int8_t status;
- int8_t reserved0[10];
- int8_t updateEnd[1];
- SAcctInfo acctInfo;
+ int8_t reserved0[7];
+ int8_t updateEnd[4];
int32_t refCount;
int8_t reserved1[4];
+ SAcctInfo acctInfo;
pthread_mutex_t mutex;
} SAcctObj;
diff --git a/src/mnode/src/mnodeBalance.c b/src/mnode/src/mnodeBalance.c
index 23fc10d0bd..d2ec6dd36e 100644
--- a/src/mnode/src/mnodeBalance.c
+++ b/src/mnode/src/mnodeBalance.c
@@ -28,6 +28,7 @@ void balanceCleanUp() {}
void balanceAsyncNotify() {}
void balanceSyncNotify() {}
void balanceReset() {}
+int32_t balanceCfgDnode(struct SDnodeObj *pDnode, const char *option) { return TSDB_CODE_SYN_NOT_ENABLED; }
int32_t balanceAllocVnodes(SVgObj *pVgroup) {
void * pIter = NULL;
diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c
index fb97d6f380..a159e98ed5 100644
--- a/src/mnode/src/mnodeDb.c
+++ b/src/mnode/src/mnodeDb.c
@@ -67,8 +67,11 @@ static int32_t mnodeDbActionInsert(SSdbOper *pOper) {
SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
pthread_mutex_init(&pDb->mutex, NULL);
+ pthread_mutex_lock(&pDb->mutex);
pDb->vgListSize = VG_LIST_SIZE;
pDb->vgList = calloc(pDb->vgListSize, sizeof(SVgObj *));
+ pthread_mutex_unlock(&pDb->mutex);
+
pDb->numOfVgroups = 0;
pDb->numOfTables = 0;
pDb->numOfSuperTables = 0;
@@ -395,8 +398,8 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMs
code = sdbInsertRow(&oper);
if (code != TSDB_CODE_SUCCESS) {
- mnodeDestroyDb(pDb);
mLInfo("db:%s, failed to create, reason:%s", pDb->name, tstrerror(code));
+ mnodeDestroyDb(pDb);
return code;
} else {
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
@@ -605,7 +608,9 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
static char *mnodeGetDbStr(char *src) {
char *pos = strstr(src, TS_PATH_DELIMITER);
- return ++pos;
+ if (pos != NULL) ++pos;
+
+ return pos;
}
static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
@@ -622,10 +627,13 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
cols = 0;
- pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-
+ pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
char* name = mnodeGetDbStr(pDb->name);
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, name, pShow->bytes[cols]);
+ if (name != NULL) {
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, name, pShow->bytes[cols]);
+ } else {
+ STR_TO_VARSTR(pWrite, "NULL");
+ }
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index 7edba8662e..26c4b7a3ea 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -277,45 +277,45 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
SCMCfgDnodeMsg *pCmCfgDnode = pMsg->rpcMsg.pCont;
if (pCmCfgDnode->ep[0] == 0) {
tstrncpy(pCmCfgDnode->ep, tsLocalEp, TSDB_EP_LEN);
- }
-
- int32_t dnodeId = 0;
- char* pos = strchr(pCmCfgDnode->ep, ':');
- if (NULL == pos) {
- dnodeId = strtol(pCmCfgDnode->ep, NULL, 10);
- if (dnodeId <= 0 || dnodeId > 65536) {
- mError("failed to cfg dnode, invalid dnodeId:%s", pCmCfgDnode->ep);
- return TSDB_CODE_MND_DNODE_NOT_EXIST;
- }
}
- SRpcEpSet epSet = mnodeGetEpSetFromIp(pCmCfgDnode->ep);
- if (dnodeId != 0) {
- SDnodeObj *pDnode = mnodeGetDnode(dnodeId);
+ SDnodeObj *pDnode = mnodeGetDnodeByEp(pCmCfgDnode->ep);
+ if (pDnode == NULL) {
+ int32_t dnodeId = strtol(pCmCfgDnode->ep, NULL, 10);
+ if (dnodeId <= 0 || dnodeId > 65536) {
+ mError("failed to cfg dnode, invalid dnodeEp:%s", pCmCfgDnode->ep);
+ return TSDB_CODE_MND_DNODE_NOT_EXIST;
+ }
+
+ pDnode = mnodeGetDnode(dnodeId);
if (pDnode == NULL) {
mError("failed to cfg dnode, invalid dnodeId:%d", dnodeId);
return TSDB_CODE_MND_DNODE_NOT_EXIST;
}
- epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
- mnodeDecDnodeRef(pDnode);
}
- SMDCfgDnodeMsg *pMdCfgDnode = rpcMallocCont(sizeof(SMDCfgDnodeMsg));
- strcpy(pMdCfgDnode->ep, pCmCfgDnode->ep);
- strcpy(pMdCfgDnode->config, pCmCfgDnode->config);
+ SRpcEpSet epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
+ mnodeDecDnodeRef(pDnode);
- SRpcMsg rpcMdCfgDnodeMsg = {
- .ahandle = 0,
- .code = 0,
- .msgType = TSDB_MSG_TYPE_MD_CONFIG_DNODE,
- .pCont = pMdCfgDnode,
- .contLen = sizeof(SMDCfgDnodeMsg)
- };
+ if (strncasecmp(pCmCfgDnode->config, "balance", 7) == 0) {
+ return balanceCfgDnode(pDnode, pCmCfgDnode->config + 8);
+ } else {
+ SMDCfgDnodeMsg *pMdCfgDnode = rpcMallocCont(sizeof(SMDCfgDnodeMsg));
+ strcpy(pMdCfgDnode->ep, pCmCfgDnode->ep);
+ strcpy(pMdCfgDnode->config, pCmCfgDnode->config);
- mInfo("dnode:%s, is configured by %s", pCmCfgDnode->ep, pMsg->pUser->user);
- dnodeSendMsgToDnode(&epSet, &rpcMdCfgDnodeMsg);
+ SRpcMsg rpcMdCfgDnodeMsg = {
+ .ahandle = 0,
+ .code = 0,
+ .msgType = TSDB_MSG_TYPE_MD_CONFIG_DNODE,
+ .pCont = pMdCfgDnode,
+ .contLen = sizeof(SMDCfgDnodeMsg)
+ };
- return TSDB_CODE_SUCCESS;
+ mInfo("dnode:%s, is configured by %s", pCmCfgDnode->ep, pMsg->pUser->user);
+ dnodeSendMsgToDnode(&epSet, &rpcMdCfgDnodeMsg);
+ return TSDB_CODE_SUCCESS;
+ }
}
static void mnodeProcessCfgDnodeMsgRsp(SRpcMsg *rpcMsg) {
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index 42ded7ed06..4b2945152b 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -406,7 +406,7 @@ void sdbDecRef(void *handle, void *pObj) {
int32_t refCount = atomic_sub_fetch_32(pRefCount, 1);
sdbTrace("def ref of table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
- int8_t *updateEnd = pObj + pTable->refCountPos - 1;
+ int32_t *updateEnd = pObj + pTable->refCountPos - 4;
if (refCount <= 0 && *updateEnd) {
sdbTrace("table:%s, record:%p:%s:%d is destroyed", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
SSdbOper oper = {.pObj = pObj};
@@ -453,7 +453,7 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
keySize = strlen((char *)key);
}
- taosHashPut(pTable->iHandle, key, keySize, &pOper->pObj, sizeof(void **));
+ taosHashPut(pTable->iHandle, key, keySize, &pOper->pObj, sizeof(int64_t));
sdbIncRef(pTable, pOper->pObj);
atomic_add_fetch_32(&pTable->numOfRows, 1);
@@ -472,6 +472,14 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
}
static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
+ int32_t *updateEnd = pOper->pObj + pTable->refCountPos - 4;
+ bool set = atomic_val_compare_exchange_32(updateEnd, 0, 1) == 0;
+ if (!set) {
+ sdbError("table:%s, failed to delete record:%s from hash, for it already removed", pTable->tableName,
+ sdbGetKeyStrFromObj(pTable, pOper->pObj));
+ return TSDB_CODE_MND_SDB_OBJ_NOT_THERE;
+ }
+
(*pTable->deleteFp)(pOper);
void * key = sdbGetObjKey(pTable, pOper->pObj);
@@ -486,8 +494,6 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg);
- int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1;
- *updateEnd = 1;
sdbDecRef(pTable, pOper->pObj);
return TSDB_CODE_SUCCESS;
@@ -654,8 +660,9 @@ bool sdbCheckRowDeleted(void *pTableInput, void *pRow) {
SSdbTable *pTable = pTableInput;
if (pTable == NULL) return false;
- int8_t *updateEnd = pRow + pTable->refCountPos - 1;
- return (*updateEnd == 1);
+ int32_t *updateEnd = pRow + pTable->refCountPos - 4;
+ return atomic_val_compare_exchange_32(updateEnd, 1, 1) == 1;
+ // return (*updateEnd == 1);
}
int32_t sdbDeleteRow(SSdbOper *pOper) {
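
Background (editorial note, not part of the patch): widening `updateEnd` from `int8_t[1]` to `int8_t[4]` throughout mnodeDef.h lets sdb treat the field as an aligned 32-bit word and flip it with `atomic_val_compare_exchange_32`, so only the first `sdbDeleteHash` call on a record actually performs the delete. A rough sketch of the idea using C11 atomics; the struct and function names below are illustrative, and the cast mirrors how mnodeSdb.c reads the field as an `int32_t`.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  int64_t payload;        /* ... object fields ... */
  int8_t  updateEnd[4];   /* widened from [1]: room for an aligned 32-bit flag */
  int32_t refCount;
} SObjHeader;

/* Only the first caller that flips 0 -> 1 wins and may delete the record. */
static bool markDeleted(SObjHeader *pObj) {
  _Atomic int32_t *flag = (_Atomic int32_t *)(void *)pObj->updateEnd;
  int32_t expected = 0;
  return atomic_compare_exchange_strong(flag, &expected, 1);
}
```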
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index e3d5b41be3..1d85a8cacd 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -236,7 +236,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {
}
SCMHeartBeatMsg *pHBMsg = pMsg->rpcMsg.pCont;
- SRpcConnInfo connInfo;
+ SRpcConnInfo connInfo = {0};
rpcGetConnInfo(pMsg->rpcMsg.handle, &connInfo);
int32_t connId = htonl(pHBMsg->connId);
@@ -284,7 +284,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
SCMConnectRsp *pConnectRsp = NULL;
int32_t code = TSDB_CODE_SUCCESS;
- SRpcConnInfo connInfo;
+ SRpcConnInfo connInfo = {0};
if (rpcGetConnInfo(pMsg->rpcMsg.handle, &connInfo) != 0) {
mError("thandle:%p is already released while process connect msg", pMsg->rpcMsg.handle);
code = TSDB_CODE_MND_INVALID_CONNECTION;
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 7478d7cd78..12ab58d949 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -72,7 +72,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessDropTableMsg(SMnodeMsg *mnodeMsg);
static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg);
static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg);
-static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn);
+static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg);
static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *mnodeMsg);
@@ -759,7 +759,7 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
SChildTableObj *pCTable = (SChildTableObj *)pMsg->pTable;
mInfo("app:%p:%p, table:%s, start to drop ctable, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
pDrop->tableId, pCTable->vgId, pCTable->sid, pCTable->uid);
- return mnodeProcessDropChildTableMsg(pMsg, true);
+ return mnodeProcessDropChildTableMsg(pMsg);
}
}
@@ -882,7 +882,7 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
static int32_t mnodeDropSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
if (code != TSDB_CODE_SUCCESS) {
- mError("app:%p:%p, table:%s, failed to drop, sdb error", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
+ mError("app:%p:%p, stable:%s, failed to drop, sdb error", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
} else {
mLInfo("app:%p:%p, stable:%s, is dropped from sdb", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
}
@@ -1765,18 +1765,13 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
}
}
-static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
+static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
- if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId);
- if (pMsg->pVgroup == NULL) {
- mError("app:%p:%p, table:%s, failed to drop ctable, vgroup not exist", pMsg->rpcMsg.ahandle, pMsg,
- pTable->info.tableId);
- return TSDB_CODE_MND_APP_ERROR;
- }
+ mLInfo("app:%p:%p, ctable:%s, is dropped from sdb", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
SMDDropTableMsg *pDrop = rpcMallocCont(sizeof(SMDDropTableMsg));
if (pDrop == NULL) {
- mError("app:%p:%p, table:%s, failed to drop ctable, no enough memory", pMsg->rpcMsg.ahandle, pMsg,
+ mError("app:%p:%p, ctable:%s, failed to drop ctable, no enough memory", pMsg->rpcMsg.ahandle, pMsg,
pTable->info.tableId);
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
@@ -1789,7 +1784,7 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pMsg->pVgroup);
- mInfo("app:%p:%p, table:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
+ mInfo("app:%p:%p, ctable:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
pDrop->tableId, pTable->vgId, pTable->sid, pTable->uid);
SRpcMsg rpcMsg = {
@@ -1807,6 +1802,40 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
}
+static int32_t mnodeDropChildTableCb(SMnodeMsg *pMsg, int32_t code) {
+ if (code != TSDB_CODE_SUCCESS) {
+ SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
+ mError("app:%p:%p, ctable:%s, failed to drop, sdb error", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
+ return code;
+ }
+
+ return mnodeSendDropChildTableMsg(pMsg, true);
+}
+
+static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) {
+ SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
+ if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId);
+ if (pMsg->pVgroup == NULL) {
+ mError("app:%p:%p, table:%s, failed to drop ctable, vgroup not exist", pMsg->rpcMsg.ahandle, pMsg,
+ pTable->info.tableId);
+ return TSDB_CODE_MND_APP_ERROR;
+ }
+
+ SSdbOper oper = {
+ .type = SDB_OPER_GLOBAL,
+ .table = tsChildTableSdb,
+ .pObj = pTable,
+ .pMsg = pMsg,
+ .cb = mnodeDropChildTableCb
+ };
+
+ int32_t code = sdbDeleteRow(&oper);
+ if (code == TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_MND_ACTION_IN_PROGRESS;
+ }
+ return code;
+}
+
static int32_t mnodeFindNormalTableColumnIndex(SChildTableObj *pTable, char *colName) {
SSchema *schema = (SSchema *) pTable->schema;
for (int32_t col = 0; col < pTable->numOfColumns; col++) {
@@ -2220,19 +2249,6 @@ static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
return;
}
- SSdbOper oper = {
- .type = SDB_OPER_GLOBAL,
- .table = tsChildTableSdb,
- .pObj = pTable
- };
-
- int32_t code = sdbDeleteRow(&oper);
- if (code != TSDB_CODE_SUCCESS) {
- mError("app:%p:%p, table:%s, update ctables sdb error", mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId);
- dnodeSendRpcMnodeWriteRsp(mnodeMsg, TSDB_CODE_MND_SDB_ERROR);
- return;
- }
-
if (mnodeMsg->pVgroup->numOfTables <= 0) {
mInfo("app:%p:%p, vgId:%d, all tables is dropped, drop vgroup", mnodeMsg->rpcMsg.ahandle, mnodeMsg,
mnodeMsg->pVgroup->vgId);
@@ -2259,7 +2275,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
if (sdbCheckRowDeleted(tsChildTableSdb, pTable)) {
mDebug("app:%p:%p, table:%s, create table rsp received, but a deleting opertion incoming, vgId:%d sid:%d uid:%" PRIu64,
mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid);
- mnodeProcessDropChildTableMsg(mnodeMsg, false);
+ mnodeSendDropChildTableMsg(mnodeMsg, false);
rpcMsg->code = TSDB_CODE_SUCCESS;
}
diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c
index 84f5d6aa58..8c783eebaf 100644
--- a/src/mnode/src/mnodeUser.c
+++ b/src/mnode/src/mnodeUser.c
@@ -358,7 +358,7 @@ static int32_t mnodeRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, voi
}
SUserObj *mnodeGetUserFromConn(void *pConn) {
- SRpcConnInfo connInfo;
+ SRpcConnInfo connInfo = {0};
if (rpcGetConnInfo(pConn, &connInfo) == 0) {
return mnodeGetUser(connInfo.user);
} else {
diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c
index 1de591df7c..46255a6c8f 100644
--- a/src/mnode/src/mnodeVgroup.c
+++ b/src/mnode/src/mnodeVgroup.c
@@ -434,15 +434,22 @@ int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSi
}
if (pDb->numOfVgroups < maxVgroupsPerDb) {
- mDebug("app:%p:%p, db:%s, try to create a new vgroup, numOfVgroups:%d maxVgroupsPerDb:%d", pMsg->rpcMsg.ahandle, pMsg,
- pDb->name, pDb->numOfVgroups, maxVgroupsPerDb);
+ mDebug("app:%p:%p, db:%s, try to create a new vgroup, numOfVgroups:%d maxVgroupsPerDb:%d", pMsg->rpcMsg.ahandle,
+ pMsg, pDb->name, pDb->numOfVgroups, maxVgroupsPerDb);
pthread_mutex_unlock(&pDb->mutex);
int32_t code = mnodeCreateVgroup(pMsg);
- if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return code;
+ if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) {
+ return code;
+ } else {
+ pthread_mutex_lock(&pDb->mutex);
+ }
}
SVgObj *pVgroup = pDb->vgList[0];
- if (pVgroup == NULL) return TSDB_CODE_MND_NO_ENOUGH_DNODES;
+ if (pVgroup == NULL) {
+ pthread_mutex_unlock(&pDb->mutex);
+ return TSDB_CODE_MND_NO_ENOUGH_DNODES;
+ }
int32_t code = mnodeAllocVgroupIdPool(pVgroup);
if (code != TSDB_CODE_SUCCESS) {
@@ -483,7 +490,7 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
} else {
pVgroup->status = TAOS_VG_STATUS_READY;
SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb};
- sdbUpdateRow(&desc);
+ (void)sdbUpdateRow(&desc);
}
mInfo("app:%p:%p, vgId:%d, is created in mnode, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
diff --git a/src/plugins/http/src/gcJson.c b/src/plugins/http/src/gcJson.c
index 544a11b5fc..94d53db6ef 100644
--- a/src/plugins/http/src/gcJson.c
+++ b/src/plugins/http/src/gcJson.c
@@ -121,6 +121,10 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
for (int k = 0; k < numOfRows; ++k) {
TAOS_ROW row = taos_fetch_row(result);
+ if (row == NULL) {
+ cmd->numOfRows--;
+ continue;
+ }
int32_t* length = taos_fetch_lengths(result);
// for group by
diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c
index cefcca7821..225977abae 100644
--- a/src/plugins/http/src/httpContext.c
+++ b/src/plugins/http/src/httpContext.c
@@ -108,7 +108,7 @@ HttpContext *httpCreateContext(int32_t fd) {
pContext->lastAccessTime = taosGetTimestampSec();
pContext->state = HTTP_CONTEXT_STATE_READY;
- HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(void *), &pContext, sizeof(void *), 3);
+ HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(int64_t), &pContext, sizeof(int64_t), 3);
pContext->ppContext = ppContext;
httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext);
diff --git a/src/plugins/http/src/restJson.c b/src/plugins/http/src/restJson.c
index 53b0248149..7a73f6559f 100644
--- a/src/plugins/http/src/restJson.c
+++ b/src/plugins/http/src/restJson.c
@@ -94,6 +94,10 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
for (int k = 0; k < numOfRows; ++k) {
TAOS_ROW row = taos_fetch_row(result);
+ if (row == NULL) {
+ cmd->numOfRows--;
+ continue;
+ }
int32_t* length = taos_fetch_lengths(result);
// data row array begin
diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c
index e6e8cf982b..0cc28bb82c 100644
--- a/src/plugins/monitor/src/monitorMain.c
+++ b/src/plugins/monitor/src/monitorMain.c
@@ -27,7 +27,6 @@
#include "dnode.h"
#include "monitor.h"
-
#define monitorFatal(...) { if (monitorDebugFlag & DEBUG_FATAL) { taosPrintLog("MON FATAL ", 255, __VA_ARGS__); }}
#define monitorError(...) { if (monitorDebugFlag & DEBUG_ERROR) { taosPrintLog("MON ERROR ", 255, __VA_ARGS__); }}
#define monitorWarn(...) { if (monitorDebugFlag & DEBUG_WARN) { taosPrintLog("MON WARN ", 255, __VA_ARGS__); }}
@@ -78,6 +77,7 @@ static void monitorStartTimer();
static void monitorSaveSystemInfo();
extern int32_t (*monitorStartSystemFp)();
extern void (*monitorStopSystemFp)();
+extern void (*monitorExecuteSQLFp)(char *sql);
static void monitorCheckDiskUsage(void *para, void *unused) {
taosGetDisk();
@@ -207,6 +207,7 @@ static void monitorInitDatabase() {
taos_query_a(tsMonitorConn.conn, tsMonitorConn.sql, monitorInitDatabaseCb, NULL);
} else {
tsMonitorConn.state = MONITOR_STATE_INITIALIZED;
+ monitorExecuteSQLFp = monitorExecuteSQL;
monitorInfo("monitor service init success");
monitorStartTimer();
@@ -230,6 +231,7 @@ static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code) {
void monitorStopSystem() {
monitorInfo("monitor module is stopped");
+ monitorExecuteSQLFp = NULL;
tsMonitorConn.state = MONITOR_STATE_STOPPED;
if (tsMonitorConn.initTimer != NULL) {
taosTmrStopA(&(tsMonitorConn.initTimer));
@@ -248,33 +250,13 @@ static void monitorStartTimer() {
taosTmrReset(monitorSaveSystemInfo, tsMonitorInterval * 1000, NULL, tscTmr, &tsMonitorConn.timer);
}
-static void dnodeMontiorInsertAcctCallback(void *param, TAOS_RES *result, int32_t code) {
+static void dnodeMontiorLogCallback(void *param, TAOS_RES *result, int32_t code) {
if (code < 0) {
- monitorError("monitor:%p, save account info failed, code:%s", tsMonitorConn.conn, tstrerror(code));
+ monitorError("monitor:%p, save %s failed, reason:%s", tsMonitorConn.conn, (char *)param, tstrerror(code));
} else if (code == 0) {
- monitorError("monitor:%p, save account info failed, affect rows:%d", tsMonitorConn.conn, code);
+ monitorError("monitor:%p, save %s failed, affect rows:%d", tsMonitorConn.conn, (char *)param, code);
} else {
- monitorDebug("monitor:%p, save account info success, code:%s", tsMonitorConn.conn, tstrerror(code));
- }
-}
-
-static void dnodeMontiorInsertSysCallback(void *param, TAOS_RES *result, int32_t code) {
- if (code < 0) {
- monitorError("monitor:%p, save system info failed, code:%s %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql);
- } else if (code == 0) {
- monitorError("monitor:%p, save system info failed, affect rows:%d %s", tsMonitorConn.conn, code, tsMonitorConn.sql);
- } else {
- monitorDebug("monitor:%p, save system info success, code:%s %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql);
- }
-}
-
-static void dnodeMontiorInsertLogCallback(void *param, TAOS_RES *result, int32_t code) {
- if (code < 0) {
- monitorError("monitor:%p, save log failed, code:%s", tsMonitorConn.conn, tstrerror(code));
- } else if (code == 0) {
- monitorError("monitor:%p, save log failed, affect rows:%d", tsMonitorConn.conn, code);
- } else {
- monitorDebug("monitor:%p, save log info success, code:%s", tsMonitorConn.conn, tstrerror(code));
+ monitorDebug("monitor:%p, save %s info success, reason:%s", tsMonitorConn.conn, (char *)param, tstrerror(code));
}
}
@@ -359,7 +341,7 @@ static void monitorSaveSystemInfo() {
pos += monitorBuildReqSql(sql + pos);
monitorDebug("monitor:%p, save system info, sql:%s", tsMonitorConn.conn, sql);
- taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorInsertSysCallback, "log");
+ taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "sys");
if (tsMonitorConn.timer != NULL && tsMonitorConn.state != MONITOR_STATE_STOPPED) {
monitorStartTimer();
@@ -397,7 +379,7 @@ void monitorSaveAcctLog(SAcctMonitorObj *pMon) {
pMon->accessState);
monitorDebug("monitor:%p, save account info, sql %s", tsMonitorConn.conn, sql);
- taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorInsertAcctCallback, "account");
+ taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "account");
}
void monitorSaveLog(int32_t level, const char *const format, ...) {
@@ -421,14 +403,11 @@ void monitorSaveLog(int32_t level, const char *const format, ...) {
sql[len++] = 0;
monitorDebug("monitor:%p, save log, sql: %s", tsMonitorConn.conn, sql);
- taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorInsertLogCallback, "log");
+ taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "log");
}
void monitorExecuteSQL(char *sql) {
if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return;
-
monitorDebug("monitor:%p, execute sql: %s", tsMonitorConn.conn, sql);
-
- // bug while insert binary
- // taos_query_a(tsMonitorConn.conn, sql, NULL, NULL);
+ taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "sql");
}
diff --git a/src/plugins/mqtt/src/mqttSystem.c b/src/plugins/mqtt/src/mqttSystem.c
index 2687106124..0259ea23eb 100644
--- a/src/plugins/mqtt/src/mqttSystem.c
+++ b/src/plugins/mqtt/src/mqttSystem.c
@@ -64,7 +64,7 @@ int32_t mqttInitSystem() {
}
char* _begin_hostname = strstr(url, recntStatus.hostname);
- if (strstr(_begin_hostname, ":") != NULL) {
+ if (_begin_hostname != NULL && strstr(_begin_hostname, ":") != NULL) {
recntStatus.port = strbetween(_begin_hostname, ":", "/");
} else {
recntStatus.port = strbetween("'1883'", "'", "'");
diff --git a/src/query/inc/qast.h b/src/query/inc/qAst.h
similarity index 98%
rename from src/query/inc/qast.h
rename to src/query/inc/qAst.h
index 918604f8c9..00049b486d 100644
--- a/src/query/inc/qast.h
+++ b/src/query/inc/qAst.h
@@ -45,7 +45,6 @@ typedef void (*__do_filter_suppl_fn_t)(void *, void *);
*
*/
typedef struct tQueryInfo {
- int32_t colIndex; // index of column in schema
uint8_t optr; // expression operator
SSchema sch; // schema of tags
char* q;
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index fb8750323f..127c38a6f8 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -18,16 +18,16 @@
#include "os.h"
#include "hash.h"
-#include "qfill.h"
-#include "qresultBuf.h"
+#include "qFill.h"
+#include "qResultbuf.h"
+#include "qTsbuf.h"
#include "qsqlparser.h"
-#include "qtsbuf.h"
+#include "query.h"
#include "taosdef.h"
#include "tarray.h"
#include "tlockfree.h"
#include "tsdb.h"
#include "tsqlfunction.h"
-#include "query.h"
struct SColumnFilterElem;
typedef bool (*__filter_func_t)(struct SColumnFilterElem* pFilter, char* val1, char* val2);
@@ -158,7 +158,7 @@ typedef struct SQueryRuntimeEnv {
SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo
SQuery* pQuery;
SQLFunctionCtx* pCtx;
- int16_t numOfRowsPerPage;
+ int32_t numOfRowsPerPage;
int16_t offset[TSDB_MAX_COLUMNS];
uint16_t scanFlag; // denotes reversed scan of data or not
SFillInfo* pFillInfo;
diff --git a/src/query/inc/qextbuffer.h b/src/query/inc/qExtbuffer.h
similarity index 98%
rename from src/query/inc/qextbuffer.h
rename to src/query/inc/qExtbuffer.h
index 2cbef2b1be..b57c48933f 100644
--- a/src/query/inc/qextbuffer.h
+++ b/src/query/inc/qExtbuffer.h
@@ -28,7 +28,7 @@ extern "C" {
#include "tdataformat.h"
#include "talgo.h"
-#define DEFAULT_PAGE_SIZE (1024L*64) // 16k larger than the SHistoInfo
+#define DEFAULT_PAGE_SIZE (1024L*4)         // default page size of the external buffer
#define MAX_TMPFILE_PATH_LENGTH PATH_MAX
#define INITIAL_ALLOCATION_BUFFER_SIZE 64
diff --git a/src/query/inc/qfill.h b/src/query/inc/qFill.h
similarity index 99%
rename from src/query/inc/qfill.h
rename to src/query/inc/qFill.h
index ee5974708a..db6a69c2c5 100644
--- a/src/query/inc/qfill.h
+++ b/src/query/inc/qFill.h
@@ -21,8 +21,8 @@ extern "C" {
#endif
#include "os.h"
+#include "qExtbuffer.h"
#include "taosdef.h"
-#include "qextbuffer.h"
typedef struct {
STColumn col; // column info
diff --git a/src/query/inc/qhistogram.h b/src/query/inc/qHistogram.h
similarity index 100%
rename from src/query/inc/qhistogram.h
rename to src/query/inc/qHistogram.h
diff --git a/src/query/inc/qpercentile.h b/src/query/inc/qPercentile.h
similarity index 98%
rename from src/query/inc/qpercentile.h
rename to src/query/inc/qPercentile.h
index c1227dad77..52f666c338 100644
--- a/src/query/inc/qpercentile.h
+++ b/src/query/inc/qPercentile.h
@@ -16,7 +16,7 @@
#ifndef TDENGINE_QPERCENTILE_H
#define TDENGINE_QPERCENTILE_H
-#include "qextbuffer.h"
+#include "qExtbuffer.h"
typedef struct MinMaxEntry {
union {
diff --git a/src/query/inc/qresultBuf.h b/src/query/inc/qResultbuf.h
similarity index 74%
rename from src/query/inc/qresultBuf.h
rename to src/query/inc/qResultbuf.h
index a323d530a2..8c8afb0957 100644
--- a/src/query/inc/qresultBuf.h
+++ b/src/query/inc/qResultbuf.h
@@ -20,9 +20,9 @@
extern "C" {
#endif
-#include "os.h"
-#include "qextbuffer.h"
#include "hash.h"
+#include "os.h"
+#include "qExtbuffer.h"
typedef struct SArray* SIDList;
@@ -33,14 +33,20 @@ typedef struct SDiskbasedResultBuf {
int32_t fd; // data file fd
int32_t allocateId; // allocated page id
int32_t incStep; // minimum allocated pages
- char* pBuf; // mmap buffer pointer
+ void* pBuf; // mmap buffer pointer
char* path; // file path
-
+ int32_t pageSize; // current used page size
+ int32_t inMemPages; // numOfPages that are allocated in memory
SHashObj* idsTable; // id hash table
SIDList list; // for each id, there is a page id list
+
+ void* iBuf; // inmemory buf
+ void* handle; // for debug purpose
+ void* emptyDummyIdList; // dummy id list
} SDiskbasedResultBuf;
-#define DEFAULT_INTERN_BUF_PAGE_SIZE (8192L*5)
+#define DEFAULT_INTERN_BUF_PAGE_SIZE (1024L)
+#define DEFAULT_INMEM_BUF_PAGES 10
/**
* create disk-based result buffer
@@ -49,7 +55,8 @@ typedef struct SDiskbasedResultBuf {
* @param rowSize
* @return
*/
-int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t size, int32_t rowSize, void* handle);
+int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t numOfPages, int32_t rowSize, int32_t pagesize,
+ int32_t inMemPages, void* handle);
/**
*
@@ -81,8 +88,13 @@ SIDList getDataBufPagesIdList(SDiskbasedResultBuf* pResultBuf, int32_t groupId);
* @param id
* @return
*/
-#define GET_RES_BUF_PAGE_BY_ID(buf, id) ((tFilePage*)((buf)->pBuf + DEFAULT_INTERN_BUF_PAGE_SIZE*(id)))
-
+static FORCE_INLINE tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) {
+ if (id < pResultBuf->inMemPages) {
+ return (tFilePage*) ((char*) pResultBuf->iBuf + id * pResultBuf->pageSize);
+ } else {
+ return (tFilePage*) ((char*) pResultBuf->pBuf + (id - pResultBuf->inMemPages) * pResultBuf->pageSize);
+ }
+}
/**
* get the total buffer size in the format of disk file
* @param pResultBuf
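
For orientation, here is a minimal standalone sketch of the two-tier page addressing that getResBufPage() introduces: the first inMemPages pages live in a heap-allocated block, and any page beyond that is addressed relative to the start of the disk-backed (mmap'd) region. The DemoBuf struct and demoGetPage helper below are simplified stand-ins for illustration, not the actual SDiskbasedResultBuf API.

    #include <stdint.h>
    #include <stdlib.h>
    #include <assert.h>

    typedef struct {
      int32_t pageSize;    // bytes per page
      int32_t inMemPages;  // pages kept in the heap buffer
      void   *iBuf;        // in-memory pages
      void   *pBuf;        // disk-backed (e.g. mmap'd) pages
    } DemoBuf;             // simplified stand-in for SDiskbasedResultBuf

    // same addressing rule as getResBufPage(): in-memory pages come first,
    // then page ids are offset into the disk-backed region
    static void *demoGetPage(DemoBuf *b, int32_t id) {
      if (id < b->inMemPages) {
        return (char *)b->iBuf + (size_t)id * b->pageSize;
      }
      return (char *)b->pBuf + (size_t)(id - b->inMemPages) * b->pageSize;
    }

    int main(void) {
      DemoBuf b = {.pageSize = 1024, .inMemPages = 4};
      b.iBuf = calloc(b.inMemPages, b.pageSize);
      b.pBuf = calloc(8, b.pageSize);  // stands in for the mmap'd file region

      assert(demoGetPage(&b, 0) == b.iBuf);                       // first in-memory page
      assert(demoGetPage(&b, 4) == b.pBuf);                       // first disk-backed page
      assert(demoGetPage(&b, 5) == (char *)b.pBuf + b.pageSize);  // second disk-backed page

      free(b.iBuf);
      free(b.pBuf);
      return 0;
    }
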
diff --git a/src/query/inc/qsyntaxtreefunction.h b/src/query/inc/qSyntaxtreefunction.h
similarity index 100%
rename from src/query/inc/qsyntaxtreefunction.h
rename to src/query/inc/qSyntaxtreefunction.h
diff --git a/src/query/inc/qtsbuf.h b/src/query/inc/qTsbuf.h
similarity index 100%
rename from src/query/inc/qtsbuf.h
rename to src/query/inc/qTsbuf.h
diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h
index 78ae7be030..7119cb75fe 100644
--- a/src/query/inc/qUtil.h
+++ b/src/query/inc/qUtil.h
@@ -49,7 +49,7 @@ static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int3
assert(pResult != NULL && pRuntimeEnv != NULL);
SQuery *pQuery = pRuntimeEnv->pQuery;
- tFilePage *page = GET_RES_BUF_PAGE_BY_ID(pRuntimeEnv->pResultBuf, pResult->pos.pageId);
+ tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pos.pageId);
int32_t realRowId = pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery);
return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
@@ -59,6 +59,4 @@ static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int3
__filter_func_t *getRangeFilterFuncArray(int32_t type);
__filter_func_t *getValueFilterFuncArray(int32_t type);
-bool supportPrefilter(int32_t type);
-
#endif // TDENGINE_QUERYUTIL_H
diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h
index c687f01cbc..5ce9121cf1 100644
--- a/src/query/inc/tsqlfunction.h
+++ b/src/query/inc/tsqlfunction.h
@@ -255,7 +255,15 @@ extern int32_t functionCompatList[]; // compatible check array list
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const char *minval, const char *maxval);
-void resetResultInfo(SResultInfo *pResInfo);
+/**
+ * the numOfRes should be kept, since it may be used later,
+ * and to allow the ResultInfo to be re-initialized
+ */
+#define RESET_RESULT_INFO(_r) \
+ do { \
+ (_r)->initialized = false; \
+ } while (0)
+
void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf);
static FORCE_INLINE void initResultInfo(SResultInfo *pResInfo) {
diff --git a/src/query/src/qast.c b/src/query/src/qAst.c
similarity index 99%
rename from src/query/src/qast.c
rename to src/query/src/qAst.c
index da4eb8f3ba..c2578c15c0 100644
--- a/src/query/src/qast.c
+++ b/src/query/src/qAst.c
@@ -16,17 +16,17 @@
#include "os.h"
-#include "tname.h"
-#include "qast.h"
-#include "tsdb.h"
#include "exception.h"
+#include "qAst.h"
+#include "qSyntaxtreefunction.h"
#include "qsqlparser.h"
-#include "qsyntaxtreefunction.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tarray.h"
#include "tbuffer.h"
#include "tcompare.h"
+#include "tname.h"
+#include "tsdb.h"
#include "tskiplist.h"
#include "tsqlfunction.h"
#include "tstoken.h"
@@ -678,7 +678,7 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo,
tstr *name = (tstr*) tsdbGetTableName(*(void**) pData);
// todo speed up by using hash
- if (pQueryInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
if (pQueryInfo->optr == TSDB_RELATION_IN) {
addToResult = pQueryInfo->compare(name, pQueryInfo->q);
} else if (pQueryInfo->optr == TSDB_RELATION_LIKE) {
@@ -716,7 +716,7 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S
}
tQueryInfo *pQueryInfo = pExpr->_node.info;
- if (pQueryInfo->colIndex == 0 && pQueryInfo->optr != TSDB_RELATION_LIKE) {
+ if (pQueryInfo->sch.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX && pQueryInfo->optr != TSDB_RELATION_LIKE) {
tQueryIndexColumn(pSkipList, pQueryInfo, result);
} else {
tQueryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn);
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 6dba5cbd2a..1220c5ca31 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -13,19 +13,19 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h"
+#include "qFill.h"
+#include "taosmsg.h"
#include "tcache.h"
#include "tglobal.h"
-#include "qfill.h"
-#include "taosmsg.h"
#include "exception.h"
#include "hash.h"
+#include "qAst.h"
#include "qExecutor.h"
+#include "qResultbuf.h"
#include "qUtil.h"
-#include "qresultBuf.h"
#include "query.h"
#include "queryLog.h"
-#include "qast.h"
#include "tlosertree.h"
#include "tscompression.h"
#include "ttime.h"
@@ -50,11 +50,6 @@
#define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0}
-/* get the qinfo struct address from the query struct address */
-#define GET_COLUMN_BYTES(query, colidx) \
- ((query)->colList[(query)->pSelectExpr[colidx].base.colInfo.colIndex].bytes)
-#define GET_COLUMN_TYPE(query, colidx) ((query)->colList[(query)->pSelectExpr[colidx].base.colInfo.colIndex].type)
-
enum {
// when query starts to execute, this status will set
QUERY_NOT_COMPLETED = 0x1u,
@@ -238,9 +233,7 @@ bool isGroupbyNormalCol(SSqlGroupbyExpr *pGroupbyExpr) {
for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) {
SColIndex *pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, i);
if (pColIndex->flag == TSDB_COL_NORMAL) {
- /*
- * make sure the normal column locates at the second position if tbname exists in group by clause
- */
+      // make sure the normal column is located at the second position if tbname exists in the group by clause
if (pGroupbyExpr->numOfGroupCols > 1) {
assert(pColIndex->colIndex > 0);
}
@@ -299,6 +292,17 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) {
return false;
}
+bool isProjQuery(SQuery *pQuery) {
+ for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
+ int32_t functId = pQuery->pSelectExpr[i].base.functionId;
+ if (functId != TSDB_FUNC_PRJ && functId != TSDB_FUNC_TAGPRJ) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
bool isTSCompQuery(SQuery *pQuery) { return pQuery->pSelectExpr[0].base.functionId == TSDB_FUNC_TS_COMP; }
static bool limitResults(SQueryRuntimeEnv* pRuntimeEnv) {
@@ -394,15 +398,15 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin
if (pWindowResInfo->size >= pWindowResInfo->capacity) {
int64_t newCap = pWindowResInfo->capacity * 1.5;
char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult));
- if (t != NULL) {
- pWindowResInfo->pResult = (SWindowResult *)t;
-
- int32_t inc = newCap - pWindowResInfo->capacity;
- memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * inc);
- } else {
- // todo
+ if (t == NULL) {
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
+ pWindowResInfo->pResult = (SWindowResult *)t;
+
+ int32_t inc = newCap - pWindowResInfo->capacity;
+ memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * inc);
+
for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) {
createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, pRuntimeEnv->interBufSize);
}
@@ -475,7 +479,7 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult
pData = getNewDataBuf(pResultBuf, sid, &pageId);
} else {
pageId = getLastPageId(list);
- pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, pageId);
+ pData = getResBufPage(pResultBuf, pageId);
if (pData->num >= numOfRowsPerPage) {
pData = getNewDataBuf(pResultBuf, sid, &pageId);
@@ -1008,7 +1012,6 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat
case TSDB_DATA_TYPE_BIGINT: v = GET_INT64_VAL(pData); break;
}
-// assert(pRuntimeEnv->windowResInfo.hashList->size <= 2);
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes, true);
if (pWindowRes == NULL) {
return -1;
@@ -1053,9 +1056,9 @@ static char *getGroupbyColumnData(SQuery *pQuery, int16_t *type, int16_t *bytes,
*type = pQuery->colList[colIndex].type;
*bytes = pQuery->colList[colIndex].bytes;
/*
- * the colIndex is acquired from the first meter of all qualified meters in this vnode during query prepare
- * stage, the remain meter may not have the required column in cache actually. So, the validation of required
- * column in cache with the corresponding meter schema is reinforced.
+   * the colIndex is acquired from the first table of all qualified tables in this vnode during the query prepare
+   * stage; the remaining tables may not actually have the required column in cache. So, the validation of the
+   * required column in cache against the corresponding schema is reinforced.
*/
int32_t numOfCols = taosArrayGetSize(pDataBlock);
@@ -1206,9 +1209,8 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
continue;
}
- // interval window query
+ // interval window query, decide the time window according to the primary timestamp
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
- // decide the time window according to the primary timestamp
int64_t ts = tsCols[offset];
STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery);
@@ -1230,8 +1232,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
while (1) {
GET_NEXT_TIMEWINDOW(pQuery, &nextWin);
- if (/*pWindowResInfo->startTime > nextWin.skey ||*/
- (nextWin.skey > pQuery->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
+ if ((nextWin.skey > pQuery->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
(nextWin.skey < pQuery->window.ekey && !QUERY_IS_ASC_QUERY(pQuery))) {
break;
}
@@ -1489,6 +1490,8 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
goto _clean;
}
+ qDebug("QInfo:%p setup runtime env1", GET_QINFO_ADDR(pRuntimeEnv));
+
pRuntimeEnv->offset[0] = 0;
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SSqlFuncMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].base;
@@ -1533,6 +1536,8 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
}
}
+ qDebug("QInfo:%p setup runtime env2", GET_QINFO_ADDR(pRuntimeEnv));
+
// set the order information for top/bottom query
int32_t functionId = pCtx->functionId;
@@ -1553,17 +1558,25 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
}
}
+ qDebug("QInfo:%p setup runtime env3", GET_QINFO_ADDR(pRuntimeEnv));
+
char* buf = (char*) pRuntimeEnv->resultInfo + sizeof(SResultInfo) * pQuery->numOfOutput;
// set the intermediate result output buffer
setWindowResultInfo(pRuntimeEnv->resultInfo, pQuery, pRuntimeEnv->stableQuery, buf);
+ qDebug("QInfo:%p setup runtime env4", GET_QINFO_ADDR(pRuntimeEnv));
+
// if it is group by normal column, do not set output buffer, the output buffer is pResult
- if (!isGroupbyNormalCol(pQuery->pGroupbyExpr) && !pRuntimeEnv->stableQuery) {
+ if (!pRuntimeEnv->groupbyNormalCol && !pRuntimeEnv->stableQuery) {
resetCtxOutputBuf(pRuntimeEnv);
}
+ qDebug("QInfo:%p setup runtime env5", GET_QINFO_ADDR(pRuntimeEnv));
+
setCtxTagColumnInfo(pRuntimeEnv, pRuntimeEnv->pCtx);
+
+ qDebug("QInfo:%p init completed", GET_QINFO_ADDR(pRuntimeEnv));
return TSDB_CODE_SUCCESS;
_clean:
@@ -1915,9 +1928,20 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) {
return num;
}
-static FORCE_INLINE int32_t getNumOfRowsInResultPage(SQuery *pQuery, bool topBotQuery, bool isSTableQuery) {
- int32_t rowSize = pQuery->rowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, topBotQuery, isSTableQuery);
- return (DEFAULT_INTERN_BUF_PAGE_SIZE - sizeof(tFilePage)) / rowSize;
+static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, int32_t* rowsize) {
+ SQuery* pQuery = pRuntimeEnv->pQuery;
+
+ *rowsize = pQuery->rowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery);
+ int32_t overhead = sizeof(tFilePage);
+
+ // one page contains at least two rows
+ *ps = DEFAULT_INTERN_BUF_PAGE_SIZE;
+ while(((*rowsize) * 2) > (*ps) - overhead) {
+ *ps = (*ps << 1u);
+ }
+
+ pRuntimeEnv->numOfRowsPerPage = ((*ps) - sizeof(tFilePage)) / (*rowsize);
+
}
#define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_BINARY && (_t) != TSDB_DATA_TYPE_NCHAR)
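
The sizing rule in getIntermediateBufInfo() can be illustrated in isolation: starting from DEFAULT_INTERN_BUF_PAGE_SIZE, the page size is doubled until two rows plus the tFilePage header fit, and the rows-per-page figure follows from the final size. A small sketch, using placeholder DEMO_* constants instead of the real page size and sizeof(tFilePage):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_INTERN_BUF_PAGE_SIZE 1024   // stands in for DEFAULT_INTERN_BUF_PAGE_SIZE
    #define DEMO_PAGE_HEADER          16     // stands in for sizeof(tFilePage)

    // double the page size until at least two rows fit; return rows per page
    static int32_t demoRowsPerPage(int32_t rowSize, int32_t *pageSize) {
      *pageSize = DEMO_INTERN_BUF_PAGE_SIZE;
      while (rowSize * 2 > *pageSize - DEMO_PAGE_HEADER) {
        *pageSize <<= 1;
      }
      return (*pageSize - DEMO_PAGE_HEADER) / rowSize;
    }

    int main(void) {
      int32_t ps = 0;
      // a 3000-byte row forces 1024 -> 2048 -> 4096 -> 8192, giving 2 rows per page
      printf("rows per page: %d (page size %d)\n", demoRowsPerPage(3000, &ps), ps);
      return 0;
    }
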
@@ -2043,8 +2067,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
} else { // check if this data block is required to load
// Calculate all time windows that are overlapping or contain current data block.
- // If current data block is contained by all possible time window, loading current
- // data block is not needed.
+ // If current data block is contained by all possible time window, do not load current data block.
if (QUERY_IS_INTERVAL_QUERY(pQuery) && overlapWithTimeWindow(pQuery, pBlockInfo)) {
status = BLK_DATA_ALL_NEEDED;
}
@@ -2364,6 +2387,18 @@ static void doSetTagValueInParam(void *tsdb, void* pTable, int32_t tagColId, tVa
}
}
+static SColumnInfo* doGetTagColumnInfoById(SColumnInfo* pTagColList, int32_t numOfTags, int16_t colId) {
+ assert(pTagColList != NULL && numOfTags > 0);
+
+ for(int32_t i = 0; i < numOfTags; ++i) {
+ if (pTagColList[i].colId == colId) {
+ return &pTagColList[i];
+ }
+ }
+
+ return NULL;
+}
+
void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) {
SQuery *pQuery = pRuntimeEnv->pQuery;
SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
@@ -2372,16 +2407,10 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) {
if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) {
assert(pExprInfo->base.numOfParams == 1);
- // todo refactor extract function.
- int16_t type = -1, bytes = -1;
- for(int32_t i = 0; i < pQuery->numOfTags; ++i) {
- if (pQuery->tagColList[i].colId == pExprInfo->base.arg->argValue.i64) {
- type = pQuery->tagColList[i].type;
- bytes = pQuery->tagColList[i].bytes;
- }
- }
+ int16_t tagColId = pExprInfo->base.arg->argValue.i64;
+ SColumnInfo* pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId);
- doSetTagValueInParam(tsdb, pTable, pExprInfo->base.arg->argValue.i64, &pRuntimeEnv->pCtx[0].tag, type, bytes);
+ doSetTagValueInParam(tsdb, pTable, tagColId, &pRuntimeEnv->pCtx[0].tag, pColInfo->type, pColInfo->bytes);
} else {
// set tag value, by which the results are aggregated.
for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) {
@@ -2399,20 +2428,14 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) {
// set the join tag for first column
SSqlFuncMsg *pFuncMsg = &pExprInfo->base;
- if ((pFuncMsg->functionId == TSDB_FUNC_TS || pFuncMsg->functionId == TSDB_FUNC_PRJ) && pFuncMsg->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX &&
- pRuntimeEnv->pTSBuf != NULL) {
+ if ((pFuncMsg->functionId == TSDB_FUNC_TS || pFuncMsg->functionId == TSDB_FUNC_PRJ) && pRuntimeEnv->pTSBuf != NULL &&
+ pFuncMsg->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
assert(pFuncMsg->numOfParams == 1);
- // todo refactor
- int16_t type = -1, bytes = -1;
- for(int32_t i = 0; i < pQuery->numOfTags; ++i) {
- if (pQuery->tagColList[i].colId == pExprInfo->base.arg->argValue.i64) {
- type = pQuery->tagColList[i].type;
- bytes = pQuery->tagColList[i].bytes;
- }
- }
+ int16_t tagColId = pExprInfo->base.arg->argValue.i64;
+ SColumnInfo* pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId);
- doSetTagValueInParam(tsdb, pTable, pExprInfo->base.arg->argValue.i64, &pRuntimeEnv->pCtx[0].tag, type, bytes);
+ doSetTagValueInParam(tsdb, pTable, tagColId, &pRuntimeEnv->pCtx[0].tag, pColInfo->type, pColInfo->bytes);
qDebug("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%"PRId64, pQInfo, pExprInfo->base.arg->argValue.i64,
pRuntimeEnv->pCtx[0].tag.i64Key)
}
@@ -2429,7 +2452,7 @@ static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, SWindowRes
pCtx[i].aOutputBuf = pCtx[i].aOutputBuf + pCtx[i].outputBytes;
pCtx[i].currentStage = FIRST_STAGE_MERGE;
- resetResultInfo(pCtx[i].resultInfo);
+ RESET_RESULT_INFO(pCtx[i].resultInfo);
aAggs[functionId].init(&pCtx[i]);
}
@@ -2666,7 +2689,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
int32_t size = taosArrayGetSize(list);
for (int32_t i = 0; i < size; ++i) {
int32_t* pgId = taosArrayGet(list, i);
- tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, *pgId);
+ tFilePage *pData = getResBufPage(pResultBuf, *pgId);
total += pData->num;
}
@@ -2675,7 +2698,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
int32_t offset = 0;
for (int32_t j = 0; j < size; ++j) {
int32_t* pgId = taosArrayGet(list, j);
- tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, *pgId);
+ tFilePage *pData = getResBufPage(pResultBuf, *pgId);
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes;
@@ -2865,10 +2888,10 @@ int32_t flushFromResultBuf(SQInfo *pQInfo) {
SQuery * pQuery = pRuntimeEnv->pQuery;
SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;
- int32_t capacity = (DEFAULT_INTERN_BUF_PAGE_SIZE - sizeof(tFilePage)) / pQuery->rowSize;
// the base value for group result, since the maximum number of table for each vnode will not exceed 100,000.
int32_t pageId = -1;
+ int32_t capacity = pResultBuf->numOfRowsPerPage;
int32_t remain = pQuery->sdata[0]->num;
int32_t offset = 0;
@@ -3038,7 +3061,7 @@ void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
* set the output buffer information and intermediate buffer
* not all queries require the interResultBuf, such as COUNT/TAGPRJ/PRJ/TAG etc.
*/
- resetResultInfo(&pRuntimeEnv->resultInfo[i]);
+ RESET_RESULT_INFO(&pRuntimeEnv->resultInfo[i]);
pCtx->resultInfo = &pRuntimeEnv->resultInfo[i];
// set the timestamp output buffer for top/bottom/diff query
@@ -3077,7 +3100,7 @@ void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) {
pRuntimeEnv->pCtx[j].ptsOutputBuf += TSDB_KEYSIZE * output;
}
- resetResultInfo(pRuntimeEnv->pCtx[j].resultInfo);
+ RESET_RESULT_INFO(pRuntimeEnv->pCtx[j].resultInfo);
}
}
@@ -3331,8 +3354,8 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
if (pRuntimeEnv->pSecQueryHandle == NULL) {
longjmp(pRuntimeEnv->env, terrno);
}
- pRuntimeEnv->windowResInfo.curIndex = qstatus.windowIndex;
+ pRuntimeEnv->windowResInfo.curIndex = qstatus.windowIndex;
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
pRuntimeEnv->scanFlag = REPEAT_SCAN;
@@ -3467,7 +3490,6 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) {
return;
}
- int32_t GROUPRESULTID = 1;
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex,
sizeof(groupIndex), true);
if (pWindowRes == NULL) {
@@ -3479,7 +3501,7 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) {
* all group belong to one result set, and each group result has different group id so set the id to be one
*/
if (pWindowRes->pos.pageId == -1) {
- if (addNewWindowResultBuf(pWindowRes, pRuntimeEnv->pResultBuf, GROUPRESULTID, pRuntimeEnv->numOfRowsPerPage) !=
+ if (addNewWindowResultBuf(pWindowRes, pRuntimeEnv->pResultBuf, groupIndex, pRuntimeEnv->numOfRowsPerPage) !=
TSDB_CODE_SUCCESS) {
return;
}
@@ -4149,6 +4171,7 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery)
} else {
pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
}
+
return terrno;
}
@@ -4174,10 +4197,9 @@ static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) {
}
int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) {
- int32_t code = TSDB_CODE_SUCCESS;
-
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
+ int32_t code = TSDB_CODE_SUCCESS;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
pQuery->precision = tsdbGetCfg(tsdb)->precision;
@@ -4186,6 +4208,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
setScanLimitationByResultBuffer(pQuery);
changeExecuteScanOrder(pQInfo, false);
+
code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -4212,33 +4235,42 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
return code;
}
- pRuntimeEnv->numOfRowsPerPage = getNumOfRowsInResultPage(pQuery, pRuntimeEnv->topBotQuery, isSTableQuery);
+ int32_t ps = DEFAULT_PAGE_SIZE;
+ int32_t rowsize = 0;
+ getIntermediateBufInfo(pRuntimeEnv, &ps, &rowsize);
if (isSTableQuery && !onlyQueryTags(pRuntimeEnv->pQuery)) {
- int32_t rows = getInitialPageNum(pQInfo);
- code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, rows, pQuery->rowSize, pQInfo);
+ int32_t numOfPages = getInitialPageNum(pQInfo);
+ code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, numOfPages, rowsize, ps, numOfPages, pQInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if (!QUERY_IS_INTERVAL_QUERY(pQuery)) {
int16_t type = TSDB_DATA_TYPE_NULL;
+ int32_t threshold = 0;
if (pRuntimeEnv->groupbyNormalCol) { // group by columns not tags;
type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr);
+ threshold = 4000;
} else {
type = TSDB_DATA_TYPE_INT; // group id
+ threshold = GET_NUM_OF_TABLEGROUP(pQInfo);
+ if (threshold < 8) {
+ threshold = 8;
+ }
}
- code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 32, 4096, type);
+ code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 8, threshold, type);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
-
} else if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) {
- int32_t rows = getInitialPageNum(pQInfo);
- code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, rows, pQuery->rowSize, pQInfo);
+ int32_t numOfResultRows = getInitialPageNum(pQInfo);
+ getIntermediateBufInfo(pRuntimeEnv, &ps, &rowsize);
+
+ code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, numOfResultRows, rowsize, ps, numOfResultRows, pQInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -4250,7 +4282,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
type = TSDB_DATA_TYPE_TIMESTAMP;
}
- code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, rows, 4096, type);
+ code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, numOfResultRows, 4096, type);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -4932,7 +4964,7 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
pQuery->current->lastKey, pQuery->window.ekey);
} else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
STableIdInfo tidInfo;
- STableId* id = TSDB_TABLEID(pQuery->current);
+ STableId* id = TSDB_TABLEID(pQuery->current->pTable);
tidInfo.uid = id->uid;
tidInfo.tid = id->tid;
@@ -5693,7 +5725,6 @@ static void doUpdateExprColumnIndex(SQuery *pQuery) {
}
}
-
static int compareTableIdInfo(const void* a, const void* b) {
const STableIdInfo* x = (const STableIdInfo*)a;
const STableIdInfo* y = (const STableIdInfo*)b;
@@ -5709,13 +5740,18 @@ static void calResultBufSize(SQuery* pQuery) {
const int32_t RESULT_MSG_MIN_ROWS = 8192;
const float RESULT_THRESHOLD_RATIO = 0.85;
- int32_t numOfRes = RESULT_MSG_MIN_SIZE / pQuery->rowSize;
- if (numOfRes < RESULT_MSG_MIN_ROWS) {
- numOfRes = RESULT_MSG_MIN_ROWS;
- }
+ if (isProjQuery(pQuery)) {
+ int32_t numOfRes = RESULT_MSG_MIN_SIZE / pQuery->rowSize;
+ if (numOfRes < RESULT_MSG_MIN_ROWS) {
+ numOfRes = RESULT_MSG_MIN_ROWS;
+ }
- pQuery->rec.capacity = numOfRes;
- pQuery->rec.threshold = numOfRes * RESULT_THRESHOLD_RATIO;
+ pQuery->rec.capacity = numOfRes;
+ pQuery->rec.threshold = numOfRes * RESULT_THRESHOLD_RATIO;
+ } else { // in case of non-prj query, a smaller output buffer will be used.
+ pQuery->rec.capacity = 4096;
+ pQuery->rec.threshold = pQuery->rec.capacity * RESULT_THRESHOLD_RATIO;
+ }
}
static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
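
As a rough illustration of the capacity rule above (RESULT_MSG_MIN_SIZE is defined elsewhere in qExecutor.c; the figure below is only a placeholder): projection queries size the output buffer from the row size with an 8192-row floor, non-projection queries use a fixed 4096-row buffer, and the flush threshold is 85% of the capacity in both cases. A minimal sketch:

    #include <stdint.h>
    #include <stdio.h>

    // illustrative placeholder values for the sketch
    #define DEMO_RESULT_MSG_MIN_SIZE (1024 * 1024)
    #define DEMO_RESULT_MSG_MIN_ROWS 8192
    #define DEMO_THRESHOLD_RATIO     0.85f

    static void demoCalResultBufSize(int32_t rowSize, int isProj, int32_t *capacity, int32_t *threshold) {
      if (isProj) {
        int32_t numOfRes = DEMO_RESULT_MSG_MIN_SIZE / rowSize;
        if (numOfRes < DEMO_RESULT_MSG_MIN_ROWS) {
          numOfRes = DEMO_RESULT_MSG_MIN_ROWS;
        }
        *capacity = numOfRes;
      } else {  // non-projection (aggregate) queries use a smaller, fixed output buffer
        *capacity = 4096;
      }
      *threshold = (int32_t)(*capacity * DEMO_THRESHOLD_RATIO);
    }

    int main(void) {
      int32_t cap = 0, th = 0;
      demoCalResultBufSize(100, 1, &cap, &th);   // projection query, 100-byte rows
      printf("proj: capacity=%d threshold=%d\n", cap, th);
      demoCalResultBufSize(100, 0, &cap, &th);   // non-projection query
      printf("agg:  capacity=%d threshold=%d\n", cap, th);
      return 0;
    }
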
@@ -5727,6 +5763,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
if (pQInfo == NULL) {
goto _cleanup_qinfo;
}
+
// to make sure third party won't overwrite this structure
pQInfo->signature = pQInfo;
pQInfo->tableGroupInfo = *pTableGroupInfo;
@@ -5926,6 +5963,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ
pQuery->window.ekey, pQuery->order.order);
setQueryStatus(pQuery, QUERY_COMPLETED);
pQInfo->tableqinfoGroupInfo.numOfTables = 0;
+
sem_post(&pQInfo->dataReady);
return TSDB_CODE_SUCCESS;
}
@@ -6044,11 +6082,10 @@ static void freeQInfo(SQInfo *pQInfo) {
tfree(pQuery->sdata);
tfree(pQuery);
+ pQInfo->signature = 0;
qDebug("QInfo:%p QInfo is freed", pQInfo);
- // destroy signature, in order to avoid the query process pass the object safety check
- memset(pQInfo, 0, sizeof(SQInfo));
tfree(pQInfo);
}
@@ -6136,16 +6173,17 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, vo
int32_t code = TSDB_CODE_SUCCESS;
- char * tagCond = NULL, *tbnameCond = NULL;
- SArray * pTableIdList = NULL;
- SSqlFuncMsg **pExprMsg = NULL;
- SColIndex * pGroupColIndex = NULL;
- SColumnInfo* pTagColumnInfo = NULL;
- SExprInfo *pExprs = NULL;
- SSqlGroupbyExpr *pGroupbyExpr = NULL;
+ char *tagCond = NULL;
+ char *tbnameCond = NULL;
+ SArray *pTableIdList = NULL;
+ SSqlFuncMsg **pExprMsg = NULL;
+ SExprInfo *pExprs = NULL;
+ SColIndex *pGroupColIndex = NULL;
+ SColumnInfo *pTagColumnInfo = NULL;
+ SSqlGroupbyExpr *pGroupbyExpr = NULL;
- if ((code = convertQueryMsg(pQueryMsg, &pTableIdList, &pExprMsg, &tagCond, &tbnameCond, &pGroupColIndex, &pTagColumnInfo)) !=
- TSDB_CODE_SUCCESS) {
+ code = convertQueryMsg(pQueryMsg, &pTableIdList, &pExprMsg, &tagCond, &tbnameCond, &pGroupColIndex, &pTagColumnInfo);
+ if (code != TSDB_CODE_SUCCESS) {
goto _over;
}
@@ -6172,7 +6210,8 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, vo
bool isSTableQuery = false;
STableGroupInfo tableGroupInfo = {0};
-
+ int64_t st = taosGetTimestampUs();
+
if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_TABLE_QUERY)) {
STableIdInfo *id = taosArrayGet(pTableIdList, 0);
@@ -6182,7 +6221,6 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, vo
}
} else if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY|TSDB_QUERY_TYPE_STABLE_QUERY)) {
isSTableQuery = true;
- // TODO: need a macro from TSDB to check if table is super table
// also note there's possibility that only one table in the super table
if (!TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY)) {
@@ -6193,11 +6231,12 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, vo
if (pQueryMsg->numOfGroupCols == 1 && !TSDB_COL_IS_TAG(pGroupColIndex->flag)) {
numOfGroupByCols = 0;
}
-
+
+ qDebug("qmsg:%p query stable, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid);
code = tsdbQuerySTableByTagCond(tsdb, id->uid, tagCond, pQueryMsg->tagCondLen, pQueryMsg->tagNameRelType, tbnameCond, &tableGroupInfo, pGroupColIndex,
numOfGroupByCols);
if (code != TSDB_CODE_SUCCESS) {
- qError("qmsg:%p failed to QueryStable, reason: %s", pQueryMsg, tstrerror(code));
+ qError("qmsg:%p failed to query stable, reason: %s", pQueryMsg, tstrerror(code));
goto _over;
}
} else {
@@ -6208,6 +6247,9 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, vo
qDebug("qmsg:%p query on %zu tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables);
}
+
+ int64_t el = taosGetTimestampUs() - st;
+ qDebug("qmsg:%p tag filter completed, numOfTables:%zu, elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el);
} else {
assert(0);
}
@@ -6247,7 +6289,7 @@ _over:
*pQInfo = NULL;
}
- // if failed to add ref for all meters in this query, abort current query
+  // if we failed to add a ref for all tables in this query, abort the current query
return code;
}
@@ -6377,8 +6419,12 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
size += sizeof(STableIdInfo) * taosArrayGetSize(pQInfo->arrTableIdInfo);
*contLen = size + sizeof(SRetrieveTableRsp);
- // todo handle failed to allocate memory
+  // todo: properly handle a failure to allocate memory;
+  // the current solution only avoids a crash, but cannot return an error code to the client
*pRsp = (SRetrieveTableRsp *)rpcMallocCont(*contLen);
+ if (*pRsp == NULL) {
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
(*pRsp)->numOfRows = htonl(pQuery->rec.rows);
int32_t code = pQInfo->code;
diff --git a/src/query/src/qextbuffer.c b/src/query/src/qExtbuffer.c
similarity index 99%
rename from src/query/src/qextbuffer.c
rename to src/query/src/qExtbuffer.c
index afcf902123..69c5f0e24f 100644
--- a/src/query/src/qextbuffer.c
+++ b/src/query/src/qExtbuffer.c
@@ -12,16 +12,15 @@
* You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "qExtbuffer.h"
#include "os.h"
-#include "tulog.h"
-#include "qextbuffer.h"
+#include "queryLog.h"
#include "taos.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tsqlfunction.h"
-#include "ttime.h"
+#include "tulog.h"
#include "tutil.h"
-#include "queryLog.h"
#define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \
(data + (schema)->pFields[colId].offset * (allrow) + (rowId) * (schema)->pFields[colId].field.bytes)
diff --git a/src/query/src/qfill.c b/src/query/src/qFill.c
similarity index 99%
rename from src/query/src/qfill.c
rename to src/query/src/qFill.c
index 65951a5b9e..9dec2598bc 100644
--- a/src/query/src/qfill.c
+++ b/src/query/src/qFill.c
@@ -13,9 +13,9 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "qFill.h"
#include "os.h"
-#include "qfill.h"
-#include "qextbuffer.h"
+#include "qExtbuffer.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tsqlfunction.h"
diff --git a/src/query/src/qFilterFunc.c b/src/query/src/qFilterfunc.c
similarity index 99%
rename from src/query/src/qFilterFunc.c
rename to src/query/src/qFilterfunc.c
index 1a95b9fe21..7e9f5c7da5 100644
--- a/src/query/src/qFilterFunc.c
+++ b/src/query/src/qFilterfunc.c
@@ -554,5 +554,3 @@ __filter_func_t* getValueFilterFuncArray(int32_t type) {
default: return NULL;
}
}
-
-bool supportPrefilter(int32_t type) { return type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR; }
diff --git a/src/query/src/qhistogram.c b/src/query/src/qHistogram.c
similarity index 99%
rename from src/query/src/qhistogram.c
rename to src/query/src/qHistogram.c
index 26482e9f14..7835d82469 100644
--- a/src/query/src/qhistogram.c
+++ b/src/query/src/qHistogram.c
@@ -14,7 +14,7 @@
*/
#include "os.h"
-#include "qhistogram.h"
+#include "qHistogram.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tlosertree.h"
diff --git a/src/query/src/qparserImpl.c b/src/query/src/qParserImpl.c
similarity index 100%
rename from src/query/src/qparserImpl.c
rename to src/query/src/qParserImpl.c
diff --git a/src/query/src/qpercentile.c b/src/query/src/qPercentile.c
similarity index 99%
rename from src/query/src/qpercentile.c
rename to src/query/src/qPercentile.c
index dc5ecb796a..c4490a01e7 100644
--- a/src/query/src/qpercentile.c
+++ b/src/query/src/qPercentile.c
@@ -13,12 +13,12 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "qPercentile.h"
#include "os.h"
-#include "tulog.h"
-#include "qpercentile.h"
+#include "queryLog.h"
#include "taosdef.h"
#include "taosmsg.h"
-#include "queryLog.h"
+#include "tulog.h"
tExtMemBuffer *releaseBucketsExceptFor(tMemBucket *pMemBucket, int16_t segIdx, int16_t slotIdx) {
tExtMemBuffer *pBuffer = NULL;
diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c
new file mode 100644
index 0000000000..de59676e59
--- /dev/null
+++ b/src/query/src/qResultbuf.c
@@ -0,0 +1,221 @@
+#include "qResultbuf.h"
+#include "hash.h"
+#include "qExtbuffer.h"
+#include "queryLog.h"
+#include "taoserror.h"
+
+int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t numOfPages, int32_t rowSize,
+ int32_t pagesize, int32_t inMemPages, void* handle) {
+
+ *pResultBuf = calloc(1, sizeof(SDiskbasedResultBuf));
+ SDiskbasedResultBuf* pResBuf = *pResultBuf;
+ if (pResBuf == NULL) {
+ return TSDB_CODE_COM_OUT_OF_MEMORY;
+ }
+
+ pResBuf->pageSize = pagesize;
+  pResBuf->numOfPages = inMemPages;  // all pages are in the in-memory buffer at first
+ pResBuf->inMemPages = inMemPages;
+ assert(inMemPages <= numOfPages);
+
+ pResBuf->numOfRowsPerPage = (pagesize - sizeof(tFilePage)) / rowSize;
+
+ pResBuf->totalBufSize = pResBuf->numOfPages * pagesize;
+ pResBuf->incStep = 4;
+ pResBuf->allocateId = -1;
+
+ pResBuf->iBuf = calloc(pResBuf->inMemPages, pResBuf->pageSize);
+
+ // init id hash table
+ pResBuf->idsTable = taosHashInit(numOfPages, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
+ pResBuf->list = taosArrayInit(numOfPages, POINTER_BYTES);
+
+ char path[PATH_MAX] = {0};
+ getTmpfilePath("tsdb_qbuf", path);
+ pResBuf->path = strdup(path);
+
+ pResBuf->fd = FD_INITIALIZER;
+ pResBuf->pBuf = NULL;
+ pResBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t));
+
+ qDebug("QInfo:%p create resBuf for output, page size:%d, initial pages:%d, %" PRId64 "bytes", handle,
+ pResBuf->pageSize, pResBuf->numOfPages, pResBuf->totalBufSize);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t getNumOfResultBufGroupId(SDiskbasedResultBuf* pResultBuf) { return taosHashGetSize(pResultBuf->idsTable); }
+
+int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->totalBufSize; }
+
+#define NUM_OF_PAGES_ON_DISK(_r) ((_r)->numOfPages - (_r)->inMemPages)
+#define FILE_SIZE_ON_DISK(_r) (NUM_OF_PAGES_ON_DISK(_r) * (_r)->pageSize)
+
+static int32_t createDiskResidesBuf(SDiskbasedResultBuf* pResultBuf) {
+ pResultBuf->fd = open(pResultBuf->path, O_CREAT | O_RDWR, 0666);
+ if (!FD_VALID(pResultBuf->fd)) {
+ qError("failed to create tmp file: %s on disk. %s", pResultBuf->path, strerror(errno));
+ return TAOS_SYSTEM_ERROR(errno);
+ }
+
+ assert(pResultBuf->numOfPages == pResultBuf->inMemPages);
+ pResultBuf->numOfPages += pResultBuf->incStep;
+
+ int32_t ret = ftruncate(pResultBuf->fd, NUM_OF_PAGES_ON_DISK(pResultBuf) * pResultBuf->pageSize);
+ if (ret != TSDB_CODE_SUCCESS) {
+ qError("failed to create tmp file: %s on disk. %s", pResultBuf->path, strerror(errno));
+ return TAOS_SYSTEM_ERROR(errno);
+ }
+
+ pResultBuf->pBuf = mmap(NULL, FILE_SIZE_ON_DISK(pResultBuf), PROT_READ | PROT_WRITE, MAP_SHARED, pResultBuf->fd, 0);
+ if (pResultBuf->pBuf == MAP_FAILED) {
+ qError("QInfo:%p failed to map temp file: %s. %s", pResultBuf->handle, pResultBuf->path, strerror(errno));
+ return TAOS_SYSTEM_ERROR(errno);
+ }
+
+ pResultBuf->totalBufSize = pResultBuf->numOfPages * pResultBuf->pageSize;
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t extendDiskFileSize(SDiskbasedResultBuf* pResultBuf, int32_t incNumOfPages) {
+ assert(pResultBuf->numOfPages * pResultBuf->pageSize == pResultBuf->totalBufSize);
+ int32_t ret = TSDB_CODE_SUCCESS;
+
+ if (pResultBuf->pBuf == NULL) {
+ assert(pResultBuf->fd == FD_INITIALIZER);
+
+ if ((ret = createDiskResidesBuf(pResultBuf)) != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+ } else {
+ ret = munmap(pResultBuf->pBuf, FILE_SIZE_ON_DISK(pResultBuf));
+ pResultBuf->numOfPages += incNumOfPages;
+
+ /*
+     * the disk-based output buffer is exhausted, so try to extend it; note that the available disk space may
+     * be insufficient
+ */
+ ret = ftruncate(pResultBuf->fd, NUM_OF_PAGES_ON_DISK(pResultBuf) * pResultBuf->pageSize);
+ if (ret != TSDB_CODE_SUCCESS) {
+ // dError("QInfo:%p failed to create intermediate result output file:%s. %s", pQInfo, pSupporter->extBufFile,
+ // strerror(errno));
+ return TSDB_CODE_QRY_NO_DISKSPACE;
+ }
+
+ pResultBuf->totalBufSize = pResultBuf->numOfPages * pResultBuf->pageSize;
+ pResultBuf->pBuf = mmap(NULL, FILE_SIZE_ON_DISK(pResultBuf), PROT_READ | PROT_WRITE, MAP_SHARED, pResultBuf->fd, 0);
+
+ if (pResultBuf->pBuf == MAP_FAILED) {
+ // dError("QInfo:%p failed to map temp file: %s. %s", pQInfo, pSupporter->extBufFile, strerror(errno));
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+#define NO_AVAILABLE_PAGES(_b) ((_b)->allocateId == (_b)->numOfPages - 1)
+
+static FORCE_INLINE int32_t getGroupIndex(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
+ assert(pResultBuf != NULL);
+
+ char* p = taosHashGet(pResultBuf->idsTable, (const char*)&groupId, sizeof(int32_t));
+ if (p == NULL) { // it is a new group id
+ return -1;
+ }
+
+ int32_t slot = GET_INT32_VAL(p);
+ assert(slot >= 0 && slot < taosHashGetSize(pResultBuf->idsTable));
+
+ return slot;
+}
+
+static int32_t addNewGroupId(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
+ int32_t num = getNumOfResultBufGroupId(pResultBuf); // the num is the newest allocated group id slot
+ taosHashPut(pResultBuf->idsTable, (const char*)&groupId, sizeof(int32_t), &num, sizeof(int32_t));
+
+ SArray* pa = taosArrayInit(1, sizeof(int32_t));
+ taosArrayPush(pResultBuf->list, &pa);
+
+ assert(taosArrayGetSize(pResultBuf->list) == taosHashGetSize(pResultBuf->idsTable));
+ return num;
+}
+
+static void registerPageId(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t pageId) {
+ int32_t slot = getGroupIndex(pResultBuf, groupId);
+ if (slot < 0) {
+ slot = addNewGroupId(pResultBuf, groupId);
+ }
+
+ SIDList pList = taosArrayGetP(pResultBuf->list, slot);
+ taosArrayPush(pList, &pageId);
+}
+
+tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t* pageId) {
+ if (NO_AVAILABLE_PAGES(pResultBuf)) {
+ if (extendDiskFileSize(pResultBuf, pResultBuf->incStep) != TSDB_CODE_SUCCESS) {
+ return NULL;
+ }
+ }
+
+ // register new id in this group
+ *pageId = (++pResultBuf->allocateId);
+ registerPageId(pResultBuf, groupId, *pageId);
+
+ // clear memory for the new page
+ tFilePage* page = getResBufPage(pResultBuf, *pageId);
+ memset(page, 0, pResultBuf->pageSize);
+
+ return page;
+}
+
+int32_t getNumOfRowsPerPage(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->numOfRowsPerPage; }
+
+SIDList getDataBufPagesIdList(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
+ int32_t slot = getGroupIndex(pResultBuf, groupId);
+ if (slot < 0) {
+ return pResultBuf->emptyDummyIdList;
+ } else {
+ return taosArrayGetP(pResultBuf->list, slot);
+ }
+}
+
+void destroyResultBuf(SDiskbasedResultBuf* pResultBuf, void* handle) {
+ if (pResultBuf == NULL) {
+ return;
+ }
+
+ if (FD_VALID(pResultBuf->fd)) {
+ qDebug("QInfo:%p disk-based output buffer closed, total:%" PRId64 " bytes, file created:%s, file size:%d", handle,
+ pResultBuf->totalBufSize, pResultBuf->path, FILE_SIZE_ON_DISK(pResultBuf));
+
+ close(pResultBuf->fd);
+ munmap(pResultBuf->pBuf, FILE_SIZE_ON_DISK(pResultBuf));
+ pResultBuf->pBuf = NULL;
+ } else {
+ qDebug("QInfo:%p disk-based output buffer closed, total:%" PRId64 " bytes, no file created", handle,
+ pResultBuf->totalBufSize);
+ }
+
+ unlink(pResultBuf->path);
+ tfree(pResultBuf->path);
+
+ size_t size = taosArrayGetSize(pResultBuf->list);
+ for (int32_t i = 0; i < size; ++i) {
+ SArray* pa = taosArrayGetP(pResultBuf->list, i);
+ taosArrayDestroy(pa);
+ }
+
+ taosArrayDestroy(pResultBuf->list);
+ taosArrayDestroy(pResultBuf->emptyDummyIdList);
+ taosHashCleanup(pResultBuf->idsTable);
+
+ tfree(pResultBuf->iBuf);
+ tfree(pResultBuf);
+}
+
+int32_t getLastPageId(SIDList pList) {
+ size_t size = taosArrayGetSize(pList);
+ return *(int32_t*) taosArrayGet(pList, size - 1);
+}
+
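
A usage sketch of the reworked buffer API above, assuming the declarations from qResultbuf.h and taoserror.h; the numbers and the demoFillBuf helper are illustrative only. Pages are requested per group id, stay in the in-memory region until the inMemPages quota is used up, and only then spill to the mmap'd temp file:

    #include "qResultbuf.h"   // createDiskbasedResultBuffer, getNewDataBuf, getDataBufPagesIdList, destroyResultBuf
    #include "taoserror.h"

    static int32_t demoFillBuf(void *handle) {
      SDiskbasedResultBuf *pBuf = NULL;

      // up to 1000 pages of 1024 bytes, 64-byte rows, first 4 pages kept in memory
      int32_t code = createDiskbasedResultBuffer(&pBuf, 1000, 64, 1024, 4, handle);
      if (code != TSDB_CODE_SUCCESS) {
        return code;
      }

      int32_t groupId = 0;
      for (int32_t i = 0; i < 16; ++i) {           // the 5th allocation triggers the spill to disk
        int32_t pageId = -1;
        tFilePage *page = getNewDataBuf(pBuf, groupId, &pageId);
        if (page == NULL) {                        // disk extension failed, e.g. no space left
          destroyResultBuf(pBuf, handle);
          return TSDB_CODE_QRY_NO_DISKSPACE;
        }
        page->num = 1;                             // pretend one row was written to this page
      }

      SIDList list = getDataBufPagesIdList(pBuf, groupId);   // page ids registered for this group
      (void)list;

      destroyResultBuf(pBuf, handle);
      return TSDB_CODE_SUCCESS;
    }
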
diff --git a/src/query/src/qsyntaxtreefunction.c b/src/query/src/qSyntaxtreefunction.c
similarity index 99%
rename from src/query/src/qsyntaxtreefunction.c
rename to src/query/src/qSyntaxtreefunction.c
index 5719bb0188..2104edfd91 100644
--- a/src/query/src/qsyntaxtreefunction.c
+++ b/src/query/src/qSyntaxtreefunction.c
@@ -15,7 +15,7 @@
#include "os.h"
-#include "qsyntaxtreefunction.h"
+#include "qSyntaxtreefunction.h"
#include "taosdef.h"
#include "tutil.h"
diff --git a/src/query/src/qtokenizer.c b/src/query/src/qTokenizer.c
similarity index 100%
rename from src/query/src/qtokenizer.c
rename to src/query/src/qTokenizer.c
diff --git a/src/query/src/qtsbuf.c b/src/query/src/qTsbuf.c
similarity index 99%
rename from src/query/src/qtsbuf.c
rename to src/query/src/qTsbuf.c
index b84fbded38..20b29107f5 100644
--- a/src/query/src/qtsbuf.c
+++ b/src/query/src/qTsbuf.c
@@ -1,7 +1,7 @@
-#include "qtsbuf.h"
+#include "qTsbuf.h"
+#include "taoserror.h"
#include "tscompression.h"
#include "tutil.h"
-#include "taoserror.h"
static int32_t getDataStartOffset();
static void TSBufUpdateVnodeInfo(STSBuf* pTSBuf, int32_t index, STSVnodeBlockInfo* pBlockInfo);
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 4e71de830f..be84471493 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -26,12 +26,10 @@ int32_t getOutputInterResultBufSize(SQuery* pQuery) {
int32_t size = 0;
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
- assert(pQuery->pSelectExpr[i].interBytes <= DEFAULT_INTERN_BUF_PAGE_SIZE);
size += pQuery->pSelectExpr[i].interBytes;
}
assert(size > 0);
-
return size;
}
@@ -243,7 +241,7 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow
size_t size = pRuntimeEnv->pQuery->pSelectExpr[i].bytes;
memset(s, 0, size);
- resetResultInfo(pResultInfo);
+ RESET_RESULT_INFO(pResultInfo);
}
pWindowRes->numOfRows = 0;
diff --git a/src/query/src/qresultBuf.c b/src/query/src/qresultBuf.c
deleted file mode 100644
index ae1a95179b..0000000000
--- a/src/query/src/qresultBuf.c
+++ /dev/null
@@ -1,184 +0,0 @@
-#include "qresultBuf.h"
-#include "hash.h"
-#include "qextbuffer.h"
-#include "taoserror.h"
-#include "queryLog.h"
-
-int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t size, int32_t rowSize, void* handle) {
- *pResultBuf = calloc(1, sizeof(SDiskbasedResultBuf));
- SDiskbasedResultBuf* pResBuf = *pResultBuf;
- if (pResBuf == NULL) {
- return TSDB_CODE_COM_OUT_OF_MEMORY;
- }
-
- pResBuf->numOfRowsPerPage = (DEFAULT_INTERN_BUF_PAGE_SIZE - sizeof(tFilePage)) / rowSize;
- pResBuf->numOfPages = size;
-
- pResBuf->totalBufSize = pResBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE;
- pResBuf->incStep = 4;
-
- // init id hash table
- pResBuf->idsTable = taosHashInit(size, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
- pResBuf->list = taosArrayInit(size, POINTER_BYTES);
-
- char path[4096] = {0};
- getTmpfilePath("tsdb_qbuf", path);
- pResBuf->path = strdup(path);
-
- pResBuf->fd = open(pResBuf->path, O_CREAT | O_RDWR, 0666);
- if (!FD_VALID(pResBuf->fd)) {
- qError("failed to create tmp file: %s on disk. %s", pResBuf->path, strerror(errno));
- return TAOS_SYSTEM_ERROR(errno);
- }
-
- int32_t ret = ftruncate(pResBuf->fd, pResBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE);
- if (ret != TSDB_CODE_SUCCESS) {
- qError("failed to create tmp file: %s on disk. %s", pResBuf->path, strerror(errno));
- return TAOS_SYSTEM_ERROR(errno);
- }
-
- pResBuf->pBuf = mmap(NULL, pResBuf->totalBufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pResBuf->fd, 0);
- if (pResBuf->pBuf == MAP_FAILED) {
- qError("QInfo:%p failed to map temp file: %s. %s", handle, pResBuf->path, strerror(errno));
- return TAOS_SYSTEM_ERROR(errno);
- }
-
- qDebug("QInfo:%p create tmp file for output result:%s, %" PRId64 "bytes", handle, pResBuf->path,
- pResBuf->totalBufSize);
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t getNumOfResultBufGroupId(SDiskbasedResultBuf* pResultBuf) { return taosHashGetSize(pResultBuf->idsTable); }
-
-int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->totalBufSize; }
-
-static int32_t extendDiskFileSize(SDiskbasedResultBuf* pResultBuf, int32_t numOfPages) {
- assert(pResultBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE == pResultBuf->totalBufSize);
-
- int32_t ret = munmap(pResultBuf->pBuf, pResultBuf->totalBufSize);
- pResultBuf->numOfPages += numOfPages;
-
- /*
- * disk-based output buffer is exhausted, try to extend the disk-based buffer, the available disk space may
- * be insufficient
- */
- ret = ftruncate(pResultBuf->fd, pResultBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE);
- if (ret != 0) {
- // dError("QInfo:%p failed to create intermediate result output file:%s. %s", pQInfo, pSupporter->extBufFile,
- // strerror(errno));
- return TSDB_CODE_QRY_NO_DISKSPACE;
- }
-
- pResultBuf->totalBufSize = pResultBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE;
- pResultBuf->pBuf = mmap(NULL, pResultBuf->totalBufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pResultBuf->fd, 0);
-
- if (pResultBuf->pBuf == MAP_FAILED) {
- // dError("QInfo:%p failed to map temp file: %s. %s", pQInfo, pSupporter->extBufFile, strerror(errno));
- return TSDB_CODE_QRY_OUT_OF_MEMORY;
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-static FORCE_INLINE bool noMoreAvailablePages(SDiskbasedResultBuf* pResultBuf) {
- return (pResultBuf->allocateId == pResultBuf->numOfPages - 1);
-}
-
-static FORCE_INLINE int32_t getGroupIndex(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
- assert(pResultBuf != NULL);
-
- char* p = taosHashGet(pResultBuf->idsTable, (const char*)&groupId, sizeof(int32_t));
- if (p == NULL) { // it is a new group id
- return -1;
- }
-
- int32_t slot = GET_INT32_VAL(p);
- assert(slot >= 0 && slot < taosHashGetSize(pResultBuf->idsTable));
-
- return slot;
-}
-
-static int32_t addNewGroupId(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
- int32_t num = getNumOfResultBufGroupId(pResultBuf); // the num is the newest allocated group id slot
- taosHashPut(pResultBuf->idsTable, (const char*)&groupId, sizeof(int32_t), &num, sizeof(int32_t));
-
- SArray* pa = taosArrayInit(1, sizeof(int32_t));
- taosArrayPush(pResultBuf->list, &pa);
-
- assert(taosArrayGetSize(pResultBuf->list) == taosHashGetSize(pResultBuf->idsTable));
- return num;
-}
-
-static void registerPageId(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t pageId) {
- int32_t slot = getGroupIndex(pResultBuf, groupId);
- if (slot < 0) {
- slot = addNewGroupId(pResultBuf, groupId);
- }
-
- SIDList pList = taosArrayGetP(pResultBuf->list, slot);
- taosArrayPush(pList, &pageId);
-}
-
-tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t* pageId) {
- if (noMoreAvailablePages(pResultBuf)) {
- if (extendDiskFileSize(pResultBuf, pResultBuf->incStep) != TSDB_CODE_SUCCESS) {
- return NULL;
- }
- }
-
- // register new id in this group
- *pageId = (pResultBuf->allocateId++);
- registerPageId(pResultBuf, groupId, *pageId);
-
- tFilePage* page = GET_RES_BUF_PAGE_BY_ID(pResultBuf, *pageId);
-
- // clear memory for the new page
- memset(page, 0, DEFAULT_INTERN_BUF_PAGE_SIZE);
-
- return page;
-}
-
-int32_t getNumOfRowsPerPage(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->numOfRowsPerPage; }
-
-SIDList getDataBufPagesIdList(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
- int32_t slot = getGroupIndex(pResultBuf, groupId);
- if (slot < 0) {
- return taosArrayInit(1, sizeof(int32_t));
- } else {
- return taosArrayGetP(pResultBuf->list, slot);
- }
-}
-
-void destroyResultBuf(SDiskbasedResultBuf* pResultBuf, void* handle) {
- if (pResultBuf == NULL) {
- return;
- }
-
- if (FD_VALID(pResultBuf->fd)) {
- close(pResultBuf->fd);
- }
-
- qDebug("QInfo:%p disk-based output buffer closed, %" PRId64 " bytes, file:%s", handle, pResultBuf->totalBufSize, pResultBuf->path);
- munmap(pResultBuf->pBuf, pResultBuf->totalBufSize);
- unlink(pResultBuf->path);
-
- tfree(pResultBuf->path);
-
- size_t size = taosArrayGetSize(pResultBuf->list);
- for (int32_t i = 0; i < size; ++i) {
- SArray* pa = taosArrayGetP(pResultBuf->list, i);
- taosArrayDestroy(pa);
- }
-
- taosArrayDestroy(pResultBuf->list);
- taosHashCleanup(pResultBuf->idsTable);
-
- tfree(pResultBuf);
-}
-
-int32_t getLastPageId(SIDList pList) {
- size_t size = taosArrayGetSize(pList);
- return *(int32_t*) taosArrayGet(pList, size - 1);
-}
-
diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt
index 0ae8600756..1856223391 100644
--- a/src/query/tests/CMakeLists.txt
+++ b/src/query/tests/CMakeLists.txt
@@ -11,5 +11,5 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(queryTest ${SOURCE_LIST})
- TARGET_LINK_LIBRARIES(queryTest taos query gtest pthread)
-ENDIF()
\ No newline at end of file
+ TARGET_LINK_LIBRARIES(queryTest taos query gtest pthread gcov)
+ENDIF()
diff --git a/src/query/tests/astTest.cpp b/src/query/tests/astTest.cpp
index df27087216..99f03a7ff8 100644
--- a/src/query/tests/astTest.cpp
+++ b/src/query/tests/astTest.cpp
@@ -3,8 +3,8 @@
 #include <gtest/gtest.h>
 #include <iostream>
+#include "qAst.h"
#include "taosmsg.h"
-#include "qast.h"
#include "tsdb.h"
#include "tskiplist.h"
diff --git a/src/query/tests/histogramTest.cpp b/src/query/tests/histogramTest.cpp
index c23f0f5924..4a5f7fbbbe 100644
--- a/src/query/tests/histogramTest.cpp
+++ b/src/query/tests/histogramTest.cpp
@@ -9,7 +9,7 @@
#include "tstoken.h"
#include "tutil.h"
-#include "qhistogram.h"
+#include "qHistogram.h"
/* test validate the names for table/database */
TEST(testCase, histogram_binary_search) {
diff --git a/src/query/tests/resultBufferTest.cpp b/src/query/tests/resultBufferTest.cpp
index 9270c6e458..63ed89ab9f 100644
--- a/src/query/tests/resultBufferTest.cpp
+++ b/src/query/tests/resultBufferTest.cpp
@@ -2,15 +2,15 @@
 #include <gtest/gtest.h>
 #include <iostream>
+#include "qResultbuf.h"
#include "taos.h"
-#include "qresultBuf.h"
#include "tsdb.h"
namespace {
// simple test
void simpleTest() {
SDiskbasedResultBuf* pResultBuf = NULL;
- int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1000, 64, NULL);
+ int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1000, 64, 1024, 4, NULL);
int32_t pageId = 0;
int32_t groupId = 0;
@@ -22,8 +22,7 @@ void simpleTest() {
ASSERT_EQ(getResBufSize(pResultBuf), 1000*16384L);
SIDList list = getDataBufPagesIdList(pResultBuf, groupId);
- ASSERT_EQ(list.size, 1);
-
+ ASSERT_EQ(taosArrayGetSize(list), 1);
ASSERT_EQ(getNumOfResultBufGroupId(pResultBuf), 1);
destroyResultBuf(pResultBuf, NULL);
diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp
index f813261957..e9827518e1 100644
--- a/src/query/tests/tsBufTest.cpp
+++ b/src/query/tests/tsBufTest.cpp
@@ -5,10 +5,10 @@
#include "taos.h"
#include "tsdb.h"
+#include "qTsbuf.h"
#include "tstoken.h"
#include "ttime.h"
#include "tutil.h"
-#include "qtsbuf.h"
namespace {
/**
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index 2325d12d92..4790c22668 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -446,7 +446,10 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
// set the idle timer to monitor the activity
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
rpcSendMsgToPeer(pConn, msg, msgLen);
- pConn->secured = 1; // connection shall be secured
+
+  // if not yet marked secured, set it, except in the NOT_READY case, since the client won't treat it as secured
+ if (pConn->secured == 0 && pMsg->code != TSDB_CODE_RPC_NOT_READY)
+ pConn->secured = 1; // connection shall be secured
if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg);
pConn->pReqMsg = NULL;
diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h
index 40f2dac660..762d2253e2 100644
--- a/src/tsdb/inc/tsdbMain.h
+++ b/src/tsdb/inc/tsdbMain.h
@@ -42,6 +42,7 @@ extern int tsdbDebugFlag;
#define TSDB_MAX_TABLE_SCHEMAS 16
#define TSDB_FILE_HEAD_SIZE 512
#define TSDB_FILE_DELIMITER 0xF00AFA0F
+#define TSDB_FILE_INIT_MAGIC 0xFFFFFFFF
// Definitions
// ------------------ tsdbMeta.c
@@ -132,21 +133,30 @@ typedef struct {
// ------------------ tsdbFile.c
extern const char* tsdbFileSuffix[];
typedef enum {
+#ifdef TSDB_IDX
+ TSDB_FILE_TYPE_IDX = 0,
+ TSDB_FILE_TYPE_HEAD,
+#else
TSDB_FILE_TYPE_HEAD = 0,
+#endif
TSDB_FILE_TYPE_DATA,
TSDB_FILE_TYPE_LAST,
TSDB_FILE_TYPE_MAX,
+#ifdef TSDB_IDX
+ TSDB_FILE_TYPE_NIDX,
+#endif
TSDB_FILE_TYPE_NHEAD,
TSDB_FILE_TYPE_NLAST
} TSDB_FILE_TYPE;
typedef struct {
- uint32_t offset;
+ uint32_t magic;
uint32_t len;
- uint64_t size; // total size of the file
- uint64_t tombSize; // unused file size
uint32_t totalBlocks;
uint32_t totalSubBlocks;
+ uint32_t offset;
+ uint64_t size; // total size of the file
+ uint64_t tombSize; // unused file size
} STsdbFileInfo;
typedef struct {
@@ -197,6 +207,7 @@ typedef struct {
// ------------------ tsdbRWHelper.c
typedef struct {
+ int32_t tid;
uint32_t len;
uint32_t offset;
uint32_t hasLast : 2;
@@ -220,7 +231,7 @@ typedef struct {
typedef struct {
int32_t delimiter; // For recovery usage
- int32_t checksum; // TODO: decide if checksum logic in this file or make it one API
+ int32_t tid;
uint64_t uid;
SCompBlock blocks[];
} SCompInfo;
@@ -249,24 +260,27 @@ typedef struct {
typedef enum { TSDB_WRITE_HELPER, TSDB_READ_HELPER } tsdb_rw_helper_t;
typedef struct {
- int fid;
- TSKEY minKey;
- TSKEY maxKey;
- // For read/write purpose
- SFile headF;
- SFile dataF;
- SFile lastF;
- // For write purpose only
- SFile nHeadF;
- SFile nLastF;
+ TSKEY minKey;
+ TSKEY maxKey;
+ SFileGroup fGroup;
+#ifdef TSDB_IDX
+ SFile nIdxF;
+#endif
+ SFile nHeadF;
+ SFile nLastF;
} SHelperFile;
typedef struct {
uint64_t uid;
int32_t tid;
- int32_t sversion;
} SHelperTable;
+typedef struct {
+ SCompIdx* pIdxArray;
+ int numOfIdx;
+ int curIdx;
+} SIdxH;
+
typedef struct {
tsdb_rw_helper_t type;
@@ -274,7 +288,9 @@ typedef struct {
int8_t state;
// For file set usage
SHelperFile files;
- SCompIdx* pCompIdx;
+ SIdxH idxH;
+ SCompIdx curCompIdx;
+ void* pWIdx;
// For table set usage
SHelperTable tableInfo;
SCompInfo* pCompInfo;
@@ -286,7 +302,6 @@ typedef struct {
void* compBuffer; // Buffer for temperary compress/decompress purpose
} SRWHelper;
-
// Operations
// ------------------ tsdbMeta.c
#define TABLE_TYPE(t) (t)->type
@@ -296,6 +311,7 @@ typedef struct {
#define TABLE_TID(t) (t)->tableId.tid
#define TABLE_SUID(t) (t)->suid
#define TABLE_LASTKEY(t) (t)->lastKey
+#define TSDB_META_FILE_MAGIC(m) KVSTORE_MAGIC((m)->pStore)
STsdbMeta* tsdbNewMeta(STsdbCfg* pCfg);
void tsdbFreeMeta(STsdbMeta* pMeta);
@@ -426,6 +442,7 @@ int tsdbUpdateFileHeader(SFile* pFile, uint32_t version);
int tsdbEncodeSFileInfo(void** buf, const STsdbFileInfo* pInfo);
void* tsdbDecodeSFileInfo(void* buf, STsdbFileInfo* pInfo);
void tsdbRemoveFileGroup(STsdbRepo* pRepo, SFileGroup* pFGroup);
+void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey);
// ------------------ tsdbRWHelper.c
#define TSDB_HELPER_CLEAR_STATE 0x0 // Clear state
@@ -444,6 +461,16 @@ void tsdbRemoveFileGroup(STsdbRepo* pRepo, SFileGroup* pFGroup);
#define helperRepo(h) (h)->pRepo
#define helperState(h) (h)->state
#define TSDB_NLAST_FILE_OPENED(h) ((h)->files.nLastF.fd > 0)
+#define helperFileId(h) ((h)->files.fGroup.fileId)
+#ifdef TSDB_IDX
+#define helperIdxF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_IDX]))
+#define helperNewIdxF(h) (&((h)->files.nIdxF))
+#endif
+#define helperHeadF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_HEAD]))
+#define helperDataF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_DATA]))
+#define helperLastF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_LAST]))
+#define helperNewHeadF(h) (&((h)->files.nHeadF))
+#define helperNewLastF(h) (&((h)->files.nLastF))
int tsdbInitReadHelper(SRWHelper* pHelper, STsdbRepo* pRepo);
int tsdbInitWriteHelper(SRWHelper* pHelper, STsdbRepo* pRepo);
diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c
index 95cc47292b..767fbc8252 100644
--- a/src/tsdb/src/tsdbFile.c
+++ b/src/tsdb/src/tsdbFile.c
@@ -30,7 +30,11 @@
#include "ttime.h"
#include "tfile.h"
+#ifdef TSDB_IDX
+const char *tsdbFileSuffix[] = {".idx", ".head", ".data", ".last", "", ".i", ".h", ".l"};
+#else
const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"};
+#endif
static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type);
static void tsdbDestroyFile(SFile *pFile);
@@ -108,7 +112,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
memset((void *)(&fileGroup), 0, sizeof(SFileGroup));
fileGroup.fileId = fid;
- for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
+ for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
if (tsdbInitFile(&fileGroup.files[type], pRepo, fid, type) < 0) {
tsdbError("vgId:%d failed to init file fid %d type %d", REPO_ID(pRepo), fid, type);
goto _err;
@@ -126,7 +130,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
return 0;
_err:
- for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&fileGroup.files[type]);
+ for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&fileGroup.files[type]);
tfree(tDataDir);
if (dir != NULL) closedir(dir);
@@ -139,7 +143,7 @@ void tsdbCloseFileH(STsdbRepo *pRepo) {
for (int i = 0; i < pFileH->nFGroups; i++) {
SFileGroup *pFGroup = pFileH->pFGroup + i;
- for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
+ for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
tsdbDestroyFile(&pFGroup->files[type]);
}
}
@@ -156,7 +160,7 @@ SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid, int
SFileGroup *pGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ);
if (pGroup == NULL) { // if not exists, create one
pFGroup->fileId = fid;
- for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
+ for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
if (tsdbCreateFile(&pFGroup->files[type], pRepo, fid, type) < 0)
goto _err;
}
@@ -169,7 +173,7 @@ SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid, int
return pGroup;
_err:
- for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&pGroup->files[type]);
+ for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&pGroup->files[type]);
return NULL;
}
@@ -260,6 +264,7 @@ int tsdbCreateFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) {
}
pFile->info.size = TSDB_FILE_HEAD_SIZE;
+ pFile->info.magic = TSDB_FILE_INIT_MAGIC;
if (tsdbUpdateFileHeader(pFile, 0) < 0) {
tsdbCloseFile(pFile);
@@ -323,23 +328,25 @@ int tsdbUpdateFileHeader(SFile *pFile, uint32_t version) {
int tsdbEncodeSFileInfo(void **buf, const STsdbFileInfo *pInfo) {
int tlen = 0;
- tlen += taosEncodeFixedU32(buf, pInfo->offset);
+ tlen += taosEncodeFixedU32(buf, pInfo->magic);
tlen += taosEncodeFixedU32(buf, pInfo->len);
- tlen += taosEncodeFixedU64(buf, pInfo->size);
- tlen += taosEncodeFixedU64(buf, pInfo->tombSize);
tlen += taosEncodeFixedU32(buf, pInfo->totalBlocks);
tlen += taosEncodeFixedU32(buf, pInfo->totalSubBlocks);
+ tlen += taosEncodeFixedU32(buf, pInfo->offset);
+ tlen += taosEncodeFixedU64(buf, pInfo->size);
+ tlen += taosEncodeFixedU64(buf, pInfo->tombSize);
return tlen;
}
void *tsdbDecodeSFileInfo(void *buf, STsdbFileInfo *pInfo) {
- buf = taosDecodeFixedU32(buf, &(pInfo->offset));
+ buf = taosDecodeFixedU32(buf, &(pInfo->magic));
buf = taosDecodeFixedU32(buf, &(pInfo->len));
- buf = taosDecodeFixedU64(buf, &(pInfo->size));
- buf = taosDecodeFixedU64(buf, &(pInfo->tombSize));
buf = taosDecodeFixedU32(buf, &(pInfo->totalBlocks));
buf = taosDecodeFixedU32(buf, &(pInfo->totalSubBlocks));
+ buf = taosDecodeFixedU32(buf, &(pInfo->offset));
+ buf = taosDecodeFixedU64(buf, &(pInfo->size));
+ buf = taosDecodeFixedU64(buf, &(pInfo->tombSize));
return buf;
}
@@ -358,7 +365,7 @@ void tsdbRemoveFileGroup(STsdbRepo *pRepo, SFileGroup *pFGroup) {
pFileH->nFGroups--;
ASSERT(pFileH->nFGroups >= 0);
- for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
+ for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
if (remove(fileGroup.files[type].fname) < 0) {
tsdbError("vgId:%d failed to remove file %s", REPO_ID(pRepo), fileGroup.files[type].fname);
}
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index e30164592d..772bcf48d6 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -212,59 +212,61 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_
char *sdup = strdup(pRepo->rootDir);
char *prefix = dirname(sdup);
+ int prefixLen = strlen(prefix);
+ tfree(sdup);
if (name[0] == 0) { // get the file from index or after, but not larger than eindex
- int fid = (*index) / 3;
+ int fid = (*index) / TSDB_FILE_TYPE_MAX;
if (pFileH->nFGroups == 0 || fid > pFileH->pFGroup[pFileH->nFGroups - 1].fileId) {
if (*index <= TSDB_META_FILE_INDEX && TSDB_META_FILE_INDEX <= eindex) {
fname = tsdbGetMetaFileName(pRepo->rootDir);
*index = TSDB_META_FILE_INDEX;
+ magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta);
} else {
- tfree(sdup);
return 0;
}
} else {
SFileGroup *pFGroup =
taosbsearch(&fid, pFileH->pFGroup, pFileH->nFGroups, sizeof(SFileGroup), keyFGroupCompFunc, TD_GE);
if (pFGroup->fileId == fid) {
- fname = strdup(pFGroup->files[(*index) % 3].fname);
+ fname = strdup(pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX].fname);
+ magic = pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX].info.magic;
} else {
- if (pFGroup->fileId * 3 + 2 < eindex) {
+ if ((pFGroup->fileId + 1) * TSDB_FILE_TYPE_MAX - 1 < eindex) {
fname = strdup(pFGroup->files[0].fname);
- *index = pFGroup->fileId * 3;
+ *index = pFGroup->fileId * TSDB_FILE_TYPE_MAX;
+ magic = pFGroup->files[0].info.magic;
} else {
- tfree(sdup);
return 0;
}
}
}
- strcpy(name, fname + strlen(prefix));
+ strcpy(name, fname + prefixLen);
} else { // get the named file at the specified index. If not there, return 0
if (*index == TSDB_META_FILE_INDEX) { // get meta file
fname = tsdbGetMetaFileName(pRepo->rootDir);
+ magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta);
} else {
- int fid = (*index) / 3;
+ int fid = (*index) / TSDB_FILE_TYPE_MAX;
SFileGroup *pFGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ);
if (pFGroup == NULL) { // not found
- tfree(sdup);
return 0;
}
- SFile *pFile = &pFGroup->files[(*index) % 3];
+ SFile *pFile = &pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX];
fname = strdup(pFile->fname);
+ magic = pFile->info.magic;
}
}
if (stat(fname, &fState) < 0) {
- tfree(sdup);
tfree(fname);
return 0;
}
- tfree(sdup);
*size = fState.st_size;
- magic = *size;
+ // magic = *size;
tfree(fname);
return magic;
@@ -793,7 +795,8 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo) {
for (int i = 1; i < pRepo->config.maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable == NULL) continue;
- SCompIdx *pIdx = &rhelper.pCompIdx[i];
+ tsdbSetHelperTable(&rhelper, pTable, pRepo);
+ SCompIdx *pIdx = &(rhelper.curCompIdx);
if (pIdx->offset > 0 && pTable->lastKey < pIdx->maxKey) pTable->lastKey = pIdx->maxKey;
}
diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c
index b29cec3cf9..7708646a61 100644
--- a/src/tsdb/src/tsdbMemTable.c
+++ b/src/tsdb/src/tsdbMemTable.c
@@ -31,7 +31,6 @@ static int tsdbCommitMeta(STsdbRepo *pRepo);
static void tsdbEndCommit(STsdbRepo *pRepo);
static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey);
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
-static void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey);
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo);
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables);
@@ -265,13 +264,11 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
}
do {
- if (numOfRows >= maxRowsToRead) break;
-
SDataRow row = tsdbNextIterRow(pIter);
if (row == NULL) break;
keyNext = dataRowKey(row);
- if (keyNext < 0 || keyNext > maxKey) break;
+ if (keyNext > maxKey) break;
bool keyFiltered = false;
if (nFilterKeys != 0) {
@@ -290,6 +287,7 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
}
if (!keyFiltered) {
+ if (numOfRows >= maxRowsToRead) break;
if (pCols) {
if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) {
pSchema = tsdbGetTableSchemaImpl(pTable, false, false, dataRowVersion(row));
@@ -544,7 +542,7 @@ static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSK
return 0;
}
-static void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey) {
+void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey) {
*minKey = fileId * daysPerFile * tsMsPerDay[precision];
*maxKey = *minKey + daysPerFile * tsMsPerDay[precision] - 1;
}
@@ -628,9 +626,12 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
tsdbCloseHelperFile(pHelper, 0);
pthread_rwlock_wrlock(&(pFileH->fhlock));
- pGroup->files[TSDB_FILE_TYPE_HEAD] = pHelper->files.headF;
- pGroup->files[TSDB_FILE_TYPE_DATA] = pHelper->files.dataF;
- pGroup->files[TSDB_FILE_TYPE_LAST] = pHelper->files.lastF;
+#ifdef TSDB_IDX
+ pGroup->files[TSDB_FILE_TYPE_IDX] = *(helperIdxF(pHelper));
+#endif
+ pGroup->files[TSDB_FILE_TYPE_HEAD] = *(helperHeadF(pHelper));
+ pGroup->files[TSDB_FILE_TYPE_DATA] = *(helperDataF(pHelper));
+ pGroup->files[TSDB_FILE_TYPE_LAST] = *(helperLastF(pHelper));
pthread_rwlock_unlock(&(pFileH->fhlock));
return 0;
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index c1923f5235..b25e734694 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -123,7 +123,10 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
int tlen2 = tsdbGetTableEncodeSize(TSDB_UPDATE_META, table);
int tlen = tlen1 + tlen2;
void *buf = tsdbAllocBytes(pRepo, tlen);
- ASSERT(buf != NULL);
+ if (buf == NULL) {
+ goto _err;
+ }
+
if (newSuper) {
void *pBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, super);
ASSERT(POINTER_DISTANCE(pBuf, buf) == tlen1);
diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c
index 0d52b7ae33..3063f22a28 100644
--- a/src/tsdb/src/tsdbRWHelper.c
+++ b/src/tsdb/src/tsdbRWHelper.c
@@ -99,6 +99,7 @@ void tsdbResetHelper(SRWHelper *pHelper) {
int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
ASSERT(pHelper != NULL && pGroup != NULL);
+ SFile *pFile = NULL;
// Clear the helper object
tsdbResetHelper(pHelper);
@@ -106,44 +107,51 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
ASSERT(pHelper->state == TSDB_HELPER_CLEAR_STATE);
// Set the files
- pHelper->files.fid = pGroup->fileId;
- pHelper->files.headF = pGroup->files[TSDB_FILE_TYPE_HEAD];
- pHelper->files.dataF = pGroup->files[TSDB_FILE_TYPE_DATA];
- pHelper->files.lastF = pGroup->files[TSDB_FILE_TYPE_LAST];
+ pHelper->files.fGroup = *pGroup;
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
- tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NHEAD, pHelper->files.nHeadF.fname);
- tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NLAST, pHelper->files.nLastF.fname);
+#ifdef TSDB_IDX
+ tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NIDX, helperNewIdxF(pHelper)->fname);
+#endif
+ tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NHEAD, helperNewHeadF(pHelper)->fname);
+ tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NLAST, helperNewLastF(pHelper)->fname);
}
// Open the files
- if (tsdbOpenFile(&(pHelper->files.headF), O_RDONLY) < 0) goto _err;
+#ifdef TSDB_IDX
+ if (tsdbOpenFile(helperIdxF(pHelper), O_RDONLY) < 0) goto _err;
+#endif
+ if (tsdbOpenFile(helperHeadF(pHelper), O_RDONLY) < 0) goto _err;
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
- if (tsdbOpenFile(&(pHelper->files.dataF), O_RDWR) < 0) goto _err;
- if (tsdbOpenFile(&(pHelper->files.lastF), O_RDWR) < 0) goto _err;
+ if (tsdbOpenFile(helperDataF(pHelper), O_RDWR) < 0) goto _err;
+ if (tsdbOpenFile(helperLastF(pHelper), O_RDWR) < 0) goto _err;
+
+#ifdef TSDB_IDX
+ // Create and open .i file
+ pFile = helperNewIdxF(pHelper);
+ if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1;
+ pFile->info.size = TSDB_FILE_HEAD_SIZE;
+ pFile->info.magic = TSDB_FILE_INIT_MAGIC;
+ if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
+#endif
// Create and open .h
- if (tsdbOpenFile(&(pHelper->files.nHeadF), O_WRONLY | O_CREAT) < 0) return -1;
- // size_t tsize = TSDB_FILE_HEAD_SIZE + sizeof(SCompIdx) * pCfg->maxTables + sizeof(TSCKSUM);
- if (tsendfile(pHelper->files.nHeadF.fd, pHelper->files.headF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) {
- tsdbError("vgId:%d failed to sendfile %d bytes from file %s to %s since %s", REPO_ID(pHelper->pRepo),
- TSDB_FILE_HEAD_SIZE, pHelper->files.headF.fname, pHelper->files.nHeadF.fname, strerror(errno));
- terrno = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ pFile = helperNewHeadF(pHelper);
+ if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1;
+ pFile->info.size = TSDB_FILE_HEAD_SIZE;
+ pFile->info.magic = TSDB_FILE_INIT_MAGIC;
+ if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
// Create and open .l file if should
if (tsdbShouldCreateNewLast(pHelper)) {
- if (tsdbOpenFile(&(pHelper->files.nLastF), O_WRONLY | O_CREAT) < 0) goto _err;
- if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) {
- tsdbError("vgId:%d failed to sendfile %d bytes from file %s to %s since %s", REPO_ID(pHelper->pRepo),
- TSDB_FILE_HEAD_SIZE, pHelper->files.lastF.fname, pHelper->files.nLastF.fname, strerror(errno));
- terrno = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ pFile = helperNewLastF(pHelper);
+ if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) goto _err;
+ pFile->info.size = TSDB_FILE_HEAD_SIZE;
+ pFile->info.magic = TSDB_FILE_INIT_MAGIC;
+ if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
}
} else {
- if (tsdbOpenFile(&(pHelper->files.dataF), O_RDONLY) < 0) goto _err;
- if (tsdbOpenFile(&(pHelper->files.lastF), O_RDONLY) < 0) goto _err;
+ if (tsdbOpenFile(helperDataF(pHelper), O_RDONLY) < 0) goto _err;
+ if (tsdbOpenFile(helperLastF(pHelper), O_RDONLY) < 0) goto _err;
}
helperSetState(pHelper, TSDB_HELPER_FILE_SET_AND_OPEN);
@@ -155,59 +163,98 @@ _err:
}
int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) {
- if (pHelper->files.headF.fd > 0) {
- close(pHelper->files.headF.fd);
- pHelper->files.headF.fd = -1;
+ SFile *pFile = NULL;
+
+#ifdef TSDB_IDX
+ pFile = helperIdxF(pHelper);
+ if (pFile->fd > 0) {
+ close(pFile->fd);
+ pFile->fd = -1;
}
- if (pHelper->files.dataF.fd > 0) {
+#endif
+
+ pFile = helperHeadF(pHelper);
+ if (pFile->fd > 0) {
+ close(pFile->fd);
+ pFile->fd = -1;
+ }
+
+ pFile = helperDataF(pHelper);
+ if (pFile->fd > 0) {
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
- tsdbUpdateFileHeader(&(pHelper->files.dataF), 0);
- fsync(pHelper->files.dataF.fd);
+ tsdbUpdateFileHeader(pFile, 0);
+ fsync(pFile->fd);
}
- close(pHelper->files.dataF.fd);
- pHelper->files.dataF.fd = -1;
+ close(pFile->fd);
+ pFile->fd = -1;
}
- if (pHelper->files.lastF.fd > 0) {
- if (helperType(pHelper) == TSDB_WRITE_HELPER) {
- fsync(pHelper->files.lastF.fd);
+
+ pFile = helperLastF(pHelper);
+ if (pFile->fd > 0) {
+ if (helperType(pHelper) == TSDB_WRITE_HELPER && !TSDB_NLAST_FILE_OPENED(pHelper)) {
+ fsync(pFile->fd);
}
- close(pHelper->files.lastF.fd);
- pHelper->files.lastF.fd = -1;
+ close(pFile->fd);
+ pFile->fd = -1;
}
+
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
- if (pHelper->files.nHeadF.fd > 0) {
- if (!hasError) tsdbUpdateFileHeader(&(pHelper->files.nHeadF), 0);
- fsync(pHelper->files.nHeadF.fd);
- close(pHelper->files.nHeadF.fd);
- pHelper->files.nHeadF.fd = -1;
+#ifdef TSDB_IDX
+ pFile = helperNewIdxF(pHelper);
+ if (pFile->fd > 0) {
+ if (!hasError) tsdbUpdateFileHeader(pFile, 0);
+ fsync(pFile->fd);
+ close(pFile->fd);
+ pFile->fd = -1;
if (hasError) {
- (void)remove(pHelper->files.nHeadF.fname);
+ (void)remove(pFile->fname);
} else {
- if (rename(pHelper->files.nHeadF.fname, pHelper->files.headF.fname) < 0) {
- tsdbError("failed to rename file from %s to %s since %s", pHelper->files.nHeadF.fname,
- pHelper->files.headF.fname, strerror(errno));
+ if (rename(pFile->fname, helperIdxF(pHelper)->fname) < 0) {
+ tsdbError("failed to rename file from %s to %s since %s", pFile->fname, helperIdxF(pHelper)->fname,
+ strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
- pHelper->files.headF.info = pHelper->files.nHeadF.info;
+ helperIdxF(pHelper)->info = pFile->info;
+ }
+ }
+#endif
+
+ pFile = helperNewHeadF(pHelper);
+ if (pFile->fd > 0) {
+ if (!hasError) tsdbUpdateFileHeader(pFile, 0);
+ fsync(pFile->fd);
+ close(pFile->fd);
+ pFile->fd = -1;
+ if (hasError) {
+ (void)remove(pFile->fname);
+ } else {
+ if (rename(pFile->fname, helperHeadF(pHelper)->fname) < 0) {
+ tsdbError("failed to rename file from %s to %s since %s", pFile->fname, helperHeadF(pHelper)->fname,
+ strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+ helperHeadF(pHelper)->info = pFile->info;
}
}
- if (pHelper->files.nLastF.fd > 0) {
- if (!hasError) tsdbUpdateFileHeader(&(pHelper->files.nLastF), 0);
- fsync(pHelper->files.nLastF.fd);
- close(pHelper->files.nLastF.fd);
- pHelper->files.nLastF.fd = -1;
+ pFile = helperNewLastF(pHelper);
+ if (pFile->fd > 0) {
+ if (!hasError) tsdbUpdateFileHeader(pFile, 0);
+ fsync(pFile->fd);
+ close(pFile->fd);
+ pFile->fd = -1;
if (hasError) {
- (void)remove(pHelper->files.nLastF.fname);
+ (void)remove(pFile->fname);
} else {
- if (rename(pHelper->files.nLastF.fname, pHelper->files.lastF.fname) < 0) {
- tsdbError("failed to rename file from %s to %s since %s", pHelper->files.nLastF.fname,
- pHelper->files.lastF.fname, strerror(errno));
+ if (rename(pFile->fname, helperLastF(pHelper)->fname) < 0) {
+ tsdbError("failed to rename file from %s to %s since %s", pFile->fname, helperLastF(pHelper)->fname,
+ strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
- pHelper->files.lastF.info = pHelper->files.nLastF.info;
+ helperLastF(pHelper)->info = helperNewLastF(pHelper)->info;
}
}
}
@@ -224,18 +271,35 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
pHelper->tableInfo.tid = pTable->tableId.tid;
pHelper->tableInfo.uid = pTable->tableId.uid;
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
- pHelper->tableInfo.sversion = schemaVersion(pSchema);
tdInitDataCols(pHelper->pDataCols[0], pSchema);
tdInitDataCols(pHelper->pDataCols[1], pSchema);
- SCompIdx *pIdx = pHelper->pCompIdx + pTable->tableId.tid;
- if (pIdx->offset > 0) {
- if (pIdx->uid != TABLE_UID(pTable)) {
- memset((void *)pIdx, 0, sizeof(SCompIdx));
- } else {
- if (pIdx->hasLast) pHelper->hasOldLastBlock = true;
+ if (pHelper->idxH.numOfIdx > 0) {
+ while (true) {
+ if (pHelper->idxH.curIdx >= pHelper->idxH.numOfIdx) {
+ memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
+ break;
+ }
+
+ SCompIdx *pIdx = &(pHelper->idxH.pIdxArray[pHelper->idxH.curIdx]);
+ if (pIdx->tid == TABLE_TID(pTable)) {
+ if (pIdx->uid == TABLE_UID(pTable)) {
+ pHelper->curCompIdx = *pIdx;
+ } else {
+ memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
+ }
+ pHelper->idxH.curIdx++;
+ break;
+ } else if (pIdx->tid > TABLE_TID(pTable)) {
+ memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
+ break;
+ } else {
+ pHelper->idxH.curIdx++;
+ }
}
+ } else {
+ memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
}
helperSetState(pHelper, TSDB_HELPER_TABLE_SET);
@@ -245,8 +309,8 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
int tsdbCommitTableData(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey) {
ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER);
- SCompIdx * pIdx = &(pHelper->pCompIdx[TABLE_TID(pCommitIter->pTable)]);
- int blkIdx = 0;
+ SCompIdx *pIdx = &(pHelper->curCompIdx);
+ int blkIdx = 0;
ASSERT(pIdx->offset == 0 || pIdx->uid == TABLE_UID(pCommitIter->pTable));
if (tsdbLoadCompInfo(pHelper, NULL) < 0) return -1;
@@ -271,44 +335,53 @@ int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) {
STsdbCfg *pCfg = &pHelper->pRepo->config;
ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER);
- SCompIdx * pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
+ SCompIdx * pIdx = &(pHelper->curCompIdx);
SCompBlock compBlock = {0};
if (TSDB_NLAST_FILE_OPENED(pHelper) && (pHelper->hasOldLastBlock)) {
if (tsdbLoadCompInfo(pHelper, NULL) < 0) return -1;
SCompBlock *pCompBlock = blockAtIdx(pHelper, pIdx->numOfBlocks - 1);
ASSERT(pCompBlock->last);
+ if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1;
+ ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows &&
+ pHelper->pDataCols[0]->numOfRows < pCfg->minRowsPerFileBlock);
+ if (tsdbWriteBlockToFile(pHelper, helperNewLastF(pHelper), pHelper->pDataCols[0], &compBlock, true, true) < 0)
+ return -1;
+ if (tsdbUpdateSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1) < 0) return -1;
+
+#if 0
if (pCompBlock->numOfSubBlocks > 1) {
if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1;
ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows &&
pHelper->pDataCols[0]->numOfRows < pCfg->minRowsPerFileBlock);
- if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.nLastF), pHelper->pDataCols[0], &compBlock, true, true) < 0)
+ if (tsdbWriteBlockToFile(pHelper, helperNewLastF(pHelper), pHelper->pDataCols[0], &compBlock, true, true) < 0)
return -1;
if (tsdbUpdateSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1) < 0) return -1;
} else {
- if (lseek(pHelper->files.lastF.fd, pCompBlock->offset, SEEK_SET) < 0) {
- tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.lastF.fname,
+ if (lseek(helperLastF(pHelper)->fd, pCompBlock->offset, SEEK_SET) < 0) {
+ tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), helperLastF(pHelper)->fname,
strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
- pCompBlock->offset = lseek(pHelper->files.nLastF.fd, 0, SEEK_END);
+ pCompBlock->offset = lseek(helperNewLastF(pHelper)->fd, 0, SEEK_END);
if (pCompBlock->offset < 0) {
- tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.nLastF.fname,
+ tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), helperNewLastF(pHelper)->fname,
strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
- if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, pCompBlock->len) < pCompBlock->len) {
+ if (tsendfile(helperNewLastF(pHelper)->fd, helperLastF(pHelper)->fd, NULL, pCompBlock->len) < pCompBlock->len) {
tsdbError("vgId:%d failed to sendfile from file %s to file %s since %s", REPO_ID(pHelper->pRepo),
- pHelper->files.lastF.fname, pHelper->files.nLastF.fname, strerror(errno));
+ helperLastF(pHelper)->fname, helperNewLastF(pHelper)->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
}
+#endif
pHelper->hasOldLastBlock = false;
}
@@ -317,164 +390,178 @@ int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) {
}
int tsdbWriteCompInfo(SRWHelper *pHelper) {
- off_t offset = 0;
- SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
- if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) {
- if (pIdx->offset > 0) {
- offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);
- if (offset < 0) {
- tsdbError("vgId:%d failed to lseed file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.nHeadF.fname,
- strerror(errno));
- terrno = TAOS_SYSTEM_ERROR(errno);
- return -1;
- }
+ SCompIdx *pIdx = &(pHelper->curCompIdx);
+ off_t offset = 0;
+ SFile * pFile = helperNewHeadF(pHelper);
- pIdx->offset = offset;
- ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE);
-
- if (tsendfile(pHelper->files.nHeadF.fd, pHelper->files.headF.fd, NULL, pIdx->len) < pIdx->len) {
- tsdbError("vgId:%d failed to send %d bytes from file %s to %s since %s", REPO_ID(pHelper->pRepo), pIdx->len,
- pHelper->files.headF.fname, pHelper->files.nHeadF.fname, strerror(errno));
- terrno = TAOS_SYSTEM_ERROR(errno);
- return -1;
- }
- }
- } else {
- if (pIdx->len > 0) {
+ if (pIdx->len > 0) {
+ if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) {
+ if (tsdbLoadCompInfo(pHelper, NULL) < 0) return -1;
+ } else {
pHelper->pCompInfo->delimiter = TSDB_FILE_DELIMITER;
pHelper->pCompInfo->uid = pHelper->tableInfo.uid;
- pHelper->pCompInfo->checksum = 0;
+ pHelper->pCompInfo->tid = pHelper->tableInfo.tid;
ASSERT(pIdx->len > sizeof(SCompInfo) + sizeof(TSCKSUM) &&
(pIdx->len - sizeof(SCompInfo) - sizeof(TSCKSUM)) % sizeof(SCompBlock) == 0);
taosCalcChecksumAppend(0, (uint8_t *)pHelper->pCompInfo, pIdx->len);
- offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);
- if (offset < 0) {
- tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.nHeadF.fname,
- strerror(errno));
- terrno = TAOS_SYSTEM_ERROR(errno);
- return -1;
- }
- pIdx->offset = offset;
- pIdx->uid = pHelper->tableInfo.uid;
- ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE);
+ }
- if (twrite(pHelper->files.nHeadF.fd, (void *)(pHelper->pCompInfo), pIdx->len) < pIdx->len) {
- tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pIdx->len,
- pHelper->files.nHeadF.fname, strerror(errno));
- terrno = TAOS_SYSTEM_ERROR(errno);
+ pFile->info.magic = taosCalcChecksum(
+ pFile->info.magic, (uint8_t *)POINTER_SHIFT(pHelper->pCompInfo, pIdx->len - sizeof(TSCKSUM)), sizeof(TSCKSUM));
+ offset = lseek(pFile->fd, 0, SEEK_END);
+ if (offset < 0) {
+ tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+ pIdx->offset = offset;
+ pIdx->uid = pHelper->tableInfo.uid;
+ pIdx->tid = pHelper->tableInfo.tid;
+ ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE);
+
+ if (twrite(pFile->fd, (void *)(pHelper->pCompInfo), pIdx->len) < pIdx->len) {
+ tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pIdx->len,
+ pFile->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+
+#ifdef TSDB_IDX
+ pFile = helperNewIdxF(pHelper);
+#endif
+
+ if (tsizeof(pHelper->pWIdx) < pFile->info.len + sizeof(SCompIdx) + 12) {
+ pHelper->pWIdx = trealloc(pHelper->pWIdx, tsizeof(pHelper->pWIdx) == 0 ? 1024 : tsizeof(pHelper->pWIdx) * 2);
+ if (pHelper->pWIdx == NULL) {
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
}
+
+ void *pBuf = POINTER_SHIFT(pHelper->pWIdx, pFile->info.len);
+ pFile->info.len += tsdbEncodeSCompIdx(&pBuf, &(pHelper->curCompIdx));
}
return 0;
}
int tsdbWriteCompIdx(SRWHelper *pHelper) {
- STsdbCfg *pCfg = &pHelper->pRepo->config;
-
ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER);
- off_t offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);
- if (offset < 0) {
- tsdbError("vgId:%d failed to lseek file %s to end since %s", REPO_ID(pHelper->pRepo), pHelper->files.nHeadF.fname,
- strerror(errno));
- terrno = TAOS_SYSTEM_ERROR(errno);
- return -1;
- }
+ off_t offset = 0;
- SFile *pFile = &(pHelper->files.nHeadF);
- pFile->info.offset = offset;
+#ifdef TSDB_IDX
+ SFile *pFile = helperNewIdxF(pHelper);
+#else
+ SFile *pFile = helperNewHeadF(pHelper);
+#endif
- void *buf = pHelper->pBuffer;
- for (uint32_t i = 0; i < pCfg->maxTables; i++) {
- SCompIdx *pCompIdx = pHelper->pCompIdx + i;
- if (pCompIdx->offset > 0) {
- int drift = POINTER_DISTANCE(buf, pHelper->pBuffer);
- if (tsizeof(pHelper->pBuffer) - drift < 128) {
- pHelper->pBuffer = trealloc(pHelper->pBuffer, tsizeof(pHelper->pBuffer) * 2);
- if (pHelper->pBuffer == NULL) {
- terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- return -1;
- }
- }
- buf = POINTER_SHIFT(pHelper->pBuffer, drift);
- taosEncodeVariantU32(&buf, i);
- tsdbEncodeSCompIdx(&buf, pCompIdx);
+ pFile->info.len += sizeof(TSCKSUM);
+ if (tsizeof(pHelper->pWIdx) < pFile->info.len) {
+ pHelper->pWIdx = trealloc(pHelper->pWIdx, pFile->info.len);
+ if (pHelper->pWIdx == NULL) {
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ return -1;
}
}
+ taosCalcChecksumAppend(0, (uint8_t *)pHelper->pWIdx, pFile->info.len);
+ pFile->info.magic = taosCalcChecksum(
+ pFile->info.magic, (uint8_t *)POINTER_SHIFT(pHelper->pWIdx, pFile->info.len - sizeof(TSCKSUM)), sizeof(TSCKSUM));
- int tsize = (char *)buf - (char *)pHelper->pBuffer + sizeof(TSCKSUM);
- taosCalcChecksumAppend(0, (uint8_t *)pHelper->pBuffer, tsize);
-
- if (twrite(pHelper->files.nHeadF.fd, (void *)pHelper->pBuffer, tsize) < tsize) {
- tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), tsize,
- pHelper->files.nHeadF.fname, strerror(errno));
+ offset = lseek(pFile->fd, 0, SEEK_END);
+ if (offset < 0) {
+ tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
- pFile->info.len = tsize;
+
+ pFile->info.offset = offset;
+
+ if (twrite(pFile->fd, (void *)pHelper->pWIdx, pFile->info.len) < pFile->info.len) {
+ tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pFile->info.len,
+ pFile->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+
return 0;
}
int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
- STsdbCfg *pCfg = &(pHelper->pRepo->config);
-
ASSERT(pHelper->state == TSDB_HELPER_FILE_SET_AND_OPEN);
+#ifdef TSDB_IDX
+ SFile *pFile = helperIdxF(pHelper);
+#else
+ SFile *pFile = helperHeadF(pHelper);
+#endif
+ int fd = pFile->fd;
if (!helperHasState(pHelper, TSDB_HELPER_IDX_LOAD)) {
// If not load from file, just load it in object
- SFile *pFile = &(pHelper->files.headF);
- int fd = pFile->fd;
-
- memset(pHelper->pCompIdx, 0, tsizeof(pHelper->pCompIdx));
- if (pFile->info.offset > 0) {
- ASSERT(pFile->info.offset > TSDB_FILE_HEAD_SIZE);
-
- if (lseek(fd, pFile->info.offset, SEEK_SET) < 0) {
- tsdbError("vgId:%d failed to lseek file %s to %u since %s", REPO_ID(pHelper->pRepo), pFile->fname,
- pFile->info.offset, strerror(errno));
- terrno = TAOS_SYSTEM_ERROR(errno);
- return -1;
- }
+ if (pFile->info.len > 0) {
if ((pHelper->pBuffer = trealloc(pHelper->pBuffer, pFile->info.len)) == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
+
+ if (lseek(fd, pFile->info.offset, SEEK_SET) < 0) {
+ tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+
if (tread(fd, (void *)(pHelper->pBuffer), pFile->info.len) < pFile->info.len) {
tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pFile->info.len,
pFile->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
+
if (!taosCheckChecksumWhole((uint8_t *)(pHelper->pBuffer), pFile->info.len)) {
- tsdbError("vgId:%d file %s SCompIdx part is corrupted. offset %u len %u", REPO_ID(pHelper->pRepo), pFile->fname,
- pFile->info.offset, pFile->info.len);
+ tsdbError("vgId:%d file %s SCompIdx part is corrupted. len %u", REPO_ID(pHelper->pRepo), pFile->fname,
+ pFile->info.len);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
// Decode it
+ pHelper->idxH.numOfIdx = 0;
void *ptr = pHelper->pBuffer;
while (POINTER_DISTANCE(ptr, pHelper->pBuffer) < (pFile->info.len - sizeof(TSCKSUM))) {
- uint32_t tid = 0;
- if ((ptr = taosDecodeVariantU32(ptr, &tid)) == NULL) return -1;
- ASSERT(tid > 0 && tid < pCfg->maxTables);
+ size_t tlen = tsizeof(pHelper->idxH.pIdxArray);
+ pHelper->idxH.numOfIdx++;
- if ((ptr = tsdbDecodeSCompIdx(ptr, pHelper->pCompIdx + tid)) == NULL) return -1;
+ if (tlen < pHelper->idxH.numOfIdx * sizeof(SCompIdx)) {
+ pHelper->idxH.pIdxArray = (SCompIdx *)trealloc(pHelper->idxH.pIdxArray, (tlen == 0) ? 1024 : tlen * 2);
+ if (pHelper->idxH.pIdxArray == NULL) {
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ return -1;
+ }
+ }
+
+ ptr = tsdbDecodeSCompIdx(ptr, &(pHelper->idxH.pIdxArray[pHelper->idxH.numOfIdx - 1]));
+ if (ptr == NULL) {
+ tsdbError("vgId:%d file %s SCompIdx part is corrupted. len %u", REPO_ID(pHelper->pRepo), pFile->fname,
+ pFile->info.len);
+ terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
+ return -1;
+ }
+
+ ASSERT(pHelper->idxH.numOfIdx == 1 || pHelper->idxH.pIdxArray[pHelper->idxH.numOfIdx - 1].tid >
+ pHelper->idxH.pIdxArray[pHelper->idxH.numOfIdx - 2].tid);
ASSERT(POINTER_DISTANCE(ptr, pHelper->pBuffer) <= pFile->info.len - sizeof(TSCKSUM));
}
-
- if (lseek(fd, TSDB_FILE_HEAD_SIZE, SEEK_SET) < 0) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- return -1;
- }
}
}
helperSetState(pHelper, TSDB_HELPER_IDX_LOAD);
+ if (helperType(pHelper) == TSDB_WRITE_HELPER) {
+ pFile->info.len = 0;
+ }
+
// Copy the memory for outside usage
- if (target) memcpy(target, pHelper->pCompIdx, tsizeof(pHelper->pCompIdx));
+ if (target && pHelper->idxH.numOfIdx > 0)
+ memcpy(target, pHelper->idxH.pIdxArray, sizeof(SCompIdx) * pHelper->idxH.numOfIdx);
return 0;
}
@@ -482,15 +569,15 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
int tsdbLoadCompInfo(SRWHelper *pHelper, void *target) {
ASSERT(helperHasState(pHelper, TSDB_HELPER_TABLE_SET));
- SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
+ SCompIdx *pIdx = &(pHelper->curCompIdx);
- int fd = pHelper->files.headF.fd;
+ int fd = helperHeadF(pHelper)->fd;
if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) {
if (pIdx->offset > 0) {
ASSERT(pIdx->uid == pHelper->tableInfo.uid);
if (lseek(fd, pIdx->offset, SEEK_SET) < 0) {
- tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.headF.fname,
+ tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), helperHeadF(pHelper)->fname,
strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
@@ -499,18 +586,18 @@ int tsdbLoadCompInfo(SRWHelper *pHelper, void *target) {
pHelper->pCompInfo = trealloc((void *)pHelper->pCompInfo, pIdx->len);
if (tread(fd, (void *)(pHelper->pCompInfo), pIdx->len) < pIdx->len) {
tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pIdx->len,
- pHelper->files.headF.fname, strerror(errno));
+ helperHeadF(pHelper)->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (!taosCheckChecksumWhole((uint8_t *)pHelper->pCompInfo, pIdx->len)) {
tsdbError("vgId:%d file %s SCompInfo part is corrupted, tid %d uid %" PRIu64, REPO_ID(pHelper->pRepo),
- pHelper->files.headF.fname, pHelper->tableInfo.tid, pHelper->tableInfo.uid);
+ helperHeadF(pHelper)->fname, pHelper->tableInfo.tid, pHelper->tableInfo.uid);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
- ASSERT(pIdx->uid == pHelper->pCompInfo->uid);
+ ASSERT(pIdx->uid == pHelper->pCompInfo->uid && pIdx->tid == pHelper->pCompInfo->tid);
}
helperSetState(pHelper, TSDB_HELPER_INFO_LOAD);
@@ -523,7 +610,7 @@ int tsdbLoadCompInfo(SRWHelper *pHelper, void *target) {
int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target) {
ASSERT(pCompBlock->numOfSubBlocks <= 1);
- SFile *pFile = (pCompBlock->last) ? &(pHelper->files.lastF) : &(pHelper->files.dataF);
+ SFile *pFile = (pCompBlock->last) ? helperLastF(pHelper) : helperDataF(pHelper);
if (lseek(pFile->fd, pCompBlock->offset, SEEK_SET) < 0) {
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno));
@@ -642,9 +729,9 @@ _err:
// ---------------------- INTERNAL FUNCTIONS ----------------------
static bool tsdbShouldCreateNewLast(SRWHelper *pHelper) {
- ASSERT(pHelper->files.lastF.fd > 0);
+ ASSERT(helperLastF(pHelper)->fd > 0);
struct stat st;
- if (fstat(pHelper->files.lastF.fd, &st) < 0) return true;
+ if (fstat(helperLastF(pHelper)->fd, &st) < 0) return true;
if (st.st_size > 32 * 1024 + TSDB_FILE_HEAD_SIZE) return true;
return false;
}
@@ -729,6 +816,8 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
ASSERT(flen > 0);
flen += sizeof(TSCKSUM);
taosCalcChecksumAppend(0, (uint8_t *)tptr, flen);
+ pFile->info.magic =
+ taosCalcChecksum(pFile->info.magic, (uint8_t *)POINTER_SHIFT(tptr, flen - sizeof(TSCKSUM)), sizeof(TSCKSUM));
if (ncol != 0) {
pCompCol->offset = toffset;
@@ -747,6 +836,8 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
pCompData->numOfCols = nColsNotAllNull;
taosCalcChecksumAppend(0, (uint8_t *)pCompData, tsize);
+ pFile->info.magic = taosCalcChecksum(pFile->info.magic, (uint8_t *)POINTER_SHIFT(pCompData, tsize - sizeof(TSCKSUM)),
+ sizeof(TSCKSUM));
// Write the whole block to file
if (twrite(pFile->fd, (void *)pCompData, lsize) < lsize) {
@@ -804,7 +895,7 @@ static int tsdbAdjustInfoSizeIfNeeded(SRWHelper *pHelper, size_t esize) {
}
static int tsdbInsertSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx) {
- SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
+ SCompIdx *pIdx = &(pHelper->curCompIdx);
ASSERT(blkIdx >= 0 && blkIdx <= pIdx->numOfBlocks);
ASSERT(pCompBlock->numOfSubBlocks == 1);
@@ -851,7 +942,7 @@ _err:
static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx, int rowsAdded) {
ASSERT(pCompBlock->numOfSubBlocks == 0);
- SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
+ SCompIdx *pIdx = &(pHelper->curCompIdx);
ASSERT(blkIdx >= 0 && blkIdx < pIdx->numOfBlocks);
SCompBlock *pSCompBlock = pHelper->pCompInfo->blocks + blkIdx;
@@ -935,7 +1026,7 @@ _err:
static int tsdbUpdateSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx) {
ASSERT(pCompBlock->numOfSubBlocks == 1);
- SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
+ SCompIdx *pIdx = &(pHelper->curCompIdx);
ASSERT(blkIdx >= 0 && blkIdx < pIdx->numOfBlocks);
@@ -971,24 +1062,21 @@ static int tsdbUpdateSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int
}
static void tsdbResetHelperFileImpl(SRWHelper *pHelper) {
+ pHelper->idxH.numOfIdx = 0;
+ pHelper->idxH.curIdx = 0;
memset((void *)&pHelper->files, 0, sizeof(pHelper->files));
- pHelper->files.fid = -1;
- pHelper->files.headF.fd = -1;
- pHelper->files.dataF.fd = -1;
- pHelper->files.lastF.fd = -1;
- pHelper->files.nHeadF.fd = -1;
- pHelper->files.nLastF.fd = -1;
+ helperHeadF(pHelper)->fd = -1;
+ helperDataF(pHelper)->fd = -1;
+ helperLastF(pHelper)->fd = -1;
+ helperNewHeadF(pHelper)->fd = -1;
+ helperNewLastF(pHelper)->fd = -1;
+#ifdef TSDB_IDX
+ helperIdxF(pHelper)->fd = -1;
+ helperNewIdxF(pHelper)->fd = -1;
+#endif
}
static int tsdbInitHelperFile(SRWHelper *pHelper) {
- STsdbCfg *pCfg = &pHelper->pRepo->config;
- size_t tsize = sizeof(SCompIdx) * pCfg->maxTables + sizeof(TSCKSUM);
- pHelper->pCompIdx = (SCompIdx *)tmalloc(tsize);
- if (pHelper->pCompIdx == NULL) {
- terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- return -1;
- }
-
tsdbResetHelperFileImpl(pHelper);
return 0;
}
@@ -996,7 +1084,8 @@ static int tsdbInitHelperFile(SRWHelper *pHelper) {
static void tsdbDestroyHelperFile(SRWHelper *pHelper) {
tsdbCloseHelperFile(pHelper, false);
tsdbResetHelperFileImpl(pHelper);
- tzfree(pHelper->pCompIdx);
+ tzfree(pHelper->idxH.pIdxArray);
+ tzfree(pHelper->pWIdx);
}
// ---------- Operations on Helper Table part
@@ -1154,7 +1243,7 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
ASSERT(pCompBlock->numOfSubBlocks <= 1);
ASSERT(colIds[0] == 0);
- SFile * pFile = (pCompBlock->last) ? &(pHelper->files.lastF) : &(pHelper->files.dataF);
+ SFile * pFile = (pCompBlock->last) ? helperLastF(pHelper) : helperDataF(pHelper);
SCompCol compCol = {0};
// If only load timestamp column, no need to load SCompData part
@@ -1215,7 +1304,7 @@ _err:
static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *pDataCols) {
ASSERT(pCompBlock->numOfSubBlocks <= 1);
- SFile *pFile = (pCompBlock->last) ? &(pHelper->files.lastF) : &(pHelper->files.dataF);
+ SFile *pFile = (pCompBlock->last) ? helperLastF(pHelper) : helperDataF(pHelper);
pHelper->pBuffer = trealloc(pHelper->pBuffer, pCompBlock->len);
if (pHelper->pBuffer == NULL) {
@@ -1314,6 +1403,7 @@ _err:
static int tsdbEncodeSCompIdx(void **buf, SCompIdx *pIdx) {
int tlen = 0;
+ tlen += taosEncodeVariantI32(buf, pIdx->tid);
tlen += taosEncodeVariantU32(buf, pIdx->len);
tlen += taosEncodeVariantU32(buf, pIdx->offset);
tlen += taosEncodeFixedU8(buf, pIdx->hasLast);
@@ -1329,6 +1419,7 @@ static void *tsdbDecodeSCompIdx(void *buf, SCompIdx *pIdx) {
uint32_t numOfBlocks = 0;
uint64_t value = 0;
+ if ((buf = taosDecodeVariantI32(buf, &(pIdx->tid))) == NULL) return NULL;
if ((buf = taosDecodeVariantU32(buf, &(pIdx->len))) == NULL) return NULL;
if ((buf = taosDecodeVariantU32(buf, &(pIdx->offset))) == NULL) return NULL;
if ((buf = taosDecodeFixedU8(buf, &(hasLast))) == NULL) return NULL;
@@ -1346,7 +1437,7 @@ static void *tsdbDecodeSCompIdx(void *buf, SCompIdx *pIdx) {
static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey) {
STsdbCfg * pCfg = &(pHelper->pRepo->config);
STable * pTable = pCommitIter->pTable;
- SCompIdx * pIdx = pHelper->pCompIdx + TABLE_TID(pTable);
+ SCompIdx * pIdx = &(pHelper->curCompIdx);
TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter);
int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5;
SCompBlock compBlock = {0};
@@ -1362,7 +1453,7 @@ static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
ASSERT(rowsRead > 0 && rowsRead == pDataCols->numOfRows);
if (rowsRead + pCompBlock->numOfRows < pCfg->minRowsPerFileBlock &&
pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) {
- if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.lastF), pDataCols, &compBlock, true, false) < 0) return -1;
+ if (tsdbWriteBlockToFile(pHelper, helperLastF(pHelper), pDataCols, &compBlock, true, false) < 0) return -1;
if (tsdbAddSubBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1, rowsRead) < 0) return -1;
} else {
if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1;
@@ -1386,6 +1477,11 @@ static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
if (tsdbInsertSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks) < 0) return -1;
}
+#ifndef NDEBUG
+ TSKEY keyNext = tsdbNextIterKey(pCommitIter->pIter);
+ ASSERT(keyNext < 0 || keyNext > pIdx->maxKey);
+#endif
+
return 0;
}
@@ -1393,7 +1489,7 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
int *blkIdx) {
STsdbCfg * pCfg = &(pHelper->pRepo->config);
STable * pTable = pCommitIter->pTable;
- SCompIdx * pIdx = pHelper->pCompIdx + TABLE_TID(pTable);
+ SCompIdx * pIdx = &(pHelper->curCompIdx);
SCompBlock compBlock = {0};
TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter);
int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5;
@@ -1427,7 +1523,7 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, rows1, pDataCols,
pDataCols0->cols[0].pData, pDataCols0->numOfRows);
ASSERT(rowsRead == rows2 && rowsRead == pDataCols->numOfRows);
- if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.lastF), pDataCols, &compBlock, true, false) < 0) return -1;
+ if (tsdbWriteBlockToFile(pHelper, helperLastF(pHelper), pDataCols, &compBlock, true, false) < 0) return -1;
if (tsdbAddSubBlock(pHelper, &compBlock, tblkIdx, rowsRead) < 0) return -1;
tblkIdx++;
} else {
@@ -1466,15 +1562,16 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
if (rowsRead == 0) break;
ASSERT(rowsRead == pDataCols->numOfRows);
- if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.dataF), pDataCols, &compBlock, false, true) < 0) return -1;
+ if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0) return -1;
if (tsdbInsertSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1;
tblkIdx++;
}
+ ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 ||
+ tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast));
} else {
ASSERT(keyFirst <= blkKeyLast);
int16_t colId = 0;
if (tsdbLoadBlockDataCols(pHelper, pCompBlock, NULL, &colId, 1) < 0) return -1;
- ASSERT(pDataCols0->numOfRows == pCompBlock->numOfRows);
slIter = *(pCommitIter->pIter);
int rows1 = (pCfg->maxRowsPerFileBlock - pCompBlock->numOfRows);
@@ -1483,9 +1580,10 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
if (rows2 == 0) { // all filtered out
*(pCommitIter->pIter) = slIter;
+ ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 ||
+ tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast));
} else {
int rows3 = tsdbLoadDataFromCache(pTable, &slIter, keyLimit, INT_MAX, NULL, NULL, 0) + rows2;
- ASSERT(rows3 >= rows2);
if (pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && rows1 >= rows2) {
int rows = (rows1 >= rows3) ? rows3 : rows2;
@@ -1493,10 +1591,12 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, keyLimit, rows, pDataCols,
pDataCols0->cols[0].pData, pDataCols0->numOfRows);
ASSERT(rowsRead == rows && rowsRead == pDataCols->numOfRows);
- if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.dataF), pDataCols, &compBlock, false, false) < 0)
+ if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, false) < 0)
return -1;
if (tsdbAddSubBlock(pHelper, &compBlock, tblkIdx, rowsRead) < 0) return -1;
tblkIdx++;
+ ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 ||
+ tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast));
} else {
if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1;
int round = 0;
@@ -1506,7 +1606,7 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
tsdbLoadAndMergeFromCache(pDataCols0, &dIter, pCommitIter, pDataCols, keyLimit, defaultRowsInBlock);
if (rowsRead == 0) break;
- if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.dataF), pDataCols, &compBlock, false, true) < 0)
+ if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0)
return -1;
if (round == 0) {
if (tsdbUpdateSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1;
@@ -1517,6 +1617,8 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
round++;
tblkIdx++;
}
+ ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 ||
+ tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast));
}
}
}
@@ -1577,10 +1679,10 @@ static int tsdbWriteBlockToProperFile(SRWHelper *pHelper, SDataCols *pDataCols,
ASSERT(pDataCols->numOfRows > 0);
if (pDataCols->numOfRows >= pCfg->minRowsPerFileBlock) {
- pFile = &(pHelper->files.dataF);
+ pFile = helperDataF(pHelper);
} else {
isLast = true;
- pFile = TSDB_NLAST_FILE_OPENED(pHelper) ? &(pHelper->files.nLastF) : &(pHelper->files.lastF);
+ pFile = TSDB_NLAST_FILE_OPENED(pHelper) ? helperNewLastF(pHelper) : helperLastF(pHelper);
}
ASSERT(pFile->fd > 0);
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index eb34805de4..c82d1f905a 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -21,7 +21,7 @@
#include "tcompare.h"
#include "exception.h"
-#include "../../../query/inc/qast.h" // todo move to common module
+#include "../../query/inc/qAst.h" // todo move to common module
#include "tlosertree.h"
#include "tsdb.h"
#include "tsdbMain.h"
@@ -126,13 +126,13 @@ typedef struct STsdbQueryHandle {
SIOCostSummary cost;
} STsdbQueryHandle;
-static void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle);
-static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
-static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
- SArray* sa);
+static void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle);
+static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
+static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock);
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
-static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win,
- STsdbQueryHandle* pQueryHandle);
+static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win,
+ STsdbQueryHandle* pQueryHandle);
+static int tsdbCheckInfoCompar(const void* key1, const void* key2);
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
pBlockLoadInfo->slot = -1;
@@ -188,8 +188,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
pQueryHandle->allocSize = 0;
if (tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb) != 0) {
- free(pQueryHandle);
- return NULL;
+ goto out_of_memory;
}
tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem);
@@ -201,18 +200,30 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
int32_t numOfCols = pCond->numOfCols;
pQueryHandle->statis = calloc(numOfCols, sizeof(SDataStatis));
+ if (pQueryHandle->statis == NULL) {
+ goto out_of_memory;
+ }
pQueryHandle->pColumns = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array?
+ if (pQueryHandle->pColumns == NULL) {
+ goto out_of_memory;
+ }
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData colInfo = {{0}, 0};
colInfo.info = pCond->colList[i];
colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
+ if (colInfo.pData == NULL) {
+ goto out_of_memory;
+ }
taosArrayPush(pQueryHandle->pColumns, &colInfo);
pQueryHandle->statis[i].colId = colInfo.info.colId;
}
pQueryHandle->pTableCheckInfo = taosArrayInit(groupList->numOfTables, sizeof(STableCheckInfo));
+ if (pQueryHandle->pTableCheckInfo == NULL) {
+ goto out_of_memory;
+ }
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
assert(pMeta != NULL);
@@ -237,15 +248,21 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
taosArrayPush(pQueryHandle->pTableCheckInfo, &info);
}
}
-
+
+ taosArraySort(pQueryHandle->pTableCheckInfo, tsdbCheckInfoCompar);
pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
- tsdbDebug("%p total numOfTable:%zu in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo));
+ tsdbDebug("%p total numOfTable:%zu in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo);
tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo);
tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo);
return (TsdbQueryHandleT) pQueryHandle;
+
+out_of_memory:
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ tsdbCleanupQueryHandle(pQueryHandle);
+ return NULL;
}
TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo) {
@@ -331,7 +348,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
tsdbDebug("%p uid:%" PRId64", tid:%d check data in mem from skey:%" PRId64 ", order:%d, %p", pHandle,
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pHandle->qinfo);
} else {
- tsdbDebug("%p uid:%" PRId64 ", tid:%d no data in mem", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid);
+ tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
+ pHandle->qinfo);
}
if (!imemEmpty) {
@@ -343,7 +361,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
tsdbDebug("%p uid:%" PRId64", tid:%d check data in imem from skey:%" PRId64 ", order:%d, %p", pHandle,
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pHandle->qinfo);
} else {
- tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid);
+ tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
+ pHandle->qinfo);
}
return true;
@@ -354,7 +373,7 @@ static void destroyTableMemIterator(STableCheckInfo* pCheckInfo) {
tSkipListDestroyIter(pCheckInfo->iiter);
}
-SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
+SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order) {
SDataRow rmem = NULL, rimem = NULL;
if (pCheckInfo->iter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
@@ -371,20 +390,35 @@ SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
}
if (rmem != NULL && rimem != NULL) {
- if (dataRowKey(rmem) < dataRowKey(rimem)) {
- pCheckInfo->chosen = 0;
- return rmem;
- } else if (dataRowKey(rmem) == dataRowKey(rimem)) {
- // data ts are duplicated, ignore the data in mem
+ TSKEY r1 = dataRowKey(rmem);
+ TSKEY r2 = dataRowKey(rimem);
+
+ if (r1 == r2) { // data ts are duplicated, ignore the data in mem
tSkipListIterNext(pCheckInfo->iter);
pCheckInfo->chosen = 1;
return rimem;
} else {
- pCheckInfo->chosen = 1;
- return rimem;
+ if (ASCENDING_TRAVERSE(order)) {
+ if (r1 < r2) {
+ pCheckInfo->chosen = 0;
+ return rmem;
+ } else {
+ pCheckInfo->chosen = 1;
+ return rimem;
+ }
+ } else {
+ if (r1 < r2) {
+ pCheckInfo->chosen = 1;
+ return rimem;
+ } else {
+ pCheckInfo->chosen = 0;
+ return rmem;
+ }
+ }
}
}
+ // at least one (rmem or rimem) is absent here
if (rmem != NULL) {
pCheckInfo->chosen = 0;
return rmem;
@@ -398,7 +432,7 @@ SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
return NULL;
}
-static bool moveToNextRow(STableCheckInfo* pCheckInfo) {
+static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) {
bool hasNext = false;
if (pCheckInfo->chosen == 0) {
if (pCheckInfo->iter != NULL) {
@@ -412,19 +446,17 @@ static bool moveToNextRow(STableCheckInfo* pCheckInfo) {
if (pCheckInfo->iiter != NULL) {
return tSkipListIterGet(pCheckInfo->iiter) != NULL;
}
- } else {
- if (pCheckInfo->chosen == 1) {
- if (pCheckInfo->iiter != NULL) {
- hasNext = tSkipListIterNext(pCheckInfo->iiter);
- }
+ } else { //pCheckInfo->chosen == 1
+ if (pCheckInfo->iiter != NULL) {
+ hasNext = tSkipListIterNext(pCheckInfo->iiter);
+ }
- if (hasNext) {
- return hasNext;
- }
+ if (hasNext) {
+ return hasNext;
+ }
- if (pCheckInfo->iter != NULL) {
- return tSkipListIterGet(pCheckInfo->iter) != NULL;
- }
+ if (pCheckInfo->iter != NULL) {
+ return tSkipListIterGet(pCheckInfo->iter) != NULL;
}
}
@@ -445,7 +477,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
initTableMemIterator(pHandle, pCheckInfo);
}
- SDataRow row = getSDataRowInTableMem(pCheckInfo);
+ SDataRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order);
if (row == NULL) {
return false;
}
@@ -540,7 +572,9 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
pCheckInfo->numOfBlocks = 0;
- SCompIdx* compIndex = &pQueryHandle->rhelper.pCompIdx[pCheckInfo->tableId.tid];
+ tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb);
+
+ SCompIdx* compIndex = &pQueryHandle->rhelper.curCompIdx;
// no data block in this file, try next file
if (compIndex->len == 0 || compIndex->numOfBlocks == 0 || compIndex->uid != pCheckInfo->tableId.uid) {
@@ -557,8 +591,6 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
pCheckInfo->compSize = compIndex->len;
}
- tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb);
-
tsdbLoadCompInfo(&(pQueryHandle->rhelper), (void *)(pCheckInfo->pCompInfo));
SCompInfo* pCompInfo = pCheckInfo->pCompInfo;
@@ -650,7 +682,7 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock);
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
- SDataRow row = getSDataRowInTableMem(pCheckInfo);
+ SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order);
TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL;
cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(binfo.rows-1);
@@ -680,7 +712,7 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
}
doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo);
- doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn);
+ doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock);
} else {
/*
* no data in cache, only load data from file
@@ -696,6 +728,7 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
cur->mixBlock = false;
cur->blockCompleted = true;
cur->lastKey = binfo.window.ekey + (ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1);
+ pCheckInfo->lastKey = cur->lastKey;
}
}
@@ -719,7 +752,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
cur->pos = 0;
}
- doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn);
+ doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock);
} else { // the whole block is loaded in to buffer
handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
}
@@ -736,7 +769,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
cur->pos = pBlock->numOfRows - 1;
}
- doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn);
+ doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock);
} else {
handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
}
@@ -892,12 +925,12 @@ static int32_t copyDataFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t cap
pQueryHandle->cur.win.ekey = tsArray[end];
pQueryHandle->cur.lastKey = tsArray[end] + step;
-
+
return numOfRows + num;
}
static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SDataRow row,
- STsdbMeta *pMeta, int32_t numOfCols, STable* pTable) {
+ int32_t numOfCols, STable* pTable) {
char* pData = NULL;
// the schema version info is embeded in SDataRow
@@ -958,8 +991,7 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
// only return the qualified data to client in terms of query time window, data rows in the same block but do not
// be included in the query time window will be discarded
-static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
- SArray* sa) {
+static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock) {
SQueryFilePos* cur = &pQueryHandle->cur;
SDataBlockInfo blockInfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock);
@@ -972,7 +1004,6 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns);
- STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb);
STable* pTable = pCheckInfo->pTableObj;
int32_t endPos = cur->pos;
@@ -1033,7 +1064,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
} else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) {
SSkipListNode* node = NULL;
do {
- SDataRow row = getSDataRowInTableMem(pCheckInfo);
+ SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order);
if (row == NULL) {
break;
}
@@ -1051,7 +1082,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
- copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, pMeta, numOfCols, pTable);
+ copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable);
numOfRows += 1;
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
@@ -1061,9 +1092,9 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
cur->lastKey = key + step;
cur->mixBlock = true;
- moveToNextRow(pCheckInfo);
+ moveToNextRowInMem(pCheckInfo);
} else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it
- moveToNextRow(pCheckInfo);
+ moveToNextRowInMem(pCheckInfo);
} else if ((key > tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if (cur->win.skey == TSKEY_INITIAL_VAL) {
@@ -1072,7 +1103,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
int32_t end = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, key, order);
if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it
- moveToNextRow(pCheckInfo);
+ moveToNextRowInMem(pCheckInfo);
}
int32_t start = -1;
@@ -1376,7 +1407,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
* }
*/
- tsdbDebug("%p %d data blocks sort completed", pQueryHandle, cnt);
+ tsdbDebug("%p %d data blocks sort completed, %p", pQueryHandle, cnt, pQueryHandle->qinfo);
cleanBlockOrderSupporter(&sup, numOfTables);
free(pTree);
@@ -1391,8 +1422,21 @@ static int32_t getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle, bool* ex
int32_t numOfBlocks = 0;
int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
-
+
+ STsdbCfg* pCfg = &pQueryHandle->pTsdb->config;
+ STimeWindow win = TSWINDOW_INITIALIZER;
+
while ((pQueryHandle->pFileGroup = tsdbGetFileGroupNext(&pQueryHandle->fileIter)) != NULL) {
+ tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, pQueryHandle->pFileGroup->fileId, &win.skey, &win.ekey);
+
+    // current file does not overlap with the query time window, ignore remaining files
+ if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) ||
+ (!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
+      tsdbDebug("%p remain files are not qualified for qrange:%"PRId64"-%"PRId64", ignore, %p", pQueryHandle, pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qinfo);
+ pQueryHandle->pFileGroup = NULL;
+ break;
+ }
+
if ((code = getFileCompInfo(pQueryHandle, &numOfBlocks)) != TSDB_CODE_SUCCESS) {
break;
}
@@ -1750,11 +1794,10 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
win->skey = TSKEY_INITIAL_VAL;
int64_t st = taosGetTimestampUs();
- STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb);
STable* pTable = pCheckInfo->pTableObj;
do {
- SDataRow row = getSDataRowInTableMem(pCheckInfo);
+ SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order);
if (row == NULL) {
break;
}
@@ -1772,14 +1815,14 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
}
win->ekey = key;
- copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, pMeta, numOfCols, pTable);
+ copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, numOfCols, pTable);
if (++numOfRows >= maxRowsToRead) {
- moveToNextRow(pCheckInfo);
+ moveToNextRowInMem(pCheckInfo);
break;
}
- } while(moveToNextRow(pCheckInfo));
+ } while(moveToNextRowInMem(pCheckInfo));
assert(numOfRows <= maxRowsToRead);
@@ -1869,7 +1912,6 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows;
}
- // todo opt perf
SColumnInfo* pColInfo = taosArrayGet(pHandle->pColumns, i);
if (pColInfo->type == TSDB_DATA_TYPE_TIMESTAMP) {
pHandle->statis[i].min = pBlockInfo->compBlock->keyFirst;
@@ -1961,43 +2003,20 @@ static void destroyHelper(void* param) {
free(param);
}
-#define TAG_INVALID_COLUMN_INDEX -2
-static int32_t getTagColumnIndex(STSchema* pTSchema, SSchema* pSchema) {
- // filter on table name(TBNAME)
- if (strcasecmp(pSchema->name, TSQL_TBNAME_L) == 0) {
- return TSDB_TBNAME_COLUMN_INDEX;
- }
-
- for(int32_t i = 0; i < schemaNCols(pTSchema); ++i) {
- STColumn* pColumn = &pTSchema->columns[i];
- if (pColumn->bytes == pSchema->bytes && pColumn->type == pSchema->type && pColumn->colId == pSchema->colId) {
- return i;
- }
- }
-
- return -2;
-}
-
void filterPrepare(void* expr, void* param) {
tExprNode* pExpr = (tExprNode*)expr;
if (pExpr->_node.info != NULL) {
return;
}
- int32_t i = 0;
pExpr->_node.info = calloc(1, sizeof(tQueryInfo));
- STSchema* pTSSchema = (STSchema*) param;
-
+ STSchema* pTSSchema = (STSchema*) param;
tQueryInfo* pInfo = pExpr->_node.info;
tVariant* pCond = pExpr->_node.pRight->pVal;
SSchema* pSchema = pExpr->_node.pLeft->pSchema;
- int32_t index = getTagColumnIndex(pTSSchema, pSchema);
- assert((index >= 0 && i < TSDB_MAX_TAGS) || (index == TSDB_TBNAME_COLUMN_INDEX) || index == TAG_INVALID_COLUMN_INDEX);
-
pInfo->sch = *pSchema;
- pInfo->colIndex = index;
pInfo->optr = pExpr->_node.optr;
pInfo->compare = getComparFunc(pSchema->type, pInfo->optr);
pInfo->param = pTSSchema;
@@ -2143,7 +2162,7 @@ bool indexedNodeFilterFp(const void* pNode, void* param) {
char* val = NULL;
- if (pInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
val = (char*) TABLE_NAME(pTable);
} else {
val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
@@ -2369,28 +2388,31 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
return;
}
- size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
- for (int32_t i = 0; i < size; ++i) {
- STableCheckInfo* pTableCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
- destroyTableMemIterator(pTableCheckInfo);
+ if (pQueryHandle->pTableCheckInfo != NULL) {
+ size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
+ for (int32_t i = 0; i < size; ++i) {
+ STableCheckInfo* pTableCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
+ destroyTableMemIterator(pTableCheckInfo);
- if (pTableCheckInfo->pDataCols != NULL) {
- tfree(pTableCheckInfo->pDataCols->buf);
+ if (pTableCheckInfo->pDataCols != NULL) {
+ tfree(pTableCheckInfo->pDataCols->buf);
+ }
+
+ tfree(pTableCheckInfo->pDataCols);
+ tfree(pTableCheckInfo->pCompInfo);
}
-
- tfree(pTableCheckInfo->pDataCols);
- tfree(pTableCheckInfo->pCompInfo);
+ taosArrayDestroy(pQueryHandle->pTableCheckInfo);
}
- taosArrayDestroy(pQueryHandle->pTableCheckInfo);
+ if (pQueryHandle->pColumns != NULL) {
+ size_t cols = taosArrayGetSize(pQueryHandle->pColumns);
+ for (int32_t i = 0; i < cols; ++i) {
+ SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
+ tfree(pColInfo->pData);
+ }
+ taosArrayDestroy(pQueryHandle->pColumns);
+ }
- size_t cols = taosArrayGetSize(pQueryHandle->pColumns);
- for (int32_t i = 0; i < cols; ++i) {
- SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
- tfree(pColInfo->pData);
- }
-
- taosArrayDestroy(pQueryHandle->pColumns);
taosArrayDestroy(pQueryHandle->defaultLoadColumn);
tfree(pQueryHandle->pDataBlockInfo);
tfree(pQueryHandle->statis);
@@ -2430,3 +2452,13 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
taosArrayDestroy(pGroupList->pGroupList);
}
+static int tsdbCheckInfoCompar(const void* key1, const void* key2) {
+ if (((STableCheckInfo*)key1)->tableId.tid < ((STableCheckInfo*)key2)->tableId.tid) {
+ return -1;
+ } else if (((STableCheckInfo*)key1)->tableId.tid > ((STableCheckInfo*)key2)->tableId.tid) {
+ return 1;
+ } else {
+ ASSERT(false);
+ return 0;
+ }
+}
\ No newline at end of file
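
The most substantive change in this file is that getSDataRowInTableMem now takes the traversal order into account when choosing between the mem and imem copies of a row: duplicate timestamps still discard the mem copy, but otherwise an ascending scan takes the smaller key first while a descending scan takes the larger one. A minimal standalone sketch of that selection rule, using plain int64_t keys and a hypothetical pick_row() helper instead of the real SDataRow/skip-list machinery:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 0 = take the mem row, 1 = take the imem row. Mirrors the order-aware
     * rule added to getSDataRowInTableMem: equal keys always favor imem,
     * otherwise ascending scans pick the smaller key and descending scans
     * pick the larger one. */
    static int pick_row(int64_t memKey, int64_t imemKey, bool ascending) {
      if (memKey == imemKey) {
        return 1;                               /* duplicate ts: drop the mem copy */
      }
      if (ascending) {
        return (memKey < imemKey) ? 0 : 1;
      }
      return (memKey < imemKey) ? 1 : 0;
    }

    int main(void) {
      printf("%d\n", pick_row(100, 200, true));   /* 0: mem row comes first when ascending  */
      printf("%d\n", pick_row(100, 200, false));  /* 1: imem row comes first when descending */
      printf("%d\n", pick_row(150, 150, true));   /* 1: equal keys prefer imem               */
      return 0;
    }

The same order flag is threaded through every caller shown in the hunks above (hasMoreDataInCache, handleDataMergeIfNeeded, doMergeTwoLevelData, tsdbReadRowsFromCache), so the merge of cached and on-disk rows stays consistent for descending queries.
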
diff --git a/src/util/inc/tkvstore.h b/src/util/inc/tkvstore.h
index 346e567c41..6d67607e24 100644
--- a/src/util/inc/tkvstore.h
+++ b/src/util/inc/tkvstore.h
@@ -25,10 +25,11 @@ typedef int (*iterFunc)(void *, void *cont, int contLen);
typedef void (*afterFunc)(void *);
typedef struct {
- int64_t size; // including 512 bytes of header size
- int64_t tombSize;
- int64_t nRecords;
- int64_t nDels;
+ int64_t size; // including 512 bytes of header size
+ int64_t tombSize;
+ int64_t nRecords;
+ int64_t nDels;
+ uint32_t magic;
} SStoreInfo;
typedef struct {
@@ -45,6 +46,8 @@ typedef struct {
SStoreInfo info;
} SKVStore;
+#define KVSTORE_MAGIC(s) (s)->info.magic
+
int tdCreateKVStore(char *fname);
int tdDestroyKVStore(char *fname);
SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH);
diff --git a/src/util/inc/tutil.h b/src/util/inc/tutil.h
index f7c69e3973..34f35c3807 100644
--- a/src/util/inc/tutil.h
+++ b/src/util/inc/tutil.h
@@ -35,12 +35,12 @@ extern "C" {
#define WCHAR wchar_t
#define tfree(x) \
- { \
+ do { \
if (x) { \
free((void *)(x)); \
x = 0; \
} \
- }
+ } while(0);
#define tstrncpy(dst, src, size) \
do { \
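
The tfree rewrite above moves the body into a do { ... } while(0) wrapper, the standard way to make a multi-statement macro behave like a single statement. A small self-contained illustration of why the wrapper matters (SAFE_FREE is a hypothetical stand-in, not part of this patch):

    #include <stdlib.h>

    /* A brace-only macro such as  #define BAD_FREE(x) { free(x); (x) = NULL; }
     * breaks  if (cond) BAD_FREE(p); else ...  because the caller's semicolon
     * after the closing brace ends the if statement and orphans the else.
     * Wrapping the body in do/while(0) keeps it a single statement. */
    #define SAFE_FREE(x)       \
      do {                     \
        if (x) {               \
          free((void *)(x));   \
          (x) = NULL;          \
        }                      \
      } while (0)

    int main(void) {
      char *p = malloc(16);
      if (p != NULL)
        SAFE_FREE(p);   /* expands to exactly one statement, so the else below is legal */
      else
        return 1;
      return 0;
    }

Note that the committed macro still ends in while(0); — with that trailing semicolon a call like tfree(x); expands to two statements, so the un-braced if/else case above would still not compile. The conventional form omits the macro's own semicolon and lets the caller's semicolon complete the statement.
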
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index 74acb7ce35..d561e8ba5f 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -308,38 +308,47 @@ bool taosReadGlobalCfg() {
sprintf(fileName, "%s/taos.cfg", configDir);
FILE* fp = fopen(fileName, "r");
+ if (fp == NULL) {
+ struct stat s;
+ if (stat(configDir, &s) != 0 || (!S_ISREG(s.st_mode) && !S_ISLNK(s.st_mode))) {
+      // return true to keep the behavior from before config-file fallback support
+ return true;
+ }
+ fp = fopen(configDir, "r");
+ if (fp == NULL) {
+ return false;
+ }
+ }
size_t len = 1024;
line = calloc(1, len);
- if (fp != NULL) {
- while (!feof(fp)) {
- memset(line, 0, len);
+ while (!feof(fp)) {
+ memset(line, 0, len);
- option = value = NULL;
- olen = vlen = 0;
+ option = value = NULL;
+ olen = vlen = 0;
- getline(&line, &len, fp);
- line[len - 1] = 0;
-
- paGetToken(line, &option, &olen);
- if (olen == 0) continue;
- option[olen] = 0;
+ getline(&line, &len, fp);
+ line[len - 1] = 0;
+
+ paGetToken(line, &option, &olen);
+ if (olen == 0) continue;
+ option[olen] = 0;
- paGetToken(option + olen + 1, &value, &vlen);
- if (vlen == 0) continue;
- value[vlen] = 0;
+ paGetToken(option + olen + 1, &value, &vlen);
+ if (vlen == 0) continue;
+ value[vlen] = 0;
- // For dataDir, the format is:
- // dataDir /mnt/disk1 0
- paGetToken(value + vlen + 1, &value1, &vlen1);
-
- taosReadConfigOption(option, value);
- }
-
- fclose(fp);
+ // For dataDir, the format is:
+ // dataDir /mnt/disk1 0
+ paGetToken(value + vlen + 1, &value1, &vlen1);
+
+ taosReadConfigOption(option, value);
}
+ fclose(fp);
+
tfree(line);
return true;
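
One side note on the reflowed read loop: POSIX getline() returns the number of bytes it read (or -1 at end of file) and may reallocate the buffer, in which case the size argument is updated to the new buffer capacity rather than the length of the line. A minimal sketch of the usual pattern, independent of this patch (the taos.cfg path is just a placeholder):

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
      FILE *fp = fopen("taos.cfg", "r");
      if (fp == NULL) return 1;

      char   *line = NULL;
      size_t  cap  = 0;     /* buffer capacity, grown by getline() as needed */
      ssize_t n;            /* bytes actually read, including the newline    */

      while ((n = getline(&line, &cap, fp)) != -1) {
        if (n > 0 && line[n - 1] == '\n') {
          line[n - 1] = '\0';               /* strip newline using the read length */
        }
        printf("config line: %s\n", line);
      }

      free(line);
      fclose(fp);
      return 0;
    }
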
diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c
index ab2aa738c6..0704285da5 100644
--- a/src/util/src/tkvstore.c
+++ b/src/util/src/tkvstore.c
@@ -34,6 +34,7 @@
#define TD_KVSTORE_MAINOR_VERSION 0
#define TD_KVSTORE_SNAP_SUFFIX ".snap"
#define TD_KVSTORE_NEW_SUFFIX ".new"
+#define TD_KVSTORE_INIT_MAGIC 0xFFFFFFFF
typedef struct {
uint64_t uid;
@@ -140,6 +141,7 @@ SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH
if (tdLoadKVStoreHeader(pStore->fd, pStore->fname, &info) < 0) goto _err;
pStore->info.size = TD_KVSTORE_HEADER_SIZE;
+ pStore->info.magic = info.magic;
if (tdRestoreKVStore(pStore) < 0) goto _err;
@@ -251,6 +253,8 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
return -1;
}
+ pStore->info.magic =
+ taosCalcChecksum(pStore->info.magic, (uint8_t *)POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM)), sizeof(TSCKSUM));
pStore->info.size += (sizeof(SKVRecord) + contLen);
SKVRecord *pRecord = taosHashGet(pStore->map, (void *)&uid, sizeof(uid));
if (pRecord != NULL) { // just to insert
@@ -288,6 +292,7 @@ int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid) {
return -1;
}
+ pStore->info.magic = taosCalcChecksum(pStore->info.magic, (uint8_t *)buf, POINTER_DISTANCE(pBuf, buf));
pStore->info.size += POINTER_DISTANCE(pBuf, buf);
pStore->info.nDels++;
pStore->info.nRecords--;
@@ -371,7 +376,7 @@ static int tdUpdateKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo) {
}
static int tdInitKVStoreHeader(int fd, char *fname) {
- SStoreInfo info = {TD_KVSTORE_HEADER_SIZE, 0, 0, 0};
+ SStoreInfo info = {TD_KVSTORE_HEADER_SIZE, 0, 0, 0, TD_KVSTORE_INIT_MAGIC};
return tdUpdateKVStoreHeader(fd, fname, &info);
}
@@ -382,6 +387,7 @@ static int tdEncodeStoreInfo(void **buf, SStoreInfo *pInfo) {
tlen += taosEncodeVariantI64(buf, pInfo->tombSize);
tlen += taosEncodeVariantI64(buf, pInfo->nRecords);
tlen += taosEncodeVariantI64(buf, pInfo->nDels);
+ tlen += taosEncodeFixedU32(buf, pInfo->magic);
return tlen;
}
@@ -391,6 +397,7 @@ static void *tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo) {
buf = taosDecodeVariantI64(buf, &(pInfo->tombSize));
buf = taosDecodeVariantI64(buf, &(pInfo->nRecords));
buf = taosDecodeVariantI64(buf, &(pInfo->nDels));
+ buf = taosDecodeFixedU32(buf, &(pInfo->magic));
return buf;
}
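
The new magic field gives the KV store a running fingerprint of its contents: tdInitKVStoreHeader seeds it with TD_KVSTORE_INIT_MAGIC, every tdUpdateKVStoreRecord / tdDropKVStoreRecord folds bytes from the change (the record's trailing checksum on update, the encoded drop record on delete) into the previous value via taosCalcChecksum, and the result is persisted through tdEncodeStoreInfo/tdDecodeStoreInfo. A simplified sketch of that folding pattern with a toy stand-in hash (fold32 is hypothetical; the real code uses taosCalcChecksum):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for a CRC-style rolling update: previous value in,
     * bytes of the change in, new value out. Only the shape matters here. */
    static uint32_t fold32(uint32_t prev, const uint8_t *buf, size_t len) {
      uint32_t h = prev;
      for (size_t i = 0; i < len; i++) {
        h = (h ^ buf[i]) * 16777619u;   /* FNV-1a style mixing step */
      }
      return h;
    }

    int main(void) {
      uint32_t magic = 0xFFFFFFFFu;                /* role of TD_KVSTORE_INIT_MAGIC */

      uint8_t rec1[] = {0x01, 0x02, 0x03, 0x04};   /* pretend record checksum bytes */
      uint8_t rec2[] = {0xAA, 0xBB, 0xCC, 0xDD};   /* pretend drop-record bytes     */

      magic = fold32(magic, rec1, sizeof(rec1));   /* update path */
      magic = fold32(magic, rec2, sizeof(rec2));   /* drop path   */

      printf("store magic: 0x%08X\n", magic);
      return 0;
    }

Two stores that applied the same sequence of updates and drops end up with the same magic, which is presumably what makes it useful as a cheap consistency check via the KVSTORE_MAGIC(s) accessor.
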
diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c
index 3764df4afc..7b73b1e17c 100644
--- a/src/util/src/tutil.c
+++ b/src/util/src/tutil.c
@@ -522,7 +522,7 @@ int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstP
void getTmpfilePath(const char *fileNamePrefix, char *dstPath) {
const char* tdengineTmpFileNamePrefix = "tdengine-";
- char tmpPath[PATH_MAX] = {0};
+ char tmpPath[PATH_MAX];
#ifdef WINDOWS
char *tmpDir = getenv("tmp");
diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt
index 9f66eba37a..b0b5d3013b 100644
--- a/src/util/tests/CMakeLists.txt
+++ b/src/util/tests/CMakeLists.txt
@@ -11,5 +11,5 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(utilTest ${SOURCE_LIST})
- TARGET_LINK_LIBRARIES(utilTest tutil common gtest pthread)
-ENDIF()
\ No newline at end of file
+ TARGET_LINK_LIBRARIES(utilTest tutil common gtest pthread gcov)
+ENDIF()
diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c
new file mode 100644
index 0000000000..759e16d1de
--- /dev/null
+++ b/tests/examples/c/apitest.c
@@ -0,0 +1,474 @@
+// sample code to verify all TDengine API
+// to compile: gcc -o apitest apitest.c -ltaos
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <taos.h>
+
+
+static void prepare_data(TAOS* taos) {
+ taos_query(taos, "drop database if exists test;");
+ usleep(100000);
+ taos_query(taos, "create database test;");
+ usleep(100000);
+ taos_select_db(taos, "test");
+
+ taos_query(taos, "create table meters(ts timestamp, a int) tags(area int);");
+
+ taos_query(taos, "create table t0 using meters tags(0);");
+ taos_query(taos, "create table t1 using meters tags(1);");
+ taos_query(taos, "create table t2 using meters tags(2);");
+ taos_query(taos, "create table t3 using meters tags(3);");
+ taos_query(taos, "create table t4 using meters tags(4);");
+ taos_query(taos, "create table t5 using meters tags(5);");
+ taos_query(taos, "create table t6 using meters tags(6);");
+ taos_query(taos, "create table t7 using meters tags(7);");
+ taos_query(taos, "create table t8 using meters tags(8);");
+ taos_query(taos, "create table t9 using meters tags(9);");
+
+ TAOS_RES* res = taos_query(taos, "insert into t0 values('2020-01-01 00:00:00.000', 0)"
+ " ('2020-01-01 00:01:00.000', 0)"
+ " ('2020-01-01 00:02:00.000', 0)"
+ " t1 values('2020-01-01 00:00:00.000', 0)"
+ " ('2020-01-01 00:01:00.000', 0)"
+ " ('2020-01-01 00:02:00.000', 0)"
+ " ('2020-01-01 00:03:00.000', 0)"
+ " t2 values('2020-01-01 00:00:00.000', 0)"
+ " ('2020-01-01 00:01:00.000', 0)"
+ " ('2020-01-01 00:01:01.000', 0)"
+ " ('2020-01-01 00:01:02.000', 0)"
+ " t3 values('2020-01-01 00:01:02.000', 0)"
+ " t4 values('2020-01-01 00:01:02.000', 0)"
+ " t5 values('2020-01-01 00:01:02.000', 0)"
+ " t6 values('2020-01-01 00:01:02.000', 0)"
+ " t7 values('2020-01-01 00:01:02.000', 0)"
+ " t8 values('2020-01-01 00:01:02.000', 0)"
+ " t9 values('2020-01-01 00:01:02.000', 0)");
+ int affected = taos_affected_rows(res);
+ if (affected != 18) {
+ printf("\033[31m%d rows affected by last insert statement, but it should be 18\033[0m\n", affected);
+ }
+ // super tables subscription
+ usleep(1000000);
+}
+
+
+static int print_result(TAOS_RES* res, int blockFetch) {
+ TAOS_ROW row = NULL;
+ int num_fields = taos_num_fields(res);
+ TAOS_FIELD* fields = taos_fetch_fields(res);
+ int nRows = 0;
+
+ if (blockFetch) {
+ int rows = 0;
+ while ((rows = taos_fetch_block(res, &row))) {
+ for (int i = 0; i < rows; i++) {
+ char temp[256];
+ taos_print_row(temp, row + i, fields, num_fields);
+ puts(temp);
+ }
+ nRows += rows;
+ }
+ } else {
+ while ((row = taos_fetch_row(res))) {
+ char temp[256];
+ taos_print_row(temp, row, fields, num_fields);
+ puts(temp);
+ nRows++;
+ }
+ }
+
+ printf("%d rows consumed.\n", nRows);
+ return nRows;
+}
+
+
+static void check_row_count(int line, TAOS_RES* res, int expected) {
+ int actual = print_result(res, expected % 2);
+ if (actual != expected) {
+ printf("\033[31mline %d: row count mismatch, expected: %d, actual: %d\033[0m\n", line, expected, actual);
+ } else {
+ printf("line %d: %d rows consumed as expected\n", line, actual);
+ }
+}
+
+
+static void verify_query(TAOS* taos) {
+ prepare_data(taos);
+
+ int code = taos_load_table_info(taos, "t0,t1,t2,t3,t4,t5,t6,t7,t8,t9");
+ if (code != 0) {
+ printf("\033[31mfailed to load table info: 0x%08x\033[0m\n", code);
+ }
+
+ code = taos_validate_sql(taos, "select * from nonexisttable");
+ if (code == 0) {
+ printf("\033[31mimpossible, the table does not exists\033[0m\n");
+ }
+
+ code = taos_validate_sql(taos, "select * from meters");
+ if (code != 0) {
+ printf("\033[31mimpossible, the table does exists: 0x%08x\033[0m\n", code);
+ }
+
+ TAOS_RES* res = taos_query(taos, "select * from meters");
+ check_row_count(__LINE__, res, 18);
+ printf("result precision is: %d\n", taos_result_precision(res));
+ int c = taos_field_count(res);
+ printf("field count is: %d\n", c);
+ int* lengths = taos_fetch_lengths(res);
+ for (int i = 0; i < c; i++) {
+ printf("length of column %d is %d\n", i, lengths[i]);
+ }
+ taos_free_result(res);
+
+ res = taos_query(taos, "select * from t0");
+ check_row_count(__LINE__, res, 3);
+ taos_free_result(res);
+
+ res = taos_query(taos, "select * from nonexisttable");
+ code = taos_errno(res);
+ printf("code=%d, error msg=%s\n", code, taos_errstr(res));
+ taos_free_result(res);
+
+ res = taos_query(taos, "select * from meters");
+ taos_stop_query(res);
+}
+
+
+void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+ int rows = print_result(res, *(int*)param);
+ printf("%d rows consumed in subscribe_callback\n", rows);
+}
+
+static void verify_subscribe(TAOS* taos) {
+ prepare_data(taos);
+
+ TAOS_SUB* tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
+ TAOS_RES* res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 18);
+
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 0);
+
+ taos_query(taos, "insert into t0 values('2020-01-01 00:02:00.001', 0);");
+ taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0);");
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 2);
+
+ taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0);");
+ taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0);");
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 2);
+
+ taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0);");
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 1);
+
+ // keep progress information and restart subscription
+ taos_unsubscribe(tsub, 1);
+ taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0);");
+ tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0);
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 24);
+
+ // keep progress information and continue previous subscription
+ taos_unsubscribe(tsub, 1);
+ tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 0);
+
+ // don't keep progress information and continue previous subscription
+ taos_unsubscribe(tsub, 0);
+ tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 24);
+
+ // single meter subscription
+
+ taos_unsubscribe(tsub, 0);
+ tsub = taos_subscribe(taos, 0, "test", "select * from t0;", NULL, NULL, 0);
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 5);
+
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 0);
+
+ taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0);");
+ res = taos_consume(tsub);
+ check_row_count(__LINE__, res, 1);
+
+ taos_unsubscribe(tsub, 0);
+
+ int blockFetch = 0;
+ tsub = taos_subscribe(taos, 1, "test", "select * from meters;", subscribe_callback, &blockFetch, 1000);
+ usleep(2000000);
+ taos_query(taos, "insert into t0 values('2020-01-01 00:05:00.001', 0);");
+ usleep(2000000);
+ taos_unsubscribe(tsub, 0);
+}
+
+
+void verify_prepare(TAOS* taos) {
+ TAOS_RES* result = taos_query(taos, "drop database if exists test;");
+ usleep(100000);
+ taos_query(taos, "create database test;");
+
+ int code = taos_errno(result);
+ if (code != 0) {
+ printf("\033[31mfailed to create database, reason:%s\033[0m\n", taos_errstr(result));
+ taos_free_result(result);
+ return;
+ }
+ taos_free_result(result);
+
+ usleep(100000);
+ taos_select_db(taos, "test");
+
+ // create table
+ const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))";
+ result = taos_query(taos, sql);
+ code = taos_errno(result);
+ if (code != 0) {
+ printf("\033[31mfailed to create table, reason:%s\033[0m\n", taos_errstr(result));
+ taos_free_result(result);
+ return;
+ }
+ taos_free_result(result);
+
+ // insert 10 records
+ struct {
+ int64_t ts;
+ int8_t b;
+ int8_t v1;
+ int16_t v2;
+ int32_t v4;
+ int64_t v8;
+ float f4;
+ double f8;
+ char bin[40];
+ char blob[80];
+ } v = {0};
+
+ TAOS_STMT* stmt = taos_stmt_init(taos);
+ TAOS_BIND params[10];
+ params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ params[0].buffer_length = sizeof(v.ts);
+ params[0].buffer = &v.ts;
+  params[0].length = &params[0].buffer_length;
+ params[0].is_null = NULL;
+
+ params[1].buffer_type = TSDB_DATA_TYPE_BOOL;
+ params[1].buffer_length = sizeof(v.b);
+ params[1].buffer = &v.b;
+  params[1].length = &params[1].buffer_length;
+ params[1].is_null = NULL;
+
+ params[2].buffer_type = TSDB_DATA_TYPE_TINYINT;
+ params[2].buffer_length = sizeof(v.v1);
+ params[2].buffer = &v.v1;
+  params[2].length = &params[2].buffer_length;
+ params[2].is_null = NULL;
+
+ params[3].buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ params[3].buffer_length = sizeof(v.v2);
+ params[3].buffer = &v.v2;
+  params[3].length = &params[3].buffer_length;
+ params[3].is_null = NULL;
+
+ params[4].buffer_type = TSDB_DATA_TYPE_INT;
+ params[4].buffer_length = sizeof(v.v4);
+ params[4].buffer = &v.v4;
+  params[4].length = &params[4].buffer_length;
+ params[4].is_null = NULL;
+
+ params[5].buffer_type = TSDB_DATA_TYPE_BIGINT;
+ params[5].buffer_length = sizeof(v.v8);
+ params[5].buffer = &v.v8;
+  params[5].length = &params[5].buffer_length;
+ params[5].is_null = NULL;
+
+ params[6].buffer_type = TSDB_DATA_TYPE_FLOAT;
+ params[6].buffer_length = sizeof(v.f4);
+ params[6].buffer = &v.f4;
+  params[6].length = &params[6].buffer_length;
+ params[6].is_null = NULL;
+
+ params[7].buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ params[7].buffer_length = sizeof(v.f8);
+ params[7].buffer = &v.f8;
+  params[7].length = &params[7].buffer_length;
+ params[7].is_null = NULL;
+
+ params[8].buffer_type = TSDB_DATA_TYPE_BINARY;
+ params[8].buffer_length = sizeof(v.bin);
+ params[8].buffer = v.bin;
+  params[8].length = &params[8].buffer_length;
+ params[8].is_null = NULL;
+
+ strcpy(v.blob, "一二三四五六七八九十");
+ params[9].buffer_type = TSDB_DATA_TYPE_NCHAR;
+ params[9].buffer_length = strlen(v.blob);
+ params[9].buffer = v.blob;
+  params[9].length = &params[9].buffer_length;
+ params[9].is_null = NULL;
+
+ int is_null = 1;
+
+ sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?)";
+ code = taos_stmt_prepare(stmt, sql, 0);
+ if (code != 0){
+ printf("\033[31mfailed to execute taos_stmt_prepare. code:0x%x\033[0m\n", code);
+ }
+ v.ts = 1591060628000;
+ for (int i = 0; i < 10; ++i) {
+ v.ts += 1;
+ for (int j = 1; j < 10; ++j) {
+ params[j].is_null = ((i == j) ? &is_null : 0);
+ }
+ v.b = (int8_t)i % 2;
+ v.v1 = (int8_t)i;
+ v.v2 = (int16_t)(i * 2);
+ v.v4 = (int32_t)(i * 4);
+ v.v8 = (int64_t)(i * 8);
+ v.f4 = (float)(i * 40);
+ v.f8 = (double)(i * 80);
+ for (int j = 0; j < sizeof(v.bin) - 1; ++j) {
+ v.bin[j] = (char)(i + '0');
+ }
+
+ taos_stmt_bind_param(stmt, params);
+ taos_stmt_add_batch(stmt);
+ }
+ if (taos_stmt_execute(stmt) != 0) {
+ printf("\033[31mfailed to execute insert statement.\033[0m\n");
+ return;
+ }
+ taos_stmt_close(stmt);
+
+ // query the records
+ stmt = taos_stmt_init(taos);
+ taos_stmt_prepare(stmt, "SELECT * FROM m1 WHERE v1 > ? AND v2 < ?", 0);
+ v.v1 = 5;
+ v.v2 = 15;
+ taos_stmt_bind_param(stmt, params + 2);
+ if (taos_stmt_execute(stmt) != 0) {
+ printf("\033[31mfailed to execute select statement.\033[0m\n");
+ return;
+ }
+
+ result = taos_stmt_use_result(stmt);
+
+ TAOS_ROW row;
+ int rows = 0;
+ int num_fields = taos_num_fields(result);
+ TAOS_FIELD *fields = taos_fetch_fields(result);
+ char temp[256];
+
+ // fetch the records row by row
+ while ((row = taos_fetch_row(result))) {
+ rows++;
+ taos_print_row(temp, row, fields, num_fields);
+ printf("%s\n", temp);
+ }
+
+ taos_free_result(result);
+ taos_stmt_close(stmt);
+}
+
+void retrieve_callback(void *param, TAOS_RES *tres, int numOfRows)
+{
+ if (numOfRows > 0) {
+ printf("%d rows async retrieved\n", numOfRows);
+ taos_fetch_rows_a(tres, retrieve_callback, param);
+ } else {
+ if (numOfRows < 0) {
+ printf("\033[31masync retrieve failed, code: %d\033[0m\n", numOfRows);
+ } else {
+ printf("async retrieve completed\n");
+ }
+ taos_free_result(tres);
+ }
+}
+
+void select_callback(void *param, TAOS_RES *tres, int code)
+{
+ if (code == 0 && tres) {
+ taos_fetch_rows_a(tres, retrieve_callback, param);
+ } else {
+ printf("\033[31masync select failed, code: %d\033[0m\n", code);
+ }
+}
+
+void verify_async(TAOS* taos) {
+ prepare_data(taos);
+ taos_query_a(taos, "select * from meters", select_callback, NULL);
+ usleep(1000000);
+}
+
+void stream_callback(void *param, TAOS_RES *res, TAOS_ROW row) {
+ int num_fields = taos_num_fields(res);
+ TAOS_FIELD* fields = taos_fetch_fields(res);
+
+ printf("got one row from stream_callback\n");
+ char temp[256];
+ taos_print_row(temp, row, fields, num_fields);
+ puts(temp);
+}
+
+void verify_stream(TAOS* taos) {
+ prepare_data(taos);
+ TAOS_STREAM* strm = taos_open_stream(
+ taos,
+ "select count(*) from meters interval(1m)",
+ stream_callback,
+ 0,
+ NULL,
+ NULL);
+ printf("waiting for stream data\n");
+ usleep(100000);
+ taos_query(taos, "insert into t0 values(now, 0)(now+5s,1)(now+10s, 2);");
+ usleep(200000000);
+ taos_close_stream(strm);
+}
+
+int main(int argc, char *argv[]) {
+ const char* host = "127.0.0.1";
+ const char* user = "root";
+ const char* passwd = "taosdata";
+
+ taos_options(TSDB_OPTION_TIMEZONE, "GMT-8");
+ taos_init();
+
+ TAOS* taos = taos_connect(host, user, passwd, "", 0);
+ if (taos == NULL) {
+ printf("\033[31mfailed to connect to db, reason:%s\033[0m\n", taos_errstr(taos));
+ exit(1);
+ }
+
+ char* info = taos_get_server_info(taos);
+ printf("server info: %s\n", info);
+ info = taos_get_client_info(taos);
+ printf("client info: %s\n", info);
+
+ printf("************ verify query *************\n");
+ verify_query(taos);
+
+ printf("********* verify async query **********\n");
+ verify_async(taos);
+
+ printf("*********** verify subscribe ************\n");
+ verify_subscribe(taos);
+
+ printf("************ verify prepare *************\n");
+ verify_prepare(taos);
+
+ printf("************ verify stream *************\n");
+ verify_stream(taos);
+ printf("done\n");
+
+ taos_close(taos);
+ taos_cleanup();
+}
\ No newline at end of file
diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile
index 0a4b8ee9d2..f9653c9c96 100644
--- a/tests/examples/c/makefile
+++ b/tests/examples/c/makefile
@@ -4,7 +4,6 @@
ROOT=./
TARGET=exe
LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt
-#LFLAGS = '-Wl,-rpath,/home/zbm/project/td/debug/build/lib/' -L/home/zbm/project/td/debug/build/lib -ltaos -lpthread -lm -lrt
CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 \
-I/usr/local/taos/include -std=gnu99
@@ -16,6 +15,7 @@ exe:
gcc $(CFLAGS) ./prepare.c -o $(ROOT)/prepare $(LFLAGS)
gcc $(CFLAGS) ./stream.c -o $(ROOT)/stream $(LFLAGS)
gcc $(CFLAGS) ./subscribe.c -o $(ROOT)subscribe $(LFLAGS)
+ gcc $(CFLAGS) ./apitest.c -o $(ROOT)apitest $(LFLAGS)
clean:
rm $(ROOT)/asyncdemo
diff --git a/tests/examples/python/read_example.py b/tests/examples/python/read_example.py
index a96115dfb2..73c4b95214 100644
--- a/tests/examples/python/read_example.py
+++ b/tests/examples/python/read_example.py
@@ -29,7 +29,7 @@ if __name__ == '__main__':
# Create a database named db
try:
- c1.execute('create database db')
+ c1.execute('create database if not exists db ')
except Exception as err:
conn.close()
raise(err)
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 9d1aef0dc5..904e712fa4 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -144,6 +144,7 @@ python3 ./test.py -f query/querySort.py
python3 ./test.py -f query/queryJoin.py
python3 ./test.py -f query/select_last_crash.py
python3 ./test.py -f query/queryNullValueTest.py
+python3 ./test.py -f query/queryInsertValue.py
#stream
python3 ./test.py -f stream/metric_1.py
@@ -161,3 +162,22 @@ python3 ./test.py -f client/client.py
# Misc
python3 testCompress.py
python3 testNoCompress.py
+
+# functions
+python3 ./test.py -f functions/function_avg.py
+python3 ./test.py -f functions/function_bottom.py
+python3 ./test.py -f functions/function_count.py
+python3 ./test.py -f functions/function_diff.py
+python3 ./test.py -f functions/function_first.py
+python3 ./test.py -f functions/function_last.py
+python3 ./test.py -f functions/function_last_row.py
+python3 ./test.py -f functions/function_leastsquares.py
+python3 ./test.py -f functions/function_max.py
+python3 ./test.py -f functions/function_min.py
+python3 ./test.py -f functions/function_operations.py
+python3 ./test.py -f functions/function_percentile.py
+python3 ./test.py -f functions/function_spread.py
+python3 ./test.py -f functions/function_stddev.py
+python3 ./test.py -f functions/function_sum.py
+python3 ./test.py -f functions/function_top.py
+python3 ./test.py -f functions/function_twa.py
\ No newline at end of file
diff --git a/tests/pytest/functions/function_avg.py b/tests/pytest/functions/function_avg.py
new file mode 100644
index 0000000000..9481550ba3
--- /dev/null
+++ b/tests/pytest/functions/function_avg.py
@@ -0,0 +1,73 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+ intData.append(i + 1)
+ floatData.append(i + 0.1)
+
+        # average verification
+ tdSql.error("select avg(ts) from test")
+ tdSql.error("select avg(ts) from test1")
+ tdSql.error("select avg(col7) from test")
+ tdSql.error("select avg(col7) from test1")
+ tdSql.error("select avg(col8) from test")
+ tdSql.error("select avg(col8) from test1")
+ tdSql.error("select avg(col9) from test")
+ tdSql.error("select avg(col9) from test1")
+
+ tdSql.query("select avg(col1) from test")
+ tdSql.checkData(0, 0, np.average(intData))
+ tdSql.query("select avg(col2) from test")
+ tdSql.checkData(0, 0, np.average(intData))
+ tdSql.query("select avg(col3) from test")
+ tdSql.checkData(0, 0, np.average(intData))
+ tdSql.query("select avg(col4) from test")
+ tdSql.checkData(0, 0, np.average(intData))
+ tdSql.query("select avg(col5) from test")
+ tdSql.checkData(0, 0, np.average(floatData))
+ tdSql.query("select avg(col6) from test")
+ tdSql.checkData(0, 0, np.average(floatData))
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_bottom.py b/tests/pytest/functions/function_bottom.py
new file mode 100644
index 0000000000..4074166f92
--- /dev/null
+++ b/tests/pytest/functions/function_bottom.py
@@ -0,0 +1,93 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+
+        # bottom verification
+ tdSql.error("select bottom(ts, 10) from test")
+ tdSql.error("select bottom(col1, 0) from test")
+ tdSql.error("select bottom(col1, 101) from test")
+ tdSql.error("select bottom(col2, 0) from test")
+ tdSql.error("select bottom(col2, 101) from test")
+ tdSql.error("select bottom(col3, 0) from test")
+ tdSql.error("select bottom(col3, 101) from test")
+ tdSql.error("select bottom(col4, 0) from test")
+ tdSql.error("select bottom(col4, 101) from test")
+ tdSql.error("select bottom(col5, 0) from test")
+ tdSql.error("select bottom(col5, 101) from test")
+ tdSql.error("select bottom(col6, 0) from test")
+ tdSql.error("select bottom(col6, 101) from test")
+ tdSql.error("select bottom(col7, 10) from test")
+ tdSql.error("select bottom(col8, 10) from test")
+ tdSql.error("select bottom(col9, 10) from test")
+
+ tdSql.query("select bottom(col1, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 2)
+
+ tdSql.query("select bottom(col2, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 2)
+
+ tdSql.query("select bottom(col3, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 2)
+
+ tdSql.query("select bottom(col4, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 2)
+
+ tdSql.query("select bottom(col5, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 0.1)
+ tdSql.checkData(1, 1, 1.1)
+
+ tdSql.query("select bottom(col6, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 0.1)
+ tdSql.checkData(1, 1, 1.1)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_count.py b/tests/pytest/functions/function_count.py
new file mode 100644
index 0000000000..4795a6b2df
--- /dev/null
+++ b/tests/pytest/functions/function_count.py
@@ -0,0 +1,79 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+
+        # Count verification
+ tdSql.query("select count(*) from test")
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select count(ts) from test")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(col1) from test")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(col2) from test")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(col3) from test")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(col4) from test")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(col5) from test")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(col6) from test")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(col7) from test")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(col8) from test")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(col9) from test")
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.execute("alter table test add column col10 int")
+ tdSql.query("select count(col10) from test")
+ tdSql.checkRows(0)
+
+ tdSql.execute("insert into test1 values(now, 1, 2, 3, 4, 1.1, 2.2, false, 'test', 'test' 1)")
+ tdSql.query("select count(col10) from test")
+ tdSql.checkData(0, 0, 1)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_diff.py b/tests/pytest/functions/function_diff.py
new file mode 100644
index 0000000000..b6f496d7d6
--- /dev/null
+++ b/tests/pytest/functions/function_diff.py
@@ -0,0 +1,99 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ')" % (self.ts - 1))
+
+        # diff verification
+ tdSql.query("select diff(col1) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select diff(col2) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select diff(col3) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select diff(col4) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select diff(col5) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select diff(col6) from test1")
+ tdSql.checkRows(0)
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+
+ tdSql.error("select diff(ts) from test")
+ tdSql.error("select diff(ts) from test1")
+ tdSql.error("select diff(col1) from test")
+ tdSql.error("select diff(col2) from test")
+ tdSql.error("select diff(col3) from test")
+ tdSql.error("select diff(col4) from test")
+ tdSql.error("select diff(col5) from test")
+ tdSql.error("select diff(col6) from test")
+ tdSql.error("select diff(col7) from test")
+ tdSql.error("select diff(col7) from test1")
+ tdSql.error("select diff(col8) from test")
+ tdSql.error("select diff(col8) from test1")
+ tdSql.error("select diff(col9) from test")
+ tdSql.error("select diff(col9) from test1")
+
+ tdSql.query("select diff(col1) from test1")
+ tdSql.checkRows(10)
+
+ tdSql.query("select diff(col2) from test1")
+ tdSql.checkRows(10)
+
+ tdSql.query("select diff(col3) from test1")
+ tdSql.checkRows(10)
+
+ tdSql.query("select diff(col4) from test1")
+ tdSql.checkRows(10)
+
+ tdSql.query("select diff(col5) from test1")
+ tdSql.checkRows(10)
+
+ tdSql.query("select diff(col6) from test1")
+ tdSql.checkRows(10)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_first.py b/tests/pytest/functions/function_first.py
new file mode 100644
index 0000000000..2b78bd33d8
--- /dev/null
+++ b/tests/pytest/functions/function_first.py
@@ -0,0 +1,119 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))
+
+        # first verification
+ tdSql.query("select first(*) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, None)
+
+ tdSql.query("select first(col1) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select first(col2) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select first(col3) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select first(col4) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select first(col5) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select first(col6) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select first(col7) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select first(col8) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select first(col9) from test1")
+ tdSql.checkRows(0)
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+
+ tdSql.query("select first(*) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, 1)
+
+ tdSql.query("select first(col1) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 1)
+
+ tdSql.query("select first(col2) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 1)
+
+ tdSql.query("select first(col3) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 1)
+
+ tdSql.query("select first(col4) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 1)
+
+ tdSql.query("select first(col5) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 0.1)
+
+ tdSql.query("select first(col6) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 0.1)
+
+ tdSql.query("select first(col7) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, False)
+
+ tdSql.query("select first(col8) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 'taosdata1')
+
+ tdSql.query("select first(col9) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '涛思数据1')
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_last.py b/tests/pytest/functions/function_last.py
new file mode 100644
index 0000000000..337c18a28c
--- /dev/null
+++ b/tests/pytest/functions/function_last.py
@@ -0,0 +1,119 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))
+
+        # last verification
+ tdSql.query("select last(*) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, None)
+
+ tdSql.query("select last(col1) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select last(col2) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select last(col3) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select last(col4) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select last(col5) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select last(col6) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select last(col7) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select last(col8) from test1")
+ tdSql.checkRows(0)
+
+ tdSql.query("select last(col9) from test1")
+ tdSql.checkRows(0)
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+
+ tdSql.query("select last(*) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, 10)
+
+ tdSql.query("select last(col1) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select last(col2) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select last(col3) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select last(col4) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select last(col5) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 9.1)
+
+ tdSql.query("select last(col6) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 9.1)
+
+ tdSql.query("select last(col7) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, True)
+
+ tdSql.query("select last(col8) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 'taosdata10')
+
+ tdSql.query("select last(col9) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '涛思数据10')
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_last_row.py b/tests/pytest/functions/function_last_row.py
new file mode 100644
index 0000000000..ea5cf661eb
--- /dev/null
+++ b/tests/pytest/functions/function_last_row.py
@@ -0,0 +1,128 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))
+
+ # last_row verification
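+ # unlike last(), the checks below expect last_row(col) to return the newest
+ # row even when its value is NULL, so each column query yields one None row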
+ tdSql.query("select last_row(*) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, None)
+
+ tdSql.query("select last_row(col1) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, None)
+
+ tdSql.query("select last_row(col2) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, None)
+
+ tdSql.query("select last_row(col3) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, None)
+
+ tdSql.query("select last_row(col4) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, None)
+
+ tdSql.query("select last_row(col5) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, None)
+
+ tdSql.query("select last_row(col6) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, None)
+
+ tdSql.query("select last_row(col7) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, None)
+
+ tdSql.query("select last_row(col8) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, None)
+
+ tdSql.query("select last_row(col9) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, None)
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+
+ tdSql.query("select last_row(*) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, 10)
+
+ tdSql.query("select last_row(col1) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select last_row(col2) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select last_row(col3) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select last_row(col4) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select last_row(col5) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 9.1)
+
+ tdSql.query("select last_row(col6) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 9.1)
+
+ tdSql.query("select last_row(col7) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, True)
+
+ tdSql.query("select last_row(col8) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 'taosdata10')
+
+ tdSql.query("select last_row(col9) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '涛思数据10')
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_leastsquares.py b/tests/pytest/functions/function_leastsquares.py
new file mode 100644
index 0000000000..0ef0a2b786
--- /dev/null
+++ b/tests/pytest/functions/function_leastsquares.py
@@ -0,0 +1,75 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+
+ # leastsquares verification
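+ # the checks below expect leastsquares to be rejected on the super table and
+ # on the timestamp/bool/binary/nchar columns, and to return the formatted
+ # "{slop:..., intercept:...}" string for the numeric columns of test1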
+ tdSql.error("select leastsquares(ts, 1, 1) from test1")
+ tdSql.error("select leastsquares(col1, 1, 1) from test")
+ tdSql.error("select leastsquares(col2, 1, 1) from test")
+ tdSql.error("select leastsquares(col3, 1, 1) from test")
+ tdSql.error("select leastsquares(col4, 1, 1) from test")
+ tdSql.error("select leastsquares(col5, 1, 1) from test")
+ tdSql.error("select leastsquares(col6, 1, 1) from test")
+ tdSql.error("select leastsquares(col7, 1, 1) from test1")
+ tdSql.error("select leastsquares(col8, 1, 1) from test1")
+ tdSql.error("select leastsquares(col9, 1, 1) from test1")
+
+ tdSql.query("select leastsquares(col1, 1, 1) from test1")
+ tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
+
+ tdSql.query("select leastsquares(col2, 1, 1) from test1")
+ tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
+
+ tdSql.query("select leastsquares(col3, 1, 1) from test1")
+ tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
+
+ tdSql.query("select leastsquares(col4, 1, 1) from test1")
+ tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
+
+ tdSql.query("select leastsquares(col5, 1, 1) from test1")
+ tdSql.checkData(0, 0, '{slop:1.000000, intercept:-0.900000}')
+
+ tdSql.query("select leastsquares(col6, 1, 1) from test1")
+ tdSql.checkData(0, 0, '{slop:1.000000, intercept:-0.900000}')
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_max.py b/tests/pytest/functions/function_max.py
new file mode 100644
index 0000000000..3bd5031276
--- /dev/null
+++ b/tests/pytest/functions/function_max.py
@@ -0,0 +1,78 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+ intData.append(i + 1)
+ floatData.append(i + 0.1)
+
+ # max verification
+ tdSql.error("select max(ts) from test")
+ tdSql.error("select max(ts) from test1")
+ tdSql.error("select max(col7) from test")
+ tdSql.error("select max(col7) from test1")
+ tdSql.error("select max(col8) from test")
+ tdSql.error("select max(col8) from test1")
+ tdSql.error("select max(col9) from test")
+ tdSql.error("select max(col9) from test1")
+
+ tdSql.query("select max(col1) from test1")
+ tdSql.checkData(0, 0, np.max(intData))
+
+ tdSql.query("select max(col2) from test1")
+ tdSql.checkData(0, 0, np.max(intData))
+
+ tdSql.query("select max(col3) from test1")
+ tdSql.checkData(0, 0, np.max(intData))
+
+ tdSql.query("select max(col4) from test1")
+ tdSql.checkData(0, 0, np.max(intData))
+
+ tdSql.query("select max(col5) from test1")
+ tdSql.checkData(0, 0, np.max(floatData))
+
+ tdSql.query("select max(col6) from test1")
+ tdSql.checkData(0, 0, np.max(floatData))
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_min.py b/tests/pytest/functions/function_min.py
new file mode 100644
index 0000000000..bc180bc224
--- /dev/null
+++ b/tests/pytest/functions/function_min.py
@@ -0,0 +1,78 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+ intData.append(i + 1)
+ floatData.append(i + 0.1)
+
+ # min verification
+ tdSql.error("select min(ts) from test")
+ tdSql.error("select min(ts) from test1")
+ tdSql.error("select min(col7) from test")
+ tdSql.error("select min(col7) from test1")
+ tdSql.error("select min(col8) from test")
+ tdSql.error("select min(col8) from test1")
+ tdSql.error("select min(col9) from test")
+ tdSql.error("select min(col9) from test1")
+
+ tdSql.query("select min(col1) from test1")
+ tdSql.checkData(0, 0, np.min(intData))
+
+ tdSql.query("select min(col2) from test1")
+ tdSql.checkData(0, 0, np.min(intData))
+
+ tdSql.query("select min(col3) from test1")
+ tdSql.checkData(0, 0, np.min(intData))
+
+ tdSql.query("select min(col4) from test1")
+ tdSql.checkData(0, 0, np.min(intData))
+
+ tdSql.query("select min(col5) from test1")
+ tdSql.checkData(0, 0, np.min(floatData))
+
+ tdSql.query("select min(col6) from test1")
+ tdSql.checkData(0, 0, np.min(floatData))
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_operations.py b/tests/pytest/functions/function_operations.py
new file mode 100644
index 0000000000..36810621cb
--- /dev/null
+++ b/tests/pytest/functions/function_operations.py
@@ -0,0 +1,81 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+
+ # arithmetic operations verification
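+ # the error checks below expect arithmetic mixing timestamp, bool, binary or
+ # nchar operands to be rejected; after a row is inserted with only col1 set,
+ # expressions that reference the missing columns are expected to return None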
+ tdSql.error("select ts + col1 from test")
+ tdSql.error("select ts + col1 from test1")
+ tdSql.error("select col1 + col7 from test")
+ tdSql.error("select col1 + col7 from test1")
+ tdSql.error("select col1 + col8 from test")
+ tdSql.error("select col1 + col8 from test1")
+ tdSql.error("select col1 + col9 from test")
+ tdSql.error("select col1 + col9 from test1")
+
+ tdSql.query("select col1 + col2 from test1")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, 2.0)
+
+ tdSql.query("select col1 + col2 * col3 from test1")
+ tdSql.checkRows(10)
+ tdSql.checkData(1, 0, 6.0)
+
+ tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 from test1")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, 3.2)
+
+ tdSql.execute("insert into test1(ts, col1) values(%d, 11)" % (self.ts + 11))
+ tdSql.query("select col1 + col2 from test1")
+ tdSql.checkRows(11)
+ tdSql.checkData(10, 0, None)
+
+ tdSql.query("select col1 + col2 * col3 from test1")
+ tdSql.checkRows(11)
+ tdSql.checkData(10, 0, None)
+
+ tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 from test1")
+ tdSql.checkRows(11)
+ tdSql.checkData(10, 0, None)
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_percentile.py b/tests/pytest/functions/function_percentile.py
new file mode 100644
index 0000000000..aaeb94372e
--- /dev/null
+++ b/tests/pytest/functions/function_percentile.py
@@ -0,0 +1,140 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20))''')
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+ intData.append(i + 1)
+ floatData.append(i + 0.1)
+
+ # percentile verification
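+ # numeric results are checked against numpy.percentile; for the float column
+ # col5 the values are only printed rather than compared exactly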
+ tdSql.error("select percentile(ts 20) from test")
+ tdSql.error("select apercentile(ts 20) from test")
+ tdSql.error("select percentile(col7 20) from test")
+ tdSql.error("select apercentile(col7 20) from test")
+ tdSql.error("select percentile(col8 20) from test")
+ tdSql.error("select apercentile(col8 20) from test")
+ tdSql.error("select percentile(col9 20) from test")
+ tdSql.error("select apercentile(col9 20) from test")
+
+ tdSql.query("select percentile(col1, 0) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 0))
+ tdSql.query("select apercentile(col1, 0) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col1, 50) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 50))
+ tdSql.query("select apercentile(col1, 50) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col1, 100) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 100))
+ tdSql.query("select apercentile(col1, 100) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+
+ tdSql.query("select percentile(col2, 0) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 0))
+ tdSql.query("select apercentile(col2, 0) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col2, 50) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 50))
+ tdSql.query("select apercentile(col2, 50) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col2, 100) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 100))
+ tdSql.query("select apercentile(col2, 100) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+
+ tdSql.query("select percentile(col3, 0) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 0))
+ tdSql.query("select apercentile(col3, 0) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col3, 50) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 50))
+ tdSql.query("select apercentile(col3, 50) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col3, 100) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 100))
+ tdSql.query("select apercentile(col3, 100) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+
+ tdSql.query("select percentile(col4, 0) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 0))
+ tdSql.query("select apercentile(col4, 0) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col4, 50) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 50))
+ tdSql.query("select apercentile(col4, 50) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col4, 100) from test")
+ tdSql.checkData(0, 0, np.percentile(intData, 100))
+ tdSql.query("select apercentile(col4, 100) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+
+ tdSql.query("select percentile(col5, 0) from test")
+ print("query result: %s" % tdSql.getData(0, 0))
+ print("array result: %s" % np.percentile(floatData, 0))
+ tdSql.query("select apercentile(col5, 0) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col5, 50) from test")
+ print("query result: %s" % tdSql.getData(0, 0))
+ print("array result: %s" % np.percentile(floatData, 50))
+ tdSql.query("select apercentile(col5, 50) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col5, 100) from test")
+ print("query result: %s" % tdSql.getData(0, 0))
+ print("array result: %s" % np.percentile(floatData, 100))
+ tdSql.query("select apercentile(col5, 100) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+
+ tdSql.query("select percentile(col6, 0) from test")
+ tdSql.checkData(0, 0, np.percentile(floatData, 0))
+ tdSql.query("select apercentile(col6, 0) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col6, 50) from test")
+ tdSql.checkData(0, 0, np.percentile(floatData, 50))
+ tdSql.query("select apercentile(col6, 50) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+ tdSql.query("select percentile(col6, 100) from test")
+ tdSql.checkData(0, 0, np.percentile(floatData, 100))
+ tdSql.query("select apercentile(col6, 100) from test")
+ print("apercentile result: %s" % tdSql.getData(0, 0))
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_spread.py b/tests/pytest/functions/function_spread.py
new file mode 100644
index 0000000000..4629335973
--- /dev/null
+++ b/tests/pytest/functions/function_spread.py
@@ -0,0 +1,106 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ')" % (self.ts - 1))
+
+ # spread verification
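+ # with only the single all-zero row inserted, the checks below expect
+ # spread() of every numeric column to be 0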
+ tdSql.query("select spread(ts) from test1")
+ tdSql.checkRows(1)
+
+ tdSql.query("select spread(col1) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 0)
+
+ tdSql.query("select spread(col2) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 0)
+
+ tdSql.query("select spread(col3) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 0)
+
+ tdSql.query("select spread(col4) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 0)
+
+ tdSql.query("select spread(col5) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 0)
+
+ tdSql.query("select spread(col6) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 0)
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+
+ tdSql.error("select spread(col7) from test")
+ tdSql.error("select spread(col7) from test1")
+ tdSql.error("select spread(col8) from test")
+ tdSql.error("select spread(col8) from test1")
+ tdSql.error("select spread(col9) from test")
+ tdSql.error("select spread(col9) from test1")
+
+ tdSql.query("select spread(col1) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select spread(col2) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select spread(col3) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select spread(col4) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 10)
+
+ tdSql.query("select spread(col5) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 9.1)
+
+ tdSql.query("select spread(col6) from test1")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 9.1)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_stddev.py b/tests/pytest/functions/function_stddev.py
new file mode 100644
index 0000000000..23df415aa3
--- /dev/null
+++ b/tests/pytest/functions/function_stddev.py
@@ -0,0 +1,80 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+ intData.append(i + 1)
+ floatData.append(i + 0.1)
+
+ # stddev verification
+ tdSql.error("select stddev(ts) from test1")
+ tdSql.error("select stddev(col1) from test")
+ tdSql.error("select stddev(col2) from test")
+ tdSql.error("select stddev(col3) from test")
+ tdSql.error("select stddev(col4) from test")
+ tdSql.error("select stddev(col5) from test")
+ tdSql.error("select stddev(col6) from test")
+ tdSql.error("select stddev(col7) from test1")
+ tdSql.error("select stddev(col8) from test1")
+ tdSql.error("select stddev(col9) from test1")
+
+ tdSql.query("select stddev(col1) from test1")
+ tdSql.checkData(0, 0, np.std(intData))
+
+ tdSql.query("select stddev(col2) from test1")
+ tdSql.checkData(0, 0, np.std(intData))
+
+ tdSql.query("select stddev(col3) from test1")
+ tdSql.checkData(0, 0, np.std(intData))
+
+ tdSql.query("select stddev(col4) from test1")
+ tdSql.checkData(0, 0, np.std(intData))
+
+ tdSql.query("select stddev(col5) from test1")
+ tdSql.checkData(0, 0, np.std(floatData))
+
+ tdSql.query("select stddev(col6) from test1")
+ tdSql.checkData(0, 0, np.std(floatData))
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_sum.py b/tests/pytest/functions/function_sum.py
new file mode 100644
index 0000000000..ef5ddfe315
--- /dev/null
+++ b/tests/pytest/functions/function_sum.py
@@ -0,0 +1,69 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+ intData.append(i + 1)
+ floatData.append(i + 0.1)
+
+ # sum verification
+ tdSql.error("select sum(ts) from test")
+ tdSql.error("select sum(col7) from test")
+ tdSql.error("select sum(col8) from test")
+ tdSql.error("select sum(col9) from test")
+
+ tdSql.query("select sum(col1) from test")
+ tdSql.checkData(0, 0, np.sum(intData))
+ tdSql.query("select sum(col2) from test")
+ tdSql.checkData(0, 0, np.sum(intData))
+ tdSql.query("select sum(col3) from test")
+ tdSql.checkData(0, 0, np.sum(intData))
+ tdSql.query("select sum(col4) from test")
+ tdSql.checkData(0, 0, np.sum(intData))
+ tdSql.query("select sum(col5) from test")
+ tdSql.checkData(0, 0, np.sum(floatData))
+ tdSql.query("select sum(col6) from test")
+ tdSql.checkData(0, 0, np.sum(floatData))
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_top.py b/tests/pytest/functions/function_top.py
new file mode 100644
index 0000000000..e24ff1cc53
--- /dev/null
+++ b/tests/pytest/functions/function_top.py
@@ -0,0 +1,98 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+ intData.append(i + 1)
+ floatData.append(i + 0.1)
+
+ # top verification
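+ # the error checks below expect top() to reject a k of 0 or 101 as well as
+ # timestamp, bool, binary and nchar columns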
+ tdSql.error("select top(ts, 10) from test")
+ tdSql.error("select top(col1, 0) from test")
+ tdSql.error("select top(col1, 101) from test")
+ tdSql.error("select top(col2, 0) from test")
+ tdSql.error("select top(col2, 101) from test")
+ tdSql.error("select top(col3, 0) from test")
+ tdSql.error("select top(col3, 101) from test")
+ tdSql.error("select top(col4, 0) from test")
+ tdSql.error("select top(col4, 101) from test")
+ tdSql.error("select top(col5, 0) from test")
+ tdSql.error("select top(col5, 101) from test")
+ tdSql.error("select top(col6, 0) from test")
+ tdSql.error("select top(col6, 101) from test")
+ tdSql.error("select top(col7, 10) from test")
+ tdSql.error("select top(col8, 10) from test")
+ tdSql.error("select top(col9, 10) from test")
+
+ tdSql.query("select top(col1, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 9)
+ tdSql.checkData(1, 1, 10)
+
+ tdSql.query("select top(col2, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 9)
+ tdSql.checkData(1, 1, 10)
+
+ tdSql.query("select top(col3, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 9)
+ tdSql.checkData(1, 1, 10)
+
+ tdSql.query("select top(col4, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 9)
+ tdSql.checkData(1, 1, 10)
+
+ tdSql.query("select top(col5, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 8.1)
+ tdSql.checkData(1, 1, 9.1)
+
+ tdSql.query("select top(col6, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 8.1)
+ tdSql.checkData(1, 1, 9.1)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_twa.py b/tests/pytest/functions/function_twa.py
new file mode 100644
index 0000000000..1ce4c99b60
--- /dev/null
+++ b/tests/pytest/functions/function_twa.py
@@ -0,0 +1,135 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+ intData.append(i + 1)
+ floatData.append(i + 0.1)
+
+ # twa verification
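+ # per the checks below, twa() is expected to fail unless the where clause
+ # bounds ts on both sides; only the range-bounded queries are run as valid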
+ tdSql.error("select twa(ts) from test")
+ tdSql.error("select twa(ts) from test1")
+
+ tdSql.error("select twa(col1) from test")
+ tdSql.error("select twa(col1) from test1")
+
+ tdSql.error("select twa(col2) from test")
+ tdSql.error("select twa(col2) from test1")
+
+ tdSql.error("select twa(col3) from test")
+ tdSql.error("select twa(col3) from test1")
+
+ tdSql.error("select twa(col4) from test")
+ tdSql.error("select twa(col4) from test1")
+
+ tdSql.error("select twa(col5) from test")
+ tdSql.error("select twa(col5) from test1")
+
+ tdSql.error("select twa(col6) from test")
+ tdSql.error("select twa(col6) from test1")
+
+ tdSql.error("select twa(col7) from test")
+ tdSql.error("select twa(col7) from test1")
+
+ tdSql.error("select twa(col8) from test")
+ tdSql.error("select twa(col8) from test1")
+
+ tdSql.error("select twa(col9) from test")
+ tdSql.error("select twa(col9) from test1")
+
+ tdSql.error("select twa(col1) from test where ts > %d" % self.ts)
+ tdSql.error("select twa(col1) from test1 where ts > %d" % self.ts)
+
+ tdSql.error("select twa(col2) from test where ts > %d" % self.ts)
+ tdSql.error("select twa(col2) from test1 where ts > %d" % self.ts)
+
+ tdSql.error("select twa(col3) from test where ts > %d" % self.ts)
+ tdSql.error("select twa(col3) from test1 where ts > %d" % self.ts)
+
+ tdSql.error("select twa(col4) from test where ts > %d" % self.ts)
+ tdSql.error("select twa(col4) from test1 where ts > %d" % self.ts)
+
+ tdSql.error("select twa(col5) from test where ts > %d" % self.ts)
+ tdSql.error("select twa(col5) from test1 where ts > %d" % self.ts)
+
+ tdSql.error("select twa(col6) from test where ts > %d" % self.ts)
+ tdSql.error("select twa(col6) from test1 where ts > %d" % self.ts)
+
+ tdSql.error("select twa(col1) from test where ts < %d" % (self.ts + self.rowNum))
+ tdSql.error("select twa(col1) from test1 where ts < %d" % (self.ts + self.rowNum))
+
+ tdSql.error("select twa(col2) from test where ts < %d" % (self.ts + self.rowNum))
+ tdSql.error("select twa(col2) from test1 where ts < %d" % (self.ts + self.rowNum))
+
+ tdSql.error("select twa(col3) from test where ts < %d" % (self.ts + self.rowNum))
+ tdSql.error("select twa(col3) from test1 where ts < %d" % (self.ts + self.rowNum))
+
+ tdSql.error("select twa(col4) from test where ts < %d" % (self.ts + self.rowNum))
+ tdSql.error("select twa(col4) from test1 where ts < %d" % (self.ts + self.rowNum))
+
+ tdSql.error("select twa(col5) from test where ts < %d" % (self.ts + self.rowNum))
+ tdSql.error("select twa(col5) from test1 where ts < %d" % (self.ts + self.rowNum))
+
+ tdSql.error("select twa(col6) from test where ts < %d" % (self.ts + self.rowNum))
+ tdSql.error("select twa(col6) from test1 where ts < %d" % (self.ts + self.rowNum))
+
+ tdSql.query("select twa(col1) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+ tdSql.query("select twa(col1) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
+ tdSql.query("select twa(col2) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+ tdSql.query("select twa(col2) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
+ tdSql.query("select twa(col3) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+ tdSql.query("select twa(col3) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
+ tdSql.query("select twa(col4) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+ tdSql.query("select twa(col4) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
+ tdSql.query("select twa(col5) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+ tdSql.query("select twa(col5) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
+ tdSql.query("select twa(col6) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+ tdSql.query("select twa(col6) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/filterOtherTypes.py b/tests/pytest/query/filterOtherTypes.py
index 1db5497604..5033ffdb48 100644
--- a/tests/pytest/query/filterOtherTypes.py
+++ b/tests/pytest/query/filterOtherTypes.py
@@ -231,10 +231,12 @@ class TDTestCase:
tdSql.error("select * from st where tagcol1 like '____'")
# > for nchar type on tag
- tdSql.error("select * from st where tagcol2 > 'table'")
+ tdSql.query("select * from st where tagcol2 > 'table1'")
+ tdSql.checkRows(5)
# >= for nchar type on tag
- tdSql.error("select * from st where tagcol2 >= 'table'")
+ tdSql.query("select * from st where tagcol2 >= 'table1'")
+ tdSql.checkRows(10)
# = for nchar type on tag
tdSql.query("select * from st where tagcol2 = 'table1'")
@@ -249,10 +251,12 @@ class TDTestCase:
tdSql.checkRows(10)
# > for nchar type on tag
- tdSql.error("select * from st where tagcol2 < 'table'")
+ tdSql.query("select * from st where tagcol2 < 'table'")
+ tdSql.checkRows(0)
# >= for nchar type on tag
- tdSql.error("select * from st where tagcol2 <= 'table'")
+ tdSql.query("select * from st where tagcol2 <= 'table'")
+ tdSql.checkRows(0)
# % for nchar type on tag case 1
tdSql.query("select * from st where tagcol2 like '%'")
@@ -291,10 +295,12 @@ class TDTestCase:
tdSql.checkRows(10)
# > for binary type on tag
- tdSql.error("select * from st where tagcol3 > '表'")
+ tdSql.query("select * from st where tagcol3 > '表'")
+ tdSql.checkRows(10)
# >= for binary type on tag
- tdSql.error("select * from st where tagcol3 >= '表'")
+ tdSql.query("select * from st where tagcol3 >= '表'")
+ tdSql.checkRows(10)
# = for binary type on tag
tdSql.query("select * from st where tagcol3 = '水表'")
@@ -309,10 +315,12 @@ class TDTestCase:
tdSql.checkRows(5)
# > for binary type on tag
- tdSql.error("select * from st where tagcol3 < '水表'")
+ tdSql.query("select * from st where tagcol3 < '水表'")
+ tdSql.checkRows(0)
# >= for binary type on tag
- tdSql.error("select * from st where tagcol3 <= '水表'")
+ tdSql.query("select * from st where tagcol3 <= '水表'")
+ tdSql.checkRows(5)
# % for binary type on tag case 1
tdSql.query("select * from st where tagcol3 like '%'")
diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh
index 24cd93f0fc..61ec491f5d 100755
--- a/tests/pytest/regressiontest.sh
+++ b/tests/pytest/regressiontest.sh
@@ -141,6 +141,7 @@ python3 ./test.py -f query/filterCombo.py
python3 ./test.py -f query/queryNormal.py
python3 ./test.py -f query/select_last_crash.py
python3 ./test.py -f query/queryNullValueTest.py
+python3 ./test.py -f query/queryInsertValue.py
#stream
python3 ./test.py -f stream/stream1.py
@@ -155,3 +156,23 @@ python3 ./test.py -f client/client.py
# Misc
python3 testCompress.py
python3 testNoCompress.py
+
+
+# functions
+python3 ./test.py -f functions/function_avg.py
+python3 ./test.py -f functions/function_bottom.py
+python3 ./test.py -f functions/function_count.py
+python3 ./test.py -f functions/function_diff.py
+python3 ./test.py -f functions/function_first.py
+python3 ./test.py -f functions/function_last.py
+python3 ./test.py -f functions/function_last_row.py
+python3 ./test.py -f functions/function_leastsquares.py
+python3 ./test.py -f functions/function_max.py
+python3 ./test.py -f functions/function_min.py
+python3 ./test.py -f functions/function_operations.py
+python3 ./test.py -f functions/function_percentile.py
+python3 ./test.py -f functions/function_spread.py
+python3 ./test.py -f functions/function_stddev.py
+python3 ./test.py -f functions/function_sum.py
+python3 ./test.py -f functions/function_top.py
+python3 ./test.py -f functions/function_twa.py
diff --git a/tests/pytest/test.py b/tests/pytest/test.py
index a9da8e5671..678bd87336 100644
--- a/tests/pytest/test.py
+++ b/tests/pytest/test.py
@@ -96,7 +96,7 @@ if __name__ == "__main__":
processID = subprocess.check_output(usePortPID, shell=True)
if processID:
- killCmd = "kill -9 %s" % processID
+ killCmd = "kill -TERM %s" % processID
os.system(killCmd)
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index ec39ab61b9..1e1d02959f 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -122,11 +122,16 @@ class TDSql:
return self.cursor.istype(col, dataType)
def checkData(self, row, col, data):
- self.checkRowCol(row, col)
- if self.queryResult[row][col] != data:
- caller = inspect.getframeinfo(inspect.stack()[1][0])
- args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
- tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
+ self.checkRowCol(row, col)
+ if self.queryResult[row][col] != data:
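+ # float results within 1e-6 of the expected value are logged as a match
+ # instead of failing the check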
+ if isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001:
+ tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
+ (self.sql, row, col, self.queryResult[row][col], data))
+ return
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
@@ -135,9 +140,12 @@ class TDSql:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data))
elif isinstance(data, datetime.date):
+ tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+ (self.sql, row, col, self.queryResult[row][col], data))
+ elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data))
- else:
+ else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(self.sql, row, col, self.queryResult[row][col], data))
diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim
index 2f45ab077c..15cc2954e8 100644
--- a/tests/script/fullGeneralSuite.sim
+++ b/tests/script/fullGeneralSuite.sim
@@ -135,7 +135,6 @@ run general/parser/set_tag_vals.sim
#unsupport run general/parser/repeatAlter.sim
#unsupport run general/parser/slimit_alter_tags.sim
#unsupport run general/parser/stream_on_sys.sim
-run general/parser/stream.sim
#unsupport run general/parser/repeatStream.sim
run general/stable/disk.sim
run general/stable/dnode3.sim
@@ -212,12 +211,9 @@ run general/vector/table_mix.sim
run general/vector/table_query.sim
run general/vector/table_time.sim
run general/stream/restart_stream.sim
-run general/stream/stream_1.sim
-run general/stream/stream_2.sim
run general/stream/stream_3.sim
run general/stream/stream_restart.sim
run general/stream/table_1.sim
-run general/stream/metrics_1.sim
run general/stream/table_n.sim
run general/stream/metrics_n.sim
run general/stream/table_del.sim
diff --git a/tests/script/general/alter/dnode.sim b/tests/script/general/alter/dnode.sim
new file mode 100644
index 0000000000..20ce879979
--- /dev/null
+++ b/tests/script/general/alter/dnode.sim
@@ -0,0 +1,71 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/exec.sh -n dnode1 -s start
+
+sleep 3000
+sql connect
+
+print ======== step1
+sql alter dnode 1 resetlog
+sql alter dnode 1 monitor 1
+
+sleep 5000
+sql select * from log.dn
+if $rows <= 0 then
+ return -1
+endi
+
+print ======== step2
+
+sql alter dnode 1 resetquerycache
+sql alter dnode 1 debugFlag 135
+sql alter dnode 1 debugFlag 131
+sql alter dnode 1 monitor 0
+sql alter dnode 1 debugFlag 135
+sql alter dnode 1 monitorDebugFlag 135
+sql alter dnode 1 vDebugFlag 135
+sql alter dnode 1 mDebugFlag 135
+sql alter dnode 1 cDebugFlag 135
+sql alter dnode 1 httpDebugFlag 135
+sql alter dnode 1 qDebugflag 135
+sql alter dnode 1 sdbDebugFlag 135
+sql alter dnode 1 uDebugFlag 135
+sql alter dnode 1 tsdbDebugFlag 135
+sql alter dnode 1 sDebugflag 135
+sql alter dnode 1 rpcDebugFlag 135
+sql alter dnode 1 dDebugFlag 135
+sql alter dnode 1 mqttDebugFlag 135
+sql alter dnode 1 wDebugFlag 135
+sql alter dnode 1 tmrDebugFlag 135
+sql_error alter dnode 2 wDebugFlag 135
+sql_error alter dnode 2 tmrDebugFlag 135
+
+print ======== step3
+sql_error alter $hostname1 debugFlag 135
+sql_error alter $hostname1 monitorDebugFlag 135
+sql_error alter $hostname1 vDebugFlag 135
+sql_error alter $hostname1 mDebugFlag 135
+sql_error alter dnode $hostname2 debugFlag 135
+sql_error alter dnode $hostname2 monitorDebugFlag 135
+sql_error alter dnode $hostname2 vDebugFlag 135
+sql_error alter dnode $hostname2 mDebugFlag 135
+sql alter dnode $hostname1 debugFlag 135
+sql alter dnode $hostname1 monitorDebugFlag 135
+sql alter dnode $hostname1 vDebugFlag 135
+sql alter dnode $hostname1 tmrDebugFlag 131
+
+print ======== step4
+sql_error alter dnode 1 balance 0
+sql_error alter dnode 1 balance vnode:1-dnode:1
+sql_error alter dnode 1 balance "vnode:1"
+sql_error alter dnode 1 balance "vnode:1-dnode:1"
+sql_error alter dnode 1 balance "dnode:1-vnode:1"
+sql_error alter dnode 1 balance "dnode:1-"
+sql_error alter dnode 1 balance "vnode:2-dnod"
+sql alter dnode 1 balance "vnode:2-dnode:1" -x step4
+step4:
+
+print ======= over
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 3b9806558e..2fd5db0165 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -3,6 +3,7 @@ cd ../../../debug; make
./test.sh -f general/alter/cached_schema_after_alter.sim
./test.sh -f general/alter/count.sim
+./test.sh -f general/alter/dnode.sim
./test.sh -f general/alter/import.sim
./test.sh -f general/alter/insert1.sim
./test.sh -f general/alter/insert2.sim
diff --git a/tests/script/sh/exec-no-random-fail.sh b/tests/script/sh/exec-no-random-fail.sh
index 04a663bc5a..2bd0a64923 100755
--- a/tests/script/sh/exec-no-random-fail.sh
+++ b/tests/script/sh/exec-no-random-fail.sh
@@ -88,7 +88,9 @@ if [ "$EXEC_OPTON" = "start" ]; then
echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR
if [ "$SHELL_OPTION" = "true" ]; then
- nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
+ TT=`date +%s`
+ mkdir ${LOG_DIR}/${TT}
+ nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
else
nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 0 > /dev/null 2>&1 &
fi
@@ -99,12 +101,12 @@ else
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
- if [ "$SIGNAL" = "SIGINT" ]; then
- echo try to kill by signal SIGINT
- kill -SIGINT $PID
- else
+ if [ "$SIGNAL" = "SIGKILL" ]; then
echo try to kill by signal SIGKILL
kill -9 $PID
+ else
+ echo try to kill by signal SIGINT
+ kill -SIGINT $PID
fi
sleep 1
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
diff --git a/tests/script/sh/exec-random-fail.sh b/tests/script/sh/exec-random-fail.sh
index a354021684..3761498859 100755
--- a/tests/script/sh/exec-random-fail.sh
+++ b/tests/script/sh/exec-random-fail.sh
@@ -88,9 +88,12 @@ if [ "$EXEC_OPTON" = "start" ]; then
echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR
if [ "$SHELL_OPTION" = "true" ]; then
- nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
+ TT=`date +%s`
+ mkdir ${LOG_DIR}/${TT}
+ nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
else
- nohup $EXE_DIR/taosd -c $CFG_DIR --alloc-random-fail --random-file-fail-factor 5 > /dev/null 2>&1 &
+ nohup $EXE_DIR/taosd -c $CFG_DIR --alloc-random-fail \
+ --random-file-fail-factor 5 > /dev/null 2>&1 &
fi
else
@@ -99,12 +102,12 @@ else
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
- if [ "$SIGNAL" = "SIGINT" ]; then
- echo try to kill by signal SIGINT
- kill -SIGINT $PID
- else
+ if [ "$SIGNAL" = "SIGKILL" ]; then
echo try to kill by signal SIGKILL
kill -9 $PID
+ else
+ echo try to kill by signal SIGINT
+ kill -SIGINT $PID
fi
sleep 1
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
diff --git a/tests/script/sh/exec.sh b/tests/script/sh/exec.sh
index 2f294075a1..1c84a6b10e 100755
--- a/tests/script/sh/exec.sh
+++ b/tests/script/sh/exec.sh
@@ -101,12 +101,12 @@ else
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
- if [ "$SIGNAL" = "SIGINT" ]; then
- echo try to kill by signal SIGINT
- kill -SIGINT $PID
- else
+ if [ "$SIGNAL" = "SIGKILL" ]; then
echo try to kill by signal SIGKILL
kill -9 $PID
+ else
+ echo try to kill by signal SIGINT
+ kill -SIGINT $PID
fi
sleep 1
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`