diff --git a/documentation20/webdocs/markdowndocs/architecture-ch.md b/documentation20/webdocs/markdowndocs/architecture-ch.md
index 7ab4b5d096..a279875649 100644
--- a/documentation20/webdocs/markdowndocs/architecture-ch.md
+++ b/documentation20/webdocs/markdowndocs/architecture-ch.md
@@ -162,7 +162,7 @@ Master Vnode遵循下面的写入流程:
图 3 TDengine Master写入流程
1. Master vnode收到应用的数据插入请求,验证OK,进入下一步;
-2. 如果系统配置参数walLevel打开(设置为2),vnode将把该请求的原始数据包写入数据库日志文件WAL,以保证TDengine能够在断电等因素导致的服务重启时从数据库日志文件中恢复数据,避免数据的丢失;
+2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失;
3. 如果有多个副本,vnode将把数据包转发给同一虚拟节点组内slave vnodes, 该转发包带有数据的版本号(version);
4. 写入内存,并加记录加入到skip list;
5. Master vnode返回确认信息给应用,表示写入成功。
@@ -174,7 +174,7 @@ Master Vnode遵循下面的写入流程:
图 4 TDengine Slave写入流程
1. Slave vnode收到Master vnode转发了的数据插入请求。
-2. 如果系统配置参数walLevl设置为2,vnode将把该请求的原始数据包写入日志(WAL);
+2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失;
3. 写入内存,更新内存中的skip list。
与Master vnode相比,slave vnode不存在转发环节,也不存在回复确认环节,少了两步。但写内存与WAL是完全一样的。
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 57a4cb29c1..5ed80f41a8 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -221,20 +221,18 @@ typedef struct STableDataBlocks {
SParamInfo *params;
} STableDataBlocks;
-//typedef struct SDataBlockList { // todo remove
-// uint32_t nSize;
-// uint32_t nAlloc;
-// STableDataBlocks **pData;
-//} SDataBlockList;
-
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it seperately.
+ uint32_t type; // query/insert type
+ // TODO refactor
char intervalTimeUnit;
char slidingTimeUnit;
- uint32_t type; // query/insert type
STimeWindow window; // query time window
- int64_t intervalTime; // aggregation time interval
+ int64_t intervalTime; // aggregation time window range
int64_t slidingTime; // sliding window in mseconds
+ int64_t intervalOffset;// start offset of each time window
+ int32_t tz; // query client timezone
+
SSqlGroupbyExpr groupbyExpr; // group by tags info
SArray * colList; // SArray
SFieldInfo fieldsInfo;
diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h
index 384d8079a7..c314087179 100644
--- a/src/query/inc/tsqlfunction.h
+++ b/src/query/inc/tsqlfunction.h
@@ -69,6 +69,15 @@ extern "C" {
#define TSDB_FUNC_AVG_IRATE 33
#define TSDB_FUNC_TID_TAG 34
+#define TSDB_FUNC_HISTOGRAM 35
+#define TSDB_FUNC_HLL 36
+#define TSDB_FUNC_MODE 37
+#define TSDB_FUNC_SAMPLE 38
+#define TSDB_FUNC_CEIL 39
+#define TSDB_FUNC_FLOOR 40
+#define TSDB_FUNC_ROUND 41
+#define TSDB_FUNC_MAVG 42
+#define TSDB_FUNC_CSUM 43
#define TSDB_FUNCSTATE_SO 0x1u // single output
#define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 78632023f3..41daed087c 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -35,8 +35,6 @@
* forced to load primary column explicitly.
*/
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0)
-
-
#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)
@@ -1602,11 +1600,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
SColIndex* pIndex = &pSqlFuncMsg->colInfo;
if (TSDB_COL_REQ_NULL(pIndex->flag)) {
- pCtx->requireNull = true;
- pIndex->flag &= ~(TSDB_COL_NULL);
+ pCtx->requireNull = true;
+ pIndex->flag &= ~(TSDB_COL_NULL);
} else {
- pCtx->requireNull = false;
- }
+ pCtx->requireNull = false;
+ }
int32_t index = pSqlFuncMsg->colInfo.colIndex;
if (TSDB_COL_IS_TAG(pIndex->flag)) {
@@ -1927,24 +1925,24 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD
static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); }
// todo refactor, add iterator
-static void doExchangeTimeWindow(SQInfo* pQInfo) {
- size_t t = GET_NUM_OF_TABLEGROUP(pQInfo);
+static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) {
+ size_t t = taosArrayGetSize(pQInfo->tableGroupInfo.pGroupList);
for(int32_t i = 0; i < t; ++i) {
- SArray* p1 = GET_TABLEGROUP(pQInfo, i);
+ SArray* p1 = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i);
- SArray* tableKeyGroup = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i);
size_t len = taosArrayGetSize(p1);
for(int32_t j = 0; j < len; ++j) {
- STableQueryInfo* pTableQueryInfo = (STableQueryInfo*) taosArrayGetP(p1, j);
- SWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, TSKEY);
+ STableKeyInfo* pInfo = taosArrayGet(p1, j);
- STableKeyInfo* pInfo = taosArrayGet(tableKeyGroup, j);
- pInfo->lastKey = pTableQueryInfo->win.skey;
+ // update the new lastkey if it is equalled to the value of the old skey
+ if (pInfo->lastKey == win->ekey) {
+ pInfo->lastKey = win->skey;
+ }
}
}
}
-static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
+static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool stableQuery) {
SQuery* pQuery = pQInfo->runtimeEnv.pQuery;
// in case of point-interpolation query, use asc order scan
@@ -1961,15 +1959,17 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
if (pQuery->window.skey > pQuery->window.ekey) {
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
}
+
return;
}
- if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
+ if (isGroupbyNormalCol(pQuery->pGroupbyExpr) && pQuery->order.order == TSDB_ORDER_DESC) {
pQuery->order.order = TSDB_ORDER_ASC;
if (pQuery->window.skey > pQuery->window.ekey) {
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
}
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
return;
}
@@ -1991,7 +1991,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
- doExchangeTimeWindow(pQInfo);
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
}
pQuery->order.order = TSDB_ORDER_ASC;
@@ -2001,7 +2001,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
- doExchangeTimeWindow(pQInfo);
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
}
pQuery->order.order = TSDB_ORDER_DESC;
@@ -2015,6 +2015,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
}
pQuery->order.order = TSDB_ORDER_ASC;
@@ -2024,6 +2025,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
}
pQuery->order.order = TSDB_ORDER_DESC;
@@ -4449,10 +4451,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
setScanLimitationByResultBuffer(pQuery);
- // NOTE: pTableCheckInfo need to update the query time range and the lastKey info
- // TODO fixme
- changeExecuteScanOrder(pQInfo, isSTableQuery);
-
code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -6022,14 +6020,6 @@ static void doUpdateExprColumnIndex(SQuery *pQuery) {
}
}
-static int compareTableIdInfo(const void* a, const void* b) {
- const STableIdInfo* x = (const STableIdInfo*)a;
- const STableIdInfo* y = (const STableIdInfo*)b;
- if (x->uid > y->uid) return 1;
- if (x->uid < y->uid) return -1;
- return 0;
-}
-
static void freeQInfo(SQInfo *pQInfo);
static void calResultBufSize(SQuery* pQuery) {
@@ -6051,8 +6041,8 @@ static void calResultBufSize(SQuery* pQuery) {
}
}
-static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
- STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols) {
+static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
+ STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery) {
int16_t numOfCols = pQueryMsg->numOfCols;
int16_t numOfOutput = pQueryMsg->numOfOutput;
@@ -6151,8 +6141,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
}
int tableIndex = 0;
- STimeWindow window = pQueryMsg->window;
- taosArraySort(pTableIdList, compareTableIdInfo);
pQInfo->runtimeEnv.interBufSize = getOutputInterResultBufSize(pQuery);
pQInfo->pBuf = calloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo));
@@ -6161,12 +6149,20 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
}
// NOTE: pTableCheckInfo need to update the query time range and the lastKey info
-// changeExecuteScanOrder(pQInfo, stableQuery);
+ pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo));
+ pQInfo->dataReady = QUERY_RESULT_NOT_READY;
+ pthread_mutex_init(&pQInfo->lock, NULL);
+
+ pQuery->pos = -1;
+ pQuery->window = pQueryMsg->window;
+ changeExecuteScanOrder(pQInfo, pQueryMsg, stableQuery);
+
+ STimeWindow window = pQuery->window;
int32_t index = 0;
for(int32_t i = 0; i < numOfGroups; ++i) {
- SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, i);
+ SArray* pa = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i);
size_t s = taosArrayGetSize(pa);
SArray* p1 = taosArrayInit(s, POINTER_BYTES);
@@ -6179,12 +6175,9 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
for(int32_t j = 0; j < s; ++j) {
STableKeyInfo* info = taosArrayGet(pa, j);
- STableId* id = TSDB_TABLEID(info->pTable);
- STableIdInfo* pTableId = taosArraySearch(pTableIdList, id, compareTableIdInfo);
-
- window.skey = (pTableId != NULL)? pTableId->key:pQueryMsg->window.skey;
void* buf = (char*)pQInfo->pBuf + index * sizeof(STableQueryInfo);
+ window.skey = info->lastKey;
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, info->pTable, window, buf);
if (item == NULL) {
goto _cleanup;
@@ -6192,17 +6185,13 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
item->groupIndex = i;
taosArrayPush(p1, &item);
+
+ STableId* id = TSDB_TABLEID(info->pTable);
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES);
index += 1;
}
}
- pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo));
- pQInfo->dataReady = QUERY_RESULT_NOT_READY;
- pthread_mutex_init(&pQInfo->lock, NULL);
-
- pQuery->pos = -1;
- pQuery->window = pQueryMsg->window;
colIdCheck(pQuery);
qDebug("qmsg:%p QInfo:%p created", pQueryMsg, pQInfo);
@@ -6558,7 +6547,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
assert(0);
}
- (*pQInfo) = createQInfoImpl(pQueryMsg, pTableIdList, pGroupbyExpr, pExprs, &tableGroupInfo, pTagColumnInfo);
+ (*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, &tableGroupInfo, pTagColumnInfo, isSTableQuery);
pExprs = NULL;
pGroupbyExpr = NULL;
pTagColumnInfo = NULL;
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index 3b14254fff..e5526647cb 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -378,40 +378,43 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
} else {
// NOTE: remove it from hash in the first place, otherwise, the pNode may have been released by other thread
// when reaches here.
- SCacheDataNode* p = NULL;
- int32_t ret = taosHashRemoveWithData(pCacheObj->pHashTable, pNode->key, pNode->keySize, &p, sizeof(void*));
+ SCacheDataNode *p = NULL;
+ int32_t ret = taosHashRemoveWithData(pCacheObj->pHashTable, pNode->key, pNode->keySize, &p, sizeof(void *));
ref = T_REF_DEC(pNode);
// successfully remove from hash table, if failed, this node must have been move to trash already, do nothing.
// note that the remove operation can be executed only once.
if (ret == 0) {
- if (p != pNode) {
- uDebug("cache:%s, key:%p, successfully removed a new entry:%p, refcnt:%d, prev entry:%p has been removed by others already", pCacheObj->name, pNode->key, p->data, T_REF_VAL_GET(p), pNode->data);
- assert(p->pTNodeHeader == NULL);
- taosAddToTrash(pCacheObj, p);
- } else {
+ if (p != pNode) {
+ uDebug( "cache:%s, key:%p, successfully removed a new entry:%p, refcnt:%d, prev entry:%p has been removed by "
+ "others already", pCacheObj->name, pNode->key, p->data, T_REF_VAL_GET(p), pNode->data);
- uDebug("cache:%s, key:%p, %p successfully removed from hash table, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
- if (ref > 0) {
- assert(pNode->pTNodeHeader == NULL);
+ assert(p->pTNodeHeader == NULL);
+ taosAddToTrash(pCacheObj, p);
+ } else {
+ uDebug("cache:%s, key:%p, %p successfully removed from hash table, refcnt:%d", pCacheObj->name, pNode->key,
+ pNode->data, ref);
+ if (ref > 0) {
+ assert(pNode->pTNodeHeader == NULL);
- taosAddToTrash(pCacheObj, pNode);
- } else { // ref == 0
- atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size);
+ taosAddToTrash(pCacheObj, pNode);
+ } else { // ref == 0
+ atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size);
- int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable);
- uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes",
- pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize);
+ int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable);
+ uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes",
+ pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize);
- if (pCacheObj->freeFp) {
- pCacheObj->freeFp(pNode->data);
+ if (pCacheObj->freeFp) {
+ pCacheObj->freeFp(pNode->data);
+ }
+
+ free(pNode);
}
-
- free(pNode);
}
- }
} else {
- uDebug("cache:%s, key:%p, %p has been removed from hash table by other thread already, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
+ uDebug("cache:%s, key:%p, %p has been removed from hash table by other thread already, refcnt:%d",
+ pCacheObj->name, pNode->key, pNode->data, ref);
}
}
@@ -513,7 +516,7 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
pCacheObj->numOfElemsInTrash++;
__cache_unlock(pCacheObj);
- uDebug("%s key:%p, %p move to trash, numOfElem in trash:%d", pCacheObj->name, pNode->key, pNode->data,
+ uDebug("cache:%s key:%p, %p move to trash, numOfElem in trash:%d", pCacheObj->name, pNode->key, pNode->data,
pCacheObj->numOfElemsInTrash);
}
diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt
index 09523cbfb4..8687a8005d 100644
--- a/src/util/tests/CMakeLists.txt
+++ b/src/util/tests/CMakeLists.txt
@@ -10,6 +10,6 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR})
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
- ADD_EXECUTABLE(utilTest ./cacheTest.cpp ./hashTest.cpp)
+ ADD_EXECUTABLE(utilTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(utilTest tutil common osdetail gtest pthread gcov)
ENDIF()
diff --git a/src/util/tests/cacheTest.cpp b/src/util/tests/cacheTest.cpp
index e0debd53f4..51221e0b35 100644
--- a/src/util/tests/cacheTest.cpp
+++ b/src/util/tests/cacheTest.cpp
@@ -1,16 +1,9 @@
#include "os.h"
#include <gtest/gtest.h>
#include <iostream>
-#include <sys/time.h>
#include "taos.h"
-//#include "tsdb.h"
-
-//#include "testCommon.h"
-#include "tstoken.h"
-#include "tutil.h"
#include "tcache.h"
-#include "ttimer.h"
namespace {
int32_t tsMaxMgmtConnections = 10000;
diff --git a/tests/pytest/crash_gen.py b/tests/pytest/crash_gen.py
index 768e401be8..c0a8fd1f00 100755
--- a/tests/pytest/crash_gen.py
+++ b/tests/pytest/crash_gen.py
@@ -693,7 +693,7 @@ class DbConnRest(DbConn):
def __init__(self):
super().__init__()
self._type = self.TYPE_REST
- self._url = "http://localhost:6020/rest/sql" # fixed for now
+ self._url = "http://localhost:6041/rest/sql" # fixed for now
self._result = None
def openByType(self): # Open connection
@@ -1306,6 +1306,7 @@ class DbManager():
"Cannot establish DB connection, please re-run script without parameter, and follow the instructions.")
sys.exit(2)
else:
+ print("Failed to connect to DB, errno = {}, msg: {}".format(Helper.convertErrno(err.errno), err.msg))
raise
except BaseException:
print("[=] Unexpected exception")
@@ -1910,10 +1911,19 @@ class TaskReadData(StateTransitionTask):
# 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable
'sum(speed)',
'stddev(speed)',
+ # SELECTOR functions
'min(speed)',
'max(speed)',
'first(speed)',
- 'last(speed)']) # TODO: add more from 'top'
+ 'last(speed)',
+ # 'top(speed)', # TODO: not supported?
+ # 'bottom(speed)', # TODO: not supported?
+ # 'percentile(speed, 10)', # TODO: TD-1316
+ 'last_row(speed)',
+ # Transformation Functions
+ # 'diff(speed)', # TODO: no supported?!
+ 'spread(speed)'
+ ]) # TODO: add more from 'top'
filterExpr = Dice.choice([ # TODO: add various kind of WHERE conditions
None
])
@@ -2768,7 +2778,7 @@ class MainExec:
try:
ret = self._clientMgr.run(self._svcMgr) # stop TAOS service inside
except requests.exceptions.ConnectionError as err:
- logger.warning("Failed to open REST connection to DB")
+ logger.warning("Failed to open REST connection to DB: {}".format(err))
# don't raise
return ret
diff --git a/tests/script/general/parser/constCol.sim b/tests/script/general/parser/constCol.sim
index a196ba2b50..13b4455779 100644
--- a/tests/script/general/parser/constCol.sim
+++ b/tests/script/general/parser/constCol.sim
@@ -347,6 +347,8 @@ if $rows != 3 then
return -1
endi
+print ======================udc with normal column group by
+
sql_error select from t1
sql_error select abc from t1
sql_error select abc as tu from t1
diff --git a/tests/script/general/parser/lastrow_query.sim b/tests/script/general/parser/lastrow_query.sim
index 7954a8d228..1459b7b470 100644
--- a/tests/script/general/parser/lastrow_query.sim
+++ b/tests/script/general/parser/lastrow_query.sim
@@ -152,3 +152,5 @@ sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000'
if $rows != 46 then
return -1
endi
+
+
diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim
index 4e26d14cfd..6790564cc7 100644
--- a/tests/script/general/parser/testSuite.sim
+++ b/tests/script/general/parser/testSuite.sim
@@ -99,6 +99,8 @@ run general/parser/union.sim
sleep 2000
run general/parser/constCol.sim
sleep 2000
+run general/parser/timestamp.sim
+sleep 2000
run general/parser/sliding.sim
#sleep 2000
diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim
index fdda79451d..5616f8ed16 100644
--- a/tests/script/general/parser/topbot.sim
+++ b/tests/script/general/parser/topbot.sim
@@ -118,4 +118,23 @@ if $data21 != 2.10000 then
return -1
endi
+print =====================td-1302 case
+sql create database t1 keep 36500;
+sql use t1;
+sql create table test(ts timestamp, k int);
+sql insert into test values(29999, 1)(70000, 2)(80000, 3)
+
+print ================== restart server to commit data into disk
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 5000
+system sh/exec.sh -n dnode1 -s start
+print ================== server restart completed
+sql connect
+sleep 3000
+
+sql select count(*) from t1.test where ts>10000 and ts<90000 interval(5000a)
+if $rows != 3 then
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file