Merge branch 'develop' into feature/TD-3963
commit e90378b3de
@@ -123,7 +123,11 @@ IF (TD_LINUX)
     MESSAGE(STATUS "set ningsi macro to true")
   ENDIF ()

+  IF (TD_MEMORY_SANITIZER)
+    SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
+  ELSE ()
     SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
+  ENDIF ()
   SET(RELEASE_FLAGS "-O3 -Wno-error")

   IF (${COVER} MATCHES "true")

@@ -144,7 +148,11 @@ IF (TD_DARWIN_64)
   ADD_DEFINITIONS(-DUSE_LIBICONV)
   MESSAGE(STATUS "darwin64 is defined")
   SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+  IF (TD_MEMORY_SANITIZER)
+    SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG")
+  ELSE ()
     SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
+  ENDIF ()
   SET(RELEASE_FLAGS "-Og")
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)

@@ -162,7 +170,14 @@ IF (TD_WINDOWS)
   IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
     SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
   ENDIF ()
-  SET(DEBUG_FLAGS "/fsanitize=thread /fsanitize=leak /fsanitize=memory /fsanitize=undefined /fsanitize=hwaddress /Zi /W3 /GL")
+  IF (TD_MEMORY_SANITIZER)
+    MESSAGE("memory sanitizer detected as true")
+    SET(DEBUG_FLAGS "/fsanitize=address /Zi /W3 /GL")
+  ELSE ()
+    MESSAGE("memory sanitizer detected as false")
+    SET(DEBUG_FLAGS "/Zi /W3 /GL")
+  ENDIF ()
   SET(RELEASE_FLAGS "/W0 /O3 /GL")
 ENDIF ()

@@ -83,3 +83,8 @@ SET(TD_BUILD_JDBC TRUE)
 IF (${BUILD_JDBC} MATCHES "false")
   SET(TD_BUILD_JDBC FALSE)
 ENDIF ()
+
+SET(TD_MEMORY_SANITIZER FALSE)
+IF (${MEMORY_SANITIZER} MATCHES "true")
+  SET(TD_MEMORY_SANITIZER TRUE)
+ENDIF ()
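The four CMake hunks above wire a new MEMORY_SANITIZER build option through to the compiler flags (-fsanitize=address -fsanitize=undefined ... on Linux and Darwin, /fsanitize=address on Windows). As a minimal editorial illustration, not part of this commit, of the kind of defect such an instrumented debug build traps at run time, consider a simple heap overflow in C:

#include <stdlib.h>

/* Build with the flags enabled above, for example:
 *   gcc -fsanitize=address -fsanitize=undefined -O0 -g3 asan_demo.c
 * AddressSanitizer aborts at the marked store with a heap-buffer-overflow
 * report and a stack trace instead of letting it silently corrupt memory. */
int main(void) {
  int *buf = malloc(4 * sizeof(int));
  buf[4] = 42;   /* one element past the end: caught by ASan */
  free(buf);
  return 0;
}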
@@ -15,6 +15,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
 * [命令行程序TAOS](/getting-started#console):访问TDengine的简便方式
 * [极速体验](/getting-started#demo):运行示例程序,快速体验高效的数据插入、查询
 * [支持平台列表](/getting-started#platforms):TDengine服务器和客户端支持的平台列表
+* [Kubenetes部署](https://taosdata.github.io/TDengine-Operator/zh/index.html):TDengine在Kubenetes环境进行部署的详细说明

 ## [整体架构](/architecture)

@@ -176,7 +176,7 @@ TDengine 分布式架构的逻辑结构图如下:

 **通讯方式:**TDengine系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过TCP/UDP进行的。因为考虑到物联网场景,数据写入的包一般不大,因此TDengine 除采用TCP做传输之外,还采用UDP方式,因为UDP 更加高效,而且不受连接数的限制。TDengine实现了自己的超时、重传、确认等机制,以确保UDP的可靠传输。对于数据量不到15K的数据包,采取UDP的方式进行传输,超过15K的,或者是查询类的操作,自动采取TCP的方式进行传输。同时,TDengine根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用TCP方式进行数据传输。

-**FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。
+**FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。另外,这个参数值的长度需要控制在 96 个字符以内。

 **端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))

@@ -325,10 +325,12 @@ for (int i = 0; i < numOfRows; i++){
 }
 s.setString(2, s2, 10);

-// AddBatch 之后,可以再设定新的表名、TAGS、VALUES 取值,这样就能实现一次执行向多个数据表写入:
+// AddBatch 之后,缓存并未清空。为避免混乱,并不推荐在 ExecuteBatch 之前再次绑定新一批的数据:
 s.columnDataAddBatch();
-// 执行语句:
+// 执行绑定数据后的语句:
 s.columnDataExecuteBatch();
+// 执行语句后清空缓存。在清空之后,可以复用当前的对象,绑定新的一批数据(可以是新表名、新 TAGS 值、新 VALUES 值):
+s.columnDataClearBatch();
 // 执行完毕,释放资源:
 s.columnDataCloseBatch();
 ```

@@ -99,7 +99,7 @@ taosd -C
 下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是工作的,一般无需设置。**注意:配置修改后,需要重启*taosd*服务才能生效。**

 - firstEp: taosd启动时,主动连接的集群中首个dnode的end point, 默认值为localhost:6030。
-- fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。
+- fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。这个参数值的长度需要控制在 96 个字符以内。
 - serverPort:taosd启动后,对外服务的端口号,默认值为6030。(RESTful服务使用的端口号是在此基础上+11,即默认值为6041。)
 - dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。
 - logDir:日志文件目录,客户端和服务器的运行日志文件将写入该目录。默认值:/var/log/taos。

@@ -138,6 +138,7 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo);

 bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
 bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
+bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo);
 bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);

 bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);

@@ -266,6 +266,7 @@ typedef struct SSqlObj {

 typedef struct SSqlStream {
   SSqlObj *pSql;
+  void * cqhandle;   // stream belong to SCQContext handle
   const char* dstTable;
   uint32_t streamId;
   char listed;

@@ -107,14 +107,10 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
       return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z);
     }

-    if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
+    if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval, timePrec) != TSDB_CODE_SUCCESS) {
       return TSDB_CODE_TSC_INVALID_OPERATION;
     }

-    if (timePrec == TSDB_TIME_PRECISION_MILLI) {
-      interval /= 1000;
-    }
-
     if (sToken.type == TK_PLUS) {
       useconds += interval;
     } else {
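Several hunks in this commit (here, and in the sqlParse changes further down) replace hard-coded "/ 1000" adjustments with a precision argument (timePrec / tinfo.precision) plus calls to convertTimePrecision(). The body of that helper is not part of this diff; the sketch below only illustrates the idea of scaling a value between millisecond, microsecond, and nanosecond precision. The constants and the function name here are assumptions for illustration, not the real TDengine implementation:

#include <stdint.h>

/* Illustrative precision codes; TDengine's TSDB_TIME_PRECISION_* values may differ. */
enum { PREC_MILLI = 0, PREC_MICRO = 1, PREC_NANO = 2 };

static int64_t ticks_per_ms(int precision) {
  switch (precision) {
    case PREC_MICRO: return 1000;
    case PREC_NANO:  return 1000000;
    default:         return 1;      /* PREC_MILLI */
  }
}

/* Convert a duration or timestamp value from one precision to another. */
static int64_t convert_time_precision(int64_t v, int from, int to) {
  int64_t sf = ticks_per_ms(from), st = ticks_per_ms(to);
  return (st >= sf) ? v * (st / sf) : v / (sf / st);
}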
@@ -468,6 +464,10 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i

         int32_t cnt = 0;
         int32_t j = 0;
+        if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) {
+          return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z);
+        }
+
         for (uint32_t k = 1; k < sToken.n - 1; ++k) {
           if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
             tmpTokenBuf[j] = sToken.z[k + 1];

@@ -711,7 +711,7 @@ static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char
   }

   code = TSDB_CODE_TSC_INVALID_OPERATION;
-  char tmpTokenBuf[16*1024] = {0};  // used for deleting Escape character: \\, \', \"
+  char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0};  // used for deleting Escape character: \\, \', \"

   int32_t numOfRows = 0;
   code = tsParseValues(str, dataBuf, maxNumOfRows, pInsertParam, &numOfRows, tmpTokenBuf);

@@ -19,6 +19,7 @@
 #include "ttimer.h"
 #include "tutil.h"
 #include "taosmsg.h"
+#include "tcq.h"

 #include "taos.h"

@@ -294,24 +295,34 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
   return msgLen;
 }

-void tscKillConnection(STscObj *pObj) {
-  pthread_mutex_lock(&pObj->mutex);
-  SSqlObj *pSql = pObj->sqlList;
-  while (pSql) {
-    pSql = pSql->next;
+// cqContext->dbconn is killed then call this callback
+void cqConnKilledNotify(void* handle, void* conn) {
+  if (handle == NULL || conn == NULL){
+    return ;
   }

+  SCqContext* pContext = (SCqContext*) handle;
+  if (pContext->dbConn == conn){
+    atomic_store_ptr(&(pContext->dbConn), NULL);
+  }
+}
+
+void tscKillConnection(STscObj *pObj) {
+  // get stream header by locked
+  pthread_mutex_lock(&pObj->mutex);
   SSqlStream *pStream = pObj->streamList;
+  pthread_mutex_unlock(&pObj->mutex);

   while (pStream) {
     SSqlStream *tmp = pStream->next;
+    // set associate variant to NULL
+    cqConnKilledNotify(pStream->cqhandle, pObj);
+    // taos_close_stream function call pObj->mutet lock , careful death-lock
     taos_close_stream(pStream);
     pStream = tmp;
   }

-  pthread_mutex_unlock(&pObj->mutex);

   tscDebug("connection:%p is killed", pObj);
   taos_close(pObj);
 }
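In the rewritten tscKillConnection above, the mutex is held only long enough to read the stream list head; the list is then walked and each stream closed outside the lock, because taos_close_stream may itself take pObj->mutex (the "careful death-lock" comment). A generic sketch of that snapshot-under-lock idiom, with hypothetical types and assuming nothing else mutates the list during shutdown, as in the connection-kill path:

#include <pthread.h>
#include <stddef.h>

struct node  { struct node *next; };
struct owner { pthread_mutex_t mutex; struct node *head; };

/* close_fn may re-acquire owner->mutex, so it must run outside the
 * critical section to avoid self-deadlock. */
static void close_all(struct owner *o, void (*close_fn)(struct node *)) {
  pthread_mutex_lock(&o->mutex);
  struct node *p = o->head;        /* snapshot the head under the lock */
  pthread_mutex_unlock(&o->mutex);

  while (p != NULL) {              /* walk and close outside the lock */
    struct node *next = p->next;
    close_fn(p);
    p = next;
  }
}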
@@ -122,7 +122,7 @@ static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQue
 static int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);

 static int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
-static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
+static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* msg);
 static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode);
 static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate);

@@ -438,7 +438,9 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
       }

     } else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
+      if (pzName->type == TK_STRING) {
       pzName->n = strdequote(pzName->z);
+      }
       strncpy(pCmd->payload, pzName->z, pzName->n);
     } else {  // drop user/account
       if (pzName->n >= TSDB_USER_LEN) {

@@ -516,7 +518,9 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
       }

       SStrToken* id = taosArrayGet(pInfo->pMiscInfo->a, 0);
+      if (id->type == TK_STRING) {
       id->n = strdequote(id->z);
+      }
       break;
     }

@@ -921,18 +925,15 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS

   // interval is not null
   SStrToken *t = &pSqlNode->interval.interval;
-  if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval, &pQueryInfo->interval.intervalUnit) != TSDB_CODE_SUCCESS) {
+  if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval,
+                          &pQueryInfo->interval.intervalUnit, tinfo.precision) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_TSC_INVALID_OPERATION;
   }

   if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
-    // if the unit of time window value is millisecond, change the value from microsecond
-    if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
-      pQueryInfo->interval.interval = pQueryInfo->interval.interval / 1000;
-    }

     // interval cannot be less than 10 milliseconds
-    if (pQueryInfo->interval.interval < tsMinIntervalTime) {
+    if (convertTimePrecision(pQueryInfo->interval.interval, tinfo.precision, TSDB_TIME_PRECISION_MILLI) < tsMinIntervalTime) {
       return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
     }
   }

@@ -1008,6 +1009,8 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
   const char* msg3 = "invalid column name";
   const char* msg4 = "invalid time window";

+  STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+  STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
   // no session window
   if (!TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap)) {
     return TSDB_CODE_SUCCESS;

@@ -1017,7 +1020,7 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
   SStrToken* gap = &pSqlNode->sessionVal.gap;

   char timeUnit = 0;
-  if (parseNatualDuration(gap->z, gap->n, &pQueryInfo->sessionWindow.gap, &timeUnit) != TSDB_CODE_SUCCESS) {
+  if (parseNatualDuration(gap->z, gap->n, &pQueryInfo->sessionWindow.gap, &timeUnit, tinfo.precision) != TSDB_CODE_SUCCESS) {
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
   }

@@ -1025,13 +1028,6 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
   }

-  // if the unit of time window value is millisecond, change the value from microsecond
-  STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-  STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-  if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
-    pQueryInfo->sessionWindow.gap = pQueryInfo->sessionWindow.gap / 1000;
-  }
-
   if (pQueryInfo->sessionWindow.gap != 0 && pQueryInfo->interval.interval != 0) {
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
   }

@@ -1068,7 +1064,8 @@ int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* of
     return TSDB_CODE_SUCCESS;
   }

-  if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset, &pQueryInfo->interval.offsetUnit) != TSDB_CODE_SUCCESS) {
+  if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset,
+                          &pQueryInfo->interval.offsetUnit, tinfo.precision) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_TSC_INVALID_OPERATION;
   }

@@ -1077,10 +1074,6 @@ int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* of
   }

   if (pQueryInfo->interval.offsetUnit != 'n' && pQueryInfo->interval.offsetUnit != 'y') {
-    // if the unit of time window value is millisecond, change the value from microsecond
-    if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
-      pQueryInfo->interval.offset = pQueryInfo->interval.offset / 1000;
-    }
     if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
       if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);

@@ -1125,12 +1118,10 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSl
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
   }

-  parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding);
-  if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
-    pQueryInfo->interval.sliding /= 1000;
-  }
+  parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding, tinfo.precision);

-  if (pQueryInfo->interval.sliding < tsMinSlidingTime) {
+  if (pQueryInfo->interval.sliding <
+      convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, tinfo.precision)) {
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
   }

@@ -2157,7 +2148,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
   const char* msg6 = "function applied to tags not allowed";
   const char* msg7 = "normal table can not apply this function";
   const char* msg8 = "multi-columns selection does not support alias column name";
-  const char* msg9 = "diff can no be applied to unsigned numeric type";
+  const char* msg9 = "diff/derivative can no be applied to unsigned numeric type";
+  const char* msg10 = "derivative duration should be greater than 1 Second";
+  const char* msg11 = "third parameter in derivative should be 0 or 1";
+  const char* msg12 = "parameter is out of range [1, 100]";

   switch (functionId) {
     case TSDB_FUNC_COUNT: {

@@ -2309,7 +2303,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
       }

-      SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), resultSize, false);
+      SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false);

       if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters
         char val[8] = {0};

@@ -2337,15 +2331,27 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
         }

         if (info.precision == TSDB_TIME_PRECISION_MILLI) {
+          tickPerSec /= 1000000;
+        } else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
           tickPerSec /= 1000;
         }

+        if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
+          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
+        }
+
         tscExprAddParams(&pExpr->base, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
         memset(val, 0, tListLen(val));

         if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_BIGINT, true) < 0) {
           return TSDB_CODE_TSC_INVALID_OPERATION;
         }

+        int64_t v = *(int64_t*) val;
+        if (v != 0 && v != 1) {
+          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
+        }
+
         tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
       }

@@ -2551,7 +2557,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col

         int64_t nTop = GET_INT32_VAL(val);
         if (nTop <= 0 || nTop > 100) {  // todo use macro
-          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
+          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
         }

         // todo REFACTOR

@@ -3300,8 +3306,9 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
       return retVal;
     }
   } else if ((colType == TSDB_DATA_TYPE_TIMESTAMP) && (TSDB_DATA_TYPE_BIGINT == pRight->value.nType)) {
-    if ((timePrecision == TSDB_TIME_PRECISION_MILLI) && (pRight->flags & (1 << EXPR_FLAG_US_TIMESTAMP))) {
-      pRight->value.i64 /= 1000;
+    if (pRight->flags & (1 << EXPR_FLAG_NS_TIMESTAMP)) {
+      pRight->value.i64 =
+          convertTimePrecision(pRight->value.i64, TSDB_TIME_PRECISION_NANO, timePrecision);
     }
   }

@@ -4844,7 +4851,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t

   char* seg = strnchr(pRight->value.pz, '-', pRight->value.nLen, false);
   if (seg != NULL) {
-    if (taosParseTime(pRight->value.pz, &val, pRight->value.nLen, TSDB_TIME_PRECISION_MICRO, tsDaylight) == TSDB_CODE_SUCCESS) {
+    if (taosParseTime(pRight->value.pz, &val, pRight->value.nLen, timePrecision, tsDaylight) == TSDB_CODE_SUCCESS) {
       parsed = true;
     } else {
       return TSDB_CODE_TSC_INVALID_OPERATION;

@@ -4857,18 +4864,6 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
        return TSDB_CODE_TSC_INVALID_OPERATION;
      }
    }
-  } else if (pRight->tokenId == TK_INTEGER && timePrecision == TSDB_TIME_PRECISION_MILLI) {
-    /*
-     * if the pRight->tokenId == TK_INTEGER/TK_FLOAT, the value is adaptive, we
-     * need the time precision in metermeta to transfer the value in MICROSECOND
-     *
-     * Additional check to avoid data overflow
-     */
-    if (pRight->value.i64 <= INT64_MAX / 1000) {
-      pRight->value.i64 *= 1000;
-    }
-  } else if (pRight->tokenId == TK_FLOAT && timePrecision == TSDB_TIME_PRECISION_MILLI) {
-    pRight->value.dKey *= 1000;
   }

   if (!parsed) {

@@ -4876,33 +4871,19 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
     * failed to parse timestamp in regular formation, try next
     * it may be a epoch time in string format
     */
+    if (pRight->flags & (1 << EXPR_FLAG_NS_TIMESTAMP)) {
+      pRight->value.i64 = convertTimePrecision(pRight->value.i64, TSDB_TIME_PRECISION_NANO, timePrecision);
+    }

    tVariantDump(&pRight->value, (char*)&val, TSDB_DATA_TYPE_BIGINT, true);

-    /*
-     * transfer it into MICROSECOND format if it is a string, since for
-     * TK_INTEGER/TK_FLOAT the value has been transferred
-     *
-     * additional check to avoid data overflow
-     */
-    if (pRight->tokenId == TK_STRING && timePrecision == TSDB_TIME_PRECISION_MILLI) {
-      if (val <= INT64_MAX / 1000) {
-        val *= 1000;
-      }
-    }
-  }
-
-  int32_t delta = 1;
-  /* for millisecond, delta is 1ms=1000us */
-  if (timePrecision == TSDB_TIME_PRECISION_MILLI) {
-    delta *= 1000;
   }

   if (optr == TK_LE) {
     win->ekey = val;
   } else if (optr == TK_LT) {
-    win->ekey = val - delta;
+    win->ekey = val - 1;
   } else if (optr == TK_GT) {
-    win->skey = val + delta;
+    win->skey = val + 1;
   } else if (optr == TK_GE) {
     win->skey = val;
   } else if (optr == TK_EQ) {
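Because getTimeRange now keeps the parsed value in the table's native precision instead of normalizing to a coarser unit and widening by delta = 1000, the exclusive bounds shrink to a single tick. A one-line sketch of the reasoning (editorial, not from the source):

/* For a strict predicate "ts < val", the largest representable timestamp that
 * satisfies it is the previous tick, i.e. val - 1, regardless of whether the
 * stored values are in ms, us, or ns; likewise "ts > val" starts at val + 1. */
static int64_t exclusive_upper(int64_t val) { return val - 1; }
static int64_t exclusive_lower(int64_t val) { return val + 1; }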
@ -5625,8 +5606,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
||||||
int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
||||||
const char* msg0 = "sample interval can not be less than 10ms.";
|
const char* msg0 = "sample interval can not be less than 10ms.";
|
||||||
const char* msg1 = "functions not allowed in select clause";
|
const char* msg1 = "functions not allowed in select clause";
|
||||||
|
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||||
if (pQueryInfo->interval.interval != 0 && pQueryInfo->interval.interval < 10 &&
|
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||||
|
if (pQueryInfo->interval.interval != 0 &&
|
||||||
|
convertTimePrecision(pQueryInfo->interval.interval, tinfo.precision, TSDB_TIME_PRECISION_MILLI)< 10 &&
|
||||||
pQueryInfo->interval.intervalUnit != 'n' &&
|
pQueryInfo->interval.intervalUnit != 'n' &&
|
||||||
pQueryInfo->interval.intervalUnit != 'y') {
|
pQueryInfo->interval.intervalUnit != 'y') {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
|
||||||
|
@ -5645,7 +5628,7 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
||||||
|
|
||||||
int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
||||||
bool isProjectionFunction = false;
|
bool isProjectionFunction = false;
|
||||||
const char* msg1 = "column projection is not compatible with interval";
|
const char* msg1 = "functions not compatible with interval";
|
||||||
|
|
||||||
// multi-output set/ todo refactor
|
// multi-output set/ todo refactor
|
||||||
size_t size = taosArrayGetSize(pQueryInfo->exprList);
|
size_t size = taosArrayGetSize(pQueryInfo->exprList);
|
||||||
|
@ -5669,8 +5652,8 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || pExpr->base.functionId == TSDB_FUNC_DIFF ||
|
int32_t f = pExpr->base.functionId;
|
||||||
pExpr->base.functionId == TSDB_FUNC_ARITHM) {
|
if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE) {
|
||||||
isProjectionFunction = true;
|
isProjectionFunction = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -5956,48 +5939,40 @@ int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlN
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
|
static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
|
||||||
const char* msg = "invalid number of options";
|
const char* msg1 = "invalid number of keep options";
|
||||||
|
const char* msg2 = "invalid keep value";
|
||||||
|
const char* msg3 = "invalid keep value, should be keep0 <= keep1 <= keep2";
|
||||||
|
|
||||||
pMsg->daysToKeep = htonl(-1);
|
pMsg->daysToKeep0 = htonl(-1);
|
||||||
pMsg->daysToKeep1 = htonl(-1);
|
pMsg->daysToKeep1 = htonl(-1);
|
||||||
pMsg->daysToKeep2 = htonl(-1);
|
pMsg->daysToKeep2 = htonl(-1);
|
||||||
|
|
||||||
SArray* pKeep = pCreateDb->keep;
|
SArray* pKeep = pCreateDb->keep;
|
||||||
if (pKeep != NULL) {
|
if (pKeep != NULL) {
|
||||||
size_t s = taosArrayGetSize(pKeep);
|
size_t s = taosArrayGetSize(pKeep);
|
||||||
|
#ifdef _STORAGE
|
||||||
|
if (s >= 4 ||s <= 0) {
|
||||||
|
#else
|
||||||
|
if (s != 1) {
|
||||||
|
#endif
|
||||||
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
tVariantListItem* p0 = taosArrayGet(pKeep, 0);
|
tVariantListItem* p0 = taosArrayGet(pKeep, 0);
|
||||||
switch (s) {
|
tVariantListItem* p1 = (s > 1) ? taosArrayGet(pKeep, 1) : p0;
|
||||||
case 1: {
|
tVariantListItem* p2 = (s > 2) ? taosArrayGet(pKeep, 2) : p1;
|
||||||
if ((int32_t)p0->pVar.i64 <= 0) {
|
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
|
||||||
}
|
|
||||||
pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
case 2: {
|
|
||||||
tVariantListItem* p1 = taosArrayGet(pKeep, 1);
|
|
||||||
if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0) {
|
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
|
||||||
}
|
|
||||||
pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
|
|
||||||
pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case 3: {
|
|
||||||
tVariantListItem* p1 = taosArrayGet(pKeep, 1);
|
|
||||||
tVariantListItem* p2 = taosArrayGet(pKeep, 2);
|
|
||||||
|
|
||||||
if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0 || (int32_t)p2->pVar.i64 <= 0) {
|
if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0 || (int32_t)p2->pVar.i64 <= 0) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||||
|
}
|
||||||
|
if (!(((int32_t)p0->pVar.i64 <= (int32_t)p1->pVar.i64) && ((int32_t)p1->pVar.i64 <= (int32_t)p2->pVar.i64))) {
|
||||||
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||||
}
|
}
|
||||||
|
|
||||||
pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
|
pMsg->daysToKeep0 = htonl((int32_t)p0->pVar.i64);
|
||||||
pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
|
pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
|
||||||
pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64);
|
pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64);
|
||||||
break;
|
|
||||||
}
|
|
||||||
default: { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); }
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
|
@ -6019,11 +5994,15 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo
|
||||||
} else if (strncmp(pToken->z, TSDB_TIME_PRECISION_MICRO_STR, pToken->n) == 0 &&
|
} else if (strncmp(pToken->z, TSDB_TIME_PRECISION_MICRO_STR, pToken->n) == 0 &&
|
||||||
strlen(TSDB_TIME_PRECISION_MICRO_STR) == pToken->n) {
|
strlen(TSDB_TIME_PRECISION_MICRO_STR) == pToken->n) {
|
||||||
pMsg->precision = TSDB_TIME_PRECISION_MICRO;
|
pMsg->precision = TSDB_TIME_PRECISION_MICRO;
|
||||||
|
} else if (strncmp(pToken->z, TSDB_TIME_PRECISION_NANO_STR, pToken->n) == 0 &&
|
||||||
|
strlen(TSDB_TIME_PRECISION_NANO_STR) == pToken->n) {
|
||||||
|
pMsg->precision = TSDB_TIME_PRECISION_NANO;
|
||||||
} else {
|
} else {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -6266,7 +6245,7 @@ static void updateTagPrjFunction(SQueryInfo* pQueryInfo) {
|
||||||
* 2. if selectivity function and tagprj function both exist, there should be only
|
* 2. if selectivity function and tagprj function both exist, there should be only
|
||||||
* one selectivity function exists.
|
* one selectivity function exists.
|
||||||
*/
|
*/
|
||||||
static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
|
static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
|
||||||
const char* msg1 = "only one selectivity function allowed in presence of tags function";
|
const char* msg1 = "only one selectivity function allowed in presence of tags function";
|
||||||
const char* msg3 = "aggregation function should not be mixed up with projection";
|
const char* msg3 = "aggregation function should not be mixed up with projection";
|
||||||
|
|
||||||
|
@ -6289,10 +6268,11 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
|
||||||
|
|
||||||
int16_t functionId = pExpr->base.functionId;
|
int16_t functionId = pExpr->base.functionId;
|
||||||
if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS ||
|
if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS ||
|
||||||
functionId == TSDB_FUNC_ARITHM) {
|
functionId == TSDB_FUNC_ARITHM || functionId == TSDB_FUNC_TS_DUMMY) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
|
if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
|
||||||
numOfSelectivity++;
|
numOfSelectivity++;
|
||||||
} else {
|
} else {
|
||||||
|
@ -6304,7 +6284,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
|
||||||
// When the tag projection function on tag column that is not in the group by clause, aggregation function and
|
// When the tag projection function on tag column that is not in the group by clause, aggregation function and
|
||||||
// selectivity function exist in select clause is not allowed.
|
// selectivity function exist in select clause is not allowed.
|
||||||
if (numOfAggregation > 0) {
|
if (numOfAggregation > 0) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
return invalidOperationMsg(msg, msg1);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -6333,7 +6313,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
|
||||||
(functionId == TSDB_FUNC_LAST_DST && (pExpr->base.colInfo.flag & TSDB_COL_NULL) != 0)) {
|
(functionId == TSDB_FUNC_LAST_DST && (pExpr->base.colInfo.flag & TSDB_COL_NULL) != 0)) {
|
||||||
// do nothing
|
// do nothing
|
||||||
} else {
|
} else {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
return invalidOperationMsg(msg, msg1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -6346,7 +6326,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
|
||||||
} else {
|
} else {
|
||||||
if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) {
|
if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) {
|
||||||
if (numOfAggregation > 0 && pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
|
if (numOfAggregation > 0 && pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
return invalidOperationMsg(msg, msg3);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (numOfAggregation > 0 || numOfSelectivity > 0) {
|
if (numOfAggregation > 0 || numOfSelectivity > 0) {
|
||||||
|
@ -6394,9 +6374,14 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
|
||||||
size_t size = tscNumOfExprs(pQueryInfo);
|
size_t size = tscNumOfExprs(pQueryInfo);
|
||||||
|
|
||||||
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
|
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
|
||||||
|
|
||||||
|
int32_t f = TSDB_FUNC_TAG;
|
||||||
|
if (tscIsDiffDerivQuery(pQueryInfo)) {
|
||||||
|
f = TSDB_FUNC_TAGPRJ;
|
||||||
|
}
|
||||||
|
|
||||||
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
|
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
|
||||||
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, s->type, s->bytes,
|
SExprInfo* pExpr = tscExprAppend(pQueryInfo, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true);
|
||||||
getNewResColId(pCmd), s->bytes, true);
|
|
||||||
|
|
||||||
memset(pExpr->base.aliasName, 0, sizeof(pExpr->base.aliasName));
|
memset(pExpr->base.aliasName, 0, sizeof(pExpr->base.aliasName));
|
||||||
tstrncpy(pExpr->base.aliasName, s->name, sizeof(pExpr->base.aliasName));
|
tstrncpy(pExpr->base.aliasName, s->name, sizeof(pExpr->base.aliasName));
|
||||||
|
@ -6456,7 +6441,7 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
|
||||||
return (tableCounting && tagProjection)? -1:0;
|
return (tableCounting && tagProjection)? -1:0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* msg) {
|
||||||
const char* msg1 = "functions/columns not allowed in group by query";
|
const char* msg1 = "functions/columns not allowed in group by query";
|
||||||
const char* msg2 = "projection query on columns not allowed";
|
const char* msg2 = "projection query on columns not allowed";
|
||||||
const char* msg3 = "group by/session/state_window not allowed on projection query";
|
const char* msg3 = "group by/session/state_window not allowed on projection query";
|
||||||
|
@ -6466,17 +6451,17 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
||||||
// only retrieve tags, group by is not supportted
|
// only retrieve tags, group by is not supportted
|
||||||
if (tscQueryTags(pQueryInfo)) {
|
if (tscQueryTags(pQueryInfo)) {
|
||||||
if (doTagFunctionCheck(pQueryInfo) != TSDB_CODE_SUCCESS) {
|
if (doTagFunctionCheck(pQueryInfo) != TSDB_CODE_SUCCESS) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
|
return invalidOperationMsg(msg, msg5);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || isTimeWindowQuery(pQueryInfo)) {
|
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || isTimeWindowQuery(pQueryInfo)) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
return invalidOperationMsg(msg, msg4);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (tscIsProjectionQuery(pQueryInfo) && tscIsSessionWindowQuery(pQueryInfo)) {
|
if (tscIsProjectionQuery(pQueryInfo) && tscIsSessionWindowQuery(pQueryInfo)) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
return invalidOperationMsg(msg, msg3);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
|
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
|
||||||
|
@ -6484,6 +6469,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
||||||
if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) {
|
if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) {
|
||||||
// It is a groupby aggregate query, the tag project function is not suitable for this case.
|
// It is a groupby aggregate query, the tag project function is not suitable for this case.
|
||||||
updateTagPrjFunction(pQueryInfo);
|
updateTagPrjFunction(pQueryInfo);
|
||||||
|
|
||||||
return doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo);
|
return doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -6508,21 +6494,21 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!qualified) {
|
if (!qualified) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
return invalidOperationMsg(msg, msg2);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (IS_MULTIOUTPUT(aAggs[functId].status) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
|
if (IS_MULTIOUTPUT(aAggs[functId].status) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
|
||||||
functId != TSDB_FUNC_DIFF && functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) {
|
functId != TSDB_FUNC_DIFF && functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
return invalidOperationMsg(msg, msg1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (functId == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
|
if (functId == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
return invalidOperationMsg(msg, msg1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (checkUpdateTagPrjFunctions(pQueryInfo, pCmd) != TSDB_CODE_SUCCESS) {
|
if (checkUpdateTagPrjFunctions(pQueryInfo, msg) != TSDB_CODE_SUCCESS) {
|
||||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -6531,13 +6517,13 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// projection query on super table does not compatible with "group by" syntax
|
// projection query on super table does not compatible with "group by" syntax
|
||||||
if (tscIsProjectionQuery(pQueryInfo)) {
|
if (tscIsProjectionQuery(pQueryInfo) && !(tscIsDiffDerivQuery(pQueryInfo))) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
return invalidOperationMsg(msg, msg3);
|
||||||
}
|
}
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
} else {
|
} else {
|
||||||
return checkUpdateTagPrjFunctions(pQueryInfo, pCmd);
|
return checkUpdateTagPrjFunctions(pQueryInfo, msg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) {
|
int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) {
|
||||||
|
@ -6623,6 +6609,13 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t blocks = ntohl(pCreate->totalBlocks);
|
||||||
|
if (blocks != -1 && (blocks < TSDB_MIN_TOTAL_BLOCKS || blocks > TSDB_MAX_TOTAL_BLOCKS)) {
|
||||||
|
snprintf(msg, tListLen(msg), "invalid db option totalBlocks: %d valid range: [%d, %d]", blocks,
|
||||||
|
TSDB_MIN_TOTAL_BLOCKS, TSDB_MAX_TOTAL_BLOCKS);
|
||||||
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||||
|
}
|
||||||
|
|
||||||
if (pCreate->quorum != -1 &&
|
if (pCreate->quorum != -1 &&
|
||||||
(pCreate->quorum < TSDB_MIN_DB_QUORUM_OPTION || pCreate->quorum > TSDB_MAX_DB_QUORUM_OPTION)) {
|
(pCreate->quorum < TSDB_MIN_DB_QUORUM_OPTION || pCreate->quorum > TSDB_MAX_DB_QUORUM_OPTION)) {
|
||||||
snprintf(msg, tListLen(msg), "invalid db option quorum: %d valid range: [%d, %d]", pCreate->quorum,
|
snprintf(msg, tListLen(msg), "invalid db option quorum: %d valid range: [%d, %d]", pCreate->quorum,
|
||||||
|
@ -6651,9 +6644,10 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO) {
|
if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO &&
|
||||||
snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d]", pCreate->precision,
|
pCreate->precision != TSDB_TIME_PRECISION_NANO) {
|
||||||
TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO);
|
snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d, %d]", pCreate->precision,
|
||||||
|
TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO);
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -7052,6 +7046,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
|
||||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// project query primary column must be timestamp type
|
||||||
if (tscIsProjectionQuery(pQueryInfo)) {
|
if (tscIsProjectionQuery(pQueryInfo)) {
|
||||||
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
|
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
|
||||||
if (pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
if (pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||||
|
@ -7731,7 +7726,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
|
||||||
const char* msg2 = "too many tables in from clause";
|
const char* msg2 = "too many tables in from clause";
|
||||||
const char* msg3 = "start(end) time of query range required or time range too large";
|
const char* msg3 = "start(end) time of query range required or time range too large";
|
||||||
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
|
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
|
||||||
const char* msg9 = "only tag query not compatible with normal column filter";
|
const char* msg5 = "only tag query not compatible with normal column filter";
|
||||||
|
const char* msg6 = "not support stddev/percentile in outer query yet";
|
||||||
|
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
|
@ -7772,24 +7768,27 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
|
||||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// todo NOT support yet
|
||||||
|
for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
|
||||||
|
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
|
||||||
|
int32_t f = pExpr->base.functionId;
|
||||||
|
if (f == TSDB_FUNC_STDDEV || f == TSDB_FUNC_PERCT) {
|
||||||
|
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// validate the query filter condition info
|
// validate the query filter condition info
|
||||||
if (pSqlNode->pWhere != NULL) {
|
if (pSqlNode->pWhere != NULL) {
|
||||||
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
|
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
|
||||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||||
}
|
}
|
||||||
|
|
||||||
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
|
|
||||||
if (pTableMeta->tableInfo.precision == TSDB_TIME_PRECISION_MILLI) {
|
|
||||||
pQueryInfo->window.skey = pQueryInfo->window.skey / 1000;
|
|
||||||
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// validate the interval info
|
// validate the interval info
|
||||||
if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
|
if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
|
||||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||||
} else {
|
} else {
|
||||||
if (isTimeWindowQuery(pQueryInfo)) {
|
if (isTimeWindowQuery(pQueryInfo) || pQueryInfo->sessionWindow.gap > 0) {
|
||||||
     // check if the first column of the nest query result is timestamp column
     SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0);
     if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) {

@@ -7804,10 +7803,13 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf

     // set order by info
     STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
-    if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) !=
-        TSDB_CODE_SUCCESS) {
+    if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) != TSDB_CODE_SUCCESS) {
       return TSDB_CODE_TSC_INVALID_OPERATION;
     }

+    if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo, tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) {
+      return code;
+    }
   } else {
     pQueryInfo->command = TSDB_SQL_SELECT;

@@ -7833,18 +7835,12 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
     }

     // set where info
-    STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);

     if (pSqlNode->pWhere != NULL) {
       if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
         return TSDB_CODE_TSC_INVALID_OPERATION;
       }

       pSqlNode->pWhere = NULL;
-      if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
-        pQueryInfo->window.skey = pQueryInfo->window.skey / 1000;
-        pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
-      }
     } else {
       if (taosArrayGetSize(pSqlNode->from->list) > 1) { // Cross join not allowed yet
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "cross join not supported yet");

@@ -7872,11 +7868,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
     // set interval value
     if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
       return TSDB_CODE_TSC_INVALID_OPERATION;
-    } else {
-      if (isTimeWindowQuery(pQueryInfo) &&
-          (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
-        return TSDB_CODE_TSC_INVALID_OPERATION;
-      }
     }

     if (tscQueryTags(pQueryInfo)) {

@@ -7887,7 +7878,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
       for (int32_t i = 0; i < numOfCols; ++i) {
         SColumn* pCols = taosArrayGetP(pQueryInfo->colList, i);
         if (pCols->info.flist.numOfFilters > 0) {
-          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
+          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
         }
       }
     }

@@ -7907,6 +7898,11 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
       return TSDB_CODE_TSC_INVALID_OPERATION;
     }

+    if ((isTimeWindowQuery(pQueryInfo) || pQueryInfo->sessionWindow.gap > 0) &&
+        (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
+      return TSDB_CODE_TSC_INVALID_OPERATION;
+    }
+
     if (isSTable) {
       tscTansformFuncForSTableQuery(pQueryInfo);
       if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {

@@ -7936,7 +7932,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
       return code;
     }

-    if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) {
+    if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo,tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) {
       return code;
     }

@@ -912,7 +912,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   }

   SGroupbyExpr *pGroupbyExpr = query.pGroupbyExpr;
-  if (pGroupbyExpr->numOfGroupCols > 0) {
+  if (pGroupbyExpr != NULL && pGroupbyExpr->numOfGroupCols > 0) {
     pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex);
     pQueryMsg->orderType = htons(pGroupbyExpr->orderType);

@@ -53,9 +53,7 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in

   if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
     // change to ms
-    if (prec == TSDB_TIME_PRECISION_MICRO) {
-      slidingTime = slidingTime / 1000;
-    }
+    slidingTime = convertTimePrecision(slidingTime, pStream->precision, TSDB_TIME_PRECISION_MILLI);

     if (slidingTime < retryDelta) {
       return slidingTime;

@@ -139,8 +137,13 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {

   pStream->numOfRes = 0;  // reset the numOfRes.
   SSqlObj *pSql = pStream->pSql;

+  // pSql == NULL maybe killStream already called
+  if(pSql == NULL) {
+    return ;
+  }
   SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
-  tscDebug("0x%"PRIx64" timer launch query", pSql->self);
+  tscDebug("0x%"PRIx64" add into timer", pSql->self);

   if (pStream->isProject) {
     /*

@@ -157,11 +160,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
     pQueryInfo->window.skey = pStream->stime;
     int64_t etime = taosGetTimestamp(pStream->precision);
     // delay to wait all data in last time window
-    if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
-      etime -= tsMaxStreamComputDelay * 1000l;
-    } else {
-      etime -= tsMaxStreamComputDelay;
-    }
+    etime -= convertTimePrecision(tsMaxStreamComputDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);
     if (etime > pStream->etime) {
       etime = pStream->etime;
     } else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {

@@ -178,8 +177,8 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
     int64_t timer = pStream->interval.sliding;
     if (pStream->interval.intervalUnit == 'y' || pStream->interval.intervalUnit == 'n') {
       timer = 86400 * 1000l;
-    } else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
-      timer /= 1000l;
+    } else {
+      timer = convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
     }
     tscSetRetryTimer(pStream, pSql, timer);
     return;

@@ -339,8 +338,12 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
   if (pStream->isProject) {
     int64_t now = taosGetTimestamp(pStream->precision);
     int64_t etime = now > pStream->etime ? pStream->etime : now;
+    int64_t maxRetent = tsMaxRetentWindow * 1000;
+    if(pStream->precision == TSDB_TIME_PRECISION_MICRO) {
+      maxRetent *= 1000;
+    }

-    if (pStream->etime < now && now - pStream->etime > tsMaxRetentWindow) {
+    if (pStream->etime < now && now - pStream->etime > maxRetent) {
       /*
        * current time window will be closed, since it too early to exceed the maxRetentWindow value
        */

@@ -369,8 +372,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
 }

 static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
-  int64_t maxDelay =
-      (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
+  int64_t maxDelay = convertTimePrecision(tsMaxStreamComputDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);

   int64_t delayDelta = maxDelay;
   if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {

@@ -438,16 +440,14 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {

   timer += getLaunchTimeDelay(pStream);

-  if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
-    timer = timer / 1000L;
-  }
+  timer = convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);

   tscSetRetryTimer(pStream, pSql, timer);
 }

 static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
   int64_t minIntervalTime =
-      (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
+      convertTimePrecision(tsMinIntervalTime, TSDB_TIME_PRECISION_MILLI, pStream->precision);

   SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);

@@ -471,7 +471,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
   }

   int64_t minSlidingTime =
-      (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
+      convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, pStream->precision);

   if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
     tscWarn("0x%"PRIx64" stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,

@@ -539,13 +539,12 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
     timer = pStream->stime - now;
   }

-  int64_t startDelay =
-      (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsStreamCompStartDelay * 1000L : tsStreamCompStartDelay;
+  int64_t startDelay = convertTimePrecision(tsStreamCompStartDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);

   timer += getLaunchTimeDelay(pStream);
   timer += startDelay;

-  return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer;
+  return convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
 }

 static void tscCreateStream(void *param, TAOS_RES *res, int code) {

@@ -664,7 +663,7 @@ void cbParseSql(void* param, TAOS_RES* res, int code) {
 }

 TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
-                                       int64_t stime, void *param, void (*callback)(void *)) {
+                                       int64_t stime, void *param, void (*callback)(void *), void* cqhandle) {
   STscObj *pObj = (STscObj *)taos;
   if (pObj == NULL || pObj->signature != pObj) return NULL;

@@ -697,6 +696,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
   pStream->callback = callback;
   pStream->param = param;
   pStream->pSql = pSql;
+  pStream->cqhandle = cqhandle;
   pSql->pStream = pStream;
   pSql->param = pStream;
   pSql->maxRetry = TSDB_MAX_REPLICA;

@@ -745,7 +745,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c

 TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
                               int64_t stime, void *param, void (*callback)(void *)) {
-  return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback);
+  return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback, NULL);
 }

 void taos_close_stream(TAOS_STREAM *handle) {

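Note on the stream-timer hunks above: every hand-written `* 1000` / `/ 1000` precision adjustment is replaced by a call to convertTimePrecision(), whose declaration is added further down in this commit. A minimal standalone sketch of what such a conversion can look like; the constant values and the scaling table here are assumptions for illustration, not the commit's implementation:

#include <stdint.h>

/* Assumed values of the precision enums; the real definitions live in the TDengine headers. */
#define TSDB_TIME_PRECISION_MILLI 0
#define TSDB_TIME_PRECISION_MICRO 1
#define TSDB_TIME_PRECISION_NANO  2

/* Illustrative sketch only: rescale a timestamp or duration between precisions
 * by the ratio of sub-second units per second (1e3, 1e6, 1e9). */
static int64_t convertTimePrecisionSketch(int64_t t, int32_t fromPrecision, int32_t toPrecision) {
  static const double unitsPerSecond[] = {1e3, 1e6, 1e9};
  if (fromPrecision == toPrecision) {
    return t;
  }
  return (int64_t)((double)t / unitsPerSecond[fromPrecision] * unitsPerSecond[toPrecision]);
}

With a helper of this shape, the cleanup in the hunks above becomes mechanical: pass the stream's precision and the millisecond-based configuration value, and the caller no longer needs per-precision branches.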
@@ -255,10 +255,14 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
   size_t size = tscNumOfExprs(pQueryInfo);

   for (int32_t i = 0; i < size; ++i) {
-    int32_t functionId = tscExprGet(pQueryInfo, i)->base.functionId;
+    int32_t f = tscExprGet(pQueryInfo, i)->base.functionId;
+    if (f == TSDB_FUNC_TS_DUMMY) {
+      continue;
+    }

-    if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG &&
-        functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) {
+    if (f != TSDB_FUNC_PRJ && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_TAG &&
+        f != TSDB_FUNC_TS && f != TSDB_FUNC_ARITHM && f != TSDB_FUNC_DIFF &&
+        f != TSDB_FUNC_DERIVATIVE) {
       return false;
     }
   }

@@ -266,6 +270,24 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
   return true;
 }

+bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo) {
+  size_t size = tscNumOfExprs(pQueryInfo);
+
+  for (int32_t i = 0; i < size; ++i) {
+    int32_t f = tscExprGet(pQueryInfo, i)->base.functionId;
+    if (f == TSDB_FUNC_TS_DUMMY) {
+      continue;
+    }
+
+    if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+
 bool tscHasColumnFilter(SQueryInfo* pQueryInfo) {
   // filter on primary timestamp column
   if (pQueryInfo->window.skey != INT64_MIN || pQueryInfo->window.ekey != INT64_MAX) {

@@ -962,6 +984,9 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {

   pInfo->block = destroyOutputBuf(pInfo->block);
   pInfo->pSql = NULL;
+
+  cleanupResultRowInfo(&pInfo->pTableQueryInfo->resInfo);
+  tfree(pInfo->pTableQueryInfo);
 }

 // todo this operator servers as the adapter for Operator tree and SqlRes result, remove it later

@@ -4263,10 +4288,9 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt

   STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];

+  if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
     pQueryAttr->pGroupbyExpr = calloc(1, sizeof(SGroupbyExpr));
     *(pQueryAttr->pGroupbyExpr) = pQueryInfo->groupbyExpr;

-  if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
     pQueryAttr->pGroupbyExpr->columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
   } else {
     assert(pQueryInfo->groupbyExpr.columnInfo == NULL);

@@ -4345,7 +4369,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
     return TSDB_CODE_TSC_INVALID_OPERATION;
   }

-  if (pQueryAttr->pGroupbyExpr->numOfGroupCols < 0) {
+  if (pQueryAttr->pGroupbyExpr != NULL && pQueryAttr->pGroupbyExpr->numOfGroupCols < 0) {
     tscError("%p illegal value of numOfGroupCols in query msg: %d", addr, pQueryInfo->groupbyExpr.numOfGroupCols);
     return TSDB_CODE_TSC_INVALID_OPERATION;
   }

@@ -831,6 +831,16 @@ static void doInitGlobalConfig(void) {
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);

+  cfg.option = "precision";
+  cfg.ptr = &tsTimePrecision;
+  cfg.valType = TAOS_CFG_VTYPE_INT8;
+  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+  cfg.minValue = TSDB_MIN_PRECISION;
+  cfg.maxValue = TSDB_MAX_PRECISION;
+  cfg.ptrLength = 0;
+  cfg.unitType = TAOS_CFG_UTYPE_NONE;
+  taosInitConfigOption(cfg);
+
   cfg.option = "comp";
   cfg.ptr = &tsCompression;
   cfg.valType = TAOS_CFG_VTYPE_INT8;

@@ -901,6 +911,16 @@ static void doInitGlobalConfig(void) {
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);

+  cfg.option = "cachelast";
+  cfg.ptr = &tsCacheLastRow;
+  cfg.valType = TAOS_CFG_VTYPE_INT8;
+  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+  cfg.minValue = TSDB_MIN_DB_CACHE_LAST_ROW;
+  cfg.maxValue = TSDB_MAX_DB_CACHE_LAST_ROW;
+  cfg.ptrLength = 0;
+  cfg.unitType = TAOS_CFG_UTYPE_NONE;
+  taosInitConfigOption(cfg);
+
   cfg.option = "mqttHostName";
   cfg.ptr = tsMqttHostName;
   cfg.valType = TAOS_CFG_VTYPE_STRING;

@@ -74,7 +74,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {

     case TSDB_DATA_TYPE_BINARY: {
       pVar->pz = strndup(token->z, token->n);
-      pVar->nLen = strdequote(pVar->pz);
+      pVar->nLen = strRmquote(pVar->pz, token->n);
       break;
     }

@@ -6,6 +6,8 @@ import java.io.IOException;
 import java.math.BigDecimal;
 import java.sql.*;
 import java.time.LocalTime;
+import java.util.ArrayList;
+import java.util.Random;

 public class TSDBPreparedStatementTest {

@@ -296,6 +298,296 @@ public class TSDBPreparedStatementTest {
         }
     }

+    @Test
+    public void executeTest() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        int numOfRows = 1000;
+
+        for (int loop = 0; loop < 10; loop++){
+            stmt.execute("drop table if exists weather_test");
+            stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))");
+
+            TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? values(?, ?, ?, ?, ?, ?, ?, ?)");
+            Random r = new Random();
+            s.setTableName("weather_test");
+
+            ArrayList<Long> ts = new ArrayList<Long>();
+            for(int i = 0; i < numOfRows; i++) {
+                ts.add(System.currentTimeMillis() + i);
+            }
+            s.setTimestamp(0, ts);
+
+            int random = 10 + r.nextInt(5);
+            ArrayList<String> s2 = new ArrayList<String>();
+            for(int i = 0; i < numOfRows; i++) {
+                if(i % random == 0) {
+                    s2.add(null);
+                }else{
+                    s2.add("分支" + i % 4);
+                }
+            }
+            s.setNString(1, s2, 4);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<Float> s3 = new ArrayList<Float>();
+            for(int i = 0; i < numOfRows; i++) {
+                if(i % random == 0) {
+                    s3.add(null);
+                }else{
+                    s3.add(r.nextFloat());
+                }
+            }
+            s.setFloat(2, s3);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<Double> s4 = new ArrayList<Double>();
+            for(int i = 0; i < numOfRows; i++) {
+                if(i % random == 0) {
+                    s4.add(null);
+                }else{
+                    s4.add(r.nextDouble());
+                }
+            }
+            s.setDouble(3, s4);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<Long> ts2 = new ArrayList<Long>();
+            for(int i = 0; i < numOfRows; i++) {
+                if(i % random == 0) {
+                    ts2.add(null);
+                }else{
+                    ts2.add(System.currentTimeMillis() + i);
+                }
+            }
+            s.setTimestamp(4, ts2);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<Integer> vals = new ArrayList<>();
+            for(int i = 0; i < numOfRows; i++) {
+                if(i % random == 0) {
+                    vals.add(null);
+                }else{
+                    vals.add(r.nextInt());
+                }
+            }
+            s.setInt(5, vals);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<Boolean> sb = new ArrayList<>();
+            for(int i = 0; i < numOfRows; i++) {
+                if(i % random == 0) {
+                    sb.add(null);
+                }else{
+                    sb.add(i % 2 == 0 ? true : false);
+                }
+            }
+            s.setBoolean(6, sb);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<String> s5 = new ArrayList<String>();
+            for(int i = 0; i < numOfRows; i++) {
+                if(i % random == 0) {
+                    s5.add(null);
+                }else{
+                    s5.add("test" + i % 10);
+                }
+            }
+            s.setString(7, s5, 10);
+
+            s.columnDataAddBatch();
+            s.columnDataExecuteBatch();
+            s.columnDataCloseBatch();
+
+            String sql = "select * from weather_test";
+            PreparedStatement statement = conn.prepareStatement(sql);
+            ResultSet rs = statement.executeQuery();
+            int rows = 0;
+            while(rs.next()) {
+                rows++;
+            }
+            Assert.assertEquals(numOfRows, rows);
+        }
+    }
+
+    @Test
+    public void bindDataSelectColumnTest() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        int numOfRows = 1000;
+
+        for (int loop = 0; loop < 10; loop++){
+            stmt.execute("drop table if exists weather_test");
+            stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))");
+
+            TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? (ts, f1, f7) values(?, ?, ?)");
+            Random r = new Random();
+            s.setTableName("weather_test");
+
+            ArrayList<Long> ts = new ArrayList<Long>();
+            for(int i = 0; i < numOfRows; i++) {
+                ts.add(System.currentTimeMillis() + i);
+            }
+            s.setTimestamp(0, ts);
+
+            int random = 10 + r.nextInt(5);
+            ArrayList<String> s2 = new ArrayList<String>();
+            for(int i = 0; i < numOfRows; i++) {
+                if(i % random == 0) {
+                    s2.add(null);
+                }else{
+                    s2.add("分支" + i % 4);
+                }
+            }
+            s.setNString(1, s2, 4);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<String> s3 = new ArrayList<String>();
+            for(int i = 0; i < numOfRows; i++) {
+                if(i % random == 0) {
+                    s3.add(null);
+                }else{
+                    s3.add("test" + i % 10);
+                }
+            }
+            s.setString(2, s3, 10);
+
+            s.columnDataAddBatch();
+            s.columnDataExecuteBatch();
+            s.columnDataCloseBatch();
+
+            String sql = "select * from weather_test";
+            PreparedStatement statement = conn.prepareStatement(sql);
+            ResultSet rs = statement.executeQuery();
+            int rows = 0;
+            while(rs.next()) {
+                rows++;
+            }
+            Assert.assertEquals(numOfRows, rows);
+        }
+    }
+
+    @Test
+    public void bindDataWithSingleTagTest() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        String types[] = new String[] {"tinyint", "smallint", "int", "bigint", "bool", "float", "double", "binary(10)", "nchar(10)"};
+
+        for (String type : types) {
+            stmt.execute("drop table if exists weather_test");
+            stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t " + type + ")");
+
+            int numOfRows = 1;
+
+            TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) values(?, ?, ?)");
+            Random r = new Random();
+            s.setTableName("w1");
+
+            switch(type) {
+                case "tinyint":
+                case "smallint":
+                case "int":
+                case "bigint":
+                    s.setTagInt(0, 1);
+                    break;
+                case "float":
+                    s.setTagFloat(0, 1.23f);
+                    break;
+                case "double":
+                    s.setTagDouble(0, 3.14159265);
+                    break;
+                case "bool":
+                    s.setTagBoolean(0, true);
+                    break;
+                case "binary(10)":
+                    s.setTagString(0, "test");
+                    break;
+                case "nchar(10)":
+                    s.setTagNString(0, "test");
+                    break;
+                default:
+                    break;
+            }
+
+
+            ArrayList<Long> ts = new ArrayList<Long>();
+            for(int i = 0; i < numOfRows; i++) {
+                ts.add(System.currentTimeMillis() + i);
+            }
+            s.setTimestamp(0, ts);
+
+            int random = 10 + r.nextInt(5);
+            ArrayList<String> s2 = new ArrayList<String>();
+            for(int i = 0; i < numOfRows; i++) {
+                s2.add("分支" + i % 4);
+            }
+            s.setNString(1, s2, 10);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<String> s3 = new ArrayList<String>();
+            for(int i = 0; i < numOfRows; i++) {
+                s3.add("test" + i % 4);
+            }
+            s.setString(2, s3, 10);
+
+            s.columnDataAddBatch();
+            s.columnDataExecuteBatch();
+            s.columnDataCloseBatch();
+
+            String sql = "select * from weather_test";
+            PreparedStatement statement = conn.prepareStatement(sql);
+            ResultSet rs = statement.executeQuery();
+            int rows = 0;
+            while(rs.next()) {
+                rows++;
+            }
+            Assert.assertEquals(numOfRows, rows);
+        }
+    }
+
+
+    @Test
+    public void bindDataWithMultipleTagsTest() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        stmt.execute("drop table if exists weather_test");
+        stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))");
+
+        int numOfRows = 1;
+
+        TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)");
+        s.setTableName("w2");
+        s.setTagInt(0, 1);
+        s.setTagString(1, "test");
+
+
+        ArrayList<Long> ts = new ArrayList<Long>();
+        for(int i = 0; i < numOfRows; i++) {
+            ts.add(System.currentTimeMillis() + i);
+        }
+        s.setTimestamp(0, ts);
+
+        ArrayList<String> s2 = new ArrayList<String>();
+        for(int i = 0; i < numOfRows; i++) {
+            s2.add("test" + i % 4);
+        }
+        s.setString(1, s2, 10);
+
+        s.columnDataAddBatch();
+        s.columnDataExecuteBatch();
+        s.columnDataCloseBatch();
+
+        String sql = "select * from weather_test";
+        PreparedStatement statement = conn.prepareStatement(sql);
+        ResultSet rs = statement.executeQuery();
+        int rows = 0;
+        while(rs.next()) {
+            rows++;
+        }
+        Assert.assertEquals(numOfRows, rows);
+
+    }
+
     @Test
     public void createTwoSameDbTest() throws SQLException {
         Statement stmt = conn.createStatement();

@@ -177,6 +177,7 @@ public class TSDBResultSetTest {
         rs.getAsciiStream("f1");
     }

+    @SuppressWarnings("deprecation")
     @Test(expected = SQLFeatureNotSupportedException.class)
     public void getUnicodeStream() throws SQLException {
         rs.getUnicodeStream("f1");

@@ -242,7 +242,7 @@ def _load_taos_linux():


 def _load_taos_darwin():
-    return ctypes.cDLL('libtaos.dylib')
+    return ctypes.CDLL('libtaos.dylib')


 def _load_taos_windows():

@@ -38,21 +38,6 @@
 #define cDebug(...) { if (cqDebugFlag & DEBUG_DEBUG) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
 #define cTrace(...) { if (cqDebugFlag & DEBUG_TRACE) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}

-typedef struct {
-  int32_t vgId;
-  int32_t master;
-  int32_t num;  // number of continuous streams
-  char    user[TSDB_USER_LEN];
-  char    pass[TSDB_KEY_LEN];
-  char    db[TSDB_DB_NAME_LEN];
-  FCqWrite cqWrite;
-  struct SCqObj *pHead;
-  void   *dbConn;
-  void   *tmrCtrl;
-  pthread_mutex_t mutex;
-  int32_t delete;
-  int32_t cqObjNum;
-} SCqContext;

 typedef struct SCqObj {
   tmr_h tmrId;

@@ -439,7 +424,7 @@ static void cqProcessCreateTimer(void *param, void *tmrId) {

 // inner implement in tscStream.c
 TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* desName, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
-                                       int64_t stime, void *param, void (*callback)(void *));
+                                       int64_t stime, void *param, void (*callback)(void *), void* cqhandle);

 static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
   pObj->pContext = pContext;

@@ -453,7 +438,8 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
   pObj->tmrId = 0;

   if (pObj->pStream == NULL) {
-    pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL);
+    pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes, \
+                                              INT64_MIN, (void *)pObj->rid, NULL, pContext);

     // TODO the pObj->pStream may be released if error happens
     if (pObj->pStream) {

@@ -548,7 +548,7 @@ typedef struct {
   int32_t totalBlocks;
   int32_t maxTables;
   int32_t daysPerFile;
-  int32_t daysToKeep;
+  int32_t daysToKeep0;
   int32_t daysToKeep1;
   int32_t daysToKeep2;
   int32_t minRowsPerFileBlock;

@@ -31,6 +31,23 @@ typedef struct {
   FCqWrite cqWrite;
 } SCqCfg;

+// SCqContext
+typedef struct {
+  int32_t vgId;
+  int32_t master;
+  int32_t num;  // number of continuous streams
+  char    user[TSDB_USER_LEN];
+  char    pass[TSDB_KEY_LEN];
+  char    db[TSDB_DB_NAME_LEN];
+  FCqWrite cqWrite;
+  struct SCqObj *pHead;
+  void   *dbConn;
+  void   *tmrCtrl;
+  pthread_mutex_t mutex;
+  int32_t delete;
+  int32_t cqObjNum;
+} SCqContext;
+
 // the following API shall be called by vnode
 void *cqOpen(void *ahandle, const SCqCfg *pCfg);
 void cqClose(void *handle);

@@ -94,7 +94,7 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH);
 int tsdbCloseRepo(STsdbRepo *repo, int toCommit);
 int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg);
 int tsdbGetState(STsdbRepo *repo);
+bool tsdbInCompact(STsdbRepo *repo);
 // --------- TSDB TABLE DEFINITION
 typedef struct {
   uint64_t uid;  // the unique table ID

@@ -104,28 +104,28 @@
 #define TK_QTIME 85
 #define TK_CONNS 86
 #define TK_STATE 87
-#define TK_KEEP 88
-#define TK_CACHE 89
-#define TK_REPLICA 90
-#define TK_QUORUM 91
-#define TK_DAYS 92
-#define TK_MINROWS 93
-#define TK_MAXROWS 94
-#define TK_BLOCKS 95
-#define TK_CTIME 96
-#define TK_WAL 97
-#define TK_FSYNC 98
-#define TK_COMP 99
-#define TK_PRECISION 100
-#define TK_UPDATE 101
-#define TK_CACHELAST 102
-#define TK_PARTITIONS 103
-#define TK_LP 104
-#define TK_RP 105
-#define TK_UNSIGNED 106
-#define TK_TAGS 107
-#define TK_USING 108
-#define TK_COMMA 109
+#define TK_COMMA 88
+#define TK_KEEP 89
+#define TK_CACHE 90
+#define TK_REPLICA 91
+#define TK_QUORUM 92
+#define TK_DAYS 93
+#define TK_MINROWS 94
+#define TK_MAXROWS 95
+#define TK_BLOCKS 96
+#define TK_CTIME 97
+#define TK_WAL 98
+#define TK_FSYNC 99
+#define TK_COMP 100
+#define TK_PRECISION 101
+#define TK_UPDATE 102
+#define TK_CACHELAST 103
+#define TK_PARTITIONS 104
+#define TK_LP 105
+#define TK_RP 106
+#define TK_UNSIGNED 107
+#define TK_TAGS 108
+#define TK_USING 109
 #define TK_AS 110
 #define TK_NULL 111
 #define TK_SELECT 112

@@ -212,7 +212,6 @@



 #define TK_SPACE 300
 #define TK_COMMENT 301
 #define TK_ILLEGAL 302

@@ -398,7 +398,10 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {

   time_t tt;
   int32_t ms = 0;
-  if (precision == TSDB_TIME_PRECISION_MICRO) {
+  if (precision == TSDB_TIME_PRECISION_NANO) {
+    tt = (time_t)(val / 1000000000);
+    ms = val % 1000000000;
+  } else if (precision == TSDB_TIME_PRECISION_MICRO) {
     tt = (time_t)(val / 1000000);
     ms = val % 1000000;
   } else {

@@ -419,7 +422,9 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
 #endif
   if (tt <= 0 && ms < 0) {
     tt--;
-    if (precision == TSDB_TIME_PRECISION_MICRO) {
+    if (precision == TSDB_TIME_PRECISION_NANO) {
+      ms += 1000000000;
+    } else if (precision == TSDB_TIME_PRECISION_MICRO) {
       ms += 1000000;
     } else {
       ms += 1000;

@@ -427,9 +432,11 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
   }

   struct tm* ptm = localtime(&tt);
-  size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
+  size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", ptm);

-  if (precision == TSDB_TIME_PRECISION_MICRO) {
+  if (precision == TSDB_TIME_PRECISION_NANO) {
+    sprintf(buf + pos, ".%09d", ms);
+  } else if (precision == TSDB_TIME_PRECISION_MICRO) {
     sprintf(buf + pos, ".%06d", ms);
   } else {
     sprintf(buf + pos, ".%03d", ms);

@@ -778,6 +785,8 @@ static int calcColWidth(TAOS_FIELD* field, int precision) {
     case TSDB_DATA_TYPE_TIMESTAMP:
       if (args.is_raw_time) {
         return MAX(14, width);
+      } if (precision == TSDB_TIME_PRECISION_NANO) {
+        return MAX(29, width);
       } else if (precision == TSDB_TIME_PRECISION_MICRO) {
         return MAX(26, width);  // '2020-01-01 00:00:00.000000'
       } else {

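The formatTimestamp and calcColWidth hunks above extend the existing milli/microsecond handling to nanoseconds with the same pattern: divide the raw value by the number of sub-second units per second to get epoch seconds, then print the remainder zero-padded to 3, 6, or 9 digits (the nanosecond column grows to 29 characters). A small standalone sketch of that arithmetic; the helper name and parameters are illustrative only, not taken from the commit:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative only: unitsPerSec is 1000 for ms, 1000000 for us, 1000000000 for ns,
 * and digits is the matching fractional width (3, 6, or 9). */
static void formatTsSketch(char *buf, size_t cap, int64_t val, int64_t unitsPerSec, int digits) {
  time_t sec = (time_t)(val / unitsPerSec);   /* epoch seconds */
  int64_t frac = val % unitsPerSec;           /* sub-second remainder */
  struct tm tmBuf;
  localtime_r(&sec, &tmBuf);
  size_t pos = strftime(buf, cap, "%Y-%m-%d %H:%M:%S", &tmBuf);
  snprintf(buf + pos, cap - pos, ".%0*d", digits, (int)frac);
}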
@@ -1213,7 +1213,6 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
   }

   int totalLen = 0;
-  char temp[16000];

   // fetch the records row by row
   while((row = taos_fetch_row(res))) {

@@ -1224,6 +1223,7 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
       memset(databuf, 0, 100*1024*1024);
     }
     num_rows++;
+    char temp[16000] = {0};
     int len = taos_print_row(temp, row, fields, num_fields);
     len += sprintf(temp + len, "\n");
     //printf("query result:%s\n", temp);

@@ -1852,7 +1852,9 @@ static void printfQueryMeta() {

 static char* formatTimestamp(char* buf, int64_t val, int precision) {
   time_t tt;
-  if (precision == TSDB_TIME_PRECISION_MICRO) {
+  if (precision == TSDB_TIME_PRECISION_NANO) {
+    tt = (time_t)(val / 1000000000);
+  } else if (precision == TSDB_TIME_PRECISION_MICRO) {
     tt = (time_t)(val / 1000000);
   } else {
     tt = (time_t)(val / 1000);

@@ -1873,7 +1875,9 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
   struct tm* ptm = localtime(&tt);
   size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);

-  if (precision == TSDB_TIME_PRECISION_MICRO) {
+  if (precision == TSDB_TIME_PRECISION_NANO) {
+    sprintf(buf + pos, ".%09d", (int)(val % 1000000000));
+  } else if (precision == TSDB_TIME_PRECISION_MICRO) {
     sprintf(buf + pos, ".%06d", (int)(val % 1000000));
   } else {
     sprintf(buf + pos, ".%03d", (int)(val % 1000));

@@ -6255,6 +6259,8 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     timePrec = TSDB_TIME_PRECISION_MILLI;
   } else if (0 == strncasecmp(precision, "us", 2)) {
     timePrec = TSDB_TIME_PRECISION_MICRO;
+  } else if (0 == strncasecmp(precision, "ns", 2)) {
+    timePrec = TSDB_TIME_PRECISION_NANO;
   } else {
     errorPrint("Not support precision: %s\n", precision);
     exit(-1);

@@ -161,7 +161,7 @@ typedef struct {
  int32_t totalBlocks;
  int32_t maxTables;
  int32_t daysPerFile;
- int32_t daysToKeep;
+ int32_t daysToKeep0;
  int32_t daysToKeep1;
  int32_t daysToKeep2;
  int32_t minRowsPerFileBlock;

@@ -263,26 +263,27 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
     return TSDB_CODE_MND_INVALID_DB_OPTION_DAYS;
   }

-  if (pCfg->daysToKeep < TSDB_MIN_KEEP || pCfg->daysToKeep > TSDB_MAX_KEEP) {
-    mError("invalid db option daysToKeep:%d valid range: [%d, %d]", pCfg->daysToKeep, TSDB_MIN_KEEP, TSDB_MAX_KEEP);
+  if (pCfg->daysToKeep0 < TSDB_MIN_KEEP || pCfg->daysToKeep0 > TSDB_MAX_KEEP) {
+    mError("invalid db option daysToKeep:%d valid range: [%d, %d]", pCfg->daysToKeep0, TSDB_MIN_KEEP, TSDB_MAX_KEEP);
     return TSDB_CODE_MND_INVALID_DB_OPTION_KEEP;
   }

-  if (pCfg->daysToKeep < pCfg->daysPerFile) {
-    mError("invalid db option daysToKeep:%d should larger than daysPerFile:%d", pCfg->daysToKeep, pCfg->daysPerFile);
+  if (pCfg->daysToKeep0 < pCfg->daysPerFile) {
+    mError("invalid db option daysToKeep:%d should larger than daysPerFile:%d", pCfg->daysToKeep0, pCfg->daysPerFile);
     return TSDB_CODE_MND_INVALID_DB_OPTION_KEEP;
   }

-  if (pCfg->daysToKeep2 < TSDB_MIN_KEEP || pCfg->daysToKeep2 > pCfg->daysToKeep) {
-    mError("invalid db option daysToKeep2:%d valid range: [%d, %d]", pCfg->daysToKeep2, TSDB_MIN_KEEP, pCfg->daysToKeep);
+  if (pCfg->daysToKeep1 < pCfg->daysToKeep0 || pCfg->daysToKeep1 > TSDB_MAX_KEEP) {
+    mError("invalid db option daysToKeep1:%d valid range: [%d, %d]", pCfg->daysToKeep1, pCfg->daysToKeep0, TSDB_MAX_KEEP);
     return TSDB_CODE_MND_INVALID_DB_OPTION_KEEP;
   }

-  if (pCfg->daysToKeep1 < TSDB_MIN_KEEP || pCfg->daysToKeep1 > pCfg->daysToKeep2) {
-    mError("invalid db option daysToKeep1:%d valid range: [%d, %d]", pCfg->daysToKeep1, TSDB_MIN_KEEP, pCfg->daysToKeep2);
+  if (pCfg->daysToKeep2 < pCfg->daysToKeep1 || pCfg->daysToKeep2 > TSDB_MAX_KEEP) {
+    mError("invalid db option daysToKeep2:%d valid range: [%d, %d]", pCfg->daysToKeep2, pCfg->daysToKeep1, TSDB_MAX_KEEP);
     return TSDB_CODE_MND_INVALID_DB_OPTION_KEEP;
   }

+
   if (pCfg->maxRowsPerFileBlock < TSDB_MIN_MAX_ROW_FBLOCK || pCfg->maxRowsPerFileBlock > TSDB_MAX_MAX_ROW_FBLOCK) {
     mError("invalid db option maxRowsPerFileBlock:%d valid range: [%d, %d]", pCfg->maxRowsPerFileBlock,
            TSDB_MIN_MAX_ROW_FBLOCK, TSDB_MAX_MAX_ROW_FBLOCK);

@@ -380,9 +381,9 @@ static void mnodeSetDefaultDbCfg(SDbCfg *pCfg) {
   if (pCfg->totalBlocks < 0) pCfg->totalBlocks = tsBlocksPerVnode;
   if (pCfg->maxTables < 0) pCfg->maxTables = tsMaxTablePerVnode;
   if (pCfg->daysPerFile < 0) pCfg->daysPerFile = tsDaysPerFile;
-  if (pCfg->daysToKeep < 0) pCfg->daysToKeep = tsDaysToKeep;
-  if (pCfg->daysToKeep1 < 0) pCfg->daysToKeep1 = pCfg->daysToKeep;
-  if (pCfg->daysToKeep2 < 0) pCfg->daysToKeep2 = pCfg->daysToKeep;
+  if (pCfg->daysToKeep2 < 0) pCfg->daysToKeep2 = tsDaysToKeep;
+  if (pCfg->daysToKeep1 < 0) pCfg->daysToKeep1 = pCfg->daysToKeep2;
+  if (pCfg->daysToKeep0 < 0) pCfg->daysToKeep0 = pCfg->daysToKeep1;
   if (pCfg->minRowsPerFileBlock < 0) pCfg->minRowsPerFileBlock = tsMinRowsInFileBlock;
   if (pCfg->maxRowsPerFileBlock < 0) pCfg->maxRowsPerFileBlock = tsMaxRowsInFileBlock;
   if (pCfg->fsyncPeriod <0) pCfg->fsyncPeriod = tsFsyncPeriod;

@@ -437,7 +438,7 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCreateDbMsg *pCreate, SMnodeMsg *
     .totalBlocks = pCreate->totalBlocks,
     .maxTables = pCreate->maxTables,
     .daysPerFile = pCreate->daysPerFile,
-    .daysToKeep = pCreate->daysToKeep,
+    .daysToKeep0 = pCreate->daysToKeep0,
     .daysToKeep1 = pCreate->daysToKeep1,
     .daysToKeep2 = pCreate->daysToKeep2,
     .minRowsPerFileBlock = pCreate->minRowsPerFileBlock,

@@ -613,7 +614,12 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn

   pShow->bytes[cols] = 24 + VARSTR_HEADER_SIZE;
   pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
-  strcpy(pSchema[cols].name, "keep0,keep1,keep(D)");
+#ifdef _STORAGE
+  strcpy(pSchema[cols].name, "keep0,keep1,keep2");
+#else
+  strcpy(pSchema[cols].name, "keep");
+#endif
   pSchema[cols].bytes = htons(pShow->bytes[cols]);
   cols++;

@@ -779,7 +785,15 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;

     char tmp[128] = {0};
-    sprintf(tmp, "%d,%d,%d", pDb->cfg.daysToKeep1, pDb->cfg.daysToKeep2, pDb->cfg.daysToKeep);
+#ifdef _STORAGE
+    if (pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep1 || pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep2) {
+      sprintf(tmp, "%d,%d,%d", pDb->cfg.daysToKeep1, pDb->cfg.daysToKeep2, pDb->cfg.daysToKeep0);
+    } else {
+      sprintf(tmp, "%d,%d,%d", pDb->cfg.daysToKeep0, pDb->cfg.daysToKeep1, pDb->cfg.daysToKeep2);
+    }
+#else
+    sprintf(tmp, "%d", pDb->cfg.daysToKeep2);
+#endif
     STR_WITH_SIZE_TO_VARSTR(pWrite, tmp, strlen(tmp));
     cols++;

@@ -822,8 +836,13 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
 #endif

     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    char *prec = (pDb->cfg.precision == TSDB_TIME_PRECISION_MILLI) ? TSDB_TIME_PRECISION_MILLI_STR
-                                                                   : TSDB_TIME_PRECISION_MICRO_STR;
+    char *prec = NULL;
+    switch (pDb->cfg.precision) {
+      case TSDB_TIME_PRECISION_MILLI: prec = TSDB_TIME_PRECISION_MILLI_STR; break;
+      case TSDB_TIME_PRECISION_MICRO: prec = TSDB_TIME_PRECISION_MICRO_STR; break;
+      case TSDB_TIME_PRECISION_NANO: prec = TSDB_TIME_PRECISION_NANO_STR; break;
+      default: assert(false); break;
+    }
     STR_WITH_SIZE_TO_VARSTR(pWrite, prec, 2);
     cols++;

@@ -892,7 +911,7 @@ static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg) {
   pCreate->cacheBlockSize = htonl(pCreate->cacheBlockSize);
   pCreate->totalBlocks = htonl(pCreate->totalBlocks);
   pCreate->daysPerFile = htonl(pCreate->daysPerFile);
-  pCreate->daysToKeep = htonl(pCreate->daysToKeep);
+  pCreate->daysToKeep0 = htonl(pCreate->daysToKeep0);
   pCreate->daysToKeep1 = htonl(pCreate->daysToKeep1);
   pCreate->daysToKeep2 = htonl(pCreate->daysToKeep2);
   pCreate->commitTime = htonl(pCreate->commitTime);

@@ -919,7 +938,7 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) {
   int32_t cacheBlockSize = htonl(pAlter->cacheBlockSize);
   int32_t totalBlocks = htonl(pAlter->totalBlocks);
   int32_t daysPerFile = htonl(pAlter->daysPerFile);
-  int32_t daysToKeep = htonl(pAlter->daysToKeep);
+  int32_t daysToKeep0 = htonl(pAlter->daysToKeep0);
   int32_t daysToKeep1 = htonl(pAlter->daysToKeep1);
   int32_t daysToKeep2 = htonl(pAlter->daysToKeep2);
   int32_t minRows = htonl(pAlter->minRowsPerFileBlock);

@@ -938,6 +957,14 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) {

   terrno = TSDB_CODE_SUCCESS;

+  //UPGRATE FROM LOW VERSION, reorder it
+  if (pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep1 || pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep2) {
+    int32_t t = pDb->cfg.daysToKeep0;
+    newCfg.daysToKeep0 = pDb->cfg.daysToKeep1;
+    newCfg.daysToKeep1 = pDb->cfg.daysToKeep2;
+    newCfg.daysToKeep2 = t;
+  }
+
   if (cacheBlockSize > 0 && cacheBlockSize != pDb->cfg.cacheBlockSize) {
     mError("db:%s, can't alter cache option", pDb->name);
     terrno = TSDB_CODE_MND_INVALID_DB_OPTION;

@@ -962,17 +989,17 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) {
     terrno = TSDB_CODE_MND_INVALID_DB_OPTION;
   }

-  if (daysToKeep > 0 && daysToKeep != pDb->cfg.daysToKeep) {
-    mDebug("db:%s, daysToKeep:%d change to %d", pDb->name, pDb->cfg.daysToKeep, daysToKeep);
-    newCfg.daysToKeep = daysToKeep;
+  if (daysToKeep0 > 0 && (daysToKeep0 != pDb->cfg.daysToKeep0 || newCfg.daysToKeep0 != pDb->cfg.daysToKeep0)) {
+    mDebug("db:%s, daysToKeep:%d change to %d", pDb->name, pDb->cfg.daysToKeep0, daysToKeep0);
+    newCfg.daysToKeep0 = daysToKeep0;
   }

-  if (daysToKeep1 > 0 && daysToKeep1 != pDb->cfg.daysToKeep1) {
+  if (daysToKeep1 > 0 && (daysToKeep1 != pDb->cfg.daysToKeep1 || newCfg.daysToKeep1 != pDb->cfg.daysToKeep1)) {
     mDebug("db:%s, daysToKeep1:%d change to %d", pDb->name, pDb->cfg.daysToKeep1, daysToKeep1);
     newCfg.daysToKeep1 = daysToKeep1;
   }

-  if (daysToKeep2 > 0 && daysToKeep2 != pDb->cfg.daysToKeep2) {
+  if (daysToKeep2 > 0 && (daysToKeep2 != pDb->cfg.daysToKeep2 || newCfg.daysToKeep2 != pDb->cfg.daysToKeep2)) {
     mDebug("db:%s, daysToKeep2:%d change to %d", pDb->name, pDb->cfg.daysToKeep2, daysToKeep2);
     newCfg.daysToKeep2 = daysToKeep2;
   }

@@ -1065,8 +1092,8 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) {
   // community version can only change daysToKeep
   // but enterprise version can change all daysToKeep options
 #ifndef _STORAGE
-  newCfg.daysToKeep1 = newCfg.daysToKeep;
-  newCfg.daysToKeep2 = newCfg.daysToKeep;
+  newCfg.daysToKeep1 = newCfg.daysToKeep0;
+  newCfg.daysToKeep2 = newCfg.daysToKeep0;
 #endif

   return newCfg;

@@ -892,9 +892,9 @@ static SCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) {
   pCfg->totalBlocks = htonl(pDb->cfg.totalBlocks);
   pCfg->maxTables = htonl(maxTables + 1);
   pCfg->daysPerFile = htonl(pDb->cfg.daysPerFile);
-  pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep);
-  pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep1);
-  pCfg->daysToKeep2 = htonl(pDb->cfg.daysToKeep2);
+  pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep2);   //FROM DB TO VNODE MAP:
+  pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep0);  //keep0,keep1,keep2 in SQL is mapped to keep1,keep2,keep in vnode
+  pCfg->daysToKeep2 = htonl(pDb->cfg.daysToKeep1);  //user,client,mnode use keep0,keep1,keep2; vnode use keep1,keep2,keep
   pCfg->minRowsPerFileBlock = htonl(pDb->cfg.minRowsPerFileBlock);
   pCfg->maxRowsPerFileBlock = htonl(pDb->cfg.maxRowsPerFileBlock);
   pCfg->fsyncPeriod = htonl(pDb->cfg.fsyncPeriod);

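The comments in the hunk above describe the renaming done throughout this commit: the client and mnode now track the retention tiers as keep0, keep1, keep2 (ordered smallest to largest), while the vnode message keeps its older field names daysToKeep1, daysToKeep2, daysToKeep. A minimal sketch of that mapping, with hypothetical struct names standing in for the real message types:

#include <stdint.h>

/* Hypothetical stand-ins for the mnode-side and vnode-side configuration structs. */
typedef struct { int32_t daysToKeep0, daysToKeep1, daysToKeep2; } DbKeepSketch;
typedef struct { int32_t daysToKeep1, daysToKeep2, daysToKeep; } VnodeKeepSketch;

/* keep0,keep1,keep2 on the DB/mnode side map to keep1,keep2,keep on the vnode side. */
static VnodeKeepSketch mapKeepSketch(DbKeepSketch db) {
  VnodeKeepSketch v;
  v.daysToKeep1 = db.daysToKeep0;
  v.daysToKeep2 = db.daysToKeep1;
  v.daysToKeep = db.daysToKeep2;
  return v;
}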
@@ -55,6 +55,13 @@ static FORCE_INLINE int64_t taosGetTimestampUs() {
   return (int64_t)systemTime.tv_sec * 1000000L + (int64_t)systemTime.tv_usec;
 }

+//@return timestamp in nanosecond
+static FORCE_INLINE int64_t taosGetTimestampNs() {
+  struct timespec systemTime = {0};
+  clock_gettime(CLOCK_REALTIME, &systemTime);
+  return (int64_t)systemTime.tv_sec * 1000000000L + (int64_t)systemTime.tv_nsec;
+}
+
 /*
  * @return timestamp decided by global conf variable, tsTimePrecision
  * if precision == TSDB_TIME_PRECISION_MICRO, it returns timestamp in microsecond.

@@ -63,6 +70,8 @@ static FORCE_INLINE int64_t taosGetTimestampUs() {
 static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
   if (precision == TSDB_TIME_PRECISION_MICRO) {
     return taosGetTimestampUs();
+  } else if (precision == TSDB_TIME_PRECISION_NANO) {
+    return taosGetTimestampNs();
   }else {
     return taosGetTimestampMs();
   }

@@ -88,12 +97,13 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
 int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
 int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);

-int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts);
-int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
+int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts, int32_t timePrecision);
+int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision);

 int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
 void deltaToUtcInitOnce();

+int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision);
 #ifdef __cplusplus
 }
 #endif

@ -59,6 +59,9 @@ bool taosMbsToUcs4(char *mbs, size_t mbsLength, char *ucs4, int32_t ucs4_max_len
|
||||||
iconv_close(cd);
|
iconv_close(cd);
|
||||||
if (len != NULL) {
|
if (len != NULL) {
|
||||||
*len = (int32_t)(ucs4_max_len - outLeft);
|
*len = (int32_t)(ucs4_max_len - outLeft);
|
||||||
|
if (*len < 0) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -14,7 +14,13 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#define _BSD_SOURCE
|
#define _BSD_SOURCE
|
||||||
|
|
||||||
|
#ifdef DARWIN
|
||||||
#define _XOPEN_SOURCE
|
#define _XOPEN_SOURCE
|
||||||
|
#else
|
||||||
|
#define _XOPEN_SOURCE 500
|
||||||
|
#endif
|
||||||
|
|
||||||
#define _DEFAULT_SOURCE
|
#define _DEFAULT_SOURCE
|
||||||
|
|
||||||
#include "os.h"
|
#include "os.h"
|
||||||
|
@ -119,8 +125,9 @@ int64_t parseFraction(char* str, char** end, int32_t timePrec) {
|
||||||
|
|
||||||
const int32_t MILLI_SEC_FRACTION_LEN = 3;
|
const int32_t MILLI_SEC_FRACTION_LEN = 3;
|
||||||
const int32_t MICRO_SEC_FRACTION_LEN = 6;
|
const int32_t MICRO_SEC_FRACTION_LEN = 6;
|
||||||
|
const int32_t NANO_SEC_FRACTION_LEN = 9;
|
||||||
|
|
||||||
int32_t factor[6] = {1, 10, 100, 1000, 10000, 100000};
|
int32_t factor[9] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000};
|
||||||
int32_t times = 1;
|
int32_t times = 1;
|
||||||
|
|
||||||
while (str[i] >= '0' && str[i] <= '9') {
|
while (str[i] >= '0' && str[i] <= '9') {
|
||||||
|
@ -140,12 +147,17 @@ int64_t parseFraction(char* str, char** end, int32_t timePrec) {
|
||||||
}
|
}
|
||||||
|
|
||||||
times = MILLI_SEC_FRACTION_LEN - i;
|
times = MILLI_SEC_FRACTION_LEN - i;
|
||||||
} else {
|
} else if (timePrec == TSDB_TIME_PRECISION_MICRO) {
|
||||||
assert(timePrec == TSDB_TIME_PRECISION_MICRO);
|
|
||||||
if (i >= MICRO_SEC_FRACTION_LEN) {
|
if (i >= MICRO_SEC_FRACTION_LEN) {
|
||||||
i = MICRO_SEC_FRACTION_LEN;
|
i = MICRO_SEC_FRACTION_LEN;
|
||||||
}
|
}
|
||||||
times = MICRO_SEC_FRACTION_LEN - i;
|
times = MICRO_SEC_FRACTION_LEN - i;
|
||||||
|
} else {
|
||||||
|
assert(timePrec == TSDB_TIME_PRECISION_NANO);
|
||||||
|
if (i >= NANO_SEC_FRACTION_LEN) {
|
||||||
|
i = NANO_SEC_FRACTION_LEN;
|
||||||
|
}
|
||||||
|
times = NANO_SEC_FRACTION_LEN - i;
|
||||||
}
|
}
|
||||||
|
|
||||||
fraction = strnatoi(str, i) * factor[times];
|
fraction = strnatoi(str, i) * factor[times];
|
||||||
|
@ -202,7 +214,9 @@ int32_t parseTimezone(char* str, int64_t* tzOffset) {
|
||||||
* 2013-04-12T15:52:01.123+0800
|
* 2013-04-12T15:52:01.123+0800
|
||||||
*/
|
*/
|
||||||
int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec) {
|
int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec) {
|
||||||
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : 1000000;
|
|
||||||
|
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 :
|
||||||
|
(timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000);
|
||||||
int64_t tzOffset = 0;
|
int64_t tzOffset = 0;
|
||||||
|
|
||||||
struct tm tm = {0};
|
struct tm tm = {0};
|
||||||
|
@ -287,7 +301,8 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : 1000000;
|
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 :
|
||||||
|
(timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000);
|
||||||
*time = factor * seconds + fraction;
|
*time = factor * seconds + fraction;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -315,37 +330,50 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : 1000000;
|
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 :
|
||||||
|
(timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000);
|
||||||
*time = factor * seconds + fraction;
|
*time = factor * seconds + fraction;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision) {
|
||||||
static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
|
assert(fromPrecision == TSDB_TIME_PRECISION_MILLI ||
|
||||||
*result = val;
|
fromPrecision == TSDB_TIME_PRECISION_MICRO ||
|
||||||
|
fromPrecision == TSDB_TIME_PRECISION_NANO);
|
||||||
int64_t factor = 1000L;
|
assert(toPrecision == TSDB_TIME_PRECISION_MILLI ||
|
||||||
|
toPrecision == TSDB_TIME_PRECISION_MICRO ||
|
||||||
|
toPrecision == TSDB_TIME_PRECISION_NANO);
|
||||||
|
static double factors[3][3] = { {1., 1000., 1000000.},
|
||||||
|
{1.0 / 1000, 1., 1000.},
|
||||||
|
{1.0 / 1000000, 1.0 / 1000, 1.} };
|
||||||
|
return (int64_t)((double)time * factors[fromPrecision][toPrecision]);
|
||||||
|
}
|
||||||
|
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
|
||||||
|
|
||||||
switch (unit) {
|
switch (unit) {
|
||||||
case 's':
|
case 's':
|
||||||
(*result) *= MILLISECOND_PER_SECOND*factor;
|
(*result) = convertTimePrecision(val * MILLISECOND_PER_SECOND, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||||
break;
|
break;
|
||||||
case 'm':
|
case 'm':
|
||||||
(*result) *= MILLISECOND_PER_MINUTE*factor;
|
(*result) = convertTimePrecision(val * MILLISECOND_PER_MINUTE, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||||
break;
|
break;
|
||||||
case 'h':
|
case 'h':
|
||||||
(*result) *= MILLISECOND_PER_HOUR*factor;
|
(*result) = convertTimePrecision(val * MILLISECOND_PER_HOUR, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||||
break;
|
break;
|
||||||
case 'd':
|
case 'd':
|
||||||
(*result) *= MILLISECOND_PER_DAY*factor;
|
(*result) = convertTimePrecision(val * MILLISECOND_PER_DAY, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||||
break;
|
break;
|
||||||
case 'w':
|
case 'w':
|
||||||
(*result) *= MILLISECOND_PER_WEEK*factor;
|
(*result) = convertTimePrecision(val * MILLISECOND_PER_WEEK, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||||
break;
|
break;
|
||||||
case 'a':
|
case 'a':
|
||||||
(*result) *= factor;
|
(*result) = convertTimePrecision(val, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||||
break;
|
break;
|
||||||
case 'u':
|
case 'u':
|
||||||
|
(*result) = convertTimePrecision(val, TSDB_TIME_PRECISION_MICRO, timePrecision);
|
||||||
|
break;
|
||||||
|
case 'b':
|
||||||
|
(*result) = convertTimePrecision(val, TSDB_TIME_PRECISION_NANO, timePrecision);
|
||||||
break;
|
break;
|
||||||
default: {
|
default: {
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -357,6 +385,8 @@ static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
* b - nanoseconds;
|
||||||
|
* u - microseconds;
|
||||||
* a - Millionseconds
|
* a - Millionseconds
|
||||||
* s - Seconds
|
* s - Seconds
|
||||||
* m - Minutes
|
* m - Minutes
|
||||||
|
@ -366,7 +396,7 @@ static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
|
||||||
* n - Months (30 days)
|
* n - Months (30 days)
|
||||||
* y - Years (365 days)
|
* y - Years (365 days)
|
||||||
*/
|
*/
|
||||||
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration) {
|
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration, int32_t timePrecision) {
|
||||||
errno = 0;
|
errno = 0;
|
||||||
char* endPtr = NULL;
|
char* endPtr = NULL;
|
||||||
|
|
||||||
|
@ -382,10 +412,10 @@ int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration)
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
return getDurationInUs(timestamp, unit, duration);
|
return getDuration(timestamp, unit, duration, timePrecision);
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
|
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision) {
|
||||||
errno = 0;
|
errno = 0;
|
||||||
|
|
||||||
/* get the basic numeric value */
|
/* get the basic numeric value */
|
||||||
|
@ -399,7 +429,7 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
return getDurationInUs(*duration, *unit, duration);
|
return getDuration(*duration, *unit, duration, timePrecision);
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
|
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
|
||||||
|
|
|
@ -64,8 +64,8 @@ void httpJsonOriginString(JsonBuf* buf, char* sVal, int32_t len);
|
||||||
void httpJsonStringForTransMean(JsonBuf* buf, char* SVal, int32_t maxLen);
|
void httpJsonStringForTransMean(JsonBuf* buf, char* SVal, int32_t maxLen);
|
||||||
void httpJsonInt64(JsonBuf* buf, int64_t num);
|
void httpJsonInt64(JsonBuf* buf, int64_t num);
|
||||||
void httpJsonUInt64(JsonBuf* buf, uint64_t num);
|
void httpJsonUInt64(JsonBuf* buf, uint64_t num);
|
||||||
void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us);
|
void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision);
|
||||||
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, bool us);
|
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision);
|
||||||
void httpJsonInt(JsonBuf* buf, int32_t num);
|
void httpJsonInt(JsonBuf* buf, int32_t num);
|
||||||
void httpJsonUInt(JsonBuf* buf, uint32_t num);
|
void httpJsonUInt(JsonBuf* buf, uint32_t num);
|
||||||
void httpJsonFloat(JsonBuf* buf, float num);
|
void httpJsonFloat(JsonBuf* buf, float num);
|
||||||
|
|
|
@ -262,42 +262,92 @@ void httpJsonUInt64(JsonBuf* buf, uint64_t num) {
|
||||||
buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRIu64, num);
|
buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRIu64, num);
|
||||||
}
|
}
|
||||||
|
|
||||||
void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) {
|
void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
|
||||||
char ts[35] = {0};
|
char ts[35] = {0};
|
||||||
struct tm* ptm;
|
struct tm* ptm;
|
||||||
int32_t precision = 1000;
|
|
||||||
if (us) {
|
int32_t fractionLen;
|
||||||
precision = 1000000;
|
char* format = NULL;
|
||||||
|
time_t quot = 0;
|
||||||
|
long mod = 0;
|
||||||
|
|
||||||
|
switch (timePrecision) {
|
||||||
|
case TSDB_TIME_PRECISION_MILLI: {
|
||||||
|
quot = t / 1000;
|
||||||
|
fractionLen = 5;
|
||||||
|
format = ".%03" PRId64;
|
||||||
|
mod = t % 1000;
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
time_t tt = t / precision;
|
case TSDB_TIME_PRECISION_MICRO: {
|
||||||
ptm = localtime(&tt);
|
quot = t / 1000000;
|
||||||
int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm);
|
fractionLen = 8;
|
||||||
if (us) {
|
format = ".%06" PRId64;
|
||||||
length += snprintf(ts + length, 8, ".%06" PRId64, t % precision);
|
mod = t % 1000000;
|
||||||
} else {
|
break;
|
||||||
length += snprintf(ts + length, 5, ".%03" PRId64, t % precision);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
case TSDB_TIME_PRECISION_NANO: {
|
||||||
|
quot = t / 1000000000;
|
||||||
|
fractionLen = 11;
|
||||||
|
format = ".%09" PRId64;
|
||||||
|
mod = t % 1000000000;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
assert(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
ptm = localtime(");
|
||||||
|
int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm);
|
||||||
|
length += snprintf(ts + length, fractionLen, format, mod);
|
||||||
|
|
||||||
httpJsonString(buf, ts, length);
|
httpJsonString(buf, ts, length);
|
||||||
}
|
}
|
||||||
|
|
||||||
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, bool us) {
|
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
|
||||||
char ts[40] = {0};
|
char ts[40] = {0};
|
||||||
struct tm* ptm;
|
struct tm* ptm;
|
||||||
int32_t precision = 1000;
|
|
||||||
if (us) {
|
int32_t fractionLen;
|
||||||
precision = 1000000;
|
char* format = NULL;
|
||||||
|
time_t quot = 0;
|
||||||
|
long mod = 0;
|
||||||
|
|
||||||
|
switch (timePrecision) {
|
||||||
|
case TSDB_TIME_PRECISION_MILLI: {
|
||||||
|
quot = t / 1000;
|
||||||
|
fractionLen = 5;
|
||||||
|
format = ".%03" PRId64;
|
||||||
|
mod = t % 1000;
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
time_t tt = t / precision;
|
case TSDB_TIME_PRECISION_MICRO: {
|
||||||
ptm = localtime(&tt);
|
quot = t / 1000000;
|
||||||
int32_t length = (int32_t)strftime(ts, 40, "%Y-%m-%dT%H:%M:%S", ptm);
|
fractionLen = 8;
|
||||||
if (us) {
|
format = ".%06" PRId64;
|
||||||
length += snprintf(ts + length, 8, ".%06" PRId64, t % precision);
|
mod = t % 1000000;
|
||||||
} else {
|
break;
|
||||||
length += snprintf(ts + length, 5, ".%03" PRId64, t % precision);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
case TSDB_TIME_PRECISION_NANO: {
|
||||||
|
quot = t / 1000000000;
|
||||||
|
fractionLen = 11;
|
||||||
|
format = ".%09" PRId64;
|
||||||
|
mod = t % 1000000000;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
assert(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
ptm = localtime(");
|
||||||
|
int32_t length = (int32_t)strftime(ts, 40, "%Y-%m-%dT%H:%M:%S", ptm);
|
||||||
|
length += snprintf(ts + length, fractionLen, format, mod);
|
||||||
length += (int32_t)strftime(ts + length, 40 - length, "%z", ptm);
|
length += (int32_t)strftime(ts + length, 40 - length, "%z", ptm);
|
||||||
|
|
||||||
httpJsonString(buf, ts, length);
|
httpJsonString(buf, ts, length);
|
||||||
|
|
|
@ -186,13 +186,11 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||||
if (timestampFormat == REST_TIMESTAMP_FMT_LOCAL_STRING) {
|
if (timestampFormat == REST_TIMESTAMP_FMT_LOCAL_STRING) {
|
||||||
httpJsonTimestamp(jsonBuf, *((int64_t *)row[i]),
|
httpJsonTimestamp(jsonBuf, *((int64_t *)row[i]), taos_result_precision(result));
|
||||||
taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO);
|
|
||||||
} else if (timestampFormat == REST_TIMESTAMP_FMT_TIMESTAMP) {
|
} else if (timestampFormat == REST_TIMESTAMP_FMT_TIMESTAMP) {
|
||||||
httpJsonInt64(jsonBuf, *((int64_t *)row[i]));
|
httpJsonInt64(jsonBuf, *((int64_t *)row[i]));
|
||||||
} else {
|
} else {
|
||||||
httpJsonUtcTimestamp(jsonBuf, *((int64_t *)row[i]),
|
httpJsonUtcTimestamp(jsonBuf, *((int64_t *)row[i]), taos_result_precision(result));
|
||||||
taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO);
|
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
|
|
@ -184,11 +184,17 @@ static void *monThreadFunc(void *param) {
|
||||||
static void monBuildMonitorSql(char *sql, int32_t cmd) {
|
static void monBuildMonitorSql(char *sql, int32_t cmd) {
|
||||||
memset(sql, 0, SQL_LENGTH);
|
memset(sql, 0, SQL_LENGTH);
|
||||||
|
|
||||||
|
#ifdef _STORAGE
|
||||||
|
char *keepValue = "30,30,30";
|
||||||
|
#else
|
||||||
|
char *keepValue = "30";
|
||||||
|
#endif
|
||||||
|
|
||||||
if (cmd == MON_CMD_CREATE_DB) {
|
if (cmd == MON_CMD_CREATE_DB) {
|
||||||
snprintf(sql, SQL_LENGTH,
|
snprintf(sql, SQL_LENGTH,
|
||||||
"create database if not exists %s replica 1 days 10 keep 30 cache %d "
|
"create database if not exists %s replica 1 days 10 keep %s cache %d "
|
||||||
"blocks %d precision 'us'",
|
"blocks %d precision 'us'",
|
||||||
tsMonitorDbName, TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MIN_TOTAL_BLOCKS);
|
tsMonitorDbName, keepValue, TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MIN_TOTAL_BLOCKS);
|
||||||
} else if (cmd == MON_CMD_CREATE_MT_DN) {
|
} else if (cmd == MON_CMD_CREATE_MT_DN) {
|
||||||
snprintf(sql, SQL_LENGTH,
|
snprintf(sql, SQL_LENGTH,
|
||||||
"create table if not exists %s.dn(ts timestamp"
|
"create table if not exists %s.dn(ts timestamp"
|
||||||
|
|
|
@ -464,6 +464,7 @@ typedef struct SSWindowOperatorInfo {
|
||||||
TSKEY prevTs; // previous timestamp
|
TSKEY prevTs; // previous timestamp
|
||||||
int32_t numOfRows; // number of rows
|
int32_t numOfRows; // number of rows
|
||||||
int32_t start; // start row index
|
int32_t start; // start row index
|
||||||
|
bool reptScan; // next round scan
|
||||||
} SSWindowOperatorInfo;
|
} SSWindowOperatorInfo;
|
||||||
|
|
||||||
typedef struct SStateWindowOperatorInfo {
|
typedef struct SStateWindowOperatorInfo {
|
||||||
|
@ -473,7 +474,7 @@ typedef struct SStateWindowOperatorInfo {
|
||||||
int32_t colIndex; // start row index
|
int32_t colIndex; // start row index
|
||||||
int32_t start;
|
int32_t start;
|
||||||
char* prevData; // previous data
|
char* prevData; // previous data
|
||||||
|
bool reptScan;
|
||||||
} SStateWindowOperatorInfo ;
|
} SStateWindowOperatorInfo ;
|
||||||
|
|
||||||
typedef struct SDistinctOperatorInfo {
|
typedef struct SDistinctOperatorInfo {
|
||||||
|
|
|
@ -46,7 +46,7 @@ enum SQL_NODE_FROM_TYPE {
|
||||||
|
|
||||||
enum SQL_EXPR_FLAG {
|
enum SQL_EXPR_FLAG {
|
||||||
EXPR_FLAG_TS_ERROR = 1,
|
EXPR_FLAG_TS_ERROR = 1,
|
||||||
EXPR_FLAG_US_TIMESTAMP = 2,
|
EXPR_FLAG_NS_TIMESTAMP = 2,
|
||||||
EXPR_FLAG_TIMESTAMP_VAR = 3,
|
EXPR_FLAG_TIMESTAMP_VAR = 3,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -238,9 +238,18 @@ acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K
|
||||||
Y.stat = M;
|
Y.stat = M;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
%type intitemlist {SArray*}
|
||||||
|
%destructor intitemlist {taosArrayDestroy($$);}
|
||||||
|
|
||||||
|
%type intitem {tVariant}
|
||||||
|
intitemlist(A) ::= intitemlist(X) COMMA intitem(Y). { A = tVariantListAppend(X, &Y, -1); }
|
||||||
|
intitemlist(A) ::= intitem(X). { A = tVariantListAppend(NULL, &X, -1); }
|
||||||
|
|
||||||
|
intitem(A) ::= INTEGER(X). { toTSDBType(X.type); tVariantCreate(&A, &X); }
|
||||||
|
|
||||||
%type keep {SArray*}
|
%type keep {SArray*}
|
||||||
%destructor keep {taosArrayDestroy($$);}
|
%destructor keep {taosArrayDestroy($$);}
|
||||||
keep(Y) ::= KEEP tagitemlist(X). { Y = X; }
|
keep(Y) ::= KEEP intitemlist(X). { Y = X; }
|
||||||
|
|
||||||
cache(Y) ::= CACHE INTEGER(X). { Y = X; }
|
cache(Y) ::= CACHE INTEGER(X). { Y = X; }
|
||||||
replica(Y) ::= REPLICA INTEGER(X). { Y = X; }
|
replica(Y) ::= REPLICA INTEGER(X). { Y = X; }
|
||||||
|
@ -559,10 +568,8 @@ session_option(X) ::= SESSION LP ids(V) cpxName(Z) COMMA tmvar(Y) RP. {
|
||||||
X.gap = Y;
|
X.gap = Y;
|
||||||
}
|
}
|
||||||
%type windowstate_option {SWindowStateVal}
|
%type windowstate_option {SWindowStateVal}
|
||||||
windowstate_option(X) ::= . {X.col.n = 0;}
|
windowstate_option(X) ::= . { X.col.n = 0; X.col.z = NULL;}
|
||||||
windowstate_option(X) ::= STATE_WINDOW LP ids(V) RP. {
|
windowstate_option(X) ::= STATE_WINDOW LP ids(V) RP. { X.col = V; }
|
||||||
X.col = V;
|
|
||||||
}
|
|
||||||
|
|
||||||
%type fill_opt {SArray*}
|
%type fill_opt {SArray*}
|
||||||
%destructor fill_opt {taosArrayDestroy($$);}
|
%destructor fill_opt {taosArrayDestroy($$);}
|
||||||
|
|
|
@ -3428,7 +3428,7 @@ static bool deriv_function_setup(SQLFunctionCtx *pCtx) {
|
||||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||||
SDerivInfo* pDerivInfo = GET_ROWCELL_INTERBUF(pResInfo);
|
SDerivInfo* pDerivInfo = GET_ROWCELL_INTERBUF(pResInfo);
|
||||||
|
|
||||||
pDerivInfo->ignoreNegative = pCtx->param[2].i64;
|
pDerivInfo->ignoreNegative = pCtx->param[1].i64;
|
||||||
pDerivInfo->prevTs = -1;
|
pDerivInfo->prevTs = -1;
|
||||||
pDerivInfo->tsWindow = pCtx->param[0].i64;
|
pDerivInfo->tsWindow = pCtx->param[0].i64;
|
||||||
pDerivInfo->valueSet = false;
|
pDerivInfo->valueSet = false;
|
||||||
|
@ -3440,10 +3440,8 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
|
||||||
SDerivInfo* pDerivInfo = GET_ROWCELL_INTERBUF(pResInfo);
|
SDerivInfo* pDerivInfo = GET_ROWCELL_INTERBUF(pResInfo);
|
||||||
|
|
||||||
void *data = GET_INPUT_DATA_LIST(pCtx);
|
void *data = GET_INPUT_DATA_LIST(pCtx);
|
||||||
bool isFirstBlock = (pDerivInfo->valueSet == false);
|
|
||||||
|
|
||||||
int32_t notNullElems = 0;
|
int32_t notNullElems = 0;
|
||||||
|
|
||||||
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
|
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
|
||||||
int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1;
|
int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1;
|
||||||
|
|
||||||
|
@ -3469,12 +3467,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
|
||||||
*pTimestamp = tsList[i];
|
*pTimestamp = tsList[i];
|
||||||
pOutput += 1;
|
pOutput += 1;
|
||||||
pTimestamp += 1;
|
pTimestamp += 1;
|
||||||
|
notNullElems++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pDerivInfo->prevValue = pData[i];
|
pDerivInfo->prevValue = pData[i];
|
||||||
pDerivInfo->prevTs = tsList[i];
|
pDerivInfo->prevTs = tsList[i];
|
||||||
notNullElems++;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
break;
|
break;
|
||||||
|
@ -3496,12 +3494,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
|
||||||
*pTimestamp = tsList[i];
|
*pTimestamp = tsList[i];
|
||||||
pOutput += 1;
|
pOutput += 1;
|
||||||
pTimestamp += 1;
|
pTimestamp += 1;
|
||||||
|
notNullElems++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pDerivInfo->prevValue = (double) pData[i];
|
pDerivInfo->prevValue = (double) pData[i];
|
||||||
pDerivInfo->prevTs = tsList[i];
|
pDerivInfo->prevTs = tsList[i];
|
||||||
notNullElems++;
|
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -3522,12 +3520,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
|
||||||
*pTimestamp = tsList[i];
|
*pTimestamp = tsList[i];
|
||||||
pOutput += 1;
|
pOutput += 1;
|
||||||
pTimestamp += 1;
|
pTimestamp += 1;
|
||||||
|
notNullElems++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pDerivInfo->prevValue = pData[i];
|
pDerivInfo->prevValue = pData[i];
|
||||||
pDerivInfo->prevTs = tsList[i];
|
pDerivInfo->prevTs = tsList[i];
|
||||||
notNullElems++;
|
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -3549,12 +3547,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
|
||||||
*pTimestamp = tsList[i];
|
*pTimestamp = tsList[i];
|
||||||
pOutput += 1;
|
pOutput += 1;
|
||||||
pTimestamp += 1;
|
pTimestamp += 1;
|
||||||
|
notNullElems++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pDerivInfo->prevValue = pData[i];
|
pDerivInfo->prevValue = pData[i];
|
||||||
pDerivInfo->prevTs = tsList[i];
|
pDerivInfo->prevTs = tsList[i];
|
||||||
notNullElems++;
|
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -3575,12 +3573,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
|
||||||
*pTimestamp = tsList[i];
|
*pTimestamp = tsList[i];
|
||||||
pOutput += 1;
|
pOutput += 1;
|
||||||
pTimestamp += 1;
|
pTimestamp += 1;
|
||||||
|
notNullElems++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pDerivInfo->prevValue = pData[i];
|
pDerivInfo->prevValue = pData[i];
|
||||||
pDerivInfo->prevTs = tsList[i];
|
pDerivInfo->prevTs = tsList[i];
|
||||||
notNullElems++;
|
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -3602,12 +3600,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
|
||||||
|
|
||||||
pOutput += 1;
|
pOutput += 1;
|
||||||
pTimestamp += 1;
|
pTimestamp += 1;
|
||||||
|
notNullElems++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pDerivInfo->prevValue = pData[i];
|
pDerivInfo->prevValue = pData[i];
|
||||||
pDerivInfo->prevTs = tsList[i];
|
pDerivInfo->prevTs = tsList[i];
|
||||||
notNullElems++;
|
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -3623,8 +3621,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
|
||||||
*/
|
*/
|
||||||
assert(pCtx->hasNull);
|
assert(pCtx->hasNull);
|
||||||
} else {
|
} else {
|
||||||
int32_t forwardStep = (isFirstBlock) ? notNullElems - 1 : notNullElems;
|
GET_RES_INFO(pCtx)->numOfRes += notNullElems;
|
||||||
GET_RES_INFO(pCtx)->numOfRes += forwardStep;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4687,8 +4684,8 @@ static bool rate_function_setup(SQLFunctionCtx *pCtx) {
|
||||||
pInfo->correctionValue = 0;
|
pInfo->correctionValue = 0;
|
||||||
pInfo->firstKey = INT64_MIN;
|
pInfo->firstKey = INT64_MIN;
|
||||||
pInfo->lastKey = INT64_MIN;
|
pInfo->lastKey = INT64_MIN;
|
||||||
pInfo->firstValue = INT64_MIN;
|
pInfo->firstValue = (double) INT64_MIN;
|
||||||
pInfo->lastValue = INT64_MIN;
|
pInfo->lastValue = (double) INT64_MIN;
|
||||||
|
|
||||||
pInfo->hasResult = 0;
|
pInfo->hasResult = 0;
|
||||||
pInfo->isIRate = (pCtx->functionId == TSDB_FUNC_IRATE);
|
pInfo->isIRate = (pCtx->functionId == TSDB_FUNC_IRATE);
|
||||||
|
@ -5003,6 +5000,19 @@ void generateBlockDistResult(STableBlockDist *pTableBlockDist, char* result) {
|
||||||
min = totalBlocks > 0 ? pTableBlockDist->minRows : 0;
|
min = totalBlocks > 0 ? pTableBlockDist->minRows : 0;
|
||||||
max = totalBlocks > 0 ? pTableBlockDist->maxRows : 0;
|
max = totalBlocks > 0 ? pTableBlockDist->maxRows : 0;
|
||||||
|
|
||||||
|
double stdDev = 0;
|
||||||
|
if (totalBlocks > 0) {
|
||||||
|
double variance = 0;
|
||||||
|
for (int32_t i = 0; i < numSteps; i++) {
|
||||||
|
SFileBlockInfo *blockInfo = taosArrayGet(blockInfos, i);
|
||||||
|
int64_t blocks = blockInfo->numBlocksOfStep;
|
||||||
|
int32_t rows = (i * TSDB_BLOCK_DIST_STEP_ROWS + TSDB_BLOCK_DIST_STEP_ROWS / 2);
|
||||||
|
variance += blocks * (rows - avg) * (rows - avg);
|
||||||
|
}
|
||||||
|
variance = variance / totalBlocks;
|
||||||
|
stdDev = sqrt(variance);
|
||||||
|
}
|
||||||
|
|
||||||
double percents[] = {0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99};
|
double percents[] = {0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99};
|
||||||
int32_t percentiles[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
|
int32_t percentiles[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
|
||||||
assert(sizeof(percents)/sizeof(double) == sizeof(percentiles)/sizeof(int32_t));
|
assert(sizeof(percents)/sizeof(double) == sizeof(percentiles)/sizeof(int32_t));
|
||||||
|
@ -5017,12 +5027,12 @@ void generateBlockDistResult(STableBlockDist *pTableBlockDist, char* result) {
|
||||||
"60th=[%d], 70th=[%d], 80th=[%d], 90th=[%d], 95th=[%d], 99th=[%d]\n\t "
|
"60th=[%d], 70th=[%d], 80th=[%d], 90th=[%d], 95th=[%d], 99th=[%d]\n\t "
|
||||||
"Min=[%"PRId64"(Rows)] Max=[%"PRId64"(Rows)] Avg=[%"PRId64"(Rows)] Stddev=[%.2f] \n\t "
|
"Min=[%"PRId64"(Rows)] Max=[%"PRId64"(Rows)] Avg=[%"PRId64"(Rows)] Stddev=[%.2f] \n\t "
|
||||||
"Rows=[%"PRIu64"], Blocks=[%"PRId64"], Size=[%.3f(Kb)] Comp=[%.2f]\n\t "
|
"Rows=[%"PRIu64"], Blocks=[%"PRId64"], Size=[%.3f(Kb)] Comp=[%.2f]\n\t "
|
||||||
"RowsInMem=[%d] \n\t SeekHeaderTime=[%d(us)]",
|
"RowsInMem=[%d] \n\t",
|
||||||
percentiles[0], percentiles[1], percentiles[2], percentiles[3], percentiles[4], percentiles[5],
|
percentiles[0], percentiles[1], percentiles[2], percentiles[3], percentiles[4], percentiles[5],
|
||||||
percentiles[6], percentiles[7], percentiles[8], percentiles[9], percentiles[10], percentiles[11],
|
percentiles[6], percentiles[7], percentiles[8], percentiles[9], percentiles[10], percentiles[11],
|
||||||
min, max, avg, 0.0,
|
min, max, avg, stdDev,
|
||||||
totalRows, totalBlocks, totalLen/1024.0, compRatio,
|
totalRows, totalBlocks, totalLen/1024.0, compRatio,
|
||||||
pTableBlockDist->numOfRowsInMemTable, pTableBlockDist->firstSeekTimeUs);
|
pTableBlockDist->numOfRowsInMemTable);
|
||||||
varDataSetLen(result, sz);
|
varDataSetLen(result, sz);
|
||||||
UNUSED(sz);
|
UNUSED(sz);
|
||||||
}
|
}
|
||||||
|
@ -5290,7 +5300,7 @@ SAggFunctionInfo aAggs[] = {{
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
// 17
|
// 17
|
||||||
"ts_dummy",
|
"ts",
|
||||||
TSDB_FUNC_TS_DUMMY,
|
TSDB_FUNC_TS_DUMMY,
|
||||||
TSDB_FUNC_TS_DUMMY,
|
TSDB_FUNC_TS_DUMMY,
|
||||||
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
|
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
|
||||||
|
@ -5384,7 +5394,7 @@ SAggFunctionInfo aAggs[] = {{
|
||||||
"diff",
|
"diff",
|
||||||
TSDB_FUNC_DIFF,
|
TSDB_FUNC_DIFF,
|
||||||
TSDB_FUNC_INVALID_ID,
|
TSDB_FUNC_INVALID_ID,
|
||||||
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS,
|
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
|
||||||
diff_function_setup,
|
diff_function_setup,
|
||||||
diff_function,
|
diff_function,
|
||||||
diff_function_f,
|
diff_function_f,
|
||||||
|
@ -5488,7 +5498,7 @@ SAggFunctionInfo aAggs[] = {{
|
||||||
"derivative", // return table id and the corresponding tags for join match and subscribe
|
"derivative", // return table id and the corresponding tags for join match and subscribe
|
||||||
TSDB_FUNC_DERIVATIVE,
|
TSDB_FUNC_DERIVATIVE,
|
||||||
TSDB_FUNC_INVALID_ID,
|
TSDB_FUNC_INVALID_ID,
|
||||||
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS,
|
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
|
||||||
deriv_function_setup,
|
deriv_function_setup,
|
||||||
deriv_function,
|
deriv_function,
|
||||||
noop2,
|
noop2,
|
||||||
|
|
|
@ -30,6 +30,7 @@
|
||||||
|
|
||||||
#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)
|
#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)
|
||||||
#define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN)
|
#define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN)
|
||||||
|
#define IS_REPEAT_SCAN(runtime) ((runtime)->scanFlag == REPEAT_SCAN)
|
||||||
#define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN)
|
#define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN)
|
||||||
#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN)
|
#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN)
|
||||||
|
|
||||||
|
@ -130,10 +131,10 @@ static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t key = tw->skey / 1000, interval = pQueryAttr->interval.interval;
|
int64_t key = tw->skey, interval = pQueryAttr->interval.interval;
|
||||||
if (pQueryAttr->precision == TSDB_TIME_PRECISION_MICRO) {
|
//convert key to second
|
||||||
key /= 1000;
|
key = convertTimePrecision(key, pQueryAttr->precision, TSDB_TIME_PRECISION_MILLI) / 1000;
|
||||||
}
|
|
||||||
if (pQueryAttr->interval.intervalUnit == 'y') {
|
if (pQueryAttr->interval.intervalUnit == 'y') {
|
||||||
interval *= 12;
|
interval *= 12;
|
||||||
}
|
}
|
||||||
|
@ -145,17 +146,13 @@ static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) {
|
||||||
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
|
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
|
||||||
tm.tm_year = mon / 12;
|
tm.tm_year = mon / 12;
|
||||||
tm.tm_mon = mon % 12;
|
tm.tm_mon = mon % 12;
|
||||||
tw->skey = mktime(&tm) * 1000L;
|
tw->skey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
|
||||||
|
|
||||||
mon = (int)(mon + interval);
|
mon = (int)(mon + interval);
|
||||||
tm.tm_year = mon / 12;
|
tm.tm_year = mon / 12;
|
||||||
tm.tm_mon = mon % 12;
|
tm.tm_mon = mon % 12;
|
||||||
tw->ekey = mktime(&tm) * 1000L;
|
tw->ekey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
|
||||||
|
|
||||||
if (pQueryAttr->precision == TSDB_TIME_PRECISION_MICRO) {
|
|
||||||
tw->skey *= 1000L;
|
|
||||||
tw->ekey *= 1000L;
|
|
||||||
}
|
|
||||||
tw->ekey -= 1;
|
tw->ekey -= 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -735,6 +732,7 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
|
||||||
if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
|
if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
|
||||||
pCtx[k].preAggVals.isSet = false;
|
pCtx[k].preAggVals.isSet = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
|
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
|
||||||
aAggs[functionId].xFunction(&pCtx[k]);
|
aAggs[functionId].xFunction(&pCtx[k]);
|
||||||
}
|
}
|
||||||
|
@ -918,7 +916,7 @@ void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlo
|
||||||
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
|
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if (pCtx[0].pInput == NULL && pBlock->pDataBlock != NULL) {
|
if (/*pCtx[0].pInput == NULL && */pBlock->pDataBlock != NULL) {
|
||||||
doSetInputDataBlock(pOperator, pCtx, pBlock, order);
|
doSetInputDataBlock(pOperator, pCtx, pBlock, order);
|
||||||
} else {
|
} else {
|
||||||
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
|
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
|
||||||
|
@ -1336,6 +1334,10 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
|
||||||
|
|
||||||
int64_t gap = pOperator->pRuntimeEnv->pQueryAttr->sw.gap;
|
int64_t gap = pOperator->pRuntimeEnv->pQueryAttr->sw.gap;
|
||||||
pInfo->numOfRows = 0;
|
pInfo->numOfRows = 0;
|
||||||
|
if (IS_REPEAT_SCAN(pRuntimeEnv) && !pInfo->reptScan) {
|
||||||
|
pInfo->reptScan = true;
|
||||||
|
pInfo->prevTs = INT64_MIN;
|
||||||
|
}
|
||||||
|
|
||||||
TSKEY* tsList = (TSKEY*)pColInfoData->pData;
|
TSKEY* tsList = (TSKEY*)pColInfoData->pData;
|
||||||
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
|
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
|
||||||
|
@ -1345,7 +1347,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
|
||||||
pInfo->prevTs = tsList[j];
|
pInfo->prevTs = tsList[j];
|
||||||
pInfo->numOfRows = 1;
|
pInfo->numOfRows = 1;
|
||||||
pInfo->start = j;
|
pInfo->start = j;
|
||||||
} else if (tsList[j] - pInfo->prevTs <= gap) {
|
} else if (tsList[j] - pInfo->prevTs <= gap && (tsList[j] - pInfo->prevTs) >= 0) {
|
||||||
pInfo->curWindow.ekey = tsList[j];
|
pInfo->curWindow.ekey = tsList[j];
|
||||||
pInfo->prevTs = tsList[j];
|
pInfo->prevTs = tsList[j];
|
||||||
pInfo->numOfRows += 1;
|
pInfo->numOfRows += 1;
|
||||||
|
@ -1681,8 +1683,6 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
|
||||||
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
|
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||||
|
|
||||||
pRuntimeEnv->prevGroupId = INT32_MIN;
|
pRuntimeEnv->prevGroupId = INT32_MIN;
|
||||||
pRuntimeEnv->enableGroupData = false;
|
|
||||||
|
|
||||||
pRuntimeEnv->pQueryAttr = pQueryAttr;
|
pRuntimeEnv->pQueryAttr = pQueryAttr;
|
||||||
|
|
||||||
pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
|
pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
|
||||||
|
@ -3094,7 +3094,7 @@ int32_t initResultRow(SResultRow *pResultRow) {
|
||||||
* +------------+-----------------result column 1-----------+-----------------result column 2-----------+
|
* +------------+-----------------result column 1-----------+-----------------result column 2-----------+
|
||||||
* + SResultRow | SResultRowCellInfo | intermediate buffer1 | SResultRowCellInfo | intermediate buffer 2|
|
* + SResultRow | SResultRowCellInfo | intermediate buffer1 | SResultRowCellInfo | intermediate buffer 2|
|
||||||
* +------------+-------------------------------------------+-------------------------------------------+
|
* +------------+-------------------------------------------+-------------------------------------------+
|
||||||
* offset[0] offset[1]
|
* offset[0] offset[1] offset[2]
|
||||||
*/
|
*/
|
||||||
void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, int64_t uid, int32_t stage) {
|
void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, int64_t uid, int32_t stage) {
|
||||||
SQLFunctionCtx* pCtx = pInfo->pCtx;
|
SQLFunctionCtx* pCtx = pInfo->pCtx;
|
||||||
|
@ -3323,7 +3323,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
|
||||||
offset += pCtx[i].outputBytes;
|
offset += pCtx[i].outputBytes;
|
||||||
|
|
||||||
int32_t functionId = pCtx[i].functionId;
|
int32_t functionId = pCtx[i].functionId;
|
||||||
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
|
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
|
||||||
pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
|
pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3381,7 +3381,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
|
||||||
offset += pCtx[i].outputBytes;
|
offset += pCtx[i].outputBytes;
|
||||||
|
|
||||||
int32_t functionId = pCtx[i].functionId;
|
int32_t functionId = pCtx[i].functionId;
|
||||||
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
|
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
|
||||||
pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
|
pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3589,6 +3589,8 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
|
||||||
int32_t step = -1;
|
int32_t step = -1;
|
||||||
|
|
||||||
qDebug("QInfo:0x%"PRIx64" start to copy data from windowResInfo to output buf", GET_QID(pRuntimeEnv));
|
qDebug("QInfo:0x%"PRIx64" start to copy data from windowResInfo to output buf", GET_QID(pRuntimeEnv));
|
||||||
|
assert(orderType == TSDB_ORDER_ASC || orderType == TSDB_ORDER_DESC);
|
||||||
|
|
||||||
if (orderType == TSDB_ORDER_ASC) {
|
if (orderType == TSDB_ORDER_ASC) {
|
||||||
start = pGroupResInfo->index;
|
start = pGroupResInfo->index;
|
||||||
step = 1;
|
step = 1;
|
||||||
|
@ -4115,6 +4117,7 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr
|
||||||
pQueryAttr->interBufSize = getOutputInterResultBufSize(pQueryAttr);
|
pQueryAttr->interBufSize = getOutputInterResultBufSize(pQueryAttr);
|
||||||
|
|
||||||
pRuntimeEnv->groupResInfo.totalGroup = (int32_t) (pQueryAttr->stableQuery? GET_NUM_OF_TABLEGROUP(pRuntimeEnv):0);
|
pRuntimeEnv->groupResInfo.totalGroup = (int32_t) (pQueryAttr->stableQuery? GET_NUM_OF_TABLEGROUP(pRuntimeEnv):0);
|
||||||
|
pRuntimeEnv->enableGroupData = false;
|
||||||
|
|
||||||
pRuntimeEnv->pQueryAttr = pQueryAttr;
|
pRuntimeEnv->pQueryAttr = pQueryAttr;
|
||||||
pRuntimeEnv->pTsBuf = pTsBuf;
|
pRuntimeEnv->pTsBuf = pTsBuf;
|
||||||
|
@ -4570,7 +4573,7 @@ SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntime
|
||||||
}
|
}
|
||||||
|
|
||||||
SArray* getOrderCheckColumns(SQueryAttr* pQuery) {
|
SArray* getOrderCheckColumns(SQueryAttr* pQuery) {
|
||||||
int32_t numOfCols = pQuery->pGroupbyExpr->numOfGroupCols;
|
int32_t numOfCols = pQuery->pGroupbyExpr == NULL? 0: pQuery->pGroupbyExpr->numOfGroupCols;
|
||||||
|
|
||||||
SArray* pOrderColumns = NULL;
|
SArray* pOrderColumns = NULL;
|
||||||
if (numOfCols > 0) {
|
if (numOfCols > 0) {
|
||||||
|
@ -4609,7 +4612,7 @@ SArray* getOrderCheckColumns(SQueryAttr* pQuery) {
|
||||||
}
|
}
|
||||||
|
|
||||||
SArray* getResultGroupCheckColumns(SQueryAttr* pQuery) {
|
SArray* getResultGroupCheckColumns(SQueryAttr* pQuery) {
|
||||||
int32_t numOfCols = pQuery->pGroupbyExpr->numOfGroupCols;
|
int32_t numOfCols = pQuery->pGroupbyExpr == NULL? 0 : pQuery->pGroupbyExpr->numOfGroupCols;
|
||||||
|
|
||||||
SArray* pOrderColumns = NULL;
|
SArray* pOrderColumns = NULL;
|
||||||
if (numOfCols > 0) {
|
if (numOfCols > 0) {
|
||||||
|
@ -5175,6 +5178,10 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
|
||||||
|
|
||||||
SColumnInfoData* pTsColInfoData = taosArrayGet(pSDataBlock->pDataBlock, 0);
|
SColumnInfoData* pTsColInfoData = taosArrayGet(pSDataBlock->pDataBlock, 0);
|
||||||
TSKEY* tsList = (TSKEY*)pTsColInfoData->pData;
|
TSKEY* tsList = (TSKEY*)pTsColInfoData->pData;
|
||||||
|
if (IS_REPEAT_SCAN(pRuntimeEnv) && !pInfo->reptScan) {
|
||||||
|
pInfo->reptScan = true;
|
||||||
|
tfree(pInfo->prevData);
|
||||||
|
}
|
||||||
|
|
||||||
pInfo->numOfRows = 0;
|
pInfo->numOfRows = 0;
|
||||||
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
|
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
|
||||||
|
@ -5761,6 +5768,7 @@ SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOp
|
||||||
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
|
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
|
||||||
SStateWindowOperatorInfo* pInfo = calloc(1, sizeof(SStateWindowOperatorInfo));
|
SStateWindowOperatorInfo* pInfo = calloc(1, sizeof(SStateWindowOperatorInfo));
|
||||||
pInfo->colIndex = -1;
|
pInfo->colIndex = -1;
|
||||||
|
pInfo->reptScan = false;
|
||||||
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
|
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
|
||||||
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
|
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
|
||||||
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
|
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
|
||||||
|
@ -5789,6 +5797,7 @@ SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
|
||||||
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
|
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
|
||||||
|
|
||||||
pInfo->prevTs = INT64_MIN;
|
pInfo->prevTs = INT64_MIN;
|
||||||
|
pInfo->reptScan = false;
|
||||||
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
|
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
|
||||||
|
|
||||||
pOperator->name = "SessionWindowAggOperator";
|
pOperator->name = "SessionWindowAggOperator";
|
||||||
|
@ -7213,7 +7222,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
|
||||||
// todo refactor
|
// todo refactor
|
||||||
pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.functionId == TSDB_FUNC_BLKINFO);
|
pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.functionId == TSDB_FUNC_BLKINFO);
|
||||||
|
|
||||||
qDebug("qmsg:%p QInfo:0x%" PRIx64 "-%p created", pQueryMsg, pQInfo->qId, pQInfo);
|
qDebug("qmsg:%p vgId:%d, QInfo:0x%" PRIx64 "-%p created", pQueryMsg, pQInfo->query.vgId, pQInfo->qId, pQInfo);
|
||||||
return pQInfo;
|
return pQInfo;
|
||||||
|
|
||||||
_cleanup_qinfo:
|
_cleanup_qinfo:
|
||||||
|
|
|
@ -139,19 +139,20 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
|
||||||
pSqlExpr->tokenId = optrType;
|
pSqlExpr->tokenId = optrType;
|
||||||
pSqlExpr->type = SQL_NODE_VALUE;
|
pSqlExpr->type = SQL_NODE_VALUE;
|
||||||
} else if (optrType == TK_NOW) {
|
} else if (optrType == TK_NOW) {
|
||||||
// use microsecond by default
|
// use nanosecond by default TODO set value after getting database precision
|
||||||
pSqlExpr->value.i64 = taosGetTimestamp(TSDB_TIME_PRECISION_MICRO);
|
pSqlExpr->value.i64 = taosGetTimestamp(TSDB_TIME_PRECISION_NANO);
|
||||||
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
||||||
pSqlExpr->tokenId = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond
|
pSqlExpr->tokenId = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond
|
||||||
pSqlExpr->type = SQL_NODE_VALUE;
|
pSqlExpr->type = SQL_NODE_VALUE;
|
||||||
pSqlExpr->flags |= 1 << EXPR_FLAG_US_TIMESTAMP;
|
pSqlExpr->flags |= 1 << EXPR_FLAG_NS_TIMESTAMP;
|
||||||
} else if (optrType == TK_VARIABLE) {
|
} else if (optrType == TK_VARIABLE) {
|
||||||
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64);
|
// use nanosecond by default TODO set value after getting database precision
|
||||||
|
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64, TSDB_TIME_PRECISION_NANO);
|
||||||
if (ret != TSDB_CODE_SUCCESS) {
|
if (ret != TSDB_CODE_SUCCESS) {
|
||||||
terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
pSqlExpr->flags |= 1 << EXPR_FLAG_US_TIMESTAMP;
|
pSqlExpr->flags |= 1 << EXPR_FLAG_NS_TIMESTAMP;
|
||||||
pSqlExpr->flags |= 1 << EXPR_FLAG_TIMESTAMP_VAR;
|
pSqlExpr->flags |= 1 << EXPR_FLAG_TIMESTAMP_VAR;
|
||||||
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
||||||
pSqlExpr->tokenId = TK_TIMESTAMP;
|
pSqlExpr->tokenId = TK_TIMESTAMP;
|
||||||
|
|
|
@ -132,7 +132,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
|
||||||
numOfGroupByCols = 0;
|
numOfGroupByCols = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
qDebug("qmsg:%p query stable, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid);
|
qDebug("qmsg:%p query stable, uid:%"PRIu64", tid:%d", pQueryMsg, id->uid, id->tid);
|
||||||
code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, param.tagCond, pQueryMsg->tagCondLen,
|
code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, param.tagCond, pQueryMsg->tagCondLen,
|
||||||
pQueryMsg->tagNameRelType, param.tbnameCond, &tableGroupInfo, param.pGroupColIndex, numOfGroupByCols);
|
pQueryMsg->tagNameRelType, param.tbnameCond, &tableGroupInfo, param.pGroupColIndex, numOfGroupByCols);
|
||||||
|
|
||||||
|
|
3243
src/query/src/sql.c
3243
src/query/src/sql.c
File diff suppressed because it is too large
Load Diff
|
@ -92,6 +92,7 @@ struct STsdbRepo {
|
||||||
pthread_mutex_t mutex;
|
pthread_mutex_t mutex;
|
||||||
bool repoLocked;
|
bool repoLocked;
|
||||||
int32_t code; // Commit code
|
int32_t code; // Commit code
|
||||||
|
bool inCompact; // is in compact process?
|
||||||
};
|
};
|
||||||
|
|
||||||
#define REPO_ID(r) (r)->config.tsdbId
|
#define REPO_ID(r) (r)->config.tsdbId
|
||||||
|
|
|
@ -12,11 +12,516 @@
|
||||||
* You should have received a copy of the GNU Affero General Public License
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
*/
|
*/
|
||||||
#include "tsdb.h"
|
#include "tsdbint.h"
|
||||||
|
|
||||||
#ifndef _TSDB_PLUGINS
|
#ifndef _TSDB_PLUGINS
|
||||||
|
|
||||||
int tsdbCompact(STsdbRepo *pRepo) { return 0; }
|
typedef struct {
|
||||||
void *tsdbCompactImpl(STsdbRepo *pRepo) { return NULL; }
|
STable * pTable;
|
||||||
|
SBlockIdx * pBlkIdx;
|
||||||
|
SBlockIdx bindex;
|
||||||
|
SBlockInfo *pInfo;
|
||||||
|
} STableCompactH;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
SRtn rtn;
|
||||||
|
SFSIter fsIter;
|
||||||
|
SArray * tbArray; // table array to cache table obj and block indexes
|
||||||
|
SReadH readh;
|
||||||
|
SDFileSet wSet;
|
||||||
|
SArray * aBlkIdx;
|
||||||
|
SArray * aSupBlk;
|
||||||
|
SDataCols *pDataCols;
|
||||||
|
} SCompactH;
|
||||||
|
|
||||||
|
#define TSDB_COMPACT_WSET(pComph) (&((pComph)->wSet))
|
||||||
|
#define TSDB_COMPACT_REPO(pComph) TSDB_READ_REPO(&((pComph)->readh))
|
||||||
|
#define TSDB_COMPACT_HEAD_FILE(pComph) TSDB_DFILE_IN_SET(TSDB_COMPACT_WSET(pComph), TSDB_FILE_HEAD)
|
||||||
|
#define TSDB_COMPACT_DATA_FILE(pComph) TSDB_DFILE_IN_SET(TSDB_COMPACT_WSET(pComph), TSDB_FILE_DATA)
|
||||||
|
#define TSDB_COMPACT_LAST_FILE(pComph) TSDB_DFILE_IN_SET(TSDB_COMPACT_WSET(pComph), TSDB_FILE_LAST)
|
||||||
|
#define TSDB_COMPACT_BUF(pComph) TSDB_READ_BUF(&((pComph)->readh))
|
||||||
|
#define TSDB_COMPACT_COMP_BUF(pComph) TSDB_READ_COMP_BUF(&((pComph)->readh))
|
||||||
|
|
||||||
|
static int tsdbAsyncCompact(STsdbRepo *pRepo);
|
||||||
|
static void tsdbStartCompact(STsdbRepo *pRepo);
|
||||||
|
static void tsdbEndCompact(STsdbRepo *pRepo, int eno);
|
||||||
|
static int tsdbCompactMeta(STsdbRepo *pRepo);
|
||||||
|
static int tsdbCompactTSData(STsdbRepo *pRepo);
|
||||||
|
static int tsdbCompactFSet(SCompactH *pComph, SDFileSet *pSet);
|
||||||
|
static bool tsdbShouldCompact(SCompactH *pComph);
|
||||||
|
static int tsdbInitCompactH(SCompactH *pComph, STsdbRepo *pRepo);
|
||||||
|
static void tsdbDestroyCompactH(SCompactH *pComph);
|
||||||
|
static int tsdbInitCompTbArray(SCompactH *pComph);
|
||||||
|
static void tsdbDestroyCompTbArray(SCompactH *pComph);
|
||||||
|
static int tsdbCacheFSetIndex(SCompactH *pComph);
|
||||||
|
static int tsdbCompactFSetInit(SCompactH *pComph, SDFileSet *pSet);
|
||||||
|
static void tsdbCompactFSetEnd(SCompactH *pComph);
|
||||||
|
static int tsdbCompactFSetImpl(SCompactH *pComph);
|
||||||
|
static int tsdbWriteBlockToRightFile(SCompactH *pComph, STable *pTable, SDataCols *pDataCols, void **ppBuf,
|
||||||
|
void **ppCBuf);
|
||||||
|
|
||||||
|
int tsdbCompact(STsdbRepo *pRepo) { return tsdbAsyncCompact(pRepo); }
|
||||||
|
|
||||||
|
void *tsdbCompactImpl(STsdbRepo *pRepo) {
|
||||||
|
// Check if there are files in TSDB FS to compact
|
||||||
|
if (REPO_FS(pRepo)->cstatus->pmf == NULL) {
|
||||||
|
tsdbInfo("vgId:%d no file to compact in FS", REPO_ID(pRepo));
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
tsdbStartCompact(pRepo);
|
||||||
|
|
||||||
|
if (tsdbCompactMeta(pRepo) < 0) {
|
||||||
|
tsdbError("vgId:%d failed to compact META data since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||||
|
goto _err;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (tsdbCompactTSData(pRepo) < 0) {
|
||||||
|
tsdbError("vgId:%d failed to compact TS data since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||||
|
goto _err;
|
||||||
|
}
|
||||||
|
|
||||||
|
tsdbEndCompact(pRepo, TSDB_CODE_SUCCESS);
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
_err:
|
||||||
|
pRepo->code = terrno;
|
||||||
|
tsdbEndCompact(pRepo, terrno);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tsdbAsyncCompact(STsdbRepo *pRepo) {
  tsem_wait(&(pRepo->readyToCommit));
  return tsdbScheduleCommit(pRepo, COMPACT_REQ);
}

static void tsdbStartCompact(STsdbRepo *pRepo) {
  ASSERT(!pRepo->inCompact);
  tsdbInfo("vgId:%d start to compact!", REPO_ID(pRepo));
  tsdbStartFSTxn(pRepo, 0, 0);
  pRepo->code = TSDB_CODE_SUCCESS;
  pRepo->inCompact = true;
}

static void tsdbEndCompact(STsdbRepo *pRepo, int eno) {
  if (eno != TSDB_CODE_SUCCESS) {
    tsdbEndFSTxnWithError(REPO_FS(pRepo));
  } else {
    tsdbEndFSTxn(pRepo);
  }
  pRepo->inCompact = false;
  tsdbInfo("vgId:%d compact over, %s", REPO_ID(pRepo), (eno == TSDB_CODE_SUCCESS) ? "succeed" : "failed");
  tsem_post(&(pRepo->readyToCommit));
}

static int tsdbCompactMeta(STsdbRepo *pRepo) {
  STsdbFS *pfs = REPO_FS(pRepo);
  tsdbUpdateMFile(pfs, pfs->cstatus->pmf);
  return 0;
}

static int tsdbCompactTSData(STsdbRepo *pRepo) {
  SCompactH  compactH;
  SDFileSet *pSet = NULL;

  tsdbDebug("vgId:%d start to compact TS data", REPO_ID(pRepo));

  // If no file, just return 0
  if (taosArrayGetSize(REPO_FS(pRepo)->cstatus->df) <= 0) {
    tsdbDebug("vgId:%d no TS data file to compact, compact over", REPO_ID(pRepo));
    return 0;
  }

  if (tsdbInitCompactH(&compactH, pRepo) < 0) {
    return -1;
  }

  while ((pSet = tsdbFSIterNext(&(compactH.fsIter)))) {
    // Remove those expired files
    if (pSet->fid < compactH.rtn.minFid) {
      tsdbInfo("vgId:%d FSET %d on level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid,
               TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet));
      continue;
    }

    if (TSDB_FSET_LEVEL(pSet) == TFS_MAX_LEVEL) {
      tsdbDebug("vgId:%d FSET %d on level %d, should not compact", REPO_ID(pRepo), pSet->fid, TFS_MAX_LEVEL);
      tsdbUpdateDFileSet(REPO_FS(pRepo), pSet);
      continue;
    }

    if (tsdbCompactFSet(&compactH, pSet) < 0) {
      tsdbDestroyCompactH(&compactH);
      tsdbError("vgId:%d failed to compact FSET %d since %s", REPO_ID(pRepo), pSet->fid, tstrerror(terrno));
      return -1;
    }
  }

  tsdbDestroyCompactH(&compactH);
  tsdbDebug("vgId:%d compact TS data over", REPO_ID(pRepo));
  return 0;
}

static int tsdbCompactFSet(SCompactH *pComph, SDFileSet *pSet) {
  STsdbRepo *pRepo = TSDB_COMPACT_REPO(pComph);
  SDiskID    did;

  tsdbDebug("vgId:%d start to compact FSET %d on level %d id %d", REPO_ID(pRepo), pSet->fid, TSDB_FSET_LEVEL(pSet),
            TSDB_FSET_ID(pSet));

  if (tsdbCompactFSetInit(pComph, pSet) < 0) {
    return -1;
  }

  if (!tsdbShouldCompact(pComph)) {
    tsdbDebug("vgId:%d no need to compact FSET %d", REPO_ID(pRepo), pSet->fid);
    if (tsdbApplyRtnOnFSet(TSDB_COMPACT_REPO(pComph), pSet, &(pComph->rtn)) < 0) {
      tsdbCompactFSetEnd(pComph);
      return -1;
    }
  } else {
    // Create new fset as compacted fset
    tfsAllocDisk(tsdbGetFidLevel(pSet->fid, &(pComph->rtn)), &(did.level), &(did.id));
    if (did.level == TFS_UNDECIDED_LEVEL) {
      terrno = TSDB_CODE_TDB_NO_AVAIL_DISK;
      tsdbError("vgId:%d failed to compact FSET %d since %s", REPO_ID(pRepo), pSet->fid, tstrerror(terrno));
      tsdbCompactFSetEnd(pComph);
      return -1;
    }

    tsdbInitDFileSet(TSDB_COMPACT_WSET(pComph), did, REPO_ID(pRepo), TSDB_FSET_FID(pSet),
                     FS_TXN_VERSION(REPO_FS(pRepo)));
    if (tsdbCreateDFileSet(TSDB_COMPACT_WSET(pComph), true) < 0) {
      tsdbError("vgId:%d failed to compact FSET %d since %s", REPO_ID(pRepo), pSet->fid, tstrerror(terrno));
      tsdbCompactFSetEnd(pComph);
      return -1;
    }

    if (tsdbCompactFSetImpl(pComph) < 0) {
      tsdbCloseDFileSet(TSDB_COMPACT_WSET(pComph));
      tsdbRemoveDFileSet(TSDB_COMPACT_WSET(pComph));
      tsdbCompactFSetEnd(pComph);
      return -1;
    }

    tsdbCloseDFileSet(TSDB_COMPACT_WSET(pComph));
    tsdbUpdateDFileSet(REPO_FS(pRepo), TSDB_COMPACT_WSET(pComph));
    tsdbDebug("vgId:%d FSET %d compact over", REPO_ID(pRepo), pSet->fid);
  }

  tsdbCompactFSetEnd(pComph);
  return 0;
}

static bool tsdbShouldCompact(SCompactH *pComph) {
  STsdbRepo *     pRepo = TSDB_COMPACT_REPO(pComph);
  STsdbCfg *      pCfg = REPO_CFG(pRepo);
  SReadH *        pReadh = &(pComph->readh);
  STableCompactH *pTh;
  SBlock *        pBlock;
  int             defaultRows = TSDB_DEFAULT_BLOCK_ROWS(pCfg->maxRowsPerFileBlock);
  SDFile *        pDataF = TSDB_READ_DATA_FILE(pReadh);
  SDFile *        pLastF = TSDB_READ_LAST_FILE(pReadh);

  int     tblocks = 0;       // total blocks
  int     nSubBlocks = 0;    // # of blocks with sub-blocks
  int     nSmallBlocks = 0;  // # of blocks with rows < defaultRows
  int64_t tsize = 0;

  for (size_t i = 0; i < taosArrayGetSize(pComph->tbArray); i++) {
    pTh = (STableCompactH *)taosArrayGet(pComph->tbArray, i);

    if (pTh->pTable == NULL || pTh->pBlkIdx == NULL) continue;

    for (size_t bidx = 0; bidx < pTh->pBlkIdx->numOfBlocks; bidx++) {
      tblocks++;
      pBlock = pTh->pInfo->blocks + bidx;

      if (pBlock->numOfRows < defaultRows) {
        nSmallBlocks++;
      }

      if (pBlock->numOfSubBlocks > 1) {
        nSubBlocks++;
        for (int k = 0; k < pBlock->numOfSubBlocks; k++) {
          SBlock *iBlock = ((SBlock *)POINTER_SHIFT(pTh->pInfo, pBlock->offset)) + k;
          tsize = tsize + iBlock->len;
        }
      } else if (pBlock->numOfSubBlocks == 1) {
        tsize += pBlock->len;
      } else {
        ASSERT(0);
      }
    }
  }

  return (((nSubBlocks * 1.0 / tblocks) > 0.33) || ((nSmallBlocks * 1.0 / tblocks) > 0.33) ||
          (tsize * 1.0 / (pDataF->info.size + pLastF->info.size - 2 * TSDB_FILE_HEAD_SIZE) < 0.85));
}

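tsdbShouldCompact() above fires on any of three conditions: too many blocks carry sub-blocks, too many blocks are under-filled, or the live payload covers too little of the data/last files. A minimal standalone sketch of the same thresholds follows; only the 0.33/0.33/0.85 constants and the counter meanings are taken from the function above, while the simplified signature and the sample numbers are illustrative and not part of the repository.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Illustrative only: the same thresholds as tsdbShouldCompact(), fed with
// plain counters instead of a SCompactH handle.
static bool shouldCompact(int tblocks, int nSubBlocks, int nSmallBlocks,
                          int64_t tsize, int64_t fileSize) {
  if (tblocks == 0 || fileSize <= 0) return false;              // nothing to judge
  bool manySubBlocks   = (nSubBlocks * 1.0 / tblocks) > 0.33;   // fragmented blocks
  bool manySmallBlocks = (nSmallBlocks * 1.0 / tblocks) > 0.33; // under-filled blocks
  bool muchDeadSpace   = (tsize * 1.0 / fileSize) < 0.85;       // stale data in the files
  return manySubBlocks || manySmallBlocks || muchDeadSpace;
}

int main(void) {
  // 100 blocks, 40 of them under-filled: the small-block trigger fires.
  printf("%d\n", shouldCompact(100, 10, 40, 900000, 1000000));  // prints 1
  return 0;
}
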
static int tsdbInitCompactH(SCompactH *pComph, STsdbRepo *pRepo) {
  STsdbCfg *pCfg = REPO_CFG(pRepo);

  memset(pComph, 0, sizeof(*pComph));

  TSDB_FSET_SET_CLOSED(TSDB_COMPACT_WSET(pComph));

  tsdbGetRtnSnap(pRepo, &(pComph->rtn));
  tsdbFSIterInit(&(pComph->fsIter), REPO_FS(pRepo), TSDB_FS_ITER_FORWARD);

  if (tsdbInitReadH(&(pComph->readh), pRepo) < 0) {
    return -1;
  }

  if (tsdbInitCompTbArray(pComph) < 0) {
    tsdbDestroyCompactH(pComph);
    return -1;
  }

  pComph->aBlkIdx = taosArrayInit(1024, sizeof(SBlockIdx));
  if (pComph->aBlkIdx == NULL) {
    terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
    tsdbDestroyCompactH(pComph);
    return -1;
  }

  pComph->aSupBlk = taosArrayInit(1024, sizeof(SBlock));
  if (pComph->aSupBlk == NULL) {
    terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
    tsdbDestroyCompactH(pComph);
    return -1;
  }

  pComph->pDataCols = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock);
  if (pComph->pDataCols == NULL) {
    terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
    tsdbDestroyCompactH(pComph);
    return -1;
  }

  return 0;
}

static void tsdbDestroyCompactH(SCompactH *pComph) {
  pComph->pDataCols = tdFreeDataCols(pComph->pDataCols);
  pComph->aSupBlk = taosArrayDestroy(pComph->aSupBlk);
  pComph->aBlkIdx = taosArrayDestroy(pComph->aBlkIdx);
  tsdbDestroyCompTbArray(pComph);
  tsdbDestroyReadH(&(pComph->readh));
  tsdbCloseDFileSet(TSDB_COMPACT_WSET(pComph));
}

static int tsdbInitCompTbArray(SCompactH *pComph) {  // Init pComp->tbArray
  STsdbRepo *pRepo = TSDB_COMPACT_REPO(pComph);
  STsdbMeta *pMeta = pRepo->tsdbMeta;

  if (tsdbRLockRepoMeta(pRepo) < 0) return -1;

  pComph->tbArray = taosArrayInit(pMeta->maxTables, sizeof(STableCompactH));
  if (pComph->tbArray == NULL) {
    terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
    tsdbUnlockRepoMeta(pRepo);
    return -1;
  }

  // Note here must start from 0
  for (int i = 0; i < pMeta->maxTables; i++) {
    STableCompactH ch = {0};
    if (pMeta->tables[i] != NULL) {
      tsdbRefTable(pMeta->tables[i]);
      ch.pTable = pMeta->tables[i];
    }

    if (taosArrayPush(pComph->tbArray, &ch) == NULL) {
      terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
      tsdbUnlockRepoMeta(pRepo);
      return -1;
    }
  }

  if (tsdbUnlockRepoMeta(pRepo) < 0) return -1;
  return 0;
}

static void tsdbDestroyCompTbArray(SCompactH *pComph) {
  STableCompactH *pTh;

  if (pComph->tbArray == NULL) return;

  for (size_t i = 0; i < taosArrayGetSize(pComph->tbArray); i++) {
    pTh = (STableCompactH *)taosArrayGet(pComph->tbArray, i);
    if (pTh->pTable) {
      tsdbUnRefTable(pTh->pTable);
    }

    pTh->pInfo = taosTZfree(pTh->pInfo);
  }

  pComph->tbArray = taosArrayDestroy(pComph->tbArray);
}

static int tsdbCacheFSetIndex(SCompactH *pComph) {
  SReadH *pReadH = &(pComph->readh);

  if (tsdbLoadBlockIdx(pReadH) < 0) {
    return -1;
  }

  for (int tid = 1; tid < taosArrayGetSize(pComph->tbArray); tid++) {
    STableCompactH *pTh = (STableCompactH *)taosArrayGet(pComph->tbArray, tid);
    pTh->pBlkIdx = NULL;

    if (pTh->pTable == NULL) continue;
    if (tsdbSetReadTable(pReadH, pTh->pTable) < 0) {
      return -1;
    }

    if (pReadH->pBlkIdx == NULL) continue;
    pTh->bindex = *(pReadH->pBlkIdx);
    pTh->pBlkIdx = &(pTh->bindex);

    if (tsdbMakeRoom((void **)(&(pTh->pInfo)), pTh->pBlkIdx->len) < 0) {
      return -1;
    }

    if (tsdbLoadBlockInfo(pReadH, (void *)(pTh->pInfo)) < 0) {
      return -1;
    }
  }

  return 0;
}

static int tsdbCompactFSetInit(SCompactH *pComph, SDFileSet *pSet) {
  taosArrayClear(pComph->aBlkIdx);
  taosArrayClear(pComph->aSupBlk);

  if (tsdbSetAndOpenReadFSet(&(pComph->readh), pSet) < 0) {
    return -1;
  }

  if (tsdbCacheFSetIndex(pComph) < 0) {
    tsdbCloseAndUnsetFSet(&(pComph->readh));
    return -1;
  }

  return 0;
}

static void tsdbCompactFSetEnd(SCompactH *pComph) { tsdbCloseAndUnsetFSet(&(pComph->readh)); }

static int tsdbCompactFSetImpl(SCompactH *pComph) {
  STsdbRepo *pRepo = TSDB_COMPACT_REPO(pComph);
  STsdbCfg * pCfg = REPO_CFG(pRepo);
  SReadH *   pReadh = &(pComph->readh);
  SBlockIdx  blkIdx;
  void **    ppBuf = &(TSDB_COMPACT_BUF(pComph));
  void **    ppCBuf = &(TSDB_COMPACT_COMP_BUF(pComph));
  int        defaultRows = TSDB_DEFAULT_BLOCK_ROWS(pCfg->maxRowsPerFileBlock);

  taosArrayClear(pComph->aBlkIdx);

  for (int tid = 1; tid < taosArrayGetSize(pComph->tbArray); tid++) {
    STableCompactH *pTh = (STableCompactH *)taosArrayGet(pComph->tbArray, tid);
    STSchema *      pSchema;

    if (pTh->pTable == NULL || pTh->pBlkIdx == NULL) continue;

    pSchema = tsdbGetTableSchemaImpl(pTh->pTable, true, true, -1);
    taosArrayClear(pComph->aSupBlk);
    if ((tdInitDataCols(pComph->pDataCols, pSchema) < 0) || (tdInitDataCols(pReadh->pDCols[0], pSchema) < 0) ||
        (tdInitDataCols(pReadh->pDCols[1], pSchema) < 0)) {
      terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
      return -1;
    }
    tdFreeSchema(pSchema);

    // Loop to compact each block data
    for (int i = 0; i < pTh->pBlkIdx->numOfBlocks; i++) {
      SBlock *pBlock = pTh->pInfo->blocks + i;

      // Load the block data
      if (tsdbLoadBlockData(pReadh, pBlock, pTh->pInfo) < 0) {
        return -1;
      }

      // Merge pComph->pDataCols and pReadh->pDCols[0] and write data to file
      if (pComph->pDataCols->numOfRows == 0 && pBlock->numOfRows >= defaultRows) {
        if (tsdbWriteBlockToRightFile(pComph, pTh->pTable, pReadh->pDCols[0], ppBuf, ppCBuf) < 0) {
          return -1;
        }
      } else {
        int ridx = 0;

        while (true) {
          if (pReadh->pDCols[0]->numOfRows - ridx == 0) break;
          int rowsToMerge = MIN(pReadh->pDCols[0]->numOfRows - ridx, defaultRows - pComph->pDataCols->numOfRows);

          tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx);

          if (pComph->pDataCols->numOfRows < defaultRows) {
            break;
          }

          if (tsdbWriteBlockToRightFile(pComph, pTh->pTable, pComph->pDataCols, ppBuf, ppCBuf) < 0) {
            return -1;
          }
          tdResetDataCols(pComph->pDataCols);
        }
      }
    }

    if (pComph->pDataCols->numOfRows > 0 &&
        tsdbWriteBlockToRightFile(pComph, pTh->pTable, pComph->pDataCols, ppBuf, ppCBuf) < 0) {
      return -1;
    }

    if (tsdbWriteBlockInfoImpl(TSDB_COMPACT_HEAD_FILE(pComph), pTh->pTable, pComph->aSupBlk, NULL, ppBuf, &blkIdx) <
        0) {
      return -1;
    }

    if ((blkIdx.numOfBlocks > 0) && (taosArrayPush(pComph->aBlkIdx, (void *)(&blkIdx)) == NULL)) {
      terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
      return -1;
    }
  }

  if (tsdbWriteBlockIdx(TSDB_COMPACT_HEAD_FILE(pComph), pComph->aBlkIdx, ppBuf) < 0) {
    return -1;
  }

  return 0;
}

static int tsdbWriteBlockToRightFile(SCompactH *pComph, STable *pTable, SDataCols *pDataCols, void **ppBuf,
                                     void **ppCBuf) {
  STsdbRepo *pRepo = TSDB_COMPACT_REPO(pComph);
  STsdbCfg * pCfg = REPO_CFG(pRepo);
  SDFile *   pDFile;
  bool       isLast;
  SBlock     block;

  ASSERT(pDataCols->numOfRows > 0);

  if (pDataCols->numOfRows < pCfg->minRowsPerFileBlock) {
    pDFile = TSDB_COMPACT_LAST_FILE(pComph);
    isLast = true;
  } else {
    pDFile = TSDB_COMPACT_DATA_FILE(pComph);
    isLast = false;
  }

  if (tsdbWriteBlockImpl(pRepo, pTable, pDFile, pDataCols, &block, isLast, true, ppBuf, ppCBuf) < 0) {
    return -1;
  }

  if (taosArrayPush(pComph->aSupBlk, (void *)(&block)) == NULL) {
    terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
    return -1;
  }

  return 0;
}
#endif

@@ -195,6 +195,8 @@ STsdbRepoInfo *tsdbGetStatus(STsdbRepo *pRepo) { return NULL; }
 
 int tsdbGetState(STsdbRepo *repo) { return repo->state; }
 
+bool tsdbInCompact(STsdbRepo *repo) { return repo->inCompact; }
+
 void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int64_t *compStorage) {
   ASSERT(repo != NULL);
   STsdbRepo *pRepo = repo;
@@ -533,6 +535,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) {
 
   pRepo->state = TSDB_STATE_OK;
   pRepo->code = TSDB_CODE_SUCCESS;
+  pRepo->inCompact = false;
   pRepo->config = *pCfg;
   if (pAppH) {
     pRepo->appH = *pAppH;

@@ -218,11 +218,6 @@ static void tsdbMayUnTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) {
 int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle) {
   STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
 
-  size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
-  assert(pQueryHandle->activeIndex < size && pQueryHandle->activeIndex >= 0 && size >= 1);
-  STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
-
   int64_t rows = 0;
   SMemRef* pMemRef = pQueryHandle->pMemRef;
   if (pMemRef == NULL) { return rows; }
@@ -233,6 +228,10 @@ int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle) {
   SMemTable* pMemT = pMemRef->snapshot.mem;
   SMemTable* pIMemT = pMemRef->snapshot.imem;
 
+  size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
+  for (int32_t i = 0; i < size; ++i) {
+    STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
+
   if (pMemT && pCheckInfo->tableId.tid < pMemT->maxTables) {
     pMem = pMemT->tData[pCheckInfo->tableId.tid];
     rows += (pMem && pMem->uid == pCheckInfo->tableId.uid) ? pMem->numOfRows : 0;
@@ -241,7 +240,7 @@ int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle) {
     pIMem = pIMemT->tData[pCheckInfo->tableId.tid];
     rows += (pIMem && pIMem->uid == pCheckInfo->tableId.uid) ? pIMem->numOfRows : 0;
   }
+  }
   return rows;
 }

@@ -1088,7 +1087,11 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p
   assert(cur->pos >= 0 && cur->pos <= binfo.rows);
 
   TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL;
+  if (key != TSKEY_INITIAL_VAL) {
     tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId);
+  } else {
+    tsdbDebug("%p no data in mem, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);
+  }
 
   if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
       (!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
@@ -1152,8 +1155,14 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p
     }
 
     assert(cur->blockCompleted);
-    tsdbDebug("create data block from remain file block, brange:%"PRId64"-%"PRId64", rows:%d, lastKey:%"PRId64", %p",
-        cur->win.skey, cur->win.ekey, cur->rows, cur->lastKey, pQueryHandle);
+    if (cur->rows == binfo.rows) {
+      tsdbDebug("%p whole file block qualified, brange:%"PRId64"-%"PRId64", rows:%d, lastKey:%"PRId64", %"PRIx64,
+                pQueryHandle, cur->win.skey, cur->win.ekey, cur->rows, cur->lastKey, pQueryHandle->qId);
+    } else {
+      tsdbDebug("%p create data block from remain file block, brange:%"PRId64"-%"PRId64", rows:%d, total:%d, lastKey:%"PRId64", %"PRIx64,
+                pQueryHandle, cur->win.skey, cur->win.ekey, cur->rows, binfo.rows, cur->lastKey, pQueryHandle->qId);
+    }
   }
 
   return code;

@@ -26,6 +26,7 @@ extern "C" {
 #include "taosdef.h"
 
 int32_t strdequote(char *src);
+int32_t strRmquote(char *z, int32_t len);
 size_t  strtrim(char *src);
 char *  strnchr(char *haystack, char needle, int32_t len, bool skipquote);
 char ** strsplit(char *src, const char *delim, int32_t *num);

@@ -193,7 +193,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_DROPPING, "Database not availabl
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_READY,        "Database unsynced")
 
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_DAYS,  "Invalid database option: days out of range")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_KEEP,  "Invalid database option: keep >= keep1 >= keep0 >= days")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_KEEP,  "Invalid database option: keep2 >= keep1 >= keep0 >= days")
 
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TOPIC,           "Invalid topic name")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TOPIC_OPTION,    "Invalid topic option")

@@ -494,9 +494,9 @@ uint32_t tGetToken(char* z, uint32_t* tokenId) {
     }
 
     /* here is the 1u/1a/2s/3m/9y */
-    if ((z[i] == 'u' || z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' ||
+    if ((z[i] == 'b' || z[i] == 'u' || z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' ||
          z[i] == 'y' || z[i] == 'w' ||
-         z[i] == 'U' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' ||
+         z[i] == 'B' || z[i] == 'U' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' ||
          z[i] == 'Y' || z[i] == 'W') &&
         (isIdChar[(uint8_t)z[i + 1]] == 0)) {
       *tokenId = TK_VARIABLE;

@@ -52,6 +52,36 @@ int32_t strdequote(char *z) {
   return j + 1; // only one quote, do nothing
 }
 
+int32_t strRmquote(char *z, int32_t len) {
+  // delete escape character: \\, \', \"
+  char delim = z[0];
+  if (delim != '\'' && delim != '\"') {
+    return len;
+  }
+
+  int32_t cnt = 0;
+  int32_t j = 0;
+  for (uint32_t k = 1; k < len - 1; ++k) {
+    if (z[k] == '\\' || (z[k] == delim && z[k + 1] == delim)) {
+      z[j] = z[k + 1];
+
+      cnt++;
+      j++;
+      k++;
+      continue;
+    }
+
+    z[j] = z[k];
+    j++;
+  }
+
+  z[j] = 0;
+
+  return len - 2 - cnt;
+}
+
 size_t strtrim(char *z) {
   int32_t i = 0;
   int32_t j = 0;

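The new strRmquote() collapses doubled quote characters and backslash escapes in place and returns the shortened length. A small self-contained check of that behaviour follows, using a local copy of the helper so the snippet compiles on its own; the copy mirrors the hunk above and is not the library header.

#include <stdio.h>
#include <string.h>

// Local copy of the helper added above, kept here only so the example is standalone.
static int rmquote(char *z, int len) {
  char delim = z[0];
  if (delim != '\'' && delim != '\"') return len;
  int cnt = 0, j = 0;
  for (int k = 1; k < len - 1; ++k) {
    if (z[k] == '\\' || (z[k] == delim && z[k + 1] == delim)) {
      z[j++] = z[k + 1];  // keep the escaped character, drop the escape
      cnt++;
      k++;
      continue;
    }
    z[j++] = z[k];
  }
  z[j] = 0;
  return len - 2 - cnt;  // outer quotes and collapsed escapes removed
}

int main(void) {
  char buf[] = "'it''s ok'";
  int  n = rmquote(buf, (int)strlen(buf));
  printf("%d %s\n", n, buf);  // expected output: 7 it's ok
  return 0;
}
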
@@ -86,7 +86,7 @@ static int print_result(TAOS_RES* res, int blockFetch) {
     }
   } else {
     while ((row = taos_fetch_row(res))) {
-      char temp[256];
+      char temp[256] = {0};
       taos_print_row(temp, row, fields, num_fields);
       puts(temp);
       nRows++;
@@ -391,10 +391,10 @@ void verify_prepare(TAOS* taos) {
   int rows = 0;
   int num_fields = taos_num_fields(result);
   TAOS_FIELD *fields = taos_fetch_fields(result);
-  char temp[256];
 
   // fetch the records row by row
   while ((row = taos_fetch_row(result))) {
+    char temp[256] = {0};
     rows++;
     taos_print_row(temp, row, fields, num_fields);
     printf("%s\n", temp);
@@ -614,10 +614,10 @@ void verify_prepare2(TAOS* taos) {
   int rows = 0;
   int num_fields = taos_num_fields(result);
   TAOS_FIELD *fields = taos_fetch_fields(result);
-  char temp[256];
 
   // fetch the records row by row
   while ((row = taos_fetch_row(result))) {
+    char temp[256] = {0};
     rows++;
     taos_print_row(temp, row, fields, num_fields);
     printf("%s\n", temp);
@@ -866,12 +866,10 @@ void verify_prepare3(TAOS* taos) {
   int rows = 0;
   int num_fields = taos_num_fields(result);
   TAOS_FIELD *fields = taos_fetch_fields(result);
-  char temp[256] = {0};
 
   // fetch the records row by row
   while ((row = taos_fetch_row(result))) {
-    memset(temp, 0, sizeof(temp)/sizeof(temp[0]));
+    char temp[256] = {0};
 
     rows++;
     taos_print_row(temp, row, fields, num_fields);
     printf("%s\n", temp);
@@ -116,12 +116,12 @@ void Test(TAOS *taos, char *qstr, int index) {
   int rows = 0;
   int num_fields = taos_field_count(result);
   TAOS_FIELD *fields = taos_fetch_fields(result);
-  char temp[1024];
 
   printf("num_fields = %d\n", num_fields);
   printf("select * from table, result:\n");
   // fetch the records row by row
   while ((row = taos_fetch_row(result))) {
+    char temp[1024] = {0};
     rows++;
     taos_print_row(temp, row, fields, num_fields);
     printf("%s\n", temp);
@@ -184,10 +184,10 @@ int main(int argc, char *argv[])
   int rows = 0;
   int num_fields = taos_num_fields(result);
   TAOS_FIELD *fields = taos_fetch_fields(result);
-  char temp[256];
 
   // fetch the records row by row
   while ((row = taos_fetch_row(result))) {
+    char temp[256] = {0};
     rows++;
     taos_print_row(temp, row, fields, num_fields);
     printf("%s\n", temp);
@@ -14,8 +14,6 @@ void print_result(TAOS_RES* res, int blockFetch) {
   int num_fields = taos_num_fields(res);
   TAOS_FIELD* fields = taos_fetch_fields(res);
   int nRows = 0;
-  char buf[4096];
-
 
   if (blockFetch) {
     nRows = taos_fetch_block(res, &row);
@@ -25,6 +23,7 @@ void print_result(TAOS_RES* res, int blockFetch) {
     //}
   } else {
     while ((row = taos_fetch_row(res))) {
+      char buf[4096] = {0};
       taos_print_row(buf, row, fields, num_fields);
       puts(buf);
       nRows++;

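The change repeated across these example programs moves the row buffer into the fetch loop and zero-initializes it, so every printed row starts from a clean buffer instead of whatever a previous, possibly longer, row left behind. A small illustration of the pattern follows; the stand-in formatter and its deliberately worst-case behaviour are assumptions for the demo, not the behaviour of the real taos_print_row().

#include <stdio.h>
#include <string.h>

// Stand-in for a row formatter that fills only part of the buffer and,
// in the worst case, does not terminate it.
static void format_row(char *buf, const char *row) {
  memcpy(buf, row, strlen(row));
}

int main(void) {
  const char *rows[] = {"a much longer first row", "short"};
  for (int i = 0; i < 2; ++i) {
    char temp[256] = {0};  // fresh, fully-initialized buffer for each row
    format_row(temp, rows[i]);
    puts(temp);            // never prints leftovers from the previous row
  }
  return 0;
}
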
@@ -21,7 +21,7 @@ def pre_test(){
     cmake .. > /dev/null
     make > /dev/null
     make install > /dev/null
-    pip3 install ${WKC}/src/connector/python/linux/python3/
+    pip3 install ${WKC}/src/connector/python/ || echo 0
     '''
     return 1
 }

@@ -72,9 +72,9 @@ class TDTestCase:
         tdSql.query('show databases')
         tdSql.checkData(0,7,'3650,3650,3650')
 
-        tdSql.error('alter database db keep 10')
+        tdSql.execute('alter database db keep 10')
         tdSql.query('show databases')
-        tdSql.checkData(0,7,'3650,3650,3650')
+        tdSql.checkData(0,7,'10,10,10')
 
         ## the order for altering keep is keep(D), keep0, keep1.
         ## if the order is changed, please modify the following test
@@ -84,17 +84,17 @@ class TDTestCase:
         tdSql.query('show databases')
         tdSql.checkData(0,7,'10,10,10')
 
-        tdSql.execute('alter database db keep 100, 98 ,99')
+        tdSql.error('alter database db keep 100, 98 ,99')
         tdSql.query('show databases')
-        tdSql.checkData(0,7,'98,99,100')
+        tdSql.checkData(0,7,'10,10,10')
 
         tdSql.execute('alter database db keep 200, 200 ,200')
         tdSql.query('show databases')
         tdSql.checkData(0,7,'200,200,200')
 
-        tdSql.error('alter database db keep 198, 199 ,200')
+        tdSql.execute('alter database db keep 198, 199 ,200')
         tdSql.query('show databases')
-        tdSql.checkData(0,7,'200,200,200')
+        tdSql.checkData(0,7,'198,199,200')
 
         # tdSql.execute('alter database db keep 3650,3650,3650')
         # tdSql.error('alter database db keep 4000,3640')

@@ -0,0 +1,72 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.pathFinding import *
from util.dnodes import tdDnodes
from datetime import datetime
import subprocess

##TODO: this is now automatic, but not sure if this will run through jenkins
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        tdFindPath.init(__file__)

    def run(self):
        tdSql.prepare()
        binPath = tdFindPath.getTaosdemoPath()
        TDenginePath = tdFindPath.getTDenginePath()

        ## change system time to 2020/10/20
        os.system('sudo timedatectl set-ntp off')
        os.system('sudo timedatectl set-time 2020-10-20')

        #run taosdemo to insert data. one row per second from 2020/10/11 to 2020/10/20
        #11 data files should be generated
        #vnode at TDinternal/community/sim/dnode1/data/vnode
        os.system(f"{binPath}taosdemo -f tools/taosdemoAllTest/manual_change_time_1_1_A.json")
        commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data']
        result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8')
        print(result.count('data'))
        if result.count('data') != 11:
            tdLog.exit('wrong number of files')
        else:
            tdLog.debug("data file number correct")

        #move 5 days ahead to 2020/10/25. 4 oldest files should be removed during the new write
        #leaving 7 data files.
        os.system('timedatectl set-time 2020-10-25')
        os.system(f"{binPath}taosdemo -f tools/taosdemoAllTest/manual_change_time_1_1_B.json")
        commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data']
        result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8')
        print(result.count('data'))
        if result.count('data') != 7:
            tdLog.exit('wrong number of files')
        else:
            tdLog.debug("data file number correct")
        tdSql.query('select first(ts) from stb_0')
        tdSql.checkData(0,0,datetime(2020,10,14,8,0,0,0)) #check the last data in the database

    def stop(self):
        os.system('sudo timedatectl set-ntp on')
        tdSql.close()
        tdLog.success("alter block manual check finish")


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

@@ -0,0 +1,82 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes
from util.pathFinding import *
from datetime import datetime
import subprocess

##TODO: this is now automatic, but not sure if this will run through jenkins
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        tdFindPath.init(__file__)

    def run(self):
        tdSql.prepare()
        binPath = tdFindPath.getTaosdemoPath()
        TDenginePath = tdFindPath.getTDenginePath()

        ## change system time to 2020/10/20
        os.system('timedatectl set-ntp off')
        os.system('timedatectl set-time 2020-10-20')

        #run taosdemo to insert data. one row per second from 2020/10/11 to 2020/10/20
        #11 data files should be generated
        #vnode at TDinternal/community/sim/dnode1/data/vnode
        os.system(f"{binPath}taosdemo -f tools/taosdemoAllTest/manual_change_time_1_1_A.json")
        commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data']
        result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8')
        print(result.count('data'))
        if result.count('data') != 11:
            tdLog.exit('wrong number of files')
        else:
            tdLog.debug("data file number correct")

        tdSql.query('select first(ts) from stb_0') #check the last data in the database
        tdSql.checkData(0,0,datetime(2020,10,11,0,0,0,0))

        os.system('timedatectl set-time 2020-10-25')

        #moves 5 days ahead to 2020/10/25 and restart taosd
        #4 oldest data file should be removed from tsdb/data
        #7 data file should be found
        #vnode at TDinternal/community/sim/dnode1/data/vnode
        os.system('timedatectl set-time 2020-10-25')
        tdDnodes.stop(1)
        tdDnodes.start(1)
        tdSql.query('select first(ts) from stb_0')
        tdSql.checkData(0,0,datetime(2020,10,14,8,0,0,0)) #check the last data in the database
        commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data']
        result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8')
        print(result.count('data'))
        if result.count('data') != 7:
            tdLog.exit('wrong number of files')
        else:
            tdLog.debug("data file number correct")

    def stop(self):
        os.system('sudo timedatectl set-ntp on')
        tdSql.close()
        tdLog.success("alter block manual check finish")


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

@@ -23,6 +23,7 @@ class Node:
         self.hostIP = hostIP
         self.hostName = hostName
         self.homeDir = homeDir
+        self.corePath = '/coredump'
         self.conn = Connection("{}@{}".format(username, hostName), connect_kwargs={"password": "{}".format(password)})
 
     def buildTaosd(self):
@@ -127,21 +128,37 @@ class Node:
             print("remove taosd error for node %d " % self.index)
             logging.exception(e)
 
+    def detectCoredumpFile(self):
+        try:
+            result = self.conn.run("find /coredump -name 'core_*' ", hide=True)
+            output = result.stdout
+            print("output: %s" % output)
+            return output
+        except Exception as e:
+            print("find coredump file error on node %d " % self.index)
+            logging.exception(e)
+
+
 class Nodes:
     def __init__(self):
         self.tdnodes = []
-        self.tdnodes.append(Node(0, 'root', '52.143.103.7', 'node1', 'a', '/root/'))
-        self.tdnodes.append(Node(1, 'root', '52.250.48.222', 'node2', 'a', '/root/'))
-        self.tdnodes.append(Node(2, 'root', '51.141.167.23', 'node3', 'a', '/root/'))
-        self.tdnodes.append(Node(3, 'root', '52.247.207.173', 'node4', 'a', '/root/'))
-        self.tdnodes.append(Node(4, 'root', '51.141.166.100', 'node5', 'a', '/root/'))
+        self.tdnodes.append(Node(0, 'root', '192.168.17.194', 'taosdata', 'r', '/root/'))
+        # self.tdnodes.append(Node(1, 'root', '52.250.48.222', 'node2', 'a', '/root/'))
+        # self.tdnodes.append(Node(2, 'root', '51.141.167.23', 'node3', 'a', '/root/'))
+        # self.tdnodes.append(Node(3, 'root', '52.247.207.173', 'node4', 'a', '/root/'))
+        # self.tdnodes.append(Node(4, 'root', '51.141.166.100', 'node5', 'a', '/root/'))
 
     def stopOneNode(self, index):
+        self.tdnodes[index].stopTaosd()
         self.tdnodes[index].forceStopOneTaosd()
 
     def startOneNode(self, index):
         self.tdnodes[index].startOneTaosd()
 
+    def detectCoredumpFile(self, index):
+        return self.tdnodes[index].detectCoredumpFile()
+
     def stopAllTaosd(self):
         for i in range(len(self.tdnodes)):
             self.tdnodes[i].stopTaosd()
@@ -166,14 +183,32 @@ class Nodes:
         for i in range(len(self.tdnodes)):
             self.tdnodes[i].removeData()
 
+class Test:
+    def __init__(self):
+        self.nodes = Nodes()
+
 # kill taosd randomly every 10 mins
-nodes = Nodes()
+    def randomlyKillDnode(self):
         loop = 0
         while True:
-    loop = loop + 1
             index = random.randint(0, 4)
             print("loop: %d, kill taosd on node%d" %(loop, index))
-    nodes.stopOneNode(index)
+            self.nodes.stopOneNode(index)
             time.sleep(60)
-    nodes.startOneNode(index)
+            self.nodes.startOneNode(index)
             time.sleep(600)
+            loop = loop + 1
+
+    def detectCoredump(self):
+        loop = 0
+        while True:
+            for i in range(len(self.nodes.tdnodes)):
+                result = self.nodes.detectCoredumpFile(i)
+                print("core file path is %s" % result)
+                if result and not result.isspace():
+                    self.nodes.stopAllTaosd()
+            print("sleep for 10 mins")
+            time.sleep(600)
+
+test = Test()
+test.detectCoredump()

@@ -256,6 +256,8 @@ python3 ./test.py -f client/client.py
 python3 ./test.py -f client/version.py
 python3 ./test.py -f client/alterDatabase.py
 python3 ./test.py -f client/noConnectionErrorTest.py
+python3 test.py -f client/change_time_1_1.py
+python3 test.py -f client/change_time_1_2.py
 
 # Misc
 python3 testCompress.py

@@ -25,7 +25,7 @@ class TDTestCase:
 
     def run(self):
         tdSql.query("show variables")
-        tdSql.checkData(51, 1, 864000)
+        tdSql.checkData(53, 1, 864000)
 
     def stop(self):
         tdSql.close()

@@ -0,0 +1,97 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-
from fabric import Connection
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes
from datetime import datetime
import subprocess
import time
import taos
##TODO: this is now automatic, but not sure if this will run through jenkins

#the initial time used for this test is 2020/10/20

#setting the local machine's time for later connecting to the server
os.system('sudo timedatectl set-ntp off')
os.system('sudo timedatectl set-time 2020-10-25')

#connect to VM lyq-1, and initialize the environment at lyq-1
conn1 = Connection("{}@{}".format('ubuntu', "192.168.1.125"), connect_kwargs={"password": "{}".format('tbase125!')})
conn1.run("sudo systemctl stop taosd")
conn1.run('ls -l')
conn1.run('sudo timedatectl set-ntp off')
conn1.run('sudo timedatectl set-time 2020-10-20')

with conn1.cd('/data/taos/log'):
    conn1.run('sudo rm -rf *')

with conn1.cd('/data/taos/data'):
    conn1.run('sudo rm -rf *')

#launch taosd and start taosdemo
conn1.run("sudo systemctl start taosd")
time.sleep(5)
with conn1.cd('~/bschang_test'):
    conn1.run('taosdemo -f manual_change_time_1_1_A.json')

#force everything onto disk
conn1.run("sudo systemctl restart taosd")
time.sleep(10)

#change lyq-1 to 2020/10/25 for testing if the server
#will send data that is out of time range
conn1.run('sudo timedatectl set-time 2020-10-25')

#connect to VM lyq-2, initialize the environment at lyq-2, and run taosd
#on that
conn2 = Connection("{}@{}".format('ubuntu', "192.168.1.126"), connect_kwargs={"password": "{}".format('tbase125!')})
conn2.run('sudo timedatectl set-ntp off')
conn2.run('sudo timedatectl set-time 2020-10-20')
conn2.run("sudo systemctl stop taosd")
with conn2.cd('/data/taos/log'):
    conn2.run('sudo rm -rf *')
with conn2.cd('/data/taos/data'):
    conn2.run('sudo rm -rf *')
conn2.run("sudo systemctl start taosd")

#set replica to 2
connTaos = taos.connect(host='192.168.1.125', user='root', password='taosdata', config='/etc/taos')
c1 = connTaos.cursor()
c1.execute('create dnode \'lyq-2:6030\'')
c1.execute('alter database db replica 2')
c1.close()
connTaos.close()
time.sleep(5)

#force everything onto the disk for lyq-2
#stopping taosd on lyq-1 for future testing
conn2.run("sudo systemctl stop taosd")
conn1.run("sudo systemctl stop taosd")

#reset the time
conn1.run('sudo timedatectl set-ntp on')
conn2.run('sudo timedatectl set-ntp on')
os.system('sudo timedatectl set-ntp on')

#check if the number of files received is 7
#the 4 oldest data files should be dropped
#4 files because of moving 5 days ahead
with conn2.cd('/data/taos/data/vnode/vnode3/tsdb/data'):
    result = conn2.run('ls -l |grep \'data\' |wc -l')
    if result.stdout.strip() != '7':
        tdLog.exit('the file number is wrong')
    else:
        tdLog.success('the file number is the same. test pass')

@@ -0,0 +1,97 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-
from fabric import Connection
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes
from datetime import datetime
import subprocess
import time
import taos

##TODO: this is now automatic, but not sure if this will run through jenkins

#the initial time used for this test is 2020/10/20

#setting the local machine's time for later connecting to the server
os.system('sudo timedatectl set-ntp off')
os.system('sudo timedatectl set-time 2020-10-20')

#connect to VM lyq-1, and initialize the environment at lyq-1
conn1 = Connection("{}@{}".format('ubuntu', "192.168.1.125"), connect_kwargs={"password": "{}".format('tbase125!')})
conn1.run("sudo systemctl stop taosd")
conn1.run('sudo timedatectl set-ntp off')
conn1.run('sudo timedatectl set-time 2020-10-20')
with conn1.cd('/data/taos/log'):
    conn1.run('sudo rm -rf *')

with conn1.cd('/data/taos/data'):
    conn1.run('sudo rm -rf *')

#launch taosd and start taosdemo
conn1.run("sudo systemctl start taosd")
time.sleep(5)
with conn1.cd('~/bschang_test'):
    conn1.run('taosdemo -f manual_change_time_1_1_A.json') #the json file is placed in lyq-1 already

#force everything onto disk
conn1.run("sudo systemctl restart taosd")
time.sleep(10)

#connect to VM lyq-2, and initialize the environment at lyq-2
conn2 = Connection("{}@{}".format('ubuntu', "192.168.1.126"), connect_kwargs={"password": "{}".format('tbase125!')})
conn2.run('sudo timedatectl set-ntp off')
conn2.run('sudo timedatectl set-time 2020-10-20')
conn2.run("sudo systemctl stop taosd")
with conn2.cd('/data/taos/log'):
    conn2.run('sudo rm -rf *')
with conn2.cd('/data/taos/data'):
    conn2.run('sudo rm -rf *')

#the date of lyq-2 is going to be set to 2020/10/25
#for testing if the other dnode will accept files out of its local time range
conn2.run("sudo systemctl start taosd")
conn2.run('sudo timedatectl set-time 2020-10-25')

#set the replica to 2
connTaos = taos.connect(host='192.168.1.125', user='root', password='taosdata', config='/etc/taos')
c1 = connTaos.cursor()
c1.execute('create dnode \'lyq-2:6030\'')
c1.execute('alter database db replica 2')
time.sleep(5)

#force everything onto the disk for lyq-2
#stopping taosd on lyq-1 for future testing
conn2.run("sudo systemctl stop taosd")
conn1.run("sudo systemctl stop taosd")

#reset the time
conn1.run('sudo timedatectl set-ntp on')
conn2.run('sudo timedatectl set-ntp on')
os.system('sudo timedatectl set-ntp on')

#check if the number of files received is 7
#the 4 oldest data files should be dropped
#4 files because of moving 5 days ahead
with conn2.cd('/data/taos/data/vnode/vnode3/tsdb/data'):
    result = conn2.run('ls -l |grep \'data\' |wc -l')
    if result.stdout.strip() != '7':
        tdLog.exit('the file number is wrong')
    else:
        tdLog.success('the file number is the same. test pass')

c1.close()
connTaos.close()

@ -141,7 +141,7 @@ class TDTestCase:
|
||||||
def run(self):
|
def run(self):
|
||||||
tdSql.prepare()
|
tdSql.prepare()
|
||||||
|
|
||||||
print("============== last_row_cache_0.sim")
|
print("============== Step1: last_row_cache_0.sim")
|
||||||
tdSql.execute("create database test1 cachelast 0")
|
tdSql.execute("create database test1 cachelast 0")
|
||||||
tdSql.execute("use test1")
|
tdSql.execute("use test1")
|
||||||
self.insertData()
|
self.insertData()
|
||||||
|
@ -149,43 +149,48 @@ class TDTestCase:
|
||||||
self.insertData2()
|
self.insertData2()
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
|
|
||||||
print("============== alter last cache")
|
print("============== Step2: alter database test1 cachelast 1")
|
||||||
tdSql.execute("alter database test1 cachelast 1")
|
tdSql.execute("alter database test1 cachelast 1")
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
|
|
||||||
|
print("============== Step3: alter database test1 cachelast 2")
|
||||||
tdSql.execute("alter database test1 cachelast 2")
|
tdSql.execute("alter database test1 cachelast 2")
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
|
|
||||||
|
print("============== Step4: alter database test1 cachelast 3")
|
||||||
tdSql.execute("alter database test1 cachelast 3")
|
tdSql.execute("alter database test1 cachelast 3")
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
|
|
||||||
|
|
||||||
print("============== alter last cache")
|
print("============== Step5: alter database test1 cachelast 0 and restart taosd")
|
||||||
tdSql.execute("alter database test1 cachelast 0")
|
tdSql.execute("alter database test1 cachelast 0")
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
tdDnodes.stop(1)
|
tdDnodes.stop(1)
|
||||||
tdDnodes.start(1)
|
tdDnodes.start(1)
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
|
|
||||||
|
print("============== Step6: alter database test1 cachelast 1 and restart taosd")
|
||||||
tdSql.execute("alter database test1 cachelast 1")
|
tdSql.execute("alter database test1 cachelast 1")
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
tdDnodes.stop(1)
|
tdDnodes.stop(1)
|
||||||
tdDnodes.start(1)
|
tdDnodes.start(1)
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
|
|
||||||
|
print("============== Step7: alter database test1 cachelast 2 and restart taosd")
|
||||||
tdSql.execute("alter database test1 cachelast 2")
|
tdSql.execute("alter database test1 cachelast 2")
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
tdDnodes.stop(1)
|
tdDnodes.stop(1)
|
||||||
tdDnodes.start(1)
|
tdDnodes.start(1)
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
|
|
||||||
|
print("============== Step8: alter database test1 cachelast 3 and restart taosd")
|
||||||
tdSql.execute("alter database test1 cachelast 3")
|
tdSql.execute("alter database test1 cachelast 3")
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
tdDnodes.stop(1)
|
tdDnodes.stop(1)
|
||||||
tdDnodes.start(1)
|
tdDnodes.start(1)
|
||||||
self.executeQueries2()
|
self.executeQueries2()
|
||||||
|
|
||||||
print("============== last_row_cache_1.sim")
|
print("============== Step9: create database test2 cachelast 1")
|
||||||
tdSql.execute("create database test2 cachelast 1")
|
tdSql.execute("create database test2 cachelast 1")
|
||||||
tdSql.execute("use test2")
|
tdSql.execute("use test2")
|
||||||
self.insertData()
|
self.insertData()
|
||||||
|
@ -196,42 +201,51 @@ class TDTestCase:
tdDnodes.start(1)
self.executeQueries2()

print("============== Step8: alter database test2 cachelast 0")
tdSql.execute("alter database test2 cachelast 0")
self.executeQueries2()

print("============== Step9: alter database test2 cachelast 1")
tdSql.execute("alter database test2 cachelast 1")
self.executeQueries2()

print("============== Step10: alter database test2 cachelast 2")
tdSql.execute("alter database test2 cachelast 2")
self.executeQueries2()

print("============== Step11: alter database test2 cachelast 3")
tdSql.execute("alter database test2 cachelast 3")
self.executeQueries2()

print("============== Step12: alter database test2 cachelast 0 and restart taosd")
tdSql.execute("alter database test2 cachelast 0")
self.executeQueries2()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries2()

print("============== Step13: alter database test2 cachelast 1 and restart taosd")
tdSql.execute("alter database test2 cachelast 1")
self.executeQueries2()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries2()

print("============== Step14: alter database test2 cachelast 2 and restart taosd")
tdSql.execute("alter database test2 cachelast 2")
self.executeQueries2()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries2()

print("============== Step15: alter database test2 cachelast 3 and restart taosd")
tdSql.execute("alter database test2 cachelast 3")
self.executeQueries2()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries2()

print("============== Step16: select last_row(*) from st group by tbname")
tdSql.query("select last_row(*) from st group by tbname")
tdSql.checkRows(10)
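Steps 12 through 15 above repeat the same alter/restart/verify cycle for each cachelast value. A possible refactor is sketched below; it is illustrative only, not part of this commit, and assumes the tdSql and tdDnodes fixtures the test case already imports.

# Illustrative sketch only: loop over the cachelast values exercised in Steps 12-15,
# using the same fixtures (tdSql, tdDnodes) as the surrounding test case.
def alterCachelastAndRestart(self, dbname):
    for value in [0, 1, 2, 3]:
        tdSql.execute("alter database %s cachelast %d" % (dbname, value))
        self.executeQueries2()
        tdDnodes.stop(1)
        tdDnodes.start(1)
        self.executeQueries2()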
@ -68,6 +68,50 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.checkData(0, 0, 1)

tdSql.query("select count(*) from (select first(tagtype) val from st interval(30s)) a where a.val >= 20")
tdSql.checkData(0, 0, 2)

tdSql.query("select count(*) from (select first(tagtype) val from st interval(30s)) a where a.val < 20")
tdSql.checkData(0, 0, 63)

tdSql.query("select count(*) from (select first(tagtype) val from st interval(30s)) a where a.val <= 20")
tdSql.checkData(0, 0, 64)

tdSql.query("select count(*) from (select first(tagtype) val from st interval(30s)) a where a.val = 20")
tdSql.checkData(0, 0, 1)

tdSql.query("select count(*) from (select first(tagtype) val from st interval(30s)) a where a.val > 20")
tdSql.checkData(0, 0, 1)

tdSql.query("select count(*) from (select first(tagtype) val from st interval(30s)) a where a.val >= 20")
tdSql.checkData(0, 0, 2)

tdSql.query("select count(*) from (select first(tagtype) val from st interval(30s)) a where a.val < 20")
tdSql.checkData(0, 0, 63)

tdSql.query("select count(*) from (select first(tagtype) val from st interval(30s)) a where a.val <= 20")
tdSql.checkData(0, 0, 64)

tdSql.query("select count(*) from (select first(tagtype) val from st interval(30s)) a where a.val = 20")
tdSql.checkData(0, 0, 1)

tdSql.query("select count(*) from (select last(tagtype) val from st interval(30s)) a where a.val > 20")
tdSql.checkData(0, 0, 3)

tdSql.query("select count(*) from (select last(tagtype) val from st interval(30s)) a where a.val >= 20")
tdSql.checkData(0, 0, 5)

tdSql.query("select count(*) from (select last(tagtype) val from st interval(30s)) a where a.val < 20")
tdSql.checkData(0, 0, 60)

tdSql.query("select count(*) from (select last(tagtype) val from st interval(30s)) a where a.val <= 20")
tdSql.checkData(0, 0, 62)

tdSql.query("select count(*) from (select last(tagtype) val from st interval(30s)) a where a.val = 20")
tdSql.checkData(0, 0, 2)

def stop(self):
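Each block above issues the same nested count query with a different aggregate, comparison operator, and expected value. A hedged helper along these lines could shorten the file; the helper name is hypothetical and tdSql is the framework fixture.

# Hypothetical helper: check one nested-count assertion in a single call.
def checkNestedCount(self, func, op, expected):
    sql = ("select count(*) from (select %s(tagtype) val from st interval(30s)) a "
           "where a.val %s 20") % (func, op)
    tdSql.query(sql)
    tdSql.checkData(0, 0, expected)

# usage: self.checkNestedCount("last", ">=", 5)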
@ -0,0 +1,60 @@
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 100,
    "num_of_records_per_req": 32766,
    "databases": [{
        "dbinfo": {
            "name": "db",
            "drop": "yes",
            "replica": 1,
            "days": 1,
            "cache": 4,
            "blocks": 3,
            "precision": "ms",
            "keep": 10,
            "minRows": 1000,
            "maxRows": 4096,
            "comp":2,
            "walLevel":1,
            "cachelast":0,
            "quorum":1,
            "fsync":3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb",
            "child_table_exists":"no",
            "childtable_count": 1,
            "childtable_prefix": "stb_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 20,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 864000,
            "childtable_limit": 1,
            "childtable_offset":0,
            "interlace_rows": 0,
            "insert_interval":0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1000,
            "start_timestamp": "2020-10-11 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./tools/taosdemoAllTest/sample.csv",
            "tags_file": "",
            "columns": [{"type": "BINARY", "len": 5120, "count":1}],
            "tags": [{"type": "TINYINT", "count":2}]
        }]
    }]
}
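The file above is a taosdemo insert profile (drop the database, one super table, 864000 rows per child table). A minimal sketch of driving it from a Python test follows; the config file name and the assumption that taosdemo accepts the profile via -f are not part of this commit.

import subprocess

# Sketch only: run taosdemo against the profile above; path and flag are assumptions.
cfg = "./tools/taosdemoAllTest/insert_profile.json"  # hypothetical file name
subprocess.run(["taosdemo", "-f", cfg], check=True)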
@ -0,0 +1,60 @@
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 100,
    "num_of_records_per_req": 32766,
    "databases": [{
        "dbinfo": {
            "name": "db",
            "drop": "no",
            "replica": 1,
            "days": 1,
            "cache": 4,
            "blocks": 3,
            "precision": "ms",
            "keep": 10,
            "minRows": 1000,
            "maxRows": 4096,
            "comp":2,
            "walLevel":1,
            "cachelast":0,
            "quorum":1,
            "fsync":3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb",
            "child_table_exists":"yes",
            "childtable_count": 1,
            "childtable_prefix": "stb_",
            "auto_create_table": "yes",
            "batch_create_tbl_num": 20,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 432000,
            "childtable_limit": 1,
            "childtable_offset":0,
            "interlace_rows": 0,
            "insert_interval":0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1000,
            "start_timestamp": "2020-10-16 00:00:00.500",
            "sample_format": "csv",
            "sample_file": "./tools/taosdemoAllTest/sample.csv",
            "tags_file": "",
            "columns": [{"type": "BINARY", "len": 5120, "count":1}],
            "tags": [{"type": "TINYINT", "count":1}]
        }]
    }]
}
@ -0,0 +1,83 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import os
from util.log import *


class TDFindPath:
    """This class is for finding path within TDengine
    """
    def __init__(self):
        self.file = ""

    def init(self, file):
        """[summary]

        Args:
            file (str): the file location you want to start the query. Generally using __file__
        """
        self.file = file

    def getTaosdemoPath(self):
        """for finding the path of directory containing taosdemo

        Returns:
            str: the path to directory containing taosdemo
        """
        selfPath = os.path.dirname(os.path.realpath(self.file))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""  # initialized so the not-found check below cannot hit an unbound name
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root)-len("/build/bin")]
                    break
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info(f"taosd found in {buildPath}")
        return buildPath + "/build/bin/"

    def getTDenginePath(self):
        """for finding the root path of TDengine

        Returns:
            str: the root path of TDengine
        """
        selfPath = os.path.dirname(os.path.realpath(self.file))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        print(projPath)

        rootRealPath = ""  # initialized so the not-found check below cannot hit an unbound name
        for root, dirs, files in os.walk(projPath):
            if ("sim" in dirs):
                print(root)
                rootRealPath = os.path.realpath(root)
        if (rootRealPath == ""):
            tdLog.exit("TDengine not found!")
        else:
            tdLog.info(f"TDengine found in {rootRealPath}")
        return rootRealPath


tdFindPath = TDFindPath()
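A minimal usage sketch for the helper class above; the import path is an assumption, since the file's location is not shown in this diff.

# Assumed import path for the module defined above.
from util.findpath import tdFindPath

tdFindPath.init(__file__)
binPath = tdFindPath.getTaosdemoPath()   # ends with /build/bin/
print("taosdemo directory: %s" % binPath)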
@ -135,6 +135,7 @@ run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
#unsupport run general/parser/repeatAlter.sim
#unsupport run general/parser/slimit_alter_tags.sim
run general/parser/precision_ns.sim
run general/stable/disk.sim
run general/stable/dnode3.sim
run general/stable/metrics.sim
@ -112,21 +112,21 @@ endi
sql alter database db keep 30
sql show databases
print keep $data7_db
if $data7_db != 20,20,30 then
if $data7_db != 30,30,30 then
return -1
endi

sql alter database db keep 40
sql show databases
print keep $data7_db
if $data7_db != 20,20,40 then
if $data7_db != 40,40,40 then
return -1
endi

sql alter database db keep 40
sql alter database db keep 40,50
sql alter database db keep 30
sql alter database db keep 30,31
sql alter database db keep 20
sql_error alter database db keep 10
sql_error alter database db keep 10.0
sql_error alter database db keep 9
sql_error alter database db keep 1
sql_error alter database db keep 0
@ -177,7 +177,7 @@ sql alter database db blocks 20
sql alter database db blocks 10
sql_error alter database db blocks 2
sql_error alter database db blocks 1
sql alter database db blocks 0
sql_error alter database db blocks 0
sql_error alter database db blocks -1
sql_error alter database db blocks 10001
@ -367,7 +367,7 @@ sql_error topic db keep 30
sql alter database db keep 30
sql show databases
print keep $data7_db
if $data7_db != 20,20,30 then
if $data7_db != 30,30,30 then
return -1
endi

@ -375,14 +375,14 @@ sql_error alter topic db keep 40
sql alter database db keep 40
sql show databases
print keep $data7_db
if $data7_db != 20,20,40 then
if $data7_db != 40,40,40 then
return -1
endi

sql alter database db keep 40
sql alter database db keep 30
sql alter database db keep 20
sql_error alter database db keep 10
sql_error alter database db keep 10.0
sql_error alter database db keep 9
sql_error alter database db keep 1
sql_error alter database db keep 0
@ -455,7 +455,7 @@ sql alter database db blocks 20
sql alter database db blocks 10
sql_error alter database db blocks 2
sql_error alter database db blocks 1
sql alter database db blocks 0
sql_error alter database db blocks 0
sql_error alter database db blocks -1
sql_error alter database db blocks 10001
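The sim assertions above read the keep column of `show databases` as a comma separated triple. The same check could be written from the Python framework; this is only a sketch, under the assumption that tdSql.getData exposes the column at index 7 just as $data7_db does here.

# Sketch: verify the keep triple after an alter, mirroring the sim checks above.
tdSql.execute("alter database db keep 30")
tdSql.query("show databases")
keep = tdSql.getData(0, 7)     # assumed keep column position
assert keep == "30,30,30", "unexpected keep value: %s" % keep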
@ -88,7 +88,7 @@ print =============== step2 - no db
#11
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql
print 11-> $system_content
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[],"rows":0}@ then
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep2","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep2",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[],"rows":0}@ then
return -1
endi
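The curl call above can be reproduced outside the sim framework; a rough Python equivalent is sketched below, reusing the endpoint and the Taosd authorization string from the test itself. It only illustrates the expected keep0,keep1,keep2 header rename and is not part of the commit.

import requests

# Sketch of the curl request above; endpoint and header come from the test.
url = "http://127.0.0.1:7111/rest/sql"
headers = {"Authorization": "Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}
resp = requests.post(url, headers=headers, data="show databases")
assert "keep0,keep1,keep2" in resp.json()["head"]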
@ -20,8 +20,82 @@ $db = $dbPrefix . $i
|
||||||
$mt = $mtPrefix . $i
|
$mt = $mtPrefix . $i
|
||||||
|
|
||||||
sql drop database if exists $db
|
sql drop database if exists $db
|
||||||
sql create database $db
|
sql create database $db days 10 keep 20,20,20
|
||||||
sql use $db
|
sql use $db
|
||||||
|
|
||||||
|
sql_error alter database $db keep "20"
|
||||||
|
sql_error alter database $db keep "20","20","20"
|
||||||
|
sql_error alter database $db keep 20,19
|
||||||
|
sql_error alter database $db keep 20.0
|
||||||
|
sql_error alter database $db keep 20.0,20.0,20.0
|
||||||
|
sql_error alter database $db keep 0,0,0
|
||||||
|
sql_error alter database $db keep -1,-1,-1
|
||||||
|
sql_error alter database $db keep 9,20
|
||||||
|
sql_error alter database $db keep 9,9,9
|
||||||
|
sql_error alter database $db keep 20,20,19
|
||||||
|
sql_error alter database $db keep 20,19,20
|
||||||
|
sql_error alter database $db keep 20,19,19
|
||||||
|
sql_error alter database $db keep 20,19,18
|
||||||
|
sql_error alter database $db keep 20,20,20,20
|
||||||
|
sql_error alter database $db keep 365001,365001,365001
|
||||||
|
sql alter database $db keep 21
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 21,21,21 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 11,12
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 11,12,12 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 20,20,20
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 20,20,20 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 10,10,10
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 10,10,10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 10,10,11
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 10,10,11 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 11,12,13
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 11,12,13 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 365000,365000,365000
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 365000,365000,365000 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
##### alter table test, simeplest case
|
##### alter table test, simeplest case
|
||||||
sql create table tb (ts timestamp, c1 int, c2 int, c3 int)
|
sql create table tb (ts timestamp, c1 int, c2 int, c3 int)
|
||||||
sql insert into tb values (now, 1, 1, 1)
|
sql insert into tb values (now, 1, 1, 1)
|
||||||
|
|
|
@ -0,0 +1,316 @@
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
|
||||||
|
system sh/deploy.sh -n dnode1 -i 1
|
||||||
|
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||||
|
system sh/exec.sh -n dnode1 -s start
|
||||||
|
sleep 100
|
||||||
|
sql connect
|
||||||
|
|
||||||
|
$dbPrefix = m_alt_db
|
||||||
|
$tbPrefix = m_alt_tb
|
||||||
|
$mtPrefix = m_alt_mt
|
||||||
|
$tbNum = 10
|
||||||
|
$rowNum = 5
|
||||||
|
$totalNum = $tbNum * $rowNum
|
||||||
|
$ts0 = 1537146000000
|
||||||
|
$delta = 600000
|
||||||
|
print ========== alter.sim
|
||||||
|
$i = 0
|
||||||
|
$db = $dbPrefix . $i
|
||||||
|
$mt = $mtPrefix . $i
|
||||||
|
|
||||||
|
sql drop database if exists $db
|
||||||
|
sql create database $db days 10 keep 20
|
||||||
|
sql use $db
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 20 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql_error alter database $db keep "20"
|
||||||
|
sql_error alter database $db keep "20","20","20"
|
||||||
|
sql_error alter database $db keep 0
|
||||||
|
sql_error alter database $db keep 20.0
|
||||||
|
sql_error alter database $db keep 20.0,20.0,20.0
|
||||||
|
sql_error alter database $db keep 0,0,0
|
||||||
|
sql_error alter database $db keep 3
|
||||||
|
sql_error alter database $db keep -1,-1,-1
|
||||||
|
sql_error alter database $db keep 20,20
|
||||||
|
sql_error alter database $db keep 9,9,9
|
||||||
|
sql_error alter database $db keep 20,20,19
|
||||||
|
sql_error alter database $db keep 20,19,20
|
||||||
|
sql_error alter database $db keep 20,19,19
|
||||||
|
sql_error alter database $db keep 20,19,18
|
||||||
|
sql_error alter database $db keep 20,20,20,20
|
||||||
|
sql_error alter database $db keep 365001,365001,365001
|
||||||
|
sql_error alter database $db keep 365001
|
||||||
|
sql alter database $db keep 20
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 20 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 10
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 11
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 11 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 13
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 13 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter database $db keep 365000
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 365000 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
##### alter table test, simeplest case
|
||||||
|
sql create table tb (ts timestamp, c1 int, c2 int, c3 int)
|
||||||
|
sql insert into tb values (now, 1, 1, 1)
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter table tb drop column c3
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $data01 != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data02 != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data03 != null then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter table tb add column c3 nchar(4)
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data03 != NULL then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql insert into tb values (now, 2, 2, 'taos')
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $rows != 2 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
print data03 = $data03
|
||||||
|
if $data03 != taos then
|
||||||
|
print expect taos, actual: $data03
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop table tb
|
||||||
|
|
||||||
|
##### alter metric test, simplest case
|
||||||
|
sql create table mt (ts timestamp, c1 int, c2 int, c3 int) tags (t1 int)
|
||||||
|
sql create table tb using mt tags(1)
|
||||||
|
sql insert into tb values (now, 1, 1, 1)
|
||||||
|
sql alter table mt drop column c3
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $data01 != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data02 != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data03 != null then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql alter table mt add column c3 nchar(4)
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $data03 != NULL then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql insert into tb values (now, 2, 2, 'taos')
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $rows != 2 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data03 != taos then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data13 != NULL then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop table tb
|
||||||
|
sql drop table mt
|
||||||
|
|
||||||
|
## [TBASE272]
|
||||||
|
sql create table tb (ts timestamp, c1 int, c2 int, c3 int)
|
||||||
|
sql insert into tb values (now, 1, 1, 1)
|
||||||
|
sql alter table tb drop column c3
|
||||||
|
sql alter table tb add column c3 nchar(5)
|
||||||
|
sql insert into tb values(now, 2, 2, 'taos')
|
||||||
|
sql drop table tb
|
||||||
|
sql create table mt (ts timestamp, c1 int, c2 int, c3 int) tags (t1 int)
|
||||||
|
sql create table tb using mt tags(1)
|
||||||
|
sql insert into tb values (now, 1, 1, 1)
|
||||||
|
sql alter table mt drop column c3
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop table tb
|
||||||
|
sql drop table mt
|
||||||
|
|
||||||
|
sleep 100
|
||||||
|
### ALTER TABLE WHILE STREAMING [TBASE271]
|
||||||
|
#sql create table tb1 (ts timestamp, c1 int, c2 nchar(5), c3 int)
|
||||||
|
#sql create table strm as select count(*), avg(c1), first(c2), sum(c3) from tb1 interval(2s)
|
||||||
|
#sql select * from strm
|
||||||
|
#if $rows != 0 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
##sleep 12000
|
||||||
|
#sql insert into tb1 values (now, 1, 'taos', 1)
|
||||||
|
#sleep 20000
|
||||||
|
#sql select * from strm
|
||||||
|
#print rows = $rows
|
||||||
|
#if $rows != 1 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
#if $data04 != 1 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
#sql alter table tb1 drop column c3
|
||||||
|
#sleep 500
|
||||||
|
#sql insert into tb1 values (now, 2, 'taos')
|
||||||
|
#sleep 30000
|
||||||
|
#sql select * from strm
|
||||||
|
#if $rows != 2 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
#if $data04 != 1 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
#sql alter table tb1 add column c3 int
|
||||||
|
#sleep 500
|
||||||
|
#sql insert into tb1 values (now, 3, 'taos', 3);
|
||||||
|
#sleep 100
|
||||||
|
#sql select * from strm
|
||||||
|
#if $rows != 3 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
#if $data04 != 1 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
|
||||||
|
## ALTER TABLE AND INSERT BY COLUMNS
|
||||||
|
sql create table mt (ts timestamp, c1 int, c2 int) tags(t1 int)
|
||||||
|
sql create table tb using mt tags(0)
|
||||||
|
sql insert into tb values (now-1m, 1, 1)
|
||||||
|
sql alter table mt drop column c2
|
||||||
|
sql_error insert into tb (ts, c1, c2) values (now, 2, 2)
|
||||||
|
sql insert into tb (ts, c1) values (now, 2)
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $rows != 2 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data01 != 2 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data02 != null then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter table mt add column c2 int
|
||||||
|
sql insert into tb (ts, c2) values (now, 3)
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $data02 != 3 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
## ALTER TABLE AND IMPORT
|
||||||
|
sql drop database $db
|
||||||
|
sql create database $db
|
||||||
|
sql use $db
|
||||||
|
sql create table mt (ts timestamp, c1 int, c2 nchar(7), c3 int) tags (t1 int)
|
||||||
|
sql create table tb using mt tags(1)
|
||||||
|
sleep 100
|
||||||
|
sql insert into tb values ('2018-11-01 16:30:00.000', 1, 'insert', 1)
|
||||||
|
sql alter table mt drop column c3
|
||||||
|
|
||||||
|
sql insert into tb values ('2018-11-01 16:29:59.000', 1, 'insert')
|
||||||
|
sql import into tb values ('2018-11-01 16:29:59.000', 1, 'import')
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $data01 != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data02 != insert then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql alter table mt add column c3 nchar(4)
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $data03 != NULL then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
print ============================>TD-3366 TD-3486
|
||||||
|
sql insert into td_3366(ts, c3, c1) using mt(t1) tags(911) values('2018-1-1 11:11:11', 'new1', 12);
|
||||||
|
sql insert into td_3486(ts, c3, c1) using mt(t1) tags(-12) values('2018-1-1 11:11:11', 'new1', 12);
|
||||||
|
sql insert into ttxu(ts, c3, c1) using mt(t1) tags('-121') values('2018-1-1 11:11:11', 'new1', 12);
|
||||||
|
|
||||||
|
sql insert into tb(ts, c1, c3) using mt(t1) tags(123) values('2018-11-01 16:29:58.000', 2, 'port')
|
||||||
|
|
||||||
|
sql insert into tb values ('2018-11-01 16:29:58.000', 2, 'import', 3)
|
||||||
|
sql import into tb values ('2018-11-01 16:29:58.000', 2, 'import', 3)
|
||||||
|
sql import into tb values ('2018-11-01 16:39:58.000', 2, 'import', 3)
|
||||||
|
sql select * from tb order by ts desc
|
||||||
|
if $rows != 4 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data03 != 3 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
##### ILLEGAL OPERATIONS
|
||||||
|
|
||||||
|
# try dropping columns that are defined in metric
|
||||||
|
sql_error alter table tb drop column c1;
|
||||||
|
|
||||||
|
# try dropping primary key
|
||||||
|
sql_error alter table mt drop column ts;
|
||||||
|
|
||||||
|
# try modifying two columns in a single statement
|
||||||
|
sql_error alter table mt add column c5 nchar(3) c6 nchar(4)
|
||||||
|
|
||||||
|
# duplicate columns
|
||||||
|
sql_error alter table mt add column c1 int
|
||||||
|
|
||||||
|
# drop non-existing columns
|
||||||
|
sql_error alter table mt drop column c9
|
||||||
|
|
||||||
|
#sql drop database $db
|
||||||
|
#sql show databases
|
||||||
|
#if $rows != 0 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
|
||||||
|
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -102,7 +102,7 @@ print create_db.sim case5: db_meta_data test
# cfg params
$replica = 1 # max=3
$days = 10
$keep = 365
$keep = 365,365,365
$rows_db = 1000
$cache = 16 # 16MB
$ablocks = 100
@ -150,6 +150,73 @@ sql_error create database $db day 3651
|
||||||
|
|
||||||
# keep [1, infinity]
|
# keep [1, infinity]
|
||||||
sql_error create database $db keep 0
|
sql_error create database $db keep 0
|
||||||
|
sql_error create database $db keep 0,0,0
|
||||||
|
sql_error create database $db keep 3,3,3
|
||||||
|
sql_error create database $db keep 11.0
|
||||||
|
sql_error create database $db keep 11.0,11.0,11.0
|
||||||
|
sql_error create database $db keep "11","11","11"
|
||||||
|
sql_error create database $db keep "11"
|
||||||
|
sql_error create database $db keep 13,12,11
|
||||||
|
sql_error create database $db keep 11,12,11
|
||||||
|
sql_error create database $db keep 12,11,12
|
||||||
|
sql_error create database $db keep 8
|
||||||
|
sql_error create database $db keep 12,11
|
||||||
|
sql_error create database $db keep 365001,365001,365001
|
||||||
|
sql create database dbk0 keep 19
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 19,19,19 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbk0
|
||||||
|
sql create database dbka keep 19,20
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 19,20,20 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbka
|
||||||
|
|
||||||
|
sql create database dbk1 keep 11,11,11
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 11,11,11 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbk1
|
||||||
|
sql create database dbk2 keep 11,12,13
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 11,12,13 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbk2
|
||||||
|
sql create database dbk3 keep 11,11,13
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 11,11,13 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbk3
|
||||||
|
sql create database dbk4 keep 11,13,13
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 11,13,13 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbk4
|
||||||
#sql_error create database $db keep 3651
|
#sql_error create database $db keep 3651
|
||||||
|
|
||||||
# rows [200, 10000]
|
# rows [200, 10000]
|
||||||
|
|
|
@ -0,0 +1,238 @@
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
|
||||||
|
|
||||||
|
system sh/deploy.sh -n dnode1 -i 1
|
||||||
|
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||||
|
system sh/exec.sh -n dnode1 -s start
|
||||||
|
|
||||||
|
sleep 100
|
||||||
|
sql connect
|
||||||
|
print ======================== dnode1 start
|
||||||
|
|
||||||
|
$dbPrefix = fi_in_db
|
||||||
|
$tbPrefix = fi_in_tb
|
||||||
|
$mtPrefix = fi_in_mt
|
||||||
|
$tbNum = 10
|
||||||
|
$rowNum = 20
|
||||||
|
$totalNum = 200
|
||||||
|
|
||||||
|
print excuting test script create_db.sim
|
||||||
|
print =============== set up
|
||||||
|
$i = 0
|
||||||
|
$db = $dbPrefix . $i
|
||||||
|
$mt = $mtPrefix . $i
|
||||||
|
|
||||||
|
sql_error createdatabase $db
|
||||||
|
sql create database $db
|
||||||
|
sql use $db
|
||||||
|
sql show databases
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data00 != $db then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database $db
|
||||||
|
|
||||||
|
# case1: case_insensitivity test
|
||||||
|
print =========== create_db.sim case1: case insensitivity test
|
||||||
|
sql_error CREATEDATABASE $db
|
||||||
|
sql CREATE DATABASE $db
|
||||||
|
sql use $db
|
||||||
|
sql show databases
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data00 != $db then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database $db
|
||||||
|
print case_insensitivity test passed
|
||||||
|
|
||||||
|
# case2: illegal_db_name test
|
||||||
|
print =========== create_db.sim case2: illegal_db_name test
|
||||||
|
$illegal_db1 = 1db
|
||||||
|
$illegal_db2 = d@b
|
||||||
|
|
||||||
|
sql_error create database $illegal_db1
|
||||||
|
sql_error create database $illegal_db2
|
||||||
|
print illegal_db_name test passed
|
||||||
|
|
||||||
|
# case3: chinese_char_in_db_name test
|
||||||
|
print ========== create_db.sim case3: chinese_char_in_db_name test
|
||||||
|
$CN_db1 = 数据库
|
||||||
|
$CN_db2 = 数据库1
|
||||||
|
$CN_db3 = db数据库1
|
||||||
|
sql_error create database $CN_db1
|
||||||
|
sql_error create database $CN_db2
|
||||||
|
sql_error create database $CN_db3
|
||||||
|
#sql show databases
|
||||||
|
#if $rows != 3 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
#if $data00 != $CN_db1 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
#if $data10 != $CN_db2 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
#if $data20 != $CN_db3 then
|
||||||
|
# return -1
|
||||||
|
#endi
|
||||||
|
#sql drop database $CN_db1
|
||||||
|
#sql drop database $CN_db2
|
||||||
|
#sql drop database $CN_db3
|
||||||
|
print case_chinese_char_in_db_name test passed
|
||||||
|
|
||||||
|
# case4: db_already_exists
|
||||||
|
print create_db.sim case4: db_already_exists
|
||||||
|
sql create database db0
|
||||||
|
sql create database db0
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database db0
|
||||||
|
print db_already_exists test passed
|
||||||
|
|
||||||
|
# case5: db_meta_data
|
||||||
|
print create_db.sim case5: db_meta_data test
|
||||||
|
# cfg params
|
||||||
|
$replica = 1 # max=3
|
||||||
|
$days = 10
|
||||||
|
$keep = 365
|
||||||
|
$rows_db = 1000
|
||||||
|
$cache = 16 # 16MB
|
||||||
|
$ablocks = 100
|
||||||
|
$tblocks = 32 # max=512, automatically trimmed when exceeding
|
||||||
|
$ctime = 36000 # 10 hours
|
||||||
|
$wal = 1 # valid value is 1, 2
|
||||||
|
$comp = 1 # max=32, automatically trimmed when exceeding
|
||||||
|
|
||||||
|
sql create database $db replica $replica days $days keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data00 != $db then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data04 != $replica then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data06 != $days then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 365 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
print data08 = $data07
|
||||||
|
if $data08 != $cache then
|
||||||
|
print expect $cache, actual:$data08
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data09 != 4 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql drop database $db
|
||||||
|
|
||||||
|
## param range tests
|
||||||
|
# replica [1,3]
|
||||||
|
#sql_error create database $db replica 0
|
||||||
|
sql_error create database $db replica 4
|
||||||
|
|
||||||
|
# day [1, 3650]
|
||||||
|
sql_error create database $db day 0
|
||||||
|
sql_error create database $db day 3651
|
||||||
|
|
||||||
|
# keep [1, infinity]
|
||||||
|
sql_error create database $db keep 0
|
||||||
|
sql_error create database $db keep 0,0,0
|
||||||
|
sql_error create database $db keep 3,3,3
|
||||||
|
sql_error create database $db keep 3
|
||||||
|
sql_error create database $db keep 11.0
|
||||||
|
sql_error create database $db keep 11.0,11.0,11.0
|
||||||
|
sql_error create database $db keep "11","11","11"
|
||||||
|
sql_error create database $db keep "11"
|
||||||
|
sql_error create database $db keep 13,12,11
|
||||||
|
sql_error create database $db keep 11,12,11
|
||||||
|
sql_error create database $db keep 12,11,12
|
||||||
|
sql_error create database $db keep 11,12,13
|
||||||
|
sql_error create database $db keep 11,12,13,14
|
||||||
|
sql_error create database $db keep 11,11
|
||||||
|
sql_error create database $db keep 365001,365001,365001
|
||||||
|
sql_error create database $db keep 365001
|
||||||
|
sql create database dbk1 keep 11
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 11 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbk1
|
||||||
|
sql create database dbk2 keep 12
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 12 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbk2
|
||||||
|
sql create database dbk3 keep 11
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 11 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbk3
|
||||||
|
sql create database dbk4 keep 13
|
||||||
|
sql show databases
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data07 != 13 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql drop database dbk4
|
||||||
|
#sql_error create database $db keep 3651
|
||||||
|
|
||||||
|
# rows [200, 10000]
|
||||||
|
sql_error create database $db maxrows 199
|
||||||
|
#sql_error create database $db maxrows 10001
|
||||||
|
|
||||||
|
# cache [100, 10485760]
|
||||||
|
sql_error create database $db cache 0
|
||||||
|
#sql_error create database $db cache 10485761
|
||||||
|
|
||||||
|
|
||||||
|
# blocks [32, 4096 overwriten by 4096 if exceeds, Note added:2018-10-24]
|
||||||
|
#sql_error create database $db tblocks 31
|
||||||
|
#sql_error create database $db tblocks 4097
|
||||||
|
|
||||||
|
# ctime [30, 40960]
|
||||||
|
sql_error create database $db ctime 29
|
||||||
|
sql_error create database $db ctime 40961
|
||||||
|
|
||||||
|
# wal {0, 2}
|
||||||
|
#sql_error create database $db wal 0
|
||||||
|
sql_error create database $db wal -1
|
||||||
|
sql_error create database $db wal 3
|
||||||
|
|
||||||
|
# comp {0, 1, 2}
|
||||||
|
sql_error create database $db comp -1
|
||||||
|
sql_error create database $db comp 3
|
||||||
|
|
||||||
|
sql_error drop database $db
|
||||||
|
sql show databases
|
||||||
|
if $rows != 0 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -932,3 +932,125 @@ if $data32 != 0.000144445 then
|
||||||
return -1
|
return -1
|
||||||
endi
|
endi
|
||||||
|
|
||||||
|
print ===========================> derivative
|
||||||
|
sql drop table t1
|
||||||
|
sql drop table tx;
|
||||||
|
sql drop table m1;
|
||||||
|
sql drop table if exists tm0;
|
||||||
|
sql drop table if exists tm1;
|
||||||
|
sql create table tm0(ts timestamp, k double)
|
||||||
|
sql insert into tm0 values('2015-08-18T00:00:00Z', 2.064) ('2015-08-18T00:06:00Z', 2.116) ('2015-08-18T00:12:00Z', 2.028)
|
||||||
|
sql insert into tm0 values('2015-08-18T00:18:00Z', 2.126) ('2015-08-18T00:24:00Z', 2.041) ('2015-08-18T00:30:00Z', 2.051)
|
||||||
|
|
||||||
|
sql_error select derivative(ts) from tm0;
|
||||||
|
sql_error select derivative(k) from tm0;
|
||||||
|
sql_error select derivative(k, 0, 0) from tm0;
|
||||||
|
sql_error select derivative(k, 1, 911) from tm0;
|
||||||
|
sql_error select derivative(kx, 1s, 1) from tm0;
|
||||||
|
sql_error select derivative(k, -20s, 1) from tm0;
|
||||||
|
sql_error select derivative(k, 20a, 0) from tm0;
|
||||||
|
sql_error select derivative(k, 200a, 0) from tm0;
|
||||||
|
sql_error select derivative(k, 999a, 0) from tm0;
|
||||||
|
sql_error select derivative(k, 20s, -12) from tm0;
|
||||||
|
|
||||||
|
sql select derivative(k, 1s, 0) from tm0
|
||||||
|
if $rows != 5 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data00 != @15-08-18 08:06:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 0.000144444 then
|
||||||
|
print expect 0.000144444, actual: $data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data10 != @15-08-18 08:12:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data11 != -0.000244444 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data20 != @15-08-18 08:18:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data21 != 0.000272222 then
|
||||||
|
print expect 0.000272222, actual: $data21
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data30 != @15-08-18 08:24:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data31 != -0.000236111 then
|
||||||
|
print expect 0.000236111, actual: $data31
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select derivative(k, 6m, 0) from tm0;
|
||||||
|
if $rows != 5 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data00 != @15-08-18 08:06:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 0.052000000 then
|
||||||
|
print expect 0.052000000, actual: $data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data10 != @15-08-18 08:12:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data11 != -0.088000000 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data20 != @15-08-18 08:18:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data21 != 0.098000000 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data30 != @15-08-18 08:24:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data31 != -0.085000000 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select derivative(k, 12m, 0) from tm0;
|
||||||
|
if $rows != 5 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data00 != @15-08-18 08:06:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 0.104000000 then
|
||||||
|
print expect 0.104000000, actual: $data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select derivative(k, 6m, 1) from tm0;
|
||||||
|
if $rows != 3 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql_error select derivative(k, 6m, 1) from tm0 interval(1s);
|
||||||
|
sql_error select derivative(k, 6m, 1) from tm0 session(ts, 1s);
|
||||||
|
sql_error select derivative(k, 6m, 1) from tm0 group by k;
|
||||||
|
sql_error select derivative(k, 6m, 1) from
|
|
@ -147,8 +147,57 @@ if $data02 != @nest_tb0@ then
|
||||||
endi
|
endi
|
||||||
|
|
||||||
print ===================> nest query interval
|
print ===================> nest query interval
|
||||||
|
sql_error select ts, avg(c1) from (select ts, c1 from nest_tb0);
|
||||||
|
|
||||||
|
sql select avg(c1) from (select * from nest_tb0) interval(3d)
|
||||||
|
if $rows != 3 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data00 != @20-09-14 00:00:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 49.222222222 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data10 != @20-09-17 00:00:00.000@ then
|
||||||
|
print expect 20-09-17 00:00:00.000, actual: $data10
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data11 != 49.685185185 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data20 != @20-09-20 00:00:00.000@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data21 != 49.500000000 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
#define TSDB_FUNC_APERCT 7
|
||||||
|
#define TSDB_FUNC_LAST_ROW 10
|
||||||
|
#define TSDB_FUNC_TWA 14
|
||||||
|
#define TSDB_FUNC_LEASTSQR 15
|
||||||
|
#define TSDB_FUNC_ARITHM 23
|
||||||
|
#define TSDB_FUNC_DIFF 24
|
||||||
|
#define TSDB_FUNC_INTERP 28
|
||||||
|
#define TSDB_FUNC_RATE 29
|
||||||
|
#define TSDB_FUNC_IRATE 30
|
||||||
|
#define TSDB_FUNC_DERIVATIVE 32
|
||||||
|
|
||||||
|
sql_error select stddev(c1) from (select c1 from nest_tb0);
|
||||||
|
sql_error select percentile(c1, 20) from (select * from nest_tb0);
|
||||||
|
|
||||||
|
sql select avg(c1),sum(c2), max(c3), min(c4), count(*), first(c7), last(c7),spread(c6) from (select * from nest_tb0) interval(1d);
|
||||||
|
|
||||||
|
sql select top(x, 20) from (select c1 x from nest_tb0);
|
||||||
|
|
||||||
|
sql select bottom(x, 20) from (select c1 x from nest_tb0)
|
||||||
|
|
||||||
print ===================> complex query
|
print ===================> complex query
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,113 @@
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
|
||||||
|
system sh/deploy.sh -n dnode1 -i 1
|
||||||
|
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||||
|
system sh/exec.sh -n dnode1 -s start
|
||||||
|
sleep 1000
|
||||||
|
sql connect
|
||||||
|
|
||||||
|
$dbPrefix = m_di_db_ns
|
||||||
|
$tbPrefix = m_di_tb
|
||||||
|
$mtPrefix = m_di_mt
|
||||||
|
$ntPrefix = m_di_nt
|
||||||
|
$tbNum = 2
|
||||||
|
$rowNum = 200
|
||||||
|
$futureTs = 300000000000
|
||||||
|
|
||||||
|
print =============== step1: create database and tables and insert data
|
||||||
|
$i = 0
|
||||||
|
$db = $dbPrefix . $i
|
||||||
|
$mt = $mtPrefix . $i
|
||||||
|
$nt = $ntPrefix . $i
|
||||||
|
|
||||||
|
sql drop database $db -x step1
|
||||||
|
step1:
|
||||||
|
sql create database $db precision 'ns'
|
||||||
|
sql use $db
|
||||||
|
sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int)
|
||||||
|
|
||||||
|
$i = 0
|
||||||
|
while $i < $tbNum
|
||||||
|
$tb = $tbPrefix . $i
|
||||||
|
sql create table $tb using $mt tags( $i )
|
||||||
|
|
||||||
|
$x = 0
|
||||||
|
while $x < $rowNum
|
||||||
|
$cc = $futureTs + $x * 100 + 43
|
||||||
|
$ns = $cc . b
|
||||||
|
sql insert into $tb values (now + $ns , $x )
|
||||||
|
$x = $x + 1
|
||||||
|
endw
|
||||||
|
|
||||||
|
$i = $i + 1
|
||||||
|
endw
|
||||||
|
|
||||||
|
sql create table $nt (ts timestamp, tbcol int)
|
||||||
|
$x = 0
|
||||||
|
while $x < $rowNum
|
||||||
|
$cc = $futureTs + $x * 100 + 43
|
||||||
|
$ns = $cc . b
|
||||||
|
sql insert into $nt values (now + $ns , $x )
|
||||||
|
$x = $x + 1
|
||||||
|
endw
|
||||||
|
|
||||||
|
sleep 100
|
||||||
|
|
||||||
|
print =============== step2: select count(*) from tables
|
||||||
|
$i = 0
|
||||||
|
$tb = $tbPrefix . $i
|
||||||
|
|
||||||
|
sql select count(*) from $tb
|
||||||
|
|
||||||
|
if $data00 != $rowNum then
|
||||||
|
print expect $rowNum, actual:$data00
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
$i = 0
|
||||||
|
$mt = $mtPrefix . $i
|
||||||
|
sql select count(*) from $mt
|
||||||
|
|
||||||
|
$mtRowNum = $tbNum * $rowNum
|
||||||
|
if $data00 != $mtRowNum then
|
||||||
|
print expect $mtRowNum, actual:$data00
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
$i = 0
|
||||||
|
$nt = $ntPrefix . $i
|
||||||
|
|
||||||
|
sql select count(*) from $nt
|
||||||
|
|
||||||
|
if $data00 != $rowNum then
|
||||||
|
print expect $rowNum, actual:$data00
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
print =============== step3: check nano second timestamp
|
||||||
|
$i = 0
|
||||||
|
$mt = $mtPrefix . $i
|
||||||
|
$tb = $tbPrefix . $i
|
||||||
|
sql insert into $tb values (now-43b , $x )
|
||||||
|
sql select count(*) from $tb where ts<now
|
||||||
|
if $data00 != 1 then
|
||||||
|
print expected 1, actual: $data00
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
print =============== step4: check interval/sliding nano second
|
||||||
|
$i = 0
|
||||||
|
$mt = $mtPrefix . $i
|
||||||
|
sql_error select count(*) from $mt interval(1000b) sliding(100b)
|
||||||
|
sql_error select count(*) from $mt interval(10000000b) sliding(99999b)
|
||||||
|
|
||||||
|
sql select count(*) from $mt interval(100000000b) sliding(100000000b)
|
||||||
|
|
||||||
|
print =============== clear
|
||||||
|
sql drop database $db
|
||||||
|
sql show databases
|
||||||
|
if $rows != 0 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -62,4 +62,5 @@ run general/parser/binary_escapeCharacter.sim
run general/parser/between_and.sim
run general/parser/last_cache.sim
run general/parser/nestquery.sim
run general/parser/precision_ns.sim
@ -175,7 +175,7 @@ if $data21 != 2.10000 then
endi

print =====================td-1302 case
sql create database t1 keep 36500;
sql create database t1 keep 36500
sql use t1;
sql create table test(ts timestamp, k int);
sql insert into test values(29999, 1)(70000, 2)(80000, 3)
@ -194,7 +194,7 @@ if $rows != 3 then
endi

print ==============>td-1308
sql create database db keep 36500;
sql create database db keep 36500
sql use db;

sql create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 binary(10));
@ -424,6 +424,10 @@ cd ../../../debug; make
./test.sh -f general/parser/stableOp.sim
./test.sh -f general/parser/timestamp.sim
./test.sh -f general/parser/sliding.sim
./test.sh -f general/parser/having.sim
./test.sh -f general/parser/having_child.sim
./test.sh -f general/parser/between_and.sim
./test.sh -f general/parser/last_cache.sim
./test.sh -f unique/big/balance.sim

#======================b7-end===============
@ -134,6 +134,7 @@ run general/parser/bug.sim
run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
run general/parser/repeatAlter.sim
run general/parser/precision_ns.sim
##unsupport run general/parser/slimit_alter_tags.sim
run general/stable/disk.sim
run general/stable/dnode3.sim
@ -813,8 +813,15 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
        value[length[i]] = 0;
        // snprintf(value, fields[i].bytes, "%s", (char *)row[i]);
        break;
      case TSDB_DATA_TYPE_TIMESTAMP:
      case TSDB_DATA_TYPE_TIMESTAMP: {
        tt = *(int64_t *)row[i] / 1000;
        int32_t precision = taos_result_precision(pSql);
        if (precision == TSDB_TIME_PRECISION_MILLI) {
          tt = (*(int64_t *)row[i]) / 1000;
        } else if (precision == TSDB_TIME_PRECISION_MICRO) {
          tt = (*(int64_t *)row[i]) / 1000000;
        } else {
          tt = (*(int64_t *)row[i]) / 1000000000;
        }
        /* comment out as it make testcases like select_with_tags.sim fail.
          but in windows, this may cause the call to localtime crash if tt < 0,
          need to find a better solution.
@ -829,9 +836,16 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {

        tp = localtime(&tt);
        strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", tp);
        if (precision == TSDB_TIME_PRECISION_MILLI) {
          sprintf(value, "%s.%03d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000));
        } else if (precision == TSDB_TIME_PRECISION_MICRO) {
          sprintf(value, "%s.%06d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000000));
        } else {
          sprintf(value, "%s.%09d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000000000));
        }

        break;
      }
      default:
        break;
    }  // end of switch
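The change above divides the raw epoch value by 1000, 1000000, or 1000000000 depending on the result precision before calling localtime, and pads the fractional part to 3, 6, or 9 digits. A small Python cross-check of that arithmetic is sketched here, using 0/1/2 for milli/micro/nano in the same order as the TSDB_TIME_PRECISION_* constants; it is only an illustration, not part of the commit.

from datetime import datetime

# Sketch: mirror the precision handling added to simExe.c (0=milli, 1=micro, 2=nano).
def format_ts(raw, precision):
    divisor = {0: 1000, 1: 1000000, 2: 1000000000}[precision]
    width = {0: 3, 1: 6, 2: 9}[precision]
    seconds, frac = divmod(raw, divisor)
    base = datetime.fromtimestamp(seconds).strftime("%y-%m-%d %H:%M:%S")
    return "%s.%0*d" % (base, width, frac)

print(format_ts(1600000000123, 0))   # millisecond-precision example, local time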