other: merge 3.0

Haojun Liao 2022-07-23 00:02:53 +08:00
commit 4a7a85ab07
111 changed files with 1565 additions and 1064 deletions

View File

@ -91,7 +91,7 @@ Add the following dependency to the `pom.xml` file of your Maven project:
You can build Java connector from source code after cloning the TDengine project:
```
git clone https://github.com/taosdata/taos-connector-jdbc.git
git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
@ -140,34 +140,34 @@ When you use a JDBC native connection to connect to a TDengine cluster, you can
1. Do not specify the hostname and port in the Java application.
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
2. Specify the firstEp and the secondEp in the configuration file taos.cfg.
```shell
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
```shell
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
secondEp cluster_node2:6030
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
secondEp cluster_node2:6030
# default system charset
# charset UTF-8
# default system charset
# charset UTF-8
# system locale
# locale en_US.UTF-8
```
# system locale
# locale en_US.UTF-8
```
In the above example, JDBC uses the client's configuration file to establish a connection to the hostname `cluster_node1`, port 6030, and the database named `test`. When the firstEp node in the cluster fails, JDBC attempts to connect to the cluster using secondEp.

View File

@ -93,7 +93,7 @@ In a Maven project, add the following dependency to pom.xml:
You can download the TDengine source code and build the latest version of the Java connector yourself:
```shell
git clone https://github.com/taosdata/taos-connector-jdbc.git
git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```

View File

@ -34,11 +34,10 @@ typedef enum {
TSDB_SUPER_TABLE = 1, // super table
TSDB_CHILD_TABLE = 2, // table created from super table
TSDB_NORMAL_TABLE = 3, // ordinary table
TSDB_STREAM_TABLE = 4, // table created from stream computing
TSDB_TEMP_TABLE = 5, // temp table created by nested query
TSDB_SYSTEM_TABLE = 6,
TSDB_TSMA_TABLE = 7, // time-range-wise sma
TSDB_TABLE_MAX = 8
TSDB_TEMP_TABLE = 4, // temp table created by nested query
TSDB_SYSTEM_TABLE = 5,
TSDB_TSMA_TABLE = 6, // time-range-wise sma
TSDB_TABLE_MAX = 7
} ETableType;
typedef enum {

View File

@ -2537,6 +2537,15 @@ static FORCE_INLINE void* tDecodeSMqRebVgReq(const void* buf, SMqRebVgReq* pReq)
return (void*)buf;
}
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
int64_t ntbUid;
SArray* colIdList; // SArray<int16_t>
} SCheckAlterInfo;
int32_t tEncodeSCheckAlterInfo(SEncoder* pEncoder, const SCheckAlterInfo* pInfo);
int32_t tDecodeSCheckAlterInfo(SDecoder* pDecoder, SCheckAlterInfo* pInfo);
typedef struct {
int32_t vgId;
int64_t offset;

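The `SCheckAlterInfo` payload above carries the topic name, the uid of the subscribed normal table, and the list of subscribed column ids. A minimal round-trip sketch, assuming only the declarations in this hunk plus the usual `taosMemory*` helpers (the buffer handling here is illustrative, not the actual mnode call site), could look like:

```c
// Sketch only: serialize an SCheckAlterInfo and decode it back.
// Mirrors the tEncodeSize/tEncoderInit pattern used later in this commit (mndCreateTopic).
#include "tmsg.h"

static int32_t checkAlterInfoRoundTrip(const SCheckAlterInfo *pIn, SCheckAlterInfo *pOut) {
  int32_t len = 0, code = 0;
  tEncodeSize(tEncodeSCheckAlterInfo, pIn, len, code);  // compute the encoded size first
  if (code < 0) return -1;

  void *buf = taosMemoryCalloc(1, len);
  if (buf == NULL) return -1;

  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, len);
  if (tEncodeSCheckAlterInfo(&encoder, pIn) < 0) {  // topic, ntbUid, then each colId
    tEncoderClear(&encoder);
    taosMemoryFree(buf);
    return -1;
  }
  tEncoderClear(&encoder);

  SDecoder decoder = {0};
  tDecoderInit(&decoder, buf, len);
  int32_t ret = tDecodeSCheckAlterInfo(&decoder, pOut);  // allocates pOut->colIdList
  tDecoderClear(&decoder);
  taosMemoryFree(buf);
  return ret;
}
```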
View File

@ -186,6 +186,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp)
TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp)
TD_DEF_MSG_TYPE(TDMT_VND_MQ_COMMIT_OFFSET, "vnode-commit-offset", STqOffset, STqOffset)
TD_DEF_MSG_TYPE(TDMT_VND_CHECK_ALTER_INFO, "vnode-alter-check-info", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TOPIC, "vnode-alter-topic", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_TOPIC, "vnode-drop-topic", NULL, NULL)

View File

@ -64,7 +64,8 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers);
* @param SReadHandle
* @return
*/
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchemaWrapper);
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols,
SSchemaWrapper** pSchemaWrapper);
/**
* Set the input data block for the stream scan.

View File

@ -111,6 +111,7 @@ typedef struct SAggLogicNode {
SNodeList* pGroupKeys;
SNodeList* pAggFuncs;
bool hasLastRow;
bool hasTimeLineFunc;
} SAggLogicNode;
typedef struct SProjectLogicNode {

View File

@ -332,6 +332,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_INVALID_TABLE_ACTION TAOS_DEF_ERROR_CODE(0, 0x0519)
#define TSDB_CODE_VND_COL_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051a)
#define TSDB_CODE_VND_TABLE_COL_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051b)
#define TSDB_CODE_VND_COL_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x051c)
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)

View File

@ -51,6 +51,6 @@ target_link_libraries(
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom
)
if(${BUILD_TEST})
#if(${BUILD_TEST})
ADD_SUBDIRECTORY(test)
endif(${BUILD_TEST})
#endif(${BUILD_TEST})

View File

@ -8,7 +8,7 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(clientTest clientTests.cpp)
TARGET_LINK_LIBRARIES(
clientTest
PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom
PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom executor
)
ADD_EXECUTABLE(tmqTest tmqTest.cpp)

View File

@ -27,6 +27,7 @@
#pragma GCC diagnostic ignored "-Wsign-compare"
#include "taos.h"
#include "executor.h"
namespace {
void showDB(TAOS* pConn) {
@ -823,6 +824,17 @@ TEST(testCase, async_api_test) {
TEST(testCase, update_test) {
SInterval interval = {0};
interval.offset = 8000;
interval.interval = 10000;
interval.sliding = 4000;
interval.intervalUnit = 's';
interval.offsetUnit = 's';
interval.slidingUnit = 's';
// STimeWindow w = getAlignQueryTimeWindow(&interval, 0, 1630000000000);
STimeWindow w = getAlignQueryTimeWindow(&interval, 0, 1629999999999);
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);

View File

@ -183,7 +183,7 @@ bool tsStartUdfd = true;
int32_t tsTransPullupInterval = 2;
int32_t tsMqRebalanceInterval = 2;
int32_t tsTtlUnit = 86400;
int32_t tsTtlPushInterval = 60;
int32_t tsTtlPushInterval = 86400;
int32_t tsGrantHBInterval = 60;
void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) {
@ -226,8 +226,17 @@ static int32_t taosSetTfsCfg(SConfig *pCfg) {
}
if (tsDataDir[0] == 0) {
uError("datadir not set");
return -1;
if (pItem->str != NULL) {
taosAddDataDir(0, pItem->str, 0, 1);
tstrncpy(tsDataDir, pItem->str, PATH_MAX);
if (taosMulMkDir(tsDataDir) != 0) {
uError("failed to create dataDir:%s since %s", tsDataDir, terrstr());
return -1;
}
} else {
uError("datadir not set");
return -1;
}
}
return 0;
@ -466,7 +475,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, 1) != 0) return -1;
if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1;
return 0;

View File

@ -5623,6 +5623,33 @@ int32_t tDecodeSTqOffset(SDecoder *pDecoder, STqOffset *pOffset) {
return 0;
}
int32_t tEncodeSCheckAlterInfo(SEncoder *pEncoder, const SCheckAlterInfo *pInfo) {
if (tEncodeCStr(pEncoder, pInfo->topic) < 0) return -1;
if (tEncodeI64(pEncoder, pInfo->ntbUid) < 0) return -1;
int32_t sz = taosArrayGetSize(pInfo->colIdList);
if (tEncodeI32(pEncoder, sz) < 0) return -1;
for (int32_t i = 0; i < sz; i++) {
int16_t colId = *(int16_t *)taosArrayGet(pInfo->colIdList, i);
if (tEncodeI16(pEncoder, colId) < 0) return -1;
}
return pEncoder->pos;
}
int32_t tDecodeSCheckAlterInfo(SDecoder *pDecoder, SCheckAlterInfo *pInfo) {
if (tDecodeCStrTo(pDecoder, pInfo->topic) < 0) return -1;
if (tDecodeI64(pDecoder, &pInfo->ntbUid) < 0) return -1;
int32_t sz;
if (tDecodeI32(pDecoder, &sz) < 0) return -1;
pInfo->colIdList = taosArrayInit(sz, sizeof(int16_t));
if (pInfo->colIdList == NULL) return -1;
for (int32_t i = 0; i < sz; i++) {
int16_t colId;
if (tDecodeI16(pDecoder, &colId) < 0) return -1;
taosArrayPush(pInfo->colIdList, &colId);
}
return 0;
}
int32_t tEncodeDeleteRes(SEncoder *pCoder, const SDeleteRes *pRes) {
int32_t nUid = taosArrayGetSize(pRes->uidList);

View File

@ -224,6 +224,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DEPLOY_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DROP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -352,6 +352,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_COMMIT_OFFSET, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_COMMIT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -144,33 +144,33 @@ typedef enum {
} ECsmUpdateType;
typedef struct {
int32_t id;
ETrnStage stage;
ETrnPolicy policy;
ETrnConflct conflict;
ETrnExec exec;
EOperType oper;
int32_t code;
int32_t failedTimes;
void* rpcRsp;
int32_t rpcRspLen;
int32_t redoActionPos;
SArray* redoActions;
SArray* undoActions;
SArray* commitActions;
int64_t createdTime;
int64_t lastExecTime;
int32_t lastAction;
int32_t lastErrorNo;
tmsg_t lastMsgType;
SEpSet lastEpset;
char dbname1[TSDB_DB_FNAME_LEN];
char dbname2[TSDB_DB_FNAME_LEN];
int32_t startFunc;
int32_t stopFunc;
int32_t paramLen;
void* param;
SArray* pRpcArray;
int32_t id;
ETrnStage stage;
ETrnPolicy policy;
ETrnConflct conflict;
ETrnExec exec;
EOperType oper;
int32_t code;
int32_t failedTimes;
void* rpcRsp;
int32_t rpcRspLen;
int32_t redoActionPos;
SArray* redoActions;
SArray* undoActions;
SArray* commitActions;
int64_t createdTime;
int64_t lastExecTime;
int32_t lastAction;
int32_t lastErrorNo;
tmsg_t lastMsgType;
SEpSet lastEpset;
char dbname1[TSDB_DB_FNAME_LEN];
char dbname2[TSDB_DB_FNAME_LEN];
int32_t startFunc;
int32_t stopFunc;
int32_t paramLen;
void* param;
SArray* pRpcArray;
} STrans;
typedef struct {
@ -477,6 +477,10 @@ typedef struct {
char* physicalPlan;
SSchemaWrapper schema;
int64_t stbUid;
// info used to forbid altering subscribed columns
int64_t ntbUid;
SArray* ntbColIds;
int64_t ctbStbUid;
} SMqTopicObj;
typedef struct {

View File

@ -57,6 +57,7 @@ int32_t mndInitTopic(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_TOPIC, mndProcessCreateTopicReq);
mndSetMsgHandle(pMnode, TDMT_MND_DROP_TOPIC, mndProcessDropTopicReq);
mndSetMsgHandle(pMnode, TDMT_VND_DROP_TOPIC_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_CHECK_ALTER_INFO_RSP, mndTransProcessRsp);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndRetrieveTopic);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndCancelGetNextTopic);
@ -74,7 +75,6 @@ const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]) {
int32_t mndCheckColAndTagModifiable(SMnode *pMnode, int64_t suid, col_id_t colId) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
bool found = false;
while (1) {
SMqTopicObj *pTopic = NULL;
pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic);
@ -95,10 +95,12 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, int64_t suid, col_id_t colId
SNode *pNode = NULL;
FOREACH(pNode, pNodeList) {
SColumnNode *pCol = (SColumnNode *)pNode;
if (pCol->tableId != suid) goto NEXT;
if (pCol->tableId != suid && pTopic->ctbStbUid != suid) goto NEXT;
if (pCol->colId > 0 && pCol->colId == colId) {
found = true;
goto NEXT;
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
terrno = TSDB_CODE_MND_FIELD_CONFLICT_WITH_TOPIC;
return -1;
}
mTrace("topic:%s, colId:%d is used", pTopic->name, pCol->colId);
}
@ -106,10 +108,6 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, int64_t suid, col_id_t colId
NEXT:
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
if (found) {
terrno = TSDB_CODE_MND_FIELD_CONFLICT_WITH_TOPIC;
return -1;
}
}
return 0;
@ -127,8 +125,10 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) {
if (pTopic->schema.nCols) {
schemaLen = taosEncodeSSchemaWrapper(NULL, &pTopic->schema);
}
int32_t size =
sizeof(SMqTopicObj) + physicalPlanLen + pTopic->sqlLen + pTopic->astLen + schemaLen + MND_TOPIC_RESERVE_SIZE;
int32_t ntbColLen = taosArrayGetSize(pTopic->ntbColIds) * sizeof(int16_t);
int32_t size = sizeof(SMqTopicObj) + physicalPlanLen + pTopic->sqlLen + pTopic->astLen + schemaLen + ntbColLen +
MND_TOPIC_RESERVE_SIZE;
SSdbRaw *pRaw = sdbAllocRaw(SDB_TOPIC, MND_TOPIC_VER_NUMBER, size);
if (pRaw == NULL) goto TOPIC_ENCODE_OVER;
@ -164,6 +164,16 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) {
taosEncodeSSchemaWrapper(&aswBuf, &pTopic->schema);
SDB_SET_BINARY(pRaw, dataPos, swBuf, schemaLen, TOPIC_ENCODE_OVER);
}
SDB_SET_INT64(pRaw, dataPos, pTopic->ntbUid, TOPIC_ENCODE_OVER);
if (pTopic->ntbUid != 0) {
int32_t sz = taosArrayGetSize(pTopic->ntbColIds);
SDB_SET_INT32(pRaw, dataPos, sz, TOPIC_ENCODE_OVER);
for (int32_t i = 0; i < sz; i++) {
int16_t colId = *(int16_t *)taosArrayGet(pTopic->ntbColIds, i);
SDB_SET_INT16(pRaw, dataPos, colId, TOPIC_ENCODE_OVER);
}
}
SDB_SET_INT64(pRaw, dataPos, pTopic->ctbStbUid, TOPIC_ENCODE_OVER);
SDB_SET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_ENCODE_OVER);
SDB_SET_DATALEN(pRaw, dataPos, TOPIC_ENCODE_OVER);
@ -259,6 +269,20 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) {
pTopic->schema.version = 0;
pTopic->schema.pSchema = NULL;
}
SDB_GET_INT64(pRaw, dataPos, &pTopic->ntbUid, TOPIC_DECODE_OVER);
if (pTopic->ntbUid != 0) {
int32_t ntbColNum;
SDB_GET_INT32(pRaw, dataPos, &ntbColNum, TOPIC_DECODE_OVER);
pTopic->ntbColIds = taosArrayInit(ntbColNum, sizeof(int16_t));
if (pTopic->ntbColIds == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto TOPIC_DECODE_OVER;
}
for (int32_t i = 0; i < ntbColNum; i++) {
int16_t colId;
SDB_GET_INT16(pRaw, dataPos, &colId, TOPIC_DECODE_OVER);
taosArrayPush(pTopic->ntbColIds, &colId);
}
}
SDB_GET_INT64(pRaw, dataPos, &pTopic->ctbStbUid, TOPIC_DECODE_OVER);
SDB_GET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_DECODE_OVER);
@ -346,6 +370,26 @@ static int32_t mndCheckCreateTopicReq(SCMCreateTopicReq *pCreate) {
return 0;
}
static int32_t extractTopicTbInfo(SNode *pAst, SMqTopicObj *pTopic) {
SNodeList *pNodeList = NULL;
nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
int64_t suid = ((SRealTableNode *)((SSelectStmt *)pAst)->pFromTable)->pMeta->suid;
int8_t tableType = ((SRealTableNode *)((SSelectStmt *)pAst)->pFromTable)->pMeta->tableType;
if (tableType == TSDB_CHILD_TABLE) {
pTopic->ctbStbUid = suid;
} else if (tableType == TSDB_NORMAL_TABLE) {
SNode *pNode = NULL;
FOREACH(pNode, pNodeList) {
SColumnNode *pCol = (SColumnNode *)pNode;
if (pCol->tableType == TSDB_NORMAL_TABLE) {
pTopic->ntbUid = pCol->tableId;
taosArrayPush(pTopic->ntbColIds, &pCol->colId);
}
}
}
return 0;
}
static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *pCreate, SDbObj *pDb) {
mDebug("topic:%s to create", pCreate->name);
SMqTopicObj topicObj = {0};
@ -386,6 +430,19 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
return -1;
}
int64_t ntbUid;
topicObj.ntbColIds = taosArrayInit(0, sizeof(int16_t));
if (topicObj.ntbColIds == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
extractTopicTbInfo(pAst, &topicObj);
if (topicObj.ntbUid == 0) {
taosArrayDestroy(topicObj.ntbColIds);
topicObj.ntbColIds = NULL;
}
if (qExtractResultSchema(pAst, &topicObj.schema.nCols, &topicObj.schema.pSchema) != 0) {
mError("topic:%s, failed to create since %s", pCreate->name, terrstr());
taosMemoryFree(topicObj.ast);
@ -433,6 +490,60 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
}
sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
if (topicObj.ntbUid != 0) {
SCheckAlterInfo info;
memcpy(info.topic, topicObj.name, TSDB_TOPIC_FNAME_LEN);
info.ntbUid = topicObj.ntbUid;
info.colIdList = topicObj.ntbColIds;
// broadcast the forbid-alter info to all vgroups in the database
void *pIter = NULL;
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
while (1) {
// iterate vg
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
if (!mndVgroupInDb(pVgroup, topicObj.dbUid)) {
sdbRelease(pSdb, pVgroup);
continue;
}
// encode the check-alter info
int32_t len;
int32_t code;
tEncodeSize(tEncodeSCheckAlterInfo, &info, len, code);
if (code < 0) {
sdbRelease(pSdb, pVgroup);
mndTransDrop(pTrans);
ASSERT(0);
return -1;
}
void *buf = taosMemoryCalloc(1, sizeof(SMsgHead) + len);
void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
SEncoder encoder;
tEncoderInit(&encoder, abuf, len);
if (tEncodeSCheckAlterInfo(&encoder, &info) < 0) {
sdbRelease(pSdb, pVgroup);
mndTransDrop(pTrans);
return -1;
}
tEncoderClear(&encoder);
((SMsgHead *)buf)->vgId = htonl(pVgroup->vgId);
// add redo action
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
action.pCont = buf;
action.contLen = sizeof(SMsgHead) + len;
action.msgType = TDMT_VND_CHECK_ALTER_INFO;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(buf);
sdbRelease(pSdb, pVgroup);
mndTransDrop(pTrans);
return -1;
}
}
}
if (mndTransPrepare(pMnode, pTrans) != 0) {
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
taosMemoryFreeClear(topicObj.physicalPlan);
@ -442,7 +553,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
taosMemoryFreeClear(topicObj.physicalPlan);
mndTransDrop(pTrans);
return 0;
return TSDB_CODE_ACTION_IN_PROGRESS;
}
static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {

View File

@ -89,7 +89,7 @@ typedef struct {
STqExecDb execDb;
};
int32_t numOfCols; // number of output columns, temporarily used
SSchemaWrapper *pSchemaWrapper; // columns that are involved in query
SSchemaWrapper* pSchemaWrapper; // columns that are involved in query
} STqExecHandle;
typedef struct {
@ -110,9 +110,6 @@ typedef struct {
// exec
STqExecHandle execHandle;
// prevent drop
int64_t ntbUid;
SArray* colIdList; // SArray<int32_t>
} STqHandle;
struct STQ {
@ -120,9 +117,9 @@ struct STQ {
SHashObj* pushMgr; // consumerId -> STqHandle*
SHashObj* handles; // subKey -> STqHandle
SHashObj* pStreamTasks; // taksId -> SStreamTask
SHashObj* pAlterInfo; // topic -> SAlterCheckInfo
STqOffsetStore* pOffsetStore;
SVnode* pVnode;
SWal* pWal;
TDB* pMetaStore;
TTB* pExecStore;
};

View File

@ -137,12 +137,13 @@ STsdbReader tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableL
// tq
int tqInit();
void tqCleanUp();
STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal);
STQ* tqOpen(const char* path, SVnode* pVnode);
void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
int32_t tqCheckColModifiable(STQ* pTq, int32_t colId);
int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId);
int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen);

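Taken together, the two new tq entry points declared here form the alter guard for subscribed normal tables: `tqProcessCheckAlterInfoReq` stores the `SCheckAlterInfo` broadcast by the mnode, and `tqCheckColModifiable` is consulted by the meta layer before a column is altered. A hedged sketch of the call pattern (the header name, uid, and column id below are hypothetical):

```c
// Sketch of the intended flow; error handling is elided and the concrete
// values are made up for illustration.
#include "vnd.h"  // hypothetical include for the declarations above

void alterGuardExample(STQ *pTq, char *msg, int32_t msgLen) {
  // 1. A TDMT_VND_CHECK_ALTER_INFO message arrives; the vnode records which
  //    columns of which normal table are referenced by the topic.
  if (tqProcessCheckAlterInfoReq(pTq, msg, msgLen) < 0) {
    return;  // decoding or hash insertion failed
  }

  // 2. Later, before ALTER TABLE touches column 3 of table uid 1001, the meta
  //    layer asks whether that column is still modifiable.
  if (tqCheckColModifiable(pTq, /*tbUid*/ 1001, /*colId*/ 3) != 0) {
    // the column is subscribed; metaAlterTableColumn maps this case to
    // TSDB_CODE_VND_COL_SUBSCRIBED and rejects the request
  }
}
```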
View File

@ -438,12 +438,15 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
}
int metaTtlDropTable(SMeta *pMeta, int64_t ttl, SArray *tbUids) {
metaWLock(pMeta);
int ret = metaTtlSmaller(pMeta, ttl, tbUids);
if (ret != 0) {
metaULock(pMeta);
return ret;
}
if (taosArrayGetSize(tbUids) == 0){
return 0;
}
metaWLock(pMeta);
for (int i = 0; i < taosArrayGetSize(tbUids); ++i) {
tb_uid_t *uid = (tb_uid_t *)taosArrayGet(tbUids, i);
metaDropTableByUid(pMeta, *uid, NULL);
@ -637,6 +640,10 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
goto _err;
}
if (tqCheckColModifiable(pMeta->pVnode->pTq, uid, pColumn->colId) != 0) {
terrno = TSDB_CODE_VND_COL_SUBSCRIBED;
goto _err;
}
pSchema->version++;
tlen = (pSchema->nCols - iCol - 1) * sizeof(SSchema);
if (tlen) {
@ -653,6 +660,10 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
goto _err;
}
if (tqCheckColModifiable(pMeta->pVnode->pTq, uid, pColumn->colId) != 0) {
terrno = TSDB_CODE_VND_COL_SUBSCRIBED;
goto _err;
}
pSchema->version++;
pColumn->bytes = pAlterTbReq->colModBytes;
break;
@ -661,6 +672,10 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
terrno = TSDB_CODE_VND_TABLE_COL_NOT_EXISTS;
goto _err;
}
if (tqCheckColModifiable(pMeta->pVnode->pTq, uid, pColumn->colId) != 0) {
terrno = TSDB_CODE_VND_COL_SUBSCRIBED;
goto _err;
}
pSchema->version++;
strcpy(pColumn->name, pAlterTbReq->colNewName);
break;

View File

@ -51,7 +51,7 @@ void tqCleanUp() {
}
}
STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
STQ* tqOpen(const char* path, SVnode* pVnode) {
STQ* pTq = taosMemoryCalloc(1, sizeof(STQ));
if (pTq == NULL) {
terrno = TSDB_CODE_TQ_OUT_OF_MEMORY;
@ -59,7 +59,6 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
}
pTq->path = strdup(path);
pTq->pVnode = pVnode;
pTq->pWal = pWal;
pTq->handles = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
@ -67,6 +66,8 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
pTq->pushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
pTq->pAlterInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
if (tqMetaOpen(pTq) < 0) {
ASSERT(0);
}
@ -91,6 +92,7 @@ void tqClose(STQ* pTq) {
}
taosHashCleanup(pTq->pStreamTasks);
taosHashCleanup(pTq->pushMgr);
taosHashCleanup(pTq->pAlterInfo);
taosMemoryFree(pTq->path);
tqMetaClose(pTq);
taosMemoryFree(pTq);
@ -208,18 +210,18 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen) {
return 0;
}
int32_t tqCheckColModifiable(STQ* pTq, int32_t colId) {
int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) {
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->handles, pIter);
pIter = taosHashIterate(pTq->pAlterInfo, pIter);
if (pIter == NULL) break;
STqHandle* pExec = (STqHandle*)pIter;
if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
int32_t sz = pExec->execHandle.pSchemaWrapper->nCols;
SCheckAlterInfo* pCheck = (SCheckAlterInfo*)pIter;
if (pCheck->ntbUid == tbUid) {
int32_t sz = taosArrayGetSize(pCheck->colIdList);
for (int32_t i = 0; i < sz; i++) {
SSchema* pSchema = &pExec->execHandle.pSchemaWrapper->pSchema[i];
if (pSchema->colId == colId) {
taosHashCancelIterate(pTq->handles, pIter);
int16_t forbidColId = *(int16_t*)taosArrayGet(pCheck->colIdList, i);
if (forbidColId == colId) {
taosHashCancelIterate(pTq->pAlterInfo, pIter);
return -1;
}
}
@ -270,6 +272,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t code = 0;
STqOffsetVal reqOffset = pReq->reqOffset;
STqOffsetVal fetchOffsetNew;
SWalCkHead* pCkHead = NULL;
// 1.find handle
STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey));
@ -459,6 +462,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
}
OVER:
if (pCkHead) taosMemoryFree(pCkHead);
// TODO wrap in destroy func
taosArrayDestroy(dataRsp.blockDataLen);
taosArrayDestroyP(dataRsp.blockData, (FDelete)taosMemoryFree);
@ -488,6 +492,22 @@ int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
return 0;
}
int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen) {
SCheckAlterInfo info = {0};
SDecoder decoder;
tDecoderInit(&decoder, msg, msgLen);
if (tDecodeSCheckAlterInfo(&decoder, &info) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
tDecoderClear(&decoder);
if (taosHashPut(pTq->pAlterInfo, info.topic, strlen(info.topic), &info, sizeof(SCheckAlterInfo)) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
return 0;
}
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
SMqRebVgReq req = {0};
tDecodeSMqRebVgReq(msg, &req);

View File

@ -201,10 +201,12 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->msgIter.uid;
if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
blockDataFreeRes(&block);
continue;
}
}
tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
blockDataFreeRes(&block);
tqAddBlockSchemaToRsp(pExec, pRsp);
pRsp->blockNum++;
}
@ -220,10 +222,12 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->msgIter.uid;
if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
blockDataFreeRes(&block);
continue;
}
}
tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
blockDataFreeRes(&block);
tqAddBlockSchemaToRsp(pExec, pRsp);
pRsp->blockNum++;
}

View File

@ -89,8 +89,8 @@ int32_t tqMetaOpen(STQ* pTq) {
.version = handle.snapshotVer,
};
handle.execHandle.execCol.task =
qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper);
handle.execHandle.execCol.task = qCreateQueueExecTaskInfo(
handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper);
ASSERT(handle.execHandle.execCol.task);
void* scanner = NULL;
qExtractStreamScanner(handle.execHandle.execCol.task, &scanner);

View File

@ -340,29 +340,30 @@ FAIL:
void tqReaderSetColIdList(STqReader* pReadHandle, SArray* pColIdList) { pReadHandle->pColIdList = pColIdList; }
int tqReaderSetTbUidList(STqReader* pHandle, const SArray* tbUidList) {
if (pHandle->tbIdHash) {
taosHashClear(pHandle->tbIdHash);
int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
if (pReader->tbIdHash) {
taosHashClear(pReader->tbIdHash);
} else {
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
}
pHandle->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
if (pHandle->tbIdHash == NULL) {
if (pReader->tbIdHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
for (int i = 0; i < taosArrayGetSize(tbUidList); i++) {
int64_t* pKey = (int64_t*)taosArrayGet(tbUidList, i);
taosHashPut(pHandle->tbIdHash, pKey, sizeof(int64_t), NULL, 0);
taosHashPut(pReader->tbIdHash, pKey, sizeof(int64_t), NULL, 0);
}
return 0;
}
int tqReaderAddTbUidList(STqReader* pHandle, const SArray* tbUidList) {
if (pHandle->tbIdHash == NULL) {
pHandle->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
if (pHandle->tbIdHash == NULL) {
int tqReaderAddTbUidList(STqReader* pReader, const SArray* tbUidList) {
if (pReader->tbIdHash == NULL) {
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
if (pReader->tbIdHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
@ -370,18 +371,18 @@ int tqReaderAddTbUidList(STqReader* pHandle, const SArray* tbUidList) {
for (int i = 0; i < taosArrayGetSize(tbUidList); i++) {
int64_t* pKey = (int64_t*)taosArrayGet(tbUidList, i);
taosHashPut(pHandle->tbIdHash, pKey, sizeof(int64_t), NULL, 0);
taosHashPut(pReader->tbIdHash, pKey, sizeof(int64_t), NULL, 0);
}
return 0;
}
int tqReaderRemoveTbUidList(STqReader* pHandle, const SArray* tbUidList) {
ASSERT(pHandle->tbIdHash != NULL);
int tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) {
ASSERT(pReader->tbIdHash != NULL);
for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
int64_t* pKey = (int64_t*)taosArrayGet(tbUidList, i);
taosHashRemove(pHandle->tbIdHash, pKey, sizeof(int64_t));
taosHashRemove(pReader->tbIdHash, pKey, sizeof(int64_t));
}
return 0;

View File

@ -110,7 +110,6 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
// TODO
ret = rpcMallocCont(cap);
ret->header.vgId = vgId;
ret->version = htonl(1);
ret->length = sizeof(SSubmitReq);
ret->numOfBlocks = htonl(sz);

View File

@ -95,16 +95,19 @@ int vnodeSaveInfo(const char *dir, const SVnodeInfo *pInfo) {
// save info to a vnode_tmp.json
pFile = taosOpenFile(fname, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
vError("failed to open info file: %s for write: %s", fname, terrstr());
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (taosWriteFile(pFile, data, strlen(data)) < 0) {
vError("failed to write info file: %s data: %s", fname, terrstr());
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
if (taosFsyncFile(pFile) < 0) {
vError("failed to fsync info file: %s error: %s", fname, terrstr());
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}

View File

@ -135,7 +135,7 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
// open tq
sprintf(tdir, "%s%s%s", dir, TD_DIRSEP, VNODE_TQ_DIR);
taosRealPath(tdir, NULL, sizeof(tdir));
pVnode->pTq = tqOpen(tdir, pVnode, pVnode->pWal);
pVnode->pTq = tqOpen(tdir, pVnode);
if (pVnode->pTq == NULL) {
vError("vgId:%d, failed to open vnode tq since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;

View File

@ -206,6 +206,12 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
goto _err;
}
break;
case TDMT_VND_CHECK_ALTER_INFO:
if (tqProcessCheckAlterInfoReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
pMsg->contLen - sizeof(SMsgHead)) < 0) {
goto _err;
}
break;
case TDMT_STREAM_TASK_DEPLOY: {
if (tqProcessTaskDeployReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
pMsg->contLen - sizeof(SMsgHead)) < 0) {

View File

@ -52,11 +52,12 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
#define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 1 : 0)
#define START_TS_COLUMN_INDEX 0
#define END_TS_COLUMN_INDEX 1
#define UID_COLUMN_INDEX 2
#define GROUPID_COLUMN_INDEX UID_COLUMN_INDEX
#define DELETE_GROUPID_COLUMN_INDEX 2
#define START_TS_COLUMN_INDEX 0
#define END_TS_COLUMN_INDEX 1
#define UID_COLUMN_INDEX 2
#define GROUPID_COLUMN_INDEX 3
#define CALCULATE_START_TS_COLUMN_INDEX 4
#define CALCULATE_END_TS_COLUMN_INDEX 5
enum {
// when this task starts to execute, this status will set
@ -175,6 +176,7 @@ typedef struct SExecTaskInfo {
int64_t owner; // if it is in execution
int32_t code;
int64_t version; // used for stream to record wal version
SStreamTaskInfo streamInfo;
SSchemaInfo schemaInfo;
STableListInfo tableqinfoList; // this is a table list
@ -346,7 +348,6 @@ typedef enum EStreamScanMode {
STREAM_SCAN_FROM_READERHANDLE = 1,
STREAM_SCAN_FROM_RES,
STREAM_SCAN_FROM_UPDATERES,
STREAM_SCAN_FROM_DATAREADER, // todo(liuyao) delete it
STREAM_SCAN_FROM_DATAREADER_RETRIEVE,
STREAM_SCAN_FROM_DATAREADER_RANGE,
} EStreamScanMode;
@ -366,7 +367,7 @@ typedef struct SStreamAggSupporter {
char* pKeyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
SArray* pScanWindow;
SSDataBlock* pScanBlock;
} SStreamAggSupporter;
typedef struct SessionWindowSupporter {
@ -401,8 +402,6 @@ typedef struct SStreamScanInfo {
uint64_t numOfExec; // execution times
STqReader* tqReader;
int32_t tsArrayIndex;
SArray* tsArray;
uint64_t groupId;
SUpdateInfo* pUpdateInfo;
@ -419,7 +418,7 @@ typedef struct SStreamScanInfo {
int32_t deleteDataIndex;
STimeWindow updateWin;
STimeWindowAggSupp twAggSup;
SSDataBlock* pUpdateDataRes;
// status for tmq
// SSchemaWrapper schema;
STqOffset offset;
@ -714,7 +713,6 @@ typedef struct SStreamStateAggOperatorInfo {
SSDataBlock* pDelRes;
SHashObj* pSeDeleted;
void* pDelIterator;
SArray* pScanWindow;
SArray* pChildren; // cache for children's result;
bool ignoreExpiredData;
} SStreamStateAggOperatorInfo;
@ -956,6 +954,7 @@ int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs,
TSKEY* pEndTs, int32_t rows, int32_t start, int64_t gap, SHashObj* pStDeleted);
bool functionNeedToExecute(SqlFunctionCtx* pCtx);
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid);
int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, const int32_t* rowCellOffset,
@ -972,7 +971,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex);
int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* groupKey);
SSDataBlock* createPullDataBlock();
SSDataBlock* createSpecialDataBlock(EStreamType type);
#ifdef __cplusplus
}

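The fixed column indexes above define the layout of the special stream blocks (start ts, end ts, uid, group id, and the new calculate-start/end columns), and the new `appendOneRow` helper fills the first three of them. A small sketch, assuming only the declarations in this header (the timestamps and uid are made-up values):

```c
// Illustrative use of createSpecialDataBlock/appendOneRow declared above;
// values are hypothetical and error handling is omitted.
#include "executorimpl.h"

SSDataBlock* buildClearRangeBlock(void) {
  SSDataBlock* pBlock = createSpecialDataBlock(STREAM_CLEAR);
  if (pBlock == NULL) return NULL;

  blockDataEnsureCapacity(pBlock, 1);  // room for one row

  TSKEY    startTs = 1656000000000;    // window start (hypothetical)
  TSKEY    endTs   = 1656000010000;    // window end (hypothetical)
  uint64_t uid     = 1001;             // table uid (hypothetical)

  // fills START_TS_COLUMN_INDEX, END_TS_COLUMN_INDEX and UID_COLUMN_INDEX
  appendOneRow(pBlock, &startTs, &endTs, &uid);
  return pBlock;
}
```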
View File

@ -521,7 +521,7 @@ static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCt
// NOTE: the last parameter is the primary timestamp column
// todo: refactor this
if (fmIsTimelineFunc(pCtx[i].functionId) && (j == pOneExpr->base.numOfParams - 1)) {
if (fmIsImplicitTsFunc(pCtx[i].functionId) && (j == pOneExpr->base.numOfParams - 1)) {
pInput->pPTS = pInput->pData[j]; // in case of merge function, this is not always the ts column data.
// ASSERT(pInput->pPTS->info.type == TSDB_DATA_TYPE_TIMESTAMP);
}
@ -1616,6 +1616,9 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
SSDataBlock* pBlock = pbInfo->pRes;
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
// set output datablock version
pBlock->info.version = pTaskInfo->version;
blockDataCleanup(pBlock);
if (!hasDataInGroupInfo(pGroupResInfo)) {
return;
@ -4455,10 +4458,10 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return createExchangeOperatorInfo(pHandle->pMsgCb->clientRpc, (SExchangePhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
STimeWindowAggSupp aggSup = (STimeWindowAggSupp){
.waterMark = pTableScanNode->watermark,
.calTrigger = pTableScanNode->triggerType,
.maxTs = INT64_MIN,
STimeWindowAggSupp aggSup = (STimeWindowAggSupp){
.waterMark = pTableScanNode->watermark,
.calTrigger = pTableScanNode->triggerType,
.maxTs = INT64_MIN,
};
if (pHandle->vnode) {
@ -5151,8 +5154,7 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlF
}
pSup->valueSize = size;
pSup->pScanWindow = taosArrayInit(4, sizeof(STimeWindow));
pSup->pScanBlock = createSpecialDataBlock(STREAM_CLEAR);
int32_t pageSize = 4096;
while (pageSize < pSup->resultRowSize * 4) {
pageSize <<= 1u;

View File

@ -25,7 +25,6 @@
#include "tdatablock.h"
#include "tmsg.h"
#include "executorimpl.h"
#include "query.h"
#include "tcompare.h"
#include "thash.h"
@ -814,6 +813,10 @@ static bool isSignleIntervalWindow(SStreamScanInfo* pInfo) {
return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
}
static bool isSlidingWindow(SStreamScanInfo* pInfo) {
return isIntervalWindow(pInfo) && pInfo->interval.interval != pInfo->interval.sliding;
}
static uint64_t getGroupId(SOperatorInfo* pOperator, uint64_t uid) {
uint64_t* groupId = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &uid, sizeof(int64_t));
if (groupId) {
@ -836,17 +839,10 @@ static uint64_t getGroupId(SOperatorInfo* pOperator, uint64_t uid) {
}
static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex);
uint64_t* groupCol = (uint64_t*)pColInfo->pData;
ASSERT(rowIndex < pBlock->info.rows);
switch (pBlock->info.type) {
case STREAM_DELETE_DATA:
case STREAM_RETRIEVE: {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex);
uint64_t* groupCol = (uint64_t*)pColInfo->pData;
pInfo->groupId = groupCol[rowIndex];
} break;
default:
break;
}
pInfo->groupId = groupCol[rowIndex];
}
void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin) {
@ -866,7 +862,17 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endData = (TSKEY*)pEndTsCol->pData;
STimeWindow win = {.skey = startData[*pRowIndex], .ekey = endData[*pRowIndex]};
SColumnInfoData* pCalStartTsCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
TSKEY* calStartData = (TSKEY*)pCalStartTsCol->pData;
SColumnInfoData* pCalEndTsCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
TSKEY* calEndData = (TSKEY*)pCalEndTsCol->pData;
setGroupId(pInfo, pBlock, GROUPID_COLUMN_INDEX, *pRowIndex);
if (isSlidingWindow(pInfo)) {
pInfo->updateWin.skey = calStartData[*pRowIndex];
pInfo->updateWin.ekey = calEndData[*pRowIndex];
}
(*pRowIndex)++;
for (; *pRowIndex < pBlock->info.rows; (*pRowIndex)++) {
@ -878,8 +884,8 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
win.skey = TMIN(win.skey, startData[*pRowIndex]);
continue;
}
ASSERT((win.skey > startData[*pRowIndex] && win.ekey < endData[*pRowIndex]) ||
(isInTimeWindow(&win, startData[*pRowIndex], 0) || isInTimeWindow(&win, endData[*pRowIndex], 0)));
ASSERT(!(win.skey > startData[*pRowIndex] && win.ekey < endData[*pRowIndex]) ||
!(isInTimeWindow(&win, startData[*pRowIndex], 0) || isInTimeWindow(&win, endData[*pRowIndex], 0)));
break;
}
@ -910,68 +916,6 @@ static STimeWindow getSlidingWindow(TSKEY* tsCol, SInterval* pInterval, SDataBlo
win.ekey = endWin.ekey;
}
}
static bool prepareDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) {
STimeWindow win = {
.skey = INT64_MIN,
.ekey = INT64_MAX,
};
bool needRead = false;
if (!isStateWindow(pInfo) && (*pRowIndex) < pSDB->info.rows) {
SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, tsColIndex);
TSKEY* tsCols = (TSKEY*)pColDataInfo->pData;
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
if (isSessionWindow(pInfo)) {
SStreamAggSupporter* pAggSup = pInfo->sessionSup.pStreamAggSup;
int64_t gap = pInfo->sessionSup.gap;
int32_t winIndex = 0;
SResultWindowInfo* pCurWin =
getSessionTimeWindow(pAggSup, tsCols[*pRowIndex], INT64_MIN, pSDB->info.groupId, gap, &winIndex);
win = pCurWin->win;
setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex);
(*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, *pRowIndex, gap, NULL);
} else {
setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex);
pInfo->updateWin.skey = tsCols[*pRowIndex];
win = getSlidingWindow(tsCols, &pInfo->interval, &pSDB->info, pRowIndex);
pInfo->updateWin.ekey = tsCols[*pRowIndex - 1];
// win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[*pRowIndex], &pInfo->interval, TSDB_ORDER_ASC);
// (*pRowIndex) +=
// getNumOfRowsInTimeWindow(&pSDB->info, tsCols, *pRowIndex, win.ekey, binarySearchForKey, NULL,
// TSDB_ORDER_ASC);
}
needRead = true;
} else if (isStateWindow(pInfo)) {
SArray* pWins = pInfo->sessionSup.pStreamAggSup->pScanWindow;
int32_t size = taosArrayGetSize(pWins);
if (pInfo->scanWinIndex < size) {
win = *(STimeWindow*)taosArrayGet(pWins, pInfo->scanWinIndex);
pInfo->scanWinIndex++;
needRead = true;
} else {
pInfo->scanWinIndex = 0;
taosArrayClear(pWins);
}
}
if (!needRead) {
return false;
}
resetTableScanInfo(pInfo->pTableScanOp->info, &win);
return true;
}
static void copyOneRow(SSDataBlock* dest, SSDataBlock* source, int32_t sourceRowId) {
for (int32_t j = 0; j < taosArrayGetSize(source->pDataBlock); j++) {
SColumnInfoData* pDestCol = (SColumnInfoData*)taosArrayGet(dest->pDataBlock, j);
SColumnInfoData* pSourceCol = (SColumnInfoData*)taosArrayGet(source->pDataBlock, j);
if (colDataIsNull_s(pSourceCol, sourceRowId)) {
colDataAppendNULL(pDestCol, dest->info.rows);
} else {
colDataAppend(pDestCol, dest->info.rows, colDataGetData(pSourceCol, sourceRowId), false);
}
}
dest->info.rows++;
}
static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) {
while (1) {
@ -984,29 +928,6 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32
if (!pResult) {
blockDataCleanup(pSDB);
*pRowIndex = 0;
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTableScanInfo->dataReader);
pTableScanInfo->dataReader = NULL;
return NULL;
}
if (pResult->info.groupId == pInfo->groupId) {
return pResult;
}
}
}
static SSDataBlock* doDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) {
while (1) {
SSDataBlock* pResult = NULL;
pResult = doTableScan(pInfo->pTableScanOp);
if (pResult == NULL) {
if (prepareDataScan(pInfo, pSDB, tsColIndex, pRowIndex)) {
// scan next window data
pResult = doTableScan(pInfo->pTableScanOp);
}
}
if (!pResult) {
pInfo->updateWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTableScanInfo->dataReader);
@ -1019,77 +940,31 @@ static SSDataBlock* doDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_
return pResult;
}
}
/* Todo(liuyao) for partition by column
SSDataBlock* pBlock = createOneDataBlock(pResult, true);
blockDataCleanup(pResult);
for (int32_t i = 0; i < pBlock->info.rows; i++) {
uint64_t id = getGroupId(pInfo->pOperatorDumy, pBlock->info.uid);
if (id == pInfo->groupId) {
copyOneRow(pResult, pBlock, i);
}
}
return pResult;
*/
}
static void generateIntervalTs(SStreamScanInfo* pInfo, SSDataBlock* pDelBlock, SOperatorInfo* pOperator,
SSDataBlock* pUpdateRes) {
if (pDelBlock->info.rows == 0) {
return;
}
blockDataCleanup(pUpdateRes);
blockDataEnsureCapacity(pUpdateRes, 64);
ASSERT(taosArrayGetSize(pDelBlock->pDataBlock) >= 3);
SColumnInfoData* pStartTsCol = taosArrayGet(pDelBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* startData = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pDelBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endData = (TSKEY*)pEndTsCol->pData;
SColumnInfoData* pGpCol = taosArrayGet(pDelBlock->pDataBlock, UID_COLUMN_INDEX);
uint64_t* uidCol = (uint64_t*)pGpCol->pData;
SColumnInfoData* pDestTsCol = taosArrayGet(pUpdateRes->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pDestGpCol = taosArrayGet(pUpdateRes->pDataBlock, GROUPID_COLUMN_INDEX);
for (int32_t i = pInfo->deleteDataIndex;
i < pDelBlock->info.rows &&
i < pDelBlock->info.capacity - (endData[i] - startData[i]) / pInfo->interval.interval - 1;
i++) {
uint64_t groupId = getGroupId(pOperator, uidCol[i]);
for (TSKEY startTs = startData[i]; startTs <= endData[i];) {
colDataAppend(pDestTsCol, pUpdateRes->info.rows, (const char*)&startTs, false);
colDataAppend(pDestGpCol, pUpdateRes->info.rows, (const char*)&groupId, false);
pUpdateRes->info.rows++;
startTs = taosTimeAdd(startTs, pInfo->interval.interval, pInfo->interval.intervalUnit, pInfo->interval.precision);
}
pInfo->deleteDataIndex++;
}
if (pInfo->deleteDataIndex > 0 && pInfo->deleteDataIndex == pDelBlock->info.rows) {
blockDataCleanup(pDelBlock);
pInfo->deleteDataIndex = 0;
}
}
static void generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pBlock, SOperatorInfo* pOperator,
SSDataBlock* pUpdateRes) {
if (pBlock->info.rows == 0) {
return;
static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) {
if (pSrcBlock->info.rows == 0) {
return TSDB_CODE_SUCCESS;
}
blockDataCleanup(pUpdateRes);
blockDataEnsureCapacity(pUpdateRes, pBlock->info.rows);
ASSERT(taosArrayGetSize(pBlock->pDataBlock) >= 3);
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
blockDataCleanup(pDestBlock);
int32_t code = blockDataEnsureCapacity(pDestBlock, pSrcBlock->info.rows);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
ASSERT(taosArrayGetSize(pSrcBlock->pDataBlock) >= 3);
SColumnInfoData* pStartTsCol = taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* startData = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
SColumnInfoData* pEndTsCol = taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endData = (TSKEY*)pEndTsCol->pData;
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
uint64_t* uidCol = (uint64_t*)pGpCol->pData;
SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
uint64_t* uidCol = (uint64_t*)pUidCol->pData;
SColumnInfoData* pDestStartCol = taosArrayGet(pUpdateRes->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pDestEndCol = taosArrayGet(pUpdateRes->pDataBlock, END_TS_COLUMN_INDEX);
SColumnInfoData* pDestGpCol = taosArrayGet(pUpdateRes->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pDestStartCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pDestEndCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
int32_t dummy = 0;
for (int32_t i = 0; i < pBlock->info.rows; i++) {
uint64_t groupId = getGroupId(pOperator, uidCol[i]);
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[i]);
// gap must be 0.
SResultWindowInfo* pStartWin =
getCurSessionWindow(pInfo->sessionSup.pStreamAggSup, startData[i], endData[i], groupId, 0, &dummy);
@ -1103,46 +978,75 @@ static void generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pBlock, SOper
colDataAppend(pDestStartCol, i, (const char*)&pStartWin->win.skey, false);
colDataAppend(pDestEndCol, i, (const char*)&pEndWin->win.ekey, false);
colDataAppend(pDestGpCol, i, (const char*)&groupId, false);
pUpdateRes->info.rows++;
pDestBlock->info.rows++;
}
return TSDB_CODE_SUCCESS;
}
static void setUpdateData(SStreamScanInfo* pInfo, SSDataBlock* pBlock, SSDataBlock* pUpdateBlock) {
blockDataCleanup(pUpdateBlock);
int32_t size = taosArrayGetSize(pInfo->tsArray);
if (pInfo->tsArrayIndex < size) {
SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pUpdateBlock->pDataBlock, pInfo->primaryTsIndex);
ASSERT(pCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
blockDataEnsureCapacity(pUpdateBlock, size);
int32_t rowId = *(int32_t*)taosArrayGet(pInfo->tsArray, pInfo->tsArrayIndex);
pInfo->groupId = getGroupId(pInfo->pTableScanOp, pBlock->info.uid);
int32_t i = 0;
for (; i < size; i++) {
rowId = *(int32_t*)taosArrayGet(pInfo->tsArray, i + pInfo->tsArrayIndex);
uint64_t id = getGroupId(pInfo->pTableScanOp, pBlock->info.uid);
if (pInfo->groupId != id) {
break;
}
copyOneRow(pUpdateBlock, pBlock, rowId);
}
pUpdateBlock->info.rows = i;
pInfo->tsArrayIndex += i;
pUpdateBlock->info.groupId = pInfo->groupId;
pUpdateBlock->info.type = STREAM_CLEAR;
blockDataUpdateTsWindow(pUpdateBlock, 0);
static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) {
blockDataCleanup(pDestBlock);
int32_t rows = pSrcBlock->info.rows;
if (rows == 0) {
return TSDB_CODE_SUCCESS;
}
int32_t code = blockDataEnsureCapacity(pDestBlock, rows);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
SColumnInfoData* pTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
uint64_t* uidCol = (uint64_t*)pUidCol->pData;
ASSERT(pTsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
TSKEY* tsCol = (TSKEY*)pTsCol->pData;
SColumnInfoData* pStartTsCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pEndTsCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
SColumnInfoData* pGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[0]);
for (int32_t i = 0; i < rows; ) {
colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(tsCol + i), false);
STimeWindow win = getSlidingWindow(tsCol, &pInfo->interval, &pSrcBlock->info, &i);
colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(tsCol + i - 1), false);
colDataAppend(pStartTsCol, pDestBlock->info.rows, (const char*)(&win.skey), false);
colDataAppend(pEndTsCol, pDestBlock->info.rows, (const char*)(&win.ekey), false);
colDataAppend(pGpCol, pDestBlock->info.rows, (const char*)(&groupId), false);
pDestBlock->info.rows++;
}
// all rows have same group id
ASSERT(pInfo->tsArrayIndex >= size);
if (size > 0 && pInfo->tsArrayIndex == size) {
taosArrayClear(pInfo->tsArray);
}
pDestBlock->info.groupId = groupId;
return TSDB_CODE_SUCCESS;
}
if (size == 0) {
generateIntervalTs(pInfo, pInfo->pDeleteDataRes, pInfo->pTableScanOp, pUpdateBlock);
static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) {
int32_t code = TSDB_CODE_SUCCESS;
if (isIntervalWindow(pInfo)) {
code = generateIntervalScanRange(pInfo, pSrcBlock, pDestBlock);
} else {
code = generateSessionScanRange(pInfo, pSrcBlock, pDestBlock);
}
pDestBlock->info.type = STREAM_CLEAR;
blockDataUpdateTsWindow(pDestBlock, 0);
return code;
}
void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid) {
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)pStartTs, false);
colDataAppend(pEndTsCol, pBlock->info.rows, (const char*)pEndTs, false);
colDataAppend(pUidCol, pBlock->info.rows, (const char*)pUid, false);
pBlock->info.rows++;
}
static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock* pBlock, bool out) {
if (out) {
blockDataCleanup(pInfo->pUpdateDataRes);
blockDataEnsureCapacity(pInfo->pUpdateDataRes, pBlock->info.rows);
}
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP);
TSKEY* tsCol = (TSKEY*)pColDataInfo->pData;
@ -1153,9 +1057,13 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
// must check update info first.
bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
if ((update || (isSignleIntervalWindow(pInfo) && isCloseWindow(&win, &pInfo->twAggSup))) && out) {
taosArrayPush(pInfo->tsArray, &rowId);
appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid);
}
}
if (out) {
blockDataUpdateTsWindow(pInfo->pUpdateDataRes, 0);
pInfo->pUpdateDataRes->info.type = STREAM_CLEAR;
}
}
static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t uidColIndex) {
@ -1178,6 +1086,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
pInfo->pRes->info.rows = pBlock->info.rows;
pInfo->pRes->info.uid = pBlock->info.uid;
pInfo->pRes->info.type = STREAM_NORMAL;
pInfo->pRes->info.version = pBlock->info.version;
uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
if (groupIdPre) {
@ -1321,26 +1230,18 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
case STREAM_RETRIEVE: {
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RETRIEVE;
copyDataBlock(pInfo->pPullDataRes, pBlock);
pInfo->pullDataResIndex = 0;
prepareDataScan(pInfo, pInfo->pPullDataRes, START_TS_COLUMN_INDEX, &pInfo->pullDataResIndex);
copyDataBlock(pInfo->pUpdateRes, pBlock);
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo);
} break;
case STREAM_DELETE_DATA: {
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
pInfo->updateResIndex = 0;
if (isIntervalWindow(pInfo)) {
copyDataBlock(pInfo->pDeleteDataRes, pBlock);
generateIntervalTs(pInfo, pInfo->pDeleteDataRes, pInfo->pTableScanOp, pInfo->pUpdateRes);
prepareDataScan(pInfo, pInfo->pUpdateRes, START_TS_COLUMN_INDEX, &pInfo->updateResIndex);
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
} else {
generateScanRange(pInfo, pBlock, pInfo->pTableScanOp, pInfo->pUpdateRes);
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
}
pInfo->pUpdateRes->info.type = STREAM_DELETE_DATA;
return pInfo->pUpdateRes;
generateScanRange(pInfo, pBlock, pInfo->pUpdateRes);
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
return pInfo->pDeleteDataRes;
} break;
default:
break;
@ -1348,56 +1249,40 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
return pBlock;
} else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) {
qDebug("scan mode %d", pInfo->scanMode);
if (pInfo->scanMode == STREAM_SCAN_FROM_RES) {
blockDataDestroy(pInfo->pUpdateRes);
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
return pInfo->pRes;
} else if (pInfo->scanMode == STREAM_SCAN_FROM_UPDATERES) {
if (isStateWindow(pInfo)) {
switch (pInfo->scanMode) {
case STREAM_SCAN_FROM_RES: {
blockDataDestroy(pInfo->pUpdateRes);
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
} else {
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
}
return pInfo->pRes;
} break;
case STREAM_SCAN_FROM_UPDATERES: {
generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes);
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
return pInfo->pUpdateRes;
} break;
case STREAM_SCAN_FROM_DATAREADER_RANGE:
case STREAM_SCAN_FROM_DATAREADER_RETRIEVE: {
SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB) {
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
return pSDB;
}
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
} break;
default:
break;
}
SStreamAggSupporter* pSup = pInfo->sessionSup.pStreamAggSup;
if (isStateWindow(pInfo) && pSup->pScanBlock->info.rows > 0) {
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
pInfo->updateResIndex = 0;
copyDataBlock(pInfo->pUpdateRes, pSup->pScanBlock);
blockDataCleanup(pSup->pScanBlock);
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
return pInfo->pUpdateRes;
} else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RETRIEVE) {
SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pPullDataRes, 0, &pInfo->pullDataResIndex);
if (pSDB != NULL) {
checkUpdateData(pInfo, true, pSDB, false);
pSDB->info.type = STREAM_PULL_DATA;
return pSDB;
}
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
} else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) {
SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB) {
pSDB->info.type = STREAM_NORMAL;
checkUpdateData(pInfo, true, pSDB, false);
return pSDB;
}
setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes);
if (pInfo->pUpdateRes->info.rows > 0) {
prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
return pInfo->pUpdateRes;
}
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
} else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE) {
SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB) {
pSDB->info.type = STREAM_NORMAL;
checkUpdateData(pInfo, true, pSDB, false);
return pSDB;
}
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
} else if (isStateWindow(pInfo)) {
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
pInfo->updateResIndex = pInfo->pUpdateRes->info.rows;
if (prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex)) {
blockDataCleanup(pInfo->pUpdateRes);
// return empty data block
return pInfo->pUpdateRes;
}
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
}
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
@ -1454,17 +1339,15 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
/*pOperator->status = OP_EXEC_DONE;*/
} else if (pInfo->pUpdateInfo) {
pInfo->tsArrayIndex = 0;
checkUpdateData(pInfo, true, pInfo->pRes, true);
setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes);
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlockInfo->window.ekey);
if (pInfo->pUpdateRes->info.rows > 0) {
if (pInfo->pUpdateRes->info.type == STREAM_CLEAR) {
if (pInfo->pUpdateDataRes->info.rows > 0) {
if (pInfo->pUpdateDataRes->info.type == STREAM_CLEAR) {
pInfo->updateResIndex = 0;
pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES;
} else if (pInfo->pUpdateRes->info.type == STREAM_INVERT) {
} else if (pInfo->pUpdateDataRes->info.type == STREAM_INVERT) {
pInfo->scanMode = STREAM_SCAN_FROM_RES;
return pInfo->pUpdateRes;
return pInfo->pUpdateDataRes;
}
}
}
@ -1513,7 +1396,7 @@ static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
#if 1
if (pStreamScan->pTableScanOp && pStreamScan->pTableScanOp->info) {
STableScanInfo* pTableScanInfo = pStreamScan->pTableScanOp->info;
destroyTableScanOperatorInfo(pTableScanInfo, 1);
destroyTableScanOperatorInfo(pTableScanInfo, numOfOutput);
}
#endif
if (pStreamScan->tqReader) {
@ -1527,8 +1410,8 @@ static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
blockDataDestroy(pStreamScan->pUpdateRes);
blockDataDestroy(pStreamScan->pPullDataRes);
blockDataDestroy(pStreamScan->pDeleteDataRes);
blockDataDestroy(pStreamScan->pUpdateDataRes);
taosArrayDestroy(pStreamScan->pBlockLists);
taosArrayDestroy(pStreamScan->tsArray);
taosMemoryFree(pStreamScan);
}
@ -1570,11 +1453,6 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
goto _error;
}
pInfo->tsArray = taosArrayInit(4, sizeof(int32_t));
if (pInfo->tsArray == NULL) {
goto _error;
}
if (pHandle->vnode) {
SOperatorInfo* pTableScanOp = createTableScanOperatorInfo(pTableScanNode, pHandle, pTaskInfo);
STableScanInfo* pTSInfo = (STableScanInfo*)pTableScanOp->info;
@ -1630,17 +1508,18 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
}
pInfo->pRes = createResDataBlock(pDescNode);
pInfo->pUpdateRes = createResDataBlock(pDescNode);
pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR);
pInfo->pCondition = pScanPhyNode->node.pConditions;
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
pInfo->sessionSup =
(SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
pInfo->groupId = 0;
pInfo->pPullDataRes = createPullDataBlock();
pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE);
pInfo->pStreamScanOp = pOperator;
pInfo->deleteDataIndex = 0;
pInfo->pDeleteDataRes = createPullDataBlock();
pInfo->pDeleteDataRes = createSpecialDataBlock(STREAM_DELETE_DATA);
pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX};
pInfo->pUpdateDataRes = createSpecialDataBlock(STREAM_CLEAR);
pOperator->name = "StreamScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
@ -2849,6 +2728,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
int32_t tableEndIdx = pInfo->tableEndIndex;
STableListInfo* tableListInfo = pInfo->tableListInfo;
pInfo->dataReaders = taosArrayInit(64, POINTER_BYTES);
createMultipleDataReaders(&pInfo->cond, &pInfo->readHandle, tableListInfo, tableStartIdx, tableEndIdx,
pInfo->dataReaders, GET_TASKID(pTaskInfo));
@ -3021,7 +2901,13 @@ int32_t getTableMergeScanExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExpla
int32_t compareTableKeyInfoByGid(const void* p1, const void* p2) {
const STableKeyInfo* info1 = p1;
const STableKeyInfo* info2 = p2;
return info1->groupId - info2->groupId;
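// compare explicitly instead of subtracting: the group ids are unsigned 64-bit values,
// so a subtraction truncated to int32_t can report the wrong order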
if (info1->groupId < info2->groupId) {
return -1;
} else if (info1->groupId > info2->groupId) {
return 1;
} else {
return 0;
}
}
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo,
@ -3063,7 +2949,6 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
pInfo->pColMatchInfo = pColList;
pInfo->pResBlock = createResDataBlock(pDescNode);
pInfo->dataReaders = taosArrayInit(64, POINTER_BYTES);
pInfo->sortSourceParams = taosArrayInit(64, sizeof(STableMergeScanSortSourceParam));
pInfo->pSortInfo = generateSortByTsInfo(pInfo->pColMatchInfo, pInfo->cond.order);

View File

@ -1373,8 +1373,10 @@ void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock,
static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* pInterval, int32_t numOfOutput,
SSDataBlock* pBlock, SArray* pUpWins) {
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* tsCols = (TSKEY*)pTsCol->pData;
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* startTsCols = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endTsCols = (TSKEY*)pEndTsCol->pData;
uint64_t* pGpDatas = NULL;
if (pBlock->info.type == STREAM_RETRIEVE) {
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
@ -1382,22 +1384,18 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
}
int32_t step = 0;
int32_t startPos = 0;
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[0], pInterval, TSDB_ORDER_ASC);
while (1) {
step =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId;
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
if (pUpWins && res) {
SWinRes winRes = {.ts = win.skey, .groupId = winGpId};
taosArrayPush(pUpWins, &winRes);
}
int32_t prevEndPos = step - 1 + startPos;
startPos = getNextQualifiedWindow(pInterval, &win, &pBlock->info, tsCols, prevEndPos, TSDB_ORDER_ASC);
if (startPos < 0) {
break;
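// for each row, clear every window that overlaps [startTsCols[i], endTsCols[i]]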
for (int32_t i = 0; i < pBlock->info.rows; i++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCols[i], pInterval, TSDB_ORDER_ASC);
while (win.ekey <= endTsCols[i]) {
uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId;
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
if (pUpWins && res) {
SWinRes winRes = {.ts = win.skey, .groupId = winGpId};
taosArrayPush(pUpWins, &winRes);
}
getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
}
}
}
@ -1501,7 +1499,7 @@ static void doBuildDeleteResult(SArray* pWins, int32_t* index, SSDataBlock* pBlo
}
blockDataEnsureCapacity(pBlock, size - *index);
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, DELETE_GROUPID_COLUMN_INDEX);
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
for (int32_t i = *index; i < size; i++) {
SWinRes* pWin = taosArrayGet(pWins, i);
colDataAppend(pTsCol, pBlock->info.rows, (const char*)&pWin->ts, false);
@ -1562,6 +1560,11 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
continue;
}
if (pBlock->info.type == STREAM_NORMAL) {
// set input version
pTaskInfo->version = pBlock->info.version;
}
if (pInfo->scalarSupp.pExprInfo != NULL) {
SExprSupp* pExprSup = &pInfo->scalarSupp;
projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
@ -1645,6 +1648,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
}
}
nodesDestroyNode((SNode*)pInfo->pPhyNode);
colDataDestroy(&pInfo->twAggSup.timeWindowData);
taosMemoryFreeClear(param);
}
@ -1793,10 +1797,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t));
pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes));
pInfo->delIndex = 0;
// pInfo->pDelRes = createPullDataBlock(); todo(liuyao) for delete
pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false); // todo(liuyao) for delete
pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; // todo(liuyao) for delete
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
pOperator->name = "TimeIntervalAggOperator";
@ -2598,14 +2599,6 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
chId = getChildIndex(pSDataBlock);
index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
}
// if (index != -1 && pSDataBlock->info.type == STREAM_PULL_DATA) {
// qDebug("===stream===delete child id %d", chId);
// taosArrayRemove(chArray, index);
// if (taosArrayGetSize(chArray) == 0) {
// // pull data is over
// taosHashRemove(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
// }
// }
if (index == -1 || pSDataBlock->info.type == STREAM_PULL_DATA) {
ignore = false;
}
@ -2697,16 +2690,18 @@ static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pB
}
blockDataEnsureCapacity(pBlock, size - (*pIndex));
ASSERT(3 <= taosArrayGetSize(pBlock->pDataBlock));
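// fetch the output column pointers once, outside the append loop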
SColumnInfoData* pStartTs = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pEndTs = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
SColumnInfoData* pGroupId = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pCalStartTs = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pCalEndTs = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
for (; (*pIndex) < size; (*pIndex)++) {
SPullWindowInfo* pWin = taosArrayGet(array, (*pIndex));
SColumnInfoData* pStartTs = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
colDataAppend(pStartTs, pBlock->info.rows, (const char*)&pWin->window.skey, false);
SColumnInfoData* pEndTs = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
colDataAppend(pEndTs, pBlock->info.rows, (const char*)&pWin->window.ekey, false);
SColumnInfoData* pGroupId = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
colDataAppend(pGroupId, pBlock->info.rows, (const char*)&pWin->groupId, false);
colDataAppend(pCalStartTs, pBlock->info.rows, (const char*)&pWin->window.skey, false);
colDataAppend(pCalEndTs, pBlock->info.rows, (const char*)&pWin->window.ekey, false);
pBlock->info.rows++;
}
if ((*pIndex) == size) {
@ -2825,7 +2820,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
continue;
}
removeResults(pUpWins, pUpdated);
copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
copyDataBlock(pInfo->pUpdateRes, pBlock);
// copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
pInfo->returnUpdate = true;
taosArrayDestroy(pUpWins);
break;
@ -2933,15 +2929,16 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return NULL;
}
SSDataBlock* createPullDataBlock() {
SSDataBlock* createSpecialDataBlock(EStreamType type) {
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
pBlock->info.hasVarCol = false;
pBlock->info.groupId = 0;
pBlock->info.rows = 0;
pBlock->info.type = STREAM_RETRIEVE;
pBlock->info.rowSize = sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t);
pBlock->info.type = type;
pBlock->info.rowSize = sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) +
sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
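// row layout: start ts, end ts, uid, group id, calculate start ts, calculate end ts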
pBlock->pDataBlock = taosArrayInit(3, sizeof(SColumnInfoData));
pBlock->pDataBlock = taosArrayInit(6, sizeof(SColumnInfoData));
SColumnInfoData infoData = {0};
infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP;
infoData.info.bytes = sizeof(TSKEY);
@ -2952,6 +2949,14 @@ SSDataBlock* createPullDataBlock() {
infoData.info.type = TSDB_DATA_TYPE_UBIGINT;
infoData.info.bytes = sizeof(uint64_t);
// uid
taosArrayPush(pBlock->pDataBlock, &infoData);
// group id
taosArrayPush(pBlock->pDataBlock, &infoData);
// calculate start ts
taosArrayPush(pBlock->pDataBlock, &infoData);
// calculate end ts
taosArrayPush(pBlock->pDataBlock, &infoData);
return pBlock;
@ -3019,8 +3024,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
goto _error;
}
}
pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
pInfo->pUpdateRes->info.type = STREAM_CLEAR;
pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR);
blockDataEnsureCapacity(pInfo->pUpdateRes, 128);
pInfo->returnUpdate = false;
@ -3042,11 +3046,9 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->pullIndex = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
pInfo->pPullDataRes = createPullDataBlock();
pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE);
pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
// pInfo->pDelRes = createPullDataBlock(); // todo(liuyao) for delete
pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false); // todo(liuyao) for delete
pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; // todo(liuyao) for delete
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
pInfo->delIndex = 0;
pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes));
@ -3061,7 +3063,9 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pOperator->fpSet =
createOperatorFpSet(NULL, doStreamFinalIntervalAgg, NULL, NULL, destroyStreamFinalIntervalOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
if (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL) {
initIntervalDownStream(downstream, pPhyNode->type);
}
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@ -3086,6 +3090,7 @@ void destroyStreamAggSupporter(SStreamAggSupporter* pSup) {
}
taosHashCleanup(pSup->pResultRows);
destroyDiskbasedBuf(pSup->pResultBuf);
blockDataDestroy(pSup->pScanBlock);
}
void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) {
@ -3200,7 +3205,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pStDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
pInfo->pDelIterator = NULL;
// pInfo->pDelRes = createPullDataBlock();
// pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false); // todo(liuyao) for delete
pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; // todo(liuyao) for delete
pInfo->pChildren = NULL;
@ -3559,7 +3564,7 @@ static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
TSKEY* startDatas = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endDatas = (TSKEY*)pEndTsCol->pData;
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
uint64_t* gpDatas = (uint64_t*)pGroupCol->pData;
for (int32_t i = 0; i < pBlock->info.rows; i++) {
int32_t winIndex = 0;
@ -4255,7 +4260,6 @@ static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
step = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCol, pKeyColInfo, pBlock->info.rows, i, &allEqual,
pSeDeleted);
ASSERT(isTsInWindow(pCurWin, tsCol[i]) || isEqualStateKey(pCurWin, pKeyData));
taosArrayPush(pAggSup->pScanWindow, &pCurWin->winInfo.win);
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
deleteWindow(pAggSup->pCurWins, winIndex);
}
@ -4280,8 +4284,9 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
} else {
return;
}
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
blockDataEnsureCapacity(pAggSup->pScanBlock, pSDataBlock->info.rows);
SColumnInfoData* pKeyColInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->stateCol.slotId);
for (int32_t i = 0; i < pSDataBlock->info.rows; i += winRows) {
if (pInfo->ignoreExpiredData && isOverdue(tsCols[i], &pInfo->twAggSup)) {
@ -4296,7 +4301,8 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, pKeyColInfo, pSDataBlock->info.rows, i,
&allEqual, pInfo->pSeDeleted);
if (!allEqual) {
taosArrayPush(pAggSup->pScanWindow, &pCurWin->winInfo.win);
appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey,
&pSDataBlock->info.groupId);
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
deleteWindow(pAggSup->pCurWins, winIndex);
continue;
@ -4460,7 +4466,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pSeDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
pInfo->pDelIterator = NULL;
// pInfo->pDelRes = createPullDataBlock(); // todo(liuyao) for delete
// pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false); // todo(liuyao) for delete
pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; // todo(liuyao) for delete
pInfo->pChildren = NULL;

View File

@ -2242,7 +2242,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "last_row",
.type = FUNCTION_TYPE_LAST_ROW,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
@ -2253,7 +2253,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_cache_last_row",
.type = FUNCTION_TYPE_CACHE_LAST_ROW,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
@ -2263,7 +2263,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "first",
.type = FUNCTION_TYPE_FIRST,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
@ -2277,7 +2277,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_first_partial",
.type = FUNCTION_TYPE_FIRST_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastPartial,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
@ -2288,7 +2288,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_first_merge",
.type = FUNCTION_TYPE_FIRST_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastMerge,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
@ -2299,7 +2299,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "last",
.type = FUNCTION_TYPE_LAST,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
@ -2313,7 +2313,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_last_partial",
.type = FUNCTION_TYPE_LAST_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastPartial,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
@ -2324,7 +2324,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_last_merge",
.type = FUNCTION_TYPE_LAST_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastMerge,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,

View File

@ -476,16 +476,16 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
SFirstLastRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
int32_t type = pDestCtx->input.pData[0]->info.type;
int32_t bytes = pDestCtx->input.pData[0]->info.bytes;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
char* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
SFirstLastRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
if (pSResInfo->numOfRes != 0 && (pDResInfo->numOfRes == 0 || *(TSKEY*)(pDBuf + bytes) > *(TSKEY*)(pSBuf + bytes))) {
memcpy(pDBuf, pSBuf, bytes);
*(TSKEY*)(pDBuf + bytes) = *(TSKEY*)(pSBuf + bytes);
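// take the source entry when it carries the earlier timestamp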
if (pSResInfo->numOfRes != 0 && (pDResInfo->numOfRes == 0 || pDBuf->ts > pSBuf->ts)) {
memcpy(pDBuf->buf, pSBuf->buf, bytes);
pDBuf->ts = pSBuf->ts;
pDResInfo->numOfRes = 1;
}
return TSDB_CODE_SUCCESS;
@ -2465,8 +2465,8 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult
}
int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
int32_t numOfElems = 0;
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
int32_t numOfElems = 0;
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pCol = pInput->pData[0];
@ -2502,7 +2502,7 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
}
qDebug("add %d elements into histogram, total:%d, numOfEntry:%d, %p", numOfElems, pInfo->pHisto->numOfElems,
pInfo->pHisto->numOfEntries, pInfo->pHisto);
pInfo->pHisto->numOfEntries, pInfo->pHisto);
}
SET_VAL(pResInfo, numOfElems, 1);
@ -2542,18 +2542,17 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
qDebug("merge histo, total:%"PRId64", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries, pHisto);
qDebug("merge histo, total:%" PRId64 ", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries, pHisto);
} else {
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
qDebug("input histogram, elem:%"PRId64", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries,
pInput->pHisto);
qDebug("input histogram, elem:%" PRId64 ", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries,
pInput->pHisto);
SHistogramInfo* pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN);
memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN);
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
qDebug("merge histo, total:%"PRId64", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries,
pHisto);
qDebug("merge histo, total:%" PRId64 ", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries, pHisto);
tHistogramDestroy(&pRes);
}
}
@ -2580,7 +2579,8 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {
}
if (pInfo->algo != APERCT_ALGO_TDIGEST) {
qDebug("after merge, total:%d, numOfEntry:%d, %p", pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries, pInfo->pHisto);
qDebug("after merge, total:%d, numOfEntry:%d, %p", pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries,
pInfo->pHisto);
}
SET_VAL(pResInfo, 1, 1);
@ -2602,7 +2602,8 @@ int32_t apercentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
} else {
buildHistogramInfo(pInfo);
if (pInfo->pHisto->numOfElems > 0) {
qDebug("get the final res:%d, elements:%"PRId64", entry:%d", pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries);
qDebug("get the final res:%d, elements:%" PRId64 ", entry:%d", pInfo->pHisto->numOfElems,
pInfo->pHisto->numOfEntries);
double ratio[] = {pInfo->percent};
double* res = tHistogramUniform(pInfo->pHisto, ratio, 1);
@ -2994,16 +2995,16 @@ int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
// todo rewrite:
int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
SFirstLastRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
int32_t type = pDestCtx->input.pData[0]->info.type;
int32_t bytes = pDestCtx->input.pData[0]->info.bytes;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
char* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
SFirstLastRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
if (pSResInfo->numOfRes != 0 && (pDResInfo->numOfRes == 0 || *(TSKEY*)(pDBuf + bytes) < *(TSKEY*)(pSBuf + bytes))) {
memcpy(pDBuf, pSBuf, bytes);
*(TSKEY*)(pDBuf + bytes) = *(TSKEY*)(pSBuf + bytes);
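// take the source entry when it carries the later timestamp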
if (pSResInfo->numOfRes != 0 && (pDResInfo->numOfRes == 0 || pDBuf->ts < pSBuf->ts)) {
memcpy(pDBuf->buf, pSBuf->buf, bytes);
pDBuf->ts = pSBuf->ts;
pDResInfo->numOfRes = 1;
}
return TSDB_CODE_SUCCESS;
@ -4665,10 +4666,8 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) {
SSumRes* pSumRes = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
int32_t numOfElems = 0;
@ -4704,11 +4703,6 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) {
}
}
// TODO: remove this after pTsOutput is handled
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
numOfElems++;
}
@ -5205,8 +5199,8 @@ bool twaFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
STwaInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
pInfo->isNull = false;
pInfo->p.key = INT64_MIN;
pInfo->win = TSWINDOW_INITIALIZER;
pInfo->p.key = INT64_MIN;
pInfo->win = TSWINDOW_INITIALIZER;
return true;
}

View File

@ -875,7 +875,7 @@ bool isUdfcUvMsgComplete(SClientConnBuf *connBuf);
void udfcUvHandleRsp(SClientUvConn *conn);
void udfcUvHandleError(SClientUvConn *conn);
void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf);
void onUdfcPipetWrite(uv_write_t *write, int status);
void onUdfcPipeWrite(uv_write_t *write, int status);
void onUdfcPipeConnect(uv_connect_t *connect, int status);
int32_t udfcCreateUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskNode **pUvTask);
int32_t udfcQueueUvTask(SClientUvTaskNode *uvTask);
@ -1226,7 +1226,7 @@ int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode *
}
void udfcAllocateBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) {
SClientUvConn *conn = handle->data;
SClientUvConn *conn = handle->data;
SClientConnBuf *connBuf = &conn->readBuf;
int32_t msgHeadSize = sizeof(int32_t) + sizeof(int64_t);
@ -1244,6 +1244,9 @@ void udfcAllocateBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf
buf->base = NULL;
buf->len = 0;
}
} else if (connBuf->total == -1 && connBuf->len < msgHeadSize) {
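// message head only partially received; request just the remaining head bytes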
buf->base = connBuf->buf + connBuf->len;
buf->len = msgHeadSize - connBuf->len;
} else {
connBuf->cap = connBuf->total > connBuf->cap ? connBuf->total : connBuf->cap;
void *resultBuf = taosMemoryRealloc(connBuf->buf, connBuf->cap);
@ -1258,8 +1261,7 @@ void udfcAllocateBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf
}
}
fnTrace("conn buf cap - len - total : %d - %d - %d", connBuf->cap, connBuf->len, connBuf->total);
fnDebug("udfc uv alloc buffer: cap - len - total : %d - %d - %d", connBuf->cap, connBuf->len, connBuf->total);
}
bool isUdfcUvMsgComplete(SClientConnBuf *connBuf) {
@ -1267,7 +1269,7 @@ bool isUdfcUvMsgComplete(SClientConnBuf *connBuf) {
connBuf->total = *(int32_t *) (connBuf->buf);
}
if (connBuf->len == connBuf->cap && connBuf->total == connBuf->cap) {
fnTrace("udfc complete message is received, now handle it");
fnDebug("udfc complete message is received, now handle it");
return true;
}
return false;
@ -1278,7 +1280,7 @@ void udfcUvHandleRsp(SClientUvConn *conn) {
int64_t seqNum = *(int64_t *) (connBuf->buf + sizeof(int32_t)); // msglen then seqnum
if (QUEUE_EMPTY(&conn->taskQueue)) {
fnError("udfc no task waiting for response on connection");
fnError("udfc no task waiting on connection. response seqnum:%"PRId64, seqNum);
return;
}
bool found = false;
@ -1287,6 +1289,7 @@ void udfcUvHandleRsp(SClientUvConn *conn) {
SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue);
while (h != &conn->taskQueue) {
fnDebug("udfc handle response iterate through queue. uvTask:%d-%p", task->seqNum, task);
if (task->seqNum == seqNum) {
if (found == false) {
found = true;
@ -1315,6 +1318,7 @@ void udfcUvHandleRsp(SClientUvConn *conn) {
}
void udfcUvHandleError(SClientUvConn *conn) {
fnDebug("handle error on conn: %p, pipe: %p", conn, conn->pipe);
while (!QUEUE_EMPTY(&conn->taskQueue)) {
QUEUE* h = QUEUE_HEAD(&conn->taskQueue);
SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue);
@ -1328,7 +1332,7 @@ void udfcUvHandleError(SClientUvConn *conn) {
}
void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
fnTrace("udfc client %p, client read from pipe. nread: %zd", client, nread);
fnDebug("udfc client %p, client read from pipe. nread: %zd", client, nread);
if (nread == 0) return;
SClientUvConn *conn = client->data;
@ -1338,31 +1342,25 @@ void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
if (isUdfcUvMsgComplete(connBuf)) {
udfcUvHandleRsp(conn);
}
}
if (nread < 0) {
fnError("udfc client pipe %p read error: %zd, %s.", client, nread, uv_strerror(nread));
fnError("udfc client pipe %p read error: %zd(%s).", client, nread, uv_strerror(nread));
if (nread == UV_EOF) {
fnError("\tudfc client pipe %p closed", client);
}
udfcUvHandleError(conn);
}
}
void onUdfcPipetWrite(uv_write_t *write, int status) {
SClientUvTaskNode *uvTask = write->data;
uv_pipe_t *pipe = uvTask->pipe;
fnTrace("udfc client %p write length:%zu", pipe, uvTask->reqBuf.len);
SClientUvConn *conn = pipe->data;
if (status == 0) {
QUEUE_INSERT_TAIL(&conn->taskQueue, &uvTask->connTaskQueue);
} else {
fnError("udfc client %p write error.", pipe);
void onUdfcPipeWrite(uv_write_t *write, int status) {
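// write->data carries the client connection (SClientUvConn *)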
SClientUvConn *conn = write->data;
if (status < 0) {
fnError("udfc client connection %p write failed. status: %d(%s)", conn, status, uv_strerror(status));
udfcUvHandleError(conn);
} else {
fnDebug("udfc client connection %p write succeed", conn);
}
taosMemoryFree(write);
taosMemoryFree(uvTask->reqBuf.base);
}
void onUdfcPipeConnect(uv_connect_t *connect, int status) {
@ -1419,7 +1417,7 @@ int32_t udfcCreateUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskN
}
int32_t udfcQueueUvTask(SClientUvTaskNode *uvTask) {
fnTrace("queue uv task to event loop, task: %d, %p", uvTask->type, uvTask);
fnDebug("queue uv task to event loop, uvTask: %d-%p", uvTask->type, uvTask);
SUdfcProxy *udfc = uvTask->udfc;
uv_mutex_lock(&udfc->taskQueueMutex);
QUEUE_INSERT_TAIL(&udfc->taskQueue, &uvTask->recvTaskQueue);
@ -1427,14 +1425,14 @@ int32_t udfcQueueUvTask(SClientUvTaskNode *uvTask) {
uv_async_send(&udfc->loopTaskAync);
uv_sem_wait(&uvTask->taskSem);
fnInfo("udfc uv task finished. task: %d, %p", uvTask->type, uvTask);
fnInfo("udfc uvTask finished. uvTask:%"PRId64"-%d-%p", uvTask->seqNum, uvTask->type, uvTask);
uv_sem_destroy(&uvTask->taskSem);
return 0;
}
int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
fnTrace("event loop start uv task. task: %d, %p", uvTask->type, uvTask);
fnDebug("event loop start uv task. uvTask: %"PRId64"-%d-%p", uvTask->seqNum, uvTask->type, uvTask);
int32_t code = 0;
switch (uvTask->type) {
@ -1465,10 +1463,12 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
code = TSDB_CODE_UDF_PIPE_NO_PIPE;
} else {
uv_write_t *write = taosMemoryMalloc(sizeof(uv_write_t));
write->data = uvTask;
int err = uv_write(write, (uv_stream_t *)pipe, &uvTask->reqBuf, 1, onUdfcPipetWrite);
write->data = pipe->data;
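// enqueue the task on the connection before writing so the response can be matched by seqNum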
QUEUE* connTaskQueue = &((SClientUvConn*)pipe->data)->taskQueue;
QUEUE_INSERT_TAIL(connTaskQueue, &uvTask->connTaskQueue);
int err = uv_write(write, (uv_stream_t *)pipe, &uvTask->reqBuf, 1, onUdfcPipeWrite);
if (err != 0) {
fnError("udfc event loop start req/rsp task uv_write failed. code: %s", uv_strerror(err));
fnError("udfc event loop start req_rsp task uv_write failed. uvtask: %p, code: %s", uvTask, uv_strerror(err));
}
code = err;
}
@ -1618,6 +1618,7 @@ int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType) {
SClientUvTaskNode *uvTask = NULL;
udfcCreateUvTask(task, uvTaskType, &uvTask);
fnDebug("udfc client task: %p created uvTask: %p. pipe: %p", task, uvTask, task->session->udfUvPipe);
udfcQueueUvTask(uvTask);
udfcGetUdfTaskResultFromUvTask(task, uvTask);
if (uvTaskType == UV_TASK_CONNECT) {
@ -1625,6 +1626,8 @@ int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType) {
SClientUvConn *conn = uvTask->pipe->data;
conn->session = task->session;
}
taosMemoryFree(uvTask->reqBuf.base);
uvTask->reqBuf.base = NULL;
taosMemoryFree(uvTask);
uvTask = NULL;
return task->errCode;
@ -1670,7 +1673,7 @@ int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle) {
int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdfInterBuf *state, SUdfInterBuf *state2,
SSDataBlock* output, SUdfInterBuf *newState) {
fnTrace("udfc call udf. callType: %d, funcHandle: %p", callType, handle);
fnDebug("udfc call udf. callType: %d, funcHandle: %p", callType, handle);
SUdfcUvSession *session = (SUdfcUvSession *) handle;
if (session->udfUvPipe == NULL) {
fnError("No pipe to udfd");

View File

@ -671,6 +671,9 @@ void udfdAllocBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) {
fnError("udfd can not allocate enough memory") buf->base = NULL;
buf->len = 0;
}
} else if (ctx->inputTotal == -1 && ctx->inputLen < msgHeadSize) {
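// message head only partially received; continue reading the remaining head bytes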
buf->base = ctx->inputBuf + ctx->inputLen;
buf->len = msgHeadSize - ctx->inputLen;
} else {
ctx->inputCap = ctx->inputTotal > ctx->inputCap ? ctx->inputTotal : ctx->inputCap;
void *inputBuf = taosMemoryRealloc(ctx->inputBuf, ctx->inputCap);

View File

@ -2667,6 +2667,7 @@ static int32_t jsonToExprNode(const SJson* pJson, void* pObj) {
}
static const char* jkColumnTableId = "TableId";
static const char* jkColumnTableType = "TableType";
static const char* jkColumnColId = "ColId";
static const char* jkColumnColType = "ColType";
static const char* jkColumnDbName = "DbName";
@ -2683,6 +2684,9 @@ static int32_t columnNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkColumnTableId, pNode->tableId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkColumnTableType, pNode->tableType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkColumnColId, pNode->colId);
}
@ -2718,6 +2722,9 @@ static int32_t jsonToColumnNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetUBigIntValue(pJson, jkColumnTableId, &pNode->tableId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkColumnTableType, &pNode->tableType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetSmallIntValue(pJson, jkColumnColId, &pNode->colId);
}

View File

@ -1237,7 +1237,7 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
pSelect->hasTailFunc = pSelect->hasTailFunc ? true : (FUNCTION_TYPE_TAIL == pFunc->funcType);
pSelect->hasInterpFunc = pSelect->hasInterpFunc ? true : (FUNCTION_TYPE_INTERP == pFunc->funcType);
pSelect->hasLastRowFunc = pSelect->hasLastRowFunc ? true : (FUNCTION_TYPE_LAST_ROW == pFunc->funcType);
pSelect->hasTimeLineFunc = pSelect->hasLastRowFunc ? true : fmIsTimelineFunc(pFunc->funcId);
pSelect->hasTimeLineFunc = pSelect->hasTimeLineFunc ? true : fmIsTimelineFunc(pFunc->funcId);
}
}

View File

@ -478,8 +478,9 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect,
}
pAgg->hasLastRow = pSelect->hasLastRowFunc;
pAgg->hasTimeLineFunc = pSelect->hasTimeLineFunc;
pAgg->node.groupAction = GROUP_ACTION_SET;
pAgg->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;
pAgg->node.requireDataOrder = pAgg->hasTimeLineFunc ? DATA_ORDER_LEVEL_IN_GROUP : DATA_ORDER_LEVEL_NONE;
pAgg->node.resultDataOrder = DATA_ORDER_LEVEL_NONE;
int32_t code = TSDB_CODE_SUCCESS;
@ -928,7 +929,7 @@ static int32_t createDistinctLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSe
int32_t code = TSDB_CODE_SUCCESS;
// set group keys, agg funcs and having conditions
SNodeList* pGroupKeys = NULL;
SNode* pProjection = NULL;
SNode* pProjection = NULL;
FOREACH(pProjection, pSelect->pProjectionList) {
code = nodesListMakeStrictAppend(&pGroupKeys, createGroupingSetNode(pProjection));
if (TSDB_CODE_SUCCESS != code) {

View File

@ -904,14 +904,6 @@ static int32_t stbSplSplitScanNodeWithPartTags(SSplitContext* pCxt, SStableSplit
return code;
}
static int32_t stbSplSplitScanNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
SScanLogicNode* pScan = (SScanLogicNode*)pInfo->pSplitNode;
if (NULL != pScan->pGroupTags) {
return stbSplSplitScanNodeWithPartTags(pCxt, pInfo);
}
return stbSplSplitScanNodeWithoutPartTags(pCxt, pInfo);
}
static SNode* stbSplFindPrimaryKeyFromScan(SScanLogicNode* pScan) {
SNode* pCol = NULL;
FOREACH(pCol, pScan->pScanCols) {
@ -922,11 +914,12 @@ static SNode* stbSplFindPrimaryKeyFromScan(SScanLogicNode* pScan) {
return NULL;
}
static int32_t stbSplSplitScanNodeForJoin(SSplitContext* pCxt, SLogicSubplan* pSubplan, SScanLogicNode* pScan) {
static int32_t stbSplSplitMergeScanNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SScanLogicNode* pScan,
bool groupSort) {
SNodeList* pMergeKeys = NULL;
int32_t code = stbSplCreateMergeKeysByPrimaryKey(stbSplFindPrimaryKeyFromScan(pScan), &pMergeKeys);
if (TSDB_CODE_SUCCESS == code) {
code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, (SLogicNode*)pScan, false);
code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, (SLogicNode*)pScan, groupSort);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pSubplan->pChildren,
@ -937,12 +930,24 @@ static int32_t stbSplSplitScanNodeForJoin(SSplitContext* pCxt, SLogicSubplan* pS
return code;
}
static int32_t stbSplSplitScanNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
SScanLogicNode* pScan = (SScanLogicNode*)pInfo->pSplitNode;
if (SCAN_TYPE_TABLE_MERGE == pScan->scanType) {
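// a table merge scan is split into a MERGE subplan with group-sorted merge keys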
pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
return stbSplSplitMergeScanNode(pCxt, pInfo->pSubplan, pScan, true);
}
if (NULL != pScan->pGroupTags) {
return stbSplSplitScanNodeWithPartTags(pCxt, pInfo);
}
return stbSplSplitScanNodeWithoutPartTags(pCxt, pInfo);
}
static int32_t stbSplSplitJoinNodeImpl(SSplitContext* pCxt, SLogicSubplan* pSubplan, SJoinLogicNode* pJoin) {
int32_t code = TSDB_CODE_SUCCESS;
SNode* pChild = NULL;
FOREACH(pChild, pJoin->node.pChildren) {
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pChild)) {
code = stbSplSplitScanNodeForJoin(pCxt, pSubplan, (SScanLogicNode*)pChild);
code = stbSplSplitMergeScanNode(pCxt, pSubplan, (SScanLogicNode*)pChild, false);
} else if (QUERY_NODE_LOGIC_PLAN_JOIN == nodeType(pChild)) {
code = stbSplSplitJoinNodeImpl(pCxt, pSubplan, (SJoinLogicNode*)pChild);
} else {

View File

@ -124,7 +124,7 @@ int32_t replaceLogicNode(SLogicSubplan* pSubplan, SLogicNode* pOld, SLogicNode*
}
static int32_t adjustScanDataRequirement(SScanLogicNode* pScan, EDataOrderLevel requirement) {
if (SCAN_TYPE_TABLE != pScan->scanType || SCAN_TYPE_TABLE_MERGE != pScan->scanType) {
if (SCAN_TYPE_TABLE != pScan->scanType && SCAN_TYPE_TABLE_MERGE != pScan->scanType) {
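// only plain table scans and table merge scans honor the data order requirement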
return TSDB_CODE_SUCCESS;
}
// The lowest sort level of scan output data is DATA_ORDER_LEVEL_IN_BLOCK
@ -161,7 +161,9 @@ static int32_t adjustAggDataRequirement(SAggLogicNode* pAgg, EDataOrderLevel req
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
pAgg->node.resultDataOrder = requirement;
pAgg->node.requireDataOrder = requirement;
if (pAgg->hasTimeLineFunc) {
pAgg->node.requireDataOrder = requirement < DATA_ORDER_LEVEL_IN_GROUP ? DATA_ORDER_LEVEL_IN_GROUP : requirement;
}
return TSDB_CODE_SUCCESS;
}

View File

@ -139,6 +139,10 @@ TEST_F(PlanBasicTest, timeLineFunc) {
run("SELECT CSUM(c1) FROM t1");
run("SELECT CSUM(c1) FROM st1");
run("SELECT TWA(c1) FROM t1");
run("SELECT TWA(c1) FROM st1");
}
TEST_F(PlanBasicTest, multiResFunc) {

View File

@ -58,7 +58,19 @@ TEST_F(PlanPartitionByTest, withInterval) {
TEST_F(PlanPartitionByTest, withGroupBy) {
useDb("root", "test");
run("select count(*) from t1 partition by c1 group by c2");
run("SELECT COUNT(*) FROM t1 PARTITION BY c1 GROUP BY c2");
run("SELECT TBNAME, c1 FROM st1 PARTITION BY TBNAME GROUP BY c1");
}
TEST_F(PlanPartitionByTest, withTimeLineFunc) {
useDb("root", "test");
run("SELECT TWA(c1) FROM st1 PARTITION BY c1");
}
TEST_F(PlanPartitionByTest, withSlimit) {
useDb("root", "test");
run("SELECT CSUM(c1) FROM st1 PARTITION BY TBNAME SLIMIT 1");
}

View File

@ -207,6 +207,7 @@ void updateInfoDestroy(SUpdateInfo *pInfo) {
}
taosArrayDestroy(pInfo->pTsSBFs);
taosHashCleanup(pInfo->pMap);
taosMemoryFree(pInfo);
}

View File

@ -22,7 +22,7 @@ done
echo ""
echo "generate vgId ..."
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq > ${logpath}/log.vgIds.tmp
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | sort | uniq > ${logpath}/log.vgIds.tmp
echo "all vgIds:" > ${logpath}/log.vgIds
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq >> ${logpath}/log.vgIds
for dnode in `ls ${logpath} | grep dnode | grep -v log`;do

View File

@ -58,7 +58,7 @@
static const int32_t TEST_NUMBER = 1;
#define is_bigendian() ((*(char *)&TEST_NUMBER) == 0)
#define SIMPLE8B_MAX_INT64 ((uint64_t)2305843009213693951LL)
#define SIMPLE8B_MAX_INT64 ((uint64_t)1152921504606846974LL)
#define safeInt64Add(a, b) (((a >= 0) && (b <= INT64_MAX - a)) || ((a < 0) && (b >= INT64_MIN - a)))
#define ZIGZAG_ENCODE(T, v) ((u##T)((v) >> (sizeof(T) * 8 - 1))) ^ (((u##T)(v)) << 1) // zigzag encode
@ -101,8 +101,8 @@ int32_t tsCompressINTImp(const char *const input, const int32_t nelements, char
char bit_per_integer[] = {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60};
int32_t selector_to_elems[] = {240, 120, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1};
char bit_to_selector[] = {0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13, 13, 13,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15};
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15};
// get the byte limit.
int32_t word_length = 0;

View File

@ -626,6 +626,7 @@ void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) {
int32_t cfgLoadFromEnvVar(SConfig *pConfig) {
char *line = NULL, *name, *value, *value2, *value3;
int32_t olen, vlen, vlen2, vlen3;
int32_t code = 0;
ssize_t _bytes = 0;
TdCmdPtr pCmd = taosOpenCmd("set");
if (pCmd == NULL) {
@ -658,9 +659,12 @@ int32_t cfgLoadFromEnvVar(SConfig *pConfig) {
if (vlen3 != 0) value3[vlen3] = 0;
}
cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_VAR);
if (value2 != NULL && value3 != NULL && value2[0] != 0 && value3[0] != 0 && strcasecmp(name, "dataDir") == 0) {
cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_ENV_VAR);
code = cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_ENV_VAR);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
} else {
code = cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_VAR);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
}
}
@ -674,6 +678,7 @@ int32_t cfgLoadFromEnvVar(SConfig *pConfig) {
int32_t cfgLoadFromEnvCmd(SConfig *pConfig, const char **envCmd) {
char buf[1024], *name, *value, *value2, *value3;
int32_t olen, vlen, vlen2, vlen3;
int32_t code = 0;
int32_t index = 0;
if (envCmd == NULL) return 0;
while (envCmd[index]!=NULL) {
@ -700,9 +705,12 @@ int32_t cfgLoadFromEnvCmd(SConfig *pConfig, const char **envCmd) {
if (vlen3 != 0) value3[vlen3] = 0;
}
cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_CMD);
if (value2 != NULL && value3 != NULL && value2[0] != 0 && value3[0] != 0 && strcasecmp(name, "dataDir") == 0) {
cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_ENV_CMD);
code = cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_ENV_CMD);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
} else {
code = cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_CMD);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
}
}
@ -713,6 +721,7 @@ int32_t cfgLoadFromEnvCmd(SConfig *pConfig, const char **envCmd) {
int32_t cfgLoadFromEnvFile(SConfig *pConfig, const char *envFile) {
char *line = NULL, *name, *value, *value2, *value3;
int32_t olen, vlen, vlen2, vlen3;
int32_t code = 0;
ssize_t _bytes = 0;
const char *filepath = ".env";
@ -761,9 +770,12 @@ int32_t cfgLoadFromEnvFile(SConfig *pConfig, const char *envFile) {
if (vlen3 != 0) value3[vlen3] = 0;
}
cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_FILE);
if (value2 != NULL && value3 != NULL && value2[0] != 0 && value3[0] != 0 && strcasecmp(name, "dataDir") == 0) {
cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_ENV_FILE);
code = cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_ENV_FILE);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
} else {
code = cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_FILE);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
}
}
@ -819,11 +831,12 @@ int32_t cfgLoadFromCfgFile(SConfig *pConfig, const char *filepath) {
if (vlen3 != 0) value3[vlen3] = 0;
}
code = cfgSetItem(pConfig, name, value, CFG_STYPE_CFG_FILE);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
if (value2 != NULL && value3 != NULL && value2[0] != 0 && value3[0] != 0 && strcasecmp(name, "dataDir") == 0) {
code = cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_CFG_FILE);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
} else {
code = cfgSetItem(pConfig, name, value, CFG_STYPE_CFG_FILE);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
}
}
@ -839,9 +852,75 @@ int32_t cfgLoadFromCfgFile(SConfig *pConfig, const char *filepath) {
}
}
// int32_t cfgLoadFromCfgText(SConfig *pConfig, const char *configText) {
// char *line = NULL, *name, *value, *value2, *value3;
// int32_t olen, vlen, vlen2, vlen3;
// ssize_t _bytes = 0;
// int32_t code = 0;
// TdFilePtr pFile = taosOpenFile(filepath, TD_FILE_READ | TD_FILE_STREAM);
// if (pFile == NULL) {
// // success when the file does not exist
// if (errno == ENOENT) {
// terrno = TAOS_SYSTEM_ERROR(errno);
// uInfo("failed to load from cfg file %s since %s, use default parameters", filepath, terrstr());
// return 0;
// } else {
// uError("failed to load from cfg file %s since %s", filepath, terrstr());
// return -1;
// }
// }
// while (!taosEOFFile(pFile)) {
// name = value = value2 = value3 = NULL;
// olen = vlen = vlen2 = vlen3 = 0;
// _bytes = taosGetLineFile(pFile, &line);
// if (_bytes <= 0) {
// break;
// }
// if(line[_bytes - 1] == '\n') line[_bytes - 1] = 0;
// paGetToken(line, &name, &olen);
// if (olen == 0) continue;
// name[olen] = 0;
// paGetToken(name + olen + 1, &value, &vlen);
// if (vlen == 0) continue;
// value[vlen] = 0;
// paGetToken(value + vlen + 1, &value2, &vlen2);
// if (vlen2 != 0) {
// value2[vlen2] = 0;
// paGetToken(value2 + vlen2 + 1, &value3, &vlen3);
// if (vlen3 != 0) value3[vlen3] = 0;
// }
// code = cfgSetItem(pConfig, name, value, CFG_STYPE_CFG_FILE);
// if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
// if (value2 != NULL && value3 != NULL && value2[0] != 0 && value3[0] != 0 && strcasecmp(name, "dataDir") == 0) {
// code = cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_CFG_FILE);
// if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
// }
// }
// taosCloseFile(&pFile);
// if (line != NULL) taosMemoryFreeClear(line);
// if (code == 0 || (code != 0 && terrno == TSDB_CODE_CFG_NOT_FOUND)) {
// uInfo("load from cfg file %s success", filepath);
// return 0;
// } else {
// uError("failed to load from cfg file %s since %s", filepath, terrstr());
// return -1;
// }
// }
int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) {
char *cfgLineBuf = NULL, *name, *value, *value2, *value3;
int32_t olen, vlen, vlen2, vlen3;
int32_t code = 0;
if (url == NULL || strlen(url) == 0) {
uInfo("fail to load apoll url");
return 0;
@ -916,9 +995,12 @@ int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) {
paGetToken(value2 + vlen2 + 1, &value3, &vlen3);
if (vlen3 != 0) value3[vlen3] = 0;
}
cfgSetItem(pConfig, name, value, CFG_STYPE_APOLLO_URL);
if (value2 != NULL && value3 != NULL && value2[0] != 0 && value3[0] != 0 && strcasecmp(name, "dataDir") == 0) {
cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_APOLLO_URL);
code = cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_APOLLO_URL);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
} else {
code = cfgSetItem(pConfig, name, value, CFG_STYPE_APOLLO_URL);
if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
}
}
}

View File

@ -336,6 +336,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_NOT_EXIST, "Table does not exists
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TABLE_ACTION, "Invalid table action")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_COL_ALREADY_EXISTS, "Table column already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_COL_NOT_EXISTS, "Table column not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_COL_SUBSCRIBED, "Table column is subscribed")
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID")

View File

@ -1,91 +0,0 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts = 1538548685000
def run(self):
tdSql.prepare()
# test case for https://jira.taosdata.com:18080/browse/TD-3679
print("==============step1")
tdSql.execute(
"create topic tq_test partitions 10")
tdSql.execute(
"insert into tq_test.p1(off, ts, content) values(0, %d, 'aaaa')" % self.ts)
tdSql.execute(
"insert into tq_test.p1(off, ts, content) values(1, %d, 'aaaa')" % (self.ts + 1))
tdSql.execute(
"insert into tq_test.p1(off, ts, content) values(2, %d, 'aaaa')" % (self.ts + 2))
tdSql.execute(
"insert into tq_test.p1(off, ts, content) values(3, %d, 'aaaa')" % (self.ts + 3))
print("==============step2")
tdSql.query("select * from tq_test.p1")
tdSql.checkRows(4)
tdSql.query("select * from tq_test.p1 where ts >= %d" % self.ts)
tdSql.checkRows(4)
tdSql.query("select * from tq_test.p1 where ts > %d" % self.ts)
tdSql.checkRows(3)
tdSql.query("select * from tq_test.p1 where ts = %d" % self.ts)
tdSql.checkRows(1)
tdSql.execute("use db")
tdSql.execute("create table test(ts timestamp, start timestamp, value int)")
tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts, self.ts))
tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts + 1, self.ts + 1))
tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts + 2, self.ts + 2))
tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts + 3, self.ts + 3))
tdSql.query("select * from test")
tdSql.checkRows(4)
tdSql.query("select * from test where ts >= %d" % self.ts)
tdSql.checkRows(4)
tdSql.query("select * from test where ts > %d" % self.ts)
tdSql.checkRows(3)
tdSql.query("select * from test where ts = %d" % self.ts)
tdSql.checkRows(1)
tdSql.query("select * from test where start >= %d" % self.ts)
tdSql.checkRows(4)
tdSql.query("select * from test where start > %d" % self.ts)
tdSql.checkRows(3)
tdSql.query("select * from test where start = %d" % self.ts)
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -69,6 +69,7 @@
./test.sh -f tsim/insert/basic.sim
./test.sh -f tsim/insert/basic0.sim
./test.sh -f tsim/insert/basic1.sim
./test.sh -f tsim/insert/basic2.sim
./test.sh -f tsim/insert/commit-merge0.sim
./test.sh -f tsim/insert/insert_drop.sim
./test.sh -f tsim/insert/insert_select.sim
@ -88,13 +89,13 @@
./test.sh -f tsim/parser/alter_column.sim
./test.sh -f tsim/parser/alter_stable.sim
./test.sh -f tsim/parser/alter.sim
# ./test.sh -f tsim/parser/alter1.sim
# TD-17661 ./test.sh -f tsim/parser/alter1.sim
./test.sh -f tsim/parser/auto_create_tb_drop_tb.sim
./test.sh -f tsim/parser/auto_create_tb.sim
./test.sh -f tsim/parser/between_and.sim
./test.sh -f tsim/parser/binary_escapeCharacter.sim
# ./test.sh -f tsim/parser/col_arithmetic_operation.sim
# ./test.sh -f tsim/parser/columnValue.sim
# TD-17738 ./test.sh -f tsim/parser/col_arithmetic_operation.sim
# TD-17661 ./test.sh -f tsim/parser/columnValue.sim
./test.sh -f tsim/parser/commit.sim
# TD-17661 ./test.sh -f tsim/parser/condition.sim
./test.sh -f tsim/parser/constCol.sim
@ -109,61 +110,58 @@
./test.sh -f tsim/parser/fill.sim
./test.sh -f tsim/parser/first_last.sim
./test.sh -f tsim/parser/fourArithmetic-basic.sim
# TD-17659 TD-17658 ./test.sh -f tsim/parser/function.sim
# TD-17659 ./test.sh -f tsim/parser/function.sim
./test.sh -f tsim/parser/groupby-basic.sim
# ./test.sh -f tsim/parser/groupby.sim
./test.sh -f tsim/parser/groupby.sim
# TD-17622 ./test.sh -f tsim/parser/having_child.sim
# ./test.sh -f tsim/parser/having.sim
# TD-17622 ./test.sh -f tsim/parser/having.sim
./test.sh -f tsim/parser/import_commit1.sim
./test.sh -f tsim/parser/import_commit2.sim
./test.sh -f tsim/parser/import_commit3.sim
# TD-17663 ./test.sh -f tsim/parser/import_file.sim
./test.sh -f tsim/parser/import_file.sim
./test.sh -f tsim/parser/import.sim
./test.sh -f tsim/parser/insert_multiTbl.sim
./test.sh -f tsim/parser/insert_tb.sim
# ./test.sh -f tsim/parser/interp.sim
# TD-17038 ./test.sh -f tsim/parser/interp.sim
./test.sh -f tsim/parser/join_manyblocks.sim
# ./test.sh -f tsim/parser/join_multitables.sim
# TD-17713 ./test.sh -f tsim/parser/join_multitables.sim
# TD-17713 ./test.sh -f tsim/parser/join_multivnode.sim
# TD-17707 ./test.sh -f tsim/parser/join.sim
./test.sh -f tsim/parser/last_cache.sim
./test.sh -f tsim/parser/last_groupby.sim
# TD-17675 ./test.sh -f tsim/parser/lastrow.sim
# TD-17722 ./test.sh -f tsim/parser/lastrow.sim
./test.sh -f tsim/parser/like.sim
# ./test.sh -f tsim/parser/limit.sim
# ./test.sh -f tsim/parser/limit1.sim
# ./test.sh -f tsim/parser/limit2.sim
# TD-17464 ./test.sh -f tsim/parser/limit.sim
# TD-17464 ./test.sh -f tsim/parser/limit1.sim
# TD-17623 ./test.sh -f tsim/parser/limit2.sim
./test.sh -f tsim/parser/mixed_blocks.sim
./test.sh -f tsim/parser/nchar.sim
# TD-17703 ./test.sh -f tsim/parser/nestquery.sim
# ./test.sh -f tsim/parser/null_char.sim
# TD-17685 ./test.sh -f tsim/parser/null_char.sim
./test.sh -f tsim/parser/precision_ns.sim
./test.sh -f tsim/parser/projection_limit_offset.sim
./test.sh -f tsim/parser/regex.sim
./test.sh -f tsim/parser/select_across_vnodes.sim
./test.sh -f tsim/parser/select_distinct_tag.sim
./test.sh -f tsim/parser/select_from_cache_disk.sim
# ./test.sh -f tsim/parser/select_with_tags.sim
# TD-17659 ./test.sh -f tsim/parser/select_with_tags.sim
./test.sh -f tsim/parser/selectResNum.sim
# TD-17685 ./test.sh -f tsim/parser/set_tag_vals.sim
./test.sh -f tsim/parser/single_row_in_tb.sim
# TD-17684 ./test.sh -f tsim/parser/sliding.sim
# ./test.sh -f tsim/parser/slimit_alter_tags.sim
# ./test.sh -f tsim/parser/slimit.sim
# ./test.sh -f tsim/parser/slimit1.sim
# TD-17722 ./test.sh -f tsim/parser/slimit_alter_tags.sim
# TD-17722 ./test.sh -f tsim/parser/slimit.sim
# TD-17722 ./test.sh -f tsim/parser/slimit1.sim
./test.sh -f tsim/parser/stableOp.sim
# ./test.sh -f tsim/parser/tags_dynamically_specifiy.sim
# ./test.sh -f tsim/parser/tags_filter.sim
# TD-17661 ./test.sh -f tsim/parser/tags_dynamically_specifiy.sim
# TD-17661 ./test.sh -f tsim/parser/tags_filter.sim
./test.sh -f tsim/parser/tbnameIn.sim
./test.sh -f tsim/parser/timestamp.sim
./test.sh -f tsim/parser/top_groupby.sim
./test.sh -f tsim/parser/topbot.sim
# ./test.sh -f tsim/parser/udf_dll_stable.sim
# ./test.sh -f tsim/parser/udf_dll.sim
# ./test.sh -f tsim/parser/udf.sim
./test.sh -f tsim/parser/union.sim
# TD-17704 ./test.sh -f tsim/parser/union_sysinfo.sim
# ./test.sh -f tsim/parser/where.sim
# TD-17661 ./test.sh -f tsim/parser/where.sim
# ---- query
./test.sh -f tsim/query/interval.sim
@ -324,7 +322,7 @@
./test.sh -f tsim/vnode/stable_replica3_vnode3.sim
# --- sync
# ./test.sh -f tsim/sync/3Replica1VgElect.sim
./test.sh -f tsim/sync/3Replica1VgElect.sim
./test.sh -f tsim/sync/3Replica5VgElect.sim
./test.sh -f tsim/sync/oneReplica1VgElect.sim
./test.sh -f tsim/sync/oneReplica5VgElect.sim
@ -422,18 +420,18 @@
./test.sh -f tsim/tag/bool_binary.sim
./test.sh -f tsim/tag/bool_int.sim
./test.sh -f tsim/tag/bool.sim
# ./test.sh -f tsim/tag/change.sim
# ./test.sh -f tsim/tag/column.sim
# ./test.sh -f tsim/tag/commit.sim
# ./test.sh -f tsim/tag/create.sim
# /test.sh -f tsim/tag/delete.sim
# ./test.sh -f tsim/tag/double.sim
# ./test.sh -f tsim/tag/filter.sim
# TD-17661 ./test.sh -f tsim/tag/change.sim
./test.sh -f tsim/tag/column.sim
./test.sh -f tsim/tag/commit.sim
# TD-17661 ./test.sh -f tsim/tag/create.sim
# TD-17661 ./test.sh -f tsim/tag/delete.sim
# TD-17661 ./test.sh -f tsim/tag/double.sim
# TD-17661 ./test.sh -f tsim/tag/filter.sim
# TD-17407 ./test.sh -f tsim/tag/float.sim
./test.sh -f tsim/tag/int_binary.sim
./test.sh -f tsim/tag/int_float.sim
./test.sh -f tsim/tag/int.sim
# ./test.sh -f tsim/tag/set.sim
# TD-17661 ./test.sh -f tsim/tag/set.sim
./test.sh -f tsim/tag/smallint.sim
./test.sh -f tsim/tag/tinyint.sim

View File

@ -0,0 +1,29 @@
#======================b1-start===============
# ---- mnode
./test.sh -f tsim/mnode/basic1.sim
./test.sh -f tsim/mnode/basic2.sim
./test.sh -f tsim/mnode/basic3.sim
./test.sh -f tsim/mnode/basic4.sim
./test.sh -f tsim/mnode/basic5.sim
# --- vnode
# unsupport ./test.sh -f tsim/vnode/replica3_basic.sim
# unsupport ./test.sh -f tsim/vnode/replica3_repeat.sim
# unsupport ./test.sh -f tsim/vnode/replica3_vgroup.sim
# unsupport ./test.sh -f tsim/vnode/replica3_many.sim
# unsupport ./test.sh -f tsim/vnode/replica3_import.sim
# unsupport ./test.sh -f tsim/vnode/stable_balance_replica1.sim
# unsupport ./test.sh -f tsim/vnode/stable_dnode2_stop.sim
./test.sh -f tsim/vnode/stable_dnode2.sim
./test.sh -f tsim/vnode/stable_dnode3.sim
./test.sh -f tsim/vnode/stable_replica3_dnode6.sim
./test.sh -f tsim/vnode/stable_replica3_vnode3.sim
# --- sync
# jira ./test.sh -f tsim/sync/3Replica1VgElect.sim
./test.sh -f tsim/sync/3Replica5VgElect.sim
./test.sh -f tsim/sync/oneReplica1VgElect.sim
./test.sh -f tsim/sync/oneReplica5VgElect.sim

View File

@ -40,7 +40,7 @@ if $data(2)[4] != ready then
endi
print ========== step3
system sh/exec.sh -n dnode2 -s stop
system sh/exec.sh -n dnode2 -s stop
$x = 0
step3:
@ -64,9 +64,10 @@ if $rows != 1 then
endi
print ========== step5
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname port 7200
system sh/exec.sh -n dnode2 -s start
return
$x = 0
step5:
$x = $x + 1

View File

@ -0,0 +1,322 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
print =============== create database
sql create database d0 keep 365000d,365000d,365000d
sql use d0
print =============== create super table
sql create table if not exists stb (ts timestamp, c1 int unsigned, c2 double, c3 binary(10), c4 nchar(10), c5 double) tags (city binary(20),district binary(20));
sql show stables
if $rows != 1 then
return -1
endi
print =============== create child table
sql create table ct1 using stb tags("BeiJing", "ChaoYang")
sql create table ct2 using stb tags("BeiJing", "HaiDian")
sql show tables
if $rows != 2 then
return -1
endi
print =============== step3-1 insert records into ct1
sql insert into ct1 values('2022-05-03 16:59:00.010', 10, 20, 'n','n',30);
sql insert into ct1 values('2022-05-03 16:59:00.011', 'N', 'n', 'N',"N",30);
sql insert into ct1 values('2022-05-03 16:59:00.012', 'Nu', 'nul', 'Nul','NUL',30);
sql insert into ct1 values('2022-05-03 16:59:00.013', NULL, 'null', 'Null',null,30);
sql insert into ct1 values('2022-05-03 16:59:00.014', NULL, 'NuLL', 'Null',NULL,30);
sql_error insert into ct1 values('2022-05-03 16:59:00.015', NULL, 20, 'Null',NUL,30);
sql_error insert into ct1 values('2022-05-03 16:59:00.015', NULL, 20, 'Null',NU,30);
sql_error insert into ct1 values('2022-05-03 16:59:00.015', NULL, 20, 'Null',Nu,30);
sql_error insert into ct1 values('2022-05-03 16:59:00.015', NULL, 20, 'Null',N,30);
sql_error insert into ct1 values('2022-05-03 16:59:00.015', N, 20, 'Null',NULL,30);
sql_error insert into ct1 values('2022-05-03 16:59:00.015', Nu, 20, 'Null',NULL,30);
sql_error insert into ct1 values('2022-05-03 16:59:00.015', Nul, 20, 'Null',NULL,30);
print =============== step3-1 query records of ct1 from memory
sql select * from ct1;
print $data00 $data01 $data02 $data03 $data04 $data05
print $data10 $data11 $data12 $data13 $data14 $data15
print $data20 $data21 $data22 $data23 $data24 $data25
print $data30 $data31 $data32 $data33 $data34 $data35
print $data40 $data41 $data42 $data43 $data44 $data45
if $rows != 5 then
print rows $rows != 5
return -1
endi
if $data01 != 10 then
print data01 $data01 != 10
return -1
endi
if $data02 != 20.000000000 then
print data02 $data02 != 20.000000000
return -1
endi
if $data03 != n then
print data03 $data03 != n
return -1
endi
if $data04 != n then
print data04 $data04 != n
return -1
endi
if $data05 != 30.000000000 then
print data05 $data05 != 30.000000000
return -1
endi
if $data11 != NULL then
print data11 $data11 != NULL
return -1
endi
if $data12 != NULL then
print data12 $data12 != NULL
return -1
endi
if $data13 != N then
print data13 $data13 != N
return -1
endi
if $data14 != N then
print data14 $data14 != N
return -1
endi
if $data15 != 30.000000000 then
print data15 $data15 != 30.000000000
return -1
endi
if $data21 != NULL then
print data21 $data21 != NULL
return -1
endi
if $data22 != NULL then
print data22 $data22 != NULL
return -1
endi
if $data23 != Nul then
print data23 $data23 != Nul
return -1
endi
if $data24 != NUL then
print data24 $data24 != NUL
return -1
endi
if $data25 != 30.000000000 then
print data25 $data25 != 30.000000000
return -1
endi
if $data31 != NULL then
print data31 $data31 != NULL
return -1
endi
if $data32 != NULL then
print data32 $data32 != NULL
return -1
endi
if $data33 != Null then
print data33 $data33 != Null
return -1
endi
if $data34 != NULL then
print data34 $data34 != NULL
return -1
endi
if $data35 != 30.000000000 then
print data35 $data35 != 30.000000000
return -1
endi
if $data41 != NULL then
print data41 $data41 != NULL
return -1
endi
if $data42 != NULL then
print data42 $data42 != NULL
return -1
endi
if $data43 != Null then
print data43 $data43 != Null
return -1
endi
if $data44 != NULL then
print data44 $data44 != NULL
return -1
endi
if $data45 != 30.000000000 then
print data45 $data45 != 30.000000000
return -1
endi
#==================== reboot to trigger commit data to file
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
print =============== step3-2 query records of ct1 from file
sql select * from ct1;
print $data00 $data01 $data02 $data03 $data04 $data05
print $data10 $data11 $data12 $data13 $data14 $data15
print $data20 $data21 $data22 $data23 $data24 $data25
print $data30 $data31 $data32 $data33 $data34 $data35
print $data40 $data41 $data42 $data43 $data44 $data45
if $rows != 5 then
print rows $rows != 5
return -1
endi
if $data01 != 10 then
print data01 $data01 != 10
return -1
endi
if $data02 != 20.000000000 then
print data02 $data02 != 20.000000000
return -1
endi
if $data03 != n then
print data03 $data03 != n
return -1
endi
if $data04 != n then
print data04 $data04 != n
return -1
endi
if $data05 != 30.000000000 then
print data05 $data05 != 30.000000000
return -1
endi
if $data11 != NULL then
print data11 $data11 != NULL
return -1
endi
if $data12 != NULL then
print data12 $data12 != NULL
return -1
endi
if $data13 != N then
print data13 $data13 != N
return -1
endi
if $data14 != N then
print data14 $data14 != N
return -1
endi
if $data15 != 30.000000000 then
print data15 $data15 != 30.000000000
return -1
endi
if $data21 != NULL then
print data21 $data21 != NULL
return -1
endi
if $data22 != NULL then
print data22 $data22 != NULL
return -1
endi
if $data23 != Nul then
print data23 $data23 != Nul
return -1
endi
if $data24 != NUL then
print data24 $data24 != NUL
return -1
endi
if $data25 != 30.000000000 then
print data25 $data25 != 30.000000000
return -1
endi
if $data31 != NULL then
print data31 $data31 != NULL
return -1
endi
if $data32 != NULL then
print data32 $data32 != NULL
return -1
endi
if $data33 != Null then
print data33 $data33 != Null
return -1
endi
if $data34 != NULL then
print data34 $data34 != NULL
return -1
endi
if $data35 != 30.000000000 then
print data35 $data35 != 30.000000000
return -1
endi
if $data41 != NULL then
print data41 $data41 != NULL
return -1
endi
if $data42 != NULL then
print data42 $data42 != NULL
return -1
endi
if $data43 != Null then
print data43 $data43 != Null
return -1
endi
if $data44 != NULL then
print data44 $data44 != NULL
return -1
endi
if $data45 != 30.000000000 then
print data45 $data45 != 30.000000000
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
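
A minimal Python sketch of the behaviour this new sim case pins down, using the taos connector that other tests in this merge rely on. It assumes a local TDengine server with default credentials; the database and table names (null_demo, nt) are made up for illustration. Quoted NULL-like text is accepted as NULL for a numeric column but kept verbatim in a binary column, while a bare token other than NULL/null is rejected.

```python
import taos

conn = taos.connect()                       # assumes default host/user/password
cur = conn.cursor()
cur.execute("create database if not exists null_demo")
cur.execute("use null_demo")
cur.execute("create table nt (ts timestamp, v int unsigned, s binary(10))")

# quoted NULL-like text: NULL for the numeric column, literal text for the binary column
cur.execute("insert into nt values(now, 'Null', 'Null')")
cur.execute("select v, s from nt")
print(cur.fetchall())                       # expected per the sim case: [(None, 'Null')]

# a bare token that is not NULL/null is a syntax error, as the sql_error lines above assert
try:
    cur.execute("insert into nt values(now, Nul, 'x')")
except Exception as err:
    print("rejected as expected:", err)
```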

View File

@ -242,3 +242,5 @@ else
$reboot_cnt = $reboot_cnt + 1
goto reboot_and_check
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -226,4 +226,6 @@ endi
if $data41 != NULL then
print data41 $data41 != NULL
return -1
endi
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -816,4 +816,6 @@ endi
if $data44 != n8 then
print data44 $data44 != n8
return -1
endi
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -36,28 +36,22 @@ sql select c1 *( 2 / 3 ), c1/c1 from $tb order by ts asc;
if $rows != 10000 then
return -1
endi
if $data00 != 0.000000000 then
return -1
endi
#if $data01 != -nan then
# print expect -nan, actual: $data01
# return -1
#endi
if $data10 != 0.666666667 then
return -1
endi
if $data11 != 1.000000000 then
return -1
endi
if $data90 != 6.000000000 then
return -1
endi
if $data91 != 1.000000000 then
return -1
endi
@ -66,65 +60,49 @@ sql select (c1 * 2) % 7.9, c1*1, c1*1*1, c1*c1, c1*c1*c1 from $tb order by ts de
if $rows != 10000 then
return -1
endi
if $data00 != 2.200000000 then
print expect 2.200000000, actual:$data00
return -1
endi
if $data01 != 9.000000000 then
return -1
endi
if $data02 != 9.000000000 then
return -1
endi
if $data03 != 81.000000000 then
return -1
endi
if $data04 != 729.000000000 then
return -1
endi
if $data10 != 0.200000000 then
return -1
endi
if $data11 != 8.000000000 then
return -1
endi
if $data12 != 8.000000000 then
return -1
endi
if $data13 != 64.000000000 then
return -1
endi
if $data14 != 512.000000000 then
return -1
endi
if $data90 != 0.000000000 then
return -1
endi
if $data91 != 0.000000000 then
return -1
endi
if $data92 != 0.000000000 then
return -1
endi
if $data93 != 0.000000000 then
return -1
endi
if $data94 != 0.000000000 then
return -1
endi
@ -134,20 +112,16 @@ sql select c1 * c2 /4 from $tb where ts < 1537166000000 and ts > 1537156000000
if $rows != 17 then
return -1
endi
if $data00 != 12.250000000 then
return -1
endi
if $data10 != 16.000000000 then
return -1
endi
if $data20 != 20.250000000 then
print expect 20.250000000, actual:$data20
return -1
endi
if $data30 != 0.000000000 then
return -1
endi
@ -180,47 +154,36 @@ sql select c2-c1*1.1, c3/c2, c4*c3, c5%c4, (c6+c4)%22, c2-c2 from $tb
if $rows != 10000 then
return -1
endi
if $data00 != 0.000000000 then
return -1
endi
#if $data01 != -nan then
# return -1
#endi
if $data02 != 0.000000000 then
return -1
endi
if $data03 != NULL then
return -1
endi
if $data04 != 0.000000000 then
return -1
endi
if $data05 != 0.000000000 then
return -1
endi
if $data90 != -0.900000000 then
return -1
endi
if $data91 != 1.000000000 then
return -1
endi
if $data92 != 81.000000000 then
return -1
endi
if $data93 != 0.000000000 then
return -1
endi
if $data94 != 18.000000000 then
return -1
endi
@ -237,10 +200,8 @@ sql select c8+c7, c9+c9+c8+c7/c6 from $tb
# arithmetic expression in join [d.7]==================================================
# arithmetic expression in union [d.8]=================================================
# arithmetic expression in group by [d.9]==============================================
# in group by tag, not support for normal table
sql_error select c5*99 from $tb group by t1
@ -248,17 +209,14 @@ sql_error select c5*99 from $tb group by t1
# in group by column
sql_error select c6-(c6+c3)*12 from $tb group by c3;
# limit offset [d.10]==================================================================
sql select c6 * c1 + 12 from $tb limit 12 offset 99;
if $rows != 12 then
return -1
endi
if $data00 != 93.000000000 then
return -1
endi
if $data90 != 76.000000000 then
return -1
endi
@ -267,7 +225,6 @@ sql select c4 / 99.123 from $tb limit 10 offset 9999;
if $rows != 1 then
return -1
endi
if $data00 != 0.090796283 then
return -1
endi
@ -283,27 +240,21 @@ sql select c1, c2+c6, 12.9876545678, 1, 1.1 from $tb
if $rows != 10000 then
return -1
endi
if $data00 != 0 then
return -1
endi
if $data01 != 0.000000000 then
return -1
endi
if $data02 != 12.987654568 then
return -1
endi
if $data03 != 1 then
return -1
endi
if $data04 != 1.100000000 then
return -1
endi
if $data10 != 1 then
return -1
endi
@ -313,27 +264,21 @@ sql select c1, c2+c6, 12.9876545678, 1, 1.1 from $tb where c1<2
if $rows != 2000 then
return -1
endi
if $data00 != 0 then
return -1
endi
if $data01 != 0.000000000 then
return -1
endi
if $data02 != 12.987654568 then
return -1
endi
if $data03 != 1 then
return -1
endi
if $data10 != 1 then
return -1
endi
if $data20 != 0 then
return -1
endi
@ -377,7 +322,6 @@ sql select first(c1) * ( 2 / 3 ) from $stb order by ts asc;
if $rows != 1 then
return -1
endi
if $data00 != 0.000000000 then
return -1
endi
@ -386,7 +330,6 @@ sql select first(c1) * (2/99) from $stb order by ts desc;
if $rows != 1 then
return -1
endi
if $data00 != 0.000000000 then
return -1
endi
@ -395,15 +338,12 @@ sql select (count(c1) * 2) % 7.9, (count(c1) * 2), ( count(1)*2) from $stb
if $rows != 1 then
return -1
endi
if $data00 != 1.800000000 then
return -1
endi
if $data01 != 100000.000000000 then
return -1
endi
if $data02 != 200000.000000000 then
return -1
endi
@ -412,16 +352,13 @@ sql select spread( c1 )/44, spread(c1), 0.204545455 * 44 from $stb
if $rows != 1 then
return -1
endi
if $data00 != 0.204545455 then
print expect 0.204545455, actual: $data00
return -1
endi
if $data01 != 9.000000000 then
return -1
endi
if $data02 != 9.000000020 then
return -1
endi
@ -431,27 +368,21 @@ sql select min(c1) * max(c2) /4, sum(c1) * apercentile(c2, 20), apercentile(c4,
if $rows != 1 then
return -1
endi
if $data00 != 0.000000000 then
return -1
endi
if $data01 != 225000.000000000 then
return -1
endi
if $data02 != 8.077777778 then
return -1
endi
if $data03 != NULL then
return -1
endi
if $data04 != 0.444444444 then
return -1
endi
if $data05 != 450000.000000000 then
return -1
endi
@ -487,35 +418,29 @@ sql_error select top(c1, 99) - bottom(c1, 99) from $stb
sql select c2-c1, c3/c2, c4*c3, c5%c4, c6+99%22 from $stb
# error case, ts/bool/binary/nchar not support arithmetic expression
sql select first(c7)*12 from $stb
sql select last(c8)/55 from $stb
sql_error select last_row(c9) + last_row(c8) from $stb
sql select first(c7)*12 from $stb
sql select last(c8)/55 from $stb
sql select last_row(c9) + last_row(c8) from $stb
# arithmetic expression in join [d.7]===============================================================
# arithmetic expression in union [d.8]===============================================================
# arithmetic expression in group by [d.9]===============================================================
# in group by tag
sql select avg(c4)*99 from $stb group by t1
sql select avg(c4)*99, t1 from $stb group by t1 order by t1
if $rows != 10 then
return -1
endi
if $data00 != 445.500000000 then
return -1
endi
if $data01 != 0 then
return -1
endi
if $data90 != 445.500000000 then
return -1
endi
if $data91 != 9 then
return -1
endi
@ -550,22 +475,19 @@ endi
# return -1
#endi
#
sql_error select first(c6) - last(c6) *12 / count(*) from $stb group by c3;
sql select first(c6) - last(c6) *12 / count(*) from $stb group by c3;
sql select first(c6) - last(c6) *12 / count(*) from $stb group by c5;
if $rows != 10 then
sql select first(c6) - last(c6) *12 / count(*) from $stb group by c5 order by c5;
if $rows != 11 then
return -1
endi
if $data00 != 0.000000000 then
if $data10 != 0.000000000 then
return -1
endi
if $data10 != 0.997600000 then
if $data20 != 0.997600000 then
return -1
endi
if $data90 != 8.978400000 then
if $data90 != 7.980800000 then
return -1
endi
@ -574,7 +496,6 @@ sql select first(c6) - sum(c6) + 12 from $stb limit 12 offset 0;
if $rows != 1 then
return -1
endi
if $data00 != -449988.000000000 then
return -1
endi
@ -604,10 +525,8 @@ sql_error select first(c1) from $stb fill(value, 20);
# constant column. [d.13]===============================================================
# column value filter [d.14]===============================================================
# tag filter. [d.15]===============================================================
sql select sum(c2)+99 from $stb where t1=12;
@ -633,7 +552,6 @@ sql select avg(c2)*count(c2), sum(c3)-first(c3), last(c4)+9 from $stb interval(1
if $rows != 10000 then
return -1
endi
if $data00 != @18-09-17 09:00:00.000@ then
return -1
endi
@ -645,11 +563,9 @@ sql_error select first(c7)- last(c1) from $tb interval(2y)
# first/last query [d.19]===============================================================
# multiple retrieve [d.20]===============================================================
sql select c2-c2 from $tb
sql select first(c1)-last(c1), spread(c2), max(c3) - min(c3), avg(c4)*count(c4) from $tb
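
Several statements in this hunk flip from sql_error to sql: aggregate arithmetic combined with GROUP BY on an ordinary data column is now accepted, with an explicit ORDER BY and the NULL group counted as its own row. A short sketch of that pattern with the Python connector; the database name ca_db is assumed to already exist, and stb stands in for the super table this case builds.

```python
import taos

conn = taos.connect(database="ca_db")       # assumes the test database already exists
cur = conn.cursor()

# aggregate arithmetic grouped and ordered by a data column, as in the updated case above
cur.execute("select first(c6) - last(c6) * 12 / count(*) from stb group by c5 order by c5")
for row in cur.fetchall():
    print(row)                              # one row per distinct c5 value, NULL group included
```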

View File

@ -1,4 +1,3 @@
sleep 100
sql connect
sql create database if not exists db
sql use db

View File

@ -1,4 +1,3 @@
sleep 100
sql connect
sql create database if not exists db
sql use db

View File

@ -1,5 +1,3 @@
####
sleep 100
sql connect
sql create database if not exists db
sql use db

View File

@ -1,5 +1,3 @@
####
sleep 100
sql connect
sql create database if not exists db
sql use db

View File

@ -1,4 +1,3 @@
sleep 100
sql connect
sql create database if not exists db
sql use db

View File

@ -1,5 +1,3 @@
sleep 100
sql connect
sql create database if not exists db
sql use db

View File

@ -1,5 +1,3 @@
sleep 100
sql connect
sql create database if not exists db
sql use db

View File

@ -1,5 +1,3 @@
sleep 100
sql connect
sql create database if not exists db
sql use db

View File

@ -1,4 +1,3 @@
sleep 100
sql connect
$dbPrefix = first_db

View File

@ -278,7 +278,6 @@ sql create stable td6086st(ts timestamp, d double) tags(t nchar(50));
sql create table td6086ct1 using td6086st tags("ct1");
sql create table td6086ct2 using td6086st tags("ct2");
return
sql SELECT LAST(d),t FROM td6086st WHERE tbname in ('td6086ct1', 'td6086ct2') and ts>="2019-07-30 00:00:00" and ts<="2021-08-31 00:00:00" partition BY tbname interval(1800s) fill(prev);
print ==================> td-2624

View File

@ -3,6 +3,6 @@
Cur_Dir=$(pwd)
echo $Cur_Dir
echo "'2020-1-1 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
echo "'2020-1-2 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
echo "'2020-1-3 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
echo "'2020-1-1 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> /tmp/data.sql
echo "'2020-1-2 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> /tmp/data.sql
echo "'2020-1-3 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> /tmp/data.sql

View File

@ -3,25 +3,6 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
print ====> dnode not ready!
return -1
endi
sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
return -1
endi
if $data04 != ready then
goto check_dnode_ready
endi
sql connect
$dbPrefix = group_db
$tbPrefix = group_tb
$mtPrefix = group_mt
@ -80,8 +61,6 @@ while $i < $tbNum
$tstart = 1640966400000
endw
sleep 100
$i1 = 1
$i2 = 0
@ -752,12 +731,7 @@ sql insert into tm1 values('2020-2-1 1:1:1', 2, 10);
sql insert into tm1 values('2020-2-1 1:1:2', 2, 20);
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 100
system sh/exec.sh -n dnode1 -s start
sleep 100
sql connect
sleep 100
sql use group_db0;
print =========================>TD-4894

View File

@ -7,7 +7,7 @@ sql drop database if exists indb
sql create database if not exists indb
sql use indb
$inFileName = '~/data.csv'
$inFileName = '/tmp/data.csv'
$numOfRows = 10000
system tsim/parser/gendata.sh
@ -16,8 +16,8 @@ sql create table stbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16
sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2))
print ====== create tables success, starting insert data
sql insert into tbx file '~/data.sql'
sql import into tbx file '~/data.sql'
sql insert into tbx file '/tmp/data.sql'
sql import into tbx file '/tmp/data.sql'
sql select count(*) from tbx
if $rows != 1 then
@ -31,8 +31,8 @@ endi
sql drop table tbx;
sql insert into tbx using stbx tags(1,'abc') file '~/data.sql';
sql insert into tbx using stbx tags(1,'abc') file '~/data.sql';
sql insert into tbx using stbx tags(1,'abc') file '/tmp/data.sql';
sql insert into tbx using stbx tags(1,'abc') file '/tmp/data.sql';
sql select count(*) from tbx
if $rows != 1 then
@ -44,7 +44,7 @@ if $data00 != 3 then
endi
sql drop table tbx;
sql insert into tbx using stbx(b) tags('abcf') file '~/data.sql';
sql insert into tbx using stbx(b) tags('abcf') file '/tmp/data.sql';
sql select ts,a,b from tbx;
if $rows != 3 then
@ -64,6 +64,6 @@ if $data02 != @abcf@ then
return -1
endi
system rm -f ~/data.sql
system rm -f /tmp/data.sql
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,4 +1,3 @@
sleep 100
sql connect
$dbPrefix = intp_db

View File

@ -54,8 +54,6 @@ while $i < $tbNum
$tstart = 100000
endw
sleep 100
$tstart = 100000
$mt = $mtPrefix . 1 . $i
sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12), t3 int)
@ -99,8 +97,6 @@ while $i < $tbNum
$tstart = 100000
endw
sleep 100
$i1 = 1
$i2 = 0

View File

@ -62,11 +62,8 @@ run tsim/parser/limit_stb.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
sleep 100
run tsim/parser/limit_tb.sim
run tsim/parser/limit_stb.sim

View File

@ -18,7 +18,7 @@ $stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
sql create database $db cache 16
sql create database $db
print ====== create tables
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int)

View File

@ -370,7 +370,8 @@ sql select top(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
if $rows != 0 then
return -1
endi
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu limit 3 offset 1
sql select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts limit 3 offset 1
if $rows != 3 then
return -1
endi
@ -392,6 +393,7 @@ endi
if $data21 != 9 then
return -1
endi
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu limit 3 offset 5
if $rows != 0 then
return -1
@ -401,7 +403,8 @@ sql select bottom(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset
if $rows != 0 then
return -1
endi
sql select bottom(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu limit 3 offset 1
sql select ts, bottom(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts limit 3 offset 1
if $rows != 3 then
return -1
endi

View File

@ -61,11 +61,10 @@ while $i < $halfNum
endw
print ====== tables created
#run tsim/parser/limit2_query.sim
run tsim/parser/limit2_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 100
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed

View File

@ -27,8 +27,8 @@ print select count(*) from $stb where t1 > $val1 and t1 < $val2 group by t1, t2,
sql select count(*), t1, t2, t3, t4, t5, t6 from $stb where t1 > $val1 and t1 < $val2 group by t1, t2, t3, t4, t5, t6 order by t1 asc limit 1 offset 0
$val = $tbNum - 3
print $rows $val
if $rows != $val then
print $rows
if $rows != 1 then
return -1
endi
if $data00 != $rowNum then
@ -51,7 +51,7 @@ if $data05 != 2 then
return -1
endi
sql select count(*) from $stb where t2 like '%' and t1 > 2 and t1 < 5 group by t3, t4 order by t3 desc limit 1 offset 0
sql select count(*), t3, t4 from $stb where t2 like '%' and t1 > 2 and t1 < 5 group by t3, t4 order by t3 desc limit 2 offset 0
if $rows != 2 then
return -1
endi
@ -70,15 +70,17 @@ endi
if $data12 != 3 then
return -1
endi
sql select count(*) from $stb where t2 like '%' and t1 > 2 and t1 < 5 group by t3, t4 order by t3 desc limit 1 offset 1
if $rows != 0 then
if $rows != 1 then
return -1
endi
## TBASE-348
sql_error select count(*) from $stb where t1 like 1
##### aggregation on tb + where + fill + limit offset
sql select max(c1) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2) limit 10 offset 1
sql select _wstart, max(c1) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2) limit 10 offset 1
if $rows != 10 then
return -1
endi

View File

@ -358,8 +358,8 @@ endi
print ========> TD-6017
sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1)
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
sql select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print $data00 $data01
print $data10 $data11
print $data20 $data21
@ -386,7 +386,7 @@ if $data21 != 6 then
return -1
endi
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts asc limit 3 offset 1
sql select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts asc limit 3 offset 1
if $rows != 3 then
return -1
endi
@ -418,7 +418,7 @@ sql select bottom(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset
if $rows != 0 then
return -1
endi
sql select bottom(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu limit 3 offset 1
sql select ts, bottom(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts limit 3 offset 1
if $rows != 3 then
return -1
endi
@ -482,7 +482,7 @@ endi
if $data41 != 4 then
return -1
endi
sql select max(c1), max(c2), max(c3), max(c4), max(c5), max(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) limit 5 offset 1
sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5), max(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) limit 5 offset 1
if $rows != 5 then
return -1
endi
@ -518,7 +518,7 @@ if $data41 != 5 then
endi
## TBASE-334
sql select max(c1), max(c2), max(c3), max(c4), max(c5), max(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(30m) limit 2 offset 1
sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5), max(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(30m) limit 2 offset 1
if $rows != 2 then
return -1
endi
@ -634,7 +634,8 @@ sql select stddev(c1), stddev(c2), stddev(c3), stddev(c4), stddev(c5), stddev(c6
if $rows != 0 then
return -1
endi
sql select stddev(c1), stddev(c2), stddev(c3), stddev(c4), stddev(c5), stddev(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(30m) limit 5 offset 1
sql select _wstart, stddev(c1), stddev(c2), stddev(c3), stddev(c4), stddev(c5), stddev(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(30m) limit 5 offset 1
if $rows != 3 then
return -1
endi
@ -670,6 +671,7 @@ endi
if $data31 != 3 then
return -1
endi
sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(27m) limit 5 offset 1
if $rows != 3 then
return -1
@ -707,7 +709,8 @@ sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6) from
if $rows != 0 then
return -1
endi
sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(30m) limit 3 offset 1
sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(30m) limit 3 offset 1
if $rows != 3 then
return -1
endi
@ -721,7 +724,6 @@ if $data23 != 9.00000 then
return -1
endi
sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
if $rows != 0 then
return -1
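
The rewritten queries in this file follow one pattern: selection functions such as top()/bottom() are paired with an explicit ts column, windowed aggregates expose the window start through the _wstart pseudo column, and ORDER BY is stated before LIMIT/OFFSET. A compact sketch of both forms; lm_db and lm_tb0 are hypothetical names standing in for the case's database and tables.

```python
import taos

conn = taos.connect(database="lm_db")       # assumed existing test database
cur = conn.cursor()

# selection function with an explicit ts and a deterministic order before LIMIT/OFFSET
cur.execute("select ts, top(c1, 5) from lm_tb0 order by ts limit 3 offset 1")
print(cur.fetchall())

# windowed aggregate exposing the window start via _wstart
cur.execute("select _wstart, max(c1) from lm_tb0 interval(5m) limit 5 offset 1")
print(cur.fetchall())
```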

View File

@ -62,8 +62,6 @@ while $i < $half
$tstart = 100000
endw
sleep 100
$i1 = 1
$i2 = 0

View File

@ -28,7 +28,6 @@ run tsim/parser/single_row_in_tb_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed

View File

@ -52,11 +52,9 @@ run tsim/parser/slimit1_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
sleep 100
run tsim/parser/slimit1_query.sim

View File

@ -1,4 +1,3 @@
sleep 100
sql connect
$dbPrefix = slm_alt_tg_db

View File

@ -1,10 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
system sh/exec.sh -n dnode1 -s start
sleep 100
sql connect
$dbPrefix = slm_alt_tg_db
@ -93,7 +89,6 @@ if $data02 != tb0 then
return -1
endi
sleep 500
sql reset query cache
sql select count(*), first(ts) from stb group by tg_added order by tg_added asc slimit 5 soffset 3
if $rows != 5 then
@ -171,11 +166,8 @@ endi
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
sleep 100
sql use $db
### repeat above queries

View File

@ -1,4 +1,3 @@
sleep 100
sql connect
$dbPrefix = slm_db

View File

@ -95,9 +95,6 @@ while $i < $tbNum
$j = $j + 1
endw
print sleep 1sec.
sleep 100
$i = 1
$tb = $tbPrefix . $i

View File

@ -179,4 +179,6 @@ if $data05 != 30.000000000 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -367,7 +367,7 @@ if $data32 != 8 then
endi
#$loop_all = 0
#looptest:
#=looptest:
sql drop database IF EXISTS test2;
sql drop stream IF EXISTS streams21;
@ -511,6 +511,6 @@ endi
$loop_all = $loop_all + 1
print ============loop_all=$loop_all
#goto looptest
#=goto looptest
system sh/stop_dnodes.sh

View File

@ -31,7 +31,7 @@ sql create table $tb using $mt tags( 0, '0' )
$i = 1
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( 1, 1 )
sql create table $tb using $mt tags( 1, '1' )
$i = 2
$tb = $tbPrefix . $i
@ -39,7 +39,7 @@ sql create table $tb using $mt tags( '2', '2' )
$i = 3
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( '3', 3 )
sql create table $tb using $mt tags( '3', '3' )
sql show tables
if $rows != 4 then
@ -54,7 +54,7 @@ sql insert into $tb values(now, 0, '0')
$i = 1
$tb = $tbPrefix . $i
sql insert into $tb values(now, 1, 1 )
sql insert into $tb values(now, 1, '1' )
$i = 2
$tb = $tbPrefix . $i
@ -62,7 +62,7 @@ sql insert into $tb values(now, '2', '2')
$i = 3
$tb = $tbPrefix . $i
sql insert into $tb values(now, '3', 3)
sql insert into $tb values(now, '3', '3')
print =============== step4
sql select * from $mt where tgcol2 = '1'

View File

@ -249,8 +249,8 @@ sql alter table $mt add tag tgcol6 binary(10)
sql reset query cache
sql alter table $tb set tag tgcol4=false
sql alter table $tb set tag tgcol5=5
sql alter table $tb set tag tgcol6=6
sql alter table $tb set tag tgcol5='5'
sql alter table $tb set tag tgcol6='6'
sql reset query cache
sql select * from $mt where tgcol5 = '5'
@ -321,7 +321,7 @@ if $data04 != 3 then
return -1
endi
sql alter table $mt change tag tgcol1 tgcol4
sql alter table $mt rename tag tgcol1 tgcol4
sql alter table $mt drop tag tgcol2
sql alter table $mt drop tag tgcol3
sql alter table $mt add tag tgcol5 bigint
@ -382,14 +382,14 @@ if $data04 != 3 then
return -1
endi
sql alter table $mt change tag tgcol1 tgcol4
sql alter table $mt rename tag tgcol1 tgcol4
sql alter table $mt drop tag tgcol2
sql alter table $mt drop tag tgcol3
sql alter table $mt add tag tgcol5 binary(17)
sql alter table $mt add tag tgcol6 bool
sql reset query cache
sql alter table $tb set tag tgcol4=4
sql alter table $tb set tag tgcol5=5
sql alter table $tb set tag tgcol5='5'
sql alter table $tb set tag tgcol6=1
sql reset query cache
@ -423,7 +423,7 @@ $i = 9
$mt = $mtPrefix . $i
$tb = $tbPrefix . $i
sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10), tgcol3 binary(10))
sql create table $tb using $mt tags( 1, 2, '3' )
sql create table $tb using $mt tags( 1, '2', '3' )
sql insert into $tb values(now, 1)
sql select * from $mt where tgcol2 = '2'
if $rows != 1 then
@ -442,7 +442,7 @@ if $data04 != 3 then
return -1
endi
sql alter table $mt change tag tgcol1 tgcol4
sql alter table $mt rename tag tgcol1 tgcol4
sql alter table $mt drop tag tgcol2
sql alter table $mt drop tag tgcol3
sql alter table $mt add tag tgcol5 bool
@ -506,7 +506,7 @@ if $data05 != 4 then
return -1
endi
sql alter table $mt change tag tgcol1 tgcol4 -x step103
sql alter table $mt rename tag tgcol1 tgcol4 -x step103
return -1
step103:
@ -518,7 +518,7 @@ sql alter table $mt add tag tgcol4 binary(10)
sql alter table $mt add tag tgcol5 bool
sql reset query cache
sql alter table $tb set tag tgcol4=4
sql alter table $tb set tag tgcol4='4'
sql alter table $tb set tag tgcol5=false
sql reset query cache
@ -580,7 +580,7 @@ if $data06 != 5 then
return -1
endi
sql alter table $mt change tag tgcol1 tgcol4 -x step114
sql alter table $mt rename tag tgcol1 tgcol4 -x step114
return -1
step114:
@ -596,9 +596,9 @@ sql alter table $mt add tag tgcol7 bigint
sql alter table $mt add tag tgcol8 smallint
sql reset query cache
sql alter table $tb set tag tgcol4=4
sql alter table $tb set tag tgcol4='4'
sql alter table $tb set tag tgcol5=5
sql alter table $tb set tag tgcol6=6
sql alter table $tb set tag tgcol6='6'
sql alter table $tb set tag tgcol7=7
sql alter table $tb set tag tgcol8=8
sql reset query cache
@ -685,11 +685,11 @@ sql alter table $mt add tag tgcol5 bigint
sql reset query cache
sql alter table $tb set tag tgcol1=false
sql alter table $tb set tag tgcol2=5
sql alter table $tb set tag tgcol2='5'
sql alter table $tb set tag tgcol3=4
sql alter table $tb set tag tgcol4=3
sql alter table $tb set tag tgcol4='3'
sql alter table $tb set tag tgcol5=2
sql alter table $tb set tag tgcol6=1
sql alter table $tb set tag tgcol6='1'
sql reset query cache
sql select * from $mt where tgcol4 = '3'
@ -781,8 +781,8 @@ sql alter table $mt add tag tgcol4 int
sql alter table $mt add tag tgcol6 bigint
sql reset query cache
sql alter table $tb set tag tgcol1=7
sql alter table $tb set tag tgcol2=8
sql alter table $tb set tag tgcol1='7'
sql alter table $tb set tag tgcol2='8'
sql alter table $tb set tag tgcol3=9
sql alter table $tb set tag tgcol4=10
sql alter table $tb set tag tgcol5=11
@ -817,9 +817,7 @@ if $data07 != 12 then
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 3000
system sh/exec.sh -n dnode1 -s start
sleep 3000
print =============== step1
$i = 0
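
Two 3.0 spellings run through the hunks above: string tag values are written as quoted literals, and a tag is renamed with ALTER TABLE ... RENAME TAG, replacing the 2.x CHANGE TAG form. A small sketch of both, with made-up names and a local server assumed:

```python
import taos

conn = taos.connect()
cur = conn.cursor()
cur.execute("create database if not exists tag_demo")
cur.execute("use tag_demo")
cur.execute("create table st (ts timestamp, v int) tags (t1 int, t2 binary(10))")
cur.execute("create table ct1 using st tags (1, '1')")     # quoted literal for the binary tag
cur.execute("alter table st rename tag t1 t1_renamed")     # 3.0 form used in the hunks above
cur.execute("alter table ct1 set tag t2='2'")              # quoted value when setting a string tag
```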

View File

@ -28,7 +28,7 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def csum_query_form(self, col="c1", alias="", table_expr="t1", condition=""):

View File

@ -136,9 +136,14 @@ class TDTestCase:
tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
# 9. breakdown-frequency
# NULL ---count(NULL)=0 expect count(NULL)= 100
tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
parRows=tdSql.queryRows
assert parRows != 0 , "query result is wrong"
tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")

View File

@ -127,52 +127,12 @@ class TDTestCase:
tdLog.notice(" The election still succeeds when leader of both mnodes are killed ")
except Exception:
pass
tdSql.error("create user user1 pass '123';")
# tdSql.error("create user user1 pass '123';")
tdLog.info("start leader")
tdDnodes[0].starttaosd()
if clusterComCheck.checkMnodeStatus(2) :
print("both mnodes are ready")
# # first drop follower of mnode
# BUG
# tdSql.execute("drop mnode on dnode 2")
# tdSql.query("show mnodes;")
# tdSql.checkRows(1)
# tdSql.checkData(0,1,'%s:6030'%self.host)
# tdSql.checkData(0,2,'leader')
# tdSql.checkData(0,3,'ready')
# tdSql.execute("create mnode on dnode 2")
# count=0
# while count < 10:
# time.sleep(1)
# tdSql.query("show mnodes;")
# tdSql.checkRows(2)
# if tdSql.queryResult[0][2]=='leader' :
# if tdSql.queryResult[1][2]=='follower':
# print("two mnodes is ready")
# break
# count+=1
# else:
# print("two mnodes is not ready in 10s ")
# tdSql.query("show mnodes;")
# tdSql.checkRows(2)
# tdSql.checkData(0,1,'%s:6030'%self.host)
# tdSql.checkData(0,2,'leader')
# tdSql.checkData(0,3,'ready')
# tdSql.checkData(1,1,'%s:6130'%self.host)
# tdSql.checkData(1,2,'follower')
# tdSql.checkData(2,3,'ready')
def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
port = dnode.cfgDict["serverPort"]
config_dir = dnode.cfgDir
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
self.five_dnode_two_mnode()

View File

@ -93,7 +93,7 @@ class TDTestCase:
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db',
'dbNumbers': 20,
'dbNumbers': 8,
'dropFlag': 1,
'event': '',
'vgroups': 4,
@ -183,15 +183,22 @@ class TDTestCase:
for tr in threads:
tr.join()
tdLog.info("check dnode number:")
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(allDbNumbers)
for i in range(restartNumbers):
clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
tdSql.query("show databases")
tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers))
# tdLog.info("check DB Rows:")
# clusterComCheck.checkDbRows(allDbNumbers)
# tdLog.info("check DB Status on by on")
# for i in range(restartNumbers):
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='dnode')
def stop(self):
tdSql.close()

View File

@ -65,40 +65,15 @@ class TDTestCase:
self._async_raise(thread.ident, SystemExit)
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
print("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db',
paraDict = {'dbName': 'db0_0',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'replica': 1,
'stbName': 'stb',
'stbNumbers': 100,
'stbNumbers': 80,
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
@ -188,7 +163,10 @@ class TDTestCase:
tdSql.execute("use %s" %(paraDict["dbName"]))
tdSql.query("show stables")
tdSql.checkRows(allStbNumbers)
tdLog.debug("we find %d stables but exepect to create %d stables "%(tdSql.queryRows,allStbNumbers))
# # tdLog.info("check Stable Rows:")
# tdSql.checkRows(allStbNumbers)
def run(self):

View File

@ -68,7 +68,7 @@ class TDTestCase:
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db',
'dbNumbers': 10,
'dbNumbers': 8,
'dropFlag': 1,
'event': '',
'vgroups': 2,

View File

@ -98,7 +98,7 @@ class TDTestCase:
'vgroups': 4,
'replica': 1,
'stbName': 'stb',
'stbNumbers': 100,
'stbNumbers': 80,
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
@ -142,7 +142,8 @@ class TDTestCase:
threads=[]
for i in range(restartNumbers):
stableName= '%s%d'%(paraDict['stbName'],i)
threads.append(threading.Thread(target=clusterComCreate.create_stables, args=(tdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])))
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.create_stables, args=(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])))
for tr in threads:
tr.start()
@ -190,6 +191,7 @@ class TDTestCase:
tdSql.execute("use %s" %(paraDict["dbName"]))
tdSql.query("show stables")
tdLog.debug("we find %d stables but exepect to create %d stables "%(tdSql.queryRows,allStbNumbers))
# # tdLog.info("check Stable Rows:")
# tdSql.checkRows(allStbNumbers)
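
The change above hands each worker thread its own SQL handle (tdCom.newTdSql()) instead of sharing the global tdSql object across threads. A rough equivalent with the plain Python connector, where every thread opens its own connection; database and table names are made up:

```python
import threading
import taos

def create_stables(db, prefix, count):
    conn = taos.connect()                   # one connection per thread, never shared
    cur = conn.cursor()
    for i in range(count):
        cur.execute(f"create stable if not exists {db}.{prefix}_{i} "
                    "(ts timestamp, c0 int) tags (t0 int)")
    conn.close()

setup = taos.connect().cursor()
setup.execute("create database if not exists db0_0")

threads = [threading.Thread(target=create_stables, args=("db0_0", f"stb{i}", 10))
           for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```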

View File

@ -159,19 +159,19 @@ class TDTestCase:
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
tdSql.query("show databases")
tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers))
# tdSql.query("show databases")
# tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers))
# # tdLog.info("check DB Rows:")
# clusterComCheck.checkDbRows(allDbNumbers)
# # tdLog.info("check DB Status on by on")
# tdLog.info("check DB Rows:")
clusterComCheck.checkDbRows(allDbNumbers)
# tdLog.info("check DB Status on by on")
# for i in range(restartNumbers):
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=15,stopRole='vnode')
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='vnode')
def stop(self):
tdSql.close()

View File

@ -98,7 +98,7 @@ class TDTestCase:
'vgroups': 4,
'replica': 1,
'stbName': 'stb',
'stbNumbers': 100,
'stbNumbers': 80,
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
@ -192,6 +192,8 @@ class TDTestCase:
tdSql.execute("use %s" %(paraDict["dbName"]))
tdSql.query("show stables")
tdLog.debug("we find %d stables but exepect to create %d stables "%(tdSql.queryRows,allStbNumbers))
# # tdLog.info("check Stable Rows:")
tdSql.checkRows(allStbNumbers)

View File

@ -111,7 +111,7 @@ class TDTestCase:
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(5,3,1)
self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)
def stop(self):
tdSql.close()

Some files were not shown because too many files have changed in this diff.