Merge branch '3.0' of https://github.com/taosdata/TDengine into feature/3.0_mhli
commit 13eaf5fc73

@@ -16,3 +16,6 @@
[submodule "tools/taos-tools"]
	path = tools/taos-tools
	url = https://github.com/taosdata/taos-tools
[submodule "tools/taosadapter"]
	path = tools/taosadapter
	url = https://github.com/taosdata/taosadapter.git

Jenkinsfile2 (14 changes)
@@ -38,6 +38,7 @@ def pre_test(){
sh '''
hostname
date
env
'''
sh '''
cd ${WK}
@@ -82,23 +83,33 @@ def pre_test(){
sh '''
cd ${WKC}
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log
echo "community log: `git log -5`" >>${WKDIR}/jenkins.log
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git log -5
echo "community log merged: `git log -5`" >>${WKDIR}/jenkins.log
cd ${WK}
git pull >/dev/null
git log -5
echo "tdinternal log: `git log -5`" >>${WKDIR}/jenkins.log
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
sh '''
cd ${WK}
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log
echo "tdinternal log: `git log -5`" >>${WKDIR}/jenkins.log
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git log -5
echo "tdinternal log merged: `git log -5`" >>${WKDIR}/jenkins.log
cd ${WKC}
git pull >/dev/null
git log -5
echo "community log: `git log -5`" >>${WKDIR}/jenkins.log
'''
} else {
sh '''
@@ -113,6 +124,9 @@ def pre_test(){
cd ${WKPY}
git reset --hard
git pull
git log -5
echo "python connector log: `git log -5`" >>${WKDIR}/jenkins.log
echo >>${WKDIR}/jenkins.log
'''
return 1
}

@@ -18,6 +18,33 @@ if (NOT DEFINED TD_GRANT)
SET(TD_GRANT FALSE)
endif()

IF ("${BUILD_HTTP}" STREQUAL "")
IF (TD_LINUX)
IF (TD_ARM_32)
SET(TD_BUILD_HTTP TRUE)
ELSE ()
SET(TD_BUILD_HTTP TRUE)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_BUILD_HTTP TRUE)
ELSE ()
SET(TD_BUILD_HTTP TRUE)
ENDIF ()
ELSEIF (${BUILD_HTTP} MATCHES "false")
SET(TD_BUILD_HTTP FALSE)
ELSEIF (${BUILD_HTTP} MATCHES "true")
SET(TD_BUILD_HTTP TRUE)
ELSEIF (${BUILD_HTTP} MATCHES "internal")
SET(TD_BUILD_HTTP FALSE)
SET(TD_BUILD_TAOSA_INTERNAL TRUE)
ELSE ()
SET(TD_BUILD_HTTP TRUE)
ENDIF ()

IF (TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF ()

IF ("${BUILD_TOOLS}" STREQUAL "")
IF (TD_LINUX)
IF (TD_ARM_32)

@@ -1134,11 +1134,11 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp* pRsp);
int32_t tSerializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp);
int32_t tDeserializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp);
void tFreeSTableMetaRsp(STableMetaRsp* pRsp);
void tFreeSTableIndexRsp(void *info);
void tFreeSTableIndexRsp(void* info);

typedef struct {
SArray* pMetaRsp; // Array of STableMetaRsp
SArray* pIndexRsp; // Array of STableIndexRsp;
SArray* pMetaRsp;   // Array of STableMetaRsp
SArray* pIndexRsp;  // Array of STableIndexRsp;
} SSTbHbRsp;

int32_t tSerializeSSTbHbRsp(void* buf, int32_t bufLen, SSTbHbRsp* pRsp);
@@ -1305,8 +1305,9 @@ int32_t tSerializeSSetStandbyReq(void* buf, int32_t bufLen, SSetStandbyReq* pReq
int32_t tDeserializeSSetStandbyReq(void* buf, int32_t bufLen, SSetStandbyReq* pReq);

typedef struct {
int32_t connId;
int32_t queryId;
int32_t connId;   // todo remove
int32_t queryId;  // todo remove
char queryStrId[TSDB_QUERY_ID_LEN];
} SKillQueryReq;

int32_t tSerializeSKillQueryReq(void* buf, int32_t bufLen, SKillQueryReq* pReq);

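The hunk above extends SKillQueryReq with a textual queryStrId while keeping the old numeric fields (marked "todo remove"). A minimal sketch of how a caller might fill the new field, assuming the definitions shown in this commit; the helper buildKillQueryReq is illustrative and not part of the patch:

    #include <stdint.h>
    #include <string.h>

    #define TSDB_QUERY_ID_LEN 26  /* mirrors the tdef.h hunk in this commit */

    typedef struct {
      int32_t connId;                        /* legacy field, "todo remove" */
      int32_t queryId;                       /* legacy field, "todo remove" */
      char    queryStrId[TSDB_QUERY_ID_LEN]; /* new: textual query id */
    } SKillQueryReq;

    /* Hypothetical helper: zero the request and copy the id, always NUL-terminated. */
    static void buildKillQueryReq(SKillQueryReq *pReq, const char *id) {
      memset(pReq, 0, sizeof(*pReq));
      strncpy(pReq->queryStrId, id, sizeof(pReq->queryStrId) - 1);
    }
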
@@ -78,7 +78,7 @@
#define TK_BUFFER 60
#define TK_CACHELAST 61
#define TK_COMP 62
#define TK_DAYS 63
#define TK_DURATION 63
#define TK_NK_VARIABLE 64
#define TK_FSYNC 65
#define TK_MAXROWS 66

@@ -89,6 +89,7 @@ typedef struct STableOptions {
ENodeType type;
char comment[TSDB_TB_COMMENT_LEN];
double filesFactor;
int32_t delay;
SNodeList* pRollupFuncs;
int32_t ttl;
SNodeList* pSma;
@@ -286,6 +287,11 @@ typedef struct SKillStmt {
int32_t targetId;
} SKillStmt;

typedef struct SKillQueryStmt {
ENodeType type;
char queryId[TSDB_QUERY_ID_LEN];
} SKillQueryStmt;

typedef struct SStreamOptions {
ENodeType type;
int8_t triggerType;

@@ -204,6 +204,7 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_PROJECT,

@@ -34,7 +34,13 @@ typedef struct SLogicNode {
uint8_t precision;
} SLogicNode;

typedef enum EScanType { SCAN_TYPE_TAG = 1, SCAN_TYPE_TABLE, SCAN_TYPE_SYSTEM_TABLE, SCAN_TYPE_STREAM } EScanType;
typedef enum EScanType {
SCAN_TYPE_TAG = 1,
SCAN_TYPE_TABLE,
SCAN_TYPE_SYSTEM_TABLE,
SCAN_TYPE_STREAM,
SCAN_TYPE_TABLE_MERGE
} EScanType;

typedef struct SScanLogicNode {
SLogicNode node;
@@ -262,6 +268,7 @@ typedef struct STableScanPhysiNode {
} STableScanPhysiNode;

typedef STableScanPhysiNode STableSeqScanPhysiNode;
typedef STableScanPhysiNode STableMergeScanPhysiNode;
typedef STableScanPhysiNode SStreamScanPhysiNode;

typedef struct SProjectPhysiNode {

@@ -222,6 +222,8 @@ typedef enum ELogicConditionType {
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_TB_COMMENT_LEN 1025

#define TSDB_QUERY_ID_LEN 26

/**
* In some scenarios uint16_t (0~65535) is used to store the row len.
* - Firstly, we use 65531(65535 - 4), as the SDataRow/SKVRow contains 4 bits header.
@@ -341,6 +343,9 @@ typedef enum ELogicConditionType {
#define TSDB_DB_SCHEMALESS_OFF 0
#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF

// #define TSDB_MIN_ROLLUP_DELAY 1
// #define TSDB_MAX_ROLLUP_DELAY 10
// #define TSDB_DEFAULT_ROLLUP_DELAY 1
#define TSDB_MIN_ROLLUP_FILE_FACTOR 0
#define TSDB_MAX_ROLLUP_FILE_FACTOR 10
#define TSDB_DEFAULT_ROLLUP_FILE_FACTOR 0.1

@@ -3369,8 +3369,7 @@ int32_t tSerializeSKillQueryReq(void *buf, int32_t bufLen, SKillQueryReq *pReq)
tEncoderInit(&encoder, buf, bufLen);

if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->connId) < 0) return -1;
if (tEncodeI32(&encoder, pReq->queryId) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->queryStrId) < 0) return -1;
tEndEncode(&encoder);

int32_t tlen = encoder.pos;
@@ -3383,8 +3382,7 @@ int32_t tDeserializeSKillQueryReq(void *buf, int32_t bufLen, SKillQueryReq *pReq
tDecoderInit(&decoder, buf, bufLen);

if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->connId) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->queryId) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->queryStrId) < 0) return -1;
tEndDecode(&decoder);

tDecoderClear(&decoder);

@@ -407,6 +407,7 @@ static int32_t mndSetUpdateSmaStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb
return 0;
}

#if 0
static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
@@ -445,6 +446,7 @@ static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj

return 0;
}
#endif

static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
SSmaObj *pSma) {
@@ -579,7 +581,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
if (mndSetCreateSmaVgroupCommitLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetUpdateSmaStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
if (mndSetCreateSmaRedoActions(pMnode, pTrans, pDb, &smaObj) != 0) goto _OVER;
// if (mndSetCreateSmaRedoActions(pMnode, pTrans, pDb, &smaObj) != 0) goto _OVER;
if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &streamObj.fixedSinkVg, &smaObj) != 0) goto _OVER;
if (mndAddStreamToTrans(pMnode, &streamObj, pCreate->ast, STREAM_TRIGGER_AT_ONCE, 0, pTrans) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -734,6 +736,7 @@ static int32_t mndSetDropSmaVgroupCommitLogs(SMnode *pMnode, STrans *pTrans, SVg
return 0;
}

#if 0
static int32_t mndSetDropSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
@@ -774,6 +777,7 @@ static int32_t mndSetDropSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *

return 0;
}
#endif

static int32_t mndSetDropSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) {
SVnodeGid *pVgid = pVgroup->vnodeGid + 0;
@@ -824,7 +828,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;
if (mndSetUpdateSmaStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
if (mndSetDropSmaRedoActions(pMnode, pTrans, pDb, pSma) != 0) goto _OVER;
// if (mndSetDropSmaRedoActions(pMnode, pTrans, pDb, pSma) != 0) goto _OVER;
if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;

@@ -854,7 +858,7 @@ int32_t mndDropSmasByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p
if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;
if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER;
if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
if (mndSetDropSmaRedoActions(pMnode, pTrans, pDb, pSma) != 0) goto _OVER;
// if (mndSetDropSmaRedoActions(pMnode, pTrans, pDb, pSma) != 0) goto _OVER;
mndReleaseVgroup(pMnode, pVgroup);
pVgroup = NULL;
}

@@ -323,10 +323,14 @@ static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew) {
pOld->smaVer = pNew->smaVer;
pOld->nextColId = pNew->nextColId;
pOld->ttl = pNew->ttl;
pOld->numOfColumns = pNew->numOfColumns;
pOld->numOfTags = pNew->numOfTags;
memcpy(pOld->pColumns, pNew->pColumns, pOld->numOfColumns * sizeof(SSchema));
memcpy(pOld->pTags, pNew->pTags, pOld->numOfTags * sizeof(SSchema));
if (pNew->numOfColumns > 0) {
pOld->numOfColumns = pNew->numOfColumns;
memcpy(pOld->pColumns, pNew->pColumns, pOld->numOfColumns * sizeof(SSchema));
}
if (pNew->numOfTags > 0) {
pOld->numOfTags = pNew->numOfTags;
memcpy(pOld->pTags, pNew->pTags, pOld->numOfTags * sizeof(SSchema));
}
if (pNew->commentLen != 0) {
memcpy(pOld->comment, pNew->comment, pNew->commentLen);
}

@@ -1480,14 +1480,6 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
return code;
}

static int32_t physiStreamScanNodeToJson(const void* pObj, SJson* pJson) {
return physiTableScanNodeToJson(pObj, pJson);
}

static int32_t jsonToPhysiStreamScanNode(const SJson* pJson, void* pObj) {
return jsonToPhysiTableScanNode(pJson, pObj);
}

static const char* jkSysTableScanPhysiPlanMnodeEpSet = "MnodeEpSet";
static const char* jkSysTableScanPhysiPlanShowRewrite = "ShowRewrite";
static const char* jkSysTableScanPhysiPlanAccountId = "AccountId";
@@ -3964,9 +3956,9 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
return physiTagScanNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
return physiTableScanNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
return physiStreamScanNodeToJson(pObj, pJson);
return physiTableScanNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
return physiSysTableScanNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
@@ -4097,9 +4089,9 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
return jsonToPhysiTagScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
return jsonToPhysiTableScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
return jsonToPhysiStreamScanNode(pJson, pObj);
return jsonToPhysiTableScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
return jsonToPhysiSysTableScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:

@@ -209,9 +209,10 @@ SNode* nodesMakeNode(ENodeType type) {
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
return makeNode(type, sizeof(SShowStmt));
case QUERY_NODE_KILL_CONNECTION_STMT:
case QUERY_NODE_KILL_QUERY_STMT:
return makeNode(type, sizeof(SKillQueryStmt));
case QUERY_NODE_KILL_TRANSACTION_STMT:
case QUERY_NODE_KILL_CONNECTION_STMT:
return makeNode(type, sizeof(SKillStmt));
case QUERY_NODE_DELETE_STMT:
return makeNode(type, sizeof(SDeleteStmt));
@@ -251,6 +252,8 @@ SNode* nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(STableScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
return makeNode(type, sizeof(STableSeqScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
return makeNode(type, sizeof(STableMergeScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
return makeNode(type, sizeof(SStreamScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:

@@ -60,6 +60,7 @@ typedef enum EDatabaseOptionType {
typedef enum ETableOptionType {
TABLE_OPTION_COMMENT = 1,
TABLE_OPTION_FILE_FACTOR,
TABLE_OPTION_DELAY,
TABLE_OPTION_ROLLUP,
TABLE_OPTION_TTL,
TABLE_OPTION_SMA
@@ -187,6 +188,7 @@ SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, const
SNode* pOptions, SNode* pQuery);
SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pStreamName);
SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId);
SNode* createKillQueryStmt(SAstCreateContext* pCxt, const SToken* pQueryId);
SNode* createBalanceVgroupStmt(SAstCreateContext* pCxt);
SNode* createMergeVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId1, const SToken* pVgId2);
SNode* createRedistributeVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId, SNodeList* pDnodes);

@@ -168,8 +168,8 @@ db_options(A) ::= .
db_options(A) ::= db_options(B) BUFFER NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_BUFFER, &C); }
db_options(A) ::= db_options(B) CACHELAST NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_CACHELAST, &C); }
db_options(A) ::= db_options(B) COMP NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_COMP, &C); }
db_options(A) ::= db_options(B) DAYS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_DAYS, &C); }
db_options(A) ::= db_options(B) DAYS NK_VARIABLE(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_DAYS, &C); }
db_options(A) ::= db_options(B) DURATION NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_DAYS, &C); }
db_options(A) ::= db_options(B) DURATION NK_VARIABLE(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_DAYS, &C); }
db_options(A) ::= db_options(B) FSYNC NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_FSYNC, &C); }
db_options(A) ::= db_options(B) MAXROWS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_MAXROWS, &C); }
db_options(A) ::= db_options(B) MINROWS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_MINROWS, &C); }
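The two DAYS rules above are renamed to DURATION (they still map to DB_OPTION_DAYS internally). A short sketch of exercising the renamed option through the C client library; the connection details are illustrative defaults, not part of the patch:

    #include <stdio.h>
    #include <taos.h>

    int main(void) {
      TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
      if (conn == NULL) return 1;
      /* DURATION replaces the old DAYS keyword for the per-file time span. */
      TAOS_RES *res = taos_query(conn, "CREATE DATABASE IF NOT EXISTS demo DURATION 10");
      if (taos_errno(res) != 0) printf("error: %s\n", taos_errstr(res));
      taos_free_result(res);
      taos_close(conn);
      return 0;
    }
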
@@ -317,8 +317,9 @@ tags_def(A) ::= TAGS NK_LP column_def_list(B) NK_RP.

table_options(A) ::= . { A = createDefaultTableOptions(pCxt); }
table_options(A) ::= table_options(B) COMMENT NK_STRING(C). { A = setTableOption(pCxt, B, TABLE_OPTION_COMMENT, &C); }
//table_options(A) ::= table_options(B) DELAY NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_DELAY, &C); }
table_options(A) ::= table_options(B) FILE_FACTOR NK_FLOAT(C). { A = setTableOption(pCxt, B, TABLE_OPTION_FILE_FACTOR, &C); }
table_options(A) ::= table_options(B) ROLLUP NK_LP func_name_list(C) NK_RP. { A = setTableOption(pCxt, B, TABLE_OPTION_ROLLUP, C); }
table_options(A) ::= table_options(B) ROLLUP NK_LP rollup_func_list(C) NK_RP. { A = setTableOption(pCxt, B, TABLE_OPTION_ROLLUP, C); }
table_options(A) ::= table_options(B) TTL NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_TTL, &C); }
table_options(A) ::= table_options(B) SMA NK_LP col_name_list(C) NK_RP. { A = setTableOption(pCxt, B, TABLE_OPTION_SMA, C); }

@@ -330,6 +331,15 @@ alter_table_options(A) ::= alter_table_options(B) alter_table_option(C).
alter_table_option(A) ::= COMMENT NK_STRING(B). { A.type = TABLE_OPTION_COMMENT; A.val = B; }
alter_table_option(A) ::= TTL NK_INTEGER(B). { A.type = TABLE_OPTION_TTL; A.val = B; }

%type rollup_func_list { SNodeList* }
%destructor rollup_func_list { nodesDestroyList($$); }
rollup_func_list(A) ::= rollup_func_name(B). { A = createNodeList(pCxt, B); }
rollup_func_list(A) ::= rollup_func_list(B) NK_COMMA rollup_func_name(C). { A = addNodeToList(pCxt, B, C); }

rollup_func_name(A) ::= function_name(B). { A = createFunctionNode(pCxt, &B, NULL); }
rollup_func_name(A) ::= FIRST(B). { A = createFunctionNode(pCxt, &B, NULL); }
rollup_func_name(A) ::= LAST(B). { A = createFunctionNode(pCxt, &B, NULL); }

%type col_name_list { SNodeList* }
%destructor col_name_list { nodesDestroyList($$); }
col_name_list(A) ::= col_name(B). { A = createNodeList(pCxt, B); }
@@ -378,13 +388,6 @@ table_name_cond(A) ::= table_name(B).
from_db_opt(A) ::= . { A = createDefaultDatabaseCondValue(pCxt); }
from_db_opt(A) ::= FROM db_name(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B); }

%type func_name_list { SNodeList* }
%destructor func_name_list { nodesDestroyList($$); }
func_name_list(A) ::= func_name(B). { A = createNodeList(pCxt, B); }
func_name_list(A) ::= func_name_list(B) NK_COMMA func_name(C). { A = addNodeToList(pCxt, B, C); }

func_name(A) ::= function_name(B). { A = createFunctionNode(pCxt, &B, NULL); }

/************************************************ create index ********************************************************/
cmd ::= CREATE SMA INDEX not_exists_opt(D)
index_name(A) ON table_name(B) index_options(C). { pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, D, &A, &B, NULL, C); }
@@ -466,7 +469,7 @@ stream_options(A) ::= stream_options(B) WATERMARK duration_literal(C).

/************************************************ kill connection/query ***********************************************/
cmd ::= KILL CONNECTION NK_INTEGER(A). { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &A); }
cmd ::= KILL QUERY NK_INTEGER(A). { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_QUERY_STMT, &A); }
cmd ::= KILL QUERY NK_STRING(A). { pCxt->pRootNode = createKillQueryStmt(pCxt, &A); }
cmd ::= KILL TRANSACTION NK_INTEGER(A). { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &A); }

/************************************************ merge/redistribute/ vgroup ******************************************/

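In the kill connection/query block above, KILL QUERY now takes a string token (NK_STRING) routed to createKillQueryStmt, while KILL CONNECTION and KILL TRANSACTION keep their integer ids. A minimal usage sketch through the C client, reusing the conn handle from the earlier sketch; the id value is illustrative (in practice it would be taken from the running-query listing):

    /* Old form (integer id) is no longer accepted by this grammar:
     *   KILL QUERY 12345
     * New form (string id): */
    TAOS_RES *res = taos_query(conn, "KILL QUERY 'query-id-string'");
    taos_free_result(res);
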
@@ -804,10 +804,10 @@ SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOpti
case DB_OPTION_RETENTIONS:
((SDatabaseOptions*)pOptions)->pRetentions = pVal;
break;
// case DB_OPTION_SCHEMALESS:
// ((SDatabaseOptions*)pOptions)->schemaless = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
// ((SDatabaseOptions*)pOptions)->schemaless = 0;
// break;
// case DB_OPTION_SCHEMALESS:
// ((SDatabaseOptions*)pOptions)->schemaless = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
// ((SDatabaseOptions*)pOptions)->schemaless = 0;
// break;
default:
break;
}
@@ -867,6 +867,7 @@ SNode* createDefaultTableOptions(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
STableOptions* pOptions = (STableOptions*)nodesMakeNode(QUERY_NODE_TABLE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
// pOptions->delay = TSDB_DEFAULT_ROLLUP_DELAY;
pOptions->filesFactor = TSDB_DEFAULT_ROLLUP_FILE_FACTOR;
pOptions->ttl = TSDB_DEFAULT_TABLE_TTL;
return (SNode*)pOptions;
@@ -876,7 +877,7 @@ SNode* createAlterTableOptions(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
STableOptions* pOptions = (STableOptions*)nodesMakeNode(QUERY_NODE_TABLE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->filesFactor = -1;
pOptions->delay = -1;
pOptions->ttl = -1;
return (SNode*)pOptions;
}
@@ -890,8 +891,8 @@ SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType
sizeof(((STableOptions*)pOptions)->comment));
}
break;
case TABLE_OPTION_FILE_FACTOR:
((STableOptions*)pOptions)->filesFactor = taosStr2Double(((SToken*)pVal)->z, NULL);
case TABLE_OPTION_DELAY:
((STableOptions*)pOptions)->delay = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case TABLE_OPTION_ROLLUP:
((STableOptions*)pOptions)->pRollupFuncs = pVal;
@@ -1431,7 +1432,7 @@ SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const
CHECK_PARSER_STATUS(pCxt);
SDropStreamStmt* pStmt = (SDropStreamStmt*)nodesMakeNode(QUERY_NODE_DROP_STREAM_STMT);
CHECK_OUT_OF_MEM(pStmt);
strncpy(pStmt->streamName, pStreamName->z, pStreamName->n);
strncpy(pStmt->streamName, pStreamName->z, TMIN(pStreamName->n, sizeof(pStmt->streamName) - 1));
pStmt->ignoreNotExists = ignoreNotExists;
return (SNode*)pStmt;
}
@@ -1444,6 +1445,14 @@ SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId
return (SNode*)pStmt;
}

SNode* createKillQueryStmt(SAstCreateContext* pCxt, const SToken* pQueryId) {
CHECK_PARSER_STATUS(pCxt);
SKillQueryStmt* pStmt = (SKillQueryStmt*)nodesMakeNode(QUERY_NODE_KILL_QUERY_STMT);
CHECK_OUT_OF_MEM(pStmt);
strncpy(pStmt->queryId, pQueryId->z, TMIN(pQueryId->n, sizeof(pStmt->queryId) - 1));
return (SNode*)pStmt;
}

SNode* createBalanceVgroupStmt(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
SBalanceVgroupStmt* pStmt = (SBalanceVgroupStmt*)nodesMakeNode(QUERY_NODE_BALANCE_VGROUP_STMT);

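Both createDropStreamStmt and the new createKillQueryStmt above bound the strncpy with TMIN against the destination size, so an over-long token can no longer overflow the target buffer. The pattern in isolation, as a self-contained sketch (TMIN here is a local stand-in for the macro used in the tree):

    #include <string.h>

    #define TMIN(a, b) ((a) < (b) ? (a) : (b))

    /* Copy a length-delimited token into a fixed buffer, always NUL-terminated. */
    static void copyToken(char *dst, size_t dstSize, const char *tok, size_t tokLen) {
      size_t n = TMIN(tokLen, dstSize - 1);
      memset(dst, 0, dstSize);
      memcpy(dst, tok, n);
    }
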
@@ -68,7 +68,7 @@ static SKeyword keywordTable[] = {
{"CONTAINS", TK_CONTAINS},
{"DATABASE", TK_DATABASE},
{"DATABASES", TK_DATABASES},
{"DAYS", TK_DAYS},
// {"DAYS", TK_DAYS},
{"DBS", TK_DBS},
{"DELETE", TK_DELETE},
{"DESC", TK_DESC},
@@ -78,6 +78,7 @@ static SKeyword keywordTable[] = {
{"DNODES", TK_DNODES},
{"DOUBLE", TK_DOUBLE},
{"DROP", TK_DROP},
{"DURATION", TK_DURATION},
{"EXISTS", TK_EXISTS},
{"EXPLAIN", TK_EXPLAIN},
{"FILE_FACTOR", TK_FILE_FACTOR},

@@ -2839,6 +2839,9 @@ static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt
if (TSDB_CODE_SUCCESS == code) {
code = checTableFactorOption(pCxt, pStmt->pOptions->filesFactor);
}
// if (TSDB_CODE_SUCCESS == code) {
// code = checkRangeOption(pCxt, "delay", pStmt->pOptions->delay, TSDB_MIN_ROLLUP_DELAY, TSDB_MAX_ROLLUP_DELAY);
// }
if (TSDB_CODE_SUCCESS == code) {
code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs);
}
@@ -3081,6 +3084,7 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt,

static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStmt, SMCreateStbReq* pReq) {
pReq->igExists = pStmt->ignoreExists;
// pReq->delay = pStmt->pOptions->delay;
pReq->xFilesFactor = pStmt->pOptions->filesFactor;
pReq->ttl = pStmt->pOptions->ttl;
columnDefNodeToField(pStmt->pCols, &pReq->pColumns);
@@ -3626,9 +3630,9 @@ static int32_t translateKillConnection(STranslateContext* pCxt, SKillStmt* pStmt
return buildCmdMsg(pCxt, TDMT_MND_KILL_CONN, (FSerializeFunc)tSerializeSKillQueryReq, &killReq);
}

static int32_t translateKillQuery(STranslateContext* pCxt, SKillStmt* pStmt) {
static int32_t translateKillQuery(STranslateContext* pCxt, SKillQueryStmt* pStmt) {
SKillQueryReq killReq = {0};
killReq.queryId = pStmt->targetId;
strcpy(killReq.queryStrId, pStmt->queryId);
return buildCmdMsg(pCxt, TDMT_MND_KILL_QUERY, (FSerializeFunc)tSerializeSKillQueryReq, &killReq);
}

@@ -3970,7 +3974,7 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) {
code = translateKillConnection(pCxt, (SKillStmt*)pNode);
break;
case QUERY_NODE_KILL_QUERY_STMT:
code = translateKillQuery(pCxt, (SKillStmt*)pNode);
code = translateKillQuery(pCxt, (SKillQueryStmt*)pNode);
break;
case QUERY_NODE_KILL_TRANSACTION_STMT:
code = translateKillTransaction(pCxt, (SKillStmt*)pNode);

@@ -564,7 +564,7 @@ static const YYCODETYPE yy_lookahead[] = {
/* 1260 */ 0, 0, 40, 0, 271, 72, 0, 47, 175, 175,
/* 1270 */ 47, 47, 279, 310, 47, 0, 313, 314, 315, 316,
/* 1280 */ 317, 318, 289, 320, 47, 47, 293, 243, 175, 0,
/* 1290 */ 175, 0, 47, 0, 47, 0, 243, 47, 0, 81,
/* 1290 */ 175, 0, 47, 0, 22, 0, 243, 47, 0, 81,
/* 1300 */ 113, 160, 156, 310, 159, 0, 313, 314, 315, 316,
/* 1310 */ 317, 318, 0, 320, 152, 271, 323, 151, 0, 356,
/* 1320 */ 357, 328, 0, 279, 271, 44, 0, 0, 0, 0,
@@ -693,7 +693,7 @@ static const unsigned short int yy_shift_ofst[] = {
/* 310 */ 929, 931, 826, 875, 934, 952, 962, 965, 974, 976,
/* 320 */ 859, 935, 1260, 1261, 1222, 1263, 1193, 1266, 1220, 1093,
/* 330 */ 1223, 1224, 1227, 1094, 1275, 1237, 1238, 1113, 1289, 1115,
/* 340 */ 1291, 1245, 1293, 1247, 1295, 1250, 1298, 1218, 1141, 1145,
/* 340 */ 1291, 1245, 1293, 1272, 1295, 1250, 1298, 1218, 1141, 1145,
/* 350 */ 1187, 1146, 1305, 1312, 1162, 1166, 1318, 1322, 1281, 1326,
/* 360 */ 1327, 1328, 1329, 1330, 1331, 1334, 1335, 1336, 1338, 1339,
/* 370 */ 1340, 1341, 1343, 1344, 1345, 1347, 1348, 1309, 1351, 1352,
@@ -898,7 +898,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* BUFFER => nothing */
0, /* CACHELAST => nothing */
0, /* COMP => nothing */
0, /* DAYS => nothing */
0, /* DURATION => nothing */
0, /* NK_VARIABLE => nothing */
0, /* FSYNC => nothing */
0, /* MAXROWS => nothing */
@@ -1225,7 +1225,7 @@ static const char *const yyTokenName[] = {
/* 60 */ "BUFFER",
/* 61 */ "CACHELAST",
/* 62 */ "COMP",
/* 63 */ "DAYS",
/* 63 */ "DURATION",
/* 64 */ "NK_VARIABLE",
/* 65 */ "FSYNC",
/* 66 */ "MAXROWS",
@@ -1600,8 +1600,8 @@ static const char *const yyRuleName[] = {
/* 68 */ "db_options ::= db_options BUFFER NK_INTEGER",
/* 69 */ "db_options ::= db_options CACHELAST NK_INTEGER",
/* 70 */ "db_options ::= db_options COMP NK_INTEGER",
/* 71 */ "db_options ::= db_options DAYS NK_INTEGER",
/* 72 */ "db_options ::= db_options DAYS NK_VARIABLE",
/* 71 */ "db_options ::= db_options DURATION NK_INTEGER",
/* 72 */ "db_options ::= db_options DURATION NK_VARIABLE",
/* 73 */ "db_options ::= db_options FSYNC NK_INTEGER",
/* 74 */ "db_options ::= db_options MAXROWS NK_INTEGER",
/* 75 */ "db_options ::= db_options MINROWS NK_INTEGER",
@@ -1783,7 +1783,7 @@ static const char *const yyRuleName[] = {
/* 251 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal",
/* 252 */ "stream_options ::= stream_options WATERMARK duration_literal",
/* 253 */ "cmd ::= KILL CONNECTION NK_INTEGER",
/* 254 */ "cmd ::= KILL QUERY NK_INTEGER",
/* 254 */ "cmd ::= KILL QUERY NK_STRING",
/* 255 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
/* 256 */ "cmd ::= BALANCE VGROUP",
/* 257 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
@@ -2646,8 +2646,8 @@ static const struct {
{ 254, -3 }, /* (68) db_options ::= db_options BUFFER NK_INTEGER */
{ 254, -3 }, /* (69) db_options ::= db_options CACHELAST NK_INTEGER */
{ 254, -3 }, /* (70) db_options ::= db_options COMP NK_INTEGER */
{ 254, -3 }, /* (71) db_options ::= db_options DAYS NK_INTEGER */
{ 254, -3 }, /* (72) db_options ::= db_options DAYS NK_VARIABLE */
{ 254, -3 }, /* (71) db_options ::= db_options DURATION NK_INTEGER */
{ 254, -3 }, /* (72) db_options ::= db_options DURATION NK_VARIABLE */
{ 254, -3 }, /* (73) db_options ::= db_options FSYNC NK_INTEGER */
{ 254, -3 }, /* (74) db_options ::= db_options MAXROWS NK_INTEGER */
{ 254, -3 }, /* (75) db_options ::= db_options MINROWS NK_INTEGER */
@@ -2829,7 +2829,7 @@ static const struct {
{ 305, -4 }, /* (251) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
{ 305, -3 }, /* (252) stream_options ::= stream_options WATERMARK duration_literal */
{ 240, -3 }, /* (253) cmd ::= KILL CONNECTION NK_INTEGER */
{ 240, -3 }, /* (254) cmd ::= KILL QUERY NK_INTEGER */
{ 240, -3 }, /* (254) cmd ::= KILL QUERY NK_STRING */
{ 240, -3 }, /* (255) cmd ::= KILL TRANSACTION NK_INTEGER */
{ 240, -2 }, /* (256) cmd ::= BALANCE VGROUP */
{ 240, -4 }, /* (257) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
@@ -3326,8 +3326,8 @@ static YYACTIONTYPE yy_reduce(
{ yylhsminor.yy632 = setDatabaseOption(pCxt, yymsp[-2].minor.yy632, DB_OPTION_COMP, &yymsp[0].minor.yy0); }
yymsp[-2].minor.yy632 = yylhsminor.yy632;
break;
case 71: /* db_options ::= db_options DAYS NK_INTEGER */
case 72: /* db_options ::= db_options DAYS NK_VARIABLE */ yytestcase(yyruleno==72);
case 71: /* db_options ::= db_options DURATION NK_INTEGER */
case 72: /* db_options ::= db_options DURATION NK_VARIABLE */ yytestcase(yyruleno==72);
{ yylhsminor.yy632 = setDatabaseOption(pCxt, yymsp[-2].minor.yy632, DB_OPTION_DAYS, &yymsp[0].minor.yy0); }
yymsp[-2].minor.yy632 = yylhsminor.yy632;
break;
@@ -3907,8 +3907,8 @@ static YYACTIONTYPE yy_reduce(
case 253: /* cmd ::= KILL CONNECTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); }
break;
case 254: /* cmd ::= KILL QUERY NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_QUERY_STMT, &yymsp[0].minor.yy0); }
case 254: /* cmd ::= KILL QUERY NK_STRING */
{ pCxt->pRootNode = createKillQueryStmt(pCxt, &yymsp[0].minor.yy0); }
break;
case 255: /* cmd ::= KILL TRANSACTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); }

@@ -46,7 +46,7 @@ TEST_F(ParserInitialCTest, createBnode) {
* BUFFER value
* | CACHELAST value
* | COMP {0 | 1 | 2}
* | DAYS value
* | DURATION value
* | FSYNC value
* | MAXROWS value
* | MINROWS value
@@ -155,7 +155,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
ASSERT_EQ(req.replications, expect.replications);
ASSERT_EQ(req.strict, expect.strict);
ASSERT_EQ(req.cacheLastRow, expect.cacheLastRow);
//ASSERT_EQ(req.schemaless, expect.schemaless);
// ASSERT_EQ(req.schemaless, expect.schemaless);
ASSERT_EQ(req.ignoreExist, expect.ignoreExist);
ASSERT_EQ(req.numOfRetensions, expect.numOfRetensions);
if (expect.numOfRetensions > 0) {
@@ -202,7 +202,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
"BUFFER 64 "
"CACHELAST 2 "
"COMP 1 "
"DAYS 100 "
"DURATION 100 "
"FSYNC 100 "
"MAXROWS 1000 "
"MINROWS 100 "
@@ -223,7 +223,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
setDbDaysFunc(100);
setDbKeepFunc(1440, 300 * 60, 400 * 1440);
run("CREATE DATABASE IF NOT EXISTS wxy_db "
"DAYS 100m "
"DURATION 100m "
"KEEP 1440m,300h,400d ");
clearCreateDbReq();
}

@@ -181,7 +181,7 @@ static int16_t getUnsetSlotId(const SArray* pSlotIdsInfo) {
}

static int32_t addDataBlockSlotsImpl(SPhysiPlanContext* pCxt, SNodeList* pList, SDataBlockDescNode* pDataBlockDesc,
const char* pStmtName, bool output, bool reserve) {
const char* pStmtName, bool output, bool reserve) {
if (NULL == pList) {
return TSDB_CODE_SUCCESS;
}
@@ -463,10 +463,25 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla
return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTagScan, pPhyNode);
}

static ENodeType getScanOperatorType(EScanType scanType) {
switch (scanType) {
case SCAN_TYPE_TABLE:
return QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN;
case SCAN_TYPE_STREAM:
return QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
case SCAN_TYPE_TABLE_MERGE:
return QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN;
// return QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN;
default:
break;
}
return QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN;
}

static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode,
SPhysiNode** pPhyNode) {
STableScanPhysiNode* pTableScan =
(STableScanPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pScanLogicNode, QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN);
STableScanPhysiNode* pTableScan = (STableScanPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pScanLogicNode,
getScanOperatorType(pScanLogicNode->scanType));
if (NULL == pTableScan) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -528,12 +543,12 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan*

static int32_t createStreamScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode,
SPhysiNode** pPhyNode) {
int32_t res = createTableScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode);
if (res == TSDB_CODE_SUCCESS) {
ENodeType type = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
setNodeType(*pPhyNode, type);
}
return res;
return createTableScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode);
}

static int32_t createTableMergeScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan,
SScanLogicNode* pScanLogicNode, SPhysiNode** pPhyNode) {
return createTableScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode);
}

static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode,
@@ -547,6 +562,8 @@ static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan,
return createSystemTableScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode);
case SCAN_TYPE_STREAM:
return createStreamScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode);
case SCAN_TYPE_TABLE_MERGE:
return createTableMergeScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode);
default:
break;
}

@@ -170,8 +170,8 @@ static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) {
switch (nodeType(pNode)) {
case QUERY_NODE_LOGIC_PLAN_SCAN:
return stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode);
// case QUERY_NODE_LOGIC_PLAN_JOIN:
// return !(((SJoinLogicNode*)pNode)->isSingleTableJoin);
case QUERY_NODE_LOGIC_PLAN_JOIN:
return !(((SJoinLogicNode*)pNode)->isSingleTableJoin);
case QUERY_NODE_LOGIC_PLAN_AGG:
return !stbSplHasGatherExecFunc(((SAggLogicNode*)pNode)->pAggFuncs) && stbSplHasMultiTbScan(streamQuery, pNode);
case QUERY_NODE_LOGIC_PLAN_WINDOW: {
@@ -392,6 +392,7 @@ static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo
(SNode*)splCreateScanSubplan(pCxt, pPartWindow, SPLIT_FLAG_STABLE_SPLIT));
}
pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
++(pCxt->groupId);
return code;
}

@@ -408,6 +409,7 @@ static int32_t stbSplSplitIntervalForStream(SSplitContext* pCxt, SStableSplitInf
(SNode*)splCreateScanSubplan(pCxt, pPartWindow, SPLIT_FLAG_STABLE_SPLIT));
}
pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
++(pCxt->groupId);
return code;
}

@@ -496,6 +498,7 @@ static int32_t stbSplSplitAggNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
(SNode*)splCreateScanSubplan(pCxt, pPartAgg, SPLIT_FLAG_STABLE_SPLIT));
}
pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
++(pCxt->groupId);
return code;
}

@@ -610,6 +613,7 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
(SNode*)splCreateScanSubplan(pCxt, pPartSort, SPLIT_FLAG_STABLE_SPLIT));
}
pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
++(pCxt->groupId);
return code;
}

@@ -619,6 +623,7 @@ static int32_t stbSplSplitScanNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, pInfo->pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
}
++(pCxt->groupId);
return code;
}

@@ -642,6 +647,8 @@ static int32_t stbSplSplitScanNodeForJoin(SSplitContext* pCxt, SLogicSubplan* pS
code = nodesListMakeStrictAppend(&pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, (SLogicNode*)pScan, SPLIT_FLAG_STABLE_SPLIT));
}
pScan->scanType = SCAN_TYPE_TABLE_MERGE;
++(pCxt->groupId);
return code;
}

@@ -703,7 +710,6 @@ static int32_t stableSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
break;
}

++(pCxt->groupId);
pCxt->split = true;
return code;
}

@@ -41,5 +41,16 @@ fi
cat ../script/jenkins/basic.txt |grep -v "^#"|grep -v "^$"|sed "s/^/,,script,/" >>$case_file
grep "^python" ../system-test/fulltest.sh |sed "s/^/,,system-test,/" >>$case_file

# tar source code for run.sh to use
# if [ $ent -eq 0 ]; then
# cd ../../../
# rm -rf TDengine.tar.gz
# tar --exclude=TDengine/debug --exclude=TDengine/sim --exclude=TDengine/release -czf TDengine.tar.gz TDengine taos-connector-python
# else
# cd ../../../../
# rm -rf TDinternal.tar.gz
# tar --exclude=TDinternal/debug --exclude=TDinternal/sim --exclude=TDinternal/community/debug --exclude=TDinternal/community/release --exclude=TDinternal/community/sim -czf TDinternal.tar.gz TDinternal taos-connector-python
# fi

exit 0

@@ -255,7 +255,6 @@ function run_thread() {
$cmd # 2>/dev/null
local case_info=`echo "$line"|cut -d, -f 3,4`
local corefile=`ls $log_dir/${case_file}.coredump/`
corefile=`find $log_dir/${case_file}.coredump/ -name "core.*"`
echo -e "$case_info \e[31m failed\e[0m"
echo "=========================log============================"
cat $log_dir/$case_file.log
@@ -291,6 +290,19 @@ function run_thread() {
fi
cmd="$scpcmd:${remote_sim_tar} $log_dir/${case_file}.sim.tar.gz"
$cmd
# backup source code (disabled)
source_tar_dir=$log_dir/TDengine_${hosts[index]}
source_tar_file=TDengine.tar.gz
if [ $ent -ne 0 ]; then
source_tar_dir=$log_dir/TDinternal_${hosts[index]}
source_tar_file=TDinternal.tar.gz
fi
mkdir $source_tar_dir 2>/dev/null
if [ $? -eq 0 ]; then
cmd="$scpcmd:${workdirs[index]}/$source_tar_file $source_tar_dir"
# echo "$cmd"
# $cmd
fi
fi
done
}

@@ -42,7 +42,7 @@ class TDTestCase:
tdDnodes.start(3)

def run(self):
tdSql.execute('create database db replica 3 days 7')
tdSql.execute('create database db replica 3 duration 7')
tdSql.execute('use db')
for tid in range(1, 11):
tdSql.execute('create table tb%d(ts timestamp, i int)' % tid)

@@ -83,7 +83,7 @@ class TDTestCase:
tdSql.checkData(0, 0, np.max(floatData))

# test case: https://jira.taosdata.com:18080/browse/TD-2583
tdSql.execute("create database test days 2")
tdSql.execute("create database test duration 2")
tdSql.execute("create table car(ts timestamp, speed int)")
tdSql.execute("insert into car values(now, -1)")
tdSql.execute("insert into car values(now-10d, null)")

@@ -83,7 +83,7 @@ class TDTestCase:
tdSql.checkData(0, 0, np.min(floatData))

# test case: https://jira.taosdata.com:18080/browse/TD-2583
tdSql.execute("create database test days 2")
tdSql.execute("create database test duration 2")
tdSql.execute("create table car(ts timestamp, speed int)")
tdSql.execute("insert into car values(now, 1)")
tdSql.execute("insert into car values(now-10d, null)")

@@ -264,7 +264,7 @@ class TDTestCase:

def td4288(self):
tdLog.printNoPrefix("==========TD-4288==========")
# keep ~ [days,365000]
# keep ~ [duration,365000]
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db")
tdSql.query("show variables")

@@ -32,7 +32,7 @@ class TDTestCase:

print("==============step1")
tdSql.execute("create database if not exists demo keep 36500;");
print("==============create db demo keep 365000 days")
print("==============create db demo keep 365000 duration")
tdSql.execute("use demo;")
tdSql.execute("CREATE table if not exists test (ts timestamp, f1 int);")
print("==============create table test")

@@ -51,7 +51,7 @@ class TDTestRetetion:
def run(self):

tdLog.info("=============== step1")
tdSql.execute('create database test keep 3 days 1;')
tdSql.execute('create database test keep 3 duration 1;')
tdSql.execute('use test;')
tdSql.execute('create table test(ts timestamp,i int);')

@@ -66,7 +66,7 @@ class TDTestCase:
tdDnodes.deploy(1,cfg)
tdDnodes.startWithoutSleep(1)

tdSql.execute("create database test days 1")
tdSql.execute("create database test duration 1")
tdSql.execute("use test")

tdSql.execute("create table stb(ts timestamp, c int) tags(t int)")
@@ -85,7 +85,7 @@ class TDTestCase:
tdLog.info("================= step3")
tdSql.execute('drop database test')
for i in range(50):
tdSql.execute("create database test%d days 1" %(i))
tdSql.execute("create database test%d duration 1" %(i))
tdSql.execute("use test%d" %(i))
tdSql.execute("create table tb (ts timestamp,i int)")
for j in range(10):

@@ -56,7 +56,7 @@ class TDTestCase:
tdDnodes.deploy(1,cfg)
tdDnodes.startWithoutSleep(1)

tdSql.execute("create database test days 1 keep 15,5,10")
tdSql.execute("create database test duration 1 keep 15,5,10")
tdSql.execute("use test")

tdSql.execute("create table tb(ts timestamp, c int)")

@@ -66,7 +66,7 @@ class TDTestCase:
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"duration": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",

@@ -81,7 +81,7 @@ class JoinPerf:
"name": self.dbname,
"drop": self.drop,
"replica": 1,
"days": 10,
"duration": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",

@@ -75,7 +75,7 @@ class Taosdemo:
"name": self.dbname,
"drop": self.drop,
"replica": 1,
"days": 10,
"duration": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",

@@ -57,7 +57,7 @@ class TDTestCase:
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"duration": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",

@@ -138,7 +138,7 @@ class TDTestCase:

sqls_ls = [
'drop database if exists nsdbsql;',
'create database nsdbsql precision "ns" keep 3600 days 6 update 1;',
'create database nsdbsql precision "ns" keep 3600 duration 6 update 1;',
'use nsdbsql;',
'CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);',
'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);',

@@ -125,7 +125,7 @@ class TDTestCase:
tdSql.checkData(0, 0, 600)
# check taosdemo -s

sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 update 1;',
sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 duration 6 update 1;',
'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);',
'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);',
'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);',

@@ -60,8 +60,8 @@ class TDTestCase:
if not os.path.exists("./taosdumptest/tmp2"):
os.makedirs("./taosdumptest/tmp2")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
tdSql.execute("create database db1 days 12 keep 3640 blocks 7 ")
tdSql.execute("create database db duration 11 keep 3649 blocks 8 ")
tdSql.execute("create database db1 duration 12 keep 3640 blocks 7 ")
tdSql.execute("use db")
tdSql.execute(
"create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
@@ -102,7 +102,7 @@ class TDTestCase:
tdSql.query("show databases")
tdSql.checkRows(2)
dbresult = tdSql.queryResult
# 6--days,7--keep0,keep1,keep, 12--block,
# 6--duration,7--keep0,keep1,keep, 12--block,

isCommunity = self.checkCommunity()
print("iscommunity: %d" % isCommunity)

@@ -60,7 +60,7 @@ class TDTestCase:
def build_db(precision, start_time):
tdSql.execute("drop database if exists timedb1")
tdSql.execute(
"create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"")
"create database timedb1 duration 10 keep 365 blocks 8 precision "+"\""+precision+"\"")

tdSql.execute("use timedb1")
tdSql.execute(

@@ -30,7 +30,7 @@ class TDTestCase:
tdSql.execute(s)
s = 'drop database if exists db'
tdSql.execute(s)
s = 'create database db days 30'
s = 'create database db duration 30'
tdSql.execute(s)
s = 'use db'
tdSql.execute(s)

@@ -30,7 +30,7 @@ class TDTestCase:
tdSql.execute(s)
s = 'drop database if exists db'
tdSql.execute(s)
s = 'create database db update 1 days 30'
s = 'create database db update 1 duration 30'
tdSql.execute(s)
s = 'use db'
tdSql.execute(s)

@@ -50,7 +50,7 @@ class TDTestCase:
tdSql.execute(sql)
sql = 'drop database if exists db'
tdSql.execute(sql)
sql = 'create database db update 1 days 30;'
sql = 'create database db update 1 duration 30;'
tdSql.execute(sql)
sql = 'use db;'
tdSql.execute(sql)

@@ -49,7 +49,7 @@ class TDTestCase:
tdSql.execute(sql)
sql = 'drop database if exists db'
tdSql.execute(sql)
sql = 'create database db update 0 days 30;'
sql = 'create database db update 0 duration 30;'
tdSql.execute(sql)
sql = 'use db;'
tdSql.execute(sql)

@@ -34,7 +34,7 @@ class TDTestCase:
def run(self):
tdSql.prepare()

tdSql.execute("create database udb update 1 days 30")
tdSql.execute("create database udb update 1 duration 30")
tdSql.execute("use udb")

print("==============step 1: UPDATE THE LAST RECORD REPEATEDLY")

@@ -57,7 +57,7 @@ class TDSql:
tdLog.notice("'reset query cache' is not supported")
s = 'drop database if exists db'
self.cursor.execute(s)
s = 'create database db days 300'
s = 'create database db duration 300'
self.cursor.execute(s)
s = 'use db'
self.cursor.execute(s)

@@ -63,7 +63,7 @@ class TDTaosdemoCfg:
"name": 'db',
"drop": 'yes',
"replica": 1,
"days": 10,
"duration": 10,
"cache": 16,
"blocks": 6,
"precision": "ms",

@@ -9,7 +9,7 @@ sleep 2000
sql connect

print ============= create database
sql create database db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
sql create database db cache 2 blocks 4 duration 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
sql show databases
if $data00 != db then
return -1
@@ -87,13 +87,13 @@ sql_error alter database db quorum 4
sql_error alter database db quorum 5
sql_error alter database db quorum -1

print ============== step days
sql_error alter database db days 0
sql_error alter database db days 1
sql_error alter database db days 2
sql_error alter database db days 10
sql_error alter database db days 50
sql_error alter database db days 100
print ============== step duration
sql_error alter database db duration 0
sql_error alter database db duration 1
sql_error alter database db duration 2
sql_error alter database db duration 10
sql_error alter database db duration 50
sql_error alter database db duration 100

print ============== step keep
sql show databases

@@ -26,7 +26,7 @@ system sh/exec.sh -n dnode2 -s start
sleep 2000

print ======== step1 create db
sql create database keepdb replica 1 keep 30 days 7
sql create database keepdb replica 1 keep 30 duration 7
sql use keepdb
sql create table tb (ts timestamp, i int)

@@ -201,7 +201,7 @@ sql alter database keepdb keep 0 -x error2
return -1
error2:

sql alter database keepdb days 1 -x error3
sql alter database keepdb duration 1 -x error3
return -1
error3:

@@ -171,7 +171,7 @@ sql_error create topic t1 partitions -1;
sql_error create topic t1 partitions 10001;

print =============step3 create with db para
sql create topic db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
sql create topic db cache 2 blocks 4 duration 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
sql show databases
if $data00 != db then
return -1
@@ -199,7 +199,7 @@ if $data09 != 4 then
endi
sql drop topic db;

sql create topic db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1 partitions 7
sql create topic db cache 2 blocks 4 duration 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1 partitions 7
sql show databases
if $data00 != db then
return -1
@@ -334,19 +334,19 @@ sql_error alter topic db quorum 4
sql_error alter topic db quorum 5
sql_error alter topic db quorum -1

print ============== step days
sql_error alter database db days 0
sql_error alter database db days 1
sql_error alter database db days 2
sql_error alter database db days 10
sql_error alter database db days 50
sql_error alter database db days 100
sql_error alter topic db days 0
sql_error alter topic db days 1
sql_error alter topic db days 2
sql_error alter topic db days 10
sql_error alter topic db days 50
sql_error alter topic db days 100
print ============== step duration
sql_error alter database db duration 0
sql_error alter database db duration 1
sql_error alter database db duration 2
sql_error alter database db duration 10
sql_error alter database db duration 50
sql_error alter database db duration 100
sql_error alter topic db duration 0
sql_error alter topic db duration 1
sql_error alter topic db duration 2
sql_error alter topic db duration 10
sql_error alter topic db duration 50
sql_error alter topic db duration 100

print ============== step keep
sql show databases

@ -30,7 +30,7 @@ sleep 2000
sql connect

print ========= step1
sql create database ic1db days 7;
sql create database ic1db duration 7;
sql create table ic1db.tb(ts timestamp, s int);
sql insert into ic1db.tb values(now-30d, -30);
sql insert into ic1db.tb values(now-20d, -20);

@ -50,7 +50,7 @@ if $rows != 12 then
endi

print ========= step2
sql create database ic2db days 7;
sql create database ic2db duration 7;
sql create table ic2db.tb(ts timestamp, s int);
sql insert into ic2db.tb values(now, 0);
sql import into ic2db.tb values(now-30d, -30);
@ -30,7 +30,7 @@ system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect

sql create database ir1db days 7
sql create database ir1db duration 7
sql use ir1db
sql create table tb(ts timestamp, i bigint)
@ -20,7 +20,7 @@ $db = $dbPrefix . $i
$mt = $mtPrefix . $i

sql drop database if exists $db
sql create database $db days 10 keep 20,20,20
sql create database $db duration 10 keep 20,20,20
sql use $db

sql_error alter database $db keep "20"
@ -20,7 +20,7 @@ $db = $dbPrefix . $i
$mt = $mtPrefix . $i

sql drop database if exists $db
sql create database $db days 10 keep 20
sql create database $db duration 10 keep 20
sql use $db
sql show databases
if $rows != 1 then
@ -101,7 +101,7 @@ print db_already_exists test passed
print create_db.sim case5: db_meta_data test
# cfg params
$replica = 1 # max=3
$days = 10
$duration = 10
$keep = 365,365,365
$rows_db = 1000
$cache = 16 # 16MB

@ -111,7 +111,7 @@ $ctime = 36000 # 10 hours
$wal = 1 # valid value is 1, 2
$comp = 1 # max=32, automatically trimmed when exceeding

sql create database $db replica $replica days $days keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
sql show databases
if $rows != 1 then
return -1

@ -122,7 +122,7 @@ endi
if $data04 != $replica then
return -1
endi
if $data06 != $days then
if $data06 != $duration then
return -1
endi
if $data07 != 365,365,365 then
@ -101,7 +101,7 @@ print db_already_exists test passed
print create_db.sim case5: db_meta_data test
# cfg params
$replica = 1 # max=3
$days = 10
$duration = 10
$keep = 365
$rows_db = 1000
$cache = 16 # 16MB

@ -111,7 +111,7 @@ $ctime = 36000 # 10 hours
$wal = 1 # valid value is 1, 2
$comp = 1 # max=32, automatically trimmed when exceeding

sql create database $db replica $replica days $days keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
sql show databases
if $rows != 1 then
return -1

@ -122,7 +122,7 @@ endi
if $data04 != $replica then
return -1
endi
if $data06 != $days then
if $data06 != $duration then
return -1
endi
if $data07 != 365 then
@ -62,11 +62,11 @@ print ============= create database
# | PAGES value [64~16384, default: 256]
# | CACHELAST value [0, 1, 2, 3]
# | FSYNC value [0 ~ 180000 ms]
# | KEEP value [days, 365000]
# | KEEP value [duration, 365000]
# | REPLICA value [1 | 3]
# | WAL value [1 | 2]

sql create database db CACHELAST 3 COMP 0 DAYS 240 FSYNC 1000 MAXROWS 8000 MINROWS 10 KEEP 1000 PRECISION 'ns' REPLICA 3 WAL 2 VGROUPS 6 SINGLE_STABLE 1
sql create database db CACHELAST 3 COMP 0 DURATION 240 FSYNC 1000 MAXROWS 8000 MINROWS 10 KEEP 1000 PRECISION 'ns' REPLICA 3 WAL 2 VGROUPS 6 SINGLE_STABLE 1
sql show databases
print rows: $rows
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09

@ -92,7 +92,7 @@ endi
if $data5_db != no_strict then # strict
return -1
endi
if $data6_db != 345600 then # days
if $data6_db != 345600 then # duration
return -1
endi
if $data7_db != 1440000m,1440000m,1440000m then # keep

@ -222,11 +222,11 @@ sql_error alter database db replica 0
#sql_error alter database db quorum 4
#sql_error alter database db quorum 5

#print ============== modify days
sql_error alter database db days 480
sql_error alter database db days 360
sql_error alter database db days 0
sql_error alter database db days 14400 # set over than keep
#print ============== modify duration
sql_error alter database db duration 480
sql_error alter database db duration 360
sql_error alter database db duration 0
sql_error alter database db duration 14400 # set over than keep

print ============== modify keep
sql alter database db keep 2400
@ -15,7 +15,7 @@ $tb = $tbPrefix . $i

print =============== step1
# quorum presicion
sql create database $db vgroups 8 replica 1 days 2 keep 10 minrows 80 maxrows 10000 wal 2 fsync 1000 comp 0 cachelast 2 precision 'us'
sql create database $db vgroups 8 replica 1 duration 2 keep 10 minrows 80 maxrows 10000 wal 2 fsync 1000 comp 0 cachelast 2 precision 'us'
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09

@ -66,7 +66,7 @@ print =============== step4
sql_error drop database $db

print =============== step5
sql create database $db replica 1 days 15 keep 1500
sql create database $db replica 1 duration 15 keep 1500
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
if $data20 != $db then
@ -63,7 +63,7 @@ print ============= create database with all options
# | PAGESIZE value [1~16384, default: 4]
# | CACHELAST value [0, 1, 2, 3, default: 0]
# | COMP [0 | 1 | 2, default: 2]
# | DAYS value [60m ~ min(3650d,keep), default: 10d, unit may be minut/hour/day]
# | DURATION value [60m ~ min(3650d,keep), default: 10d, unit may be minut/hour/day]
# | FSYNC value [0 ~ 180000 ms, default: 3000]
# | MAXROWS value [200~10000, default: 4096]
# | MINROWS value [10~1000, default: 100]

@ -79,7 +79,7 @@ print ============= create database with all options
#$data2_db : vgroups
#$data3_db : ntables
#$data4_db : replica
#$data6_db : days
#$data6_db : duration
#$data7_db : keep
#$data10_db : minrows
#$data11_db : maxrows

@ -113,7 +113,7 @@ endi
if $data5_db != no_strict then # strict
return -1
endi
if $data6_db != 14400 then # days
if $data6_db != 14400 then # duration
return -1
endi
if $data7_db != 5256000m,5256000m,5256000m then # keep

@ -234,9 +234,9 @@ sql drop database db
sql_error create database db COMP 3
sql_error create database db COMP -1

#print ====> DAYS value [60m ~ min(3650d,keep), default: 10d, unit may be minut/hour/day]
#print ====> DURATION value [60m ~ min(3650d,keep), default: 10d, unit may be minut/hour/day]
#print ====> KEEP value [max(1d ~ 365000d), default: 1d, unit may be minut/hour/day]
#sql create database db DAYS 60m KEEP 60m
#sql create database db DURATION 60m KEEP 60m
#sql show databases
#print $data0_db $data1_db $data2_db $data3_db $data4_db $data5_db $data6_db $data7_db $data8_db $data9_db $data10_db $data11_db $data12_db $data13_db $data14_db $data15_db $data16_db $data17_db
#if $data6_db != 60 then

@ -246,7 +246,7 @@ sql_error create database db COMP -1
# return -1
#endi
#sql drop database db
#sql create database db DAYS 60m KEEP 1d
#sql create database db DURATION 60m KEEP 1d
#sql show databases
#print $data0_db $data1_db $data2_db $data3_db $data4_db $data5_db $data6_db $data7_db $data8_db $data9_db $data10_db $data11_db $data12_db $data13_db $data14_db $data15_db $data16_db $data17_db
#if $data6_db != 60 then

@ -255,7 +255,7 @@ sql_error create database db COMP -1
#if $data7_db != 1440,1440,1440 then
# return -1
#endi
#sql create database db DAYS 3650d KEEP 365000d
#sql create database db DURATION 3650d KEEP 365000d
#sql show databases
#print $data0_db $data1_db $data2_db $data3_db $data4_db $data5_db $data6_db $data7_db $data8_db $data9_db $data10_db $data11_db $data12_db $data13_db $data14_db $data15_db $data16_db $data17_db
#if $data6_db != 5256000 then

@ -265,10 +265,10 @@ sql_error create database db COMP -1
# return -1
#endi
#sql drop database db
#sql_error create database db DAYS -59m
#sql_error create database db DAYS 59m
#sql_error create database db DAYS 5256001m
#sql_error create database db DAYS 3651d
#sql_error create database db DURATION -59m
#sql_error create database db DURATION 59m
#sql_error create database db DURATION 5256001m
#sql_error create database db DURATION 3651d
#sql_error create database db KEEP -59m
#sql_error create database db KEEP 14399m
#sql_error create database db KEEP 525600001m
@ -5,7 +5,7 @@ sleep 50
sql connect

print =============== create database
sql create database db days 300 keep 365000d,365000d,365000d
sql create database db duration 300 keep 365000d,365000d,365000d
sql show databases
if $rows != 3 then
return -1
@ -5,7 +5,7 @@ sleep 500
sql connect

print =============== create database
sql create database d0 days 300
sql create database d0 duration 300
sql use d0

print =============== create super table and child table
@ -6,7 +6,7 @@ system sh/exec.sh -n dnode1 -s start
sql connect

print ======== step1
sql create database d1 replica 1 days 7 keep 50
sql create database d1 replica 1 duration 7 keep 50
sql use d1
sql create table tb (ts timestamp, a int)
sql insert into tb values(now-28d, -28)
@ -6,7 +6,7 @@ system sh/exec.sh -n dnode1 -s start
sql connect

print ======== step1
sql create database d1 replica 1 days 7 keep 50
sql create database d1 replica 1 duration 7 keep 50
sql use d1
sql create table tb (ts timestamp, a int)
sql insert into tb values(now-30d, -28)
@ -24,7 +24,7 @@ system sh/exec.sh -n dnode2 -s start
sleep 2000

print ======== step1 create db
sql create database commitdb replica 1 days 7 keep 30
sql create database commitdb replica 1 duration 7 keep 30
sql use commitdb
sql create table tb (ts timestamp, i int)
@ -52,7 +52,7 @@ if $data4_2 != ready then
goto step1
endi

sql create database ir2db replica 2 days 7
sql create database ir2db replica 2 duration 7
sql use ir2db
sql create table tb(ts timestamp, i bigint)
@ -58,7 +58,7 @@ if $data4_3 != ready then
goto step1
endi

sql create database ir3db replica 3 days 7
sql create database ir3db replica 3 duration 7
sql use ir3db
sql create table tb(ts timestamp, i bigint)
@ -17,7 +17,7 @@ $db = $dbPrefix . $i
$tb = $tbPrefix . $i

print =============== step1
sql create database $db replica 1 days 20 keep 2000 cache 16
sql create database $db replica 1 duration 20 keep 2000 cache 16
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
if $data00 != $db then

@ -57,7 +57,7 @@ print =============== step4
sql_error drop database $db

print =============== step5
sql create database $db replica 1 days 15 keep 1500
sql create database $db replica 1 duration 15 keep 1500
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
if $data00 != $db then
@ -48,21 +48,21 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key
#output = child.readline()
#print (output.decode())
if len(expectString) != 0:
i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=20)
else:
i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=20)

if platform.system().lower() == 'windows':
retResult = child.before
else:
retResult = child.before.decode()
print("cmd return result:\n%s\n"%retResult)
#print(child.after.decode())
# print(child.after.decode())
if i == 0:
print ('taos login success! Here can run sql, taos> ')
if len(sqlString) != 0:
child.sendline (sqlString)
w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1)
w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=10)
if platform.system().lower() == 'windows':
retResult = child.before
else:
@ -60,7 +60,7 @@ class TDTestCase:
def prepare_data(self):

tdSql.execute("drop database if exists db ")
tdSql.execute("create database if not exists db days 300")
tdSql.execute("create database if not exists db duration 300")
tdSql.execute("use db")
tdSql.execute(
'''create table stb1

@ -63,7 +63,7 @@ class TDTestCase:
def prepare_data(self):

tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db replica 1 days 300")
tdSql.execute("create database if not exists db replica 1 duration 300")
tdSql.execute("use db")
tdSql.execute(
'''create table stb1

@ -62,7 +62,7 @@ class TDTestCase:
def prepare_data(self):

tdSql.execute("drop database if exists db ")
tdSql.execute("create database if not exists db days 300")
tdSql.execute("create database if not exists db duration 300")
tdSql.execute("use db")
tdSql.execute(
'''create table stb1

@ -59,7 +59,7 @@ class TDTestCase:
def prepare_data(self):

tdSql.execute("drop database if exists db ")
tdSql.execute("create database if not exists db days 300")
tdSql.execute("create database if not exists db duration 300")
tdSql.execute("use db")
tdSql.execute(
'''create table stb1

@ -68,7 +68,7 @@ class TDTestCase:

def prepare_tag_datas(self):
# prepare datas
tdSql.execute("create database if not exists testdb keep 3650 days 1000")
tdSql.execute("create database if not exists testdb keep 3650 duration 1000")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1

@ -628,7 +628,7 @@ class TDTestCase:

def basic_sample_query(self):
tdSql.execute(" drop database if exists db ")
tdSql.execute(" create database if not exists db days 300 ")
tdSql.execute(" create database if not exists db duration 300 ")
tdSql.execute(" use db ")
tdSql.execute(
'''create table stb1
@ -104,7 +104,7 @@ class TDTestCase:
tdSql.error("drop mnode on dnode 1;")

tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db replica 1 days 300")
tdSql.execute("create database if not exists db replica 1 duration 300")
tdSql.execute("use db")
tdSql.execute(
'''create table stb1

@ -104,7 +104,7 @@ class TDTestCase:
tdSql.error("drop mnode on dnode 1;")

tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db replica 1 days 300")
tdSql.execute("create database if not exists db replica 1 duration 300")
tdSql.execute("use db")
tdSql.execute(
'''create table stb1
@ -163,7 +163,7 @@ class TDTestCase:
# fisrt add data : db\stable\childtable\general table

tdSql.execute("drop database if exists db2")
tdSql.execute("create database if not exists db2 replica 1 days 300")
tdSql.execute("create database if not exists db2 replica 1 duration 300")
tdSql.execute("use db2")
tdSql.execute(
'''create table stb1

@ -53,7 +53,7 @@ class TDTestCase:
# fisrt add data : db\stable\childtable\general table
for couti in count:
tdSql.execute("drop database if exists db%d" %couti)
tdSql.execute("create database if not exists db%d replica 1 days 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1

@ -53,7 +53,7 @@ class TDTestCase:
# fisrt add data : db\stable\childtable\general table
for couti in count:
tdSql.execute("drop database if exists db%d" %couti)
tdSql.execute("create database if not exists db%d replica 1 days 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1
@ -12,8 +12,8 @@ from util.dnodes import TDDnode
import time
import socket
import subprocess
from multiprocessing import Process
import threading as thd

class MyDnodes(TDDnodes):
def __init__(self ,dnodes_lists):
super(MyDnodes,self).__init__()

@ -30,7 +30,6 @@ class TDTestCase:
self.depoly_cluster(dnodenumber)
self.master_dnode = self.TDDnodes.dnodes[0]
self.host=self.master_dnode.cfgDict["fqdn"]

conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())

@ -50,12 +49,12 @@ class TDTestCase:
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath

def insert_data(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdSql.execute("drop database if exists db%d" %couti)
tdSql.execute("create database if not exists db%d replica 1 days 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1

@ -139,14 +138,15 @@ class TDTestCase:

tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'chenhaoran02:6030')
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'chenhaoran02:6130')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'chenhaoran02:6230')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,3,'ready')

def check3mnode1off(self):
tdSql.error("drop mnode on dnode 1;")
count=0
while count < 10:
time.sleep(1)
@ -167,17 +167,18 @@ class TDTestCase:

tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'chenhaoran02:6030')
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'chenhaoran02:6130')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'chenhaoran02:6230')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,3,'ready')

def check3mnode2off(self):
tdSql.error("drop mnode on dnode 2;")
count=0
while count < 10:
while count < 40:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
@ -192,17 +193,18 @@ class TDTestCase:

tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'chenhaoran02:6030')
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'chenhaoran02:6130')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,2,'offline')
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'chenhaoran02:6230')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,2,'follower')
tdSql.checkData(2,3,'ready')

def check3mnode3off(self):
tdSql.error("drop mnode on dnode 3;")
count=0
while count < 10:
time.sleep(1)
@ -219,13 +221,13 @@ class TDTestCase:

tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'chenhaoran02:6030')
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'chenhaoran02:6130')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,2,'follower')
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'chenhaoran02:6230')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,2,'offline')
tdSql.checkData(2,3,'ready')
@ -233,13 +235,13 @@ class TDTestCase:

def five_dnode_three_mnode(self,dnodenumber):
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'chenhaoran02:6030')
tdSql.checkData(4,1,'chenhaoran02:6430')
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.checkData(0,1,'chenhaoran02:6030')
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
|
@ -255,15 +257,27 @@ class TDTestCase:
|
|||
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
# stop and follower of mnode
|
||||
|
||||
tdLog.debug("stop and follower of mnode")
|
||||
self.TDDnodes.stoptaosd(2)
|
||||
self.check3mnode2off()
|
||||
self.TDDnodes.starttaosd(2)
|
||||
|
||||
self.TDDnodes.stoptaosd(3)
|
||||
self.check3mnode3off()
|
||||
self.TDDnodes.starttaosd(3)
|
||||
|
||||
self.TDDnodes.stoptaosd(1)
|
||||
self.check3mnode1off()
|
||||
self.TDDnodes.starttaosd(1)
|
||||
|
||||
# self.check3mnode()
|
||||
stopcount =0
|
||||
while stopcount <= 2:
|
||||
for i in range(dnodenumber):
|
||||
threads = []
|
||||
threads.append(thd.Thread(target=self.insert_data, args=(i*2,i*2+2)))
|
||||
# start_time = time.time()
|
||||
threads[0].start()
|
||||
# end_time = time.time()
|
||||
self.TDDnodes.stoptaosd(i+1)
|
||||
# if i == 1 :
|
||||
# self.check3mnode2off()
|
||||
|
@ -271,12 +285,15 @@ class TDTestCase:
|
|||
# self.check3mnode3off()
|
||||
# elif i == 0:
|
||||
# self.check3mnode1off()
|
||||
|
||||
self.TDDnodes.starttaosd(i+1)
|
||||
threads[0].join()
|
||||
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
self.check3mnode()
|
||||
|
||||
|
||||
def getConnection(self, dnode):
|
||||
host = dnode.cfgDict["fqdn"]
|
||||
port = dnode.cfgDict["serverPort"]
|
||||
|
|
|
@ -358,8 +358,8 @@ class TDTestCase:
tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.error("alter table %s.%s set tag t1 10"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t2 '20'"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t1=20"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s set tag t2='20'"%(parameterDict['dbName'], ctbName))

tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))

@ -370,9 +370,9 @@ class TDTestCase:
tdSql.query("alter table %s.%s modify column c4 binary(60)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.query("alter table %s.%s set tag t3 30"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s set tag t4 '40'"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s set tag t5 '50'"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName))

tdSql.query("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))

@ -395,7 +395,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 2: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db1', \
'dbName': 'db2', \
'dropFlag': 1, \
'vgroups': 4, \
'replica': 1, \
@ -407,8 +407,8 @@ class TDTestCase:
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath

# tdLog.info("create database, super table, child table, normal table")
# self.create_database(tdSql, parameterDict["dbName"])
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
ntbName = 'ntb2'
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10))"%(parameterDict["dbName"],ntbName))

@ -449,10 +449,10 @@ class TDTestCase:
tdSql.query("alter table %s.%s modify column c5 nchar(60)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify tag t5 nchar(60)"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.query("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.query("alter table %s.%s drop column c5new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.query("alter table %s.%s add column c5 int"%(parameterDict['dbName'], parameterDict['stbName']))
@ -508,10 +508,10 @@ class TDTestCase:
tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.error("alter table %s.%s set tag t1 11"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t2 '22'"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t3 33"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t4 '44'"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t1=20"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s set tag t2='20'"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))

tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))

@ -526,7 +526,7 @@ class TDTestCase:
tdSql.query("alter table %s.%s modify column c5 nchar(60)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.query("alter table %s.%s set tag t5 '50'"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s set tag t5='50'"%(parameterDict['dbName'], ctbName))

tdSql.query("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName']))
@ -543,7 +543,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 3: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db1', \
'dbName': 'db3', \
'dropFlag': 1, \
'vgroups': 4, \
'replica': 1, \

@ -555,7 +555,8 @@ class TDTestCase:
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath

# tdLog.info("create database, super table, child table, normal table")
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
ntbName = 'ntb3'
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10))"%(parameterDict["dbName"],ntbName))
@ -627,7 +628,7 @@ class TDTestCase:
parameterDict['stbName'] = 'stb31'
ctbName = 'stb31_0'
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (10, 100, '1000')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (10, '10', 10, '10', '10')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))

tdLog.info("create topics from child table")
columnTopicFromCtb = 'column_topic_from_ctb3'

@ -653,11 +654,11 @@ class TDTestCase:
tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.error("alter table %s.%s set tag t1 10"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t2 '20'"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t3 30"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t4 '40'"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t5 '50'"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s set tag t1=20"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s set tag t2='20'"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName))

tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
@ -676,6 +677,148 @@ class TDTestCase:

tdLog.printNoPrefix("======== test case 3 end ...... ")

def tmqCase4(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 4: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db4', \
'dropFlag': 1, \
'vgroups': 4, \
'replica': 1, \
'stbName': 'stb4', \
'ctbPrefix': 'stb4', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath

ctbName = 'stb4_0'

tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
tdSql.query("create table %s.%s using %s.%s tags (10, '10', 10, '10', '10')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))

tdLog.info("create topics from super table")
columnTopicFromStb = 'star_topic_from_stb4'

tdSql.execute("create topic %s as stable %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))

tdLog.info("======== child table test:")
tdSql.query("alter table %s.%s set tag t1=20"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t2='20'"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName))

tdLog.info("======== super table test:")
# all alter actions allow
tdSql.query("alter table %s.%s add column c6 int"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.query("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify column c5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.query("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName']))

tdLog.printNoPrefix("======== test case 4 end ...... ")
def tmqCase5(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 5: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db5', \
'dropFlag': 1, \
'vgroups': 4, \
'replica': 1, \
'stbName': 'stb5', \
'ctbPrefix': 'stb5', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath

ctbName = 'stb5_0'

tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
tdSql.query("create table %s.%s using %s.%s tags (10, '10', 10, '10', '10')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))

tdLog.info("create topics from super table")
columnTopicFromStb = 'star_topic_from_db5'

tdSql.execute("create topic %s as database %s" %(columnTopicFromStb, parameterDict['dbName']))

tdLog.info("======== child table test:")
tdSql.query("alter table %s.%s set tag t1=20"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t2='20'"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName))

tdLog.info("======== super table test:")
# all alter actions allow
tdSql.query("alter table %s.%s add column c6 int"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.query("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify column c5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName']))

tdSql.query("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName']))

tdLog.printNoPrefix("======== test case 5 end ...... ")

def run(self):
tdSql.prepare()
@ -687,9 +830,11 @@ class TDTestCase:
cfgPath = buildPath + "/../sim/psim/cfg"
tdLog.info("cfgPath: %s" % cfgPath)

self.tmqCase1(cfgPath, buildPath)
self.tmqCase2(cfgPath, buildPath)
# self.tmqCase1(cfgPath, buildPath)
# self.tmqCase2(cfgPath, buildPath)
# self.tmqCase3(cfgPath, buildPath)
self.tmqCase4(cfgPath, buildPath)
self.tmqCase5(cfgPath, buildPath)

def stop(self):
tdSql.close()
@ -0,0 +1,315 @@

import taos
import sys
import time
import socket
import os
import threading
from enum import Enum

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

class actionType(Enum):
CREATE_DATABASE = 0
CREATE_STABLE = 1
CREATE_CTABLE = 2
INSERT_DATA = 3

class TDTestCase:
hostname = socket.gethostname()
#rpcDebugFlagVal = '143'
#clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#print ("===================: ", updatecfgDict)

def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file

def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))

if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]

for root, dirs, files in os.walk(projPath):
if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath

def newcur(self,cfg,host,port):
user = "root"
password = "taosdata"
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor()
print(cur)
return cur
def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
# tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
# tdSql.query("drop table if exists %s.consumeresult "%(cdbName))

tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)

def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)

def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
tdSql.query(sql)

def selectConsumeResult(self,expectRows,cdbName='cdb'):
resultList=[]
while 1:
tdSql.query("select * from %s.consumeresult"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == expectRows:
break
else:
time.sleep(5)

for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))

return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
if valgrind == 1:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '

if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)

def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1):
if dropFlag == 1:
tsql.execute("drop database if exists %s"%(dbName))

tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica))
tdLog.debug("complete to create database %s"%(dbName))
return

def create_stable(self,tsql, dbName,stbName):
tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName))
tdLog.debug("complete to create %s.%s" %(dbName, stbName))
return

def create_ctables(self,tsql, dbName,stbName,ctbNum):
tsql.execute("use %s" %dbName)
pre_create = "create table"
sql = pre_create
#tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
for i in range(ctbNum):
sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
if (i > 0) and (i%100 == 0):
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tsql.execute(sql)

tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0):
tdLog.debug("start to insert data ............")
tsql.execute("use %s" %dbName)
pre_insert = "insert into "
sql = pre_insert

if startTs == 0:
t = time.time()
startTs = int(round(t * 1000))

#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
rowsOfSql += 1
if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
tsql.execute(sql)
rowsOfSql = 0
if j < rowsPerTbl - 1:
sql = "insert into %s_%d values " %(stbName,i)
else:
sql = "insert into "
#end sql
if sql != pre_insert:
#print("insert sql:%s"%sql)
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return

def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)

if parameterDict["actionType"] == actionType.CREATE_DATABASE:
self.create_database(tsql, parameterDict["dbName"])
elif parameterDict["actionType"] == actionType.CREATE_STABLE:
self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"])
elif parameterDict["actionType"] == actionType.CREATE_CTABLE:
self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
elif parameterDict["actionType"] == actionType.INSERT_DATA:
self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],parameterDict["batchNum"])
else:
tdLog.exit("not support's action: ", parameterDict["actionType"])

return
def tmqCase1(self, cfgPath, buildPath):
'''
Leave a TMQ process. Stop taosd, delete the data directory, restart taosd,
and restart a consumption process to complete a consumption
'''
tdLog.printNoPrefix("======== test case 1: ")

self.initConsumerTable()

# create and start thread
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db3', \
'dropFlag': 1, \
'vgroups': 4, \
'replica': 1, \
'stbName': 'stb1', \
'ctbNum': 10, \
'rowsPerTbl': 20000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath

self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"])

tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'

tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
# expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
expectrowcnt = 90000000000
topicList = topicFromStb1
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

tdLog.info("start consume processor")
pollDelay = 9000000 # Forever loop
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

time.sleep(3)
tdLog.info("================= stop dnode, and remove data file, then start dnode ===========================")
tdDnodes.stop(1)
# time.sleep(5)
dataPath = buildPath + "/../sim/dnode1/data/*"
shellCmd = 'rm -rf ' + dataPath
tdLog.info(shellCmd)
os.system(shellCmd)
tdDnodes.start(1)
time.sleep(2)
######### redo to consume
|
||||
self.initConsumerTable()
|
||||
|
||||
self.create_database(tdSql, parameterDict["dbName"])
|
||||
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
|
||||
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
|
||||
self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"])
|
||||
|
||||
tdLog.info("create topics from stb1")
|
||||
topicFromStb1 = 'topic_stb1'
|
||||
|
||||
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
|
||||
consumerId = 0
|
||||
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
|
||||
topicList = topicFromStb1
|
||||
ifcheckdata = 0
|
||||
ifManualCommit = 0
|
||||
keyList = 'group.id:cgrp1,\
|
||||
enable.auto.commit:false,\
|
||||
auto.commit.interval.ms:6000,\
|
||||
auto.offset.reset:earliest'
|
||||
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
|
||||
|
||||
tdLog.info("start consume processor")
|
||||
pollDelay = 20
|
||||
showMsg = 1
|
||||
showRow = 1
|
||||
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdLog.info("act consume rows: %d, expect consume rows: %d" % (totalConsumeRows, expectrowcnt))
        if totalConsumeRows != expectrowcnt:
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s" % topicFromStb1)
        os.system('pkill tmq_sim')

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def run(self):
        tdSql.prepare()

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        cfgPath = buildPath + "/../sim/psim/cfg"
        tdLog.info("cfgPath: %s" % cfgPath)

        self.tmqCase1(cfgPath, buildPath)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@ -116,3 +116,5 @@ python3 ./test.py -f 7-tmq/subscribeStb2.py
python3 ./test.py -f 7-tmq/subscribeStb3.py
python3 ./test.py -f 7-tmq/subscribeStb4.py
python3 ./test.py -f 7-tmq/db.py
python3 ./test.py -f 7-tmq/tmqError.py
python3 ./test.py -f 7-tmq/schema.py

@ -9,3 +9,95 @@ IF (TD_TAOS_TOOLS)
ENDIF ()

add_subdirectory(shell)
IF (TD_BUILD_HTTP)
    MESSAGE("")
    MESSAGE("${Yellow} use original embedded httpd ${ColourReset}")
    MESSAGE("")
    # ADD_SUBDIRECTORY(http)
ELSEIF(TD_BUILD_TAOSA_INTERNAL)
    MESSAGE("${Yellow} use taosa internal as httpd ${ColourReset}")
ELSE ()
    MESSAGE("")
    MESSAGE("${Green} use taosadapter as httpd, platform is ${PLATFORM_ARCH_STR} ${ColourReset}")
    EXECUTE_PROCESS(
        COMMAND git rev-parse --abbrev-ref HEAD
        RESULT_VARIABLE result_taos_version
        OUTPUT_VARIABLE taos_version
    )

    STRING(FIND ${taos_version} release is_release_branch)

    IF ("${is_release_branch}" STREQUAL "0")
        STRING(SUBSTRING "${taos_version}" 12 -1 taos_version)
        STRING(STRIP "${taos_version}" taos_version)
    ELSE ()
        STRING(CONCAT taos_version "_branch_" "${taos_version}")
        STRING(STRIP "${taos_version}" taos_version)
    ENDIF ()
    EXECUTE_PROCESS(
        COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
    )
    EXECUTE_PROCESS(
        COMMAND git rev-parse --short HEAD
        RESULT_VARIABLE commit_sha1
        OUTPUT_VARIABLE taosadapter_commit_sha1
    )
    IF ("${taosadapter_commit_sha1}" STREQUAL "")
        SET(taosadapter_commit_sha1 "unknown")
    ELSE ()
        STRING(SUBSTRING "${taosadapter_commit_sha1}" 0 7 taosadapter_commit_sha1)
        STRING(STRIP "${taosadapter_commit_sha1}" taosadapter_commit_sha1)
    ENDIF ()
    MESSAGE("${Green} taosAdapter will use ${taos_version} and commit ${taosadapter_commit_sha1} as version ${ColourReset}")
    EXECUTE_PROCESS(
        COMMAND cd ..
    )
    MESSAGE("CURRENT SOURCE DIR ${CMAKE_CURRENT_SOURCE_DIR}")
    IF (TD_LINUX)
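        # On Linux, taosadapter is built in-source with the Go toolchain as an ExternalProject;
        # version and commit are injected through -ldflags, and the release binary is compressed
        # with upx when the download succeeds (the trailing "|| :" tolerates failure).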
        include(ExternalProject)
        ExternalProject_Add(taosadapter
            PREFIX "taosadapter"
            SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
            BUILD_ALWAYS off
            DEPENDS taos
            BUILD_IN_SOURCE 1
            CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config"
            PATCH_COMMAND
            COMMAND git clean -f -d
            BUILD_COMMAND
            COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
            COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
            INSTALL_COMMAND
            COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || :
            COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
            COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
            COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
            COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
            COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
        )
    ELSEIF (TD_DARWIN)
        include(ExternalProject)
        ExternalProject_Add(taosadapter
            PREFIX "taosadapter"
            SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
            BUILD_ALWAYS off
            DEPENDS taos
            BUILD_IN_SOURCE 1
            CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config"
            PATCH_COMMAND
            COMMAND git clean -f -d
            BUILD_COMMAND
            COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
            COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
            INSTALL_COMMAND
            COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
            COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
            COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
            COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
            COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
        )
    ELSE ()
        MESSAGE("${Yellow} Windows system still use original embedded httpd ${ColourReset}")
    ENDIF ()
ENDIF ()
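
The -X linker flags above overwrite package-level string variables at link time, so they only take effect if the taosadapter version package exports variables with exactly those names. A minimal sketch of what such a package is assumed to look like (illustrative only, not taken from the taosadapter source):

    // Package version holds build metadata injected at link time, e.g.
    //   go build -ldflags "-X github.com/taosdata/taosadapter/version.Version=3.0.0.0 \
    //                      -X github.com/taosdata/taosadapter/version.CommitID=9ce3f5c"
    package version

    // Defaults reported when the binary is built without the -X flags.
    var (
        Version  = "unknown"
        CommitID = "unknown"
    )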

@ -0,0 +1 @@
Subproject commit 9ce3f5c98ef95d9c7c596c4ed7302b0ed69a92b2