Merge branch 'develop' of https://github.com/taosdata/TDengine into develop
commit f364ad55bd
@@ -15,28 +15,15 @@
 # arbitrator arbitrator_hostname:6030
 
 # the full-qualified domain name (FQDN) of dnode
-# fqdn hostname:6030
+# fqdn hostname
 
-# port for MNode connect to Client, default udp[6030-6055] tcp[6030]
+# port for MNode connect to Client, default udp/tcp [6030-6040]
 # serverPort 6030
 
-# http service port, default tcp[6020]
+# http service port, default tcp [6020]
 # httpPort 6020
 
-# set socket type ("udp" and "tcp")
-# the server and client should have the same socket type. Otherwise, connect will fail
-# sockettype udp
-
-# for the cluster version, data file's directory is configured this way
-# option mount_path tier_level
-# dataDir /mnt/disk1/taos 0
-# dataDir /mnt/disk2/taos 0
-# dataDir /mnt/disk3/taos 0
-# dataDir /mnt/disk4/taos 0
-# dataDir /mnt/disk5/taos 0
-# dataDir /mnt/disk6/taos 1
-# dataDir /mnt/disk7/taos 1
-# for the stand-alone version, data file's directory is configured this way
+# data file's directory
 # dataDir /var/lib/taos
 
 # log file's directory
@@ -46,12 +33,12 @@
 # numOfMnodes 3
 
 # optional roles for dnode. 0 - any, 1 - mnode, 2 - dnode
-# alternativeRole 0
+# role 0
 
 # number of threads per CPU core
 # numOfThreadsPerCore 1.0
 
-# number of vgroups per db
+# max number of vgroups per db
 # maxVgroupsPerDb 0
 
 # max number of tables per vnode
@@ -60,9 +47,6 @@
 # the ratio of threads responsible for querying in the total thread
 # ratioOfQueryThreads 0.5
 
-# interval of check load balance when the management node is in normal operation
-# balanceInterval 300
-
 # interval of DNode report status to MNode, unit is Second, for cluster version only
 # statusInterval 1
 
@@ -88,7 +72,7 @@
 # minIntervalTime 10
 
 # max length of an SQL
-# maxSQLLength 65380
+# maxSQLLength 65480
 
 # Support the maximum number of records allowed for super table time sorting
 # maxNumOfOrderedRes 100000
@@ -153,11 +137,8 @@
 # Stop writing data when the disk size of the log folder is less than this value
 # minimalDataDirGB 0.1
 
-# mnode take into account while balance, for cluster version only
-# mnodeEqualVnodeNum 4
-
 # number of seconds allowed for a dnode to be offline, for cluster version only
-# offlineThreshold 864000
+# offlineThreshold 8640000
 
 # start http service
 # http 1
@@ -165,7 +146,7 @@
 # start system monitor module
 # monitor 1
 
-# start http service
+# start muqq service
 # mqtt 0
 
 # mqtt uri
@@ -201,19 +182,22 @@
 # 131: output warning and error,135: output info, warning and error to log.
 # 199: output debug, info, warning and error to both screen and file
 
-# debug flag for basic utils
-# debugFlag 131
+# debug flag for all log type, take effect when non-zero value
+# debugFlag 0
 
 # debug flag for meta management messages
 # mDebugFlag 135
 
 # debug flag for dnode messages
-# dDebugFlag 131
+# dDebugFlag 135
 
-# debug flag for TDengine SDB
+# debug flag for sync module
 # sDebugFlag 135
 
-# debug flag for TDengine SDB
+# debug flag for WAL
+# wDebugFlag 135
 
+# debug flag for SDB
 # sdbDebugFlag 135
 
 # debug flag for RPC
@@ -246,6 +230,9 @@
 # debug flag for query
 # qDebugflag 131
 
+# debug flag for vnode
+# vDebugflag 131
+
 # debug flag for http server
 # tsdbDebugFlag 131
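The config change above drops the port from the fqdn entry: the dnode's own endpoint is now composed from fqdn plus serverPort, while full endpoints elsewhere are still written as "fqdn:port" and split by taosGetFqdnPortFromEp (whose declaration appears later in this commit). The following is only a minimal illustrative sketch of that split, with its own helper name and a hard-coded default of 6030 as assumptions; it is not the TDengine implementation.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    // Hedged sketch: split "fqdn:port" into its parts, falling back to a
    // default port when none is given (6030 mirrors the sample serverPort).
    static int splitEp(const char *ep, char *fqdn, uint16_t *port) {
      strcpy(fqdn, ep);
      char *sep = strchr(fqdn, ':');
      if (sep == NULL) {
        *port = 6030;                       // assumed default
        return 0;
      }
      *sep  = '\0';
      *port = (uint16_t)atoi(sep + 1);
      return 0;
    }

    int main() {
      char fqdn[128]; uint16_t port;
      splitEp("node1.example.com:6030", fqdn, &port);
      printf("%s %u\n", fqdn, port);        // node1.example.com 6030
      return 0;
    }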
@@ -358,7 +358,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     }
 
     case TSDB_SQL_CFG_DNODE: {
-      const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:1-dnode:2' / monitor 1 ";
+      const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:2-dnode:2' / monitor 1 ";
       const char* msg3 = "invalid dnode ep";
 
       /* validate the ip address */
@@ -4700,10 +4700,10 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
   } else if ((strncasecmp(cfgOptions[tokenBalance].name, pOptionToken->z, pOptionToken->n) == 0) &&
              (cfgOptions[tokenBalance].len == pOptionToken->n)) {
     SSQLToken* pValToken = &pOptions->a[2];
-    int32_t vnodeIndex = 0;
-    int32_t dnodeIndex = 0;
+    int32_t vnodeId = 0;
+    int32_t dnodeId = 0;
     strdequote(pValToken->z);
-    bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeIndex, &dnodeIndex);
+    bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeId, &dnodeId);
     if (!parseOk) {
       return TSDB_CODE_TSC_INVALID_SQL;  // options value is invalid
     }
@@ -256,11 +256,12 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
   }
   size_t numOfTables = taosArrayGetSize(tables);
 
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
   SArray* progress = taosArrayInit(numOfTables, sizeof(SSubscriptionProgress));
   for( size_t i = 0; i < numOfTables; i++ ) {
     STidTags* tt = taosArrayGet( tables, i );
     SSubscriptionProgress p = { .uid = tt->uid };
-    p.key = tscGetSubscriptionProgress(pSub, tt->uid, INT64_MIN);
+    p.key = tscGetSubscriptionProgress(pSub, tt->uid, pQueryInfo->window.skey);
     taosArrayPush(progress, &p);
   }
   taosArraySort(progress, tscCompareSubscriptionProgress);
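The hunk above changes the default starting key of each subscribed table from INT64_MIN to the subscription query's window start, so rows older than the query's start time are no longer replayed. Below is only a simplified sketch of that decision with made-up types and values; the real progress bookkeeping lives in tscGetSubscriptionProgress.

    #include <stdint.h>
    #include <stdio.h>

    // Hedged sketch: pick the initial progress key for one table.
    typedef struct { uint64_t uid; int64_t key; } SubProgress;

    static int64_t initialKey(int64_t windowSkey, int64_t storedKey, int hasStored) {
      // keep a previously stored position if there is one,
      // otherwise start at the query window's start instead of INT64_MIN
      return hasStored ? storedKey : windowSkey;
    }

    int main() {
      SubProgress p = { .uid = 42, .key = initialKey(1577808000000LL, 0, 0) };
      printf("uid=%llu start=%lld\n", (unsigned long long)p.uid, (long long)p.key);
      return 0;
    }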
@@ -174,7 +174,7 @@ bool taosCheckGlobalCfg();
 void taosSetAllDebugFlag();
 bool taosCfgDynamicOptions(char *msg);
 int  taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
-bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t *dnodeIndex);
+bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId);
 
 #ifdef __cplusplus
 }
@@ -1315,7 +1315,7 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
 *   alter dnode 1 balance "vnode:1-dnode:2"
 */
 
-bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t *dnodeIndex) {
+bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId) {
   int len = strlen(option);
   if (strncasecmp(option, "vnode:", 6) != 0) {
     return false;
@@ -1331,9 +1331,9 @@ bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t
     return false;
   }
 
-  *vnodeIndex = strtol(option + 6, NULL, 10);
-  *dnodeIndex = strtol(option + pos + 6, NULL, 10);
-  if (*vnodeIndex <= 1 || *dnodeIndex <= 0) {
+  *vnodeId = strtol(option + 6, NULL, 10);
+  *dnodeId = strtol(option + pos + 6, NULL, 10);
+  if (*vnodeId <= 1 || *dnodeId <= 0) {
     return false;
   }
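The option string checked above has the shape "vnode:<vnodeId>-dnode:<dnodeId>", as used by statements like alter dnode 1 balance "vnode:2-dnode:2"; vnodeId must be greater than 1 and dnodeId greater than 0. The snippet below is a standalone, simplified re-implementation for illustration only (its function name and the strstr-based scan are assumptions), not the production parser.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <strings.h>

    // Hedged sketch of the "vnode:<id>-dnode:<id>" format validated above.
    static bool parseBalanceOption(const char *option, int32_t *vnodeId, int32_t *dnodeId) {
      if (strncasecmp(option, "vnode:", 6) != 0) return false;
      const char *dash = strstr(option, "-dnode:");
      if (dash == NULL) return false;
      *vnodeId = (int32_t)strtol(option + 6, NULL, 10);
      *dnodeId = (int32_t)strtol(dash + 7, NULL, 10);
      return (*vnodeId > 1 && *dnodeId > 0);    // same bounds as the real check
    }

    int main() {
      int32_t v = 0, d = 0;
      printf("%d v=%d d=%d\n", parseBalanceOption("vnode:2-dnode:3", &v, &d), v, d);
      return 0;
    }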
@@ -492,6 +492,7 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
   pCfg->numOfVnodes = htonl(pCfg->numOfVnodes);
   pCfg->moduleStatus = htonl(pCfg->moduleStatus);
   pCfg->dnodeId = htonl(pCfg->dnodeId);
+  pCfg->clusterId = htonl(pCfg->clusterId);
 
   for (int32_t i = 0; i < pMnodes->nodeNum; ++i) {
     SDMMnodeInfo *pMnodeInfo = &pMnodes->nodeInfos[i];
@@ -697,6 +698,7 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
   //strcpy(pStatus->dnodeName, tsDnodeName);
   pStatus->version    = htonl(tsVersion);
   pStatus->dnodeId    = htonl(tsDnodeCfg.dnodeId);
+  pStatus->clusterId  = htonl(tsDnodeCfg.clusterId);
   strcpy(pStatus->dnodeEp, tsLocalEp);
   pStatus->lastReboot = htonl(tsRebootTime);
   pStatus->numOfCores = htons((uint16_t) tsNumOfCores);
@@ -767,6 +769,13 @@ static bool dnodeReadDnodeCfg() {
   }
   tsDnodeCfg.dnodeId = dnodeId->valueint;
 
+  cJSON* clusterId = cJSON_GetObjectItem(root, "clusterId");
+  if (!clusterId || clusterId->type != cJSON_Number) {
+    dError("failed to read dnodeCfg.json, clusterId not found");
+    goto PARSE_CFG_OVER;
+  }
+  tsDnodeCfg.clusterId = clusterId->valueint;
+
   ret = true;
 
   dInfo("read numOfVnodes successed, dnodeId:%d", tsDnodeCfg.dnodeId);
@@ -790,7 +799,8 @@ static void dnodeSaveDnodeCfg() {
   char * content = calloc(1, maxLen + 1);
 
   len += snprintf(content + len, maxLen - len, "{\n");
-  len += snprintf(content + len, maxLen - len, "  \"dnodeId\": %d\n", tsDnodeCfg.dnodeId);
+  len += snprintf(content + len, maxLen - len, "  \"dnodeId\": %d,\n", tsDnodeCfg.dnodeId);
+  len += snprintf(content + len, maxLen - len, "  \"clusterId\": %d\n", tsDnodeCfg.clusterId);
   len += snprintf(content + len, maxLen - len, "}\n");
 
   fwrite(content, 1, len, fp);
@@ -803,8 +813,9 @@ static void dnodeSaveDnodeCfg() {
 
 void dnodeUpdateDnodeCfg(SDMDnodeCfg *pCfg) {
   if (tsDnodeCfg.dnodeId == 0) {
-    dInfo("dnodeId is set to %d", pCfg->dnodeId);
+    dInfo("dnodeId is set to %d, clusterId is set to %d", pCfg->dnodeId, pCfg->clusterId);
     tsDnodeCfg.dnodeId = pCfg->dnodeId;
+    tsDnodeCfg.clusterId = pCfg->clusterId;
     dnodeSaveDnodeCfg();
   }
 }
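With these hunks the dnode now persists the clusterId it received from the mnode next to its dnodeId in dnodeCfg.json. The snippet below only reproduces the two snprintf calls from dnodeSaveDnodeCfg above with invented example values (1 and 344544551) to show the resulting file content; it is not part of the commit.

    #include <stdio.h>

    // Hedged sketch: print the JSON that dnodeSaveDnodeCfg() now writes.
    int main() {
      char content[64];
      int  len = 0;
      int  dnodeId = 1, clusterId = 344544551;    // example values
      len += snprintf(content + len, sizeof(content) - len, "{\n");
      len += snprintf(content + len, sizeof(content) - len, "  \"dnodeId\": %d,\n", dnodeId);
      len += snprintf(content + len, sizeof(content) - len, "  \"clusterId\": %d\n", clusterId);
      len += snprintf(content + len, sizeof(content) - len, "}\n");
      fputs(content, stdout);
      return 0;
    }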
@@ -377,6 +377,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_ORDER_ASC  1
 #define TSDB_ORDER_DESC 2
 
+#define TSDB_DEFAULT_CLUSTER_HASH_SIZE  1
 #define TSDB_DEFAULT_MNODES_HASH_SIZE   5
 #define TSDB_DEFAULT_DNODES_HASH_SIZE   10
 #define TSDB_DEFAULT_ACCOUNTS_HASH_SIZE 10
@@ -120,12 +120,18 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_NOT_THERE, 0, 0x0323, "sdb object
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_META_ROW, 0, 0x0324, "sdb invalid meta row")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_KEY_TYPE, 0, 0x0325, "sdb invalid key type")
 
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, 0, 0x0330, "mnode dnode already exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, 0, 0x0331, "mnode dnode not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, 0, 0x0332, "mnode vgroup not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, 0, 0x0333, "mnode cant not remove master")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, 0, 0x0334, "mnode no enough dnodes")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "mnode cluster cfg inconsistent")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, 0, 0x0330, "dnode already exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, 0, 0x0331, "dnode not exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, 0, 0x0332, "vgroup not exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, 0, 0x0333, "cant not remove master")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, 0, 0x0334, "no enough dnodes")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "cluster cfg inconsistent")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION, 0, 0x0336, "invalid dnode cfg option")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_BALANCE_ENABLED, 0, 0x0337, "balance already enabled")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_IN_DNODE, 0, 0x0338, "vgroup not in dnode")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE, 0, 0x0339, "vgroup already in dnode")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_FREE, 0, 0x033A, "dnode not avaliable")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CLUSTER_ID, 0, 0x033B, "cluster id not match")
 
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, 0, 0x0340, "mnode accounts already exist")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, 0, 0x0341, "mnode invalid account")
@@ -139,6 +139,7 @@ enum _mgmt_table {
   TSDB_MGMT_TABLE_GRANTS,
   TSDB_MGMT_TABLE_VNODES,
   TSDB_MGMT_TABLE_STREAMTABLES,
+  TSDB_MGMT_TABLE_CLUSTER,
   TSDB_MGMT_TABLE_MAX,
 };
 
@@ -149,6 +150,7 @@ enum _mgmt_table {
 
 #define TSDB_ALTER_TABLE_ADD_COLUMN    5
 #define TSDB_ALTER_TABLE_DROP_COLUMN   6
+#define TSDB_ALTER_TABLE_CHANGE_COLUMN 7
 
 #define TSDB_FILL_NONE 0
 #define TSDB_FILL_NULL 1
@@ -545,6 +547,7 @@ typedef struct {
 
 typedef struct {
   int32_t  dnodeId;
+  int32_t  clusterId;
   uint32_t moduleStatus;
   uint32_t numOfVnodes;
   uint32_t reserved;
@@ -585,6 +588,7 @@ typedef struct {
   uint16_t openVnodes;
   uint16_t numOfCores;
   float    diskAvailable;  // GB
+  int32_t  clusterId;
   uint8_t  alternativeRole;
   uint8_t  reserve2[15];
   SClusterCfg clusterCfg;
@@ -29,7 +29,7 @@ void balanceAsyncNotify();
 void    balanceSyncNotify();
 void    balanceReset();
 int32_t balanceAllocVnodes(struct SVgObj *pVgroup);
-int32_t balanceCfgDnode(struct SDnodeObj *pDnode, const char *option);
+int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId);
 int32_t balanceDropDnode(struct SDnodeObj *pDnode);
 
 #ifdef __cplusplus
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_MNODE_CLUSTER_H
+#define TDENGINE_MNODE_CLUSTER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct SClusterObj;
+
+int32_t mnodeInitCluster();
+void    mnodeCleanupCluster();
+int32_t mnodeGetClusterId();
+void    mnodeUpdateClusterId();
+void *  mnodeGetCluster(int32_t clusterId);
+void *  mnodeGetNextCluster(void *pIter, struct SClusterObj **pCluster);
+void    mnodeIncClusterRef(struct SClusterObj *pCluster);
+void    mnodeDecClusterRef(struct SClusterObj *pCluster);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
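The new header exposes an iterator plus a reference-count pair for cluster rows. A minimal usage sketch follows, assuming it is compiled inside the mnode module so both headers resolve; it mirrors the shape of mnodeUpdateClusterId() in the new source file further down and is not itself part of the commit.

    #include <stdint.h>
    #include "mnodeDef.h"
    #include "mnodeCluster.h"

    // Hedged sketch: walk to the first cluster row, read its id, and release
    // the reference taken by the fetch, as mnodeUpdateClusterId() does.
    static int32_t readClusterIdOnce(void) {
      SClusterObj *pCluster = NULL;
      mnodeGetNextCluster(NULL, &pCluster);   // NULL iterator starts at the first row
      if (pCluster == NULL) return 0;         // no cluster row yet
      int32_t id = pCluster->clusterId;
      mnodeDecClusterRef(pCluster);           // balance the ref held by the sdb fetch
      return id;
    }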
@@ -36,6 +36,14 @@ struct define notes:
 3. The fields behind the updataEnd field can be changed;
 */
 
+typedef struct SClusterObj {
+  int32_t clusterId;
+  int64_t createdTime;
+  int8_t  reserved[36];
+  int8_t  updateEnd[4];
+  int32_t refCount;
+} SClusterObj;
+
 typedef struct SDnodeObj {
   int32_t dnodeId;
   int32_t openVnodes;
@@ -23,15 +23,16 @@ extern "C" {
 struct SMnodeMsg;
 
 typedef enum {
-  SDB_TABLE_DNODE   = 0,
-  SDB_TABLE_MNODE   = 1,
-  SDB_TABLE_ACCOUNT = 2,
-  SDB_TABLE_USER    = 3,
-  SDB_TABLE_DB      = 4,
-  SDB_TABLE_VGROUP  = 5,
-  SDB_TABLE_STABLE  = 6,
-  SDB_TABLE_CTABLE  = 7,
-  SDB_TABLE_MAX     = 8
+  SDB_TABLE_CLUSTER = 0,
+  SDB_TABLE_DNODE   = 1,
+  SDB_TABLE_MNODE   = 2,
+  SDB_TABLE_ACCOUNT = 3,
+  SDB_TABLE_USER    = 4,
+  SDB_TABLE_DB      = 5,
+  SDB_TABLE_VGROUP  = 6,
+  SDB_TABLE_STABLE  = 7,
+  SDB_TABLE_CTABLE  = 8,
+  SDB_TABLE_MAX     = 9
 } ESdbTable;
 
 typedef enum {
@@ -64,7 +64,7 @@ static int32_t mnodeAcctActionUpdate(SSdbOper *pOper) {
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t mnodeActionActionEncode(SSdbOper *pOper) {
+static int32_t mnodeAcctActionEncode(SSdbOper *pOper) {
   SAcctObj *pAcct = pOper->pObj;
   memcpy(pOper->rowData, pAcct, tsAcctUpdateSize);
   pOper->rowSize = tsAcctUpdateSize;
@@ -109,7 +109,7 @@ int32_t mnodeInitAccts() {
     .insertFp   = mnodeAcctActionInsert,
     .deleteFp   = mnodeAcctActionDelete,
     .updateFp   = mnodeAcctActionUpdate,
-    .encodeFp   = mnodeActionActionEncode,
+    .encodeFp   = mnodeAcctActionEncode,
     .decodeFp   = mnodeAcctActionDecode,
     .destroyFp  = mnodeAcctActionDestroy,
     .restoredFp = mnodeAcctActionRestored
@@ -28,7 +28,7 @@ void balanceCleanUp() {}
 void    balanceAsyncNotify() {}
 void    balanceSyncNotify() {}
 void    balanceReset() {}
-int32_t balanceCfgDnode(struct SDnodeObj *pDnode, const char *option) { return TSDB_CODE_SYN_NOT_ENABLED; }
+int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId) { return TSDB_CODE_SYN_NOT_ENABLED; }
 
 int32_t balanceAllocVnodes(SVgObj *pVgroup) {
   void * pIter = NULL;
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define _DEFAULT_SOURCE
+#include "os.h"
+#include "taoserror.h"
+#include "ttime.h"
+#include "dnode.h"
+#include "mnodeDef.h"
+#include "mnodeInt.h"
+#include "mnodeCluster.h"
+#include "mnodeSdb.h"
+#include "mnodeShow.h"
+#include "tglobal.h"
+
+static void *  tsClusterSdb = NULL;
+static int32_t tsClusterUpdateSize;
+static int32_t tsClusterId;
+static int32_t mnodeCreateCluster();
+
+static int32_t mnodeGetClusterMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
+static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows, void *pConn);
+
+static int32_t mnodeClusterActionDestroy(SSdbOper *pOper) {
+  tfree(pOper->pObj);
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t mnodeClusterActionInsert(SSdbOper *pOper) {
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t mnodeClusterActionDelete(SSdbOper *pOper) {
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t mnodeClusterActionUpdate(SSdbOper *pOper) {
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t mnodeClusterActionEncode(SSdbOper *pOper) {
+  SClusterObj *pCluster = pOper->pObj;
+  memcpy(pOper->rowData, pCluster, tsClusterUpdateSize);
+  pOper->rowSize = tsClusterUpdateSize;
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t mnodeClusterActionDecode(SSdbOper *pOper) {
+  SClusterObj *pCluster = (SClusterObj *) calloc(1, sizeof(SClusterObj));
+  if (pCluster == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY;
+
+  memcpy(pCluster, pOper->rowData, tsClusterUpdateSize);
+  pOper->pObj = pCluster;
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t mnodeClusterActionRestored() {
+  int32_t numOfRows = sdbGetNumOfRows(tsClusterSdb);
+  if (numOfRows <= 0 && dnodeIsFirstDeploy()) {
+    mInfo("dnode first deploy, create cluster");
+    int32_t code = mnodeCreateCluster();
+    if (code != TSDB_CODE_SUCCESS) {
+      mError("failed to create cluster, reason:%s", tstrerror(code));
+      return code;
+    }
+  }
+
+  mnodeUpdateClusterId();
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t mnodeInitCluster() {
+  SClusterObj tObj;
+  tsClusterUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj;
+
+  SSdbTableDesc tableDesc = {
+    .tableId      = SDB_TABLE_CLUSTER,
+    .tableName    = "cluster",
+    .hashSessions = TSDB_DEFAULT_CLUSTER_HASH_SIZE,
+    .maxRowSize   = tsClusterUpdateSize,
+    .refCountPos  = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
+    .keyType      = SDB_KEY_INT,
+    .insertFp     = mnodeClusterActionInsert,
+    .deleteFp     = mnodeClusterActionDelete,
+    .updateFp     = mnodeClusterActionUpdate,
+    .encodeFp     = mnodeClusterActionEncode,
+    .decodeFp     = mnodeClusterActionDecode,
+    .destroyFp    = mnodeClusterActionDestroy,
+    .restoredFp   = mnodeClusterActionRestored
+  };
+
+  tsClusterSdb = sdbOpenTable(&tableDesc);
+  if (tsClusterSdb == NULL) {
+    mError("table:%s, failed to create hash", tableDesc.tableName);
+    return -1;
+  }
+
+  mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_CLUSTER, mnodeGetClusterMeta);
+  mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_CLUSTER, mnodeRetrieveClusters);
+
+  mDebug("table:%s, hash is created", tableDesc.tableName);
+  return TSDB_CODE_SUCCESS;
+}
+
+void mnodeCleanupCluster() {
+  sdbCloseTable(tsClusterSdb);
+  tsClusterSdb = NULL;
+}
+
+void *mnodeGetCluster(int32_t clusterId) {
+  return sdbGetRow(tsClusterSdb, &clusterId);
+}
+
+void *mnodeGetNextCluster(void *pIter, SClusterObj **pCluster) {
+  return sdbFetchRow(tsClusterSdb, pIter, (void **)pCluster);
+}
+
+void mnodeIncClusterRef(SClusterObj *pCluster) {
+  sdbIncRef(tsClusterSdb, pCluster);
+}
+
+void mnodeDecClusterRef(SClusterObj *pCluster) {
+  sdbDecRef(tsClusterSdb, pCluster);
+}
+
+static int32_t mnodeCreateCluster() {
+  int32_t numOfClusters = sdbGetNumOfRows(tsClusterSdb);
+  if (numOfClusters != 0) return TSDB_CODE_SUCCESS;
+
+  SClusterObj *pCluster = malloc(sizeof(SClusterObj));
+  memset(pCluster, 0, sizeof(SClusterObj));
+  pCluster->createdTime = taosGetTimestampMs();
+  pCluster->clusterId = labs((pCluster->createdTime >> 32) & (pCluster->createdTime)) | (*(int32_t*)tsFirst);
+
+  SSdbOper oper = {
+    .type  = SDB_OPER_GLOBAL,
+    .table = tsClusterSdb,
+    .pObj  = pCluster,
+  };
+
+  return sdbInsertRow(&oper);
+}
+
+int32_t mnodeGetClusterId() {
+  return tsClusterId;
+}
+
+void mnodeUpdateClusterId() {
+  SClusterObj *pCluster = NULL;
+  mnodeGetNextCluster(NULL, &pCluster);
+  if (pCluster != NULL) {
+    tsClusterId = pCluster->clusterId;
+    mnodeDecClusterRef(pCluster);
+    mInfo("cluster id is %d", tsClusterId);
+  } else {
+    //assert(false);
+  }
+}
+
+static int32_t mnodeGetClusterMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
+  int32_t  cols = 0;
+  SSchema *pSchema = pMeta->schema;
+
+  pShow->bytes[cols] = 4;
+  pSchema[cols].type = TSDB_DATA_TYPE_INT;
+  strcpy(pSchema[cols].name, "clusterId");
+  pSchema[cols].bytes = htons(pShow->bytes[cols]);
+  cols++;
+
+  pShow->bytes[cols] = 8;
+  pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP;
+  strcpy(pSchema[cols].name, "create_time");
+  pSchema[cols].bytes = htons(pShow->bytes[cols]);
+  cols++;
+
+  pMeta->numOfColumns = htons(cols);
+  strcpy(pMeta->tableId, "show cluster");
+  pShow->numOfColumns = cols;
+
+  pShow->offset[0] = 0;
+  for (int32_t i = 1; i < cols; ++i) {
+    pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1];
+  }
+
+  pShow->numOfRows = 1;
+  pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
+
+  return 0;
+}
+
+static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
+  int32_t numOfRows = 0;
+  int32_t cols = 0;
+  char *  pWrite;
+  SClusterObj *pCluster = NULL;
+
+  while (numOfRows < rows) {
+    pShow->pIter = mnodeGetNextCluster(pShow->pIter, &pCluster);
+    if (pCluster == NULL) break;
+
+    cols = 0;
+
+    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+    *(int32_t *) pWrite = pCluster->clusterId;
+    cols++;
+
+    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+    *(int32_t *) pWrite = pCluster->createdTime;
+    cols++;
+
+    mnodeDecClusterRef(pCluster);
+    numOfRows++;
+  }
+
+  pShow->numOfReads += numOfRows;
+  return numOfRows;
+}
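In mnodeCreateCluster above, the new cluster's id is derived from the creation timestamp and the first four bytes of the first endpoint string. The standalone sketch below reproduces that expression for illustration only: tsFirst is replaced by a hard-coded example endpoint, memcpy is used instead of the pointer cast, and a 64-bit long is assumed for labs(), so it is an adaptation rather than the production code.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>

    // Hedged sketch of the clusterId expression in mnodeCreateCluster().
    int main() {
      const char *tsFirst = "node1.example.com:6030";    // assumed first EP
      int64_t createdTime = (int64_t)time(NULL) * 1000;  // ms, like taosGetTimestampMs()

      int32_t epBits;
      memcpy(&epBits, tsFirst, sizeof(epBits));          // first 4 bytes of the EP

      // mix high and low halves of the timestamp, then OR in the EP bytes
      int32_t clusterId = (int32_t)(labs((createdTime >> 32) & createdTime) | epBits);
      printf("clusterId = %" PRId32 "\n", clusterId);
      return 0;
    }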
@@ -37,6 +37,7 @@
 #include "mnodeVgroup.h"
 #include "mnodeWrite.h"
 #include "mnodePeer.h"
+#include "mnodeCluster.h"
 
 int32_t tsAccessSquence = 0;
 static void *tsDnodeSdb = NULL;
@@ -295,10 +296,19 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
   }
 
   SRpcEpSet epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
-  mnodeDecDnodeRef(pDnode);
 
   if (strncasecmp(pCmCfgDnode->config, "balance", 7) == 0) {
-    return balanceCfgDnode(pDnode, pCmCfgDnode->config + 8);
+    int32_t vnodeId = 0;
+    int32_t dnodeId = 0;
+    bool parseOk = taosCheckBalanceCfgOptions(pCmCfgDnode->config + 8, &vnodeId, &dnodeId);
+    if (!parseOk) {
+      mnodeDecDnodeRef(pDnode);
+      return TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION;
+    }
+
+    int32_t code = balanceAlterDnode(pDnode, vnodeId, dnodeId);
+    mnodeDecDnodeRef(pDnode);
+    return code;
   } else {
     SMDCfgDnodeMsg *pMdCfgDnode = rpcMallocCont(sizeof(SMDCfgDnodeMsg));
     strcpy(pMdCfgDnode->ep, pCmCfgDnode->ep);
@@ -314,6 +324,7 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
 
     mInfo("dnode:%s, is configured by %s", pCmCfgDnode->ep, pMsg->pUser->user);
     dnodeSendMsgToDnode(&epSet, &rpcMdCfgDnodeMsg);
+    mnodeDecDnodeRef(pDnode);
    return TSDB_CODE_SUCCESS;
   }
 }
@@ -345,6 +356,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
   pStatus->moduleStatus = htonl(pStatus->moduleStatus);
   pStatus->lastReboot   = htonl(pStatus->lastReboot);
   pStatus->numOfCores   = htons(pStatus->numOfCores);
+  pStatus->clusterId    = htonl(pStatus->clusterId);
 
   uint32_t version = htonl(pStatus->version);
   if (version != tsVersion) {
@@ -374,10 +386,16 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
   pDnode->moduleStatus = pStatus->moduleStatus;
 
   if (pStatus->dnodeId == 0) {
-    mDebug("dnode:%d %s, first access", pDnode->dnodeId, pDnode->dnodeEp);
+    mDebug("dnode:%d %s, first access, set clusterId %d", pDnode->dnodeId, pDnode->dnodeEp, mnodeGetClusterId());
+  } else {
+    if (pStatus->clusterId != mnodeGetClusterId()) {
+      mError("dnode:%d, input clusterId %d not match with exist %d", pDnode->dnodeId, pStatus->clusterId,
+             mnodeGetClusterId());
+      return TSDB_CODE_MND_INVALID_CLUSTER_ID;
   } else {
     mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess);
   }
+  }
 
   int32_t openVnodes = htons(pStatus->openVnodes);
   int32_t contLen = sizeof(SDMStatusRsp) + openVnodes * sizeof(SDMVgroupAccess);
@@ -390,6 +408,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
   pRsp->dnodeCfg.dnodeId      = htonl(pDnode->dnodeId);
   pRsp->dnodeCfg.moduleStatus = htonl((int32_t)pDnode->isMgmt);
   pRsp->dnodeCfg.numOfVnodes  = htonl(openVnodes);
+  pRsp->dnodeCfg.clusterId    = htonl(mnodeGetClusterId());
   SDMVgroupAccess *pAccess = (SDMVgroupAccess *)((char *)pRsp + sizeof(SDMStatusRsp));
 
   for (int32_t j = 0; j < openVnodes; ++j) {
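Taken together, these hunks add a clusterId handshake: on a dnode's first status report the mnode hands out its clusterId in the status response, and every later report carrying a different clusterId is rejected with TSDB_CODE_MND_INVALID_CLUSTER_ID. The snippet below is only a self-contained simulation of that decision with plain functions and a placeholder error value of -1; names and values are assumptions, not the mnode API.

    #include <stdint.h>
    #include <stdio.h>

    // Hedged sketch of the handshake: assign on first access, reject mismatches later.
    static int32_t mnodeClusterId = 344544551;   // assumed existing cluster

    static int32_t handleStatus(int32_t dnodeId, int32_t reportedClusterId, int32_t *assigned) {
      if (dnodeId == 0) {                        // first access: hand out the cluster id
        *assigned = mnodeClusterId;
        return 0;
      }
      if (reportedClusterId != mnodeClusterId) return -1;  // foreign dnode rejected
      *assigned = mnodeClusterId;
      return 0;
    }

    int main() {
      int32_t given = 0;
      printf("first:    %d (clusterId=%d)\n", handleStatus(0, 0, &given), given);
      printf("match:    %d\n", handleStatus(5, 344544551, &given));
      printf("mismatch: %d\n", handleStatus(5, 123, &given));
      return 0;
    }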
@@ -32,6 +32,7 @@
 #include "mnodeVgroup.h"
 #include "mnodeUser.h"
 #include "mnodeTable.h"
+#include "mnodeCluster.h"
 #include "mnodeShow.h"
 #include "mnodeProfile.h"
 
@@ -46,6 +47,7 @@ static bool tsMgmtIsRunning = false;
 
 static const SMnodeComponent tsMnodeComponents[] = {
   {"profile", mnodeInitProfile, mnodeCleanupProfile},
+  {"cluster", mnodeInitCluster, mnodeCleanupCluster},
   {"accts",   mnodeInitAccts,   mnodeCleanupAccts},
   {"users",   mnodeInitUsers,   mnodeCleanupUsers},
   {"dnodes",  mnodeInitDnodes,  mnodeCleanupDnodes},
@@ -29,6 +29,7 @@
 #include "mnodeInt.h"
 #include "mnodeMnode.h"
 #include "mnodeDnode.h"
+#include "mnodeCluster.h"
 #include "mnodeSdb.h"
 
 #define SDB_TABLE_LEN 12
@@ -214,6 +215,7 @@ void sdbUpdateMnodeRoles() {
     }
   }
 
+  mnodeUpdateClusterId();
   mnodeUpdateMnodeEpSet();
 }
 
@@ -103,6 +103,8 @@ static char *mnodeGetShowType(int32_t showType) {
     case TSDB_MGMT_TABLE_SCORES:       return "show scores";
     case TSDB_MGMT_TABLE_GRANTS:       return "show grants";
     case TSDB_MGMT_TABLE_VNODES:       return "show vnodes";
+    case TSDB_MGMT_TABLE_CLUSTER:      return "show clusters";
+    case TSDB_MGMT_TABLE_STREAMTABLES: return "show streamtables";
     default:                           return "undefined";
   }
 }
@@ -1223,6 +1223,55 @@ static int32_t mnodeDropSuperTableColumn(SMnodeMsg *pMsg, char *colName) {
   return code;
 }
 
+static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
+  SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
+  mLInfo("app:%p:%p, stable %s, change column result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId,
+         tstrerror(code));
+  return code;
+}
+
+static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
+  SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
+  int32_t col = mnodeFindSuperTableColumnIndex(pStable, oldName);
+  if (col < 0) {
+    mError("app:%p:%p, stable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg,
+           pStable->info.tableId, oldName, newName);
+    return TSDB_CODE_MND_FIELD_NOT_EXIST;
+  }
+
+  // int32_t rowSize = 0;
+  uint32_t len = strlen(newName);
+  if (len >= TSDB_COL_NAME_LEN) {
+    return TSDB_CODE_MND_COL_NAME_TOO_LONG;
+  }
+
+  if (mnodeFindSuperTableColumnIndex(pStable, newName) >= 0) {
+    return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
+  }
+
+  // update
+  SSchema *schema = (SSchema *) (pStable->schema + col);
+  tstrncpy(schema->name, newName, sizeof(schema->name));
+
+  mInfo("app:%p:%p, stable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId,
+        oldName, newName);
+
+  SSdbOper oper = {
+    .type  = SDB_OPER_GLOBAL,
+    .table = tsSuperTableSdb,
+    .pObj  = pStable,
+    .pMsg  = pMsg,
+    .cb    = mnodeChangeSuperTableColumnCb
+  };
+
+  int32_t code = sdbUpdateRow(&oper);
+  if (code == TSDB_CODE_SUCCESS) {
+    code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
+  }
+
+  return code;
+}
+
 // show super tables
 static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
   SDbObj *pDb = mnodeGetDb(pShow->db);
@@ -1405,6 +1454,9 @@ static int32_t mnodeSetSchemaFromSuperTable(SSchema *pSchema, SSuperTableObj *pT
 static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
   SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
   STableMetaMsg *pMeta = rpcMallocCont(sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16));
+  if (pMeta == NULL) {
+    return TSDB_CODE_MND_OUT_OF_MEMORY;
+  }
   pMeta->uid      = htobe64(pTable->uid);
   pMeta->sversion = htons(pTable->sversion);
   pMeta->tversion = htons(pTable->tversion);
@@ -1977,6 +2029,48 @@ static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) {
   return code;
 }
 
+static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
+  SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
+  int32_t col = mnodeFindNormalTableColumnIndex(pTable, oldName);
+  if (col < 0) {
+    mError("app:%p:%p, ctable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg,
+           pTable->info.tableId, oldName, newName);
+    return TSDB_CODE_MND_FIELD_NOT_EXIST;
+  }
+
+  // int32_t rowSize = 0;
+  uint32_t len = strlen(newName);
+  if (len >= TSDB_COL_NAME_LEN) {
+    return TSDB_CODE_MND_COL_NAME_TOO_LONG;
+  }
+
+  if (mnodeFindNormalTableColumnIndex(pTable, newName) >= 0) {
+    return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
+  }
+
+  // update
+  SSchema *schema = (SSchema *) (pTable->schema + col);
+  tstrncpy(schema->name, newName, sizeof(schema->name));
+
+  mInfo("app:%p:%p, ctable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
+        oldName, newName);
+
+  SSdbOper oper = {
+    .type  = SDB_OPER_GLOBAL,
+    .table = tsChildTableSdb,
+    .pObj  = pTable,
+    .pMsg  = pMsg,
+    .cb    = mnodeAlterNormalTableColumnCb
+  };
+
+  int32_t code = sdbUpdateRow(&oper);
+  if (code == TSDB_CODE_SUCCESS) {
+    code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
+  }
+
+  return code;
+}
+
 static int32_t mnodeSetSchemaFromNormalTable(SSchema *pSchema, SChildTableObj *pTable) {
   int32_t numOfCols = pTable->numOfColumns;
   for (int32_t i = 0; i < numOfCols; ++i) {
@@ -2596,6 +2690,8 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
       code = mnodeAddSuperTableColumn(pMsg, pAlter->schema, 1);
     } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
       code = mnodeDropSuperTableColumn(pMsg, pAlter->schema[0].name);
+    } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
+      code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
     } else {
     }
   } else {
@@ -2606,6 +2702,8 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
       code = mnodeAddNormalTableColumn(pMsg, pAlter->schema, 1);
     } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
       code = mnodeDropNormalTableColumn(pMsg, pAlter->schema[0].name);
+    } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
+      code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
     } else {
     }
   }
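Both new change-column handlers above follow the same pattern: locate the old column, reject over-long or duplicate names, overwrite the schema entry in place, then persist the row through sdbUpdateRow. The snippet below isolates just the in-place rename step as a self-contained sketch; the struct, the 65-byte name length (standing in for TSDB_COL_NAME_LEN), and the negative return codes are assumptions, and the sdb persistence step is deliberately omitted.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Hedged sketch of the rename step shared by the two mnodeChange*Column functions.
    typedef struct { char name[65]; int8_t type; } Schema;

    static int renameColumn(Schema *schema, int numOfCols, const char *oldName, const char *newName) {
      if (strlen(newName) >= sizeof(schema[0].name)) return -1;   // name too long
      int col = -1;
      for (int i = 0; i < numOfCols; ++i) {
        if (strcmp(schema[i].name, newName) == 0) return -2;      // new name already exists
        if (strcmp(schema[i].name, oldName) == 0) col = i;
      }
      if (col < 0) return -3;                                     // old column not found
      strncpy(schema[col].name, newName, sizeof(schema[col].name) - 1);
      schema[col].name[sizeof(schema[col].name) - 1] = '\0';
      return 0;
    }

    int main() {
      Schema cols[2] = { { "ts", 9 }, { "speed", 4 } };
      printf("%d -> %s\n", renameColumn(cols, 2, "speed", "velocity"), cols[1].name);
      return 0;
    }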
@@ -42,7 +42,7 @@ typedef struct SSqlGroupbyExpr {
 } SSqlGroupbyExpr;
 
 typedef struct SPosInfo {
-  int16_t pageId;
+  int32_t pageId;
   int16_t rowId;
 } SPosInfo;
 
@@ -5913,8 +5913,10 @@ _cleanup_qinfo:
   tsdbDestroyTableGroup(pTableGroupInfo);
 
 _cleanup_query:
+  if (pGroupbyExpr != NULL) {
     taosArrayDestroy(pGroupbyExpr->columnInfo);
-    tfree(pGroupbyExpr);
+    free(pGroupbyExpr);
+  }
   tfree(pTagCols);
   for (int32_t i = 0; i < numOfOutput; ++i) {
     SExprInfo* pExprInfo = &pExprs[i];
@@ -52,7 +52,7 @@ int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->tota
 #define FILE_SIZE_ON_DISK(_r) (NUM_OF_PAGES_ON_DISK(_r) * (_r)->pageSize)
 
 static int32_t createDiskResidesBuf(SDiskbasedResultBuf* pResultBuf) {
-  pResultBuf->fd = open(pResultBuf->path, O_CREAT | O_RDWR, 0666);
+  pResultBuf->fd = open(pResultBuf->path, O_CREAT | O_RDWR | O_TRUNC, 0666);
   if (!FD_VALID(pResultBuf->fd)) {
     qError("failed to create tmp file: %s on disk. %s", pResultBuf->path, strerror(errno));
     return TAOS_SYSTEM_ERROR(errno);
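Adding O_TRUNC means that if the temporary result-buffer file already exists it is cut back to length zero on open, so stale pages from an earlier run cannot be read back. A small POSIX illustration of that flag's effect follows; the path is just an example and the snippet is not part of the commit.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    // Hedged illustration: reopening with O_TRUNC discards previous contents.
    int main() {
      const char *path = "/tmp/tsdb_resultbuf_example";
      int fd = open(path, O_CREAT | O_RDWR, 0666);
      write(fd, "stale", 5);
      close(fd);

      fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0666);   // same flags as the new code
      struct stat st;
      fstat(fd, &st);
      printf("size after O_TRUNC open: %lld\n", (long long)st.st_size);  // 0
      close(fd);
      unlink(path);
      return 0;
    }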
@@ -41,6 +41,9 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
   pWindowResInfo->type = type;
   _hash_fn_t fn = taosGetDefaultHashFunction(type);
   pWindowResInfo->hashList = taosHashInit(threshold, fn, false);
+  if (pWindowResInfo->hashList == NULL) {
+    return TSDB_CODE_QRY_OUT_OF_MEMORY;
+  }
 
   pWindowResInfo->curIndex = -1;
   pWindowResInfo->size     = 0;
@@ -660,7 +660,7 @@ static SRpcConn *rpcAllocateClientConn(SRpcInfo *pRpc) {
     pConn->spi = pRpc->spi;
     pConn->encrypt = pRpc->encrypt;
     if (pConn->spi) memcpy(pConn->secret, pRpc->secret, TSDB_KEY_LEN);
-    tDebug("%s %p client connection is allocated", pRpc->label, pConn);
+    tDebug("%s %p client connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
   }
 
   return pConn;
@@ -721,7 +721,7 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
   }
 
   taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES);
-  tDebug("%s %p server connection is allocated", pRpc->label, pConn);
+  tDebug("%s %p server connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
 }
 
 return pConn;
@@ -848,6 +848,16 @@ static int rpcProcessRspHead(SRpcConn *pConn, SRpcHead *pHead) {
     return TSDB_CODE_RPC_ALREADY_PROCESSED;
   }
 
+  if (pHead->code == TSDB_CODE_RPC_MISMATCHED_LINK_ID) {
+    tDebug("%s, mismatched linkUid, link shall be restarted", pConn->info);
+    pConn->secured = 0;
+    ((SRpcHead *)pConn->pReqMsg)->destId = 0;
+    rpcSendMsgToPeer(pConn, pConn->pReqMsg, pConn->reqMsgLen);
+    if (pConn->connType != RPC_CONN_TCPC)
+      pConn->pTimer = taosTmrStart(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl);
+    return TSDB_CODE_RPC_ALREADY_PROCESSED;
+  }
+
   if (pHead->code == TSDB_CODE_RPC_ACTION_IN_PROGRESS) {
     if (pConn->tretry <= tsRpcMaxRetry) {
       tDebug("%s, peer is still processing the transaction, retry:%d", pConn->info, pConn->tretry);
@@ -1218,7 +1218,9 @@ static void *tsdbInsertTableAct(STsdbRepo *pRepo, int8_t act, void *buf, STable
 static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable) {
   int   tlen = tsdbGetTableEncodeSize(TSDB_DROP_META, pTable);
   void *buf = tsdbAllocBytes(pRepo, tlen);
-  ASSERT(buf != NULL);
+  if (buf == NULL) {
+    return -1;
+  }
 
   void *pBuf = buf;
   if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
@ -147,6 +147,7 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
|
||||||
if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) goto _err;
|
if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) goto _err;
|
||||||
pFile->info.size = TSDB_FILE_HEAD_SIZE;
|
pFile->info.size = TSDB_FILE_HEAD_SIZE;
|
||||||
pFile->info.magic = TSDB_FILE_INIT_MAGIC;
|
pFile->info.magic = TSDB_FILE_INIT_MAGIC;
|
||||||
|
pFile->info.len = 0;
|
||||||
if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
|
if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -302,6 +303,10 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
|
||||||
memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
|
memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (helperType(pHelper) == TSDB_WRITE_HELPER && pHelper->curCompIdx.hasLast) {
|
||||||
|
pHelper->hasOldLastBlock = true;
|
||||||
|
}
|
||||||
|
|
||||||
helperSetState(pHelper, TSDB_HELPER_TABLE_SET);
|
helperSetState(pHelper, TSDB_HELPER_TABLE_SET);
|
||||||
ASSERT(pHelper->state == ((TSDB_HELPER_TABLE_SET << 1) - 1));
|
ASSERT(pHelper->state == ((TSDB_HELPER_TABLE_SET << 1) - 1));
|
||||||
}
|
}
|
||||||
|
@ -555,10 +560,6 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
|
||||||
}
|
}
|
||||||
helperSetState(pHelper, TSDB_HELPER_IDX_LOAD);
|
helperSetState(pHelper, TSDB_HELPER_IDX_LOAD);
|
||||||
|
|
||||||
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
|
|
||||||
pFile->info.len = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy the memory for outside usage
|
// Copy the memory for outside usage
|
||||||
if (target && pHelper->idxH.numOfIdx > 0)
|
if (target && pHelper->idxH.numOfIdx > 0)
|
||||||
memcpy(target, pHelper->idxH.pIdxArray, sizeof(SCompIdx) * pHelper->idxH.numOfIdx);
|
memcpy(target, pHelper->idxH.pIdxArray, sizeof(SCompIdx) * pHelper->idxH.numOfIdx);
|
||||||
|
@@ -1259,13 +1260,21 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
     SCompCol *pCompCol = NULL;
 
     while (true) {
-      ASSERT(dcol < pDataCols->numOfCols);
+      if (dcol >= pDataCols->numOfCols) {
+        pDataCol = NULL;
+        break;
+      }
       pDataCol = &pDataCols->cols[dcol];
-      ASSERT(pDataCol->colId <= colId);
-      if (pDataCol->colId == colId) break;
-      dcol++;
+      if (pDataCol->colId > colId) {
+        pDataCol = NULL;
+        break;
+      } else {
+        dcol++;
+        if (pDataCol->colId == colId) break;
+      }
     }
 
+    if (pDataCol == NULL) continue;
     ASSERT(pDataCol->colId == colId);
 
     if (colId == 0) { // load the key row
@@ -1275,15 +1284,24 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
       compCol.offset = TSDB_KEY_COL_OFFSET;
       pCompCol = &compCol;
     } else { // load non-key rows
-      while (ccol < pCompBlock->numOfCols) {
-        pCompCol = &pHelper->pCompData->cols[ccol];
-        if (pCompCol->colId >= colId) break;
-        ccol++;
-      }
-
-      if (ccol >= pCompBlock->numOfCols || pCompCol->colId > colId) {
+      while (true) {
+        if (ccol >= pCompBlock->numOfCols) {
+          pCompCol = NULL;
+          break;
+        }
+
+        pCompCol = &(pHelper->pCompData->cols[ccol]);
+        if (pCompCol->colId > colId) {
+          pCompCol = NULL;
+          break;
+        } else {
+          ccol++;
+          if (pCompCol->colId == colId) break;
+        }
+      }
+
+      if (pCompCol == NULL) {
         dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints);
-        dcol++;
         continue;
       }
 
@@ -1291,8 +1309,6 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
     }
 
     if (tsdbLoadColData(pHelper, pFile, pCompBlock, pCompCol, pDataCol) < 0) goto _err;
-    dcol++;
-    if (colId != 0) ccol++;
   }
 
   return 0;
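For readers skimming the hunks above: the rewrite drops the hard ASSERTs and instead merges two colId-sorted column lists tolerantly, loading a column when the block has it and filling NULLs when it does not. A minimal standalone sketch of that matching strategy; the types and names below are illustrative, not TDengine's:

    #include <stdio.h>

    typedef struct { int colId; } Col;

    /* Walk two lists sorted by colId; for every requested column report whether
     * the on-disk block has it (load) or not (fill with NULLs), mirroring the
     * tolerant loop introduced in tsdbLoadBlockDataColsImpl. */
    static void matchColumns(const Col *req, int nReq, const Col *disk, int nDisk) {
      int d = 0;
      for (int r = 0; r < nReq; r++) {
        while (d < nDisk && disk[d].colId < req[r].colId) d++;
        if (d < nDisk && disk[d].colId == req[r].colId) {
          printf("colId %d: load from block\n", req[r].colId);
        } else {
          printf("colId %d: not in block, fill NULL\n", req[r].colId);
        }
      }
    }

    int main(void) {
      Col req[]  = {{1}, {3}, {5}, {7}};
      Col disk[] = {{1}, {2}, {5}};
      matchColumns(req, 4, disk, 3);
      return 0;
    }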
@@ -1517,8 +1533,8 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
     if (rows2 == 0) { // all data filtered out
       *(pCommitIter->pIter) = slIter;
     } else {
-      if (rows1 + rows2 < pCfg->minRowsPerFileBlock && pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS &&
-          !TSDB_NLAST_FILE_OPENED(pHelper)) {
+      if (pCompBlock->numOfRows + rows2 < pCfg->minRowsPerFileBlock &&
+          pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) {
         tdResetDataCols(pDataCols);
         int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, rows1, pDataCols,
                                              pDataCols0->cols[0].pData, pDataCols0->numOfRows);
@@ -175,6 +175,9 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS
 
 TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
   STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
+  if (pQueryHandle == NULL) {
+    goto out_of_memory;
+  }
   pQueryHandle->order = pCond->order;
   pQueryHandle->window = pCond->twindow;
   pQueryHandle->pTsdb = tsdb;

@@ -260,8 +263,8 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
   return (TsdbQueryHandleT) pQueryHandle;
 
 out_of_memory:
-  terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
   tsdbCleanupQueryHandle(pQueryHandle);
+  terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
   return NULL;
 }
 
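The second hunk above moves the terrno assignment after tsdbCleanupQueryHandle, presumably so the cleanup path cannot overwrite the error code it is meant to report. A generic sketch of that ordering concern; every name below is illustrative, not TDengine code:

    #include <stdlib.h>
    #include <stdio.h>

    static int g_errno_like = 0;                 /* stand-in for a thread-local error code */
    static void cleanup(void *h) { free(h); g_errno_like = 0; /* cleanup may reset state */ }

    void *openHandle(size_t n) {
      void *h = calloc(1, n);
      if (h == NULL) goto out_of_memory;
      return h;

    out_of_memory:
      cleanup(h);          /* release whatever was allocated first ...            */
      g_errno_like = -12;  /* ... then set the error code so cleanup cannot clobber it */
      return NULL;
    }

    int main(void) {
      void *h = openHandle(64);
      if (h) { puts("ok"); free(h); }
      return 0;
    }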
@@ -0,0 +1,196 @@
#!/bin/bash

today=`date +"%Y%m%d"`
TDENGINE_DIR=/home/shuduo/work/taosdata/TDengine.cover
TDENGINE_COVERAGE_REPORT=$TDENGINE_DIR/tests/coverage-report-$today.log

# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

function buildTDengine {
  echo "check if TDengine need build"
  cd $TDENGINE_DIR
  git remote prune origin > /dev/null
  git remote update > /dev/null
  REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
  LOCAL_COMMIT=`git rev-parse --short @`
  echo " LOCAL: $LOCAL_COMMIT"
  echo "REMOTE: $REMOTE_COMMIT"

  # reset counter
  lcov -d . --zerocounters

  cd $TDENGINE_DIR/debug

  if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
    echo "repo up-to-date"
  else
    echo "repo need to pull"
    git reset --hard
    git pull

    LOCAL_COMMIT=`git rev-parse --short @`

    rm -rf *
    cmake -DCOVER=true -DRANDOM_FILE_FAIL=true .. > /dev/null
    make > /dev/null
  fi

  make install > /dev/null
}

function runGeneralCaseOneByOne {
  while read -r line; do
    if [[ $line =~ ^./test.sh* ]]; then
      general_case=`echo $line | grep -w general`

      if [ -n "$general_case" ]; then
        case=`echo $line |grep general| awk '{print $NF}'`
        ./test.sh -f $case > /dev/null 2>&1 && \
          echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
          echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
      fi
    fi
  done < $1
}

function runTest {
  echo "run Test"

  cd $TDENGINE_DIR/tests/script

  [ -d ../../sim ] && rm -rf ../../sim
  [ -f $TDENGINE_COVERAGE_REPORT ] && rm $TDENGINE_COVERAGE_REPORT

  runGeneralCaseOneByOne jenkins/basic.txt

  totalSuccess=`grep 'success' $TDENGINE_COVERAGE_REPORT | wc -l`

  if [ "$totalSuccess" -gt "0" ]; then
    echo -e "\n${GREEN} ### Total $totalSuccess coverage test case(s) succeed! ### ${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
  fi

  totalFailed=`grep 'failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
  if [ "$totalFailed" -ne "0" ]; then
    echo -e "${RED} ### Total $totalFailed coverage test case(s) failed! ### ${NC}\n" | tee -a $TDENGINE_COVERAGE_REPORT
    # exit $totalPyFailed
  fi

  cd $TDENGINE_DIR/tests
  rm -rf ../sim
  ./test-all.sh full python | tee -a $TDENGINE_COVERAGE_REPORT

  # Test Connector
  stopTaosd
  $TDENGINE_DIR/debug/build/bin/taosd -c $TDENGINE_DIR/debug/test/cfg > /dev/null &
  sleep 10

  cd $TDENGINE_DIR/src/connector/jdbc
  mvn clean package
  mvn test | tee -a $TDENGINE_COVERAGE_REPORT

  # Test C Demo
  stopTaosd
  $TDENGINE_DIR/debug/build/bin/taosd -c $TDENGINE_DIR/debug/test/cfg > /dev/null &
  sleep 10
  yes | $TDENGINE_DIR/debug/build/bin/demo 127.0.0.1 | tee -a $TDENGINE_COVERAGE_REPORT

  # Test waltest
  dataDir=`grep dataDir $TDENGINE_DIR/debug/test/cfg/taos.cfg|awk '{print $2}'`
  walDir=`find $dataDir -name "wal"|head -n1`
  echo "dataDir: $dataDir" | tee -a $TDENGINE_COVERAGE_REPORT
  echo "walDir: $walDir" | tee -a $TDENGINE_COVERAGE_REPORT
  if [ -n "$walDir" ]; then
    yes | $TDENGINE_DIR/debug/build/bin/waltest -p $walDir | tee -a $TDENGINE_COVERAGE_REPORT
  fi

  # run Unit Test
  echo "Run Unit Test: utilTest, queryTest and cliTest"
  $TDENGINE_DIR/debug/build/bin/utilTest > /dev/null && echo "utilTest pass!" || echo "utilTest failed!"
  $TDENGINE_DIR/debug/build/bin/queryTest > /dev/null && echo "queryTest pass!" || echo "queryTest failed!"
  $TDENGINE_DIR/debug/build/bin/cliTest > /dev/null && echo "cliTest pass!" || echo "cliTest failed!"

  stopTaosd
}

function lcovFunc {
  echo "collect data by lcov"
  cd $TDENGINE_DIR

  # collect data
  lcov -d . --capture --rc lcov_branch_coverage=1 --rc genhtml_branch_coverage=1 --no-external -b $TDENGINE_DIR -o coverage.info

  # remove exclude paths
  lcov --remove coverage.info \
    '*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' \
    --rc lcov_branch_coverage=1 -o coverage.info

  # generate result
  lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDENGINE_COVERAGE_REPORT

  # push result to coveralls.io
  coveralls-lcov coverage.info | tee -a $TDENGINE_COVERAGE_REPORT
}

function sendReport {
  echo "send report"
  receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
  mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"

  cd $TDENGINE_DIR

  sed -i 's/\x1b\[[0-9;]*m//g' $TDENGINE_COVERAGE_REPORT
  BODY_CONTENT=`cat $TDENGINE_COVERAGE_REPORT`
  echo -e "to: ${receiver}\nsubject: Coverage test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
    (cat - && uuencode $TDENGINE_COVERAGE_REPORT coverage-report-$today.log) | \
    ssmtp "${receiver}" && echo "Report Sent!"
}

function stopTaosd {
  echo "Stop taosd"
  systemctl stop taosd
  PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    pkill -TERM -x taosd
    sleep 1
    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  done
}

function runTestRandomFail {
  exec_random_fail_sh=$1
  default_exec_sh=$TDENGINE_DIR/tests/script/sh/exec.sh
  [ -f $exec_random_fail_sh ] && cp $exec_random_fail_sh $default_exec_sh || exit 1

  dnodes_random_fail_py=$TDENGINE_DIR/tests/pytest/util/dnodes-no-random-fail.py
  default_dnodes_py=$TDENGINE_DIR/tests/pytest/util/dnodes.py
  [ -f $dnodes_random_fail_py ] && cp $dnodes_random_fail_py $default_dnodes_py || exit 1

  runTest NoRandomFail
}

WORK_DIR=/home/shuduo/work/taosdata

date >> $WORK_DIR/cron.log
echo "Run Coverage Test" | tee -a $WORK_DIR/cron.log

rm /tmp/core-*

stopTaosd
buildTDengine

runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-random-fail.sh
runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-default.sh
runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-no-random-fail.sh

lcovFunc
sendReport
stopTaosd

date >> $WORK_DIR/cron.log
echo "End of Coverage Test" | tee -a $WORK_DIR/cron.log
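The script above appends to $WORK_DIR/cron.log, which suggests it is meant to be driven by cron. A hypothetical crontab entry; the schedule and the script name/path are placeholders, not taken from this commit:

    # run the nightly coverage pass at 01:30; adjust the path to wherever the script is installed
    30 1 * * * /home/shuduo/work/taosdata/coverage_test.sh > /dev/null 2>&1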
@@ -0,0 +1,191 @@
#!/bin/bash

today=`date +"%Y%m%d"`
TDENGINE_DIR=/home/shuduo/work/taosdata/TDengine.orig
TDENGINE_FULLTEST_REPORT=$TDENGINE_DIR/tests/full-report-$today.log

# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

function buildTDengine {
  echo "check if TDengine need build"

  need_rebuild=false

  if [ ! -d $TDENGINE_DIR ]; then
    echo "No TDengine source code found!"
    git clone https://github.com/taosdata/TDengine $TDENGINE_DIR
    need_rebuild=true
  fi

  cd $TDENGINE_DIR
  git remote prune origin > /dev/null
  git remote update > /dev/null
  REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
  LOCAL_COMMIT=`git rev-parse --short @`
  echo " LOCAL: $LOCAL_COMMIT"
  echo "REMOTE: $REMOTE_COMMIT"

  if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
    echo "repo up-to-date"
  else
    echo "repo need to pull"
    git pull
    need_rebuild=true
  fi

  [ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug
  cd $TDENGINE_DIR/debug
  [ -f $TDENGINE_DIR/debug/build/bin/taosd ] || need_rebuild=true

  if $need_rebuild ; then
    echo "rebuild.."

    LOCAL_COMMIT=`git rev-parse --short @`
    rm -rf *
    cmake .. > /dev/null
    make > /dev/null
  fi

  make install > /dev/null
}

function runGeneralCaseOneByOne {
  while read -r line; do
    if [[ $line =~ ^./test.sh* ]]; then
      general_case=`echo $line | grep -w general`

      if [ -n "$general_case" ]; then
        case=`echo $line | awk '{print $NF}'`

        start_time=`date +%s`
        ./test.sh -f $case > /dev/null 2>&1 && ret=0 || ret=1
        end_time=`date +%s`

        if [[ ret -eq 0 ]]; then
          echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_FULLTEST_REPORT
        else
          casename=`echo $case|sed 's/\//\-/g'`
          find $TDENGINE_DIR/sim -name "*log" -exec tar czf $TDENGINE_DIR/fulltest-$today-$casename.log.tar.gz {} +
          echo -e "${RED}$case failed and log saved${NC}" | tee -a $TDENGINE_FULLTEST_REPORT
        fi
        echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_FULLTEST_REPORT
      fi
    fi
  done < $1
}

function runPyCaseOneByOne {
  while read -r line; do
    if [[ $line =~ ^python.* ]]; then
      if [[ $line != *sleep* ]]; then
        case=`echo $line|awk '{print $NF}'`
        start_time=`date +%s`
        $line > /dev/null 2>&1 && ret=0 || ret=1
        end_time=`date +%s`

        if [[ ret -eq 0 ]]; then
          echo -e "${GREEN}$case success${NC}" | tee -a pytest-out.log
        else
          casename=`echo $case|sed 's/\//\-/g'`
          find $TDENGINE_DIR/sim -name "*log" -exec tar czf $TDENGINE_DIR/fulltest-$today-$casename.log.tar.gz {} +
          echo -e "${RED}$case failed and log saved${NC}" | tee -a pytest-out.log
        fi
        echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
      else
        $line > /dev/null 2>&1
      fi
    fi
  done < $1
}

function runTest {
  echo "Run Test"
  cd $TDENGINE_DIR/tests/script

  [ -d $TDENGINE_DIR/sim ] && rm -rf $TDENGINE_DIR/sim
  [ -f $TDENGINE_FULLTEST_REPORT ] && rm $TDENGINE_FULLTEST_REPORT

  runGeneralCaseOneByOne jenkins/basic.txt

  totalSuccess=`grep 'success' $TDENGINE_FULLTEST_REPORT | wc -l`

  if [ "$totalSuccess" -gt "0" ]; then
    echo -e "\n${GREEN} ### Total $totalSuccess SIM case(s) succeed! ### ${NC}" \
      | tee -a $TDENGINE_FULLTEST_REPORT
  fi

  totalFailed=`grep 'failed\|fault' $TDENGINE_FULLTEST_REPORT | wc -l`
  if [ "$totalFailed" -ne "0" ]; then
    echo -e "${RED} ### Total $totalFailed SIM case(s) failed! ### ${NC}\n" \
      | tee -a $TDENGINE_FULLTEST_REPORT
  fi

  cd $TDENGINE_DIR/tests/pytest
  [ -d $TDENGINE_DIR/sim ] && rm -rf $TDENGINE_DIR/sim
  [ -f pytest-out.log ] && rm -f pytest-out.log
  runPyCaseOneByOne fulltest.sh

  totalPySuccess=`grep 'success' pytest-out.log | wc -l`
  totalPyFailed=`grep 'failed\|fault' pytest-out.log | wc -l`

  cat pytest-out.log >> $TDENGINE_FULLTEST_REPORT
  if [ "$totalPySuccess" -gt "0" ]; then
    echo -e "\n${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}" \
      | tee -a $TDENGINE_FULLTEST_REPORT
  fi

  if [ "$totalPyFailed" -ne "0" ]; then
    echo -e "\n${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}" \
      | tee -a $TDENGINE_FULLTEST_REPORT
  fi
}

function sendReport {
  echo "Send Report"
  receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
  mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"

  cd $TDENGINE_DIR/tests

  sed -i 's/\x1b\[[0-9;]*m//g' $TDENGINE_FULLTEST_REPORT
  BODY_CONTENT=`cat $TDENGINE_FULLTEST_REPORT`

  cd $TDENGINE_DIR
  tar czf fulltest-$today.tar.gz fulltest-$today-*.log.tar.gz

  echo -e "to: ${receiver}\nsubject: Full test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
    (cat - && uuencode $TDENGINE_FULLTEST_REPORT fulltest-report-$today.log) | \
    (cat - && uuencode $TDENGINE_DIR/fulltest-$today.tar.gz fulltest-$today.tar.gz) | \
    ssmtp "${receiver}" && echo "Report Sent!"
}

function stopTaosd {
  echo "Stop taosd"
  systemctl stop taosd
  PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    pkill -TERM -x taosd
    sleep 1
    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  done
}

WORK_DIR=/home/shuduo/work/taosdata

date >> $WORK_DIR/cron.log
echo "Run Full Test" | tee -a $WORK_DIR/cron.log

stopTaosd
buildTDengine
runTest
sendReport
stopTaosd

date >> $WORK_DIR/cron.log
echo "End of Full Test" | tee -a $WORK_DIR/cron.log
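runGeneralCaseOneByOne and runPyCaseOneByOne read a case list line by line and only act on lines that start with ./test.sh or python. Judging from that parsing logic, the entries in jenkins/basic.txt and fulltest.sh look roughly like the following; these sample lines are illustrative, not copied from the repository:

    ./test.sh -f general/db/basic.sim
    ./test.sh -f general/insert/basic.sim
    python3 ./test.py -f insert/basic.py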
@@ -0,0 +1,186 @@
#!/bin/bash

today=`date +"%Y%m%d"`
TDINTERNAL_DIR=/home/shuduo/work/taosdata/TDinternal.cover
TDINTERNAL_COVERAGE_REPORT=$TDINTERNAL_DIR/community/tests/tdinternal-coverage-report-$today.log

# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

function buildTDinternal {
  echo "check if TDinternal need build"
  cd $TDINTERNAL_DIR
  NEED_COMPILE=0
  # git remote update
  REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
  LOCAL_COMMIT=`git rev-parse --short @`
  echo " LOCAL: $LOCAL_COMMIT"
  echo "REMOTE: $REMOTE_COMMIT"
  if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
    echo "TDinternal repo is up-to-date"
  else
    echo "repo need to pull"
    # git pull

    # NEED_COMPILE=1
  fi

  lcov -d . --zerocounters
  # git submodule update --init --recursive
  cd $TDINTERNAL_DIR/community
  TDENGINE_REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
  TDENGINE_LOCAL_COMMIT=`git rev-parse --short @`
  if [ "$TDENGINE_LOCAL_COMMIT" == "$TDENGINE_REMOTE_COMMIT" ]; then
    echo "community repo is up-to-date"
  else
    echo "repo need to pull"
    # git checkout develop
    # git pull
    # NEED_COMPILE=1
  fi

  cd $TDINTERNAL_DIR/debug

  if [[ $NEED_COMPILE -eq 1 ]]; then
    LOCAL_COMMIT=`git rev-parse --short @`
    rm -rf *
    cmake .. > /dev/null
    make > /dev/null
  fi

  make install > /dev/null
}

function runUniqueCaseOneByOne {
  while read -r line; do
    if [[ $line =~ ^./test.sh* ]]; then
      case=`echo $line | awk '{print $NF}'`
      start_time=`date +%s`
      ./test.sh -f $case > /dev/null 2>&1 && \
        echo -e "${GREEN}$case success${NC}" | tee -a $TDINTERNAL_COVERAGE_REPORT || \
        echo -e "${RED}$case failed${NC}" | tee -a $TDINTERNAL_COVERAGE_REPORT
      end_time=`date +%s`
      echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDINTERNAL_COVERAGE_REPORT
    fi
  done < $1
}

function runTest {
  echo "Run Test"
  cd $TDINTERNAL_DIR/community/tests/script
  [ -d ../../sim ] && rm -rf ../../sim

  [ -f $TDINTERNAL_COVERAGE_REPORT ] && rm $TDINTERNAL_COVERAGE_REPORT

  runUniqueCaseOneByOne jenkins/basic.txt

  totalSuccess=`grep 'success' $TDINTERNAL_COVERAGE_REPORT | wc -l`

  if [ "$totalSuccess" -gt "0" ]; then
    echo -e "\n${GREEN} ### Total $totalSuccess TDinternal case(s) succeed! ### ${NC}" | tee -a $TDINTERNAL_COVERAGE_REPORT
  fi

  totalFailed=`grep 'failed\|fault' $TDINTERNAL_COVERAGE_REPORT | wc -l`
  if [ "$totalFailed" -ne "0" ]; then
    echo -e "${RED} ### Total $totalFailed TDinternal case(s) failed! ### ${NC}\n" | tee -a $TDINTERNAL_COVERAGE_REPORT
    # exit $totalPyFailed
  fi

  # Test Python test case
  cd $TDINTERNAL_DIR/community/tests
  /usr/bin/time -f "Total spent: %e" ./test-all.sh full python | tee -a $TDINTERNAL_COVERAGE_REPORT

  # Test Connector
  stopTaosd
  $TDINTERNAL_DIR/debug/build/bin/taosd -c $TDINTERNAL_DIR/debug/test/cfg > /dev/null &
  sleep 10

  cd $TDINTERNAL_DIR/community/src/connector/jdbc
  mvn clean package
  mvn test | tee -a $TDINTERNAL_COVERAGE_REPORT

  # Test C Demo
  stopTaosd
  $TDINTERNAL_DIR/debug/build/bin/taosd -c $TDINTERNAL_DIR/debug/test/cfg > /dev/null &
  sleep 10
  yes | $TDINTERNAL_DIR/debug/build/bin/demo 127.0.0.1 | tee -a $TDINTERNAL_COVERAGE_REPORT

  # Test waltest
  dataDir=`grep dataDir $TDINTERNAL_DIR/debug/test/cfg/taos.cfg|awk '{print $2}'`
  walDir=`find $dataDir -name "wal"|head -n1`
  echo -e "dataDir: $dataDir\nwalDir: $walDir" | tee -a $TDINTERNAL_COVERAGE_REPORT
  if [ -n "$walDir" ]; then
    yes | $TDINTERNAL_DIR/debug/build/bin/waltest -p $walDir | tee -a $TDINTERNAL_COVERAGE_REPORT
  fi

  stopTaosd
}

function sendReport {
  echo "Send Report"
  receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
  mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"

  cd $TDINTERNAL_DIR

  sed -i 's/\x1b\[[0-9;]*m//g' $TDINTERNAL_COVERAGE_REPORT

  BODY_CONTENT=`cat $TDINTERNAL_COVERAGE_REPORT`
  echo -e "to: ${receiver}\nsubject: TDinternal coverage test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
    (cat - && uuencode tdinternal-coverage-report-$today.tar.gz tdinternal-coverage-report-$today.tar.gz) | \
    (cat - && uuencode $TDINTERNAL_COVERAGE_REPORT tdinternal-coverage-report-$today.log) | \
    ssmtp "${receiver}" && echo "Report Sent!"
}

function lcovFunc {
  echo "collect data by lcov"
  cd $TDINTERNAL_DIR

  sed -i 's/\x1b\[[0-9;]*m//g' $TDINTERNAL_COVERAGE_REPORT
  # collect data
  lcov -d . --capture --rc lcov_branch_coverage=1 --rc genhtml_branch_coverage=1 --no-external -b $TDINTERNAL_DIR -o coverage.info

  # remove exclude paths
  lcov --remove coverage.info '*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' \
    --rc lcov_branch_coverage=1 -o coverage.info

  # generate result
  lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDINTERNAL_COVERAGE_REPORT

  genhtml -o html coverage.info

  tar czf tdinternal-coverage-report-$today.tar.gz html coverage.info $TDINTERNAL_COVERAGE_REPORT
  # push result to coveralls.io
  # coveralls-lcov coverage.info | tee -a tdinternal-coverage-report-$today.log
}

function stopTaosd {
  echo "Stop taosd"
  systemctl stop taosd
  PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    pkill -TERM -x taosd
    sleep 1
    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  done
}

WORK_DIR=/home/shuduo/work/taosdata

date >> $WORK_DIR/cron.log
echo "Run Coverage Test for TDinternal" | tee -a $WORK_DIR/cron.log

stopTaosd
buildTDinternal
runTest
lcovFunc
sendReport
stopTaosd

date >> $WORK_DIR/cron.log
echo "End of TDinternal Coverage Test" | tee -a $WORK_DIR/cron.log
@@ -0,0 +1,179 @@
#!/bin/bash

today=`date +"%Y%m%d"`
TDINTERNAL_DIR=/home/shuduo/work/taosdata/TDinternal
TDINTERNAL_TEST_REPORT=$TDINTERNAL_DIR/community/tests/tdinternal-report-$today.log

# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

function buildTDinternal {
  echo "check if TDinternal need build"
  cd $TDINTERNAL_DIR
  NEED_COMPILE=0
  # git remote update
  REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
  LOCAL_COMMIT=`git rev-parse --short @`
  echo " LOCAL: $LOCAL_COMMIT"
  echo "REMOTE: $REMOTE_COMMIT"
  if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
    echo "TDinternal repo is up-to-date"
  else
    echo "repo need to pull"
    # git pull

    # NEED_COMPILE=1
  fi

  # git submodule update --init --recursive
  cd $TDINTERNAL_DIR/community
  TDENGINE_REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
  TDENGINE_LOCAL_COMMIT=`git rev-parse --short @`
  if [ "$TDENGINE_LOCAL_COMMIT" == "$TDENGINE_REMOTE_COMMIT" ]; then
    echo "community repo is up-to-date"
  else
    echo "repo need to pull"
    # git checkout develop
    # git pull
    # NEED_COMPILE=1
  fi

  cd $TDINTERNAL_DIR/debug

  if [[ $NEED_COMPILE -eq 1 ]]; then
    LOCAL_COMMIT=`git rev-parse --short @`
    rm -rf *
    cmake .. > /dev/null
    make > /dev/null
  fi

  make install > /dev/null
}

function runUniqueCaseOneByOne {
  while read -r line; do
    if [[ $line =~ ^./test.sh* ]]; then
      case=`echo $line | awk '{print $NF}'`
      start_time=`date +%s`
      ./test.sh -f $case > /dev/null 2>&1 && \
        echo -e "${GREEN}$case success${NC}" | tee -a $TDINTERNAL_TEST_REPORT || \
        echo -e "${RED}$case failed${NC}" | tee -a $TDINTERNAL_TEST_REPORT
      end_time=`date +%s`
      echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDINTERNAL_TEST_REPORT
    fi
  done < $1
}

function runPyCaseOneByOne {
  while read -r line; do
    if [[ $line =~ ^python.* ]]; then
      if [[ $line != *sleep* ]]; then
        case=`echo $line|awk '{print $NF}'`
        start_time=`date +%s`
        $line > /dev/null 2>&1 && ret=0 || ret=1
        end_time=`date +%s`

        if [[ ret -eq 0 ]]; then
          echo -e "${GREEN}$case success${NC}" | tee -a pytest-out.log
        else
          casename=`echo $case|sed 's/\//\-/g'`
          find $TDINTERNAL_DIR/community/sim -name "*log" -exec tar czf $TDINTERNAL_DIR/fulltest-$today-$casename.log.tar.gz {} +
          echo -e "${RED}$case failed and log saved${NC}" | tee -a pytest-out.log
        fi
        echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
      else
        $line > /dev/null 2>&1
      fi
    fi
  done < $1
}

function runTest {
  echo "Run Test"
  cd $TDINTERNAL_DIR/community/tests/script
  [ -d $TDINTERNAL_DIR/sim ] && rm -rf $TDINTERNAL_DIR/sim

  [ -f $TDINTERNAL_TEST_REPORT ] && rm $TDINTERNAL_TEST_REPORT

  runUniqueCaseOneByOne jenkins/basic.txt

  totalSuccess=`grep 'success' $TDINTERNAL_TEST_REPORT | wc -l`

  if [ "$totalSuccess" -gt "0" ]; then
    echo -e "\n${GREEN} ### Total $totalSuccess TDinternal case(s) succeed! ### ${NC}" | tee -a $TDINTERNAL_TEST_REPORT
  fi

  totalFailed=`grep 'failed\|fault' $TDINTERNAL_TEST_REPORT | wc -l`
  if [ "$totalFailed" -ne "0" ]; then
    echo -e "${RED} ### Total $totalFailed TDinternal case(s) failed! ### ${NC}\n" | tee -a $TDINTERNAL_TEST_REPORT
    # exit $totalPyFailed
  fi

  cd $TDINTERNAL_DIR/community/tests/pytest
  [ -d $TDINTERNAL_DIR/community/sim ] && rm -rf $TDINTERNAL_DIR/community/sim
  [ -f pytest-out.log ] && rm -f pytest-out.log

  /usr/bin/time -f "Total spent: %e" ./test-all.sh full python | tee -a $TDINTERNAL_TEST_REPORT
  runPyCaseOneByOne fulltest.sh

  totalPySuccess=`grep 'success' pytest-out.log | wc -l`
  totalPyFailed=`grep 'failed\|fault' pytest-out.log | wc -l`

  cat pytest-out.log >> $TDINTERNAL_TEST_REPORT
  if [ "$totalPySuccess" -gt "0" ]; then
    echo -e "\n${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}" \
      | tee -a $TDINTERNAL_TEST_REPORT
  fi

  if [ "$totalPyFailed" -ne "0" ]; then
    echo -e "\n${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}" \
      | tee -a $TDINTERNAL_TEST_REPORT
  fi
}

function sendReport {
  echo "Send Report"
  receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
  mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"

  cd $TDINTERNAL_DIR

  sed -i 's/\x1b\[[0-9;]*m//g' $TDINTERNAL_TEST_REPORT
  BODY_CONTENT=`cat $TDINTERNAL_TEST_REPORT`

  cd $TDINTERNAL_DIR
  tar czf fulltest-$today.tar.gz fulltest-$today-*.log.tar.gz

  echo -e "to: ${receiver}\nsubject: TDinternal test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
    (cat - && uuencode $TDINTERNAL_TEST_REPORT tdinternal-report-$today.log) | \
    ssmtp "${receiver}" && echo "Report Sent!"
}

function stopTaosd {
  echo "Stop taosd"
  systemctl stop taosd
  PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    pkill -KILL -x taosd
    sleep 1
    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  done
}

WORK_DIR=/home/shuduo/work/taosdata

date >> $WORK_DIR/cron.log
echo "Run Test for TDinternal" | tee -a $WORK_DIR/cron.log

buildTDinternal
runTest
sendReport
stopTaosd

date >> $WORK_DIR/cron.log
echo "End of TDinternal Test" | tee -a $WORK_DIR/cron.log
@@ -0,0 +1,79 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.sub import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.conn = conn

    def run(self):
        sqlstr = "select * from t0"
        topic = "test"
        now = int(time.time() * 1000)
        tdSql.prepare()

        tdLog.info("create a table and insert 10 rows.")
        tdSql.execute("create table t0(ts timestamp, a int, b int);")
        for i in range(0, 10):
            tdSql.execute("insert into t0 values (%d, %d, %d);" % (now + i, i, i))

        tdLog.info("consumption 01.")
        tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(10)

        tdLog.info("consumption 02: no new rows inserted")
        tdSub.consume()
        tdSub.checkRows(0)

        tdLog.info("consumption 03: after one new row inserted")
        tdSql.execute("insert into t0 values (%d, 10, 10);" % (now + 10))
        tdSub.consume()
        tdSub.checkRows(1)

        tdLog.info("consumption 04: keep progress and continue previous subscription")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(0)

        tdLog.info("consumption 05: remove progress and continue previous subscription")
        tdSub.close(False)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(11)

        tdLog.info("consumption 06: keep progress and restart the subscription")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(11)

        tdSub.close(True)

    def stop(self):
        tdSub.close(False)
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
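Outside the test framework, the same subscription flow can be exercised directly through the Python connector. A hedged sketch based only on the calls the test wraps, namely conn.subscribe(restart, topic, sql, interval) plus consume() and close(keepProgress) on the returned object; connection parameters and the exact return type of consume() depend on your connector version:

    import taos

    conn = taos.connect(host="127.0.0.1")   # add user/password/database as your setup requires
    sub = conn.subscribe(True, "test", "select * from t0", 0)   # restart=True ignores saved progress

    data = sub.consume()    # rows that arrived since the last consume()
    print(data)

    sub.close(True)         # keep progress so a later subscribe(False, ...) resumes from here
    conn.close()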
@@ -0,0 +1,114 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.sub import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.conn = conn

    def run(self):
        sqlstr = "select * from meters"
        topic = "test"
        now = int(time.time() * 1000)
        tdSql.prepare()

        tdLog.info("create a super table and 10 sub-tables, then insert 5 rows into each sub-table.")
        tdSql.execute("create table meters(ts timestamp, a int, b int) tags(area int, loc binary(20));")
        for i in range(0, 10):
            for j in range(0, 5):
                tdSql.execute("insert into t%d using meters tags(%d, 'area%d') values (%d, %d, %d);" % (i, i, i, now + j, j, j))

        tdLog.info("consumption 01.")
        tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(50)

        tdLog.info("consumption 02: no new rows inserted")
        tdSub.consume()
        tdSub.checkRows(0)

        tdLog.info("consumption 03: after one new row inserted")
        tdSql.execute("insert into t0 values (%d, 10, 10);" % (now + 10))
        tdSub.consume()
        tdSub.checkRows(1)

        tdLog.info("consumption 04: keep progress and continue previous subscription")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(0)

        tdLog.info("consumption 05: remove progress and continue previous subscription")
        tdSub.close(False)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(51)

        tdLog.info("consumption 06: keep progress and restart the subscription")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(51)

        tdLog.info("consumption 07: insert one row into two tables, then remove one table")
        tdSql.execute("insert into t0 values (%d, 11, 11);" % (now + 11))
        tdSql.execute("insert into t1 values (%d, 11, 11);" % (now + 11))
        tdSql.execute("drop table t0")
        tdSub.consume()
        tdSub.checkRows(1)

        tdLog.info("consumption 08: check timestamp criteria")
        tdSub.close(False)
        tdSub.init(self.conn.subscribe(True, topic, sqlstr + " where ts > %d" % now, 0))
        tdSub.consume()
        tdSub.checkRows(37)

        tdLog.info("consumption 09: insert a larger timestamp into t2, then a smaller timestamp into t1")
        tdSql.execute("insert into t2 values (%d, 100, 100);" % (now + 100))
        tdSub.consume()
        tdSub.checkRows(1)
        tdSql.execute("insert into t1 values (%d, 12, 12);" % (now + 12))
        tdSub.consume()
        tdSub.checkRows(1)

        tdLog.info("consumption 10: field criteria")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr + " where a > 100", 0))
        tdSql.execute("insert into t2 values (%d, 101, 100);" % (now + 101))
        tdSql.execute("insert into t2 values (%d, 100, 100);" % (now + 102))
        tdSql.execute("insert into t2 values (%d, 102, 100);" % (now + 103))
        tdSub.consume()
        tdSub.checkRows(2)

        tdLog.info("consumption 11: two vnodes")
        tdSql.execute("insert into t2 values (%d, 102, 100);" % (now + 104))
        tdSql.execute("insert into t9 values (%d, 102, 100);" % (now + 104))
        tdSub.consume()
        tdSub.checkRows(2)

    def stop(self):
        tdSub.close(False)
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@ -0,0 +1,502 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import subprocess
|
||||||
|
from util.log import *
|
||||||
|
|
||||||
|
|
||||||
|
class TDSimClient:
|
||||||
|
def __init__(self):
|
||||||
|
self.testCluster = False
|
||||||
|
|
||||||
|
self.cfgDict = {
|
||||||
|
"numOfLogLines": "100000000",
|
||||||
|
"numOfThreadsPerCore": "2.0",
|
||||||
|
"locale": "en_US.UTF-8",
|
||||||
|
"charset": "UTF-8",
|
||||||
|
"asyncLog": "0",
|
||||||
|
"minTablesPerVnode": "4",
|
||||||
|
"maxTablesPerVnode": "1000",
|
||||||
|
"tableIncStepPerVnode": "10000",
|
||||||
|
"maxVgroupsPerDb": "1000",
|
||||||
|
"sdbDebugFlag": "143",
|
||||||
|
"rpcDebugFlag": "135",
|
||||||
|
"tmrDebugFlag": "131",
|
||||||
|
"cDebugFlag": "135",
|
||||||
|
"udebugFlag": "135",
|
||||||
|
"jnidebugFlag": "135",
|
||||||
|
"qdebugFlag": "135",
|
||||||
|
}
|
||||||
|
def init(self, path):
|
||||||
|
self.__init__()
|
||||||
|
self.path = path
|
||||||
|
|
||||||
|
def getLogDir(self):
|
||||||
|
self.logDir = "%s/sim/psim/log" % (self.path)
|
||||||
|
return self.logDir
|
||||||
|
|
||||||
|
def getCfgDir(self):
|
||||||
|
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
|
||||||
|
return self.cfgDir
|
||||||
|
|
||||||
|
def setTestCluster(self, value):
|
||||||
|
self.testCluster = value
|
||||||
|
|
||||||
|
def addExtraCfg(self, option, value):
|
||||||
|
self.cfgDict.update({option: value})
|
||||||
|
|
||||||
|
def cfg(self, option, value):
|
||||||
|
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def deploy(self):
|
||||||
|
self.logDir = "%s/sim/psim/log" % (self.path)
|
||||||
|
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
|
||||||
|
self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "touch " + self.cfgPath
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.cfg("masterIp", "192.168.0.1")
|
||||||
|
self.cfg("secondIp", "192.168.0.2")
|
||||||
|
self.cfg("logDir", self.logDir)
|
||||||
|
|
||||||
|
for key, value in self.cfgDict.items():
|
||||||
|
self.cfg(key, value)
|
||||||
|
|
||||||
|
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
|
||||||
|
|
||||||
|
|
||||||
|
class TDDnode:
|
||||||
|
def __init__(self, index):
|
||||||
|
self.index = index
|
||||||
|
self.running = 0
|
||||||
|
self.deployed = 0
|
||||||
|
self.testCluster = False
|
||||||
|
self.valgrind = 0
|
||||||
|
|
||||||
|
def init(self, path):
|
||||||
|
self.path = path
|
||||||
|
|
||||||
|
def setTestCluster(self, value):
|
||||||
|
self.testCluster = value
|
||||||
|
|
||||||
|
def setValgrind(self, value):
|
||||||
|
self.valgrind = value
|
||||||
|
|
||||||
|
def getDataSize(self):
|
||||||
|
totalSize = 0
|
||||||
|
|
||||||
|
if (self.deployed == 1):
|
||||||
|
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||||
|
for f in filenames:
|
||||||
|
fp = os.path.join(dirpath, f)
|
||||||
|
|
||||||
|
if not os.path.islink(fp):
|
||||||
|
totalSize = totalSize + os.path.getsize(fp)
|
||||||
|
|
||||||
|
return totalSize
|
||||||
|
|
||||||
|
def deploy(self):
|
||||||
|
self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
|
||||||
|
self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
|
||||||
|
self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index)
|
||||||
|
self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % (
|
||||||
|
self.path, self.index)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.dataDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.dataDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "touch " + self.cfgPath
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.startIP()
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.cfg("masterIp", "192.168.0.1")
|
||||||
|
self.cfg("secondIp", "192.168.0.2")
|
||||||
|
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("dataDir", self.dataDir)
|
||||||
|
self.cfg("logDir", self.logDir)
|
||||||
|
self.cfg("numOfLogLines", "100000000")
|
||||||
|
self.cfg("mnodeEqualVnodeNum", "0")
|
||||||
|
self.cfg("walLevel", "2")
|
||||||
|
self.cfg("fsync", "1000")
|
||||||
|
self.cfg("statusInterval", "1")
|
||||||
|
self.cfg("numOfMnodes", "3")
|
||||||
|
self.cfg("numOfThreadsPerCore", "2.0")
|
||||||
|
self.cfg("monitor", "0")
|
||||||
|
self.cfg("maxVnodeConnections", "30000")
|
||||||
|
self.cfg("maxMgmtConnections", "30000")
|
||||||
|
self.cfg("maxMeterConnections", "30000")
|
||||||
|
self.cfg("maxShellConns", "30000")
|
||||||
|
self.cfg("locale", "en_US.UTF-8")
|
||||||
|
self.cfg("charset", "UTF-8")
|
||||||
|
self.cfg("asyncLog", "0")
|
||||||
|
self.cfg("anyIp", "0")
|
||||||
|
self.cfg("dDebugFlag", "135")
|
||||||
|
self.cfg("mDebugFlag", "135")
|
||||||
|
self.cfg("sdbDebugFlag", "135")
|
||||||
|
self.cfg("rpcDebugFlag", "135")
|
||||||
|
self.cfg("tmrDebugFlag", "131")
|
||||||
|
self.cfg("cDebugFlag", "135")
|
||||||
|
self.cfg("httpDebugFlag", "135")
|
||||||
|
self.cfg("monitorDebugFlag", "135")
|
||||||
|
self.cfg("udebugFlag", "135")
|
||||||
|
self.cfg("jnidebugFlag", "135")
|
||||||
|
self.cfg("qdebugFlag", "135")
|
||||||
|
self.deployed = 1
|
||||||
|
tdLog.debug(
|
||||||
|
"dnode:%d is deployed and configured by %s" %
|
||||||
|
(self.index, self.cfgPath))
|
||||||
|
|
||||||
|
def getBuildPath(self):
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
|
if ("community" in selfPath):
|
||||||
|
projPath = selfPath[:selfPath.find("community")]
|
||||||
|
else:
|
||||||
|
projPath = selfPath[:selfPath.find("tests")]
|
||||||
|
|
||||||
|
for root, dirs, files in os.walk(projPath):
|
||||||
|
if ("taosd" in files):
|
||||||
|
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||||
|
if ("packaging" not in rootRealPath):
|
||||||
|
buildPath = root[:len(root)-len("/build/bin")]
|
||||||
|
break
|
||||||
|
return buildPath
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
buildPath = self.getBuildPath()
|
||||||
|
|
||||||
|
if (buildPath == ""):
|
||||||
|
tdLog.exit("taosd not found!")
|
||||||
|
else:
|
||||||
|
tdLog.info("taosd found in %s" % buildPath)
|
||||||
|
|
||||||
|
binPath = buildPath + "/build/bin/taosd"
|
||||||
|
|
||||||
|
if self.deployed == 0:
|
||||||
|
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||||
|
|
||||||
|
if self.valgrind == 0:
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
|
||||||
|
|
||||||
|
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
|
||||||
|
print(cmd)
|
||||||
|
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
self.running = 1
|
||||||
|
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||||
|
|
||||||
|
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
|
||||||
|
time.sleep(5)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
if self.valgrind == 0:
|
||||||
|
toBeKilled = "taosd"
|
||||||
|
else:
|
||||||
|
toBeKilled = "valgrind.bin"
|
||||||
|
|
||||||
|
if self.running != 0:
|
||||||
|
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
for port in range(6030, 6041):
|
||||||
|
fuserCmd = "fuser -k -n tcp %d" % port
|
||||||
|
os.system(fuserCmd)
|
||||||
|
if self.valgrind:
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||||
|
|
||||||
|
def forcestop(self):
|
||||||
|
if self.valgrind == 0:
|
||||||
|
toBeKilled = "taosd"
|
||||||
|
else:
|
||||||
|
toBeKilled = "valgrind.bin"
|
||||||
|
|
||||||
|
if self.running != 0:
|
||||||
|
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
for port in range(6030, 6041):
|
||||||
|
fuserCmd = "fuser -k -n tcp %d" % port
|
||||||
|
os.system(fuserCmd)
|
||||||
|
if self.valgrind:
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||||
|
|
||||||
|
def startIP(self):
|
||||||
|
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def stopIP(self):
|
||||||
|
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
|
||||||
|
self.index, self.index)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def cfg(self, option, value):
|
||||||
|
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def getDnodeRootDir(self, index):
|
||||||
|
dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index)
|
||||||
|
return dnodeRootDir
|
||||||
|
|
||||||
|
def getDnodesRootDir(self):
|
||||||
|
dnodesRootDir = "%s/sim/psim" % (self.path)
|
||||||
|
return dnodesRootDir
|
||||||
|
|
||||||
|
|
||||||
|
class TDDnodes:
|
||||||
|
def __init__(self):
|
||||||
|
self.dnodes = []
|
||||||
|
self.dnodes.append(TDDnode(1))
|
||||||
|
self.dnodes.append(TDDnode(2))
|
||||||
|
self.dnodes.append(TDDnode(3))
|
||||||
|
self.dnodes.append(TDDnode(4))
|
||||||
|
self.dnodes.append(TDDnode(5))
|
||||||
|
self.dnodes.append(TDDnode(6))
|
||||||
|
self.dnodes.append(TDDnode(7))
|
||||||
|
self.dnodes.append(TDDnode(8))
|
||||||
|
self.dnodes.append(TDDnode(9))
|
||||||
|
self.dnodes.append(TDDnode(10))
|
||||||
|
self.simDeployed = False
|
||||||
|
|
||||||
|
def init(self, path):
|
||||||
|
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
        binPath = os.path.dirname(os.path.realpath(__file__))
        binPath = binPath + "/../../../debug/"
        tdLog.debug("binPath %s" % (binPath))
        binPath = os.path.realpath(binPath)
        tdLog.debug("binPath real path %s" % (binPath))

        # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
        # tdLog.debug(cmd)
        # os.system(cmd)

        # cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)
        # tdLog.debug("execute %s" % (cmd))

        # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)
        # tdLog.debug("execute %s" % (cmd))

        if path == "":
            # self.path = os.path.expanduser('~')
            self.path = os.path.abspath(binPath + "../../")
        else:
            self.path = os.path.realpath(path)

        for i in range(len(self.dnodes)):
            self.dnodes[i].init(self.path)

        self.sim = TDSimClient()
        self.sim.init(self.path)

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value

    def deploy(self, index):
        self.sim.setTestCluster(self.testCluster)

        if (self.simDeployed == False):
            self.sim.deploy()
            self.simDeployed = True

        self.check(index)
        self.dnodes[index - 1].setTestCluster(self.testCluster)
        self.dnodes[index - 1].setValgrind(self.valgrind)
        self.dnodes[index - 1].deploy()

    def cfg(self, index, option, value):
        self.check(index)
        self.dnodes[index - 1].cfg(option, value)

    def start(self, index):
        self.check(index)
        self.dnodes[index - 1].start()

    def stop(self, index):
        self.check(index)
        self.dnodes[index - 1].stop()

    def getDataSize(self, index):
        self.check(index)
        return self.dnodes[index - 1].getDataSize()

    def forcestop(self, index):
        self.check(index)
        self.dnodes[index - 1].forcestop()

    def startIP(self, index):
        self.check(index)

        if self.testCluster:
            self.dnodes[index - 1].startIP()

    def stopIP(self, index):
        self.check(index)

        if self.dnodes[index - 1].testCluster:
            self.dnodes[index - 1].stopIP()

    def check(self, index):
        if index < 1 or index > 10:
            tdLog.exit("index:%d should on a scale of [1, 10]" % (index))

    def stopAll(self):
        tdLog.info("stop all dnodes")
        for i in range(len(self.dnodes)):
            self.dnodes[i].stop()

        psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        if processID:
            cmd = "sudo systemctl stop taosd"
            os.system(cmd)
        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)

    def getDnodesRootDir(self):
        dnodesRootDir = "%s/sim" % (self.path)
        return dnodesRootDir

    def getSimCfgPath(self):
        return self.sim.getCfgDir()

    def getSimLogPath(self):
        return self.sim.getLogDir()

    def addSimExtraCfg(self, option, value):
        self.sim.addExtraCfg(option, value)


tdDnodes = TDDnodes()
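For orientation, a test case normally drives this singleton by dnode index. A minimal sketch of that flow, assuming an empty path so the helper resolves directories relative to the debug build (the index and the walLevel option are placeholders, not taken from this commit):

# Minimal driving sketch; dnode index 1 and the walLevel option are placeholders.
tdDnodes.init("")                    # resolve self.path relative to the debug build
tdDnodes.deploy(1)                   # prepare cfg/log/data directories for dnode 1
tdDnodes.cfg(1, "walLevel", "2")     # forward an extra option to that dnode's configuration
tdDnodes.start(1)
tdDnodes.stop(1)
tdDnodes.stopAll()                   # also sweeps leftover taosd/valgrind processes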
@ -349,7 +349,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@ -358,7 +358,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@ -465,7 +465,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@ -474,7 +474,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
@ -351,7 +351,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@ -360,7 +360,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@ -467,7 +467,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@ -476,7 +476,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
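The hunks above switch the cleanup loops from SIGKILL to SIGTERM so taosd and valgrind get a chance to shut down cleanly before the framework moves on. The same pattern could be factored into a single helper, sketched below; the helper name and the timeout are illustrative and not part of the commit:

import os
import subprocess
import time

def kill_by_name(name, sig="TERM", timeout=30):
    """Send `sig` to every process matching `name`, then poll until none remain or timeout."""
    ps_cmd = "ps -ef | grep -w %s | grep -v grep | awk '{print $2}'" % name
    deadline = time.time() + timeout
    pids = subprocess.check_output(ps_cmd, shell=True).decode("utf-8")
    while pids and time.time() < deadline:
        for pid in pids.split():
            os.system("kill -%s %s > /dev/null 2>&1" % (sig, pid))
        time.sleep(1)
        pids = subprocess.check_output(ps_cmd, shell=True).decode("utf-8")

# e.g. kill_by_name("taosd"); kill_by_name("valgrind.bin")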
@ -0,0 +1,43 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
import time
import datetime
from util.log import *


class TDSub:
    def __init__(self):
        self.consumedRows = 0
        self.consumedCols = 0

    def init(self, sub):
        self.sub = sub

    def close(self, keepProgress):
        self.sub.close(keepProgress)

    def consume(self):
        self.data = self.sub.consume()
        self.consumedRows = len(self.data)
        self.consumedCols = len(self.sub.fields)
        return self.consumedRows

    def checkRows(self, expectRows):
        if self.consumedRows != expectRows:
            tdLog.exit("consumed rows:%d != expect:%d" % (self.consumedRows, expectRows))
        tdLog.info("consumed rows:%d == expect:%d" % (self.consumedRows, expectRows))


tdSub = TDSub()
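A rough idea of how a test case might wire this helper up follows. How the underlying subscription object is obtained is an assumption here (a hypothetical conn.subscribe call); only init/consume/checkRows/close come from the class above:

# Hypothetical wiring; conn.subscribe(...) and its arguments are assumptions.
sub = conn.subscribe(True, "topic1", "select * from db.st", 1000)
tdSub.init(sub)
rows = tdSub.consume()      # pulls the next batch and records row/column counts
tdSub.checkRows(rows)       # aborts the test run via tdLog.exit on mismatch
tdSub.close(False)          # False: do not keep subscription progress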
@ -249,6 +249,7 @@ cd ../../../debug; make
./test.sh -f unique/big/maxvnodes.sim
./test.sh -f unique/big/tcp.sim

./test.sh -f unique/cluster/alter.sim
./test.sh -f unique/cluster/balance1.sim
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/cluster/balance3.sim

@ -325,7 +326,7 @@ cd ../../../debug; make
#./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
#./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
#./test.sh -f unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change.sim
@ -0,0 +1,115 @@
#!/bin/bash

# if [ $# != 4 || $# != 5 ]; then
#   echo "argument list need input : "
#   echo "  -n nodeName"
#   echo "  -s start/stop"
#   echo "  -c clear"
#   exit 1
# fi

NODE_NAME=
EXEC_OPTON=
CLEAR_OPTION="false"
while getopts "n:s:u:x:ct" arg
do
  case $arg in
    n)
      NODE_NAME=$OPTARG
      ;;
    s)
      EXEC_OPTON=$OPTARG
      ;;
    c)
      CLEAR_OPTION="clear"
      ;;
    t)
      SHELL_OPTION="true"
      ;;
    u)
      USERS=$OPTARG
      ;;
    x)
      SIGNAL=$OPTARG
      ;;
    ?)
      echo "unkown argument"
      ;;
  esac
done

SCRIPT_DIR=`dirname $0`
cd $SCRIPT_DIR/../
SCRIPT_DIR=`pwd`

IN_TDINTERNAL="community"
if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then
  cd ../../..
else
  cd ../../
fi

TAOS_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1`

if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
  BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3`
else
  BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2`
fi

BUILD_DIR=$TAOS_DIR/$BIN_DIR/build

SIM_DIR=$TAOS_DIR/sim
NODE_DIR=$SIM_DIR/$NODE_NAME
EXE_DIR=$BUILD_DIR/bin
CFG_DIR=$NODE_DIR/cfg
LOG_DIR=$NODE_DIR/log
DATA_DIR=$NODE_DIR/data
MGMT_DIR=$NODE_DIR/data/mgmt
TSDB_DIR=$NODE_DIR/data/tsdb

TAOS_CFG=$NODE_DIR/cfg/taos.cfg

echo ------------ $EXEC_OPTON $NODE_NAME

TAOS_FLAG=$SIM_DIR/tsim/flag
if [ -f "$TAOS_FLAG" ]; then
  EXE_DIR=/usr/local/bin/taos
fi

if [ "$CLEAR_OPTION" = "clear" ]; then
  echo rm -rf $MGMT_DIR $TSDB_DIR
  rm -rf $TSDB_DIR
  rm -rf $MGMT_DIR
fi

if [ "$EXEC_OPTON" = "start" ]; then
  echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR

  if [ "$SHELL_OPTION" = "true" ]; then
    TT=`date +%s`
    mkdir ${LOG_DIR}/${TT}
    nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
  else
    nohup $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
  fi

else
  #relative path
  RCFG_DIR=sim/$NODE_NAME/cfg
  PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    if [ "$SIGNAL" = "SIGKILL" ]; then
      echo try to kill by signal SIGKILL
      kill -9 $PID
    else
      echo try to kill by signal SIGINT
      kill -SIGINT $PID
    fi
    sleep 1
    PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
  done
fi
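The .sim cases further down call this script as "system sh/exec.sh -n dnodeX -s start" or "-s stop -x SIGINT". For reference, the same flags could also be driven directly from Python; the relative path below is an assumption and not part of the commit:

import os

# Illustrative invocations of the script above; path and node names are placeholders.
os.system("sh/exec.sh -n dnode1 -s start")             # start taosd for sim node dnode1
os.system("sh/exec.sh -n dnode1 -s start -t")          # start under valgrind, logging under the node's log dir
os.system("sh/exec.sh -n dnode1 -s stop -x SIGKILL")   # stop, escalating from the default SIGINT to SIGKILL
os.system("sh/exec.sh -n dnode1 -s stop -c")           # stop after clearing the node's mgmt/tsdb data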
@ -4,10 +4,6 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3

system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2

system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3

@ -16,58 +12,22 @@ system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4

system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c http -v 0
system sh/cfg.sh -n dnode2 -c http -v 1
system sh/cfg.sh -n dnode2 -c http -v 0
system sh/cfg.sh -n dnode3 -c http -v 1
system sh/cfg.sh -n dnode3 -c http -v 0

system sh/cfg.sh -n dnode1 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode2 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode3 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 20000

system sh/cfg.sh -n dnode1 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode1 -c replica -v 3
system sh/cfg.sh -n dnode2 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode2 -c replica -v 3
system sh/cfg.sh -n dnode3 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode3 -c replica -v 3

system sh/cfg.sh -n dnode1 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode2 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode3 -c sdebugFlag -v 143

system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode2 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode3 -c rpcDebugFlag -v 135

system sh/cfg.sh -n dnode1 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c tsdbDebugFlag -v 131

system sh/cfg.sh -n dnode1 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c mqttDebugFlag -v 131

system sh/cfg.sh -n dnode1 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c qdebugFlag -v 131

system sh/cfg.sh -n dnode1 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode2 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode3 -c cDebugFlag -v 135

system sh/cfg.sh -n dnode1 -c udebugFlag -v 131
system sh/cfg.sh -n dnode2 -c udebugFlag -v 131
system sh/cfg.sh -n dnode3 -c udebugFlag -v 131

system sh/cfg.sh -n dnode1 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c wdebugFlag -v 131

system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 1000000

print ============== deploy

system sh/exec.sh -n dnode1 -s start
sleep 2001
sleep 5001
sql connect

sql create dnode $hostname2

@ -112,8 +72,8 @@ print $data0_3 $data2_3

$x = $x + 1
sleep 2000
sleep 5000
if $x == 1000 then
if $x == 100000 then
return -1
endi
@ -0,0 +1,174 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4

system sh/cfg.sh -n dnode1 -c role -v 1
system sh/cfg.sh -n dnode2 -c role -v 2
system sh/cfg.sh -n dnode3 -c role -v 2
system sh/cfg.sh -n dnode4 -c role -v 2

system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10

system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode2 -c wallevel -v 2
system sh/cfg.sh -n dnode3 -c wallevel -v 2
system sh/cfg.sh -n dnode4 -c wallevel -v 2

system sh/cfg.sh -n dnode1 -c balance -v 0
system sh/cfg.sh -n dnode2 -c balance -v 0
system sh/cfg.sh -n dnode3 -c balance -v 0
system sh/cfg.sh -n dnode4 -c balance -v 0

print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sleep 3000

sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sleep 3000

print ========== step2
sql create database d1
sql create table d1.t1 (t timestamp, i int)
sql insert into d1.t1 values(now+1s, 15)
sql insert into d1.t1 values(now+2s, 14)
sql insert into d1.t1 values(now+3s, 13)
sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)

print ========== step3
sleep 2000
sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname4
system sh/exec.sh -n dnode4 -s start

sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_1 != 0 then
  return -1
endi
if $data2_2 != 1 then
  return -1
endi
if $data2_3 != 0 then
  return -1
endi
if $data2_4 != 0 then
  return -1
endi

print ========== step4
sql alter dnode 2 balance "vnode:2-dnode:3"

$x = 0
show4:
$x = $x + 1
sleep 2000
if $x == 10 then
  return -1
endi

sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 0 then
  goto show4
endi
if $data2_3 != 1 then
  goto show4
endi
if $data2_4 != 0 then
  goto show4
endi

print ========== step5
sql alter dnode 3 balance "vnode:2-dnode:4"

$x = 0
show5:
$x = $x + 1
sleep 2000
if $x == 10 then
  return -1
endi

sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 0 then
  goto show5
endi
if $data2_3 != 0 then
  goto show5
endi
if $data2_4 != 1 then
  goto show5
endi

print ========== step6
sql alter dnode 4 balance "vnode:2-dnode:2"

$x = 0
show6:
$x = $x + 1
sleep 2000
if $x == 10 then
  return -1
endi

sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 1 then
  goto show6
endi
if $data2_3 != 0 then
  goto show6
endi
if $data2_4 != 0 then
  goto show6
endi

print ========== step7
sql select * from d1.t1 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 11 then
  return -1
endi
if $data11 != 12 then
  return -1
endi
if $data21 != 13 then
  return -1
endi
if $data31 != 14 then
  return -1
endi
if $data41 != 15 then
  return -1
endi

print ========== step8
sql_error sql alter dnode 4 balance "vnode:2-dnode:5"

system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
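The new case exercises manual vnode placement: with automatic balancing disabled (balance 0), the statement alter dnode N balance "vnode:VGID-dnode:M" asks the cluster to move one vgroup, and the script then polls show dnodes until openVnodes reflects the move. A hedged Python equivalent of the same polling loop is sketched below; the connection settings, the taos connector usage, and the openVnodes column index are assumptions rather than part of the commit:

import time
import taos  # assumption: the TDengine Python connector is installed

conn = taos.connect(host="localhost")      # hypothetical connection settings
cur = conn.cursor()
cur.execute('alter dnode 2 balance "vnode:2-dnode:3"')

for _ in range(10):                        # mirrors the sim script's 10 x 2s retry loop
    time.sleep(2)
    cur.execute("show dnodes")
    rows = cur.fetchall()
    # assume column 0 is the dnode id and column 2 is openVnodes, as the sim's $data2_X suggests
    open_vnodes = {row[0]: row[2] for row in rows}
    if open_vnodes.get(3) == 1:            # the vnode has arrived on dnode 3
        break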
@ -10,8 +10,8 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
#add_executable(insertPerTable insertPerTable.c)
#target_link_libraries(insertPerTable taos_static pthread)

#add_executable(insertPerRow insertPerRow.c)
add_executable(insertPerRow insertPerRow.c)
#target_link_libraries(insertPerRow taos_static pthread)
target_link_libraries(insertPerRow taos_static pthread)

#add_executable(importOneRow importOneRow.c)
#target_link_libraries(importOneRow taos_static pthread)
@ -44,14 +44,16 @@ void createDbAndTable();
void insertData();

int32_t randomData[MAX_RANDOM_POINTS];
int64_t rowsPerTable = 10000;
int64_t rowsPerTable = 1000000000;
int64_t pointsPerTable = 1;
int64_t numOfThreads = 1;
int64_t numOfThreads = 10;
int64_t numOfTablesPerThread = 200;
int64_t numOfTablesPerThread = 100;
char dbName[32] = "db";
char stableName[64] = "st";
int32_t cache = 16;
int32_t cache = 1;
int32_t tables = 5000;
int32_t replica = 3;
int32_t days = 10;
int32_t interval = 1000;

int main(int argc, char *argv[]) {
  shellParseArgument(argc, argv);

@ -77,7 +79,7 @@ void createDbAndTable() {
    exit(1);
  }

  sprintf(qstr, "create database if not exists %s cache %d maxtables %d", dbName, cache, tables);
  sprintf(qstr, "create database if not exists %s cache %d replica %d days %d", dbName, cache, replica, days);
  pSql = taos_query(con, qstr);
  int32_t code = taos_errno(pSql);
  if (code != 0) {

@ -239,7 +241,7 @@ void *syncTest(void *param) {
  st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;

  int64_t start = 1430000000000;
  int64_t interval = 1000; // 1000 ms
  interval = 1000; // 1000 ms

  char *sql = qstr;
  char inserStr[] = "insert into";

@ -309,10 +311,14 @@ void printHelp() {
  printf("%s%s%s%" PRId64 "\n", indent, indent, "Number of threads to be used, default is ", numOfThreads);
  printf("%s%s\n", indent, "-n");
  printf("%s%s%s%" PRId64 "\n", indent, indent, "Number of tables per thread, default is ", numOfTablesPerThread);
  printf("%s%s\n", indent, "-tables");
  printf("%s%s\n", indent, "-replica");
  printf("%s%s%s%d\n", indent, indent, "Database parameters tables, default is ", tables);
  printf("%s%s%s%d\n", indent, indent, "Database parameters replica, default is ", replica);
  printf("%s%s\n", indent, "-cache");
  printf("%s%s%s%d\n", indent, indent, "Database parameters cache, default is ", cache);
  printf("%s%s%s%d\n", indent, indent, "Database parameters replica, default is ", cache);
  printf("%s%s\n", indent, "-days");
  printf("%s%s%s%d\n", indent, indent, "Database parameters days, default is ", days);
  printf("%s%s\n", indent, "-interval");
  printf("%s%s%s%d\n", indent, indent, "Interval of each rows in ms, default is ", interval);

  exit(EXIT_SUCCESS);
}

@ -336,10 +342,14 @@ void shellParseArgument(int argc, char *argv[]) {
      numOfThreads = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-n") == 0) {
      numOfTablesPerThread = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-tables") == 0) {
    } else if (strcmp(argv[i], "-replica") == 0) {
      tables = atoi(argv[++i]);
      replica = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-cache") == 0) {
      cache = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-days") == 0) {
      days = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-interval") == 0) {
      interval = atoi(argv[++i]);
    } else {
    }
  }

@ -349,7 +359,7 @@ void shellParseArgument(int argc, char *argv[]) {
  pPrint("%snumOfThreads:%" PRId64 "%s", GREEN, numOfThreads, NC);
  pPrint("%snumOfTablesPerThread:%" PRId64 "%s", GREEN, numOfTablesPerThread, NC);
  pPrint("%scache:%" PRId32 "%s", GREEN, cache, NC);
  pPrint("%stables:%" PRId32 "%s", GREEN, tables, NC);
  pPrint("%stables:%" PRId32 "%s", GREEN, replica, NC);
  pPrint("%sdbName:%s%s", GREEN, dbName, NC);
  pPrint("%stableName:%s%s", GREEN, stableName, NC);
  pPrint("%sstart to run%s", GREEN, NC);
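With the new arguments wired in above, the insertPerRow benchmark can set replication, retention, and row interval from the command line. One possible invocation is sketched below; the binary path and the parameter values are illustrative only and not taken from the commit:

import os

# Illustrative only: binary location and parameter values are assumptions.
os.system("./debug/build/bin/insertPerRow -replica 3 -days 10 -interval 1000 -cache 1 -n 100")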
@ -670,12 +670,12 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
    ret = taos_errno(pSql);

    if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) {
      simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret));
      simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret));
      ret = 0;
      break;
    } else if (ret != 0) {
      simDebug("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s",
               script->fileName, script->taos, rest, ret, tstrerror(ret), taos_errstr(pSql));
               script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret), taos_errstr(pSql));

      if (line->errorJump == SQL_JUMP_TRUE) {
        script->linePos = line->jump;

@ -691,7 +691,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
  }

  if (ret) {
    sprintf(script->error, "lineNum:%d. sql:%s failed, ret:%d:%s", line->lineNum, rest, ret, tstrerror(ret));
    sprintf(script->error, "lineNum:%d. sql:%s failed, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, tstrerror(ret));
    return false;
  }

@ -821,7 +821,7 @@ bool simExecuteRestFulSqlCommand(SScript *script, char *rest) {
    ret = simExecuteRestFulCommand(script, command);
    if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST ||
        ret == TSDB_CODE_MND_DB_ALREADY_EXIST) {
      simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret));
      simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret));
      ret = 0;
      break;
    } else if (ret != 0) {

@ -957,12 +957,12 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) {

  if (ret != TSDB_CODE_SUCCESS) {
    simDebug("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s",
             script->fileName, script->taos, rest, ret, tstrerror(ret));
             script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret));
    script->linePos++;
    return true;
  }

  sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret, tstrerror(ret));
  sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, tstrerror(ret));

  return false;
}
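The repeated change above masks the error code before logging. Assuming the usual TDengine encoding, where taos_errno() returns a 32-bit code with the high bit set to flag an error and the module/detail packed below it, ret & 0XFFFF keeps only the low 16-bit detail, so the log shows a small positive code instead of a large negative %d value. A tiny illustration with a made-up sample value:

# Hypothetical error value; only illustrates the effect of the 0xFFFF mask used in the diff.
ret = 0x80000305        # as a signed int32 in C this would print as a negative number
print(ret & 0xFFFF)     # -> 773 (0x0305), the short code that ends up in the log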