Merge remote-tracking branch 'origin/develop' into feature/crash_gen

Steven Li 2020-07-29 21:58:42 +00:00
commit 954861185f
45 changed files with 1671 additions and 1559 deletions

View File

@ -52,7 +52,8 @@ matrix:
cd ${TRAVIS_BUILD_DIR}/debug
make install > /dev/null || travis_terminate $?
pip3 install numpy
py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev
pip3 install psutil
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
cd ${TRAVIS_BUILD_DIR}/tests

View File

@ -4,7 +4,7 @@ TDengine requires that a separate table be created for each data collection point. This independent-table model can avoid…
## What Is a Super Table (STable)
An STable is the abstraction of data collection points of the same type: it is the set of collection instances of one type and contains multiple subtables with an identical data structure. Each STable defines the table schema and a set of tags for its subtables: the schema is the set of data columns recorded in the table together with their data types; the tag names and tag data types are defined by the STable, while the tag values record the static information of each subtable and are used to group and filter the subtables. A subtable is essentially an ordinary table, made up of a timestamp primary key and a number of data columns, and each row records concrete data; querying a subtable works exactly the same as querying an ordinary table. The difference is that every subtable belongs to one super table and carries a set of tag values defined by that STable. One STable can be defined for each type of collection device. The data model defines the type of every data column, such as temperature, pressure, voltage, current, or real-time GPS position, whereas the tag information is metadata, such as the serial number, model, or location of the collection device; it is static and constitutes the table's metadata. When creating a table (data collection point), besides specifying its STable (collection type), the user can also specify the tag values, which may be added or modified afterwards.
A super table is the abstraction of data collection points of the same type: it is the set of collection instances of one type and contains multiple subtables with an identical data structure. Each STable defines the table schema and a set of tags for its subtables: the schema is the set of data columns recorded in the table together with their data types; the tag names and tag data types are defined by the STable, while the tag values record the static information of each subtable and are used to group and filter the subtables. A subtable is essentially an ordinary table, made up of a timestamp primary key and a number of data columns, and each row records concrete data; querying a subtable works exactly the same as querying an ordinary table. The difference is that every subtable belongs to one super table and carries a set of tag values defined by that STable. One STable can be defined for each type of collection device. The data model defines the type of every data column, such as temperature, pressure, voltage, current, or real-time GPS position, whereas the tag information is metadata, such as the serial number, model, or location of the collection device; it is static and constitutes the table's metadata. When creating a table (data collection point), besides specifying its STable (collection type), the user can also specify the tag values, which may be added or modified afterwards.
TDengine extends the standard SQL syntax to define an STable, using the keyword tags to specify the tag information. The syntax is as follows:
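
The hunk ends just before the syntax block itself. As a minimal illustrative sketch only (the table, column, and tag names below are hypothetical and not part of this commit), the tags keyword is used along these lines:

-- hypothetical example: define an STable, then create a subtable bound to it
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT) TAGS (location BINARY(20), groupid INT);
CREATE TABLE d1001 USING meters TAGS ('Beijing.Chaoyang', 2);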

View File

@ -16,9 +16,9 @@
All TDengine executables are stored by default in the _/usr/local/taos/bin_ directory. They include:
- _taosd_: the TDengine server executable
- _taos_: the TDengine shell executable
- _taosdump_: the data export tool
- _taosd_: the TDengine server executable
- _taos_: the TDengine shell executable
- _taosdump_: the data export tool
- *rmtaos*: a script that uninstalls TDengine. It removes all program files and data files; run it with great caution and avoid it unless strictly necessary.
You can configure different data and log directories by modifying the system configuration file taos.cfg.

View File

@ -11,9 +11,6 @@
# second full-qualified domain name (FQDN) for TDengine system, for cluster edition only
# second cluster_hostname2:6030
# the arbitrator's full-qualified domain name (FQDN) for TDengine system, for cluster edition only
# arbitrator arbitrator_hostname:6030
# the full-qualified domain name (FQDN) of dnode
# fqdn hostname
@ -23,45 +20,57 @@
# http service port, default tcp [6020]
# httpPort 6020
# data file's directory
# dataDir /var/lib/taos
# log file's directory
# logDir /var/log/taos
# number of management nodes in the system
# numOfMnodes 3
# scriptDir file's directory
# scriptDir /var/log/taos
# optional roles for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# data file's directory
# dataDir /var/lib/taos
# the arbitrator's full-qualified domain name (FQDN) for TDengine system, for cluster edition only
# arbitrator arbitrator_hostname:6030
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# max number of vgroups per db
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# the ratio of threads responsible for querying in the total thread
# ratioOfQueryThreads 0.5
# interval of DNode report status to MNode, unit is Second, for cluster version only
# statusInterval 1
# number of management nodes in the system
# numOfMnodes 3
# interval of Shell send HB to MNode, unit is Second
# shellActivityTimer 3
# if backup vnode directory when remove dnode
# vnodeBak 1
# Whether to start load balancing
# balance 1
# optional roles for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control block
# maxTmrCtrl 512
# interval of system monitor
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster version only
# offlineThreshold 8640000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds
# rpcMaxTime 600
# interval of DNode report status to MNode, unit is Second, for cluster version only
# statusInterval 1
# interval of Shell send HB to MNode, unit is Second
# shellActivityTimer 3
# duration for which tableMeta is kept in cache, seconds
# tableMetaKeepTimer 7200
@ -71,38 +80,38 @@
# Time window minimum
# minIntervalTime 10
# max length of an SQL
# maxSQLLength 65480
# the max allowed delayed time for launching continuous query. 20ms by default
# maxStreamCompDelay 20000
# Support the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# The minimum time to wait before the first stream execution
# maxFirstStreamCompDelay 10000
# system locale
# locale en_US.UTF-8
# Retry wait time benchmark
# retryStreamCompDelay 10
# default system charset
# charset UTF-8
# the delayed time for launching each continuous query. 10% of the whole computing time window by default.
# streamCompDelayRatio 0.1
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# max number of vgroups per db
# maxVgroupsPerDb 0
# set write ahead log (WAL) level
# walLevel 1
# max number of tables per vnode
# maxTablesPerVnode 1000000
# enable/disable async log
# asyncLog 1
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
# compressMsgSize -1
# Step size of increasing table number in vnode
# tableIncStepPerVnode 1000
# cache block size (Mbyte)
# cache 16
# cache 16
# number of cache blocks per vnode
# blocks 4
# blocks 6
# number of days per DB file
# days 10
# number of days to keep DB file
# keep 3650
# min row of records in file block
# minRows 100
@ -111,17 +120,44 @@
# maxRows 4096
# enable/disable compression
# comp 1
# comp 2
# number of days per DB file
# days 10
# set write ahead log (WAL) level
# walLevel 1
# number of days to keep DB file
# keep 3650
# When walLevel is set to 2, the cycle of fsync is executed
# fsync 3000
# number of replications, for cluster version only
# replica 1
# mqtt uri
# mqttBrokerAddress mqtt://username:password@hostname:1883/taos/
# mqtt client name
# mqttBrokerClientId taos_mqtt
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
# compressMsgSize -1
# max length of an SQL
# maxSQLLength 65480
# Support the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# max number of connections from client for dnode
# maxShellConns 5000
@ -137,50 +173,36 @@
# Stop writing data when the disk size of the log folder is less than this value
# minimalDataDirGB 0.1
# number of seconds allowed for a dnode to be offline, for cluster version only
# offlineThreshold 8640000
# start http service
# http 1
# start system monitor module
# monitor 1
# start mqtt service
# mqtt 0
# mqtt uri
# mqttBrokerAddress mqtt://username:password@hostname:1883/taos/
# start system monitor module
# monitor 1
# mqtt client name
# mqttBrokerClientId taos_mqtt
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# Record the SQL through restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
# httpMaxThreads 2
# The minimum time to wait before the first stream execution
# maxFirstStreamCompDelay 10000
# Retry wait time benchmark
# retryStreamCompDelay 10
# the delayed time for launching each continuous query. 10% of the whole computing time window by default.
# streamCompDelayRatio 0.1
# the max allowed delayed time for launching continuous query. 20ms by default
# maxStreamCompDelay 20000
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# The following parameter is used to limit the maximum number of lines in log files.
# max number of rows per log filters
# numOfLogLines 10000000
# enable/disable async log
# asyncLog 1
# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error, 135: output info, warning and error to log.
# 199: output debug, info, warning and error to both screen and file
# 131: output warning and error, 135: output debug, warning and error, 143 : output trace, debug, warning and error to log.
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
@ -236,9 +258,6 @@
# debug flag for http server
# tsdbDebugFlag 131
# Record the SQL through restful interface
# httpEnableRecordSql 0
# Record the SQL in taos client
# tscEnableRecordSql 0

View File

@ -60,15 +60,14 @@ typedef struct SCMCorVgroupInfo {
} SCMCorVgroupInfo;
typedef struct STableMeta {
STableComInfo tableInfo;
uint8_t tableType;
int16_t sversion;
int16_t tversion;
STableComInfo tableInfo;
uint8_t tableType;
int16_t sversion;
int16_t tversion;
SCMVgroupInfo vgroupInfo;
SCMCorVgroupInfo corVgroupInfo;
int32_t sid; // the index of one table in a virtual node
uint64_t uid; // unique id of a table
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
STableId id;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta;
typedef struct STableMetaInfo {

View File

@ -629,8 +629,8 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int3
}
static void tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
pBlocks->tid = pTableMeta->sid;
pBlocks->uid = pTableMeta->uid;
pBlocks->tid = pTableMeta->id.tid;
pBlocks->uid = pTableMeta->id.uid;
pBlocks->sversion = pTableMeta->sversion;
pBlocks->numOfRows += numOfRows;
}
@ -686,7 +686,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pTableList, pCmd->pDataBlocks, pTableMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE,
int32_t ret = tscGetDataBlockFromList(pTableList, pCmd->pDataBlocks, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name,
pTableMeta, &dataBuf);
if (ret != TSDB_CODE_SUCCESS) {

View File

@ -635,7 +635,7 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
if (pTableMetaInfo->pTableMeta->uid == uid) {
if (pTableMetaInfo->pTableMeta->id.uid == uid) {
tableIndex = i;
break;
}
@ -3053,7 +3053,7 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
SSchema* pTagSchema1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
pLeft->uid = pTableMetaInfo->pTableMeta->uid;
pLeft->uid = pTableMetaInfo->pTableMeta->id.uid;
pLeft->tagColId = pTagSchema1->colId;
strcpy(pLeft->tableId, pTableMetaInfo->name);
@ -3065,7 +3065,7 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
SSchema* pTagSchema2 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
pRight->uid = pTableMetaInfo->pTableMeta->uid;
pRight->uid = pTableMetaInfo->pTableMeta->id.uid;
pRight->tagColId = pTagSchema2->colId;
strcpy(pRight->tableId, pTableMetaInfo->name);
@ -3603,7 +3603,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableCondIndex);
STagCond* pTagCond = &pQueryInfo->tagCond;
pTagCond->tbnameCond.uid = pTableMetaInfo->pTableMeta->uid;
pTagCond->tbnameCond.uid = pTableMetaInfo->pTableMeta->id.uid;
assert(pExpr->nSQLOptr == TK_LIKE || pExpr->nSQLOptr == TK_IN);
@ -3840,7 +3840,7 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
// add to source column list
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
int64_t uid = pTableMetaInfo->pTableMeta->uid;
int64_t uid = pTableMetaInfo->pTableMeta->id.uid;
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
size_t num = taosArrayGetSize(colList);
@ -4506,8 +4506,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload;
pUpdateMsg->head.vgId = htonl(pTableMeta->vgroupInfo.vgId);
pUpdateMsg->tid = htonl(pTableMeta->sid);
pUpdateMsg->uid = htobe64(pTableMeta->uid);
pUpdateMsg->tid = htonl(pTableMeta->id.tid);
pUpdateMsg->uid = htobe64(pTableMeta->id.uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
pUpdateMsg->type = pTagsSchema->type;
pUpdateMsg->bytes = htons(pTagsSchema->bytes);
@ -4992,6 +4992,7 @@ static void setCreateDBOption(SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
pMsg->compression = pCreateDb->compressionLevel;
pMsg->walLevel = (char)pCreateDb->walLevel;
pMsg->replications = pCreateDb->replica;
pMsg->quorum = pCreateDb->quorum;
pMsg->ignoreExist = pCreateDb->ignoreExists;
}
@ -5045,7 +5046,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClau
if (pExpr->functionId != TSDB_FUNC_TAG) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
int16_t columnInfo = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->uid);
int16_t columnInfo = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
SColumnIndex index = {.tableIndex = 0, .columnIndex = columnInfo};
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
@ -5522,6 +5523,13 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
if (pCreate->quorum != -1 &&
(pCreate->quorum < TSDB_MIN_DB_REPLICA_OPTION || pCreate->quorum > TSDB_MAX_DB_REPLICA_OPTION)) {
snprintf(msg, tListLen(msg), "invalid db option quorum: %d valid range: [%d, %d]", pCreate->quorum,
TSDB_MIN_DB_REPLICA_OPTION, TSDB_MAX_DB_REPLICA_OPTION);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
int32_t val = htonl(pCreate->daysPerFile);
if (val != -1 && (val < TSDB_MIN_DAYS_PER_FILE || val > TSDB_MAX_DAYS_PER_FILE)) {
snprintf(msg, tListLen(msg), "invalid db option daysPerFile: %d valid range: [%d, %d]", val,

View File

@ -162,8 +162,8 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
.numOfColumns = pTableMetaMsg->numOfColumns,
};
pTableMeta->sid = pTableMetaMsg->sid;
pTableMeta->uid = pTableMetaMsg->uid;
pTableMeta->id.tid = pTableMetaMsg->sid;
pTableMeta->id.uid = pTableMetaMsg->uid;
pTableMeta->vgroupInfo = pTableMetaMsg->vgroup;
tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, &pTableMeta->vgroupInfo);

View File

@ -605,9 +605,9 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
}
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
pTableIdInfo->tid = htonl(pTableMeta->sid);
pTableIdInfo->uid = htobe64(pTableMeta->uid);
pTableIdInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pTableMeta->uid, dfltKey));
pTableIdInfo->tid = htonl(pTableMeta->id.tid);
pTableIdInfo->uid = htobe64(pTableMeta->id.uid);
pTableIdInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pTableMeta->id.uid, dfltKey));
pQueryMsg->numOfTables = htonl(1); // set the number of tables
pMsg += sizeof(STableIdInfo);
@ -640,7 +640,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
}
tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), pTableMetaInfo->name,
pTableMeta->sid, pTableMeta->uid);
pTableMeta->id.tid, pTableMeta->id.uid);
return pMsg;
}
@ -714,8 +714,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (pCol->colIndex.columnIndex >= tscGetNumOfColumns(pTableMeta) || pColSchema->type < TSDB_DATA_TYPE_BOOL ||
pColSchema->type > TSDB_DATA_TYPE_NCHAR) {
tscError("%p sid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s",
pSql, pTableMeta->sid, pTableMeta->uid, pTableMetaInfo->name, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
tscError("%p tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s",
pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
@ -833,8 +833,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if ((pCol->colIndex.columnIndex >= numOfTagColumns || pCol->colIndex.columnIndex < -1) ||
(pColSchema->type < TSDB_DATA_TYPE_BOOL || pColSchema->type > TSDB_DATA_TYPE_NCHAR)) {
tscError("%p sid:%d uid:%" PRIu64 " id:%s, tag index out of range, totalCols:%d, numOfTags:%d, index:%d, column name:%s",
pSql, pTableMeta->sid, pTableMeta->uid, pTableMetaInfo->name, total, numOfTagColumns,
tscError("%p tid:%d uid:%" PRIu64 " id:%s, tag index out of range, totalCols:%d, numOfTags:%d, index:%d, column name:%s",
pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, total, numOfTagColumns,
pCol->colIndex.columnIndex, pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
@ -855,7 +855,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0) {
STagCond* pTagCond = &pQueryInfo->tagCond;
SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->uid);
SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid);
if (pCond != NULL && pCond->cond != NULL) {
pQueryMsg->tagCondLen = htons(pCond->len);
memcpy(pMsg, pCond->cond, pCond->len);
@ -1739,7 +1739,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
tscDebug("%p recv table meta, uid:%"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->uid, pTableMeta->sid, pTableMetaInfo->name);
tscDebug("%p recv table meta, uid:%"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid, pTableMetaInfo->name);
free(pTableMeta);
return TSDB_CODE_SUCCESS;
@ -2215,7 +2215,7 @@ int tscRenewTableMeta(SSqlObj *pSql, char *tableId) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pTableMetaInfo->pTableMeta) {
tscDebug("%p update table meta, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql,
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->uid, pTableMeta);
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid, pTableMeta);
}
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);

View File

@ -241,7 +241,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
SSubscriptionProgress target = {.uid = pTableMeta->uid, .key = 0};
SSubscriptionProgress target = {.uid = pTableMeta->id.uid, .key = 0};
SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress);
if (p == NULL) {
taosArrayClear(pSub->progress);

View File

@ -180,7 +180,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
pSupporter->limit = pQueryInfo->limit;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, index);
pSupporter->uid = pTableMetaInfo->pTableMeta->uid;
pSupporter->uid = pTableMetaInfo->pTableMeta->id.uid;
assert (pSupporter->uid != 0);
getTmpfilePath("join-", pSupporter->path);
@ -355,7 +355,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
// set the join condition tag column info, to do extract method
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
assert(pQueryInfo->tagCond.joinInfo.hasJoin);
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->uid);
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
pExpr->param[0].i64Key = colId;
pExpr->numOfParams = 1;
@ -499,7 +499,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
// set the tags value for ts_comp function
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, 0);
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->uid);
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid);
pExpr->param->i64Key = tagColId;
pExpr->numOfParams = 1;
}
@ -560,7 +560,7 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
qsort(p2->pIdTagList, p2->num, p2->tagSize, tscCompareTidTags);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int16_t tagColId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->uid);
int16_t tagColId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
SSchema* pColSchema = tscGetTableColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId);
@ -1034,7 +1034,7 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
int32_t tableIndexOfSub = -1;
for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, j);
if (pTableMetaInfo->pTableMeta->uid == pExpr->uid) {
if (pTableMetaInfo->pTableMeta->id.uid == pExpr->uid) {
tableIndexOfSub = j;
break;
}
@ -1205,7 +1205,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
STagCond* pTagCond = &pSupporter->tagCond;
assert(pTagCond->joinInfo.hasJoin);
int32_t tagColId = tscGetJoinTagColIdByUid(pTagCond, pTableMetaInfo->pTableMeta->uid);
int32_t tagColId = tscGetJoinTagColIdByUid(pTagCond, pTableMetaInfo->pTableMeta->id.uid);
SSchema* s = tscGetTableColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId);
int16_t bytes = 0;
@ -1237,7 +1237,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->uid);
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid);
pExpr->param->i64Key = tagColId;
pExpr->numOfParams = 1;
}

View File

@ -955,7 +955,7 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
pExpr->interBytes = interSize;
if (pTableMetaInfo->pTableMeta) {
pExpr->uid = pTableMetaInfo->pTableMeta->uid;
pExpr->uid = pTableMetaInfo->pTableMeta->id.uid;
}
return pExpr;
@ -1482,7 +1482,7 @@ STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, i
int32_t k = -1;
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
if (pQueryInfo->pTableMetaInfo[i]->pTableMeta->uid == uid) {
if (pQueryInfo->pTableMetaInfo[i]->pTableMeta->id.uid == uid) {
k = i;
break;
}
@ -1760,7 +1760,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_SUBQUERY);// it must be the subquery
}
uint64_t uid = pTableMetaInfo->pTableMeta->uid;
uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
tscSqlExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true);
int32_t numOfOutput = tscSqlExprNumOfExprs(pNewQueryInfo);

View File

@ -80,6 +80,7 @@ extern int16_t tsCompression;
extern int16_t tsWAL;
extern int32_t tsFsyncPeriod;
extern int32_t tsReplications;
extern int32_t tsQuorum;
// balance
extern int32_t tsEnableBalance;

View File

@ -109,6 +109,7 @@ int16_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = 100;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
@ -742,6 +743,16 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "quorum";
cfg.ptr = &tsQuorum;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_DB_REPLICA_OPTION;
cfg.maxValue = TSDB_MAX_DB_REPLICA_OPTION;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqttBrokerAddress";
cfg.ptr = tsMqttBrokerAddress;
cfg.valType = TAOS_CFG_VTYPE_STRING;
@ -1338,4 +1349,4 @@ bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *d
}
return true;
}
}

View File

@ -38,6 +38,8 @@ typedef struct {
typedef struct {
SRspRet rspRet;
int32_t processedCount;
int32_t code;
void *pCont;
int32_t contLen;
SRpcMsg rpcMsg;
@ -187,13 +189,16 @@ void dnodeFreeVnodeWqueue(void *wqueue) {
void dnodeSendRpcVnodeWriteRsp(void *pVnode, void *param, int32_t code) {
SWriteMsg *pWrite = (SWriteMsg *)param;
if (code > 0) return;
if (code < 0) pWrite->code = code;
int32_t count = atomic_add_fetch_32(&pWrite->processedCount, 1);
if (count <= 1) return;
SRpcMsg rpcRsp = {
.handle = pWrite->rpcMsg.handle,
.pCont = pWrite->rspRet.rsp,
.contLen = pWrite->rspRet.len,
.code = code,
.code = pWrite->code,
};
rpcSendResponse(&rpcRsp);
@ -239,7 +244,10 @@ static void *dnodeProcessWriteQueue(void *param) {
}
int32_t code = vnodeProcessWrite(pVnode, type, pHead, pRspRet);
if (pWrite) pWrite->rpcMsg.code = code;
if (pWrite) {
pWrite->rpcMsg.code = code;
if (code <= 0) pWrite->processedCount = 1;
}
}
walFsync(vnodeGetWal(pVnode));

View File

@ -338,6 +338,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_MIN_DB_REPLICA_OPTION 1
#define TSDB_MAX_DB_REPLICA_OPTION 3
#define TSDB_DEFAULT_DB_REPLICA_OPTION 1
#define TSDB_DEFAULT_DB_QUORUM_OPTION 1
#define TSDB_MAX_JOIN_TABLE_NUM 5
#define TSDB_MAX_UNION_CLAUSE 5

View File

@ -522,6 +522,7 @@ typedef struct {
int8_t compression;
int8_t walLevel;
int8_t replications;
int8_t quorum;
int8_t ignoreExist;
} SCMCreateDbMsg, SCMAlterDbMsg;

View File

@ -104,123 +104,125 @@
#define TK_MAXTABLES 86
#define TK_CACHE 87
#define TK_REPLICA 88
#define TK_DAYS 89
#define TK_MINROWS 90
#define TK_MAXROWS 91
#define TK_BLOCKS 92
#define TK_CTIME 93
#define TK_WAL 94
#define TK_FSYNC 95
#define TK_COMP 96
#define TK_PRECISION 97
#define TK_LP 98
#define TK_RP 99
#define TK_TAGS 100
#define TK_USING 101
#define TK_AS 102
#define TK_COMMA 103
#define TK_NULL 104
#define TK_SELECT 105
#define TK_UNION 106
#define TK_ALL 107
#define TK_FROM 108
#define TK_VARIABLE 109
#define TK_INTERVAL 110
#define TK_FILL 111
#define TK_SLIDING 112
#define TK_ORDER 113
#define TK_BY 114
#define TK_ASC 115
#define TK_DESC 116
#define TK_GROUP 117
#define TK_HAVING 118
#define TK_LIMIT 119
#define TK_OFFSET 120
#define TK_SLIMIT 121
#define TK_SOFFSET 122
#define TK_WHERE 123
#define TK_NOW 124
#define TK_RESET 125
#define TK_QUERY 126
#define TK_ADD 127
#define TK_COLUMN 128
#define TK_TAG 129
#define TK_CHANGE 130
#define TK_SET 131
#define TK_KILL 132
#define TK_CONNECTION 133
#define TK_STREAM 134
#define TK_COLON 135
#define TK_ABORT 136
#define TK_AFTER 137
#define TK_ATTACH 138
#define TK_BEFORE 139
#define TK_BEGIN 140
#define TK_CASCADE 141
#define TK_CLUSTER 142
#define TK_CONFLICT 143
#define TK_COPY 144
#define TK_DEFERRED 145
#define TK_DELIMITERS 146
#define TK_DETACH 147
#define TK_EACH 148
#define TK_END 149
#define TK_EXPLAIN 150
#define TK_FAIL 151
#define TK_FOR 152
#define TK_IGNORE 153
#define TK_IMMEDIATE 154
#define TK_INITIALLY 155
#define TK_INSTEAD 156
#define TK_MATCH 157
#define TK_KEY 158
#define TK_OF 159
#define TK_RAISE 160
#define TK_REPLACE 161
#define TK_RESTRICT 162
#define TK_ROW 163
#define TK_STATEMENT 164
#define TK_TRIGGER 165
#define TK_VIEW 166
#define TK_COUNT 167
#define TK_SUM 168
#define TK_AVG 169
#define TK_MIN 170
#define TK_MAX 171
#define TK_FIRST 172
#define TK_LAST 173
#define TK_TOP 174
#define TK_BOTTOM 175
#define TK_STDDEV 176
#define TK_PERCENTILE 177
#define TK_APERCENTILE 178
#define TK_LEASTSQUARES 179
#define TK_HISTOGRAM 180
#define TK_DIFF 181
#define TK_SPREAD 182
#define TK_TWA 183
#define TK_INTERP 184
#define TK_LAST_ROW 185
#define TK_RATE 186
#define TK_IRATE 187
#define TK_SUM_RATE 188
#define TK_SUM_IRATE 189
#define TK_AVG_RATE 190
#define TK_AVG_IRATE 191
#define TK_TBID 192
#define TK_SEMI 193
#define TK_NONE 194
#define TK_PREV 195
#define TK_LINEAR 196
#define TK_IMPORT 197
#define TK_METRIC 198
#define TK_TBNAME 199
#define TK_JOIN 200
#define TK_METRICS 201
#define TK_STABLE 202
#define TK_INSERT 203
#define TK_INTO 204
#define TK_VALUES 205
#define TK_QUORUM 89
#define TK_DAYS 90
#define TK_MINROWS 91
#define TK_MAXROWS 92
#define TK_BLOCKS 93
#define TK_CTIME 94
#define TK_WAL 95
#define TK_FSYNC 96
#define TK_COMP 97
#define TK_PRECISION 98
#define TK_LP 99
#define TK_RP 100
#define TK_TAGS 101
#define TK_USING 102
#define TK_AS 103
#define TK_COMMA 104
#define TK_NULL 105
#define TK_SELECT 106
#define TK_UNION 107
#define TK_ALL 108
#define TK_FROM 109
#define TK_VARIABLE 110
#define TK_INTERVAL 111
#define TK_FILL 112
#define TK_SLIDING 113
#define TK_ORDER 114
#define TK_BY 115
#define TK_ASC 116
#define TK_DESC 117
#define TK_GROUP 118
#define TK_HAVING 119
#define TK_LIMIT 120
#define TK_OFFSET 121
#define TK_SLIMIT 122
#define TK_SOFFSET 123
#define TK_WHERE 124
#define TK_NOW 125
#define TK_RESET 126
#define TK_QUERY 127
#define TK_ADD 128
#define TK_COLUMN 129
#define TK_TAG 130
#define TK_CHANGE 131
#define TK_SET 132
#define TK_KILL 133
#define TK_CONNECTION 134
#define TK_STREAM 135
#define TK_COLON 136
#define TK_ABORT 137
#define TK_AFTER 138
#define TK_ATTACH 139
#define TK_BEFORE 140
#define TK_BEGIN 141
#define TK_CASCADE 142
#define TK_CLUSTER 143
#define TK_CONFLICT 144
#define TK_COPY 145
#define TK_DEFERRED 146
#define TK_DELIMITERS 147
#define TK_DETACH 148
#define TK_EACH 149
#define TK_END 150
#define TK_EXPLAIN 151
#define TK_FAIL 152
#define TK_FOR 153
#define TK_IGNORE 154
#define TK_IMMEDIATE 155
#define TK_INITIALLY 156
#define TK_INSTEAD 157
#define TK_MATCH 158
#define TK_KEY 159
#define TK_OF 160
#define TK_RAISE 161
#define TK_REPLACE 162
#define TK_RESTRICT 163
#define TK_ROW 164
#define TK_STATEMENT 165
#define TK_TRIGGER 166
#define TK_VIEW 167
#define TK_COUNT 168
#define TK_SUM 169
#define TK_AVG 170
#define TK_MIN 171
#define TK_MAX 172
#define TK_FIRST 173
#define TK_LAST 174
#define TK_TOP 175
#define TK_BOTTOM 176
#define TK_STDDEV 177
#define TK_PERCENTILE 178
#define TK_APERCENTILE 179
#define TK_LEASTSQUARES 180
#define TK_HISTOGRAM 181
#define TK_DIFF 182
#define TK_SPREAD 183
#define TK_TWA 184
#define TK_INTERP 185
#define TK_LAST_ROW 186
#define TK_RATE 187
#define TK_IRATE 188
#define TK_SUM_RATE 189
#define TK_SUM_IRATE 190
#define TK_AVG_RATE 191
#define TK_AVG_IRATE 192
#define TK_TBID 193
#define TK_SEMI 194
#define TK_NONE 195
#define TK_PREV 196
#define TK_LINEAR 197
#define TK_IMPORT 198
#define TK_METRIC 199
#define TK_TBNAME 200
#define TK_JOIN 201
#define TK_METRICS 202
#define TK_STABLE 203
#define TK_INSERT 204
#define TK_INTO 205
#define TK_VALUES 206
#define TK_SPACE 300
#define TK_COMMENT 301

View File

@ -171,6 +171,7 @@ typedef struct {
int8_t compression;
int8_t walLevel;
int8_t replications;
int8_t quorum;
int8_t reserved[12];
} SDbCfg;

View File

@ -301,6 +301,12 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
return TSDB_CODE_MND_INVALID_DB_OPTION;
}
if (pCfg->quorum < TSDB_MIN_DB_REPLICA_OPTION || pCfg->quorum > TSDB_MAX_DB_REPLICA_OPTION) {
mError("invalid db option quorum:%d valid range: [%d, %d]", pCfg->quorum, TSDB_MIN_DB_REPLICA_OPTION,
TSDB_MAX_DB_REPLICA_OPTION);
return TSDB_CODE_MND_INVALID_DB_OPTION;
}
return TSDB_CODE_SUCCESS;
}
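
A sketch of how this new check would surface at the SQL level, assuming the client-side message format shown earlier in this diff (the database name is hypothetical); quorum is validated against the replica bounds TSDB_MIN_DB_REPLICA_OPTION..TSDB_MAX_DB_REPLICA_OPTION, i.e. [1, 3]:

-- hypothetical example
CREATE DATABASE demo REPLICA 3 QUORUM 2;  -- accepted: 2 lies inside [1, 3]
CREATE DATABASE demo REPLICA 3 QUORUM 5;  -- rejected: "invalid db option quorum: 5 valid range: [1, 3]"
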
@ -320,6 +326,7 @@ static void mnodeSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->compression < 0) pCfg->compression = tsCompression;
if (pCfg->walLevel < 0) pCfg->walLevel = tsWAL;
if (pCfg->replications < 0) pCfg->replications = tsReplications;
if (pCfg->quorum < 0) pCfg->quorum = tsQuorum;
}
static int32_t mnodeCreateDbCb(SMnodeMsg *pMsg, int32_t code) {
@ -369,7 +376,8 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMs
.precision = pCreate->precision,
.compression = pCreate->compression,
.walLevel = pCreate->walLevel,
.replications = pCreate->replications
.replications = pCreate->replications,
.quorum = pCreate->quorum
};
mnodeSetDefaultDbCfg(&pDb->cfg);
@ -508,6 +516,12 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 2;
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
strcpy(pSchema[cols].name, "quorum");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 2;
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
strcpy(pSchema[cols].name, "days");
@ -654,6 +668,10 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
*(int16_t *)pWrite = pDb->cfg.replications;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int16_t *)pWrite = pDb->cfg.quorum;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int16_t *)pWrite = pDb->cfg.daysPerFile;
cols++;
@ -803,6 +821,7 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) {
int8_t compression = pAlter->compression;
int8_t walLevel = pAlter->walLevel;
int8_t replications = pAlter->replications;
int8_t quorum = pAlter->quorum;
int8_t precision = pAlter->precision;
terrno = TSDB_CODE_SUCCESS;
@ -901,6 +920,11 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) {
}
}
if (quorum >= 0 && quorum != pDb->cfg.quorum) {
mDebug("db:%s, quorum:%d change to %d", pDb->name, pDb->cfg.quorum, quorum);
newCfg.quorum = quorum;
}
return newCfg;
}

View File

@ -784,7 +784,7 @@ static SMDCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) {
pCfg->walLevel = pDb->cfg.walLevel;
pCfg->replications = (int8_t) pVgroup->numOfVnodes;
pCfg->wals = 3;
pCfg->quorum = 1;
pCfg->quorum = pDb->cfg.quorum;
SMDVnodeDesc *pNodes = pVnode->nodes;
for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) {

View File

@ -119,6 +119,7 @@ typedef struct SCreateDBInfo {
int32_t fsyncPeriod;
int64_t commitTime;
int32_t walLevel;
int32_t quorum;
int32_t compressionLevel;
SSQLToken precision;
bool ignoreExists;

View File

@ -215,6 +215,7 @@ keep(Y) ::= KEEP tagitemlist(X). { Y = X; }
tables(Y) ::= MAXTABLES INTEGER(X). { Y = X; }
cache(Y) ::= CACHE INTEGER(X). { Y = X; }
replica(Y) ::= REPLICA INTEGER(X). { Y = X; }
quorum(Y) ::= QUORUM INTEGER(X). { Y = X; }
days(Y) ::= DAYS INTEGER(X). { Y = X; }
minrows(Y) ::= MINROWS INTEGER(X). { Y = X; }
maxrows(Y) ::= MAXROWS INTEGER(X). { Y = X; }
@ -231,6 +232,7 @@ db_optr(Y) ::= . {setDefaultCreateDbOption(&Y);}
db_optr(Y) ::= db_optr(Z) tables(X). { Y = Z; Y.maxTablesPerVnode = strtol(X.z, NULL, 10); }
db_optr(Y) ::= db_optr(Z) cache(X). { Y = Z; Y.cacheBlockSize = strtol(X.z, NULL, 10); }
db_optr(Y) ::= db_optr(Z) replica(X). { Y = Z; Y.replica = strtol(X.z, NULL, 10); }
db_optr(Y) ::= db_optr(Z) quorum(X). { Y = Z; Y.quorum = strtol(X.z, NULL, 10); }
db_optr(Y) ::= db_optr(Z) days(X). { Y = Z; Y.daysPerFile = strtol(X.z, NULL, 10); }
db_optr(Y) ::= db_optr(Z) minrows(X). { Y = Z; Y.minRowsPerBlock = strtod(X.z, NULL); }
db_optr(Y) ::= db_optr(Z) maxrows(X). { Y = Z; Y.maxRowsPerBlock = strtod(X.z, NULL); }
@ -246,6 +248,7 @@ db_optr(Y) ::= db_optr(Z) keep(X). { Y = Z; Y.keep = X; }
alter_db_optr(Y) ::= . { setDefaultCreateDbOption(&Y);}
alter_db_optr(Y) ::= alter_db_optr(Z) replica(X). { Y = Z; Y.replica = strtol(X.z, NULL, 10); }
alter_db_optr(Y) ::= alter_db_optr(Z) quorum(X). { Y = Z; Y.quorum = strtol(X.z, NULL, 10); }
alter_db_optr(Y) ::= alter_db_optr(Z) tables(X). { Y = Z; Y.maxTablesPerVnode = strtol(X.z, NULL, 10); }
alter_db_optr(Y) ::= alter_db_optr(Z) keep(X). { Y = Z; Y.keep = X; }
alter_db_optr(Y) ::= alter_db_optr(Z) blocks(X). { Y = Z; Y.numOfBlocks = strtol(X.z, NULL, 10); }
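
Together with the QUORUM keyword added to the token table and keyword list, these rules mean the option can be supplied on both create and alter statements; a small usage sketch (the database name is illustrative):

-- hypothetical example
CREATE DATABASE db1 REPLICA 2 QUORUM 2;
ALTER DATABASE db1 QUORUM 1;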

View File

@ -6287,10 +6287,17 @@ void qDestroyQueryInfo(qinfo_t qHandle) {
freeQInfo(pQInfo);
}
static void setQueryResultReady(SQInfo* pQInfo) {
static bool doBuildResCheck(SQInfo* pQInfo) {
bool buildRes = false;
pthread_mutex_lock(&pQInfo->lock);
pQInfo->dataReady = QUERY_RESULT_READY;
buildRes = (pQInfo->rspContext != NULL);
pthread_mutex_unlock(&pQInfo->lock);
return buildRes;
}
bool qTableQuery(qinfo_t qinfo) {
@ -6303,16 +6310,13 @@ bool qTableQuery(qinfo_t qinfo) {
if (IS_QUERY_KILLED(pQInfo)) {
qDebug("QInfo:%p it is already killed, abort", pQInfo);
setQueryResultReady(pQInfo);
return false;
return doBuildResCheck(pQInfo);
}
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
setQueryStatus(pQInfo->runtimeEnv.pQuery, QUERY_COMPLETED);
setQueryResultReady(pQInfo);
qDebug("QInfo:%p no table exists for query, abort", pQInfo);
return false;
setQueryStatus(pQInfo->runtimeEnv.pQuery, QUERY_COMPLETED);
return doBuildResCheck(pQInfo);
}
// error occurs, record the error code and return to client
@ -6320,9 +6324,7 @@ bool qTableQuery(qinfo_t qinfo) {
if (ret != TSDB_CODE_SUCCESS) {
pQInfo->code = ret;
qDebug("QInfo:%p query abort due to error/cancel occurs, code:%s", pQInfo, tstrerror(pQInfo->code));
setQueryResultReady(pQInfo);
return false;
return doBuildResCheck(pQInfo);
}
qDebug("QInfo:%p query task is launched", pQInfo);
@ -6347,17 +6349,7 @@ bool qTableQuery(qinfo_t qinfo) {
pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
}
bool buildRes = false;
pthread_mutex_lock(&pQInfo->lock);
pQInfo->dataReady = QUERY_RESULT_READY;
if (pQInfo->rspContext != NULL) {
buildRes = true;
}
pthread_mutex_unlock(&pQInfo->lock);
return buildRes;
return doBuildResCheck(pQInfo);
}
int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContext) {
@ -6484,7 +6476,6 @@ int32_t qKillQuery(qinfo_t qinfo) {
return TSDB_CODE_QRY_INVALID_QHANDLE;
}
// sem_post(&pQInfo->dataReady);
setQueryKilled(pQInfo);
return TSDB_CODE_SUCCESS;
}

View File

@ -907,6 +907,7 @@ void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) {
pDBInfo->daysPerFile = -1;
pDBInfo->replica = -1;
pDBInfo->quorum = -1;
pDBInfo->keep = NULL;
memset(&pDBInfo->precision, 0, sizeof(SSQLToken));

View File

@ -116,6 +116,7 @@ static SKeyword keywordTable[] = {
{"STATE", TK_STATE},
{"KEEP", TK_KEEP},
{"REPLICA", TK_REPLICA},
{"QUORUM", TK_QUORUM},
{"DAYS", TK_DAYS},
{"MINROWS", TK_MINROWS},
{"MAXROWS", TK_MAXROWS},

File diff suppressed because it is too large

View File

@ -145,6 +145,8 @@ void *syncStart(const SSyncInfo *pInfo)
pNode->vgId = pInfo->vgId;
pNode->replica = pCfg->replica;
pNode->quorum = pCfg->quorum;
if (pNode->quorum > pNode->replica) pNode->quorum = pNode->replica;
for (int i = 0; i < pCfg->replica; ++i) {
const SNodeInfo *pNodeInfo = pCfg->nodeInfo + i;
pNode->peerInfo[i] = syncAddPeer(pNode, pNodeInfo);
@ -260,6 +262,7 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg)
pNode->replica = pNewCfg->replica;
pNode->quorum = pNewCfg->quorum;
if (pNode->quorum > pNode->replica) pNode->quorum = pNode->replica;
memcpy(pNode->peerInfo, newPeers, sizeof(SSyncPeer *) * pNewCfg->replica);
for (i = pNewCfg->replica; i < TAOS_SYNC_MAX_REPLICA; ++i)

View File

@ -69,8 +69,8 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
if (tid < pMeta->maxTables && pMeta->tables[tid] != NULL) {
if (TABLE_UID(pMeta->tables[tid]) == pCfg->tableId.uid) {
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable));
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]),
TABLE_TID(pMeta->tables[tid]), TABLE_UID(pMeta->tables[tid]));
return TSDB_CODE_TDB_TABLE_ALREADY_EXIST;
} else {
tsdbError("vgId:%d table %s at tid %d uid %" PRIu64
@ -1295,4 +1295,4 @@ static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid) {
tsdbDebug("vgId:%d tsdb meta maxTables is adjusted as %d", REPO_ID(pRepo), maxTables);
return 0;
}
}

View File

@ -20,8 +20,6 @@
extern "C" {
#endif
#include "os.h"
#define TD_EQ 0x1
#define TD_GT 0x2
#define TD_LT 0x4

View File

@ -304,10 +304,10 @@ int taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clientI
//uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno));
close(sockFd);
sockFd = -1;
} else {
taosKeepTcpAlive(sockFd);
}
taosKeepTcpAlive(sockFd);
return sockFd;
}

View File

@ -94,7 +94,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>24.1.1</version>
<version>29.0-jre</version>
</dependency>
<dependency>

View File

@ -24,16 +24,16 @@ endi
if $data04 != 1 then
return -1
endi
if $data05 != 10 then
if $data06 != 10 then
return -1
endi
if $data06 != 20,20,20 then
if $data07 != 20,20,20 then
return -1
endi
if $data07 != 2 then
if $data08 != 2 then
return -1
endi
if $data08 != 4 then
if $data09 != 4 then
return -1
endi

View File

@ -32,10 +32,10 @@ endi
if $data04 != 1 then
return -1
endi
if $data05 != 20 then
if $data06 != 20 then
return -1
endi
if $data07 != 16 then
if $data08 != 16 then
return -1
endi
@ -72,7 +72,7 @@ endi
if $data04 != 1 then
return -1
endi
if $data05 != 15 then
if $data06 != 15 then
return -1
endi

View File

@ -81,7 +81,7 @@ print =============== step2 - no db
#11
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6020/rest/sql
print 11-> $system_content
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}@ then
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}@ then
return -1
endi

View File

@ -122,18 +122,18 @@ endi
if $data04 != $replica then
return -1
endi
if $data05 != $days then
if $data06 != $days then
return -1
endi
if $data06 != 365,365,365 then
if $data07 != 365,365,365 then
return -1
endi
print data07 = $data07
if $data07 != $cache then
print expect $cache, actual:$data07
print data08 = $data08
if $data08 != $cache then
print expect $cache, actual:$data08
return -1
endi
if $data08 != 4 then
if $data09 != 4 then
return -1
endi

View File

@ -57,10 +57,10 @@ while $i < $tblEnd
endw
sql select count(*) from $tb
print data00 $data00 rowsPerTbl $rowsPerTbl
if $data00 != $rowsPerTbl then
print data00 $data00 rowsPerTbl $rowsPerTbl
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-01 insert data error ***** *****
print ************ client-01 insert data into $tb error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
return -1
endi

View File

@ -6,7 +6,7 @@
$tblStart = 0
$tblEnd = 10000
$tsStart = 1325347200000 # 2012-01-01 00:00:00.000
$c2Binary = ' . client-02-
$preBinary = ' . client-02-
###############################################################
sql connect
@ -31,13 +31,14 @@ $rowsPerTbl = 0
$ts = $tsStart
$rowsPerLoop = 160
$loop_cnt = 0
loop_run:
print ================ client-02 start loop insert data
print ================ client-02 start loop insert data ( loop_cnt: $loop_cnt )
$i = $tblStart
while $i < $tblEnd
$tb = tb . $i
$c2Binary = $c2Binary . $i
$c2Binary = $preBinary . $i
$c2Binary = $c2Binary . '
$x = 0
@ -46,23 +47,24 @@ while $i < $tblEnd
sql insert into $tb values ( $ts + 1a , $x , $c2Binary ) ( $ts + 3a , $x , $c2Binary ) ( $ts + 5a , $x , $c2Binary ) ( $ts + 7a , $x , $c2Binary ) ( $ts + 9a , $x , $c2Binary ) ( $ts + 11a , $x , $c2Binary ) ( $ts + 13a , $x , $c2Binary ) ( $ts + 15a , $x , $c2Binary ) ( $ts + 17a , $x , $c2Binary ) ( $ts + 19a , $x , $c2Binary ) ( $ts + 21a , $x , $c2Binary ) ( $ts + 23a , $x , $c2Binary ) ( $ts + 25a , $x , $c2Binary ) ( $ts + 27a , $x , $c2Binary ) ( $ts + 29a , $x , $c2Binary ) ( $ts + 31a , $x , $c2Binary ) ( $ts + 33a , $x , $c2Binary ) ( $ts + 35a , $x , $c2Binary ) ( $ts + 37a , $x , $c2Binary ) ( $ts + 39a , $x , $c2Binary )
$x = $x + 40
$ts = $ts + 40a
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
endw
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
$totalRows = $totalRows + $x
$i = $i + 1
endw
sql select count(*) from $tblStart
sql select count(*) from $tb
print data00 $data00 rowsPerTbl $rowsPerTbl
if $data00 != $rowsPerTbl then
print data00 $data00 rowsPerTbl $rowsPerTbl
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-02 insert data error ***** *****
print ************ client-02 insert data into $tb error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
return -1
endi
$loop_cnt = $loop_cnt + 1
goto loop_run

View File

@ -6,8 +6,7 @@
$tblStart = 0
$tblEnd = 10000
$tsStart = 1325347200000 # 2012-01-01 00:00:00.000
$c2Binary = ' . client-03-
$t2Binary = ' . client-03-
$preBinary = ' . client-03-
###############################################################
sql connect
@ -33,16 +32,17 @@ $rowsPerTbl = 0
$ts = $tsStart
$rowsPerLoop = 160
$loop_cnt = 0
loop_run:
print ================ client-03 start loop insert data
print ================ client-03 start loop insert data ( loop_cnt: $loop_cnt )
$i = $tblStart
while $i < $tblEnd
$tb = tb . $i
$t2Binary = ' . $i
$t2Binary = $preBinary . $i
$t2Binary = $t2Binary . '
$c2Binary = $c2Binary . $i
$c2Binary = $preBinary . $i
$c2Binary = $c2Binary . '
$x = 0
@ -51,32 +51,33 @@ while $i < $tblEnd
sql insert into $tb using $stb tags ( $i , $t2Binary ) values ( $ts + 1a , $x , $c2Binary ) ( $ts + 3a , $x , $c2Binary ) ( $ts + 5a , $x , $c2Binary ) ( $ts + 7a , $x , $c2Binary ) ( $ts + 9a , $x , $c2Binary ) ( $ts + 11a , $x , $c2Binary ) ( $ts + 13a , $x , $c2Binary ) ( $ts + 15a , $x , $c2Binary ) ( $ts + 17a , $x , $c2Binary ) ( $ts + 19a , $x , $c2Binary ) ( $ts + 21a , $x , $c2Binary ) ( $ts + 23a , $x , $c2Binary ) ( $ts + 25a , $x , $c2Binary ) ( $ts + 27a , $x , $c2Binary ) ( $ts + 29a , $x , $c2Binary ) ( $ts + 31a , $x , $c2Binary ) ( $ts + 33a , $x , $c2Binary ) ( $ts + 35a , $x , $c2Binary ) ( $ts + 37a , $x , $c2Binary ) ( $ts + 39a , $x , $c2Binary )
$x = $x + 40
$ts = $ts + 40a
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
endw
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
$totalRows = $totalRows + $x
$i = $i + 1
endw
sql select count(*) from $tblStart
sql select count(*) from $tb
print data00 $data00 rowsPerTbl $rowsPerTbl
if $data00 != $rowsPerTbl then
print data00 $data00 rowsPerTbl $rowsPerTbl
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ****** client-3 insert data into $tblStart error ***
print ****** client-3 insert data into $tb error ***
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
return -1
endi
sql select count(*) from $stb
print data00 $data00 totalRows $totalRows
if $data00 != $totalRows then
print data00 $data00 totalRows $totalRows
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ***** client-3 insert data into $stb error ********
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
return -1
endi
$loop_cnt = $loop_cnt + 1
goto loop_run

View File

@ -6,7 +6,7 @@
$tblStart = 0
$tblEnd = 10000
$tsStart = 1325347200000 # 2012-01-01 00:00:00.000
$c2Binary = ' . client-04-
$preBinary = ' . client-04-
###############################################################
$totalRows = 0
@ -35,13 +35,13 @@ $rowsPerLoop = 160
$loopCnt = 0
loop_run:
print ================ client-04 start loop insert data
loop_run:
print ================ client-04 start loop insert data ( loopCnt: $loopCnt )
$i = $tblStart
while $i < $tblEnd
$tb = tb . $i
$c2Binary = $c2Binary . $i
$c2Binary = $preBinary . $i
$c2Binary = $c2Binary . '
$x = 0
@ -50,22 +50,22 @@ while $i < $tblEnd
sql insert into $tb values ( $ts + 1a , $x , $c2Binary ) ( $ts + 3a , $x , $c2Binary ) ( $ts + 5a , $x , $c2Binary ) ( $ts + 7a , $x , $c2Binary ) ( $ts + 9a , $x , $c2Binary ) ( $ts + 11a , $x , $c2Binary ) ( $ts + 13a , $x , $c2Binary ) ( $ts + 15a , $x , $c2Binary ) ( $ts + 17a , $x , $c2Binary ) ( $ts + 19a , $x , $c2Binary ) ( $ts + 21a , $x , $c2Binary ) ( $ts + 23a , $x , $c2Binary ) ( $ts + 25a , $x , $c2Binary ) ( $ts + 27a , $x , $c2Binary ) ( $ts + 29a , $x , $c2Binary ) ( $ts + 31a , $x , $c2Binary ) ( $ts + 33a , $x , $c2Binary ) ( $ts + 35a , $x , $c2Binary ) ( $ts + 37a , $x , $c2Binary ) ( $ts + 39a , $x , $c2Binary )
$x = $x + 40
$ts = $ts + 40a
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
endw
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
$totalRows = $totalRows + $x
$i = $i + 1
endw
sql select count(*) from $tblStart
sql select count(*) from $tb
print data00 $data00 rowsPerTbl $rowsPerTbl
if $data00 != $rowsPerTbl then
print data00 $data00 rowsPerTbl $rowsPerTbl
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-04 insert data error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-04 insert data into $tb error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
return -1
endi

View File

@ -5,7 +5,7 @@
$tblStart = 0
$tblEnd = 10000
$tsStart = 1325347200000 # 2012-01-01 00:00:00.000
$c2Binary = ' . client-05-
$preBinary = ' . client-05-
###############################################################
$totalRows = 0
@ -35,12 +35,12 @@ $rowsPerLoop = 160
$loopCnt = 0
loop_run:
print ================ client-05 start loop insert data
print ================ client-05 start loop insert data ( loopCnt: $loopCnt )
$i = $tblStart
while $i < $tblEnd
$tb = tb . $i
$c2Binary = $c2Binary . $i
$c2Binary = $preBinary . $i
$c2Binary = $c2Binary . '
$x = 0
@ -49,22 +49,22 @@ while $i < $tblEnd
sql insert into $tb values ( $ts + 1a , $x , $c2Binary ) ( $ts + 3a , $x , $c2Binary ) ( $ts + 5a , $x , $c2Binary ) ( $ts + 7a , $x , $c2Binary ) ( $ts + 9a , $x , $c2Binary ) ( $ts + 11a , $x , $c2Binary ) ( $ts + 13a , $x , $c2Binary ) ( $ts + 15a , $x , $c2Binary ) ( $ts + 17a , $x , $c2Binary ) ( $ts + 19a , $x , $c2Binary ) ( $ts + 21a , $x , $c2Binary ) ( $ts + 23a , $x , $c2Binary ) ( $ts + 25a , $x , $c2Binary ) ( $ts + 27a , $x , $c2Binary ) ( $ts + 29a , $x , $c2Binary ) ( $ts + 31a , $x , $c2Binary ) ( $ts + 33a , $x , $c2Binary ) ( $ts + 35a , $x , $c2Binary ) ( $ts + 37a , $x , $c2Binary ) ( $ts + 39a , $x , $c2Binary )
$x = $x + 40
$ts = $ts + 40a
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
endw
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
$totalRows = $totalRows + $x
$i = $i + 1
endw
sql select count(*) from $tblStart
sql select count(*) from $tb
print data00 $data00 rowsPerTbl $rowsPerTbl
if $data00 != $rowsPerTbl then
print data00 $data00 rowsPerTbl $rowsPerTbl
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-05 insert data error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-05 insert data into $tb error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
return -1
endi

View File

@ -6,11 +6,12 @@
$tblStart = 0
$tblEnd = 10000
$tsStart = 1325347200000 # 2012-01-01 00:00:00.000
$c2Binary = ' . client-06-
$preBinary = ' . client-06-
$numPerUpdateTbl = 2000
###############################################################
$totalRows = 0
$totalRows = 0
$dropTblStart = $tblStart
$dropTblEnd = $dropTblStart
sql connect
@ -20,7 +21,7 @@ sql create database if not exists $db replica 2
sql use $db
init_lable:
print ================ create table $tb (ts timestamp, c1 int, c2 binary(16))
print ================ create table [ from $tblStart to $tblEnd ] (ts timestamp, c1 int, c2 binary(16))
$i = $tblStart
while $i < $tblEnd
@ -30,8 +31,8 @@ while $i < $tblEnd
$i = $i + 1
endw
$dropTblStart = $tblStart
$dropTblEnd = $tblStart + $numPerUpdateTbl
$dropTblStart = $dropTblEnd
$dropTblEnd = $dropTblStart + $numPerUpdateTbl
$tblStart = $tblEnd
$tblEnd = $tblEnd + $numPerUpdateTbl
@ -43,12 +44,12 @@ $rowsPerLoop = 160
$loopCnt = 0
loop_run:
print ================ client-06 start loop insert data
$i = $dropTblStart
while $i < $tblEnd
print ================ client-06 start loop insert data from $dropTblStart to $tblStart ( loopCnt: $loopCnt )
$i = $dropTblStart
while $i < $tblStart
$tb = tb . $i
$c2Binary = $c2Binary . $i
$c2Binary = $preBinary . $i
$c2Binary = $c2Binary . '
$x = 0
@ -57,29 +58,29 @@ while $i < $tblEnd
sql insert into $tb values ( $ts + 1a , $x , $c2Binary ) ( $ts + 3a , $x , $c2Binary ) ( $ts + 5a , $x , $c2Binary ) ( $ts + 7a , $x , $c2Binary ) ( $ts + 9a , $x , $c2Binary ) ( $ts + 11a , $x , $c2Binary ) ( $ts + 13a , $x , $c2Binary ) ( $ts + 15a , $x , $c2Binary ) ( $ts + 17a , $x , $c2Binary ) ( $ts + 19a , $x , $c2Binary ) ( $ts + 21a , $x , $c2Binary ) ( $ts + 23a , $x , $c2Binary ) ( $ts + 25a , $x , $c2Binary ) ( $ts + 27a , $x , $c2Binary ) ( $ts + 29a , $x , $c2Binary ) ( $ts + 31a , $x , $c2Binary ) ( $ts + 33a , $x , $c2Binary ) ( $ts + 35a , $x , $c2Binary ) ( $ts + 37a , $x , $c2Binary ) ( $ts + 39a , $x , $c2Binary )
$x = $x + 40
$ts = $ts + 40a
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
endw
if $i == $dropTblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
$totalRows = $totalRows + $x
$i = $i + 1
endw
sql select count(*) from $tblStart
sql select count(*) from $tb
print data00 $data00 rowsPerTbl $rowsPerTbl
if $data00 != $rowsPerTbl then
print data00 $data00 rowsPerTbl $rowsPerTbl
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-06 insert data error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-06 insert data into $tb error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
return -1
endi
$loopCnt = $loopCnt + 1
if $loopCnt > 100 then
if $loopCnt > 2 then
print ================ client-06 drop table from $dropTblStart to $dropTblEnd
$i = $dropTblStart
while $i < $dropTblEnd
$tb = tb . $i
@ -87,7 +88,7 @@ if $loopCnt > 100 then
$i = $i + 1
$totalRows = $totalRows - $rowsPerTbl
endw
sleep 20000
sleep 10000
goto init_lable
endi

View File

@ -5,8 +5,12 @@
$tblStart = 0
$tblEnd = 10000
$tsStart = 1325347200000 # 2012-01-01 00:00:00.000
$c2Binary = ' . client-07-
$preBinary = ' . client-07-
###############################################################
$alterCnt = 0
$columnA = c . $alterCnt
$columnB = d . $alterCnt
$totalRows = 0
@ -17,30 +21,31 @@ $db = db
sql create database if not exists $db replica 2
sql use $db
init_lable:
print ================ create table $tb (ts timestamp, c1 int, c2 binary(16))
$rowsPerTbl = 0
$ts = $tsStart
print ================ create table $tb (ts timestamp, $columnA int, $columnB binary(16))
$i = $tblStart
while $i < $tblEnd
$tb = tb . $i
# print create table if not exists $tb ( ts timestamp , c1 int , c2 binary(16) )
sql create table if not exists $tb ( ts timestamp , c1 int , c2 binary(16) )
sql create table if not exists $tb ( ts timestamp , $columnA int , $columnB binary(16) )
$i = $i + 1
endw
$rowsPerTbl = 0
$ts = $tsStart
$rowsPerLoop = 160
$loopCnt = 0
$alterStep = 0
loop_run:
print ================ client-07 start loop insert data
print ================ client-07 start loop insert data ( loopCnt: $loopCnt )
$i = $tblStart
while $i < $tblEnd
$tb = tb . $i
$c2Binary = $c2Binary . $i
$c2Binary = $preBinary . $i
$c2Binary = $c2Binary . '
$x = 0
@ -49,40 +54,48 @@ while $i < $tblEnd
sql insert into $tb values ( $ts + 1a , $x , $c2Binary ) ( $ts + 3a , $x , $c2Binary ) ( $ts + 5a , $x , $c2Binary ) ( $ts + 7a , $x , $c2Binary ) ( $ts + 9a , $x , $c2Binary ) ( $ts + 11a , $x , $c2Binary ) ( $ts + 13a , $x , $c2Binary ) ( $ts + 15a , $x , $c2Binary ) ( $ts + 17a , $x , $c2Binary ) ( $ts + 19a , $x , $c2Binary ) ( $ts + 21a , $x , $c2Binary ) ( $ts + 23a , $x , $c2Binary ) ( $ts + 25a , $x , $c2Binary ) ( $ts + 27a , $x , $c2Binary ) ( $ts + 29a , $x , $c2Binary ) ( $ts + 31a , $x , $c2Binary ) ( $ts + 33a , $x , $c2Binary ) ( $ts + 35a , $x , $c2Binary ) ( $ts + 37a , $x , $c2Binary ) ( $ts + 39a , $x , $c2Binary )
$x = $x + 40
$ts = $ts + 40a
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
endw
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
$totalRows = $totalRows + $x
$i = $i + 1
endw
sql select count(*) from $tblStart
sql select count(*) from $tb
print data00 $data00 rowsPerTbl $rowsPerTbl
if $data00 != $rowsPerTbl then
print data00 $data00 rowsPerTbl $rowsPerTbl
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-07 insert data error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-07 insert data into $tb error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
return -1
endi
$loopCnt = $loopCnt + 1
if $loopCnt > 100 then
if $alterStep > 2 then
$alterStep = 0
$alterCnt = $alterCnt + 1
$i = $tblStart
while $i < $tblEnd
$tb = tb . $i
sql alter table $tb add column c3 double
sql alter table $tb add column c4 binary( 16 )
sql alter table $tb drop column c1
sql alter table $tb drop column c2
$newColumnA = c . $alterCnt
$newcolumnB = d . $alterCnt
sql alter table $tb add column $newColumnA double
sql alter table $tb add column $newcolumnB binary( 16 )
sql alter table $tb drop column $columnA
sql alter table $tb drop column $columnB
$i = $i + 1
endw
sleep 20000
goto init_lable
endw
$columnA = $newColumnA
$columnB = $newcolumnB
sleep 10000
goto loop_run
endi
goto loop_run

View File

@ -6,11 +6,12 @@
$tblStart = 0
$tblEnd = 10000
$tsStart = 1325347200000 # 2012-01-01 00:00:00.000
$c2Binary = ' . client-06-
$preBinary = ' . client-08-
$numPerUpdateTbl = 2000
###############################################################
$totalRows = 0
$totalRows = 0
$dropTblStart = $tblStart
$dropTblEnd = $dropTblStart
sql connect
@ -20,7 +21,7 @@ sql create database if not exists $db replica 2
sql use $db
init_lable:
print ================ create table $tb (ts timestamp, c1 int, c2 binary(16))
print ================ create table [ from $tblStart to $tblEnd ] (ts timestamp, c1 int, c2 binary(16))
$i = $tblStart
while $i < $tblEnd
@ -30,8 +31,8 @@ while $i < $tblEnd
$i = $i + 1
endw
$dropTblStart = $tblStart
$dropTblEnd = $tblStart + $numPerUpdateTbl
$dropTblStart = $dropTblEnd
$dropTblEnd = $dropTblStart + $numPerUpdateTbl
$tblStart = $tblEnd
$tblEnd = $tblEnd + $numPerUpdateTbl
@ -43,12 +44,12 @@ $rowsPerLoop = 40
$loopCnt = 0
loop_run:
print ================ client-06 start loop insert data
$i = $dropTblStart
while $i < $tblEnd
print ================ client-08 start loop insert data from $dropTblStart to $tblStart ( loopCnt: $loopCnt )
$i = $dropTblStart
while $i < $tblStart
$tb = tb . $i
$c2Binary = $c2Binary . $i
$c2Binary = $preBinary . $i
$c2Binary = $c2Binary . '
$x = 0
@ -57,28 +58,29 @@ while $i < $tblEnd
sql insert into $tb values ( $ts + 1a , $x , $c2Binary ) ( $ts + 3a , $x , $c2Binary ) ( $ts + 5a , $x , $c2Binary ) ( $ts + 7a , $x , $c2Binary ) ( $ts + 9a , $x , $c2Binary ) ( $ts + 11a , $x , $c2Binary ) ( $ts + 13a , $x , $c2Binary ) ( $ts + 15a , $x , $c2Binary ) ( $ts + 17a , $x , $c2Binary ) ( $ts + 19a , $x , $c2Binary ) ( $ts + 21a , $x , $c2Binary ) ( $ts + 23a , $x , $c2Binary ) ( $ts + 25a , $x , $c2Binary ) ( $ts + 27a , $x , $c2Binary ) ( $ts + 29a , $x , $c2Binary ) ( $ts + 31a , $x , $c2Binary ) ( $ts + 33a , $x , $c2Binary ) ( $ts + 35a , $x , $c2Binary ) ( $ts + 37a , $x , $c2Binary ) ( $ts + 39a , $x , $c2Binary )
$x = $x + 40
$ts = $ts + 40a
if $i == $tblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
endw
if $i == $dropTblStart then
$rowsPerTbl = $rowsPerTbl + $x
endi
$totalRows = $totalRows + $x
$i = $i + 1
endw
sql select count(*) from $tblStart
sql select count(*) from $tb
print data00 $data00 rowsPerTbl $rowsPerTbl tb: $tb
if $data00 != $rowsPerTbl then
print data00 $data00 rowsPerTbl $rowsPerTbl
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-06 insert data error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
print ************ client-08 insert data into $tb error ***** *****
print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
return -1
endi
$loopCnt = $loopCnt + 1
#if $loopCnt > 100 then
#if $loopCnt > 3 then
print ================ client-08 drop table from $dropTblStart to $dropTblEnd
$i = $dropTblStart
while $i < $dropTblEnd
$tb = tb . $i
@ -86,7 +88,7 @@ $loopCnt = $loopCnt + 1
$i = $i + 1
$totalRows = $totalRows - $rowsPerTbl
endw
sleep 20000
sleep 10000
goto init_lable
#endi

View File

@ -91,14 +91,14 @@ sql create dnode $hostname3
sleep 3000
sleep 3000
print ============== step3: start back client-01.sim
run_back unique/clusterSimCase/client-01.sim
#run_back unique/clusterSimCase/client-01.sim
#run_back unique/clusterSimCase/client-01.sim
#run_back unique/clusterSimCase/client-01.sim
#run_back unique/clusterSimCase/client-01.sim
#run_back unique/clusterSimCase/client-01.sim
#run_back unique/clusterSimCase/client-01.sim
#run_back unique/clusterSimCase/client-01.sim
#run_back unique/clusterSimCase/client-02.sim
#run_back unique/clusterSimCase/client-03.sim
#run_back unique/clusterSimCase/client-04.sim
#run_back unique/clusterSimCase/client-05.sim
#run_back unique/clusterSimCase/client-06.sim
#run_back unique/clusterSimCase/client-07.sim
run_back unique/clusterSimCase/client-08.sim
#run_back unique/clusterSimCase/client-01.sim
#run_back unique/clusterSimCase/client-01.sim
sleep 20000
@ -242,4 +242,7 @@ endi
sleep 3000
print **** **** **** (loop_cnt: $loop_cnt ) end, continue...... **** **** **** ****
$loop_cnt = $loop_cnt + 1
if $loop_cnt == 50 then
return 0
endi
goto loop_cluster_do