Merge branch 'develop' into feature/TD-1380

commit e3f4c9038c
@@ -17,6 +17,7 @@ SET(TD_MQTT FALSE)
 SET(TD_TSDB_PLUGINS FALSE)
 SET(TD_STORAGE FALSE)
 SET(TD_TOPIC FALSE)
+SET(TD_MODULE FALSE)
 
 SET(TD_COVER FALSE)
 SET(TD_MEM_CHECK FALSE)
@@ -6,6 +6,7 @@ node {
 }
 
 def skipstage=0
 
 def abortPreviousBuilds() {
   def currentJobName = env.JOB_NAME
   def currentBuildNumber = env.BUILD_NUMBER.toInteger()
@@ -24,7 +25,7 @@ def abortPreviousBuilds() {
     build.doKill() //doTerm(),doKill(),doTerm()
   }
 }
-//abort previous build
+// abort previous build
 abortPreviousBuilds()
 def abort_previous(){
   def buildNumber = env.BUILD_NUMBER as int
@@ -32,7 +33,8 @@ def abort_previous(){
   milestone(buildNumber)
 }
 def pre_test(){
 
 
   sh '''
   sudo rmtaos || echo "taosd has not installed"
   '''
@@ -79,6 +81,10 @@ pipeline {
         changeRequest()
       }
       steps {
+        script{
+          abort_previous()
+          abortPreviousBuilds()
+        }
         sh'''
         cp -r ${WORKSPACE} ${WORKSPACE}.tes
         cd ${WORKSPACE}.tes
README-CN.md
@@ -258,10 +258,16 @@ TDengine 社区生态中也有一些非常友好的第三方连接器,可以
 
 TDengine 的测试框架和所有测试例全部开源。
 
-点击[这里](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md),了解如何运行测试例和添加新的测试例。
+点击 [这里](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md),了解如何运行测试例和添加新的测试例。
 
 # 成为社区贡献者
-点击[这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。
 
-#加入技术交流群
-TDengine官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小T为好友,即可入群。
+点击 [这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。
+
+# 加入技术交流群
+
+TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小T为好友,即可入群。
+
+# [谁在使用TDengine](https://github.com/taosdata/TDengine/issues/2432)
+
+欢迎所有 TDengine 用户及贡献者在 [这里](https://github.com/taosdata/TDengine/issues/2432) 分享您在当前工作中开发/使用 TDengine 的故事。
@@ -250,3 +250,6 @@ Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to th
 
 Add WeChat “tdengine” to join the group,you can communicate with other users.
 
+# [User List](https://github.com/taosdata/TDengine/issues/2432)
+
+If you are using TDengine and feel it helps or you'd like to do some contributions, please add your company to [user list](https://github.com/taosdata/TDengine/issues/2432) and let us know your needs.
@@ -29,6 +29,10 @@ IF (TD_TOPIC)
 ADD_DEFINITIONS(-D_TOPIC)
 ENDIF ()
 
+IF (TD_MODULE)
+ADD_DEFINITIONS(-D_MODULE)
+ENDIF ()
+
 IF (TD_GODLL)
 ADD_DEFINITIONS(-D_TD_GO_DLL_)
 ENDIF ()
@@ -17,6 +17,14 @@ ELSEIF (${TOPIC} MATCHES "false")
 MESSAGE(STATUS "Build without topic plugins")
 ENDIF ()
 
+IF (${TD_MODULE} MATCHES "true")
+SET(TD_MODULE TRUE)
+MESSAGE(STATUS "Build with module plugins")
+ELSEIF (${TOPIC} MATCHES "false")
+SET(TD_MODULE FALSE)
+MESSAGE(STATUS "Build without module plugins")
+ENDIF ()
+
 IF (${COVER} MATCHES "true")
 SET(TD_COVER TRUE)
 MESSAGE(STATUS "Build with test coverage")
@@ -6,19 +6,27 @@
 
 ### 内存需求
 
-每个 DB 可以创建固定数目的 vgroup,默认与 CPU 核数相同,可通过 maxVgroupsPerDb 配置;vgroup 中的每个副本会是一个 vnode;每个 vnode 会占用固定大小的内存(大小与数据库的配置参数 blocks 和 cache 有关);每个 Table 会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个 DB 需要的系统内存可通过如下公式计算:
+每个 Database 可以创建固定数目的 vgroup,默认与 CPU 核数相同,可通过 maxVgroupsPerDb 配置;vgroup 中的每个副本会是一个 vnode;每个 vnode 会占用固定大小的内存(大小与数据库的配置参数 blocks 和 cache 有关);每个 Table 会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个 DB 需要的系统内存可通过如下公式计算:
 
 ```
-Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
+Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
 ```
 
-示例:假设是 4 核机器,cache 是缺省大小 16M, blocks 是缺省值 6,假设有 10 万张表,标签总长度是 256 字节,则总的内存需求为:4 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 499M。
+示例:假设是 4 核机器,cache 是缺省大小 16M, blocks 是缺省值 6,并且一个 DB 中有 10 万张表,标签总长度是 256 字节,则这个 DB 总的内存需求为:4 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 499M。
 
-注意:从这个公式计算得到的内存容量,应理解为系统的“必要需求”,而不是“内存总数”。在实际运行的生产系统中,由于操作系统缓存、资源管理调度等方面的需要,内存规划应当在计算结果的基础上保留一定冗余,以维持系统状态和系统性能的稳定性。
+在实际的系统运维中,我们通常会更关心 TDengine 服务进程(taosd)会占用的内存量。
+```
+taosd 内存总量 = vnode 内存 + mnode 内存 + 查询内存
+```
 
-实际运行的系统往往会根据数据特点的不同,将数据存放在不同的 DB 里。因此做规划时,也需要考虑。
+其中:
+1. “vnode 内存”指的是集群中所有的 Database 存储分摊到当前 taosd 节点上所占用的内存资源。可以按上文“Database Memory Size”计算公式估算每个 DB 的内存占用量进行加总,再按集群中总共的 TDengine 节点数做平均(如果设置为多副本,则还需要乘以对应的副本倍数)。
+2. “mnode 内存”指的是集群中管理节点所占用的资源。如果一个 taosd 节点上分布有 mnode 管理节点,则内存消耗还需要增加“0.2KB * 集群中数据表总数”。
+3. “查询内存”指的是服务端处理查询请求时所需要占用的内存。单条查询语句至少会占用“0.2KB * 查询涉及的数据表总数”的内存量。
 
-如果内存充裕,可以加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。
+注意:以上内存估算方法,主要讲解了系统的“必须内存需求”,而不是“内存总数上限”。在实际运行的生产环境中,由于操作系统缓存、资源管理调度等方面的原因,内存规划应当在估算结果的基础上保留一定冗余,以维持系统状态和系统性能的稳定性。并且,生产环境通常会配置系统资源的监控工具,以便及时发现硬件资源的紧缺情况。
+
+最后,如果内存充裕,可以考虑加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。
 
 ### CPU 需求
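The sizing arithmetic in the documentation change above can be sanity-checked with a small, self-contained sketch. The formula and the example figures (4-core host, cache 16 MB, blocks 6, 100,000 tables, 256-byte total tag length, roughly 499 MB) come from the doc text itself; the helper name `db_memory_mb` and the KB-to-MB conversion by 1000 (matching the doc's own rounding) are illustrative assumptions, not part of TDengine.

```
#include <stdio.h>

/* Rough per-database memory estimate, following the documented formula:
 *   Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB)
 *                        + numOfTables * (tagSizePerTable + 0.5KB)
 * Sizes are kept in MB; the tag size is given in bytes and converted to KB.
 */
static double db_memory_mb(int maxVgroupsPerDb, int blocks, double cacheMB,
                           long numOfTables, double tagSizeBytes) {
    double vnodePartMB = maxVgroupsPerDb * (blocks * cacheMB + 10.0);
    /* KB -> MB divided by 1000, mirroring the doc's example arithmetic */
    double tablePartMB = numOfTables * (tagSizeBytes / 1024.0 + 0.5) / 1000.0;
    return vnodePartMB + tablePartMB;
}

int main(void) {
    /* The example from the doc: 4 vgroups (one per core), blocks 6, cache 16 MB,
     * 100,000 tables, 256-byte tags -> 4*(6*16+10) + 100000*0.75/1000 = 499 MB. */
    double mb = db_memory_mb(4, 6, 16.0, 100000, 256.0);
    printf("estimated database memory: %.0f MB\n", mb);
    return 0;
}
```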
@@ -367,7 +367,7 @@ typedef struct SSqlObj {
   int64_t svgroupRid;
 
   int64_t squeryLock;
+  int32_t retryReason;  // previous error code
   struct SSqlObj *prev, *next;
   int64_t self;
 } SSqlObj;
@@ -310,10 +310,51 @@ void tscAsyncResultOnError(SSqlObj* pSql) {
   taosScheduleTask(tscQhandle, &schedMsg);
 }
 
 
 int tscSendMsgToServer(SSqlObj *pSql);
 
+static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SQueryInfo* pQueryInfo) {
+  // handle the invalid table error code for super table.
+  // update the pExpr info, colList info, number of table columns
+  // TODO Re-parse this sql and issue the corresponding subquery as an alternative for this case.
+  if (pSql->retryReason == TSDB_CODE_TDB_INVALID_TABLE_ID) {
+    int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
+    int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+    int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
+
+    SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
+    for (int32_t i = 0; i < numOfExprs; ++i) {
+      SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
+      pExpr->uid = pTableMetaInfo->pTableMeta->id.uid;
+
+      if (pExpr->colInfo.colIndex >= 0) {
+        int32_t index = pExpr->colInfo.colIndex;
+
+        if ((TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && index >= numOfCols) ||
+            (TSDB_COL_IS_TAG(pExpr->colInfo.flag) && (index < numOfCols || index >= (numOfCols + numOfTags)))) {
+          return pSql->retryReason;
+        }
+
+        if ((pSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) &&
+            strcasecmp(pExpr->colInfo.name, pSchema[pExpr->colInfo.colIndex].name) != 0) {
+          return pSql->retryReason;
+        }
+      }
+    }
+
+    // validate the table columns information
+    for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) {
+      SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i);
+      if (pCol->colIndex.columnIndex >= numOfCols) {
+        return pSql->retryReason;
+      }
+    }
+  } else {
+    // do nothing
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
 void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
   SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param);
   if (pSql == NULL) return;
@@ -339,7 +380,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
     if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
       tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql);
 
-      STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+      STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
 
       code = tscGetTableMeta(pSql, pTableMetaInfo);
       assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS);
@@ -349,6 +391,10 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
     }
 
     assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
+    code = updateMetaBeforeRetryQuery(pSql, pTableMetaInfo, pQueryInfo);
+    if (code != TSDB_CODE_SUCCESS) {
+      goto _error;
+    }
 
     // tscProcessSql can add error into async res
     tscProcessSql(pSql);
@@ -670,7 +670,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
       if ((code = setKillInfo(pSql, pInfo, pInfo->type)) != TSDB_CODE_SUCCESS) {
         return code;
       }
+      break;
+    }
+
+    case TSDB_SQL_SYNC_DB_REPLICA: {
+      const char* msg1 = "invalid db name";
+      SStrToken* pzName = taosArrayGet(pInfo->pMiscInfo->a, 0);
+
+      assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
+      code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName);
+      if (code != TSDB_CODE_SUCCESS) {
+        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+      }
       break;
     }
 
@@ -350,8 +350,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
         taosMsleep(duration);
       }
 
+      pSql->retryReason = rpcMsg->code;
       rpcMsg->code = tscRenewTableMeta(pSql, 0);
 
       // if there is an error occurring, proceed to the following error handling procedure.
       if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
         taosReleaseRef(tscObjRef, handle);
@@ -1284,6 +1284,23 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   return TSDB_CODE_SUCCESS;
 }
 
+int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
+  SSqlCmd *pCmd = &pSql->cmd;
+  pCmd->payloadLen = sizeof(SSyncDbMsg);
+
+  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
+    tscError("%p failed to malloc for query msg", pSql);
+    return TSDB_CODE_TSC_OUT_OF_MEMORY;
+  }
+
+  SSyncDbMsg *pSyncMsg = (SSyncDbMsg *)pCmd->payload;
+  STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+  tNameExtractFullName(&pTableMetaInfo->name, pSyncMsg->db);
+  pCmd->msgType = TSDB_MSG_TYPE_CM_SYNC_DB;
+
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   STscObj *pObj = pSql->pTscObj;
   SSqlCmd *pCmd = &pSql->cmd;
@@ -2559,6 +2576,7 @@ void tscInitMsgsFp() {
   tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserAcctMsg;
   tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg;
   tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg;
+  tscBuildMsg[TSDB_SQL_SYNC_DB_REPLICA] = tscBuildSyncDbReplicaMsg;
   tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg;
   tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg;
   tscBuildMsg[TSDB_SQL_CREATE_DNODE] = tscBuildCreateDnodeMsg;
@@ -51,6 +51,7 @@ enum {
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_ACCT, "alter-acct" )
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_TABLE, "alter-table" )
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_DB, "alter-db" )
+TSDB_DEFINE_SQL_TYPE(TSDB_SQL_SYNC_DB_REPLICA, "sync db-replica")
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_MNODE, "create-mnode" )
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_MNODE, "drop-mnode" )
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DNODE, "create-dnode" )
@@ -87,13 +88,13 @@ enum {
  */
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_EMPTY_RESULT, "retrieve-empty-result" )
 
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" )
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_STATUS, "serv-status" )
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_DB, "current-db" )
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_VERSION, "serv-version" )
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CLI_VERSION, "cli-version" )
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_USER, "current-user ")
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_LOCAL, "cfg-local" )
 
 TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MAX, "max" )
 };
@@ -35,6 +35,10 @@ IF (TD_TOPIC)
 TARGET_LINK_LIBRARIES(taosd topic)
 ENDIF ()
 
+IF (TD_MODULE AND TD_LINUX)
+TARGET_LINK_LIBRARIES(taosd module dl)
+ENDIF ()
+
 SET(PREPARE_ENV_CMD "prepare_env_cmd")
 SET(PREPARE_ENV_TARGET "prepare_env_target")
 ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD}
@@ -39,6 +39,13 @@
 #include "dnodeMPeer.h"
 #include "dnodeShell.h"
 #include "dnodeTelemetry.h"
+#include "module.h"
+
+#if !defined(_MODULE) || !defined(_TD_LINUX)
+int32_t moduleStart() { return 0; }
+void moduleStop() {}
+#endif
+
 
 void *tsDnodeTmr = NULL;
 static SRunStatus tsRunStatus = TSDB_RUN_STATUS_STOPPED;
@@ -146,6 +153,7 @@ int32_t dnodeInitSystem() {
   }
 
   dnodeSetRunStatus(TSDB_RUN_STATUS_RUNING);
+  moduleStart();
 
   dnodeReportStep("TDengine", "initialized successfully", 1);
   dInfo("TDengine is initialized successfully");
@@ -155,6 +163,7 @@ int32_t dnodeInitSystem() {
 
 void dnodeCleanUpSystem() {
   if (dnodeGetRunStatus() != TSDB_RUN_STATUS_STOPPED) {
+    moduleStop();
     dnodeSetRunStatus(TSDB_RUN_STATUS_STOPPED);
     dnodeCleanupTmr();
     dnodeCleanupComponents();
@@ -49,6 +49,7 @@ int32_t dnodeInitShell() {
   dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = dnodeDispatchToMWriteQueue;
   dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = dnodeDispatchToMWriteQueue;
   dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = dnodeDispatchToMWriteQueue;
+  dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = dnodeDispatchToMWriteQueue;
   dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_TP] = dnodeDispatchToMWriteQueue;
   dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = dnodeDispatchToMWriteQueue;
   dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TP] = dnodeDispatchToMWriteQueue;
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_MODULE
+#define TDENGINE_MODULE
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int32_t moduleStart();
+void moduleStop();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
@@ -77,6 +77,7 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_DB, "create-db" )
 TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DB, "drop-db" )
 TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_DB, "use-db" )
 TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_DB, "alter-db" )
+TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_SYNC_DB, "sync-db-replica" )
 TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_TABLE, "create-table" )
 TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_TABLE, "drop-table" )
 TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_TABLE, "alter-table" )
@@ -574,7 +575,7 @@ typedef struct {
 typedef struct {
   char    db[TSDB_TABLE_FNAME_LEN];
   uint8_t ignoreNotExists;
-} SDropDbMsg, SUseDbMsg;
+} SDropDbMsg, SUseDbMsg, SSyncDbMsg;
 
 // IMPORTANT: sizeof(SVnodeStatisticInfo) should not exceed
 // TSDB_FILE_HEADER_LEN/4 - TSDB_FILE_HEADER_VERSION_SIZE
@@ -152,56 +152,57 @@
 #define TK_NOW 133
 #define TK_RESET 134
 #define TK_QUERY 135
-#define TK_ADD 136
-#define TK_COLUMN 137
-#define TK_TAG 138
-#define TK_CHANGE 139
-#define TK_SET 140
-#define TK_KILL 141
-#define TK_CONNECTION 142
-#define TK_STREAM 143
-#define TK_COLON 144
-#define TK_ABORT 145
-#define TK_AFTER 146
-#define TK_ATTACH 147
-#define TK_BEFORE 148
-#define TK_BEGIN 149
-#define TK_CASCADE 150
-#define TK_CLUSTER 151
-#define TK_CONFLICT 152
-#define TK_COPY 153
-#define TK_DEFERRED 154
-#define TK_DELIMITERS 155
-#define TK_DETACH 156
-#define TK_EACH 157
-#define TK_END 158
-#define TK_EXPLAIN 159
-#define TK_FAIL 160
-#define TK_FOR 161
-#define TK_IGNORE 162
-#define TK_IMMEDIATE 163
-#define TK_INITIALLY 164
-#define TK_INSTEAD 165
-#define TK_MATCH 166
-#define TK_KEY 167
-#define TK_OF 168
-#define TK_RAISE 169
-#define TK_REPLACE 170
-#define TK_RESTRICT 171
-#define TK_ROW 172
-#define TK_STATEMENT 173
-#define TK_TRIGGER 174
-#define TK_VIEW 175
-#define TK_SEMI 176
-#define TK_NONE 177
-#define TK_PREV 178
-#define TK_LINEAR 179
-#define TK_IMPORT 180
-#define TK_TBNAME 181
-#define TK_JOIN 182
-#define TK_INSERT 183
-#define TK_INTO 184
-#define TK_VALUES 185
+#define TK_SYNCDB 136
+#define TK_ADD 137
+#define TK_COLUMN 138
+#define TK_TAG 139
+#define TK_CHANGE 140
+#define TK_SET 141
+#define TK_KILL 142
+#define TK_CONNECTION 143
+#define TK_STREAM 144
+#define TK_COLON 145
+#define TK_ABORT 146
+#define TK_AFTER 147
+#define TK_ATTACH 148
+#define TK_BEFORE 149
+#define TK_BEGIN 150
+#define TK_CASCADE 151
+#define TK_CLUSTER 152
+#define TK_CONFLICT 153
+#define TK_COPY 154
+#define TK_DEFERRED 155
+#define TK_DELIMITERS 156
+#define TK_DETACH 157
+#define TK_EACH 158
+#define TK_END 159
+#define TK_EXPLAIN 160
+#define TK_FAIL 161
+#define TK_FOR 162
+#define TK_IGNORE 163
+#define TK_IMMEDIATE 164
+#define TK_INITIALLY 165
+#define TK_INSTEAD 166
+#define TK_MATCH 167
+#define TK_KEY 168
+#define TK_OF 169
+#define TK_RAISE 170
+#define TK_REPLACE 171
+#define TK_RESTRICT 172
+#define TK_ROW 173
+#define TK_STATEMENT 174
+#define TK_TRIGGER 175
+#define TK_VIEW 176
+#define TK_SEMI 177
+#define TK_NONE 178
+#define TK_PREV 179
+#define TK_LINEAR 180
+#define TK_IMPORT 181
+#define TK_TBNAME 182
+#define TK_JOIN 183
+#define TK_INSERT 184
+#define TK_INTO 185
+#define TK_VALUES 186
 
 
 #define TK_SPACE 300
@@ -41,7 +41,7 @@
       "insert_mode": "taosc",
       "insert_rows": 1000,
       "multi_thread_write_one_tbl": "no",
-      "rows_per_tbl": 20,
+      "interlace_rows": 20,
       "max_sql_len": 1024000,
       "disorder_ratio": 0,
       "disorder_range": 1000,
@@ -41,7 +41,7 @@
       "insert_mode": "taosc",
       "insert_rows": 100000,
       "multi_thread_write_one_tbl": "no",
-      "rows_per_tbl": 0,
+      "interlace_rows": 0,
       "max_sql_len": 1024000,
       "disorder_ratio": 0,
       "disorder_range": 1000,
@@ -187,6 +187,7 @@ typedef struct SArguments_S {
   char * tb_prefix;
   char * sqlFile;
   bool use_metric;
+  bool drop_database;
   bool insert_only;
   bool answer_yes;
   bool debug_print;
@@ -199,7 +200,8 @@ typedef struct SArguments_S {
   int num_of_CPR;
   int num_of_threads;
   int insert_interval;
-  int rows_per_tbl;
+  int query_times;
+  int interlace_rows;
   int num_of_RPR;
   int max_sql_len;
   int num_of_tables;
@@ -314,7 +316,7 @@ typedef struct SDbCfg_S {
 
 typedef struct SDataBase_S {
   char dbName[MAX_DB_NAME_SIZE];
-  int drop;  // 0: use exists, 1: if exists, drop then new create
+  bool drop;  // 0: use exists, 1: if exists, drop then new create
   SDbCfg dbCfg;
   int superTblCount;
   SSuperTable superTbls[MAX_SUPER_TABLE_COUNT];
@@ -351,7 +353,7 @@ typedef struct SuperQueryInfo_S {
   int subscribeInterval; // ms
   int subscribeRestart;
   int subscribeKeepProgress;
   char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
   char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
   TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
 } SuperQueryInfo;
@@ -359,7 +361,7 @@ typedef struct SuperQueryInfo_S {
 typedef struct SubQueryInfo_S {
   char sTblName[MAX_TB_NAME_SIZE+1];
   int rate; // 0: unlimit > 0 loop/s
   int threadCnt;
   int subscribeMode; // 0: sync, 1: async
   int subscribeInterval; // ms
   int subscribeRestart;
@@ -367,7 +369,7 @@ typedef struct SubQueryInfo_S {
   int childTblCount;
   char childTblPrefix[MAX_TB_NAME_SIZE];
   int sqlCount;
   char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
   char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
   TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
 
@@ -397,14 +399,14 @@ typedef struct SThreadInfo_S {
   int end_table_to;
   int ntables;
   int data_of_rate;
   uint64_t start_time;
   char* cols;
   bool use_metric;
   SSuperTable* superTblInfo;
 
   // for async insert
   tsem_t lock_sem;
   int64_t counter;
   uint64_t st;
   uint64_t et;
   int64_t lastTs;
@@ -524,6 +526,7 @@ SArguments g_args = {
   "t", // tb_prefix
   NULL, // sqlFile
   true, // use_metric
+  true, // drop_database
   true, // insert_only
   false, // debug_print
   false, // verbose_print
@@ -547,7 +550,8 @@ SArguments g_args = {
   10, // num_of_CPR
   10, // num_of_connections/thread
   0, // insert_interval
-  0, // rows_per_tbl;
+  1, // query_times
+  0, // interlace_rows;
   100, // num_of_RPR
   TSDB_PAYLOAD_SIZE, // max_sql_len
   10000, // num_of_tables
@@ -681,8 +685,10 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
       arguments->num_of_threads = atoi(argv[++i]);
     } else if (strcmp(argv[i], "-i") == 0) {
       arguments->insert_interval = atoi(argv[++i]);
+    } else if (strcmp(argv[i], "-qt") == 0) {
+      arguments->query_times = atoi(argv[++i]);
     } else if (strcmp(argv[i], "-B") == 0) {
-      arguments->rows_per_tbl = atoi(argv[++i]);
+      arguments->interlace_rows = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-r") == 0) {
       arguments->num_of_RPR = atoi(argv[++i]);
     } else if (strcmp(argv[i], "-t") == 0) {
@@ -1064,6 +1070,7 @@ static int printfInsertMeta() {
   printf("max sql length: \033[33m%d\033[0m\n", g_args.max_sql_len);
 
   printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
 
   for (int i = 0; i < g_Dbs.dbCount; i++) {
     printf("database[\033[33m%d\033[0m]:\n", i);
     printf(" database[%d] name: \033[33m%s\033[0m\n", i, g_Dbs.db[i].dbName);
@@ -1220,16 +1227,19 @@ static int printfInsertMeta() {
 }
 
 static void printfInsertMetaToFile(FILE* fp) {
-    SHOW_PARSE_RESULT_START_TO_FILE(fp);
+
+  SHOW_PARSE_RESULT_START_TO_FILE(fp);
 
   fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port);
   fprintf(fp, "user: %s\n", g_Dbs.user);
-  fprintf(fp, "password: %s\n", g_Dbs.password);
   fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
   fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
   fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
+  fprintf(fp, "insert interval: %d\n", g_args.insert_interval);
+  fprintf(fp, "number of records per req: %d\n", g_args.num_of_RPR);
+  fprintf(fp, "max sql length: %d\n", g_args.max_sql_len);
   fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
 
   for (int i = 0; i < g_Dbs.dbCount; i++) {
     fprintf(fp, "database[%d]:\n", i);
     fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName);
@@ -1364,20 +1374,23 @@ static void printfInsertMetaToFile(FILE* fp) {
     }
     fprintf(fp, "\n");
   }
 
   SHOW_PARSE_RESULT_END_TO_FILE(fp);
 }
 
 static void printfQueryMeta() {
 
   SHOW_PARSE_RESULT_START();
 
   printf("host: \033[33m%s:%u\033[0m\n",
       g_queryInfo.host, g_queryInfo.port);
   printf("user: \033[33m%s\033[0m\n", g_queryInfo.user);
-  printf("password: \033[33m%s\033[0m\n", g_queryInfo.password);
   printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
 
   printf("\n");
   printf("specified table query info: \n");
   printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate);
+  printf("query times: \033[33m%d\033[0m\n", g_args.query_times);
   printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent);
   printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount);
@@ -1411,11 +1424,10 @@ static void printfQueryMeta() {
   }
   printf("\n");
 
   SHOW_PARSE_RESULT_END();
 }
 
-
-static char* xFormatTimestamp(char* buf, int64_t val, int precision) {
+static char* formatTimestamp(char* buf, int64_t val, int precision) {
   time_t tt;
   if (precision == TSDB_TIME_PRECISION_MICRO) {
     tt = (time_t)(val / 1000000);
@@ -1447,7 +1459,9 @@ static char* xFormatTimestamp(char* buf, int64_t val, int precision) {
   return buf;
 }
 
-static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) {
+static void xDumpFieldToFile(FILE* fp, const char* val,
+        TAOS_FIELD* field, int32_t length, int precision) {
+
   if (val == NULL) {
     fprintf(fp, "%s", TSDB_DATA_NULL_STR);
     return;
@@ -1483,7 +1497,7 @@ static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32
       fprintf(fp, "\'%s\'", buf);
       break;
     case TSDB_DATA_TYPE_TIMESTAMP:
-      xFormatTimestamp(buf, *(int64_t*)val, precision);
+      formatTimestamp(buf, *(int64_t*)val, precision);
       fprintf(fp, "'%s'", buf);
       break;
     default:
@@ -1562,7 +1576,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
 
     tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
         fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
-    xFormatTimestamp(dbInfos[count]->create_time,
+    formatTimestamp(dbInfos[count]->create_time,
         *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX],
         TSDB_TIME_PRECISION_MILLI);
     dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
@@ -2036,15 +2050,25 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
   res = taos_query(taos, command);
   int32_t code = taos_errno(res);
   if (code != 0) {
-    printf("failed to run command %s\n", command);
     taos_free_result(res);
     taos_close(taos);
+    errorPrint("%s() LN%d, failed to run command %s\n",
+           __func__, __LINE__, command);
     exit(-1);
   }
 
   int childTblCount = (limit < 0)?10000:limit;
   int count = 0;
-  // childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
+  if (childTblName == NULL) {
+    childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
+    if (NULL == childTblName) {
+      taos_free_result(res);
+      taos_close(taos);
+      errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
+      exit(-1);
+    }
+  }
 
   char* pTblName = childTblName;
   while ((row = taos_fetch_row(res)) != NULL) {
     int32_t* len = taos_fetch_lengths(res);
@@ -2090,6 +2114,7 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
 
 static int getSuperTableFromServer(TAOS * taos, char* dbName,
         SSuperTable* superTbls) {
 
   char command[BUFFER_SIZE] = "\0";
   TAOS_RES * res;
   TAOS_ROW row = NULL;
@@ -2315,7 +2340,7 @@ static int createDatabases() {
   }
   char command[BUFFER_SIZE] = "\0";
 
   for (int i = 0; i < g_Dbs.dbCount; i++) {
     if (g_Dbs.db[i].drop) {
       sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName);
       verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
@@ -2326,7 +2351,7 @@ static int createDatabases() {
   }
 
   int dataLen = 0;
   dataLen += snprintf(command + dataLen,
       BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName);
 
   if (g_Dbs.db[i].dbCfg.blocks > 0) {
@@ -2403,38 +2428,48 @@ static int createDatabases() {
   debugPrint("%s() %d supertbl count:%d\n",
           __func__, __LINE__, g_Dbs.db[i].superTblCount);
   for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
-    // describe super table, if exists
-    sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
-        g_Dbs.db[i].superTbls[j].sTblName);
-    verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
-    if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
-      g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS;
+    if ((g_Dbs.db[i].drop) || (g_Dbs.db[i].superTbls[j].superTblExists == TBL_NO_EXISTS)) {
       ret = createSuperTable(taos, g_Dbs.db[i].dbName,
               &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric);
-    } else {
-      g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS;
-      if (g_Dbs.db[i].superTbls[j].childTblExists != TBL_ALREADY_EXISTS) {
-        ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
-            &g_Dbs.db[i].superTbls[j]);
+      if (0 != ret) {
+        errorPrint("\ncreate super table %d failed!\n\n", j);
+        taos_close(taos);
+        return -1;
       }
     }
 
-    if (0 != ret) {
-      printf("\ncreate super table %d failed!\n\n", j);
-      taos_close(taos);
-      return -1;
-    }
-  }
+    /* describe super table, if exists
+    sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
+        g_Dbs.db[i].superTbls[j].sTblName);
+    verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
+    if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
+      g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS;
+
+    } else {
+    */
+      g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS;
+      ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
+                &g_Dbs.db[i].superTbls[j]);
+    //}
+      if (0 != ret) {
+        errorPrint("\nget super table %s.%s info failed!\n\n", g_Dbs.db[i].dbName,
+              g_Dbs.db[i].superTbls[j].sTblName);
+        taos_close(taos);
+        return -1;
+      }
+
+    }
   }
 
   taos_close(taos);
   return 0;
 }
 
 static void* createTable(void *sarg)
 {
   threadInfo *winfo = (threadInfo *)sarg;
   SSuperTable* superTblInfo = winfo->superTblInfo;
 
   int64_t lastPrintTime = taosGetTimestampMs();
@@ -2454,19 +2489,19 @@ static void* createTable(void *sarg)
   int len = 0;
   int batchNum = 0;
 
   verbosePrint("%s() LN%d: Creating table from %d to %d\n",
           __func__, __LINE__,
           winfo->start_table_from, winfo->end_table_to);
 
   for (int i = winfo->start_table_from; i <= winfo->end_table_to; i++) {
     if (0 == g_Dbs.use_metric) {
       snprintf(buffer, buff_len,
               "create table if not exists %s.%s%d %s;",
               winfo->db_name,
               g_args.tb_prefix, i,
               winfo->cols);
     } else {
       if (0 == len) {
         batchNum = 0;
         memset(buffer, 0, buff_len);
         len += snprintf(buffer + len,
@@ -2485,7 +2520,7 @@ static void* createTable(void *sarg)
         free(buffer);
         return NULL;
       }
 
       len += snprintf(buffer + len,
               superTblInfo->maxSqlLen - len,
               "if not exists %s.%s%d using %s.%s tags %s ",
@@ -2496,7 +2531,7 @@ static void* createTable(void *sarg)
       batchNum++;
 
       if ((batchNum < superTblInfo->batchCreateTableNum)
              && ((superTblInfo->maxSqlLen - len)
              >= (superTblInfo->lenOfTagOfOneRow + 256))) {
         continue;
       }
@@ -2505,8 +2540,8 @@ static void* createTable(void *sarg)
     len = 0;
     verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer);
     if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){
-      errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer);
       free(buffer);
+      errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer);
       return NULL;
     }
 
@@ -2532,6 +2567,7 @@ static void* createTable(void *sarg)
 static int startMultiThreadCreateChildTable(
         char* cols, int threads, int startFrom, int ntables,
         char* db_name, SSuperTable* superTblInfo) {
 
   pthread_t *pids = malloc(threads * sizeof(pthread_t));
   threadInfo *infos = malloc(threads * sizeof(threadInfo));
 
@@ -2568,7 +2604,7 @@ static int startMultiThreadCreateChildTable(
     if (t_info->taos == NULL) {
       errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
       free(pids);
       free(infos);
       return -1;
     }
 
@@ -2616,12 +2652,12 @@ static void createChildTables() {
         g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
 
         verbosePrint("%s() LN%d: create %d child tables from %d\n", __func__, __LINE__,
                 g_totalChildTables, startFrom);
         startMultiThreadCreateChildTable(
               g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
               g_Dbs.threadCountByCreateTbl,
               startFrom,
-              g_totalChildTables,
+              g_Dbs.db[i].superTbls[j].childTblCount,
               g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
       }
     } else {
@@ -3008,13 +3044,23 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     goto PARSE_OVER;
   }
 
-  cJSON* rowsPerTbl = cJSON_GetObjectItem(root, "rows_per_tbl");
-  if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) {
-    g_args.rows_per_tbl = rowsPerTbl->valueint;
-  } else if (!rowsPerTbl) {
-    g_args.rows_per_tbl = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
+  cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
+  if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
+    g_args.query_times = gQueryTimes->valueint;
+  } else if (!gQueryTimes) {
+    g_args.query_times = 1;
   } else {
-    errorPrint("%s() LN%d, failed to read json, rows_per_tbl input mistake\n", __func__, __LINE__);
+    errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__);
+    goto PARSE_OVER;
+  }
+
+  cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
+  if (interlaceRows && interlaceRows->type == cJSON_Number) {
+    g_args.interlace_rows = interlaceRows->valueint;
+  } else if (!interlaceRows) {
+    g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
+  } else {
+    errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", __func__, __LINE__);
     goto PARSE_OVER;
   }
 
@@ -3092,15 +3138,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
 
   cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
   if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {
-    if (0 == strncasecmp(drop->valuestring, "yes", 3)) {
-      g_Dbs.db[i].drop = 1;
+    if (0 == strncasecmp(drop->valuestring, "yes", strlen("yes"))) {
+      g_Dbs.db[i].drop = true;
     } else {
-      g_Dbs.db[i].drop = 0;
+      g_Dbs.db[i].drop = false;
     }
   } else if (!drop) {
-    g_Dbs.db[i].drop = 0;
+    g_Dbs.db[i].drop = g_args.drop_database;
   } else {
-    printf("ERROR: failed to read json, drop not found\n");
+    errorPrint("%s() LN%d, failed to read json, drop input mistake\n",
+        __func__, __LINE__);
     goto PARSE_OVER;
   }
 
@@ -3498,7 +3545,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     goto PARSE_OVER;
   }
 
-  cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "rows_per_tbl");
+  cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "interlace_rows");
   if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) {
     g_Dbs.db[i].superTbls[j].rowsPerTbl = rowsPerTbl->valueint;
   } else if (!rowsPerTbl) {
@@ -4425,7 +4472,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
   int insertMode;
   char tableName[TSDB_TABLE_NAME_LEN];
 
-  int rowsPerTbl = superTblInfo?superTblInfo->rowsPerTbl:g_args.rows_per_tbl;
+  int rowsPerTbl = superTblInfo?superTblInfo->rowsPerTbl:g_args.interlace_rows;
 
   if (rowsPerTbl > 0) {
     insertMode = INTERLACE_INSERT_MODE;
@@ -4441,7 +4488,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
   pThreadInfo->totalAffectedRows = 0;
 
   int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
-  int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
+  int insert_interval =
+      superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
   uint64_t st = 0;
   uint64_t et = 0xffffffff;
 
@@ -4518,8 +4566,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
 
     pstr += dataLen;
     recOfBatch += batchPerTbl;
+    startTime += batchPerTbl * superTblInfo->timeStampStep;
     pThreadInfo->totalInsertRows += batchPerTbl;
 
     verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
           pThreadInfo->threadID, __func__, __LINE__,
           batchPerTbl, recOfBatch);
@ -4640,8 +4688,10 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
|
||||||
int64_t startTs = taosGetTimestampUs();
|
int64_t startTs = taosGetTimestampUs();
|
||||||
int64_t endTs;
|
int64_t endTs;
|
||||||
|
|
||||||
int timeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
|
int timeStampStep =
|
||||||
int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
|
superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
|
||||||
|
int insert_interval =
|
||||||
|
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
|
||||||
uint64_t st = 0;
|
uint64_t st = 0;
|
||||||
uint64_t et = 0xffffffff;
|
uint64_t et = 0xffffffff;
|
||||||
|
|
||||||
|
@ -4650,7 +4700,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
|
||||||
|
|
||||||
pThreadInfo->samplePos = 0;
|
pThreadInfo->samplePos = 0;
|
||||||
|
|
||||||
for (uint32_t tableSeq = pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to;
|
for (uint32_t tableSeq =
|
||||||
|
pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to;
|
||||||
tableSeq ++) {
|
tableSeq ++) {
|
||||||
int64_t start_time = pThreadInfo->start_time;
|
int64_t start_time = pThreadInfo->start_time;
|
||||||
|
|
||||||
|
@ -4746,7 +4797,7 @@ static void* syncWrite(void *sarg) {
|
||||||
threadInfo *winfo = (threadInfo *)sarg;
|
threadInfo *winfo = (threadInfo *)sarg;
|
||||||
SSuperTable* superTblInfo = winfo->superTblInfo;
|
SSuperTable* superTblInfo = winfo->superTblInfo;
|
||||||
|
|
||||||
int rowsPerTbl = superTblInfo?superTblInfo->rowsPerTbl:g_args.rows_per_tbl;
|
int rowsPerTbl = superTblInfo?superTblInfo->rowsPerTbl:g_args.interlace_rows;
|
||||||
|
|
||||||
if (rowsPerTbl > 0) {
|
if (rowsPerTbl > 0) {
|
||||||
// interlace mode
|
// interlace mode
|
||||||
@@ -4761,14 +4812,15 @@ static void callBack(void *param, TAOS_RES *res, int code) {
   threadInfo* winfo = (threadInfo*)param;
   SSuperTable* superTblInfo = winfo->superTblInfo;
 
-  int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
+  int insert_interval =
+    superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
   if (insert_interval) {
     winfo->et = taosGetTimestampUs();
     if (((winfo->et - winfo->st)/1000) < insert_interval) {
       taosMsleep(insert_interval - (winfo->et - winfo->st)/1000); // ms
     }
   }
 
   char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen);
   char *data = calloc(1, MAX_DATA_SIZE);
   char *pstr = buffer;
@@ -4786,17 +4838,17 @@ static void callBack(void *param, TAOS_RES *res, int code) {
     taos_free_result(res);
     return;
   }
 
   for (int i = 0; i < g_args.num_of_RPR; i++) {
     int rand_num = taosRandom() % 100;
-    if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio)
-    {
+    if (0 != winfo->superTblInfo->disorderRatio
+            && rand_num < winfo->superTblInfo->disorderRatio) {
       int64_t d = winfo->lastTs - taosRandom() % 1000000 + rand_num;
       //generateData(data, datatype, ncols_per_record, d, len_of_binary);
-      (void)generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo);
+      generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo);
     } else {
       //generateData(data, datatype, ncols_per_record, start_time += 1000, len_of_binary);
-      (void)generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo);
+      generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo);
     }
     pstr += sprintf(pstr, "%s", data);
     winfo->counter++;
@@ -4824,7 +4876,8 @@ static void *asyncWrite(void *sarg) {
   winfo->et = 0;
   winfo->lastTs = winfo->start_time;
 
-  int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
+  int insert_interval =
+    superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
   if (insert_interval) {
     winfo->st = taosGetTimestampUs();
   }
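Reviewer note (illustration only): callBack() keeps the existing disorder-injection behaviour: for roughly disorderRatio percent of rows the timestamp is pulled backwards by a random offset from the last written one so that out-of-order writes get exercised, otherwise the cursor advances by a fixed 1000 ms step. A standalone sketch of that idea with generic names (rand() stands in for taosRandom()):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Pick the timestamp (ms) for the next generated row. With probability
     * disorderRatio/100 the row is deliberately out of order; otherwise the
     * cursor advances by a fixed 1000 ms step, mirroring callBack(). */
    static int64_t nextTimestamp(int64_t *lastTs, int disorderRatio) {
      int rand_num = rand() % 100;
      if (disorderRatio != 0 && rand_num < disorderRatio) {
        return *lastTs - rand() % 1000000 + rand_num;  /* disordered row */
      }
      *lastTs += 1000;                                 /* ordered row */
      return *lastTs;
    }

    int main(void) {
      int64_t lastTs = 1600000000000LL;
      for (int i = 0; i < 5; i++) {
        printf("%lld\n", (long long)nextTimestamp(&lastTs, 10));
      }
      return 0;
    }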
@@ -4850,7 +4903,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
   int ntables = 0;
   if (superTblInfo) {
 
     if ((superTblInfo->childTblOffset >= 0)
             && (superTblInfo->childTblLimit > 0)) {
 
       ntables = superTblInfo->childTblLimit;
@@ -5304,16 +5357,16 @@ static int insertTestProcess() {
           continue;
         }
         startMultiThreadInsertData(
           g_Dbs.threadCount,
           g_Dbs.db[i].dbName,
           g_Dbs.db[i].dbCfg.precision,
           superTblInfo);
       }
     } else {
       startMultiThreadInsertData(
         g_Dbs.threadCount,
         g_Dbs.db[i].dbName,
         g_Dbs.db[i].dbCfg.precision,
         NULL);
     }
   }
@@ -5333,7 +5386,7 @@ static int insertTestProcess() {
 }
 
 static void *superQueryProcess(void *sarg) {
   threadInfo *winfo = (threadInfo *)sarg;
 
   //char sqlStr[MAX_TB_NAME_SIZE*2];
   //sprintf(sqlStr, "use %s", g_queryInfo.dbName);
@@ -5342,39 +5395,41 @@ static void *superQueryProcess(void *sarg) {
   int64_t st = 0;
   int64_t et = 0;
   while (1) {
-    if (g_queryInfo.superQueryInfo.rate && (et - st) < (int64_t)g_queryInfo.superQueryInfo.rate*1000) {
+    if (g_queryInfo.superQueryInfo.rate && (et - st) <
+            (int64_t)g_queryInfo.superQueryInfo.rate*1000) {
       taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
       //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to);
     }
 
     st = taosGetTimestampUs();
     for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
       if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
         int64_t t1 = taosGetTimestampUs();
         char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
         if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
-          sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
+          sprintf(tmpFile, "%s-%d",
+                  g_queryInfo.superQueryInfo.result[i], winfo->threadID);
         }
         selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile);
         int64_t t2 = taosGetTimestampUs();
         printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n",
                 taosGetSelfPthreadId(), (t2 - t1)/1000000.0);
       } else {
         int64_t t1 = taosGetTimestampUs();
         int retCode = postProceSql(g_queryInfo.host,
                 g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i]);
         int64_t t2 = taosGetTimestampUs();
         printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n",
                 taosGetSelfPthreadId(), (t2 - t1)/1000000.0);
 
         if (0 != retCode) {
           printf("====restful return fail, threadID[%d]\n", winfo->threadID);
           return NULL;
         }
       }
     }
     et = taosGetTimestampUs();
     printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n",
             taosGetSelfPthreadId(), (double)(et - st)/1000.0);
   }
   return NULL;
@@ -5383,31 +5438,33 @@ static void *superQueryProcess(void *sarg) {
 static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) {
   char sourceString[32] = "xxxx";
   char subTblName[MAX_TB_NAME_SIZE*3];
   sprintf(subTblName, "%s.%s",
           g_queryInfo.dbName,
           g_queryInfo.subQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN);
 
   //printf("inSql: %s\n", inSql);
 
   char* pos = strstr(inSql, sourceString);
   if (0 == pos) {
     return;
   }
 
   tstrncpy(outSql, inSql, pos - inSql + 1);
   //printf("1: %s\n", outSql);
   strcat(outSql, subTblName);
   //printf("2: %s\n", outSql);
   strcat(outSql, pos+strlen(sourceString));
   //printf("3: %s\n", outSql);
 }
 
 static void *subQueryProcess(void *sarg) {
   char sqlstr[1024];
   threadInfo *winfo = (threadInfo *)sarg;
   int64_t st = 0;
   int64_t et = (int64_t)g_queryInfo.subQueryInfo.rate*1000;
-  while (1) {
+  int queryTimes = g_args.query_times;
+
+  while (queryTimes --) {
     if (g_queryInfo.subQueryInfo.rate
             && (et - st) < (int64_t)g_queryInfo.subQueryInfo.rate*1000) {
       taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms
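Reviewer note (illustration only): replacing while (1) with a counter read from g_args.query_times makes subQueryProcess() run a bounded number of rounds instead of looping forever, while the existing rate check still paces the rounds. A self-contained sketch of that loop shape under those assumptions (names are generic, not the taosdemo internals):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int64_t nowUs(void) {                 /* microsecond wall clock */
      struct timespec ts;
      clock_gettime(CLOCK_MONOTONIC, &ts);
      return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    static void runOneQueryRound(void) {         /* placeholder for "run every SQL once" */
      usleep(20 * 1000);
    }

    /* At most queryTimes rounds, at most one round per intervalMs milliseconds:
     * the shape subQueryProcess() now follows instead of an endless while (1). */
    static void boundedQueryLoop(int queryTimes, int intervalMs) {
      int64_t st = 0, et = 0;
      while (queryTimes-- > 0) {
        int64_t elapsedMs = (et - st) / 1000;
        if (intervalMs > 0 && elapsedMs < intervalMs) {
          usleep((useconds_t)(intervalMs - elapsedMs) * 1000);  /* pace the next round */
        }
        st = nowUs();
        runOneQueryRound();
        et = nowUs();
        printf("round finished in %.3f ms\n", (et - st) / 1000.0);
      }
    }

    int main(void) {
      boundedQueryLoop(3, 100);
      return 0;
    }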
@@ -5421,24 +5478,30 @@ static void *subQueryProcess(void *sarg) {
         replaceSubTblName(g_queryInfo.subQueryInfo.sql[j], sqlstr, i);
         char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
         if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
           sprintf(tmpFile, "%s-%d",
                   g_queryInfo.subQueryInfo.result[i],
                   winfo->threadID);
         }
         selectAndGetResult(winfo->taos, sqlstr, tmpFile);
       }
     }
     et = taosGetTimestampUs();
     printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n",
             taosGetSelfPthreadId(),
             winfo->start_table_from,
             winfo->end_table_to,
             (double)(et - st)/1000000.0);
   }
 
   return NULL;
 }
 
 static int queryTestProcess() {
 
+  setupForAnsiEscape();
+  printfQueryMeta();
+  resetAfterAnsiEscape();
 
   TAOS * taos = NULL;
   taos = taos_connect(g_queryInfo.host,
           g_queryInfo.user,
@@ -5456,15 +5519,13 @@ static int queryTestProcess() {
           g_queryInfo.subQueryInfo.sTblName,
           &g_queryInfo.subQueryInfo.childTblName,
           &g_queryInfo.subQueryInfo.childTblCount);
   }
 
-  printfQueryMeta();
-
   if (!g_args.answer_yes) {
     printf("Press enter key to continue\n\n");
     (void)getchar();
   }
 
   printfQuerySystemInfo(taos);
 
   pthread_t *pids = NULL;
@@ -5553,7 +5614,7 @@ static int queryTestProcess() {
     for (int i = 0; i < threads; i++) {
       threadInfo *t_info = infosOfSub + i;
       t_info->threadID = i;
 
       t_info->start_table_from = startFrom;
       t_info->ntables = i<b?a+1:a;
       t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
@@ -5565,7 +5626,7 @@ static int queryTestProcess() {
     g_queryInfo.subQueryInfo.threadCnt = threads;
   } else {
     g_queryInfo.subQueryInfo.threadCnt = 0;
   }
 
   for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) {
     pthread_join(pids[i], NULL);
@@ -5753,18 +5814,20 @@ static void *superSubscribeProcess(void *sarg) {
 }
 
 static int subscribeTestProcess() {
+  setupForAnsiEscape();
   printfQueryMeta();
+  resetAfterAnsiEscape();
 
   if (!g_args.answer_yes) {
     printf("Press enter key to continue\n\n");
     (void)getchar();
   }
 
   TAOS * taos = NULL;
   taos = taos_connect(g_queryInfo.host,
           g_queryInfo.user,
           g_queryInfo.password,
           g_queryInfo.dbName,
           g_queryInfo.port);
   if (taos == NULL) {
     errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
@@ -5772,10 +5835,10 @@ static int subscribeTestProcess() {
   }
 
   if (0 != g_queryInfo.subQueryInfo.sqlCount) {
     getAllChildNameOfSuperTable(taos,
             g_queryInfo.dbName,
             g_queryInfo.subQueryInfo.sTblName,
             &g_queryInfo.subQueryInfo.childTblName,
             &g_queryInfo.subQueryInfo.childTblCount);
   }
 
@@ -5875,7 +5938,8 @@ static void initOfInsertMeta() {
   tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE);
   tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE);
   g_Dbs.threadCount = 2;
-  g_Dbs.use_metric = true;
+
+  g_Dbs.use_metric = g_args.use_metric;
 }
 
 static void initOfQueryMeta() {
@@ -6084,16 +6148,23 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
 
 static void testMetaFile() {
   if (INSERT_TEST == g_args.test_mode) {
-    if (g_Dbs.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir);
+    if (g_Dbs.cfgDir[0])
+      taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir);
 
     insertTestProcess();
 
   } else if (QUERY_TEST == g_args.test_mode) {
     if (g_queryInfo.cfgDir[0])
       taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
 
     queryTestProcess();
 
   } else if (SUBSCRIBE_TEST == g_args.test_mode) {
     if (g_queryInfo.cfgDir[0])
       taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
 
     subscribeTestProcess();
 
   } else {
     ;
   }
@@ -50,6 +50,7 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
 static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *pConn);
 static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg);
 static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg);
+static int32_t mnodeProcessSyncDbMsg(SMnodeMsg *pMsg);
 int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg);
 
 #ifndef _TOPIC
@@ -178,6 +179,7 @@ int32_t mnodeInitDbs() {
   mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CREATE_DB, mnodeProcessCreateDbMsg);
   mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_ALTER_DB, mnodeProcessAlterDbMsg);
   mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_DROP_DB, mnodeProcessDropDbMsg);
+  mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_SYNC_DB, mnodeProcessSyncDbMsg);
   mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_DB, mnodeGetDbMeta);
   mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_DB, mnodeRetrieveDbs);
   mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_DB, mnodeCancelGetNextDb);
@@ -1184,6 +1186,10 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) {
   return mnodeDropDb(pMsg);
 }
 
+static int32_t mnodeProcessSyncDbMsg(SMnodeMsg *pMsg) {
+  return 0;
+}
+
 void mnodeDropAllDbs(SAcctObj *pAcct) {
   int32_t numOfDbs = 0;
   SDbObj *pDb = NULL;
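Reviewer note (illustration only): the mnode side of SYNCDB follows the usual wiring: declare a handler, register it for the new TSDB_MSG_TYPE_CM_SYNC_DB write message in mnodeInitDbs(), and stub it to return 0 for now. A generic sketch of that registration pattern with hypothetical stand-ins (not the real mnode types or registry):

    #include <stdio.h>

    /* Hypothetical stand-ins for SMnodeMsg and the mnode write-message registry. */
    typedef struct { int msgType; } MnodeMsg;
    typedef int (*MsgHandler)(MnodeMsg *pMsg);

    enum { MSG_CM_DROP_DB = 1, MSG_CM_SYNC_DB = 2, MSG_TYPE_MAX = 64 };

    static MsgHandler g_writeHandlers[MSG_TYPE_MAX];

    static void addWriteMsgHandle(int msgType, MsgHandler fp) {
      g_writeHandlers[msgType] = fp;     /* mirrors mnodeAddWriteMsgHandle() */
    }

    /* Stub handler, like the new mnodeProcessSyncDbMsg(): accept the message,
     * do nothing yet, and report success (0). */
    static int processSyncDbMsg(MnodeMsg *pMsg) {
      (void)pMsg;
      return 0;
    }

    int main(void) {
      addWriteMsgHandle(MSG_CM_SYNC_DB, processSyncDbMsg);
      MnodeMsg msg = { .msgType = MSG_CM_SYNC_DB };
      printf("sync-db handler returned %d\n", g_writeHandlers[msg.msgType](&msg));
      return 0;
    }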
@@ -726,6 +726,9 @@ expritem(A) ::= . {A = 0;}
 ///////////////////////////////////reset query cache//////////////////////////////////////
 cmd ::= RESET QUERY CACHE.  { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
 
+///////////////////////////////////sync replica database//////////////////////////////////
+cmd ::= SYNCDB ids(X) REPLICA.{ setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &X);}
+
 ///////////////////////////////////ALTER TABLE statement//////////////////////////////////
 cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
     X.n += F.n;
@@ -911,6 +911,7 @@ void setDCLSqlElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
     SStrToken *pToken = va_arg(va, SStrToken *);
     taosArrayPush(pInfo->pMiscInfo->a, pToken);
   }
 
   va_end(va);
 }
 
@@ -100,6 +100,7 @@ static SKeyword keywordTable[] = {
     {"ACCOUNT", TK_ACCOUNT},
     {"USE", TK_USE},
     {"DESCRIBE", TK_DESCRIBE},
+    {"SYNCDB", TK_SYNCDB},
     {"ALTER", TK_ALTER},
     {"PASS", TK_PASS},
     {"PRIVILEGE", TK_PRIVILEGE},
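Reviewer note (illustration only): with TK_SYNCDB in the keyword table and the new grammar rule above, a statement of the form SYNCDB <db> REPLICA should parse and end up in the stub handler added earlier. A hedged client-side sketch using the standard C connector calls (taos_connect, taos_query, taos_errno, taos_errstr); the host, credentials and database name are placeholders:

    #include <stdio.h>
    #include <taos.h>   /* TDengine C connector */

    int main(void) {
      TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
      if (taos == NULL) {
        fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
        return 1;
      }

      /* Matches the new rule: cmd ::= SYNCDB ids REPLICA. */
      TAOS_RES *res = taos_query(taos, "syncdb db1 replica");
      if (taos_errno(res) != 0) {
        fprintf(stderr, "syncdb failed: %s\n", taos_errstr(res));
      }
      taos_free_result(res);
      taos_close(taos);
      return 0;
    }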
src/query/src/sql.c: 1911 lines changed (file diff suppressed because it is too large).
@@ -38,7 +38,7 @@
       "insert_rows": 100,
       "multi_thread_write_one_tbl": "no",
       "number_of_tbl_in_one_sql": 0,
-      "rows_per_tbl": 3,
+      "interlace_rows": 3,
       "max_sql_len": 1024,
       "disorder_ratio": 0,
       "disorder_range": 1000,
@@ -68,20 +68,14 @@ function prepareBuild {
       rm -rf $CURR_DIR/../../../../release/*
     fi
 
-    if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
-      cd $CURR_DIR/../../../../packaging
-      echo $CURR_DIR
-      echo $IN_TDINTERNAL
-      echo "generating TDeninger packages"
-      if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
-        pwd
-        ./release.sh -v cluster -n $VERSION >> /dev/null 2>&1
-      else
-        pwd
-        ./release.sh -v edge -n $VERSION >> /dev/null 2>&1
-      fi
+    cd $CURR_DIR/../../../../packaging
 
     if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
+      if [ ! -e $DOCKER_DIR/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
+
+        echo "generating TDeninge enterprise packages"
+        ./release.sh -v cluster -n $VERSION >> /dev/null 2>&1
 
       if [ ! -e $CURR_DIR/../../../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then
         echo "no TDengine install package found"
         exit 1
@@ -91,7 +85,17 @@ function prepareBuild {
         echo "no arbitrator install package found"
         exit 1
       fi
-    else
+      else
+        cd $CURR_DIR/../../../../release
+        mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
+        mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
+      fi
+    else
+      if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
+
+        echo "generating TDeninge community packages"
+        ./release.sh -v edge -n $VERSION >> /dev/null 2>&1
+
       if [ ! -e $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then
         echo "no TDengine install package found"
         exit 1
@@ -101,16 +105,11 @@ function prepareBuild {
         echo "no arbitrator install package found"
         exit 1
       fi
-    fi
 
     cd $CURR_DIR/../../../../release
-    if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
-      mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
-      mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
-    else
       mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
       mv TDengine-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
     fi
     fi
 
   rm -rf $DOCKER_DIR/*.yml
@@ -39,7 +39,7 @@
       "insert_rows": 100000,
       "multi_thread_write_one_tbl": "no",
       "number_of_tbl_in_one_sql": 1,
-      "rows_per_tbl": 100,
+      "interlace_rows": 100,
       "max_sql_len": 1024000,
       "disorder_ratio": 0,
       "disorder_range": 1000,
@@ -198,6 +198,7 @@ python3 ./test.py -f query/queryWithTaosdKilled.py
 python3 ./test.py -f query/floatCompare.py
 python3 ./test.py -f query/query1970YearsAf.py
 python3 ./test.py -f query/bug3351.py
+python3 ./test.py -f query/bug3375.py
 
 
 
@@ -234,16 +235,6 @@ python3 client/twoClients.py
 python3 test.py -f query/queryInterval.py
 python3 test.py -f query/queryFillTest.py
-
-# tools
-python3 test.py -f tools/taosdemoTest.py
-python3 test.py -f tools/taosdemoTestWithoutMetric.py
-python3 test.py -f tools/taosdemoTestWithJson.py
-python3 test.py -f tools/taosdemoTestLimitOffset.py
-python3 test.py -f tools/taosdumpTest.py
-python3 test.py -f tools/taosdemoTest2.py
-python3 test.py -f tools/taosdemoTestSampleData.py
-python3 test.py -f tools/taosdemoTestInterlace.py
 
 # subscribe
 python3 test.py -f subscribe/singlemeter.py
 #python3 test.py -f subscribe/stability.py
@@ -253,6 +244,18 @@ python3 test.py -f subscribe/supertable.py
 #======================p3-end===============
 #======================p4-start===============
 
+
+# tools
+python3 test.py -f tools/taosdumpTest.py
+
+python3 test.py -f tools/taosdemoTest.py
+python3 test.py -f tools/taosdemoTestWithoutMetric.py
+python3 test.py -f tools/taosdemoTestWithJson.py
+python3 test.py -f tools/taosdemoTestLimitOffset.py
+python3 test.py -f tools/taosdemoTest2.py
+python3 test.py -f tools/taosdemoTestSampleData.py
+python3 test.py -f tools/taosdemoTestInterlace.py
+python3 test.py -f tools/taosdemoTestQuery.py
 
 python3 ./test.py -f update/merge_commit_data-0.py
 # wal
 python3 ./test.py -f wal/addOldWalTest.py
@@ -0,0 +1,61 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def run(self):
+        tdSql.prepare()
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 36500")
+        tdSql.execute("use db")
+
+        tdLog.printNoPrefix("==========step1:create table && insert data")
+        tdSql.execute(
+            "create table stb1 (ts timestamp, c11 int) TAGS(t11 int, t12 int )"
+        )
+        tdSql.execute(
+            "create table stb2 (ts timestamp, c21 int) TAGS(t21 int, t22 int )"
+        )
+        tdSql.execute("create table t10 using stb1 tags(1, 10)")
+        tdSql.execute("create table t20 using stb2 tags(1, 12)")
+        tdSql.execute("insert into t10 values (1600000000000, 1)")
+        tdSql.execute("insert into t10 values (1610000000000, 2)")
+        tdSql.execute("insert into t20 values (1600000000000, 3)")
+        tdSql.execute("insert into t20 values (1610000000000, 4)")
+
+        tdLog.printNoPrefix("==========step2:query crash test")
+        tdSql.query("select stb1.c11, stb1.t11, stb1.t12 from stb2,stb1 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
+        tdSql.checkRows(2)
+        tdSql.query("select stb2.c21, stb2.t21, stb2.t21 from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
+        tdSql.checkRows(2)
+        tdSql.query("select top(stb2.c21,2) from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
+        tdSql.checkRows(2)
+        tdSql.query("select last(stb2.c21) from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
+        tdSql.checkRows(1)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -18,6 +18,7 @@ import json
 import subprocess
 import datetime
 
+
 from util.log import *
 from util.sql import *
 from util.cases import *
@@ -39,6 +40,7 @@ class TDTestCase:
         tdLog.debug(f"binPath {binPath}")
         binPath = os.path.realpath(binPath)
         tdLog.debug(f"binPath real path {binPath}")
+
         if path == "":
             self.path = os.path.abspath(binPath + "../../")
         else:
@@ -187,12 +189,12 @@ class TDTestCase:
             "select * from t9 where t9.ts > '1969-12-31 22:00:00.000' and t9.ts <'1970-01-01 02:00:00.000' "
         )
         tdSql.checkRows(719)
 
         tdSql.query(
             "select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' "
         )
         tdSql.checkRows(680)
 
         tdSql.query(
             "select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' "
         )
@@ -251,5 +253,6 @@ class TDTestCase:
         tdSql.close()
         tdLog.success(f"{__file__} successfully executed")
 
+
 tdCases.addLinux(__file__, TDTestCase())
 tdCases.addWindows(__file__, TDTestCase())
@ -10,7 +10,7 @@
|
||||||
"result_file": "./insert_res.txt",
|
"result_file": "./insert_res.txt",
|
||||||
"confirm_parameter_prompt": "no",
|
"confirm_parameter_prompt": "no",
|
||||||
"insert_interval": 5000,
|
"insert_interval": 5000,
|
||||||
"rows_per_tbl": 50,
|
"interlace_rows": 50,
|
||||||
"num_of_records_per_req": 100,
|
"num_of_records_per_req": 100,
|
||||||
"max_sql_len": 1024000,
|
"max_sql_len": 1024000,
|
||||||
"databases": [{
|
"databases": [{
|
||||||
|
@ -42,7 +42,7 @@
|
||||||
"insert_mode": "taosc",
|
"insert_mode": "taosc",
|
||||||
"insert_rows": 250,
|
"insert_rows": 250,
|
||||||
"multi_thread_write_one_tbl": "no",
|
"multi_thread_write_one_tbl": "no",
|
||||||
"rows_per_tbl": 80,
|
"interlace_rows": 80,
|
||||||
"max_sql_len": 1024000,
|
"max_sql_len": 1024000,
|
||||||
"disorder_ratio": 0,
|
"disorder_ratio": 0,
|
||||||
"disorder_range": 1000,
|
"disorder_range": 1000,
|
||||||
|
|
@@ -0,0 +1,22 @@
+{
+  "filetype": "query",
+  "cfgdir": "/etc/taos",
+  "host": "127.0.0.1",
+  "port": 6030,
+  "user": "root",
+  "password": "taosdata",
+  "confirm_parameter_prompt": "no",
+  "databases": "test",
+  "query_times": 1,
+  "super_table_query": {
+    "stblname": "meters",
+    "query_interval": 10,
+    "threads": 8,
+    "sqls": [
+      {
+        "sql": "select last_row(ts) from xxxx",
+        "result": ""
+      }
+    ]
+  }
+}
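Reviewer note (illustration only): this new tools/query.json drives taosdemo's query mode; query_times bounds the query loop shown earlier, and each entry under super_table_query.sqls appears to be run per child table of stblname, with the literal xxxx substituted by the child table name (see replaceSubTblName above). A small standalone sketch of reading those fields with the cJSON API taosdemo also uses; only keys visible in this file are assumed, and the include path may differ on your system:

    #include <stdio.h>
    #include <cJSON.h>   /* e.g. <cjson/cJSON.h> on some distributions */

    int main(void) {
      const char *cfg =
          "{\"query_times\":1,"
          " \"super_table_query\":{\"stblname\":\"meters\",\"threads\":8,"
          "  \"sqls\":[{\"sql\":\"select last_row(ts) from xxxx\",\"result\":\"\"}]}}";

      cJSON *root = cJSON_Parse(cfg);
      if (root == NULL) return 1;

      cJSON *queryTimes = cJSON_GetObjectItem(root, "query_times");
      cJSON *stq  = cJSON_GetObjectItem(root, "super_table_query");
      cJSON *stbl = cJSON_GetObjectItem(stq, "stblname");
      cJSON *sqls = cJSON_GetObjectItem(stq, "sqls");

      printf("query_times=%d stblname=%s\n", queryTimes->valueint, stbl->valuestring);

      cJSON *item = NULL;
      cJSON_ArrayForEach(item, sqls) {
        /* "xxxx" is the per-child-table placeholder the tool substitutes. */
        printf("sql template: %s\n", cJSON_GetObjectItem(item, "sql")->valuestring);
      }

      cJSON_Delete(root);
      return 0;
    }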
@@ -1,4 +1,4 @@
-###################################################################
+##################################################################
 # Copyright (c) 2016 by TAOS Technologies, Inc.
 # All rights reserved.
 #
@@ -25,9 +25,6 @@ class TDTestCase:
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)
 
-        self.numberOfTables = 10000
-        self.numberOfRecords = 100
-
     def getBuildPath(self):
         selfPath = os.path.dirname(os.path.realpath(__file__))
 
@@ -0,0 +1,78 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+import subprocess
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+        self.numberOfTables = 1000
+        self.numberOfRecords = 100
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        tdSql.prepare()
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath + "/build/bin/"
+        os.system("%staosdemo -y -t %d -n %d" %
+                  (binPath, self.numberOfTables, self.numberOfRecords))
+        print("Sleep 2 seconds..")
+        time.sleep(2)
+        os.system('%staosdemo -f tools/query.json ' % binPath)
+        # taosdemoCmd = '%staosdemo -f tools/query.json ' % binPath
+        # threads = subprocess.check_output(
+        #     taosdemoCmd, shell=True).decode("utf-8")
+        # print("threads: %d" % int(threads))
+
+        # if (int(threads) != 8):
+        #     caller = inspect.getframeinfo(inspect.stack()[0][0])
+        #     tdLog.exit(
+        #         "%s(%d) failed: expected threads 8, actual %d" %
+        #         (caller.filename, caller.lineno, int(threads)))
+        #
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -100,7 +100,10 @@ function runSimCaseOneByOnefq {
       cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
       rm -rf ../../sim/case.log
     fi
-    exit 8
+    dohavecore $2
+    if [[ $2 == 1 ]];then
+      exit 8
+    fi
   fi
   end_time=`date +%s`
   echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log
@@ -169,10 +172,13 @@ function runPyCaseOneByOnefq() {
     out_log=`tail -1 pytest-out.log `
     if [[ $out_log =~ 'failed' ]];then
      cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
-      echo '=====================log====================='
+      echo '=====================log===================== '
      cat ../../sim/case.log
      rm -rf ../../sim/case.log
-      exit 8
+      dohavecore $2
+      if [[ $2 == 1 ]];then
+        exit 8
+      fi
    fi
    echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
  else