Merge branch 'develop' into test/TD-3433

wu champion 2021-03-26 10:50:39 +08:00
commit c2da6d215a
18 changed files with 3438 additions and 421 deletions

Jenkinsfile vendored

@@ -39,14 +39,14 @@ def pre_test(){
sudo rmtaos || echo "taosd has not installed"
'''
sh '''
killall -9 taosd ||echo "no taosd running"
killall -9 gdb || echo "no gdb running"
cd ${WKC}
git checkout develop
git reset --hard HEAD~10 >/dev/null
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD develop)|grep -v -E '.*md|//src//connector|Jenkinsfile'
find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\;
cd ${WK}
git reset --hard HEAD~10


@@ -111,9 +111,10 @@ taos>
**Tips:**
- Any data node that is already online in the cluster can serve as the firstEP for nodes joining later.
- The firstEp parameter only takes effect the first time a data node joins the cluster; after joining, the data node keeps the latest list of mnode End Points and no longer relies on this parameter.
- From then on, the firstEp parameter in the configuration file is mainly used for client connections; for example, the taos shell started without arguments connects by default to the node specified by firstEp.
- Two data nodes (dnodes) started without the firstEp parameter will run independently. In that case, one data node cannot be joined to the other to form a cluster. **Two independent clusters cannot be merged into a new cluster.**
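To make the client-side role of firstEp concrete, here is a minimal sketch using the C connector. The host name, user and password below are placeholders, and the fallback behaviour of passing a NULL host (letting the connector use the firstEp configured in taos.cfg, mirroring the parameterless taos shell) is an assumption, not something this page verifies:

```c
#include <stdio.h>
#include <taos.h>

int main(void) {
  /* Connect to an explicitly chosen node, like `taos -h h1.taosdata.com`.  */
  /* "h1.taosdata.com" and "root"/"taosdata" are placeholder values.        */
  TAOS *conn = taos_connect("h1.taosdata.com", "root", "taosdata", NULL, 6030);

  if (conn == NULL) {
    /* Assumption: a NULL host makes the connector fall back to the firstEp */
    /* from taos.cfg, which is what the taos shell does when started bare.  */
    conn = taos_connect(NULL, "root", "taosdata", NULL, 0);
  }

  if (conn == NULL) {
    printf("failed to connect\n");
    return 1;
  }

  printf("connected\n");
  taos_close(conn);
  return 0;
}
```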
## <a class="anchor" id="management"></a>Data Node Management


@@ -83,6 +83,22 @@ typedef struct SJoinSupporter {
SArray* pVgroupTables;
} SJoinSupporter;
typedef struct SMergeCtx {
SJoinSupporter* p;
int32_t idx;
SArray* res;
int8_t compared;
}SMergeCtx;
typedef struct SMergeTsCtx {
SJoinSupporter* p;
STSBuf* res;
int64_t numOfInput;
int8_t compared;
}SMergeTsCtx;
typedef struct SVgroupTableInfo {
SVgroupInfo vgInfo;
SArray* itemList; //SArray<STableIdInfo>
@@ -183,6 +199,7 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deep
void tscSqlExprInfoDestroy(SArray* pExprInfo);
SColumn* tscColumnClone(const SColumn* src);
bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex);
SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
void tscColumnListDestroy(SArray* pColList);


@@ -142,15 +142,15 @@ typedef struct SCond {
} SCond;
typedef struct SJoinNode {
char tableName[TSDB_TABLE_FNAME_LEN];
uint64_t uid;
int16_t tagColId;
SArray* tsJoin;
SArray* tagJoin;
} SJoinNode;
typedef struct SJoinInfo {
bool hasJoin;
SJoinNode left; SJoinNode* joinTables[TSDB_MAX_JOIN_TABLE_NUM];
SJoinNode right;
} SJoinInfo;
typedef struct STagCond {


@ -977,12 +977,18 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSl
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableName, SSqlObj* pSql) { int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableName, SSqlObj* pSql) {
const char* msg1 = "name too long"; const char* msg1 = "name too long";
const char* msg2 = "acctId too long"; const char* msg2 = "acctId too long";
const char* msg3 = "no acctId";
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
code = tNameSetAcctId(&pTableMetaInfo->name, getAccountId(pSql)); char* acctId = getAccountId(pSql);
if (acctId == NULL || strlen(acctId) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
code = tNameSetAcctId(&pTableMetaInfo->name, acctId);
if (code != 0) { if (code != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
} }
@ -3353,23 +3359,25 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSq
} }
} }
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) { static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
const char* msg1 = "invalid join query condition"; int32_t code = 0;
const char* msg2 = "invalid table name in join query"; const char* msg1 = "timestamp required for join tables";
const char* msg3 = "type of join columns must be identical"; const char* msg3 = "type of join columns must be identical";
const char* msg4 = "invalid column name in join condition"; const char* msg4 = "invalid column name in join condition";
const char* msg5 = "only support one join tag for each table";
if (pExpr == NULL) { if (pExpr == NULL) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if (!tSqlExprIsParentOfLeaf(pExpr)) { if (!tSqlExprIsParentOfLeaf(pExpr)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); code = checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr->pLeft);
if (code) {
return code;
} }
STagCond* pTagCond = &pQueryInfo->tagCond; return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr->pRight);
SJoinNode* pLeft = &pTagCond->joinInfo.left; }
SJoinNode* pRight = &pTagCond->joinInfo.right;
SColumnIndex index = COLUMN_INDEX_INITIALIZER; SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
@ -3379,14 +3387,29 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
SSchema* pTagSchema1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); SSchema* pTagSchema1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
pLeft->uid = pTableMetaInfo->pTableMeta->id.uid; assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
pLeft->tagColId = pTagSchema1->colId;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pLeft->tableName); SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (code != TSDB_CODE_SUCCESS) { if (*leftNode == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
} }
(*leftNode)->uid = pTableMetaInfo->pTableMeta->id.uid;
(*leftNode)->tagColId = pTagSchema1->colId;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) {
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
}
}
int16_t leftIdx = index.tableIndex;
index = (SColumnIndex)COLUMN_INDEX_INITIALIZER; index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
@ -3395,20 +3418,55 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
SSchema* pTagSchema2 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); SSchema* pTagSchema2 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
pRight->uid = pTableMetaInfo->pTableMeta->id.uid; assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
pRight->tagColId = pTagSchema2->colId;
code = tNameExtractFullName(&pTableMetaInfo->name, pRight->tableName); SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (code != TSDB_CODE_SUCCESS) { if (*rightNode == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
} }
(*rightNode)->uid = pTableMetaInfo->pTableMeta->id.uid;
(*rightNode)->tagColId = pTagSchema2->colId;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) {
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
}
}
int16_t rightIdx = index.tableIndex;
if (pTagSchema1->type != pTagSchema2->type) { if (pTagSchema1->type != pTagSchema2->type) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
} }
pTagCond->joinInfo.hasJoin = true; if ((*leftNode)->tagJoin == NULL) {
(*leftNode)->tagJoin = taosArrayInit(2, sizeof(int16_t));
}
if ((*rightNode)->tagJoin == NULL) {
(*rightNode)->tagJoin = taosArrayInit(2, sizeof(int16_t));
}
taosArrayPush((*leftNode)->tagJoin, &rightIdx);
taosArrayPush((*rightNode)->tagJoin, &leftIdx);
pQueryInfo->tagCond.joinInfo.hasJoin = true;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
}
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr);
} }
static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList,
@ -3674,7 +3732,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
const char* msg1 = "table query cannot use tags filter"; const char* msg1 = "table query cannot use tags filter";
const char* msg2 = "illegal column name"; const char* msg2 = "illegal column name";
const char* msg3 = "only one query time range allowed"; const char* msg3 = "only one query time range allowed";
const char* msg4 = "only one join condition allowed"; const char* msg4 = "too many join tables";
const char* msg5 = "not support ordinary column join"; const char* msg5 = "not support ordinary column join";
const char* msg6 = "only one query condition on tbname allowed"; const char* msg6 = "only one query condition on tbname allowed";
const char* msg7 = "only in/like allowed in filter table name"; const char* msg7 = "only in/like allowed in filter table name";
@ -3705,6 +3763,47 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY); TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY);
pCondExpr->tsJoin = true; pCondExpr->tsJoin = true;
assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (*leftNode == NULL) {
*leftNode = calloc(1, sizeof(SJoinNode));
if (*leftNode == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
int16_t leftIdx = index.tableIndex;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (index.tableIndex < 0 || index.tableIndex >= TSDB_MAX_JOIN_TABLE_NUM) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (*rightNode == NULL) {
*rightNode = calloc(1, sizeof(SJoinNode));
if (*rightNode == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
int16_t rightIdx = index.tableIndex;
if ((*leftNode)->tsJoin == NULL) {
(*leftNode)->tsJoin = taosArrayInit(2, sizeof(int16_t));
}
if ((*rightNode)->tsJoin == NULL) {
(*rightNode)->tsJoin = taosArrayInit(2, sizeof(int16_t));
}
taosArrayPush((*leftNode)->tsJoin, &rightIdx);
taosArrayPush((*rightNode)->tsJoin, &leftIdx);
/* /*
* to release expression, e.g., m1.ts = m2.ts, * to release expression, e.g., m1.ts = m2.ts,
* since this expression is used to set the join query type * since this expression is used to set the join query type
@ -3762,10 +3861,6 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
return TSDB_CODE_TSC_INVALID_SQL; return TSDB_CODE_TSC_INVALID_SQL;
} }
if (pCondExpr->pJoinExpr != NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg); ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg);
*pExpr = NULL; *pExpr = NULL;
@ -3993,6 +4088,7 @@ static bool validateFilterExpr(SQueryInfo* pQueryInfo) {
static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) { static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
const char* msg0 = "invalid timestamp"; const char* msg0 = "invalid timestamp";
const char* msg1 = "only one time stamp window allowed"; const char* msg1 = "only one time stamp window allowed";
int32_t code = 0;
if (pExpr == NULL) { if (pExpr == NULL) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -4003,7 +4099,10 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
} }
getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft); code = getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft);
if (code) {
return code;
}
return getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pRight); return getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pRight);
} else { } else {
@ -4085,6 +4184,7 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) {
} }
} }
/*
static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) { static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
@ -4107,6 +4207,7 @@ static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInf
tscColumnListInsert(pTableMetaInfo->tagColList, &index); tscColumnListInsert(pTableMetaInfo->tagColList, &index);
} }
} }
*/
static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) { static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
const char *msg1 = "invalid tag operator"; const char *msg1 = "invalid tag operator";
@ -4250,6 +4351,102 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
return ret; return ret;
} }
int32_t validateJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) {
const char* msg1 = "timestamp required for join tables";
const char* msg2 = "tag required for join stables";
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i];
if (node == NULL || node->tsJoin == NULL || taosArrayGetSize(node->tsJoin) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
}
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i];
if (node == NULL || node->tagJoin == NULL || taosArrayGetSize(node->tagJoin) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
}
}
}
return TSDB_CODE_SUCCESS;
}
void mergeJoinNodesImpl(int8_t* r, int8_t* p, int16_t* tidx, SJoinNode** nodes, int32_t type) {
SJoinNode *node = nodes[*tidx];
SArray* arr = (type == 0) ? node->tsJoin : node->tagJoin;
size_t size = taosArrayGetSize(arr);
p[*tidx] = 1;
for (int32_t j = 0; j < size; j++) {
int16_t* idx = taosArrayGet(arr, j);
r[*idx] = 1;
if (p[*idx] == 0) {
mergeJoinNodesImpl(r, p, idx, nodes, type);
}
}
}
int32_t mergeJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) {
const char* msg1 = "not all join tables have same timestamp";
const char* msg2 = "not all join tables have same tag";
int8_t r[TSDB_MAX_JOIN_TABLE_NUM] = {0};
int8_t p[TSDB_MAX_JOIN_TABLE_NUM] = {0};
for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) {
mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 0);
taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin);
for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) {
if (r[j]) {
taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin, &j);
}
}
memset(r, 0, sizeof(r));
memset(p, 0, sizeof(p));
}
if (taosArrayGetSize(pQueryInfo->tagCond.joinInfo.joinTables[0]->tsJoin) != pQueryInfo->numOfTables) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) {
mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 1);
taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin);
for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) {
if (r[j]) {
taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin, &j);
}
}
memset(r, 0, sizeof(r));
memset(p, 0, sizeof(p));
}
if (taosArrayGetSize(pQueryInfo->tagCond.joinInfo.joinTables[0]->tagJoin) != pQueryInfo->numOfTables) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
}
}
return TSDB_CODE_SUCCESS;
}
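For readers skimming the diff: validateJoinNodes and mergeJoinNodes above treat the per-table tsJoin/tagJoin arrays as edges of a graph and require every joined table to land in a single connected group. A minimal standalone sketch of that reachability idea, with toy arrays standing in for the SJoinNode/taosArray types (illustration only, not the tsc code):

```c
#include <stdio.h>

#define MAX_TABLES 10

/* toy adjacency matrix: joins[i][j] != 0 means an explicit "ti.ts = tj.ts" condition */
static void propagate(int idx, int numOfTables, int joins[][MAX_TABLES],
                      int visited[], int reachable[]) {
  visited[idx] = 1;
  for (int j = 0; j < numOfTables; ++j) {
    if (joins[idx][j]) {
      reachable[j] = 1;
      if (!visited[j]) {
        propagate(j, numOfTables, joins, visited, reachable);
      }
    }
  }
}

int main(void) {
  /* three tables joined as t0.ts = t1.ts and t1.ts = t2.ts (no direct t0-t2 condition) */
  int joins[MAX_TABLES][MAX_TABLES] = {{0}};
  joins[0][1] = joins[1][0] = 1;
  joins[1][2] = joins[2][1] = 1;

  int visited[MAX_TABLES] = {0};
  int reachable[MAX_TABLES] = {0};
  reachable[0] = 1;                      /* a table always reaches itself */
  propagate(0, 3, joins, visited, reachable);

  int connected = reachable[0] && reachable[1] && reachable[2];
  printf("all three tables are in one ts-join group: %s\n", connected ? "yes" : "no");
  return 0;
}
```

mergeJoinNodes then rewrites each table's tsJoin (and tagJoin for super tables) to the full reachable set and rejects the query when that set is smaller than the number of joined tables.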
int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql) { int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql) {
if (pExpr == NULL) { if (pExpr == NULL) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -4295,17 +4492,17 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql
// 4. get the table name query condition // 4. get the table name query condition
if ((ret = getTablenameCond(&pSql->cmd, pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) { if ((ret = getTablenameCond(&pSql->cmd, pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) {
return ret; goto PARSE_WHERE_EXIT;
} }
// 5. other column query condition // 5. other column query condition
if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) {
return ret; goto PARSE_WHERE_EXIT;
} }
// 6. join condition // 6. join condition
if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) { if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
return ret; goto PARSE_WHERE_EXIT;
} }
// 7. query condition for table name // 7. query condition for table name
@ -4313,12 +4510,29 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql
ret = setTableCondForSTableQuery(&pSql->cmd, pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb); ret = setTableCondForSTableQuery(&pSql->cmd, pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb);
taosStringBuilderDestroy(&sb); taosStringBuilderDestroy(&sb);
if (ret) {
if (!validateFilterExpr(pQueryInfo)) { goto PARSE_WHERE_EXIT;
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
} }
doAddJoinTagsColumnsIntoTagList(&pSql->cmd, pQueryInfo, &condExpr); if (!validateFilterExpr(pQueryInfo)) {
ret = invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
goto PARSE_WHERE_EXIT;
}
//doAddJoinTagsColumnsIntoTagList(&pSql->cmd, pQueryInfo, &condExpr);
if (condExpr.tsJoin) {
ret = validateJoinNodes(pQueryInfo, pSql);
if (ret) {
goto PARSE_WHERE_EXIT;
}
ret = mergeJoinNodes(pQueryInfo, pSql);
if (ret) {
goto PARSE_WHERE_EXIT;
}
}
PARSE_WHERE_EXIT:
cleanQueryExpr(&condExpr); cleanQueryExpr(&condExpr);
return ret; return ret;
@ -6531,7 +6745,6 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
const char* msg1 = "point interpolation query needs timestamp"; const char* msg1 = "point interpolation query needs timestamp";
const char* msg2 = "fill only available for interval query"; const char* msg2 = "fill only available for interval query";
const char* msg3 = "start(end) time of query range required or time range too large"; const char* msg3 = "start(end) time of query range required or time range too large";
const char* msg4 = "illegal number of tables in from clause";
const char* msg5 = "too many columns in selection clause"; const char* msg5 = "too many columns in selection clause";
const char* msg6 = "too many tables in from clause"; const char* msg6 = "too many tables in from clause";
const char* msg7 = "invalid table alias name"; const char* msg7 = "invalid table alias name";
@ -6568,14 +6781,11 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
} }
size_t fromSize = taosArrayGetSize(pQuerySqlNode->from->tableList); size_t fromSize = taosArrayGetSize(pQuerySqlNode->from->tableList);
if (fromSize > TSDB_MAX_JOIN_TABLE_NUM) { if (fromSize > TSDB_MAX_JOIN_TABLE_NUM * 2) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
} }
pQueryInfo->command = TSDB_SQL_SELECT; pQueryInfo->command = TSDB_SQL_SELECT;
if (fromSize > 2) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// set all query tables, which are maybe more than one. // set all query tables, which are maybe more than one.
for (int32_t i = 0; i < fromSize; ++i) { for (int32_t i = 0; i < fromSize; ++i) {


@ -613,7 +613,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) {
tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
} }
static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) { static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg, int32_t *succeed) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
TSKEY dfltKey = htobe64(pQueryMsg->window.skey); TSKEY dfltKey = htobe64(pQueryMsg->window.skey);
@ -626,9 +626,14 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
assert(index >= 0); assert(index >= 0);
SVgroupInfo* pVgroupInfo = NULL; SVgroupInfo* pVgroupInfo = NULL;
if (pTableMetaInfo->vgroupList->numOfVgroups > 0) { if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) {
assert(index < pTableMetaInfo->vgroupList->numOfVgroups); assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
} else {
tscError("%p No vgroup info found", pSql);
*succeed = 0;
return pMsg;
} }
vgId = pVgroupInfo->vgId; vgId = pVgroupInfo->vgId;
@ -948,8 +953,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->secondStageOutput = 0; pQueryMsg->secondStageOutput = 0;
} }
int32_t succeed = 1;
// serialize the table info (sid, uid, tags) // serialize the table info (sid, uid, tags)
pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg); pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg, &succeed);
if (succeed == 0) {
return TSDB_CODE_TSC_APP_ERROR;
}
SSqlGroupbyExpr *pGroupbyExpr = &pQueryInfo->groupbyExpr; SSqlGroupbyExpr *pGroupbyExpr = &pQueryInfo->groupbyExpr;
if (pGroupbyExpr->numOfGroupCols > 0) { if (pGroupbyExpr->numOfGroupCols > 0) {
@ -2081,6 +2091,10 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
assert(pInfo->vgroupList != NULL); assert(pInfo->vgroupList != NULL);
pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups; pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
if (pInfo->vgroupList->numOfVgroups <= 0) {
//tfree(pInfo->vgroupList);
tscError("%p empty vgroup info", pSql);
} else {
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) { for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
//just init, no need to lock //just init, no need to lock
SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j]; SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j];
@ -2096,6 +2110,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn)); pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn));
} }
} }
}
pMsg += size; pMsg += size;
} }


@ -46,6 +46,13 @@ static int32_t tsCompare(int32_t order, int64_t left, int64_t right) {
} }
static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) { static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
STSElem el1 = tsBufGetElem(pTSBuf);
int32_t res = tVariantCompare(el1.tag, tag1);
if (res != 0) { // it is a record with new tag
return;
}
while (tsBufNextPos(pTSBuf)) { while (tsBufNextPos(pTSBuf)) {
STSElem el1 = tsBufGetElem(pTSBuf); STSElem el1 = tsBufGetElem(pTSBuf);
@ -118,123 +125,233 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJoinSupporter* pSupporter2, STimeWindow * win) { static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
STSBuf* output1 = tsBufCreate(true, pQueryInfo->order.order);
STSBuf* output2 = tsBufCreate(true, pQueryInfo->order.order);
win->skey = INT64_MAX; win->skey = INT64_MAX;
win->ekey = INT64_MIN; win->ekey = INT64_MIN;
SLimitVal* pLimit = &pQueryInfo->limit; SLimitVal* pLimit = &pQueryInfo->limit;
int32_t order = pQueryInfo->order.order; int32_t order = pQueryInfo->order.order;
int32_t joinNum = pSql->subState.numOfSub;
SMergeTsCtx ctxlist[TSDB_MAX_JOIN_TABLE_NUM] = {{0}};
SMergeTsCtx* ctxStack[TSDB_MAX_JOIN_TABLE_NUM] = {0};
int32_t slot = 0;
size_t tableNum = 0;
int16_t* tableMIdx = 0;
int32_t equalNum = 0;
int32_t stackidx = 0;
SMergeTsCtx* ctx = NULL;
SMergeTsCtx* pctx = NULL;
SMergeTsCtx* mainCtx = NULL;
STSElem cur;
STSElem prev;
SArray* tsCond = NULL;
int32_t mergeDone = 0;
SQueryInfo* pSubQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[0]->cmd, 0); for (int32_t i = 0; i < joinNum; ++i) {
SQueryInfo* pSubQueryInfo2 = tscGetQueryInfoDetail(&pSql->pSubs[1]->cmd, 0); STSBuf* output = tsBufCreate(true, pQueryInfo->order.order);
SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0);
pSubQueryInfo1->tsBuf = output1; pSubQueryInfo->tsBuf = output;
pSubQueryInfo2->tsBuf = output2;
TSKEY st = taosGetTimestampUs(); SJoinSupporter* pSupporter = pSql->pSubs[i]->param;
// no result generated, return directly if (pSupporter->pTSBuf == NULL) {
if (pSupporter1->pTSBuf == NULL || pSupporter2->pTSBuf == NULL) {
tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql); tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql);
return 0; return 0;
} }
tsBufResetPos(pSupporter1->pTSBuf); tsBufResetPos(pSupporter->pTSBuf);
tsBufResetPos(pSupporter2->pTSBuf);
if (!tsBufNextPos(pSupporter1->pTSBuf)) {
tsBufFlush(output1);
tsBufFlush(output2);
if (!tsBufNextPos(pSupporter->pTSBuf)) {
tscDebug("%p input1 is empty, 0 for secondary query after ts blocks intersecting", pSql); tscDebug("%p input1 is empty, 0 for secondary query after ts blocks intersecting", pSql);
return 0; return 0;
} }
if (!tsBufNextPos(pSupporter2->pTSBuf)) { tscDebug("%p sub:%p table idx:%d, input group number:%d", pSql, pSql->pSubs[i], i, pSupporter->pTSBuf->numOfGroups);
tsBufFlush(output1);
tsBufFlush(output2);
tscDebug("%p input2 is empty, 0 for secondary query after ts blocks intersecting", pSql); ctxlist[i].p = pSupporter;
return 0; ctxlist[i].res = output;
} }
int64_t numOfInput1 = 1; TSKEY st = taosGetTimestampUs();
int64_t numOfInput2 = 1;
while(1) { for (int16_t tidx = 0; tidx < joinNum; tidx++) {
STSElem elem = tsBufGetElem(pSupporter1->pTSBuf); pctx = &ctxlist[tidx];
if (pctx->compared) {
continue;
}
// no data in pSupporter1 anymore, jump out of loop assert(pctx->numOfInput == 0);
if (!tsBufIsValidElem(&elem)) {
tsCond = pQueryInfo->tagCond.joinInfo.joinTables[tidx]->tsJoin;
tableNum = taosArrayGetSize(tsCond);
assert(tableNum >= 2);
for (int32_t i = 0; i < tableNum; ++i) {
tableMIdx = taosArrayGet(tsCond, i);
SMergeTsCtx* tctx = &ctxlist[*tableMIdx];
tctx->compared = 1;
}
tableMIdx = taosArrayGet(tsCond, 0);
pctx = &ctxlist[*tableMIdx];
mainCtx = pctx;
while (1) {
pctx = mainCtx;
prev = tsBufGetElem(pctx->p->pTSBuf);
ctxStack[stackidx++] = pctx;
if (!tsBufIsValidElem(&prev)) {
break; break;
} }
tVariant tag = {0};
tVariantAssign(&tag, prev.tag);
int32_t skipped = 0;
for (int32_t i = 1; i < tableNum; ++i) {
SMergeTsCtx* tctx = &ctxlist[i];
// find the data in supporter2 with the same tag value // find the data in supporter2 with the same tag value
STSElem e2 = tsBufFindElemStartPosByTag(pSupporter2->pTSBuf, elem.tag); STSElem e2 = tsBufFindElemStartPosByTag(tctx->p->pTSBuf, &tag);
/** if (!tsBufIsValidElem(&e2)) {
* there are elements in pSupporter2 with the same tag, continue skipRemainValue(pctx->p->pTSBuf, &tag);
*/ skipped = 1;
tVariant tag1 = {0}; break;
tVariantAssign(&tag1, elem.tag); }
}
if (skipped) {
slot = 0;
stackidx = 0;
continue;
}
tableMIdx = taosArrayGet(tsCond, ++slot);
equalNum = 1;
if (tsBufIsValidElem(&e2)) {
while (1) { while (1) {
STSElem elem1 = tsBufGetElem(pSupporter1->pTSBuf); ctx = &ctxlist[*tableMIdx];
STSElem elem2 = tsBufGetElem(pSupporter2->pTSBuf);
prev = tsBufGetElem(pctx->p->pTSBuf);
cur = tsBufGetElem(ctx->p->pTSBuf);
// data with current are exhausted // data with current are exhausted
if (!tsBufIsValidElem(&elem1) || tVariantCompare(elem1.tag, &tag1) != 0) { if (!tsBufIsValidElem(&prev) || tVariantCompare(prev.tag, &tag) != 0) {
break; break;
} }
if (!tsBufIsValidElem(&elem2) || tVariantCompare(elem2.tag, &tag1) != 0) { // ignore all records with the same tag if (!tsBufIsValidElem(&cur) || tVariantCompare(cur.tag, &tag) != 0) { // ignore all records with the same tag
skipRemainValue(pSupporter1->pTSBuf, &tag1);
break; break;
} }
/* ctxStack[stackidx++] = ctx;
* in case of stable query, limit/offset is not applied here. the limit/offset is applied to the
* final results which is acquired after the secondary merge of in the client. int32_t ret = tsCompare(order, prev.ts, cur.ts);
*/ if (ret == 0) {
int32_t re = tsCompare(order, elem1.ts, elem2.ts); if (++equalNum < tableNum) {
if (re < 0) { pctx = ctx;
tsBufNextPos(pSupporter1->pTSBuf);
numOfInput1++; if (++slot >= tableNum) {
} else if (re > 0) { slot = 0;
tsBufNextPos(pSupporter2->pTSBuf); }
numOfInput2++;
} else { tableMIdx = taosArrayGet(tsCond, slot);
continue;
}
assert(stackidx == tableNum);
if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (win->skey > elem1.ts) { if (win->skey > prev.ts) {
win->skey = elem1.ts; win->skey = prev.ts;
} }
if (win->ekey < elem1.ts) { if (win->ekey < prev.ts) {
win->ekey = elem1.ts; win->ekey = prev.ts;
} }
tsBufAppend(output1, elem1.id, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts)); for (int32_t i = 0; i < stackidx; ++i) {
tsBufAppend(output2, elem2.id, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts)); SMergeTsCtx* tctx = ctxStack[i];
prev = tsBufGetElem(tctx->p->pTSBuf);
tsBufAppend(tctx->res, prev.id, prev.tag, (const char*)&prev.ts, sizeof(prev.ts));
}
} else { } else {
pLimit->offset -= 1;//offset apply to projection? pLimit->offset -= 1;//offset apply to projection?
} }
tsBufNextPos(pSupporter1->pTSBuf); for (int32_t i = 0; i < stackidx; ++i) {
numOfInput1++; SMergeTsCtx* tctx = ctxStack[i];
tsBufNextPos(pSupporter2->pTSBuf); if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) {
numOfInput2++; mergeDone = 1;
} }
tctx->numOfInput++;
} }
} else { // no data in pSupporter2, ignore current data in pSupporter2
skipRemainValue(pSupporter1->pTSBuf, &tag1); if (mergeDone) {
break;
} }
stackidx = 0;
equalNum = 1;
ctxStack[stackidx++] = pctx;
} else if (ret > 0) {
if (!tsBufNextPos(ctx->p->pTSBuf) && ctx == mainCtx) {
mergeDone = 1;
break;
}
ctx->numOfInput++;
stackidx--;
} else {
stackidx--;
for (int32_t i = 0; i < stackidx; ++i) {
SMergeTsCtx* tctx = ctxStack[i];
if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) {
mergeDone = 1;
}
tctx->numOfInput++;
}
if (mergeDone) {
break;
}
stackidx = 0;
equalNum = 1;
ctxStack[stackidx++] = pctx;
}
}
if (mergeDone) {
break;
}
slot = 0;
stackidx = 0;
skipRemainValue(mainCtx->p->pTSBuf, &tag);
}
stackidx = 0;
slot = 0;
mergeDone = 0;
} }
/* /*
@ -242,28 +359,32 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
* 1. only one element * 1. only one element
* 2. only one element for each tag. * 2. only one element for each tag.
*/ */
if (output1->tsOrder == -1) { if (ctxlist[0].res->tsOrder == -1) {
output1->tsOrder = TSDB_ORDER_ASC; for (int32_t i = 0; i < joinNum; ++i) {
output2->tsOrder = TSDB_ORDER_ASC; ctxlist[i].res->tsOrder = TSDB_ORDER_ASC;
}
} }
tsBufFlush(output1); for (int32_t i = 0; i < joinNum; ++i) {
tsBufFlush(output2); tsBufFlush(ctxlist[i].res);
tsBufDestroy(pSupporter1->pTSBuf); tsBufDestroy(ctxlist[i].p->pTSBuf);
pSupporter1->pTSBuf = NULL; ctxlist[i].p->pTSBuf = NULL;
tsBufDestroy(pSupporter2->pTSBuf); }
pSupporter2->pTSBuf = NULL;
TSKEY et = taosGetTimestampUs(); TSKEY et = taosGetTimestampUs();
tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
pSql, numOfInput1, numOfInput2, output1->numOfTotal, output1->numOfGroups, win->skey, win->ekey,
tsBufGetNumOfGroup(output1), et - st);
return output1->numOfTotal; for (int32_t i = 0; i < joinNum; ++i) {
tscDebug("%p sub:%p tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
pSql, pSql->pSubs[i], i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
tsBufGetNumOfGroup(ctxlist[i].res), et - st);
}
return ctxlist[0].res->numOfTotal;
} }
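The rewritten doTSBlockIntersect generalizes the old two-stream timestamp intersection to an N-way merge: one cursor per subquery, a timestamp is emitted only when every cursor agrees, otherwise the lagging cursors are advanced, all of it per tag group via the SMergeTsCtx array. A rough standalone sketch of just that merge step, with plain sorted int arrays standing in for the STSBuf streams and tag groups (illustration only):

```c
#include <stdio.h>

/* Intersect numOfStreams ascending arrays: keep one cursor per stream, emit a
 * value only when every cursor points at it, otherwise advance the cursor(s)
 * sitting on the smallest value. */
static int intersectSorted(const int *streams[], const int lens[], int pos[],
                           int numOfStreams, int *out, int outCap) {
  int numOfOut = 0;
  while (1) {
    int minVal = 0, allEqual = 1, exhausted = 0;
    for (int i = 0; i < numOfStreams; ++i) {
      if (pos[i] >= lens[i]) { exhausted = 1; break; }
      int v = streams[i][pos[i]];
      if (i == 0) {
        minVal = v;
      } else {
        if (v != minVal) allEqual = 0;
        if (v < minVal) minVal = v;
      }
    }
    if (exhausted) break;

    if (allEqual) {
      if (numOfOut < outCap) out[numOfOut] = minVal;
      numOfOut++;
      for (int i = 0; i < numOfStreams; ++i) pos[i]++;   /* advance every stream */
    } else {
      for (int i = 0; i < numOfStreams; ++i) {
        if (streams[i][pos[i]] == minVal) pos[i]++;      /* advance only the laggards */
      }
    }
  }
  return numOfOut;
}

int main(void) {
  const int a[] = {1, 3, 5, 7, 9};
  const int b[] = {3, 4, 5, 9};
  const int c[] = {2, 3, 5, 8, 9};
  const int *streams[] = {a, b, c};
  const int lens[] = {5, 4, 5};
  int pos[3] = {0};
  int out[8];

  int n = intersectSorted(streams, lens, pos, 3, out, 8);
  for (int i = 0; i < n; ++i) printf("%d ", out[i]);   /* prints: 3 5 9 */
  printf("\n");
  return 0;
}
```

The real function additionally tracks the query window skey/ekey, honors limit/offset, and appends each match into every subquery's output buffer, but the cursor discipline is the same.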
// todo handle failed to create sub query // todo handle failed to create sub query
SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) { SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
SJoinSupporter* pSupporter = calloc(1, sizeof(SJoinSupporter)); SJoinSupporter* pSupporter = calloc(1, sizeof(SJoinSupporter));
@ -768,76 +889,218 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq
return true; return true;
} }
static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray** s1, SArray** s2) {
SJoinSupporter* p1 = pParentSql->pSubs[0]->param;
SJoinSupporter* p2 = pParentSql->pSubs[1]->param;
tscDebug("%p all subquery retrieve <tid, tags> complete, do tags match, %d, %d", pParentSql, p1->num, p2->num);
// sort according to the tag value
qsort(p1->pIdTagList, p1->num, p1->tagSize, tagValCompar);
qsort(p2->pIdTagList, p2->num, p2->tagSize, tagValCompar);
static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray* resList) {
int16_t joinNum = pParentSql->subState.numOfSub;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int16_t tagColId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); int16_t tagColId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
SJoinSupporter* p0 = pParentSql->pSubs[0]->param;
SMergeCtx ctxlist[TSDB_MAX_JOIN_TABLE_NUM] = {{0}};
SMergeCtx* ctxStack[TSDB_MAX_JOIN_TABLE_NUM] = {0};
// int16_t for padding
int32_t size = p0->tagSize - sizeof(int16_t);
SSchema* pColSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId); SSchema* pColSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId);
// int16_t for padding tscDebug("%p all subquery retrieve <tid, tags> complete, do tags match", pParentSql);
int32_t size = p1->tagSize - sizeof(int16_t);
*s1 = taosArrayInit(p1->num, size);
*s2 = taosArrayInit(p2->num, size);
if (!(checkForDuplicateTagVal(pColSchema, p1, pParentSql) && checkForDuplicateTagVal(pColSchema, p2, pParentSql))) { for (int32_t i = 0; i < joinNum; i++) {
SJoinSupporter* p = pParentSql->pSubs[i]->param;
ctxlist[i].p = p;
ctxlist[i].res = taosArrayInit(p->num, size);
tscDebug("Join %d - num:%d", i, p->num);
// sort according to the tag valu
qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar);
if (!checkForDuplicateTagVal(pColSchema, p, pParentSql)) {
for (int32_t j = 0; j <= i; j++) {
taosArrayDestroy(ctxlist[j].res);
}
return TSDB_CODE_QRY_DUP_JOIN_KEY; return TSDB_CODE_QRY_DUP_JOIN_KEY;
} }
}
int32_t i = 0, j = 0; int32_t slot = 0;
while(i < p1->num && j < p2->num) { size_t tableNum = 0;
STidTags* pp1 = (STidTags*) varDataVal(p1->pIdTagList + i * p1->tagSize); int16_t* tableMIdx = 0;
STidTags* pp2 = (STidTags*) varDataVal(p2->pIdTagList + j * p2->tagSize); int32_t equalNum = 0;
assert(pp1->tid != 0 && pp2->tid != 0); int32_t stackidx = 0;
int32_t mergeDone = 0;
SMergeCtx* ctx = NULL;
SMergeCtx* pctx = NULL;
STidTags* cur = NULL;
STidTags* prev = NULL;
SArray* tagCond = NULL;
int32_t ret = doCompare(pp1->tag, pp2->tag, pColSchema->type, pColSchema->bytes); for (int16_t tidx = 0; tidx < joinNum; tidx++) {
pctx = &ctxlist[tidx];
if (pctx->compared) {
continue;
}
assert(pctx->idx == 0 && taosArrayGetSize(pctx->res) == 0);
tagCond = pQueryInfo->tagCond.joinInfo.joinTables[tidx]->tagJoin;
tableNum = taosArrayGetSize(tagCond);
assert(tableNum >= 2);
for (int32_t i = 0; i < tableNum; ++i) {
tableMIdx = taosArrayGet(tagCond, i);
SMergeCtx* tctx = &ctxlist[*tableMIdx];
tctx->compared = 1;
}
for (int32_t i = 0; i < tableNum; ++i) {
tableMIdx = taosArrayGet(tagCond, i);
SMergeCtx* tctx = &ctxlist[*tableMIdx];
if (tctx->p->num <= 0 || tctx->p->pIdTagList == NULL) {
mergeDone = 1;
break;
}
}
if (mergeDone) {
mergeDone = 0;
continue;
}
tableMIdx = taosArrayGet(tagCond, slot);
pctx = &ctxlist[*tableMIdx];
prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize);
ctxStack[stackidx++] = pctx;
tableMIdx = taosArrayGet(tagCond, ++slot);
equalNum = 1;
while (1) {
ctx = &ctxlist[*tableMIdx];
cur = (STidTags*) varDataVal(ctx->p->pIdTagList + ctx->idx * ctx->p->tagSize);
assert(cur->tid != 0 && prev->tid != 0);
ctxStack[stackidx++] = ctx;
int32_t ret = doCompare(prev->tag, cur->tag, pColSchema->type, pColSchema->bytes);
if (ret == 0) { if (ret == 0) {
tscDebug("%p tag matched, vgId:%d, val:%d, tid:%d, uid:%"PRIu64", tid:%d, uid:%"PRIu64, pParentSql, pp1->vgId, if (++equalNum < tableNum) {
*(int*) pp1->tag, pp1->tid, pp1->uid, pp2->tid, pp2->uid); prev = cur;
pctx = ctx;
taosArrayPush(*s1, pp1); if (++slot >= tableNum) {
taosArrayPush(*s2, pp2); slot = 0;
j++; }
i++;
tableMIdx = taosArrayGet(tagCond, slot);
continue;
}
tscDebug("%p tag matched, vgId:%d, val:%d, tid:%d, uid:%"PRIu64", tid:%d, uid:%"PRIu64, pParentSql, prev->vgId,
*(int*) prev->tag, prev->tid, prev->uid, cur->tid, cur->uid);
assert(stackidx == tableNum);
for (int32_t i = 0; i < stackidx; ++i) {
SMergeCtx* tctx = ctxStack[i];
prev = (STidTags*) varDataVal(tctx->p->pIdTagList + tctx->idx * tctx->p->tagSize);
taosArrayPush(tctx->res, prev);
}
for (int32_t i = 0; i < stackidx; ++i) {
SMergeCtx* tctx = ctxStack[i];
if (++tctx->idx >= tctx->p->num) {
mergeDone = 1;
break;
}
}
if (mergeDone) {
break;
}
stackidx = 0;
equalNum = 1;
prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize);
ctxStack[stackidx++] = pctx;
} else if (ret > 0) { } else if (ret > 0) {
j++; stackidx--;
if (++ctx->idx >= ctx->p->num) {
break;
}
} else { } else {
i++; stackidx--;
for (int32_t i = 0; i < stackidx; ++i) {
SMergeCtx* tctx = ctxStack[i];
if (++tctx->idx >= tctx->p->num) {
mergeDone = 1;
break;
} }
} }
if (mergeDone) {
break;
}
stackidx = 0;
equalNum = 1;
prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize);
ctxStack[stackidx++] = pctx;
}
}
slot = 0;
mergeDone = 0;
stackidx = 0;
}
for (int32_t i = 0; i < joinNum; ++i) {
// reorganize the tid-tag value according to both the vgroup id and tag values // reorganize the tid-tag value according to both the vgroup id and tag values
// sort according to the tag value // sort according to the tag value
size_t t1 = taosArrayGetSize(*s1); size_t num = taosArrayGetSize(ctxlist[i].res);
size_t t2 = taosArrayGetSize(*s2);
qsort((*s1)->pData, t1, size, tidTagsCompar); qsort((ctxlist[i].res)->pData, num, size, tidTagsCompar);
qsort((*s2)->pData, t2, size, tidTagsCompar);
#if 0 taosArrayPush(resList, &ctxlist[i].res);
for(int32_t k = 0; k < t1; ++k) {
STidTags* p = (*s1)->pData + size * k; tscDebug("%p tags match complete, result num: %"PRIzu, pParentSql, num);
printf("%d, tag:%s\n", p->vgId, ((tstr*)(p->tag))->data);
} }
for(int32_t k = 0; k < t1; ++k) {
STidTags* p = (*s2)->pData + size * k;
printf("%d, tag:%s\n", p->vgId, ((tstr*)(p->tag))->data);
}
#endif
tscDebug("%p tags match complete, result: %"PRIzu", %"PRIzu, pParentSql, t1, t2);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
bool emptyTagList(SArray* resList, int32_t size) {
size_t rsize = taosArrayGetSize(resList);
if (rsize != size) {
return true;
}
for (int32_t i = 0; i < size; ++i) {
SArray** s = taosArrayGet(resList, i);
if (taosArrayGetSize(*s) <= 0) {
return true;
}
}
return false;
}
static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) { static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) {
SJoinSupporter* pSupporter = (SJoinSupporter*)param; SJoinSupporter* pSupporter = (SJoinSupporter*)param;
@ -939,19 +1202,19 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
return; return;
} }
SArray *s1 = NULL, *s2 = NULL; SArray* resList = taosArrayInit(pParentSql->subState.numOfSub, sizeof(SArray *));
int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, resList);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
freeJoinSubqueryObj(pParentSql); freeJoinSubqueryObj(pParentSql);
pParentSql->res.code = code; pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql); tscAsyncResultOnError(pParentSql);
taosArrayDestroy(s1); taosArrayDestroy(resList);
taosArrayDestroy(s2);
return; return;
} }
if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) { // no results,return. if (emptyTagList(resList, pParentSql->subState.numOfSub)) { // no results,return.
assert(pParentSql->fp != tscJoinQueryCallback); assert(pParentSql->fp != tscJoinQueryCallback);
tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql); tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql);
@ -963,37 +1226,34 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
(*pParentSql->fp)(pParentSql->param, pParentSql, 0); (*pParentSql->fp)(pParentSql->param, pParentSql, 0);
} else { } else {
for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) {
// proceed to for ts_comp query // proceed to for ts_comp query
SSqlCmd* pSubCmd1 = &pParentSql->pSubs[0]->cmd; SSqlCmd* pSubCmd = &pParentSql->pSubs[m]->cmd;
SSqlCmd* pSubCmd2 = &pParentSql->pSubs[1]->cmd; SArray** s = taosArrayGet(resList, m);
SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pSubCmd1, 0); SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0);
STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo1, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo1, s1); tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo, *s);
SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pSubCmd2, 0); SSqlObj* psub = pParentSql->pSubs[m];
STableMetaInfo* pTableMetaInfo2 = tscGetMetaInfo(pQueryInfo2, 0); ((SJoinSupporter*)psub->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo->pVgroupTables);
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo2, s2);
SSqlObj* psub1 = pParentSql->pSubs[0];
((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo1->pVgroupTables);
SSqlObj* psub2 = pParentSql->pSubs[1];
((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo2->pVgroupTables);
pParentSql->subState.numOfSub = 2;
memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub); memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub);
tscDebug("%p reset all sub states to 0", pParentSql); tscDebug("%p reset all sub states to 0", pParentSql);
for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) { issueTsCompQuery(psub, psub->param, pParentSql);
SSqlObj* sub = pParentSql->pSubs[m];
issueTsCompQuery(sub, sub->param, pParentSql);
} }
} }
taosArrayDestroy(s1); size_t rsize = taosArrayGetSize(resList);
taosArrayDestroy(s2); for (int32_t i = 0; i < rsize; ++i) {
SArray** s = taosArrayGet(resList, i);
if (*s) {
taosArrayDestroy(*s);
}
}
taosArrayDestroy(resList);
} }
static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) { static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) {
@ -1124,12 +1384,8 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscDebug("%p all subquery retrieve ts complete, do ts block intersect", pParentSql); tscDebug("%p all subquery retrieve ts complete, do ts block intersect", pParentSql);
// proceeds to launched secondary query to retrieve final data
SJoinSupporter* p1 = pParentSql->pSubs[0]->param;
SJoinSupporter* p2 = pParentSql->pSubs[1]->param;
STimeWindow win = TSWINDOW_INITIALIZER; STimeWindow win = TSWINDOW_INITIALIZER;
int64_t num = doTSBlockIntersect(pParentSql, p1, p2, &win); int64_t num = doTSBlockIntersect(pParentSql, &win);
if (num <= 0) { // no result during ts intersect if (num <= 0) { // no result during ts intersect
tscDebug("%p no results generated in ts intersection, free all sub SqlObj and quit", pParentSql); tscDebug("%p no results generated in ts intersection, free all sub SqlObj and quit", pParentSql);
freeJoinSubqueryObj(pParentSql); freeJoinSubqueryObj(pParentSql);
@ -1639,6 +1895,8 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
pNewQueryInfo->limit.limit = -1; pNewQueryInfo->limit.limit = -1;
pNewQueryInfo->limit.offset = 0; pNewQueryInfo->limit.offset = 0;
pNewQueryInfo->order.orderColId = INT32_MIN;
// backup the data and clear it in the sqlcmd object // backup the data and clear it in the sqlcmd object
memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr)); memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr));


@ -1279,6 +1279,34 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepco
return 0; return 0;
} }
bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex) {
// ignore the tbname columnIndex to be inserted into source list
if (pColIndex->columnIndex < 0) {
return false;
}
size_t numOfCols = taosArrayGetSize(pColumnList);
int16_t col = pColIndex->columnIndex;
int32_t i = 0;
while (i < numOfCols) {
SColumn* pCol = taosArrayGetP(pColumnList, i);
if ((pCol->colIndex.columnIndex != col) || (pCol->colIndex.tableIndex != pColIndex->tableIndex)) {
++i;
continue;
} else {
break;
}
}
if (i >= numOfCols || numOfCols == 0) {
return false;
}
return true;
}
SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) { SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
// ignore the tbname columnIndex to be inserted into source list // ignore the tbname columnIndex to be inserted into source list
if (pColIndex->columnIndex < 0) { if (pColIndex->columnIndex < 0) {
@ -1583,7 +1611,25 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
dest->tbnameCond.uid = src->tbnameCond.uid; dest->tbnameCond.uid = src->tbnameCond.uid;
dest->tbnameCond.len = src->tbnameCond.len; dest->tbnameCond.len = src->tbnameCond.len;
memcpy(&dest->joinInfo, &src->joinInfo, sizeof(SJoinInfo)); dest->joinInfo.hasJoin = src->joinInfo.hasJoin;
for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) {
if (src->joinInfo.joinTables[i]) {
dest->joinInfo.joinTables[i] = calloc(1, sizeof(SJoinNode));
memcpy(dest->joinInfo.joinTables[i], src->joinInfo.joinTables[i], sizeof(SJoinNode));
if (src->joinInfo.joinTables[i]->tsJoin) {
dest->joinInfo.joinTables[i]->tsJoin = taosArrayDup(src->joinInfo.joinTables[i]->tsJoin);
}
if (src->joinInfo.joinTables[i]->tagJoin) {
dest->joinInfo.joinTables[i]->tagJoin = taosArrayDup(src->joinInfo.joinTables[i]->tagJoin);
}
}
}
dest->relType = src->relType; dest->relType = src->relType;
if (src->pCond == NULL) { if (src->pCond == NULL) {
@ -1629,6 +1675,23 @@ void tscTagCondRelease(STagCond* pTagCond) {
taosArrayDestroy(pTagCond->pCond); taosArrayDestroy(pTagCond->pCond);
} }
for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) {
SJoinNode *node = pTagCond->joinInfo.joinTables[i];
if (node == NULL) {
continue;
}
if (node->tsJoin != NULL) {
taosArrayDestroy(node->tsJoin);
}
if (node->tagJoin != NULL) {
taosArrayDestroy(node->tagJoin);
}
tfree(node);
}
memset(pTagCond, 0, sizeof(STagCond)); memset(pTagCond, 0, sizeof(STagCond));
} }
@ -2318,16 +2381,21 @@ void tscDoQuery(SSqlObj* pSql) {
} }
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid) { int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid) {
if (pTagCond->joinInfo.left.uid == uid) { int32_t i = 0;
return pTagCond->joinInfo.left.tagColId; while (i < TSDB_MAX_JOIN_TABLE_NUM) {
} else if (pTagCond->joinInfo.right.uid == uid) { SJoinNode* node = pTagCond->joinInfo.joinTables[i];
return pTagCond->joinInfo.right.tagColId; if (node && node->uid == uid) {
} else { return node->tagColId;
}
i++;
}
assert(0); assert(0);
return -1; return -1;
}
} }
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId) { int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId) {
int32_t numOfTags = tscGetNumOfTags(pTableMeta); int32_t numOfTags = tscGetNumOfTags(pTableMeta);


@@ -71,7 +71,7 @@ int32_t tsMaxBinaryDisplayWidth = 30;
int32_t tsCompressMsgSize = -1;
// client
int32_t tsMaxSQLStringLen = TSDB_MAX_SQL_LEN; int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from


@@ -259,7 +259,7 @@ do { \
#define TSDB_MIN_TABLES 4
#define TSDB_MAX_TABLES 10000000
#define TSDB_DEFAULT_TABLES 1000000
#define TSDB_TABLES_STEP 1000 #define TSDB_TABLES_STEP 100
#define TSDB_MIN_DAYS_PER_FILE 1
#define TSDB_MAX_DAYS_PER_FILE 3650
@@ -317,7 +317,7 @@ do { \
#define TSDB_MAX_DB_QUORUM_OPTION 2
#define TSDB_DEFAULT_DB_QUORUM_OPTION 1
#define TSDB_MAX_JOIN_TABLE_NUM 5 #define TSDB_MAX_JOIN_TABLE_NUM 10
#define TSDB_MAX_UNION_CLAUSE 5
#define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE)


@@ -3,6 +3,53 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
FIND_PACKAGE(Git)
IF (GIT_FOUND)
MESSAGE("Git found")
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ../src/kit/taosdemo/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} status -z -s ../src/kit/taosdemo/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS})
ELSE()
MESSAGE("Git not found")
SET(TAOSDEMO_COMMIT_SHA1 "unknown")
SET(TAOSDEMO_STATUS "unknown")
ENDIF (GIT_FOUND)
STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1)
MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1})
STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS)
IF (TAOSDEMO_STATUS MATCHES "M")
SET(TAOSDEMO_STATUS "modified")
ELSE()
SET(TAOSDEMO_STATUS "")
ENDIF ()
MESSAGE("taosdemo's status is:" ${TAOSDEMO_STATUS})
ADD_DEFINITIONS(-DTAOSDEMO_COMMIT_SHA1="${TAOSDEMO_COMMIT_SHA1}")
ADD_DEFINITIONS(-DTAOSDEMO_STATUS="${TAOSDEMO_STATUS}")
MESSAGE("VERNUMBER is:" ${VERNUMBER})
IF (VERNUMBER MATCHES "")
ADD_DEFINITIONS(-DTD_VERNUMBER="TDengine-version-unknown")
ELSE()
ADD_DEFINITIONS(-DTD_VERNUMBER="${VERNUMBER}")
ENDIF ()
IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC})


@@ -86,7 +86,7 @@ enum TEST_MODE {
#define MAX_COLUMN_COUNT 1024
#define MAX_TAG_COUNT 128
#define MAX_QUERY_SQL_COUNT 10 #define MAX_QUERY_SQL_COUNT 100
#define MAX_QUERY_SQL_LENGTH 256
#define MAX_DATABASE_COUNT 256
@@ -94,6 +94,7 @@ enum TEST_MODE {
#define DEFAULT_TIMESTAMP_STEP 1
typedef enum CREATE_SUB_TALBE_MOD_EN {
PRE_CREATE_SUBTBL,
AUTO_CREATE_SUBTBL,
@@ -118,11 +119,11 @@ typedef enum enum_INSERT_MODE {
INVALID_INSERT_MODE
} INSERT_MODE;
enum QUERY_TYPE { typedef enum enumQUERY_TYPE {
NO_INSERT_TYPE,
INSERT_TYPE,
QUERY_TYPE_BUT
} ; } QUERY_TYPE;
enum _show_db_index {
TSDB_SHOW_DB_NAME_INDEX,
@@ -225,7 +226,6 @@ typedef struct SColumn_S {
typedef struct SSuperTable_S {
char sTblName[MAX_TB_NAME_SIZE+1];
int childTblCount;
bool superTblExists; // 0: no, 1: yes
bool childTblExists; // 0: no, 1: yes
int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
@@ -236,7 +236,7 @@ typedef struct SSuperTable_S {
int childTblOffset;
int multiThreadWriteOneTbl; // 0: no, 1: yes
int rowsPerTbl; // int interlaceRows; //
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
int maxSqlLen; //
@ -499,7 +499,7 @@ static int taosRandom()
static int createDatabases(); static int createDatabases();
static void createChildTables(); static void createChildTables();
static int queryDbExec(TAOS *taos, char *command, int type); static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
/* ************ Global variables ************ */ /* ************ Global variables ************ */
@ -590,6 +590,32 @@ static FILE * g_fpOfInsertResult = NULL;
static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); } static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); }
#ifndef TAOSDEMO_COMMIT_SHA1
#define TAOSDEMO_COMMIT_SHA1 "unknown"
#endif
#ifndef TD_VERNUMBER
#define TD_VERNUMBER "unknown"
#endif
#ifndef TAOSDEMO_STATUS
#define TAOSDEMO_STATUS "unknown"
#endif
static void printVersion() {
char tdengine_ver[] = TD_VERNUMBER;
char taosdemo_ver[] = TAOSDEMO_COMMIT_SHA1;
char taosdemo_status[] = TAOSDEMO_STATUS;
if (strlen(taosdemo_status) == 0) {
printf("taosdemo verison %s-%s\n",
tdengine_ver, taosdemo_ver);
} else {
printf("taosdemo verison %s-%s, status:%s\n",
tdengine_ver, taosdemo_ver, taosdemo_status);
}
}
static void printHelp() { static void printHelp() {
char indent[10] = " "; char indent[10] = " ";
printf("%s%s%s%s\n", indent, "-f", indent, printf("%s%s%s%s\n", indent, "-f", indent,
@ -647,6 +673,8 @@ static void printHelp() {
"Out of order data's range, ms, default is 1000."); "Out of order data's range, ms, default is 1000.");
printf("%s%s%s%s\n", indent, "-g", indent, printf("%s%s%s%s\n", indent, "-g", indent,
"Print debug info."); "Print debug info.");
printf("%s%s%s%s\n", indent, "-V, --version", indent,
"Print version info.");
/* printf("%s%s%s%s\n", indent, "-D", indent, /* printf("%s%s%s%s\n", indent, "-D", indent,
"if elete database if exists. 0: no, 1: yes, default is 1"); "if elete database if exists. 0: no, 1: yes, default is 1");
*/ */
@ -788,6 +816,10 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
|| arguments->method_of_delete > 3) { || arguments->method_of_delete > 3) {
arguments->method_of_delete = 0; arguments->method_of_delete = 0;
} }
} else if ((strcmp(argv[i], "--version") == 0) ||
(strcmp(argv[i], "-V") == 0)){
printVersion();
exit(0);
} else if (strcmp(argv[i], "--help") == 0) { } else if (strcmp(argv[i], "--help") == 0) {
printHelp(); printHelp();
exit(0); exit(0);
@ -859,7 +891,7 @@ static void tmfree(char *buf) {
} }
} }
static int queryDbExec(TAOS *taos, char *command, int type) { static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
int i; int i;
TAOS_RES *res = NULL; TAOS_RES *res = NULL;
int32_t code = -1; int32_t code = -1;
@ -878,8 +910,10 @@ static int queryDbExec(TAOS *taos, char *command, int type) {
} }
if (code != 0) { if (code != 0) {
if (!quiet) {
debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
errorPrint( "Failed to run %s, reason: %s\n", command, taos_errstr(res)); errorPrint("Failed to run %s, reason: %s\n", command, taos_errstr(res));
}
taos_free_result(res); taos_free_result(res);
//taos_close(taos); //taos_close(taos);
return -1; return -1;
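
The new quiet flag on queryDbExec() exists so callers can issue statements that are expected to fail (probes) without flooding the log. A minimal sketch of that probe pattern, using only names already present in this diff; the helper function itself is illustrative:

    // Hedged sketch: probe for a super table with "describe" and quiet == true,
    // so the expected "table does not exist" error is not printed.
    static bool superTableExists(TAOS *taos, const char *dbName, const char *stbName) {
        char command[256];
        snprintf(command, sizeof(command), "describe %s.%s;", dbName, stbName);
        return (0 == queryDbExec(taos, command, NO_INSERT_TYPE, true));
    }
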
@ -1166,8 +1200,8 @@ static int printfInsertMeta() {
}else { }else {
printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
} }
printf(" rowsPerTbl: \033[33m%d\033[0m\n", printf(" interlaceRows: \033[33m%d\033[0m\n",
g_Dbs.db[i].superTbls[j].rowsPerTbl); g_Dbs.db[i].superTbls[j].interlaceRows);
printf(" disorderRange: \033[33m%d\033[0m\n", printf(" disorderRange: \033[33m%d\033[0m\n",
g_Dbs.db[i].superTbls[j].disorderRange); g_Dbs.db[i].superTbls[j].disorderRange);
printf(" disorderRatio: \033[33m%d\033[0m\n", printf(" disorderRatio: \033[33m%d\033[0m\n",
@ -1328,7 +1362,7 @@ static void printfInsertMetaToFile(FILE* fp) {
}else { }else {
fprintf(fp, " multiThreadWriteOneTbl: yes\n"); fprintf(fp, " multiThreadWriteOneTbl: yes\n");
} }
fprintf(fp, " rowsPerTbl: %d\n", g_Dbs.db[i].superTbls[j].rowsPerTbl); fprintf(fp, " interlaceRows: %d\n", g_Dbs.db[i].superTbls[j].interlaceRows);
fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen); fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
@ -2320,7 +2354,7 @@ static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls,
dbName, superTbls->sTblName, cols, tags); dbName, superTbls->sTblName, cols, tags);
verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command); verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
errorPrint( "create supertable %s failed!\n\n", errorPrint( "create supertable %s failed!\n\n",
superTbls->sTblName); superTbls->sTblName);
return -1; return -1;
@ -2344,7 +2378,7 @@ static int createDatabases() {
if (g_Dbs.db[i].drop) { if (g_Dbs.db[i].drop) {
sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName);
verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command); verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
taos_close(taos); taos_close(taos);
return -1; return -1;
} }
@ -2418,7 +2452,7 @@ static int createDatabases() {
} }
debugPrint("%s() %d command: %s\n", __func__, __LINE__, command); debugPrint("%s() %d command: %s\n", __func__, __LINE__, command);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
taos_close(taos); taos_close(taos);
errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName);
return -1; return -1;
@ -2428,7 +2462,13 @@ static int createDatabases() {
debugPrint("%s() %d supertbl count:%d\n", debugPrint("%s() %d supertbl count:%d\n",
__func__, __LINE__, g_Dbs.db[i].superTblCount); __func__, __LINE__, g_Dbs.db[i].superTblCount);
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
if ((g_Dbs.db[i].drop) || (g_Dbs.db[i].superTbls[j].superTblExists == TBL_NO_EXISTS)) { sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
g_Dbs.db[i].superTbls[j].sTblName);
verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
ret = queryDbExec(taos, command, NO_INSERT_TYPE, true);
if ((ret != 0) || (g_Dbs.db[i].drop)) {
ret = createSuperTable(taos, g_Dbs.db[i].dbName, ret = createSuperTable(taos, g_Dbs.db[i].dbName,
&g_Dbs.db[i].superTbls[j], g_Dbs.use_metric); &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric);
@ -2439,27 +2479,14 @@ static int createDatabases() {
} }
} }
/* describe super table, if exists
sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
g_Dbs.db[i].superTbls[j].sTblName);
verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS;
} else {
*/
g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS;
ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
&g_Dbs.db[i].superTbls[j]); &g_Dbs.db[i].superTbls[j]);
//}
if (0 != ret) { if (0 != ret) {
errorPrint("\nget super table %s.%s info failed!\n\n", g_Dbs.db[i].dbName, errorPrint("\nget super table %s.%s info failed!\n\n",
g_Dbs.db[i].superTbls[j].sTblName); g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
taos_close(taos); taos_close(taos);
return -1; return -1;
} }
} }
} }
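
Taken together with the removed superTblExists bookkeeping, the hunks above switch createDatabases() to an on-demand check: describe the super table quietly, and create it only when the probe fails or the database was just dropped. A condensed, hedged sketch of that flow (db and stb are shorthand for g_Dbs.db[i] and &g_Dbs.db[i].superTbls[j]; error handling omitted):

    sprintf(command, "describe %s.%s;", db->dbName, stb->sTblName);
    ret = queryDbExec(taos, command, NO_INSERT_TYPE, true);      // quiet probe
    if ((ret != 0) || db->drop) {
        ret = createSuperTable(taos, db->dbName, stb, g_Dbs.use_metric);
    }
    ret = getSuperTableFromServer(taos, db->dbName, stb);        // refresh schema either way
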
@ -2539,9 +2566,9 @@ static void* createTable(void *sarg)
len = 0; len = 0;
verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer); verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer);
if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){ if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)){
free(buffer);
errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer);
free(buffer);
return NULL; return NULL;
} }
@ -2555,7 +2582,7 @@ static void* createTable(void *sarg)
if (0 != len) { if (0 != len) {
verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer); verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer);
if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)) { if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)) {
errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer);
} }
} }
@ -2651,8 +2678,8 @@ static void createChildTables() {
int startFrom = 0; int startFrom = 0;
g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
verbosePrint("%s() LN%d: create %d child tables from %d\n", __func__, __LINE__, verbosePrint("%s() LN%d: create %d child tables from %d\n",
g_totalChildTables, startFrom); __func__, __LINE__, g_totalChildTables, startFrom);
startMultiThreadCreateChildTable( startMultiThreadCreateChildTable(
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
g_Dbs.threadCountByCreateTbl, g_Dbs.threadCountByCreateTbl,
@ -3028,9 +3055,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (threads2 && threads2->type == cJSON_Number) { if (threads2 && threads2->type == cJSON_Number) {
g_Dbs.threadCountByCreateTbl = threads2->valueint; g_Dbs.threadCountByCreateTbl = threads2->valueint;
} else if (!threads2) { } else if (!threads2) {
g_Dbs.threadCountByCreateTbl = 1; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
} else { } else {
printf("ERROR: failed to read json, threads2 not found\n"); errorPrint("%s() LN%d, failed to read json, threads2 not found\n",
__func__, __LINE__);
goto PARSE_OVER; goto PARSE_OVER;
} }
@ -3545,13 +3573,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER; goto PARSE_OVER;
} }
cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "interlace_rows"); cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) { if (interlaceRows && interlaceRows->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].rowsPerTbl = rowsPerTbl->valueint; g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint;
} else if (!rowsPerTbl) { } else if (!interlaceRows) {
g_Dbs.db[i].superTbls[j].rowsPerTbl = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 means interlace mode; max value must be <= num_of_records_per_req
} else { } else {
errorPrint("%s() LN%d, failed to read json, rowsPerTbl input mistake\n", __func__, __LINE__); errorPrint(
"%s() LN%d, failed to read json, interlace rows input mistake\n",
__func__, __LINE__);
goto PARSE_OVER; goto PARSE_OVER;
} }
@ -3581,7 +3611,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!insertRows) { } else if (!insertRows) {
g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
} else { } else {
errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", __func__, __LINE__); errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
__func__, __LINE__);
goto PARSE_OVER; goto PARSE_OVER;
} }
@ -3593,16 +3624,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
__func__, __LINE__, g_args.insert_interval); __func__, __LINE__, g_args.insert_interval);
g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
} else { } else {
errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", __func__, __LINE__); errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
__func__, __LINE__);
goto PARSE_OVER; goto PARSE_OVER;
} }
/* CBD if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable
|| (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
continue;
}
*/
int retVal = getColumnAndTagTypeFromInsertJsonFile( int retVal = getColumnAndTagTypeFromInsertJsonFile(
stbInfo, &g_Dbs.db[i].superTbls[j]); stbInfo, &g_Dbs.db[i].superTbls[j]);
if (false == retVal) { if (false == retVal) {
@ -4223,7 +4249,7 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k)
__func__, __LINE__, buffer); __func__, __LINE__, buffer);
if (superTblInfo) { if (superTblInfo) {
if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) { if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) {
affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE); affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false);
} else { } else {
if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) { if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) {
affectedRows = -1; affectedRows = -1;
@ -4233,7 +4259,7 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k)
} }
} }
} else { } else {
affectedRows = queryDbExec(pThreadInfo->taos, buffer, 1); affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false);
} }
return affectedRows; return affectedRows;
@ -4472,17 +4498,17 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
int insertMode; int insertMode;
char tableName[TSDB_TABLE_NAME_LEN]; char tableName[TSDB_TABLE_NAME_LEN];
int rowsPerTbl = superTblInfo?superTblInfo->rowsPerTbl:g_args.interlace_rows; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows;
if (rowsPerTbl > 0) { if (interlaceRows > 0) {
insertMode = INTERLACE_INSERT_MODE; insertMode = INTERLACE_INSERT_MODE;
} else { } else {
insertMode = PROGRESSIVE_INSERT_MODE; insertMode = PROGRESSIVE_INSERT_MODE;
} }
// rows per table need be less than insert batch // rows per table need be less than insert batch
if (rowsPerTbl > g_args.num_of_RPR) if (interlaceRows > g_args.num_of_RPR)
rowsPerTbl = g_args.num_of_RPR; interlaceRows = g_args.num_of_RPR;
pThreadInfo->totalInsertRows = 0; pThreadInfo->totalInsertRows = 0;
pThreadInfo->totalAffectedRows = 0; pThreadInfo->totalAffectedRows = 0;
@ -4510,13 +4536,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
assert(pThreadInfo->ntables > 0); assert(pThreadInfo->ntables > 0);
if (rowsPerTbl > g_args.num_of_RPR) if (interlaceRows > g_args.num_of_RPR)
rowsPerTbl = g_args.num_of_RPR; interlaceRows = g_args.num_of_RPR;
batchPerTbl = rowsPerTbl; batchPerTbl = interlaceRows;
if ((rowsPerTbl > 0) && (pThreadInfo->ntables > 1)) { if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes = batchPerTblTimes =
(g_args.num_of_RPR / (rowsPerTbl * pThreadInfo->ntables)) + 1; (g_args.num_of_RPR / (interlaceRows * pThreadInfo->ntables)) + 1;
} else { } else {
batchPerTblTimes = 1; batchPerTblTimes = 1;
} }
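
Concrete numbers make the interlace batch sizing above easier to follow. A hedged worked example, assuming g_args.num_of_RPR = 100 records per request, interlaceRows = 10 and pThreadInfo->ntables = 4:

    int batchPerTbl      = 10;                   // interlaceRows, already capped at num_of_RPR
    // batchPerTblTimes = (num_of_RPR / (interlaceRows * ntables)) + 1
    //                  = (100 / (10 * 4)) + 1 = 2 + 1 = 3 rounds per request
    int batchPerTblTimes = (100 / (10 * 4)) + 1;
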
@ -4797,9 +4823,9 @@ static void* syncWrite(void *sarg) {
threadInfo *winfo = (threadInfo *)sarg; threadInfo *winfo = (threadInfo *)sarg;
SSuperTable* superTblInfo = winfo->superTblInfo; SSuperTable* superTblInfo = winfo->superTblInfo;
int rowsPerTbl = superTblInfo?superTblInfo->rowsPerTbl:g_args.interlace_rows; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows;
if (rowsPerTbl > 0) { if (interlaceRows > 0) {
// interlace mode // interlace mode
return syncWriteInterlace(winfo); return syncWriteInterlace(winfo);
} else { } else {
@ -4941,7 +4967,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
} else if (0 == strncasecmp(precision, "us", 2)) { } else if (0 == strncasecmp(precision, "us", 2)) {
timePrec = TSDB_TIME_PRECISION_MICRO; timePrec = TSDB_TIME_PRECISION_MICRO;
} else { } else {
errorPrint( "No support precision: %s\n", precision); errorPrint("Not support precision: %s\n", precision);
exit(-1); exit(-1);
} }
} }
@ -4976,7 +5002,8 @@ static void startMultiThreadInsertData(int threads, char* db_name,
if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
"sample", strlen("sample")))) { "sample", strlen("sample")))) {
if (0 != prepareSampleDataForSTable(superTblInfo)) { if (0 != prepareSampleDataForSTable(superTblInfo)) {
errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
__func__, __LINE__);
exit(-1); exit(-1);
} }
} }
@ -4985,7 +5012,8 @@ static void startMultiThreadInsertData(int threads, char* db_name,
if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
"sample", strlen("sample")))) { "sample", strlen("sample")))) {
if (0 != prepareSampleDataForSTable(superTblInfo)) { if (0 != prepareSampleDataForSTable(superTblInfo)) {
errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
__func__, __LINE__);
exit(-1); exit(-1);
} }
} }
@ -5045,7 +5073,8 @@ static void startMultiThreadInsertData(int threads, char* db_name,
g_Dbs.host, g_Dbs.user, g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port); g_Dbs.password, db_name, g_Dbs.port);
if (NULL == t_info->taos) { if (NULL == t_info->taos) {
errorPrint( "connect to server fail from insert sub thread, reason: %s\n", errorPrint(
"connect to server fail from insert sub thread, reason: %s\n",
taos_errstr(NULL)); taos_errstr(NULL));
exit(-1); exit(-1);
} }
@ -5187,7 +5216,8 @@ static void *readTable(void *sarg) {
double totalT = 0; double totalT = 0;
int count = 0; int count = 0;
for (int i = 0; i < num_of_tables; i++) { for (int i = 0; i < num_of_tables; i++) {
sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime); sprintf(command, "select %s from %s%d where ts>= %" PRId64,
aggreFunc[j], tb_prefix, i, sTime);
double t = getCurrentTime(); double t = getCurrentTime();
TAOS_RES *pSql = taos_query(taos, command); TAOS_RES *pSql = taos_query(taos, command);
@ -5340,10 +5370,10 @@ static int insertTestProcess() {
if (g_totalChildTables > 0) { if (g_totalChildTables > 0) {
printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
end - start, g_totalChildTables, g_Dbs.threadCount); end - start, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
fprintf(g_fpOfInsertResult, fprintf(g_fpOfInsertResult,
"Spent %.4f seconds to create %d tables with %d thread(s)\n\n", "Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
end - start, g_totalChildTables, g_Dbs.threadCount); end - start, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
} }
taosMsleep(1000); taosMsleep(1000);
@ -5394,7 +5424,10 @@ static void *superQueryProcess(void *sarg) {
int64_t st = 0; int64_t st = 0;
int64_t et = 0; int64_t et = 0;
while (1) {
int queryTimes = g_args.query_times;
while(queryTimes --) {
if (g_queryInfo.superQueryInfo.rate && (et - st) < if (g_queryInfo.superQueryInfo.rate && (et - st) <
(int64_t)g_queryInfo.superQueryInfo.rate*1000) { (int64_t)g_queryInfo.superQueryInfo.rate*1000) {
taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
@ -5464,7 +5497,7 @@ static void *subQueryProcess(void *sarg) {
int64_t et = (int64_t)g_queryInfo.subQueryInfo.rate*1000; int64_t et = (int64_t)g_queryInfo.subQueryInfo.rate*1000;
int queryTimes = g_args.query_times; int queryTimes = g_args.query_times;
while (queryTimes --) { while(queryTimes --) {
if (g_queryInfo.subQueryInfo.rate if (g_queryInfo.subQueryInfo.rate
&& (et - st) < (int64_t)g_queryInfo.subQueryInfo.rate*1000) { && (et - st) < (int64_t)g_queryInfo.subQueryInfo.rate*1000) {
taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms
@ -5509,7 +5542,8 @@ static int queryTestProcess() {
NULL, NULL,
g_queryInfo.port); g_queryInfo.port);
if (taos == NULL) { if (taos == NULL) {
errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); errorPrint( "Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
exit(-1); exit(-1);
} }
@ -5556,7 +5590,7 @@ static int queryTestProcess() {
char sqlStr[MAX_TB_NAME_SIZE*2]; char sqlStr[MAX_TB_NAME_SIZE*2];
sprintf(sqlStr, "use %s", g_queryInfo.dbName); sprintf(sqlStr, "use %s", g_queryInfo.dbName);
verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
if (0 != queryDbExec(t_info->taos, sqlStr, NO_INSERT_TYPE)) { if (0 != queryDbExec(t_info->taos, sqlStr, NO_INSERT_TYPE, false)) {
free(infos); free(infos);
free(pids); free(pids);
errorPrint( "use database %s failed!\n\n", errorPrint( "use database %s failed!\n\n",
@ -5686,7 +5720,7 @@ static void *subSubscribeProcess(void *sarg) {
char sqlStr[MAX_TB_NAME_SIZE*2]; char sqlStr[MAX_TB_NAME_SIZE*2];
sprintf(sqlStr, "use %s", g_queryInfo.dbName); sprintf(sqlStr, "use %s", g_queryInfo.dbName);
debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)){ if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)){
return NULL; return NULL;
} }
@ -5752,7 +5786,7 @@ static void *superSubscribeProcess(void *sarg) {
char sqlStr[MAX_TB_NAME_SIZE*2]; char sqlStr[MAX_TB_NAME_SIZE*2];
sprintf(sqlStr, "use %s", g_queryInfo.dbName); sprintf(sqlStr, "use %s", g_queryInfo.dbName);
debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)) { if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
return NULL; return NULL;
} }
@ -5830,7 +5864,8 @@ static int subscribeTestProcess() {
g_queryInfo.dbName, g_queryInfo.dbName,
g_queryInfo.port); g_queryInfo.port);
if (taos == NULL) { if (taos == NULL) {
errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); errorPrint( "Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
exit(-1); exit(-1);
} }
@ -5856,7 +5891,7 @@ static int subscribeTestProcess() {
pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t));
infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) { if ((NULL == pids) || (NULL == infos)) {
printf("malloc failed for create threads\n"); errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
taos_close(taos); taos_close(taos);
exit(-1); exit(-1);
} }
@ -6004,11 +6039,10 @@ static void setParaFromArg(){
tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE); tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads; g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = 1; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
g_Dbs.queryMode = g_args.mode; g_Dbs.queryMode = g_args.mode;
g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
g_Dbs.db[0].superTbls[0].superTblExists = TBL_NO_EXISTS;
g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS;
g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
@ -6052,10 +6086,9 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].tagCount = 2; g_Dbs.db[0].superTbls[0].tagCount = 2;
} else { } else {
g_Dbs.threadCountByCreateTbl = 1; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
g_Dbs.db[0].superTbls[0].tagCount = 0; g_Dbs.db[0].superTbls[0].tagCount = 0;
} }
} }
/* Function to do regular expression check */ /* Function to do regular expression check */
@ -6126,8 +6159,9 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
memcpy(cmd + cmd_len, line, read_len); memcpy(cmd + cmd_len, line, read_len);
verbosePrint("%s() LN%d cmd: %s\n", __func__, __LINE__, cmd); verbosePrint("%s() LN%d cmd: %s\n", __func__, __LINE__, cmd);
if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE)) { if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
printf("queryDbExec %s failed!\n", cmd); errorPrint("%s() LN%d, queryDbExec %s failed!\n",
__func__, __LINE__, cmd);
tmfree(cmd); tmfree(cmd);
tmfree(line); tmfree(line);
tmfclose(fp); tmfclose(fp);
@ -6200,7 +6234,8 @@ static void queryResult() {
g_Dbs.db[0].dbName, g_Dbs.db[0].dbName,
g_Dbs.port); g_Dbs.port);
if (rInfo->taos == NULL) { if (rInfo->taos == NULL) {
errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); errorPrint( "Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
free(rInfo); free(rInfo);
exit(-1); exit(-1);
} }

View File

@ -1017,6 +1017,13 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont
return pConn; return pConn;
} }
static void doRpcReportBrokenLinkToServer(void *param, void *id) {
SRpcMsg *pRpcMsg = (SRpcMsg *)(param);
SRpcConn *pConn = (SRpcConn *)(pRpcMsg->handle);
SRpcInfo *pRpc = pConn->pRpc;
(*(pRpc->cfp))(pRpcMsg, NULL);
free(pRpcMsg);
}
static void rpcReportBrokenLinkToServer(SRpcConn *pConn) { static void rpcReportBrokenLinkToServer(SRpcConn *pConn) {
SRpcInfo *pRpc = pConn->pRpc; SRpcInfo *pRpc = pConn->pRpc;
if (pConn->pReqMsg == NULL) return; if (pConn->pReqMsg == NULL) return;
@ -1025,16 +1032,20 @@ static void rpcReportBrokenLinkToServer(SRpcConn *pConn) {
rpcAddRef(pRpc); rpcAddRef(pRpc);
tDebug("%s, notify the server app, connection is gone", pConn->info); tDebug("%s, notify the server app, connection is gone", pConn->info);
SRpcMsg rpcMsg; SRpcMsg *rpcMsg = malloc(sizeof(SRpcMsg));
rpcMsg.pCont = pConn->pReqMsg; // pReqMsg is re-used to store the APP context from server rpcMsg->pCont = pConn->pReqMsg; // pReqMsg is re-used to store the APP context from server
rpcMsg.contLen = pConn->reqMsgLen; // reqMsgLen is re-used to store the APP context length rpcMsg->contLen = pConn->reqMsgLen; // reqMsgLen is re-used to store the APP context length
rpcMsg.ahandle = pConn->ahandle; rpcMsg->ahandle = pConn->ahandle;
rpcMsg.handle = pConn; rpcMsg->handle = pConn;
rpcMsg.msgType = pConn->inType; rpcMsg->msgType = pConn->inType;
rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL; rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
pConn->pReqMsg = NULL; pConn->pReqMsg = NULL;
pConn->reqMsgLen = 0; pConn->reqMsgLen = 0;
if (pRpc->cfp) (*(pRpc->cfp))(&rpcMsg, NULL); if (pRpc->cfp) {
taosTmrStart(doRpcReportBrokenLinkToServer, 0, rpcMsg, pRpc->tmrCtrl);
} else {
free(rpcMsg);
}
} }
static void rpcProcessBrokenLink(SRpcConn *pConn) { static void rpcProcessBrokenLink(SRpcConn *pConn) {
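
The change above is the substantive one in this file: instead of invoking the server callback inline while the connection is being torn down, the broken-link notification is copied to the heap and handed to a zero-delay timer, so the callback runs later on the timer thread and the message can no longer dangle on the stack. A hedged sketch of that defer-to-timer pattern, using only SRpcMsg and taosTmrStart from the diff; the handler, message and timer-control names are illustrative:

    // Timer callback: consume the heap-allocated message, then release it.
    static void deferredNotify(void *param, void *tmrId) {
        SRpcMsg *pMsg = (SRpcMsg *)param;
        handleBrokenLink(pMsg);                 // stands in for (*pRpc->cfp)(pMsg, NULL)
        free(pMsg);                             // the deferred path owns the allocation
    }

    SRpcMsg *pMsg = malloc(sizeof(SRpcMsg));
    *pMsg = localMsg;                           // copy so the message outlives this scope
    taosTmrStart(deferredNotify, 0, pMsg, tmrCtrl);   // 0 ms delay: run as soon as possible
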

View File

@ -17,7 +17,7 @@
#define TSDB_MAX_SUBBLOCKS 8 #define TSDB_MAX_SUBBLOCKS 8
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) { static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
if (key < 0) { if (key < 0) {
return (int)(-((-key) / tsMsPerDay[precision] / days + 1)); return (int)((key + 1) / tsMsPerDay[precision] / days - 1);
} else { } else {
return (int)((key / tsMsPerDay[precision] / days)); return (int)((key / tsMsPerDay[precision] / days));
} }
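
A hedged worked example of the negative-key branch, assuming millisecond precision (tsMsPerDay = 86,400,000) and days = 1, so each file covers one day and keys in [-86400000, -1] should land in file -1:

    // key = -1         : old  -((1 / 86400000) + 1)         = -1
    //                    new  ((-1 + 1) / 86400000) - 1     = -1   (unchanged)
    // key = -86400000  : old  -((86400000 / 86400000) + 1)  = -2   (off by one file)
    //                    new  ((-86399999) / 86400000) - 1  = -1   (first key of file -1)
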

23
tests/Jenkinsfile vendored
View File

@ -1,9 +1,8 @@
def pre_test(){ def pre_test(){
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh ''' sh '''
sudo rmtaos sudo rmtaos||echo 'no taosd installed'
''' '''
}
sh ''' sh '''
cd ${WKC} cd ${WKC}
git reset --hard git reset --hard
@ -56,14 +55,8 @@ pipeline {
cd ${WKC}/tests cd ${WKC}/tests
./test-all.sh b1 ./test-all.sh b1
date''' date'''
sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
} }
} }
@ -136,6 +129,10 @@ pipeline {
./test-all.sh b2 ./test-all.sh b2
date date
''' '''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
} }
} }
@ -154,6 +151,10 @@ pipeline {
''' '''
} }
sh ''' sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests/pytest cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log ./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh ./handle_val_log.sh

View File

@ -163,6 +163,16 @@ int main(int argc, char *argv[])
getchar(); getchar();
while(1) {
if (tablesProcessed < numOfTables) {
printf("wait for process finished\n");
sleep(1);
continue;
}
break;
}
taos_close(taos); taos_close(taos);
free(tableList); free(tableList);
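
The added loop above blocks main() until the asynchronous callbacks have raised tablesProcessed to numOfTables, polling once per second. A hedged alternative sketch using a condition variable instead of sleep(), keeping the same two counters; the mutex and condition names are illustrative, and the callback would signal after incrementing tablesProcessed:

    pthread_mutex_lock(&lock);
    while (tablesProcessed < numOfTables) {
        pthread_cond_wait(&cond, &lock);    // woken by the async callback
    }
    pthread_mutex_unlock(&lock);
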

File diff suppressed because it is too large

View File

@ -29,7 +29,25 @@ function dohavecore(){
proc=`echo $corefile|cut -d "_" -f3` proc=`echo $corefile|cut -d "_" -f3`
if [ -n "$corefile" ];then if [ -n "$corefile" ];then
echo 'taosd or taos has generated core' echo 'taosd or taos has generated core'
tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz /usr/local/taos/ if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then
cd ../../../
tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so*
if [[ $2 == 1 ]];then
cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"`
rm -rf sim/case.log
else
cd community
cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
rm -rf sim/case.log
fi
else
cd ../../
if [[ $1 == 1 ]];then
tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so*
cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
rm -rf sim/case.log
fi
fi
if [[ $1 == 1 ]];then if [[ $1 == 1 ]];then
echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit
exit 8 exit 8
@ -100,14 +118,14 @@ function runSimCaseOneByOnefq {
cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
rm -rf ../../sim/case.log rm -rf ../../sim/case.log
fi fi
dohavecore $2 dohavecore $2 1
if [[ $2 == 1 ]];then if [[ $2 == 1 ]];then
exit 8 exit 8
fi fi
fi fi
end_time=`date +%s` end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log
dohavecore $2 dohavecore $2 1
fi fi
done done
rm -rf ../../../sim/case.log rm -rf ../../../sim/case.log
@ -175,7 +193,7 @@ function runPyCaseOneByOnefq() {
echo '=====================log===================== ' echo '=====================log===================== '
cat ../../sim/case.log cat ../../sim/case.log
rm -rf ../../sim/case.log rm -rf ../../sim/case.log
dohavecore $2 dohavecore $2 2
if [[ $2 == 1 ]];then if [[ $2 == 1 ]];then
exit 8 exit 8
fi fi
@ -184,7 +202,7 @@ function runPyCaseOneByOnefq() {
else else
$line > /dev/null 2>&1 $line > /dev/null 2>&1
fi fi
dohavecore $2 dohavecore $2 2
fi fi
done done
rm -rf ../../sim/case.log rm -rf ../../sim/case.log
@ -211,15 +229,15 @@ if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ]; then
echo "### run TSIM b1 test ###" echo "### run TSIM b1 test ###"
runSimCaseOneByOnefq b1 0 runSimCaseOneByOnefq b1 0
runSimCaseOneByOnefq b4 0 runSimCaseOneByOnefq b4 0
runSimCaseOneByOnefq b5 0
runSimCaseOneByOnefq b6 0
runSimCaseOneByOnefq b7 0 runSimCaseOneByOnefq b7 0
elif [ "$1" == "b2" ]; then elif [ "$1" == "b2" ]; then
echo "### run TSIM b2 test ###" echo "### run TSIM b2 test ###"
runSimCaseOneByOnefq b2 0 runSimCaseOneByOnefq b2 0
runSimCaseOneByOnefq b5 0
elif [ "$1" == "b3" ]; then elif [ "$1" == "b3" ]; then
echo "### run TSIM b3 test ###" echo "### run TSIM b3 test ###"
runSimCaseOneByOnefq b3 0 runSimCaseOneByOnefq b3 0
runSimCaseOneByOnefq b6 0
elif [ "$1" == "b1fq" ]; then elif [ "$1" == "b1fq" ]; then
echo "### run TSIM b1 test ###" echo "### run TSIM b1 test ###"
runSimCaseOneByOnefq b1 1 runSimCaseOneByOnefq b1 1