Merge pull request #30139 from taosdata/fix/TS-6099-3.0A

feat: add batch query new feature (3.0)
This commit is contained in:
Linhe Huo 2025-03-12 20:26:16 +08:00 committed by GitHub
commit 6e652f5a32
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
11 changed files with 495 additions and 43 deletions

View File

@ -371,10 +371,14 @@ Specify the configuration parameters for tag and data columns in `super_tables`
### Query Parameters
In query scenarios, `filetype` must be set to `query`.
`filetype` must be set to `query`.
`query_mode` connect method:
- "taosc": Native.
- "rest" : RESTful.
`query_times` specifies the number of times to run the query, numeric type.
Query scenarios can control the execution of slow query statements by setting `kill_slow_query_threshold` and `kill_slow_query_interval` parameters, where threshold controls that queries exceeding the specified exec_usec time will be killed by taosBenchmark, in seconds; interval controls the sleep time to avoid continuous slow query CPU consumption, in seconds.
For other common parameters, see [General Configuration Parameters](#general-configuration-parameters)
@ -387,8 +391,21 @@ Configuration parameters for querying specified tables (can specify supertables,
The total number of queries(`General Query`) = the number of `sqls` * `query_times` * `threads`
`Mixed Query`:
All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement needs to execute `query_times` queries.
The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times`
The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times`.
- **batch_query** : Batch query feature switch.
"yes": indicates that it is enabled.
"no": indicates that it is disabled; any other value causes an error.
Batch query refers to dividing all SQL statements in `sqls` into `threads` groups, with each thread executing one group.
Each SQL statement is queried only once before exiting, and the main thread waits for all threads to complete before determining if the `query_interval` parameter is set. If sleep is required for a specified time, each thread group is restarted and the previous process is repeated until the number of queries is exhausted.
Functional limitations:
- Only supports scenarios where `mixed_query` is set to 'yes'.
- Restful queries are not supported, meaning `query_mode` cannot be 'rest'.
- **query_interval** : Query interval, in milliseconds, default is 0.
When the `batch_query` switch is on, it indicates the interval after each batch of queries completes; when it is off, it indicates the interval after each individual SQL query completes.
If the execution time of a query exceeds the interval, there is no additional wait; if it is less than the interval, the thread waits to make up the remaining interval time.
- **threads** : Number of threads executing the SQL query, default is 1.
- **sqls**:
- **sql**: The SQL command to execute, required.

View File

@ -280,27 +280,45 @@ taosBenchmark -f <json file>
### 查询配置参数
查询场景下 `filetype` 必须设置为 `query`
`query_mode` 查询连接方式,取值为:
- “taosc”: 通过 Native 连接方式查询。
- “rest” : 通过 restful 连接方式查询。
`query_times` 指定运行查询的次数,数值类型。
查询场景可以通过设置 `kill_slow_query_threshold` 和 `kill_slow_query_interval` 参数来控制杀掉慢查询语句的执行,threshold 控制如果 exec_usec 超过指定时间的查询将被 taosBenchmark 杀掉,单位为秒。
interval 控制休眠时间,避免持续查询慢查询消耗 CPU单位为秒。
其它通用参数详见 [通用配置参数](#通用配置参数)
其它通用参数详见 [通用配置参数](#通用配置参数)。
#### 执行指定查询语句
查询指定表(可以指定超级表、子表或普通表)的配置参数在 `specified_table_query` 中设置。
- **mixed_query**:查询模式
“yes”`混合查询`
"no"(默认值)`普通查询`
`普通查询``sqls` 中每个 sql 启动 `threads` 个线程查询此 sql执行完 `query_times` 次查询后退出,执行此 sql 的所有线程都完成后进入下一个 sql
- **mixed_query**:混合查询开关。
“yes”: 开启 “混合查询”。
“no” : 关闭 “混合查询” ,即 “普通查询”。
- 普通查询:
`sqls` 中每个 sql 启动 `threads` 个线程查询此 sql, 执行完 `query_times` 次查询后退出,执行此 sql 的所有线程都完成后进入下一个 sql
`查询总次数` = `sqls` 个数 * `query_times` * `threads`
`混合查询``sqls` 中所有 sql 分成 `threads` 个组,每个线程执行一组,每个 sql 都需执行 `query_times` 次查询
- 混合查询:
`sqls` 中所有 sql 分成 `threads` 个组,每个线程执行一组, 每个 sql 都需执行 `query_times` 次查询
`查询总次数` = `sqls` 个数 * `query_times`
- **batch_query**:批查询功能开关。
取值范围 “yes” 表示开启,"no" 不开启,其它值报错。
批查询是指 `sqls` 中所有 sql 分成 `threads` 个组,每个线程执行一组,每个 sql 只执行一次查询后退出,主线程等待所有线程都执行完,再判断是否设置有 `query_interval` 参数,如果有需要 sleep 指定时间,再启动各线程组重复前面的过程,直到查询次数耗尽为止。
功能限制条件:
- 只支持 `mixed_query` 为 "yes" 的场景。
- 不支持 restful 查询,即 `query_mode` 不能为 "rest"。
- **query_interval**查询时间间隔单位millisecond默认值为 0。
"batch_query" 开关打开时,表示是每批查询完间隔时间;关闭时,表示每个 sql 查询完间隔时间
如果执行查询的时间超过间隔时间,那么将不再等待,如果执行查询的时间不足间隔时间,需等待补足间隔时间
- **threads**:执行查询 SQL 的线程数,默认值为 1。

View File

@ -0,0 +1,36 @@
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "test",
"query_times": 5,
"query_mode": "taosc",
"specified_table_query": {
"concurrent": 5,
"query_interval": 0,
"mixed_query": "no",
"batch_query": "yes",
"sqls": [
{
"sql": "select last_row(*) from test.meters"
},
{
"sql": "select count(*) from test.meters"
},
{
"sql": "select * from test.d0",
"result": "./query_res1.txt"
},
{
"sql": "select count(*) from test.d1"
},
{
"sql": "select * from test.d2"
}
]
}
}

View File

@ -0,0 +1,36 @@
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "test",
"query_times": 5,
"query_mode": "rest",
"specified_table_query": {
"concurrent": 5,
"query_interval": 1000,
"mixed_query": "yes",
"batch_query": "yes",
"sqls": [
{
"sql": "select last_row(*) from test.meters"
},
{
"sql": "select count(*) from test.meters"
},
{
"sql": "select * from test.d0",
"result": "./query_res1.txt"
},
{
"sql": "select count(*) from test.d1"
},
{
"sql": "select * from test.d2"
}
]
}
}

View File

@ -0,0 +1,36 @@
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "test",
"query_times": 5,
"query_mode": "taosc",
"specified_table_query": {
"concurrent": 5,
"query_interval": 1000,
"mixed_query": "yes",
"batch_query": "yes",
"sqls": [
{
"sql": "select last_row(*) from test.meters"
},
{
"sql": "select count(*) from test.meters"
},
{
"sql": "select * from test.d0",
"result": "./query_res1.txt"
},
{
"sql": "select count(*) from test.d1"
},
{
"sql": "select * from test.d2"
}
]
}
}

View File

@ -0,0 +1,36 @@
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "test",
"query_times": 5,
"query_mode": "taosc",
"specified_table_query": {
"concurrent": 5,
"query_interval": 100,
"mixed_query": "yes",
"batch_query": "no",
"sqls": [
{
"sql": "select last_row(*) from test.meters"
},
{
"sql": "select count(*) from test.meters"
},
{
"sql": "select * from test.d0",
"result": "./query_res1.txt"
},
{
"sql": "select count(*) from test.d1"
},
{
"sql": "select * from test.d2"
}
]
}
}

View File

@ -90,15 +90,15 @@ class TDTestCase(TBase):
fval = float(value)
# compare
if equal and fval != expect:
tdLog.exit(f"check not expect. expect:{expect} real:{fval}, key:{key} end:{end} output:\n{output}")
tdLog.exit(f"check not expect. expect:{expect} real:{fval}, key:'{key}' end:'{end}' output:\n{output}")
elif equal == False and fval <= expect:
tdLog.exit(f"failed because {fval} <= {expect}, key:{key} end:{end} output:\n{output}")
tdLog.exit(f"failed because {fval} <= {expect}, key:'{key}' end:'{end}' output:\n{output}")
else:
# succ
if equal:
tdLog.info(f"check successfully. key:{key} expect:{expect} real:{fval}")
tdLog.info(f"check successfully. key:'{key}' expect:{expect} real:{fval}")
else:
tdLog.info(f"check successfully. key:{key} {fval} > {expect}")
tdLog.info(f"check successfully. key:'{key}' {fval} > {expect}")
def checkAfterRun(self, benchmark, jsonFile, specMode, tbCnt):
@ -128,16 +128,24 @@ class TDTestCase(TBase):
sqls = data[label]["sqls"]
# mix
# batch_query
try:
batchQuery = data[label]["batch_query"]
except:
batchQuery = "no"
# mixed_query
try:
mixedQuery = data[label]["mixed_query"]
except:
mixedQuery = "no"
tdLog.info(f"queryTimes={queryTimes} concurrent={concurrent} mixedQuery={mixedQuery} len(sqls)={len(sqls)} label={label}\n")
tdLog.info(f"queryTimes={queryTimes} concurrent={concurrent} mixedQuery={mixedQuery} "
f"batchQuery={batchQuery} len(sqls)={len(sqls)} label={label}\n")
totalQueries = 0
threadQueries = 0
QPS = 10
if continueIfFail.lower() == "yes":
allEnd = " "
@ -154,9 +162,16 @@ class TDTestCase(TBase):
minKey = "min:"
else:
# spec mixed or super
if specMode:
# spec
totalQueries = queryTimes * len(sqls)
# spec mixed
if batchQuery.lower() == "yes":
# batch
threadQueries = len(sqls)
QPS = 2
else:
threadQueries = totalQueries
else:
# super
totalQueries = queryTimes * len(sqls) * tbCnt
@ -182,7 +197,7 @@ class TDTestCase(TBase):
["p99: ", "s", 0, False],
["INFO: Spend ", " ", 0, False],
["completed total queries: ", ",", totalQueries, True],
["the QPS of all threads:", allEnd, 10 , False] # all qps need > 5
["the QPS of all threads:", allEnd, QPS , False] # all qps need > 5
]
# check
@ -196,6 +211,7 @@ class TDTestCase(TBase):
args = [
["./tools/benchmark/basic/json/queryModeSpec", True],
["./tools/benchmark/basic/json/queryModeSpecMix", True],
["./tools/benchmark/basic/json/queryModeSpecMixBatch", True],
["./tools/benchmark/basic/json/queryModeSuper", False]
]
@ -222,8 +238,9 @@ class TDTestCase(TBase):
self.expectFailed(f"{benchmark} -f ./tools/benchmark/basic/json/queryErrorBothSpecSuper.json")
# json format error
self.expectFailed(f"{benchmark} -f ./tools/benchmark/basic/json/queryErrorFormat.json")
# batch query
self.expectFailed(f"{benchmark} -f ./tools/benchmark/basic/json/queryErrorBatchNoMix.json")
self.expectFailed(f"{benchmark} -f ./tools/benchmark/basic/json/queryErrorBatchRest.json")
def run(self):
tbCnt = 10

View File

@ -665,6 +665,7 @@ typedef struct SpecifiedQueryInfo_S {
TAOS_RES *res[MAX_QUERY_SQL_COUNT];
uint64_t totalQueried;
bool mixed_query;
bool batchQuery; // mixed query have batch and no batch query
// error rate
uint64_t totalFail;
} SpecifiedQueryInfo;

View File

@ -1860,6 +1860,23 @@ int32_t readSpecQueryJson(tools_cJSON * specifiedQuery) {
}
}
// batchQuery
tools_cJSON *batchQueryObj =
tools_cJSON_GetObjectItem(specifiedQuery, "batch_query");
if (tools_cJSON_IsString(batchQueryObj)) {
if (0 == strcasecmp(batchQueryObj->valuestring, "yes")) {
g_queryInfo.specifiedQueryInfo.batchQuery = true;
infoPrint("%s\n","batch_query is True");
} else if (0 == strcasecmp(batchQueryObj->valuestring, "no")) {
g_queryInfo.specifiedQueryInfo.batchQuery = false;
infoPrint("%s\n","batch_query is False");
} else {
errorPrint("Invalid batch_query value: %s\n",
batchQueryObj->valuestring);
return -1;
}
}
tools_cJSON *concurrent =
tools_cJSON_GetObjectItem(specifiedQuery, "concurrent");
if (tools_cJSON_IsNumber(concurrent)) {

View File

@ -47,7 +47,7 @@ void* benchCancelHandler(void* arg) {
}
#endif
void checkArgumentValid() {
int checkArgumentValid() {
// check prepared_rand valid
if(g_arguments->prepared_rand < g_arguments->reqPerReq) {
infoPrint("prepared_rand(%"PRIu64") < num_of_records_per_req(%d), so set num_of_records_per_req = prepared_rand\n",
@ -64,13 +64,32 @@ void checkArgumentValid() {
false,
1)) {
errorPrint("%s", "Failed to convert server address\n");
return;
return -1;
}
encodeAuthBase64();
g_arguments->rest_server_ver_major =
getServerVersionRest(g_arguments->port);
}
// check batch query
if (g_arguments->test_mode == QUERY_TEST) {
if (g_queryInfo.specifiedQueryInfo.batchQuery) {
// batch_query = yes
if (!g_queryInfo.specifiedQueryInfo.mixed_query) {
// mixed_query = no
errorPrint("%s\n", "batch_query = yes require mixed_query is yes");
return -1;
}
// rest not support
if (g_queryInfo.iface == REST_IFACE) {
errorPrint("%s\n", "batch_query = yes not support restful.");
return -1;
}
}
}
return 0;
}
int main(int argc, char* argv[]) {
@ -144,7 +163,11 @@ int main(int argc, char* argv[]) {
}
infoPrint("client version: %s\n", taos_get_client_info());
checkArgumentValid();
if (checkArgumentValid()) {
errorPrint("failed to readJsonConfig %s\n", g_arguments->metaFile);
exitLog();
return -1;
}
if (g_arguments->test_mode == INSERT_TEST) {
if (insertTestProcess()) {

View File

@ -80,10 +80,15 @@ int selectAndGetResult(qThreadInfo *pThreadInfo, char *command, bool record) {
}
// intelligent sleep
void autoSleep(uint64_t interval, uint64_t delay ) {
int32_t autoSleep(uint64_t interval, uint64_t delay ) {
int32_t msleep = 0;
if (delay < interval * 1000) {
toolsMsleep((int32_t)(interval * 1000 - delay)); // ms
msleep = (int32_t)((interval - delay/1000));
infoPrint("do sleep %dms ...\n", msleep);
toolsMsleep(msleep); // ms
debugPrint("%s\n","do sleep end");
}
return msleep;
}
// reset
@ -142,8 +147,11 @@ static void *specQueryMixThread(void *sarg) {
int64_t et = 0;
int64_t startTs = toolsGetTimestampMs();
int64_t lastPrintTime = startTs;
uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
uint64_t interval = g_queryInfo.specifiedQueryInfo.queryInterval;
// batchQuery
bool batchQuery = g_queryInfo.specifiedQueryInfo.batchQuery;
uint64_t queryTimes = batchQuery ? 1 : g_queryInfo.specifiedQueryInfo.queryTimes;
uint64_t interval = batchQuery ? 0 : g_queryInfo.specifiedQueryInfo.queryInterval;
pThreadInfo->query_delay_list = benchArrayInit(queryTimes, sizeof(int64_t));
for (int i = pThreadInfo->start_sql; i <= pThreadInfo->end_sql; ++i) {
SSQL * sql = benchArrayGet(g_queryInfo.specifiedQueryInfo.sqls, i);
@ -382,7 +390,7 @@ static void *stbQueryThread(void *sarg) {
// --------------------------------- firse level function ------------------------------
//
void totalChildQuery(qThreadInfo* infos, int threadCnt, int64_t spend) {
void totalChildQuery(qThreadInfo* infos, int threadCnt, int64_t spend, BArray *pDelays) {
// valid check
if (infos == NULL || threadCnt == 0) {
return ;
@ -444,9 +452,11 @@ void totalChildQuery(qThreadInfo* infos, int threadCnt, int64_t spend) {
(int32_t)(delay_list->size * 0.99)))/1E6,
*(int64_t *)(benchArrayGet(delay_list,
(int32_t)(delay_list->size - 1)))/1E6);
} else {
errorPrint("%s() LN%d, delay_list size: %"PRId64"\n",
__func__, __LINE__, (int64_t)delay_list->size);
}
// copy to another
if (pDelays) {
benchArrayAddBatch(pDelays, delay_list->pData, delay_list->size, false);
}
benchArrayDestroy(delay_list);
}
@ -547,7 +557,7 @@ static int stbQuery(uint16_t iface, char* dbName) {
}
// total show
totalChildQuery(threadInfos, threadCnt, end - start);
totalChildQuery(threadInfos, threadCnt, end - start, NULL);
ret = 0;
@ -825,7 +835,7 @@ static int specQueryMix(uint16_t iface, char* dbName) {
}
// statistic
totalChildQuery(infos, threadCnt, end - start);
totalChildQuery(infos, threadCnt, end - start, NULL);
ret = 0;
OVER:
@ -838,6 +848,206 @@ OVER:
return ret;
}
// Print overall batch-query statistics: total sleep time and the
// min/avg/p90/p95/p99/max latency over all recorded per-query delays.
// allSleep: accumulated sleep time in ms; pDelays: delays in microseconds.
void totalBatchQuery(int32_t allSleep, BArray *pDelays) {
    // order delays ascending so percentile positions are meaningful
    qsort(pDelays->pData, pDelays->size, pDelays->elemSize, compare);

    // accumulate the sum of all recorded delays (us)
    double sumDelay = 0;
    size_t cnt = pDelays->size;
    for (size_t idx = 0; idx < cnt; idx++) {
        sumDelay += *(int64_t *)benchArrayGet(pDelays, idx);
    }
    printf("\n");

    // show total sleep time, if any sleeping occurred
    if (allSleep > 0) {
        infoPrint("All sleep spend: %.3fs\n", (float)allSleep/1000);
    }

    // show latency distribution when at least one delay was recorded
    if (cnt) {
        int64_t dMin = *(int64_t *)benchArrayGet(pDelays, 0);
        int64_t d90  = *(int64_t *)benchArrayGet(pDelays, (int32_t)(cnt * 0.9));
        int64_t d95  = *(int64_t *)benchArrayGet(pDelays, (int32_t)(cnt * 0.95));
        int64_t d99  = *(int64_t *)benchArrayGet(pDelays, (int32_t)(cnt * 0.99));
        int64_t dMax = *(int64_t *)benchArrayGet(pDelays, (int32_t)(cnt - 1));
        infoPrint(
            "Total delay: "
            "min delay: %.6fs, "
            "avg delay: %.6fs, "
            "p90: %.6fs, "
            "p95: %.6fs, "
            "p99: %.6fs, "
            "max: %.6fs\n",
            dMin/1E6,
            sumDelay/cnt/1E6,
            d90/1E6,
            d95/1E6,
            d99/1E6,
            dMax/1E6);
    }
}
//
// specQuery Mix Batch
//
// Run the "batch" flavor of mixed query: sqls are split into nConcurrent
// groups, each thread runs its group exactly once per round; the main thread
// waits for all threads, optionally sleeps queryInterval ms, then repeats for
// query_times rounds. Returns 0 on success, -1 on failure.
// Fixes vs. previous revision: warnPrint reported the wrong new concurrency
// value; guarded the division when nConcurrent is 0; removed the
// signed/unsigned mismatch in the round loop condition.
static int specQueryBatch(uint16_t iface, char* dbName) {
    // init
    BArray *pDelays = NULL;
    int ret = -1;
    int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
    uint64_t interval = g_queryInfo.specifiedQueryInfo.queryInterval;
    pthread_t * pids = benchCalloc(nConcurrent, sizeof(pthread_t), true);
    qThreadInfo *infos = benchCalloc(nConcurrent, sizeof(qThreadInfo), true);
    infoPrint("start batch query, sleep interval:%" PRIu64 "ms query times:%" PRIu64 " thread:%d \n",
            interval, g_queryInfo.query_times, nConcurrent);

    // concurrent calc: each thread gets `a` sqls, the first `b` threads one extra
    int total_sql_num = g_queryInfo.specifiedQueryInfo.sqls->size;
    int start_sql = 0;
    // guard against division by zero when concurrent is 0
    int a = (nConcurrent > 0) ? total_sql_num / nConcurrent : 0;
    if (a < 1) {
        // fewer sqls than threads: shrink concurrency to one sql per thread
        warnPrint("sqls num:%d < concurrent:%d, set concurrent %d\n", total_sql_num, nConcurrent, total_sql_num);
        nConcurrent = total_sql_num;
        a = 1;
    }
    int b = 0;
    if (nConcurrent != 0) {
        b = total_sql_num % nConcurrent;
    }

    //
    // connect
    //
    int connCnt = 0;
    for (int i = 0; i < nConcurrent; ++i) {
        qThreadInfo *pThreadInfo = infos + i;
        // create conn
        if (initQueryConn(pThreadInfo, iface)){
            ret = -1;
            goto OVER;
        }
        connCnt ++;
    }

    // reset total
    g_queryInfo.specifiedQueryInfo.totalQueried = 0;
    g_queryInfo.specifiedQueryInfo.totalFail = 0;

    //
    // running
    //
    int threadCnt = 0;
    int allSleep = 0;  // total slept time in ms across all rounds
    pDelays = benchArrayInit(10, sizeof(int64_t));
    for (int m = 0; (uint64_t)m < g_queryInfo.query_times; ++m) {
        // reset
        threadCnt = 0;
        start_sql = 0;

        // create thread
        for (int i = 0; i < nConcurrent; ++i) {
            qThreadInfo *pThreadInfo = infos + i;
            pThreadInfo->threadID  = i;
            pThreadInfo->start_sql = start_sql;
            // first b threads take a+1 sqls, the rest take a
            pThreadInfo->end_sql   = i < b ? start_sql + a : start_sql + a - 1;
            start_sql = pThreadInfo->end_sql + 1;
            pThreadInfo->total_delay = 0;
            // total zero
            pThreadInfo->nSucc = 0;
            pThreadInfo->nFail = 0;
            // main run
            int code = pthread_create(pids + i, NULL, specQueryMixThread, pThreadInfo);
            if (code != 0) {
                errorPrint("failed specQueryBatchThread create. error code =%d \n", code);
                break;
            }
            threadCnt ++;
        }

        bool needExit = false;
        if (threadCnt != nConcurrent) {
            // if failed, set terminate flag true like ctrl+c exit
            needExit = true;
            g_arguments->terminate = true;
        }

        // wait threads of this round to finish and tally their counters
        int64_t start = toolsGetTimestampUs();
        for (int i = 0; i < threadCnt; ++i) {
            pthread_join(pids[i], NULL);
            qThreadInfo *pThreadInfo = infos + i;
            // total queries
            g_queryInfo.specifiedQueryInfo.totalQueried += pThreadInfo->nSucc;
            if (g_arguments->continueIfFail == YES_IF_FAILED) {
                // yes need add failed count
                g_queryInfo.specifiedQueryInfo.totalQueried += pThreadInfo->nFail;
                g_queryInfo.specifiedQueryInfo.totalFail    += pThreadInfo->nFail;
            }
            // destroy per-thread delay lists ourselves when aborting early
            if (needExit) {
                benchArrayDestroy(pThreadInfo->query_delay_list);
                pThreadInfo->query_delay_list = NULL;
            }
        }
        int64_t end = toolsGetTimestampUs();

        // abort if any thread failed to start
        if (needExit) {
            errorPrint("failed to create thread. expect nConcurrent=%d real threadCnt=%d, exit testing.\n", nConcurrent, threadCnt);
            goto OVER;
        }

        // batch total
        printf("\n");
        totalChildQuery(infos, threadCnt, end - start, pDelays);

        // show batch total
        int64_t delay = end - start;
        infoPrint("count:%d execute batch spend: %" PRId64 "ms\n", m + 1, delay/1000);

        // sleep between rounds (autoSleep subtracts the batch's own run time)
        if ( g_queryInfo.specifiedQueryInfo.batchQuery && interval > 0) {
            allSleep += autoSleep(interval, delay);
        }

        // check cancel
        if(g_arguments->terminate) {
            break;
        }
    }
    ret = 0;

    // all total
    totalBatchQuery(allSleep, pDelays);

OVER:
    // close conn
    for (int i = 0; i < connCnt; ++i) {
        qThreadInfo *pThreadInfo = infos + i;
        closeQueryConn(pThreadInfo, iface);
    }

    // free threads
    tmfree(pids);
    tmfree(infos);

    // free sqls
    freeSpecialQueryInfo();

    // free delays
    if (pDelays) {
        benchArrayDestroy(pDelays);
    }

    return ret;
}
// total query for end
void totalQuery(int64_t spends) {
// total QPS
@ -903,15 +1113,20 @@ int queryTestProcess() {
// start running
//
uint64_t startTs = toolsGetTimestampMs();
if(g_queryInfo.specifiedQueryInfo.sqls && g_queryInfo.specifiedQueryInfo.sqls->size > 0) {
// specified table
if (g_queryInfo.specifiedQueryInfo.mixed_query) {
// mixed
if(g_queryInfo.specifiedQueryInfo.batchQuery) {
if (specQueryBatch(g_queryInfo.iface, g_queryInfo.dbName)) {
return -1;
}
} else {
if (specQueryMix(g_queryInfo.iface, g_queryInfo.dbName)) {
return -1;
}
}
} else {
// no mixied
if (specQuery(g_queryInfo.iface, g_queryInfo.dbName)) {