#define _GNU_SOURCE
#define CURL_STATICLIB
@@ -67,6 +68,12 @@ enum TEST_MODE {
INVAID_TEST
};
+enum QUERY_MODE {
+ SYNC_QUERY_MODE, // 0
+ ASYNC_QUERY_MODE, // 1
+ INVALID_MODE
+};
+
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
#define MAX_USERNAME_SIZE 64
@@ -198,7 +205,7 @@ typedef struct SArguments_S {
bool verbose_print;
bool performance_print;
char * output_file;
- int mode;
+ int query_mode;
char * datatype[MAX_NUM_DATATYPE + 1];
int len_of_binary;
int num_of_CPR;
@@ -351,7 +358,7 @@ typedef struct SpecifiedQueryInfo_S {
int rate; // 0: unlimit > 0 loop/s
int concurrent;
int sqlCount;
- int subscribeMode; // 0: sync, 1: async
+ int mode; // 0: sync, 1: async
int subscribeInterval; // ms
int queryTimes;
int subscribeRestart;
@@ -359,13 +366,14 @@ typedef struct SpecifiedQueryInfo_S {
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
+ int totalQueried;
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
char sTblName[MAX_TB_NAME_SIZE+1];
int rate; // 0: unlimit > 0 loop/s
int threadCnt;
- int subscribeMode; // 0: sync, 1: async
+ int mode; // 0: sync, 1: async
int subscribeInterval; // ms
int subscribeRestart;
int subscribeKeepProgress;
@@ -378,6 +386,7 @@ typedef struct SuperQueryInfo_S {
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char* childTblName;
+ int totalQueried;
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
@@ -391,6 +400,7 @@ typedef struct SQueryMetaInfo_S {
SpecifiedQueryInfo specifiedQueryInfo;
SuperQueryInfo superQueryInfo;
+ int totalQueried;
} SQueryMetaInfo;
typedef struct SThreadInfo_S {
@@ -429,6 +439,8 @@ typedef struct SThreadInfo_S {
int64_t maxDelay;
int64_t minDelay;
+ // query
+ int querySeq; // sequence number of sql command
} threadInfo;
#ifdef WINDOWS
@@ -499,11 +511,6 @@ static void resetAfterAnsiEscape(void) {
static int taosRandom()
{
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
- srand(tv.tv_usec);
-
return rand();
}
@@ -699,7 +706,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
if (strcmp(argv[i], "-f") == 0) {
arguments->metaFile = argv[++i];
} else if (strcmp(argv[i], "-c") == 0) {
- tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
} else if (strcmp(argv[i], "-h") == 0) {
arguments->host = argv[++i];
@@ -714,7 +721,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
} else if (strcmp(argv[i], "-s") == 0) {
arguments->sqlFile = argv[++i];
} else if (strcmp(argv[i], "-q") == 0) {
- arguments->mode = atoi(argv[++i]);
+ arguments->query_mode = atoi(argv[++i]);
} else if (strcmp(argv[i], "-T") == 0) {
arguments->num_of_threads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-i") == 0) {
@@ -758,7 +765,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
char *dupstr = strdup(argv[i]);
char *running = dupstr;
char *token = strsep(&running, ",");
- while (token != NULL) {
+ while(token != NULL) {
if (strcasecmp(token, "INT")
&& strcasecmp(token, "FLOAT")
&& strcasecmp(token, "TINYINT")
@@ -769,6 +776,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
&& strcasecmp(token, "BINARY")
&& strcasecmp(token, "NCHAR")) {
printHelp();
+ free(dupstr);
ERROR_EXIT("Invalid data_type!\n");
exit(EXIT_FAILURE);
}
@@ -776,6 +784,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
token = strsep(&running, ",");
if (index >= MAX_NUM_DATATYPE) break;
}
+ free(dupstr);
sptr[index] = NULL;
}
} else if (strcmp(argv[i], "-w") == 0) {
@@ -962,7 +971,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) {
char temp[16000];
// fetch the records row by row
- while ((row = taos_fetch_row(res))) {
+ while((row = taos_fetch_row(res))) {
if (totalLen >= 100*1024*1024 - 32000) {
if (fp) fprintf(fp, "%s", databuf);
totalLen = 0;
@@ -984,7 +993,8 @@ static void getResult(TAOS_RES *res, char* resultFileName) {
static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) {
TAOS_RES *res = taos_query(taos, command);
if (res == NULL || taos_errno(res) != 0) {
- printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res));
+ errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
+ __func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
return;
}
@@ -1053,7 +1063,7 @@ static void rand_string(char *str, int size) {
//--size;
int n;
for (n = 0; n < size - 1; n++) {
- int key = rand_tinyint() % (int)(sizeof(charset) - 1);
+ int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1);
str[n] = charset[key];
}
str[n] = 0;
@@ -1161,7 +1171,8 @@ static int printfInsertMeta() {
if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
- printf(" precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision);
+ printf(" precision: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].dbCfg.precision);
} else {
printf("\033[1m\033[40;31m precision error: %s\033[0m\n",
g_Dbs.db[i].dbCfg.precision);
@@ -1169,11 +1180,13 @@ static int printfInsertMeta() {
}
}
- printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount);
+ printf(" super table count: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].superTblCount);
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
printf(" super table[\033[33m%d\033[0m]:\n", j);
- printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName);
+ printf(" stbName: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].sTblName);
if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
printf(" autoCreateTable: \033[33m%s\033[0m\n", "no");
@@ -1239,7 +1252,7 @@ static int printfInsertMeta() {
g_Dbs.db[i].superTbls[j].sampleFile);
printf(" tagsFile: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].tagsFile);
- printf(" columnCount: \033[33m%d\033[0m\n ",
+ printf(" columnCount: \033[33m%d\033[0m\n",
g_Dbs.db[i].superTbls[j].columnCount);
for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
//printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
@@ -1457,41 +1470,61 @@ static void printfQueryMeta() {
printf("\n");
printf("specified table query info: \n");
- printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.rate);
+ printf("query interval: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.rate);
printf("top query times:\033[33m%d\033[0m\n", g_args.query_times);
- printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.concurrent);
- printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount);
+ printf("concurrent: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.concurrent);
+ printf("sqlCount: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.sqlCount);
printf("specified tbl query times:\n");
- printf(" \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.queryTimes);
+ printf(" \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.queryTimes);
if (SUBSCRIBE_TEST == g_args.test_mode) {
- printf("mod: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeMode);
- printf("interval: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
+ printf("mod: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.mode);
+ printf("interval: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
}
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]);
+ printf(" sql[%d]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.specifiedQueryInfo.sql[i]);
}
printf("\n");
- printf("super table query info: \n");
- printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate);
- printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.threadCnt);
- printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.childTblCount);
- printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.superQueryInfo.sTblName);
- printf("stb query times:\033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.queryTimes);
+ printf("super table query info:\n");
+ printf("query interval: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.rate);
+ printf("threadCnt: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.threadCnt);
+ printf("childTblCount: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.childTblCount);
+ printf("stable name: \033[33m%s\033[0m\n",
+ g_queryInfo.superQueryInfo.sTblName);
+ printf("stb query times:\033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.queryTimes);
if (SUBSCRIBE_TEST == g_args.test_mode) {
- printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode);
- printf("interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress);
+ printf("mod: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.mode);
+ printf("interval: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
}
- printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount);
+ printf("sqlCount: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.sqlCount);
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]);
+ printf(" sql[%d]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.superQueryInfo.sql[i]);
}
printf("\n");
@@ -1635,7 +1668,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
TAOS_FIELD *fields = taos_fetch_fields(res);
- while ((row = taos_fetch_row(res)) != NULL) {
+ while((row = taos_fetch_row(res)) != NULL) {
// sys database name : 'log'
if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) {
@@ -1668,7 +1701,8 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
- dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+ dbInfos[count]->cachelast =
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
tstrncpy(dbInfos[count]->precision,
(char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
@@ -1679,7 +1713,8 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
count++;
if (count > MAX_DATABASE_COUNT) {
- errorPrint( "The database count overflow than %d\n", MAX_DATABASE_COUNT);
+ errorPrint("%s() LN%d, The database count overflow than %d\n",
+ __func__, __LINE__, MAX_DATABASE_COUNT);
break;
}
}
@@ -1689,6 +1724,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
static void printfDbInfoForQueryToFile(
char* filename, SDbInfo* dbInfos, int index) {
+
if (filename[0] == 0)
return;
@@ -1907,7 +1943,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr)
if (bytes == 0)
break;
sent+=bytes;
- } while (sent < req_str_len);
+ } while(sent < req_str_len);
memset(response_buf, 0, RESP_BUF_LEN);
resp_len = sizeof(response_buf) - 1;
@@ -1925,7 +1961,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr)
if (bytes == 0)
break;
received += bytes;
- } while (received < resp_len);
+ } while(received < resp_len);
if (received == resp_len) {
free(request_buf);
@@ -1949,7 +1985,8 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr)
static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) {
char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
if (NULL == dataBuf) {
- errorPrint("%s() LN%d, calloc failed! size:%d\n", __func__, __LINE__, TSDB_MAX_SQL_LEN+1);
+ errorPrint("%s() LN%d, calloc failed! size:%d\n",
+ __func__, __LINE__, TSDB_MAX_SQL_LEN+1);
return NULL;
}
@@ -2153,7 +2190,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
}
char* pTblName = childTblName;
- while ((row = taos_fetch_row(res)) != NULL) {
+ while((row = taos_fetch_row(res)) != NULL) {
int32_t* len = taos_fetch_lengths(res);
tstrncpy(pTblName, (char *)row[0], len[0]+1);
//printf("==== sub table name: %s\n", pTblName);
@@ -2216,7 +2253,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
int tagIndex = 0;
int columnIndex = 0;
TAOS_FIELD *fields = taos_fetch_fields(res);
- while ((row = taos_fetch_row(res)) != NULL) {
+ while((row = taos_fetch_row(res)) != NULL) {
if (0 == count) {
count++;
continue;
@@ -2331,7 +2368,8 @@ static int createSuperTable(TAOS * taos, char* dbName,
lenOfOneRow += 21;
} else {
taos_close(taos);
- printf("config error data type : %s\n", dataType);
+ errorPrint("%s() LN%d, config error data type : %s\n",
+ __func__, __LINE__, dataType);
exit(-1);
}
}
@@ -2349,7 +2387,8 @@ static int createSuperTable(TAOS * taos, char* dbName,
}
snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols);
- verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, superTbl->colsOfCreateChildTable);
+ verbosePrint("%s() LN%d: %s\n",
+ __func__, __LINE__, superTbl->colsOfCreateChildTable);
if (superTbl->tagCount == 0) {
errorPrint("%s() LN%d, super table tag count is %d\n",
@@ -2404,7 +2443,8 @@ static int createSuperTable(TAOS * taos, char* dbName,
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42;
} else {
taos_close(taos);
- printf("config error tag type : %s\n", dataType);
+ errorPrint("%s() LN%d, config error tag type : %s\n",
+ __func__, __LINE__, dataType);
exit(-1);
}
}
@@ -2565,16 +2605,13 @@ static int createDatabasesAndStables() {
static void* createTable(void *sarg)
{
- threadInfo *winfo = (threadInfo *)sarg;
- SSuperTable* superTblInfo = winfo->superTblInfo;
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
int64_t lastPrintTime = taosGetTimestampMs();
int buff_len;
- if (superTblInfo)
- buff_len = superTblInfo->maxSqlLen;
- else
- buff_len = BUFFER_SIZE;
+ buff_len = BUFFER_SIZE / 8;
char *buffer = calloc(buff_len, 1);
if (buffer == NULL) {
@@ -2587,15 +2624,15 @@ static void* createTable(void *sarg)
verbosePrint("%s() LN%d: Creating table from %d to %d\n",
__func__, __LINE__,
- winfo->start_table_from, winfo->end_table_to);
+ pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- for (int i = winfo->start_table_from; i <= winfo->end_table_to; i++) {
+ for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) {
if (0 == g_Dbs.use_metric) {
snprintf(buffer, buff_len,
"create table if not exists %s.%s%d %s;",
- winfo->db_name,
+ pThreadInfo->db_name,
g_args.tb_prefix, i,
- winfo->cols);
+ pThreadInfo->cols);
} else {
if (superTblInfo == NULL) {
errorPrint("%s() LN%d, use metric, but super table info is NULL\n",
@@ -2622,15 +2659,15 @@ static void* createTable(void *sarg)
return NULL;
}
len += snprintf(buffer + len,
- superTblInfo->maxSqlLen - len,
+ buff_len - len,
"if not exists %s.%s%d using %s.%s tags %s ",
- winfo->db_name, superTblInfo->childTblPrefix,
- i, winfo->db_name,
+ pThreadInfo->db_name, superTblInfo->childTblPrefix,
+ i, pThreadInfo->db_name,
superTblInfo->sTblName, tagsValBuf);
free(tagsValBuf);
batchNum++;
if ((batchNum < superTblInfo->batchCreateTableNum)
- && ((superTblInfo->maxSqlLen - len)
+ && ((buff_len - len)
>= (superTblInfo->lenOfTagOfOneRow + 256))) {
continue;
}
@@ -2639,7 +2676,7 @@ static void* createTable(void *sarg)
len = 0;
verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer);
- if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)){
+ if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)){
errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer);
free(buffer);
return NULL;
@@ -2648,14 +2685,14 @@ static void* createTable(void *sarg)
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] already create %d - %d tables\n",
- winfo->threadID, winfo->start_table_from, i);
+ pThreadInfo->threadID, pThreadInfo->start_table_from, i);
lastPrintTime = currentPrintTime;
}
}
if (0 != len) {
verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer);
- if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)) {
+ if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)) {
errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer);
}
}
@@ -2702,7 +2739,8 @@ static int startMultiThreadCreateChildTable(
db_name,
g_Dbs.port);
if (t_info->taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+ errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n",
+ __func__, __LINE__, taos_errstr(NULL));
free(pids);
free(infos);
return -1;
@@ -2763,35 +2801,35 @@ static void createChildTables() {
}
}
} else {
- // normal table
- len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP");
- int j = 0;
- while (g_args.datatype[j]) {
- if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0)
- || (strncasecmp(g_args.datatype[j],
- "NCHAR", strlen("NCHAR")) == 0)) {
- snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
- ", COL%d %s(60)", j, g_args.datatype[j]);
- } else {
- snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
- ", COL%d %s", j, g_args.datatype[j]);
- }
- len = strlen(tblColsBuf);
- j++;
- }
+ // normal table
+ len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP");
+ int j = 0;
+ while(g_args.datatype[j]) {
+ if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0)
+ || (strncasecmp(g_args.datatype[j],
+ "NCHAR", strlen("NCHAR")) == 0)) {
+ snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
+ ", COL%d %s(60)", j, g_args.datatype[j]);
+ } else {
+ snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
+ ", COL%d %s", j, g_args.datatype[j]);
+ }
+ len = strlen(tblColsBuf);
+ j++;
+ }
- snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");
+ snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");
- verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n",
- __func__, __LINE__,
- g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
- startMultiThreadCreateChildTable(
- tblColsBuf,
- g_Dbs.threadCountByCreateTbl,
- 0,
- g_args.num_of_tables,
- g_Dbs.db[i].dbName,
- NULL);
+ verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n",
+ __func__, __LINE__,
+ g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
+ startMultiThreadCreateChildTable(
+ tblColsBuf,
+ g_Dbs.threadCountByCreateTbl,
+ 0,
+ g_args.num_of_tables,
+ g_Dbs.db[i].dbName,
+ NULL);
}
}
}
@@ -2825,7 +2863,7 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
return -1;
}
- while ((readLen = tgetline(&line, &n, fp)) != -1) {
+ while((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
line[--readLen] = 0;
}
@@ -2889,7 +2927,7 @@ static int readSampleFromCsvFileToMem(
assert(superTblInfo->sampleDataBuf);
memset(superTblInfo->sampleDataBuf, 0,
MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow);
- while (1) {
+ while(1) {
readLen = tgetline(&line, &n, fp);
if (-1 == readLen) {
if(0 != fseek(fp, 0, SEEK_SET)) {
@@ -2915,9 +2953,6 @@ static int readSampleFromCsvFileToMem(
continue;
}
- verbosePrint("readLen=%ld stb->lenOfOneRow=%d getRows=%d\n", (long)readLen,
- superTblInfo->lenOfOneRow, getRows);
-
memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow,
line, readLen);
getRows++;
@@ -2948,7 +2983,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
int columnSize = cJSON_GetArraySize(columns);
- if (columnSize > MAX_COLUMN_COUNT) {
+ if ((columnSize + 1/* ts */) > MAX_COLUMN_COUNT) {
errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
__func__, __LINE__, MAX_COLUMN_COUNT);
goto PARSE_OVER;
@@ -2968,7 +3003,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (countObj && countObj->type == cJSON_Number) {
count = countObj->valueint;
} else if (countObj && countObj->type != cJSON_Number) {
- errorPrint("%s() LN%d, failed to read json, column count not found\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, column count not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
count = 1;
@@ -2977,8 +3013,10 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
// column info
memset(&columnCase, 0, sizeof(StrColumn));
cJSON *dataType = cJSON_GetObjectItem(column, "type");
- if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) {
- errorPrint("%s() LN%d: failed to read json, column type not found\n", __func__, __LINE__);
+ if (!dataType || dataType->type != cJSON_String
+ || dataType->valuestring == NULL) {
+ errorPrint("%s() LN%d: failed to read json, column type not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
//tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
@@ -2988,7 +3026,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (dataLen && dataLen->type == cJSON_Number) {
columnCase.dataLen = dataLen->valueint;
} else if (dataLen && dataLen->type != cJSON_Number) {
- debugPrint("%s() LN%d: failed to read json, column len not found\n", __func__, __LINE__);
+ debugPrint("%s() LN%d: failed to read json, column len not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
columnCase.dataLen = 8;
@@ -3001,6 +3040,13 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
index++;
}
}
+
+ if ((index + 1 /* ts */) > MAX_COLUMN_COUNT) {
+ errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
+ __func__, __LINE__, MAX_COLUMN_COUNT);
+ goto PARSE_OVER;
+ }
+
superTbls->columnCount = index;
count = 1;
@@ -3008,13 +3054,15 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
// tags
cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
if (!tags || tags->type != cJSON_Array) {
- debugPrint("%s() LN%d, failed to read json, tags not found\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, tags not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
int tagSize = cJSON_GetArraySize(tags);
if (tagSize > MAX_TAG_COUNT) {
- debugPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", __func__, __LINE__, MAX_TAG_COUNT);
+ errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
+ __func__, __LINE__, MAX_TAG_COUNT);
goto PARSE_OVER;
}
@@ -3037,8 +3085,10 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
// column info
memset(&columnCase, 0, sizeof(StrColumn));
cJSON *dataType = cJSON_GetObjectItem(tag, "type");
- if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) {
- printf("ERROR: failed to read json, tag type not found\n");
+ if (!dataType || dataType->type != cJSON_String
+ || dataType->valuestring == NULL) {
+ errorPrint("%s() LN%d, failed to read json, tag type not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
@@ -3047,26 +3097,37 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (dataLen && dataLen->type == cJSON_Number) {
columnCase.dataLen = dataLen->valueint;
} else if (dataLen && dataLen->type != cJSON_Number) {
- printf("ERROR: failed to read json, column len not found\n");
+ errorPrint("%s() LN%d, failed to read json, column len not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
columnCase.dataLen = 0;
}
for (int n = 0; n < count; ++n) {
- tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
+ tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
+ MAX_TB_NAME_SIZE);
superTbls->tags[index].dataLen = columnCase.dataLen;
index++;
}
}
+
+ if (index > MAX_TAG_COUNT) {
+ errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
+ __func__, __LINE__, MAX_TAG_COUNT);
+ goto PARSE_OVER;
+ }
+
superTbls->tagCount = index;
+ if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > MAX_COLUMN_COUNT) {
+ errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
+ __func__, __LINE__, MAX_COLUMN_COUNT);
+ goto PARSE_OVER;
+ }
ret = true;
PARSE_OVER:
- //free(content);
- //cJSON_Delete(root);
- //fclose(fp);
return ret;
}
@@ -3143,7 +3204,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!gInsertInterval) {
g_args.insert_interval = 0;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
@@ -3153,9 +3215,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
// rows per table need be less than insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
- printf("NOTICE: interlace rows value %d > num_of_records_per_request %d\n\n",
+ printf("NOTICE: interlace rows value %d > num_of_records_per_req %d\n\n",
g_args.interlace_rows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_request %d\n\n",
+ printf(" interlace rows value will be set to num_of_records_per_req %d\n\n",
g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
@@ -3164,7 +3226,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!interlaceRows) {
g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
} else {
- errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
@@ -3174,7 +3237,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!maxSqlLen) {
g_args.max_sql_len = TSDB_PAYLOAD_SIZE;
} else {
- errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
@@ -3182,9 +3246,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (numRecPerReq && numRecPerReq->type == cJSON_Number) {
g_args.num_of_RPR = numRecPerReq->valueint;
} else if (!numRecPerReq) {
- g_args.num_of_RPR = 0xffff;
+ g_args.num_of_RPR = INT32_MAX;
} else {
- errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
@@ -3477,9 +3542,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (childTblExists
&& childTblExists->type == cJSON_String
&& childTblExists->valuestring != NULL) {
- if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) {
+ if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3))
+ && (g_Dbs.db[i].drop == false)) {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS;
- } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) {
+ } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2)
+ || (g_Dbs.db[i].drop == true))) {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
} else {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
@@ -3508,7 +3575,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!dataSource) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
} else {
- errorPrint("%s() LN%d, failed to read json, data_source not found\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, data_source not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
@@ -3525,18 +3593,20 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
}
cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit");
- if (childTbl_limit) {
+ if ((childTbl_limit) && (g_Dbs.db[i].drop != true)
+ && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if (childTbl_limit->type != cJSON_Number) {
printf("ERROR: failed to read json, childtable_limit\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint;
} else {
- g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result
+ g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid.
}
cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset");
- if (childTbl_offset) {
+ if ((childTbl_offset) && (g_Dbs.db[i].drop != true)
+ && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) {
printf("ERROR: failed to read json, childtable_offset\n");
goto PARSE_OVER;
@@ -3581,7 +3651,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
}
cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file");
- if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) {
+ if (sampleFile && sampleFile->type == cJSON_String
+ && sampleFile->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile,
sampleFile->valuestring, MAX_FILE_NAME_LEN);
} else if (!sampleFile) {
@@ -3613,17 +3684,18 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
int32_t len = maxSqlLen->valueint;
if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
len = TSDB_MAX_ALLOWED_SQL_LEN;
- } else if (len < TSDB_MAX_SQL_LEN) {
- len = TSDB_MAX_SQL_LEN;
+ } else if (len < 5) {
+ len = 5;
}
g_Dbs.db[i].superTbls[j].maxSqlLen = len;
} else if (!maxSqlLen) {
g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len;
} else {
- printf("ERROR: failed to read json, maxSqlLen not found\n");
+ errorPrint("%s() LN%d, failed to read json, maxSqlLen input mistake\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
-
+/*
cJSON *multiThreadWriteOneTbl =
cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes
if (multiThreadWriteOneTbl
@@ -3640,15 +3712,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n");
goto PARSE_OVER;
}
-
+*/
cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) {
- printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_request %d\n\n",
+ printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_req %d\n\n",
i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_request %d\n\n",
+ printf(" interlace rows value will be set to num_of_records_per_req %d\n\n",
g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
@@ -3724,9 +3796,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
ret = true;
PARSE_OVER:
- //free(content);
- //cJSON_Delete(root);
- //fclose(fp);
return ret;
}
@@ -3792,7 +3861,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (!gQueryTimes) {
g_args.query_times = 1;
} else {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
@@ -3830,35 +3900,45 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.specifiedQueryInfo.rate = 0;
}
- cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, "query_times");
+ cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery,
+ "query_times");
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint;
} else if (!specifiedQueryTimes) {
g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times;
} else {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
if (concurrent && concurrent->type == cJSON_Number) {
g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
+ if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
+ errorPrint("%s() LN%d, query concurrent %d is not correct.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.concurrent);
+ goto PARSE_OVER;
+ }
} else if (!concurrent) {
g_queryInfo.specifiedQueryInfo.concurrent = 1;
}
- cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode");
- if (mode && mode->type == cJSON_String && mode->valuestring != NULL) {
- if (0 == strcmp("sync", mode->valuestring)) {
- g_queryInfo.specifiedQueryInfo.subscribeMode = 0;
- } else if (0 == strcmp("async", mode->valuestring)) {
- g_queryInfo.specifiedQueryInfo.subscribeMode = 1;
+ cJSON* queryMode = cJSON_GetObjectItem(specifiedQuery, "mode");
+ if (queryMode && queryMode->type == cJSON_String
+ && queryMode->valuestring != NULL) {
+ if (0 == strcmp("sync", queryMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE;
+ } else if (0 == strcmp("async", queryMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE;
} else {
- printf("ERROR: failed to read json, subscribe mod error\n");
+ errorPrint("%s() LN%d, failed to read json, query mode input error\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
} else {
- g_queryInfo.specifiedQueryInfo.subscribeMode = 0;
+ g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE;
}
cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval");
@@ -3905,12 +3985,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (!superSqls) {
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (superSqls->type != cJSON_Array) {
- printf("ERROR: failed to read json, super sqls not found\n");
+ errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(superSqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) {
- printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT);
+ errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
+ __func__, __LINE__, MAX_QUERY_SQL_COUNT);
goto PARSE_OVER;
}
@@ -3962,7 +4044,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (!superQueryTimes) {
g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
} else {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
@@ -3981,25 +4064,30 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
//}
cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname");
- if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) {
- tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE);
+ if (stblname && stblname->type == cJSON_String
+ && stblname->valuestring != NULL) {
+ tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring,
+ MAX_TB_NAME_SIZE);
} else {
- printf("ERROR: failed to read json, super table name not found\n");
+ errorPrint("%s() LN%d, failed to read json, super table name input error\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
cJSON* submode = cJSON_GetObjectItem(superQuery, "mode");
- if (submode && submode->type == cJSON_String && submode->valuestring != NULL) {
+ if (submode && submode->type == cJSON_String
+ && submode->valuestring != NULL) {
if (0 == strcmp("sync", submode->valuestring)) {
- g_queryInfo.superQueryInfo.subscribeMode = 0;
+ g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
} else if (0 == strcmp("async", submode->valuestring)) {
- g_queryInfo.superQueryInfo.subscribeMode = 1;
+ g_queryInfo.superQueryInfo.mode = ASYNC_QUERY_MODE;
} else {
- printf("ERROR: failed to read json, subscribe mod error\n");
+ errorPrint("%s() LN%d, failed to read json, query mode input error\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
} else {
- g_queryInfo.superQueryInfo.subscribeMode = 0;
+ g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
}
cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval");
@@ -4012,7 +4100,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart");
- if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) {
+ if (subrestart && subrestart->type == cJSON_String
+ && subrestart->valuestring != NULL) {
if (0 == strcmp("yes", subrestart->valuestring)) {
g_queryInfo.superQueryInfo.subscribeRestart = 1;
} else if (0 == strcmp("no", subrestart->valuestring)) {
@@ -4046,12 +4135,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (!subsqls) {
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (subsqls->type != cJSON_Array) {
- printf("ERROR: failed to read json, super sqls not found\n");
+ errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(subsqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) {
- printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT);
+ errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
+ __func__, __LINE__, MAX_QUERY_SQL_COUNT);
goto PARSE_OVER;
}
@@ -4061,19 +4152,25 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (sql == NULL) continue;
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
- if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
- printf("ERROR: failed to read json, sql not found\n");
+ if (!sqlStr || sqlStr->type != cJSON_String
+ || sqlStr->valuestring == NULL) {
+ errorPrint("%s() LN%d, failed to read json, sql not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
- tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+ tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
+ MAX_QUERY_SQL_LENGTH);
cJSON *result = cJSON_GetObjectItem(sql, "result");
- if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){
- tstrncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
+ if (result != NULL && result->type == cJSON_String
+ && result->valuestring != NULL){
+ tstrncpy(g_queryInfo.superQueryInfo.result[j],
+ result->valuestring, MAX_FILE_NAME_LEN);
} else if (NULL == result) {
memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
} else {
- printf("ERROR: failed to read json, sub query result file not found\n");
+ errorPrint("%s() LN%d, failed to read json, sub query result file not found\n",
+ __func__, __LINE__);
goto PARSE_OVER;
}
}
@@ -4083,9 +4180,6 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
ret = true;
PARSE_OVER:
- //free(content);
- //cJSON_Delete(root);
- //fclose(fp);
return ret;
}
@@ -4255,13 +4349,20 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo
"%f, ", rand_double());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"smallint", 8)) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_smallint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_tinyint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_bool());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint());
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%d, ", rand_smallint());
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "tinyint", strlen("tinyint"))) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%d, ", rand_tinyint());
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "bool", strlen("bool"))) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%d, ", rand_bool());
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "timestamp", strlen("timestamp"))) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%"PRId64", ", rand_bigint());
} else {
errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType);
return -1;
@@ -4353,7 +4454,8 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
int ret = readSampleFromCsvFileToMem(superTblInfo);
if (0 != ret) {
- errorPrint("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, read sample from csv file failed.\n",
+ __func__, __LINE__);
tmfree(sampleDataBuf);
superTblInfo->sampleDataBuf = NULL;
return -1;
@@ -4375,7 +4477,8 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k)
} else {
if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) {
affectedRows = -1;
- printf("========restful return fail, threadID[%d]\n", pThreadInfo->threadID);
+ printf("========restful return fail, threadID[%d]\n",
+ pThreadInfo->threadID);
} else {
affectedRows = k;
}
@@ -4411,13 +4514,15 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq)
}
}
-static int generateDataTail(char *tableName, int32_t tableSeq,
- threadInfo* pThreadInfo, SSuperTable* superTblInfo,
+static int generateDataTail(
+ SSuperTable* superTblInfo,
int batch, char* buffer, int remainderBufLen, int64_t insertRows,
int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) {
int len = 0;
int ncols_per_record = 1; // count first col ts
+ char *pstr = buffer;
+
if (superTblInfo == NULL) {
int datatypeSeq = 0;
while(g_args.datatype[datatypeSeq]) {
@@ -4446,29 +4551,29 @@ static int generateDataTail(char *tableName, int32_t tableSeq,
pSamplePos);
} else if (0 == strncasecmp(superTblInfo->dataSource,
"rand", strlen("rand"))) {
+
+ int randTail = superTblInfo->timeStampStep * k;
+ if (superTblInfo->disorderRatio > 0) {
int rand_num = taosRandom() % 100;
- if (0 != superTblInfo->disorderRatio
- && rand_num < superTblInfo->disorderRatio) {
- int64_t d = startTime
- + superTblInfo->timeStampStep * k
- - taosRandom() % superTblInfo->disorderRange;
- retLen = generateRowData(
+ if(rand_num < superTblInfo->disorderRatio) {
+ randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1);
+ debugPrint("rand data generated, back %d\n", randTail);
+ }
+ }
+
+ uint64_t d = startTime
+ + randTail;
+ retLen = generateRowData(
data,
d,
superTblInfo);
- } else {
- retLen = generateRowData(
- data,
- startTime + superTblInfo->timeStampStep * k,
- superTblInfo);
- }
}
if (retLen > remainderBufLen) {
break;
}
- buffer += snprintf(buffer, retLen + 1, "%s", data);
+ pstr += snprintf(pstr, retLen + 1, "%s", data);
k++;
len += retLen;
remainderBufLen -= retLen;
@@ -4477,25 +4582,26 @@ static int generateDataTail(char *tableName, int32_t tableSeq,
int lenOfBinary = g_args.len_of_binary;
int rand_num = taosRandom() % 100;
+ int randTail;
+
if ((g_args.disorderRatio != 0)
&& (rand_num < g_args.disorderRatio)) {
-
- int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k
- - taosRandom() % g_args.disorderRange;
-
- retLen = generateData(data, data_type,
- ncols_per_record, d, lenOfBinary);
+ randTail = (DEFAULT_TIMESTAMP_STEP * k
+ + (taosRandom() % g_args.disorderRange + 1)) * (-1);
+ debugPrint("rand data generated, back %d\n", randTail);
} else {
- retLen = generateData(data, data_type,
- ncols_per_record,
- startTime + DEFAULT_TIMESTAMP_STEP * k,
- lenOfBinary);
+ randTail = DEFAULT_TIMESTAMP_STEP * k;
}
+ retLen = generateData(data, data_type,
+ ncols_per_record,
+ startTime + randTail,
+ lenOfBinary);
+
if (len > remainderBufLen)
break;
- buffer += sprintf(buffer, " %s", data);
+ pstr += sprintf(pstr, " %s", data);
k++;
len += retLen;
remainderBufLen -= retLen;
@@ -4516,9 +4622,14 @@ static int generateDataTail(char *tableName, int32_t tableSeq,
}
static int generateSQLHead(char *tableName, int32_t tableSeq,
- threadInfo* pThreadInfo, SSuperTable* superTblInfo, char *buffer)
+ threadInfo* pThreadInfo, SSuperTable* superTblInfo,
+ char *buffer, int remainderBufLen)
{
int len;
+
+#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into ..
+ char headBuf[HEAD_BUFF_LEN];
+
if (superTblInfo) {
if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
char* tagsValBuf = NULL;
@@ -4530,13 +4641,15 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
tableSeq % superTblInfo->tagSampleCount);
}
if (NULL == tagsValBuf) {
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
return -1;
}
- len = snprintf(buffer,
- superTblInfo->maxSqlLen,
- "insert into %s.%s using %s.%s tags %s values",
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s using %s.%s tags %s values",
pThreadInfo->db_name,
tableName,
pThreadInfo->db_name,
@@ -4544,34 +4657,101 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
tagsValBuf);
tmfree(tagsValBuf);
} else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) {
- len = snprintf(buffer,
- superTblInfo->maxSqlLen,
- "insert into %s.%s values",
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s values",
pThreadInfo->db_name,
tableName);
} else {
- len = snprintf(buffer,
- superTblInfo->maxSqlLen,
- "insert into %s.%s values",
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s values",
pThreadInfo->db_name,
tableName);
}
} else {
- len = snprintf(buffer,
- g_args.max_sql_len,
- "insert into %s.%s values",
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s values",
pThreadInfo->db_name,
tableName);
}
+ if (len > remainderBufLen)
+ return -1;
+
+ tstrncpy(buffer, headBuf, len + 1);
+
return len;
}
-static int generateProgressiveDataBuffer(char *pTblName,
+static int generateInterlaceDataBuffer(
+ char *tableName, int batchPerTbl, int i, int batchPerTblTimes,
int32_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
int64_t insertRows,
- int64_t startFrom, int64_t startTime, int *pSamplePos)
+ int64_t startTime,
+ int *pRemainderBufLen)
+{
+ assert(buffer);
+ char *pstr = buffer;
+ SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+
+ int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo,
+ superTblInfo, pstr, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+ // generate data buffer
+ verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n",
+ pThreadInfo->threadID, __func__, __LINE__, i, buffer);
+
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int dataLen = 0;
+
+ verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ i, batchPerTblTimes, batchPerTbl);
+
+ if (superTblInfo) {
+ if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
+ startTime = taosGetTimestamp(pThreadInfo->time_precision);
+ }
+ } else {
+ startTime = 1500000000000;
+ }
+
+ int k = generateDataTail(
+ superTblInfo,
+ batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
+ startTime,
+ &(pThreadInfo->samplePos), &dataLen);
+
+ if (k == batchPerTbl) {
+ pstr += dataLen;
+ *pRemainderBufLen -= dataLen;
+ } else {
+ pstr -= headLen;
+ pstr[0] = '\0';
+ k = 0;
+ }
+
+ return k;
+}
+
+static int generateProgressiveDataBuffer(
+ char *tableName,
+ int32_t tableSeq,
+ threadInfo *pThreadInfo, char *buffer,
+ int64_t insertRows,
+ int64_t startFrom, int64_t startTime, int *pSamplePos,
+ int *pRemainderBufLen)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
@@ -4586,23 +4766,24 @@ static int generateProgressiveDataBuffer(char *pTblName,
}
assert(buffer != NULL);
-
- int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
- int remainderBufLen = maxSqlLen;
-
- memset(buffer, 0, maxSqlLen);
-
char *pstr = buffer;
- int headLen = generateSQLHead(pTblName, tableSeq, pThreadInfo, superTblInfo,
- buffer);
- pstr += headLen;
- remainderBufLen -= headLen;
+ int k = 0;
+
+ memset(buffer, 0, *pRemainderBufLen);
+
+ int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo,
+ buffer, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
- int k;
int dataLen;
- k = generateDataTail(pTblName, tableSeq, pThreadInfo, superTblInfo,
- g_args.num_of_RPR, pstr, remainderBufLen, insertRows, startFrom,
+ k = generateDataTail(superTblInfo,
+ g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom,
startTime,
pSamplePos, &dataLen);
@@ -4614,8 +4795,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->threadID, __func__, __LINE__);
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+
+ int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows;
+ if (interlaceRows > insertRows)
+ interlaceRows = insertRows;
+
+ if (interlaceRows > g_args.num_of_RPR)
+ interlaceRows = g_args.num_of_RPR;
+
int insertMode;
if (interlaceRows > 0) {
@@ -4627,11 +4816,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
// TODO: prompt tbl count multple interlace rows and batch
//
- char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1);
+ int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ char* buffer = calloc(maxSqlLen, 1);
if (NULL == buffer) {
- errorPrint( "Failed to alloc %d Bytes, reason:%s\n",
- superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len,
- strerror(errno));
+ errorPrint( "%s() LN%d, Failed to alloc %d Bytes, reason:%s\n",
+ __func__, __LINE__, maxSqlLen, strerror(errno));
return NULL;
}
@@ -4642,7 +4831,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
int nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
- int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
int insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
uint64_t st = 0;
@@ -4660,18 +4848,14 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
int64_t startTime = pThreadInfo->start_time;
- int batchPerTblTimes;
- int batchPerTbl;
-
assert(pThreadInfo->ntables > 0);
- if (interlaceRows > g_args.num_of_RPR)
- interlaceRows = g_args.num_of_RPR;
+ int batchPerTbl = interlaceRows;
- batchPerTbl = interlaceRows;
+ int batchPerTblTimes;
if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes =
- (g_args.num_of_RPR / (interlaceRows * pThreadInfo->ntables)) + 1;
+ g_args.num_of_RPR / interlaceRows;
} else {
batchPerTblTimes = 1;
}
@@ -4680,8 +4864,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
bool flagSleep = true;
int sleepTimeTotal = 0;
- int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
- int remainderBufLen;
+ char *strInsertInto = "insert into ";
+ int nInsertBufLen = strlen(strInsertInto);
while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
if ((flagSleep) && (insert_interval)) {
@@ -4690,9 +4874,14 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
// generate data
memset(buffer, 0, maxSqlLen);
- remainderBufLen = maxSqlLen;
+ int remainderBufLen = maxSqlLen;
char *pstr = buffer;
+
+ int len = snprintf(pstr, nInsertBufLen + 1, "%s", strInsertInto);
+ pstr += len;
+ remainderBufLen -= len;
+
int recOfBatch = 0;
for (int i = 0; i < batchPerTblTimes; i ++) {
@@ -4704,58 +4893,32 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
return NULL;
}
- int headLen;
- if (i == 0) {
- headLen = generateSQLHead(tableName, tableSeq, pThreadInfo,
- superTblInfo, pstr);
- } else {
- headLen = snprintf(pstr, TSDB_TABLE_NAME_LEN, "%s.%s values",
- pThreadInfo->db_name,
- tableName);
- }
-
- // generate data buffer
- verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n",
- pThreadInfo->threadID, __func__, __LINE__, i, buffer);
-
- pstr += headLen;
- remainderBufLen -= headLen;
-
- int dataLen = 0;
-
- verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n",
- pThreadInfo->threadID, __func__, __LINE__,
- i, batchPerTblTimes, batchPerTbl);
-
- if (superTblInfo) {
- if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
- startTime = taosGetTimestamp(pThreadInfo->time_precision);
- }
- } else {
- startTime = 1500000000000;
- }
- int generated = generateDataTail(
- tableName, tableSeq, pThreadInfo, superTblInfo,
- batchPerTbl, pstr, remainderBufLen, insertRows, 0,
+ int oldRemainderLen = remainderBufLen;
+ int generated = generateInterlaceDataBuffer(
+ tableName, batchPerTbl, i, batchPerTblTimes,
+ tableSeq,
+ pThreadInfo, pstr,
+ insertRows,
startTime,
- &(pThreadInfo->samplePos), &dataLen);
+ &remainderBufLen);
if (generated < 0) {
debugPrint("[%d] %s() LN%d, generated data is %d\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
goto free_and_statistics_interlace;
+ } else if (generated == 0) {
+ break;
}
- pstr += dataLen;
- remainderBufLen -= dataLen;
+ tableSeq ++;
recOfBatch += batchPerTbl;
+ pstr += (oldRemainderLen - remainderBufLen);
// startTime += batchPerTbl * superTblInfo->timeStampStep;
pThreadInfo->totalInsertRows += batchPerTbl;
verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
pThreadInfo->threadID, __func__, __LINE__,
batchPerTbl, recOfBatch);
- tableSeq ++;
if (insertMode == INTERLACE_INSERT_MODE) {
if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
// turn to first table
@@ -4861,11 +5024,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+ int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
- char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1);
+ char* buffer = calloc(maxSqlLen, 1);
if (NULL == buffer) {
errorPrint( "Failed to alloc %d Bytes, reason:%s\n",
- superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len,
+ maxSqlLen,
strerror(errno));
return NULL;
}
@@ -4908,10 +5072,20 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
__func__, __LINE__,
pThreadInfo->threadID, tableSeq, tableName);
+ int remainderBufLen = maxSqlLen;
+ char *pstr = buffer;
+ int nInsertBufLen = strlen("insert into ");
+
+ int len = snprintf(pstr, nInsertBufLen + 1, "%s", "insert into ");
+
+ pstr += len;
+ remainderBufLen -= len;
+
int generated = generateProgressiveDataBuffer(
- tableName, tableSeq, pThreadInfo, buffer, insertRows,
+ tableName, tableSeq, pThreadInfo, pstr, insertRows,
i, start_time,
- &(pThreadInfo->samplePos));
+ &(pThreadInfo->samplePos),
+ &remainderBufLen);
if (generated > 0)
i += generated;
else
@@ -4964,11 +5138,13 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
*/
} // num_of_DPT
- if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo &&
+ if (g_args.verbose_print) {
+ if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo &&
(0 == strncasecmp(
superTblInfo->dataSource, "sample", strlen("sample")))) {
- printf("%s() LN%d samplePos=%d\n",
+ verbosePrint("%s() LN%d samplePos=%d\n",
__func__, __LINE__, pThreadInfo->samplePos);
+ }
}
} // tableSeq
@@ -4984,45 +5160,45 @@ free_and_statistics_2:
static void* syncWrite(void *sarg) {
- threadInfo *winfo = (threadInfo *)sarg;
- SSuperTable* superTblInfo = winfo->superTblInfo;
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows;
if (interlaceRows > 0) {
// interlace mode
- return syncWriteInterlace(winfo);
+ return syncWriteInterlace(pThreadInfo);
} else {
// progressive mode
- return syncWriteProgressive(winfo);
+ return syncWriteProgressive(pThreadInfo);
}
}
static void callBack(void *param, TAOS_RES *res, int code) {
- threadInfo* winfo = (threadInfo*)param;
- SSuperTable* superTblInfo = winfo->superTblInfo;
+ threadInfo* pThreadInfo = (threadInfo*)param;
+ SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
int insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
if (insert_interval) {
- winfo->et = taosGetTimestampUs();
- if (((winfo->et - winfo->st)/1000) < insert_interval) {
- taosMsleep(insert_interval - (winfo->et - winfo->st)/1000); // ms
+ pThreadInfo->et = taosGetTimestampUs();
+ if (((pThreadInfo->et - pThreadInfo->st)/1000) < insert_interval) {
+ taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)/1000); // ms
}
}
- char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen);
+ char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen);
char data[MAX_DATA_SIZE];
char *pstr = buffer;
- pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix,
- winfo->start_table_from);
-// if (winfo->counter >= winfo->superTblInfo->insertRows) {
- if (winfo->counter >= g_args.num_of_RPR) {
- winfo->start_table_from++;
- winfo->counter = 0;
+ pstr += sprintf(pstr, "insert into %s.%s%d values", pThreadInfo->db_name, pThreadInfo->tb_prefix,
+ pThreadInfo->start_table_from);
+// if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) {
+ if (pThreadInfo->counter >= g_args.num_of_RPR) {
+ pThreadInfo->start_table_from++;
+ pThreadInfo->counter = 0;
}
- if (winfo->start_table_from > winfo->end_table_to) {
- tsem_post(&winfo->lock_sem);
+ if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) {
+ tsem_post(&pThreadInfo->lock_sem);
free(buffer);
taos_free_result(res);
return;
@@ -5030,46 +5206,46 @@ static void callBack(void *param, TAOS_RES *res, int code) {
for (int i = 0; i < g_args.num_of_RPR; i++) {
int rand_num = taosRandom() % 100;
- if (0 != winfo->superTblInfo->disorderRatio
- && rand_num < winfo->superTblInfo->disorderRatio) {
- int64_t d = winfo->lastTs - taosRandom() % winfo->superTblInfo->disorderRange;
- generateRowData(data, d, winfo->superTblInfo);
+ if (0 != pThreadInfo->superTblInfo->disorderRatio
+ && rand_num < pThreadInfo->superTblInfo->disorderRatio) {
+ int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
+ generateRowData(data, d, pThreadInfo->superTblInfo);
} else {
- generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo);
+ generateRowData(data, pThreadInfo->lastTs += 1000, pThreadInfo->superTblInfo);
}
pstr += sprintf(pstr, "%s", data);
- winfo->counter++;
+ pThreadInfo->counter++;
- if (winfo->counter >= winfo->superTblInfo->insertRows) {
+ if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) {
break;
}
}
if (insert_interval) {
- winfo->st = taosGetTimestampUs();
+ pThreadInfo->st = taosGetTimestampUs();
}
- taos_query_a(winfo->taos, buffer, callBack, winfo);
+ taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo);
free(buffer);
taos_free_result(res);
}
static void *asyncWrite(void *sarg) {
- threadInfo *winfo = (threadInfo *)sarg;
- SSuperTable* superTblInfo = winfo->superTblInfo;
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- winfo->st = 0;
- winfo->et = 0;
- winfo->lastTs = winfo->start_time;
+ pThreadInfo->st = 0;
+ pThreadInfo->et = 0;
+ pThreadInfo->lastTs = pThreadInfo->start_time;
int insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
if (insert_interval) {
- winfo->st = taosGetTimestampUs();
+ pThreadInfo->st = taosGetTimestampUs();
}
- taos_query_a(winfo->taos, "show databases", callBack, winfo);
+ taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo);
- tsem_wait(&(winfo->lock_sem));
+ tsem_wait(&(pThreadInfo->lock_sem));
return NULL;
}
@@ -5162,13 +5338,15 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int limit, offset;
if ((superTblInfo->childTblExists == TBL_NO_EXISTS) &&
- ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit != 0))) {
+ ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) {
printf("WARNING: offset and limit will not be used since the child tables are not exists!\n");
}
if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
&& (superTblInfo->childTblOffset >= 0)) {
- if (superTblInfo->childTblLimit < 0) {
+ if ((superTblInfo->childTblLimit < 0)
+ || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
+ > (superTblInfo->childTblCount))) {
superTblInfo->childTblLimit =
superTblInfo->childTblCount - superTblInfo->childTblOffset;
}
@@ -5410,7 +5588,7 @@ static void *readTable(void *sarg) {
return NULL;
}
- while (taos_fetch_row(pSql) != NULL) {
+ while(taos_fetch_row(pSql) != NULL) {
count++;
}
@@ -5486,7 +5664,7 @@ static void *readMetric(void *sarg) {
return NULL;
}
int count = 0;
- while (taos_fetch_row(pSql) != NULL) {
+ while(taos_fetch_row(pSql) != NULL) {
count++;
}
t = getCurrentTimeUs() - t;
@@ -5597,10 +5775,10 @@ static int insertTestProcess() {
return 0;
}
-static void *superQueryProcess(void *sarg) {
- threadInfo *winfo = (threadInfo *)sarg;
+static void *specifiedTableQuery(void *sarg) {
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
- if (winfo->taos == NULL) {
+ if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
g_queryInfo.user,
@@ -5609,17 +5787,17 @@ static void *superQueryProcess(void *sarg) {
g_queryInfo.port);
if (taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- winfo->threadID, taos_errstr(NULL));
+ pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
- winfo->taos = taos;
+ pThreadInfo->taos = taos;
}
}
char sqlStr[MAX_DB_NAME_SIZE + 5];
sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
- taos_close(winfo->taos);
+ if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(pThreadInfo->taos);
errorPrint( "use database %s failed!\n\n",
g_queryInfo.dbName);
return NULL;
@@ -5630,48 +5808,67 @@ static void *superQueryProcess(void *sarg) {
int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
+ int totalQueried = 0;
+ int64_t lastPrintTime = taosGetTimestampMs();
+ int64_t startTs = taosGetTimestampMs();
+
while(queryTimes --) {
if (g_queryInfo.specifiedQueryInfo.rate && (et - st) <
(int64_t)g_queryInfo.specifiedQueryInfo.rate*1000) {
taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms
- //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to);
+ //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
}
st = taosGetTimestampUs();
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
- int64_t t1 = taosGetTimestampUs();
- char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
- if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) {
- sprintf(tmpFile, "%s-%d",
- g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID);
- }
- selectAndGetResult(winfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], tmpFile);
- int64_t t2 = taosGetTimestampUs();
- printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n",
- taosGetSelfPthreadId(), (t2 - t1)/1000000.0);
- } else {
- int64_t t1 = taosGetTimestampUs();
- int retCode = postProceSql(g_queryInfo.host,
- g_queryInfo.port, g_queryInfo.specifiedQueryInfo.sql[i]);
- int64_t t2 = taosGetTimestampUs();
- printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n",
- taosGetSelfPthreadId(), (t2 - t1)/1000000.0);
- if (0 != retCode) {
- printf("====restful return fail, threadID[%d]\n", winfo->threadID);
- return NULL;
- }
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
+ int64_t t1 = taosGetTimestampUs();
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(tmpFile, "%s-%d",
+ g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
}
+ selectAndGetResult(pThreadInfo->taos,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile);
+ int64_t t2 = taosGetTimestampUs();
+ printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n",
+ taosGetSelfPthreadId(), (t2 - t1)/1000000.0);
+ } else {
+ int64_t t1 = taosGetTimestampUs();
+ int retCode = postProceSql(g_queryInfo.host,
+ g_queryInfo.port,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]);
+ if (0 != retCode) {
+ printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
+ return NULL;
+ }
+ int64_t t2 = taosGetTimestampUs();
+ printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n",
+ taosGetSelfPthreadId(), (t2 - t1)/1000000.0);
+
}
+ totalQueried ++;
+ g_queryInfo.specifiedQueryInfo.totalQueried ++;
+
et = taosGetTimestampUs();
printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n",
taosGetSelfPthreadId(), (double)(et - st)/1000.0);
+
+ int64_t currentPrintTime = taosGetTimestampMs();
+ int64_t endTs = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently completed queries: %d, QPS: %10.2f\n",
+ pThreadInfo->threadID,
+ totalQueried,
+ totalQueried/((endTs-startTs)/1000.0));
+ }
+ lastPrintTime = currentPrintTime;
}
return NULL;
}
-static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) {
+static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
char sourceString[32] = "xxxx";
char subTblName[MAX_TB_NAME_SIZE*3];
sprintf(subTblName, "%s.%s",
@@ -5693,11 +5890,11 @@ static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) {
//printf("3: %s\n", outSql);
}
-static void *subQueryProcess(void *sarg) {
+static void *superTableQuery(void *sarg) {
char sqlstr[1024];
- threadInfo *winfo = (threadInfo *)sarg;
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
- if (winfo->taos == NULL) {
+ if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
g_queryInfo.user,
@@ -5706,10 +5903,10 @@ static void *subQueryProcess(void *sarg) {
g_queryInfo.port);
if (taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- winfo->threadID, taos_errstr(NULL));
+ pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
- winfo->taos = taos;
+ pThreadInfo->taos = taos;
}
}
@@ -5717,33 +5914,49 @@ static void *subQueryProcess(void *sarg) {
int64_t et = (int64_t)g_queryInfo.superQueryInfo.rate*1000;
int queryTimes = g_queryInfo.superQueryInfo.queryTimes;
+ int totalQueried = 0;
+ int64_t startTs = taosGetTimestampMs();
+ int64_t lastPrintTime = taosGetTimestampMs();
while(queryTimes --) {
if (g_queryInfo.superQueryInfo.rate
&& (et - st) < (int64_t)g_queryInfo.superQueryInfo.rate*1000) {
taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
- //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to);
+ //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
}
st = taosGetTimestampUs();
- for (int i = winfo->start_table_from; i <= winfo->end_table_to; i++) {
+ for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) {
for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
memset(sqlstr,0,sizeof(sqlstr));
- replaceSubTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
+ replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
sprintf(tmpFile, "%s-%d",
g_queryInfo.superQueryInfo.result[j],
- winfo->threadID);
+ pThreadInfo->threadID);
}
- selectAndGetResult(winfo->taos, sqlstr, tmpFile);
+ selectAndGetResult(pThreadInfo->taos, sqlstr, tmpFile);
+
+ totalQueried++;
+ g_queryInfo.superQueryInfo.totalQueried ++;
+
+ int64_t currentPrintTime = taosGetTimestampMs();
+ int64_t endTs = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently completed queries: %d, QPS: %10.2f\n",
+ pThreadInfo->threadID,
+ totalQueried,
+ totalQueried/((endTs-startTs)/1000.0));
+ }
+ lastPrintTime = currentPrintTime;
}
}
et = taosGetTimestampUs();
printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n",
taosGetSelfPthreadId(),
- winfo->start_table_from,
- winfo->end_table_to,
+ pThreadInfo->start_table_from,
+ pThreadInfo->end_table_to,
(double)(et - st)/1000000.0);
}
@@ -5786,43 +5999,47 @@ static int queryTestProcess() {
pthread_t *pids = NULL;
threadInfo *infos = NULL;
//==== create sub threads for query from specify table
- if (g_queryInfo.specifiedQueryInfo.sqlCount > 0
- && g_queryInfo.specifiedQueryInfo.concurrent > 0) {
+ int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
+ int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
- pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t));
- if (NULL == pids) {
- taos_close(taos);
- ERROR_EXIT("memory allocation failed\n");
- }
- infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo));
- if (NULL == infos) {
+ int64_t startTs = taosGetTimestampMs();
+
+ if ((nSqlCount > 0) && (nConcurrent > 0)) {
+
+ pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t));
+ infos = malloc(nConcurrent * nSqlCount * sizeof(threadInfo));
+
+ if ((NULL == pids) || (NULL == infos)) {
taos_close(taos);
- free(pids);
ERROR_EXIT("memory allocation failed for create threads\n");
}
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) {
- threadInfo *t_info = infos + i;
- t_info->threadID = i;
+ for (int i = 0; i < nConcurrent; i++) {
+ for (int j = 0; j < nSqlCount; j++) {
+ threadInfo *t_info = infos + i * nSqlCount + j;
+ t_info->threadID = i * nSqlCount + j;
+ t_info->querySeq = j;
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
- char sqlStr[MAX_TB_NAME_SIZE*2];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
- if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
+ char sqlStr[MAX_TB_NAME_SIZE*2];
+ sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
+ if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(taos);
free(infos);
free(pids);
errorPrint( "use database %s failed!\n\n",
g_queryInfo.dbName);
return -1;
+ }
}
+
+ t_info->taos = NULL;// TODO: workaround to use separate taos connection;
+
+ pthread_create(pids + i * nSqlCount + j, NULL, specifiedTableQuery,
+ t_info);
}
-
- t_info->taos = NULL;// TODO: workaround to use separate taos connection;
-
- pthread_create(pids + i, NULL, superQueryProcess, t_info);
}
} else {
g_queryInfo.specifiedQueryInfo.concurrent = 0;
@@ -5836,18 +6053,12 @@ static int queryTestProcess() {
if ((g_queryInfo.superQueryInfo.sqlCount > 0)
&& (g_queryInfo.superQueryInfo.threadCnt > 0)) {
pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
- if (NULL == pidsOfSub) {
- free(infos);
- free(pids);
-
- ERROR_EXIT("memory allocation failed for create threads\n");
- }
-
infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
- if (NULL == infosOfSub) {
- free(pidsOfSub);
+
+ if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
free(infos);
free(pids);
+
ERROR_EXIT("memory allocation failed for create threads\n");
}
@@ -5875,7 +6086,7 @@ static int queryTestProcess() {
t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = t_info->end_table_to + 1;
t_info->taos = NULL; // TODO: workaround to use separate taos connection;
- pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info);
+ pthread_create(pidsOfSub + i, NULL, superTableQuery, t_info);
}
g_queryInfo.superQueryInfo.threadCnt = threads;
@@ -5883,8 +6094,12 @@ static int queryTestProcess() {
g_queryInfo.superQueryInfo.threadCnt = 0;
}
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) {
- pthread_join(pids[i], NULL);
+ if ((nSqlCount > 0) && (nConcurrent > 0)) {
+ for (int i = 0; i < nConcurrent; i++) {
+ for (int j = 0; j < nSqlCount; j++) {
+ pthread_join(pids[i * nSqlCount + j], NULL);
+ }
+ }
}
tmfree((char*)pids);
@@ -5898,6 +6113,14 @@ static int queryTestProcess() {
tmfree((char*)infosOfSub);
// taos_close(taos);// TODO: workaround to use separate taos connection;
+ int64_t endTs = taosGetTimestampMs();
+
+ int totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
+ g_queryInfo.superQueryInfo.totalQueried;
+
+ printf("==== completed total queries: %d, the QPS of all threads: %10.2f====\n",
+ totalQueried,
+ totalQueried/((endTs-startTs)/1000.0));
return 0;
}
@@ -5915,7 +6138,7 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c
static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) {
TAOS_SUB* tsub = NULL;
- if (g_queryInfo.specifiedQueryInfo.subscribeMode) {
+ if (g_queryInfo.specifiedQueryInfo.mode) {
tsub = taos_subscribe(taos,
g_queryInfo.specifiedQueryInfo.subscribeRestart,
topic, sql, subscribe_callback, (void*)resultFileName,
@@ -5934,12 +6157,12 @@ static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultF
return tsub;
}
-static void *subSubscribeProcess(void *sarg) {
- threadInfo *winfo = (threadInfo *)sarg;
+static void *superSubscribe(void *sarg) {
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
char subSqlstr[1024];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
- if (winfo->taos == NULL) {
+ if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
g_queryInfo.user,
@@ -5948,17 +6171,17 @@ static void *subSubscribeProcess(void *sarg) {
g_queryInfo.port);
if (taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- winfo->threadID, taos_errstr(NULL));
+ pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
- winfo->taos = taos;
+ pThreadInfo->taos = taos;
}
}
char sqlStr[MAX_TB_NAME_SIZE*2];
sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
- taos_close(winfo->taos);
+ if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(pThreadInfo->taos);
errorPrint( "use database %s failed!\n\n",
g_queryInfo.dbName);
return NULL;
@@ -5969,7 +6192,7 @@ static void *subSubscribeProcess(void *sarg) {
do {
//if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) {
// taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to);
+ // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
//}
//st = taosGetTimestampMs();
@@ -5977,27 +6200,27 @@ static void *subSubscribeProcess(void *sarg) {
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
sprintf(topic, "taosdemo-subscribe-%d", i);
memset(subSqlstr,0,sizeof(subSqlstr));
- replaceSubTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i);
+ replaceChildTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d",
- g_queryInfo.superQueryInfo.result[i], winfo->threadID);
+ g_queryInfo.superQueryInfo.result[i], pThreadInfo->threadID);
}
- tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile);
+ tsub[i] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile);
if (NULL == tsub[i]) {
- taos_close(winfo->taos);
+ taos_close(pThreadInfo->taos);
return NULL;
}
}
//et = taosGetTimestampMs();
//printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
- } while (0);
+ } while(0);
// start loop to consume result
TAOS_RES* res = NULL;
- while (1) {
+ while(1) {
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- if (1 == g_queryInfo.superQueryInfo.subscribeMode) {
+ if (1 == g_queryInfo.superQueryInfo.mode) {
continue;
}
@@ -6007,7 +6230,7 @@ static void *subSubscribeProcess(void *sarg) {
if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d",
g_queryInfo.superQueryInfo.result[i],
- winfo->threadID);
+ pThreadInfo->threadID);
}
getResult(res, tmpFile);
}
@@ -6019,15 +6242,15 @@ static void *subSubscribeProcess(void *sarg) {
taos_unsubscribe(tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress);
}
- taos_close(winfo->taos);
+ taos_close(pThreadInfo->taos);
return NULL;
}
-static void *superSubscribeProcess(void *sarg) {
- threadInfo *winfo = (threadInfo *)sarg;
+static void *specifiedSubscribe(void *sarg) {
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
- if (winfo->taos == NULL) {
+ if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
g_queryInfo.user,
@@ -6036,18 +6259,18 @@ static void *superSubscribeProcess(void *sarg) {
g_queryInfo.port);
if (taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- winfo->threadID, taos_errstr(NULL));
+ pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
- winfo->taos = taos;
+ pThreadInfo->taos = taos;
}
}
char sqlStr[MAX_TB_NAME_SIZE*2];
sprintf(sqlStr, "use %s", g_queryInfo.dbName);
debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
- if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
- taos_close(winfo->taos);
+ if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(pThreadInfo->taos);
return NULL;
}
@@ -6056,7 +6279,7 @@ static void *superSubscribeProcess(void *sarg) {
do {
//if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) {
// taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to);
+ // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
//}
//st = taosGetTimestampMs();
@@ -6066,23 +6289,24 @@ static void *superSubscribeProcess(void *sarg) {
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d",
- g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID);
+ g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
}
- tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile);
+ tsub[i] = subscribeImpl(pThreadInfo->taos,
+ g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile);
if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) {
- taos_close(winfo->taos);
+ taos_close(pThreadInfo->taos);
return NULL;
}
}
//et = taosGetTimestampMs();
//printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
- } while (0);
+ } while(0);
// start loop to consume result
TAOS_RES* res = NULL;
- while (1) {
+ while(1) {
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- if (1 == g_queryInfo.specifiedQueryInfo.subscribeMode) {
+ if (SYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) {
continue;
}
@@ -6091,7 +6315,7 @@ static void *superSubscribeProcess(void *sarg) {
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d",
- g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID);
+ g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
}
getResult(res, tmpFile);
}
@@ -6100,10 +6324,11 @@ static void *superSubscribeProcess(void *sarg) {
taos_free_result(res);
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- taos_unsubscribe(tsub[i], g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
+ taos_unsubscribe(tsub[i],
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
}
- taos_close(winfo->taos);
+ taos_close(pThreadInfo->taos);
return NULL;
}
@@ -6161,7 +6386,7 @@ static int subscribeTestProcess() {
threadInfo *t_info = infos + i;
t_info->threadID = i;
t_info->taos = NULL; // TODO: workaround to use separate taos connection;
- pthread_create(pids + i, NULL, superSubscribeProcess, t_info);
+ pthread_create(pids + i, NULL, specifiedSubscribe, t_info);
}
//==== create sub threads for query from sub table
@@ -6204,7 +6429,7 @@ static int subscribeTestProcess() {
t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = t_info->end_table_to + 1;
t_info->taos = NULL; // TODO: workaround to use separate taos connection;
- pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info);
+ pthread_create(pidsOfSub + i, NULL, superSubscribe, t_info);
}
g_queryInfo.superQueryInfo.threadCnt = threads;
@@ -6303,7 +6528,7 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
- g_Dbs.queryMode = g_args.mode;
+ g_Dbs.queryMode = g_args.query_mode;
g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS;
@@ -6405,7 +6630,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
double t = getCurrentTimeUs();
- while ((read_len = tgetline(&line, &line_len, fp)) != -1) {
+ while((read_len = tgetline(&line, &line_len, fp)) != -1) {
if (read_len >= MAX_SQL_SIZE) continue;
line[--read_len] = '\0';
@@ -6468,52 +6693,50 @@ static void testMetaFile() {
}
static void queryResult() {
- // select
- if (false == g_Dbs.insert_only) {
- // query data
+ // query data
- pthread_t read_id;
- threadInfo *rInfo = malloc(sizeof(threadInfo));
- rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
- rInfo->start_table_from = 0;
+ pthread_t read_id;
+ threadInfo *rInfo = malloc(sizeof(threadInfo));
+ assert(rInfo);
+ rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
+ rInfo->start_table_from = 0;
- //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
- if (g_args.use_metric) {
- rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
- rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
- rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
- tstrncpy(rInfo->tb_prefix,
- g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
- } else {
- rInfo->ntables = g_args.num_of_tables;
- rInfo->end_table_to = g_args.num_of_tables -1;
- tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
- }
+ //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
+ if (g_args.use_metric) {
+ rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
+ rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
+ rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
+ tstrncpy(rInfo->tb_prefix,
+ g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
+ } else {
+ rInfo->ntables = g_args.num_of_tables;
+ rInfo->end_table_to = g_args.num_of_tables -1;
+ tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
+ }
- rInfo->taos = taos_connect(
- g_Dbs.host,
- g_Dbs.user,
- g_Dbs.password,
- g_Dbs.db[0].dbName,
- g_Dbs.port);
- if (rInfo->taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
- taos_errstr(NULL));
- free(rInfo);
- exit(-1);
- }
+ rInfo->taos = taos_connect(
+ g_Dbs.host,
+ g_Dbs.user,
+ g_Dbs.password,
+ g_Dbs.db[0].dbName,
+ g_Dbs.port);
+ if (rInfo->taos == NULL) {
+ errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ taos_errstr(NULL));
+ free(rInfo);
+ exit(-1);
+ }
- tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
+ tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
- if (!g_Dbs.use_metric) {
- pthread_create(&read_id, NULL, readTable, rInfo);
- } else {
- pthread_create(&read_id, NULL, readMetric, rInfo);
- }
- pthread_join(read_id, NULL);
- taos_close(rInfo->taos);
- free(rInfo);
- }
+ if (!g_Dbs.use_metric) {
+ pthread_create(&read_id, NULL, readTable, rInfo);
+ } else {
+ pthread_create(&read_id, NULL, readMetric, rInfo);
+ }
+ pthread_join(read_id, NULL);
+ taos_close(rInfo->taos);
+ free(rInfo);
}
static void testCmdLine() {
@@ -6531,9 +6754,7 @@ static void testCmdLine() {
g_args.test_mode = INSERT_TEST;
insertTestProcess();
- if (g_Dbs.insert_only)
- return;
- else
+ if (false == g_Dbs.insert_only)
queryResult();
}
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 9f176904fe..092374cef1 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -484,24 +484,33 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-E") == 0) {
if (argv[i+1]) {
- char *tmp = argv[++i];
- int64_t tmpEpoch;
- if (strchr(tmp, ':') && strchr(tmp, '-')) {
- if (TSDB_CODE_SUCCESS != taosParseTime(
- tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) {
- fprintf(stderr, "Input end time error!\n");
- return;
- }
- } else {
- tmpEpoch = atoll(tmp);
- }
+ char *tmp = strdup(argv[++i]);
- sprintf(argv[i], "%"PRId64"", tmpEpoch);
- debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
- __func__, __LINE__, tmp, i, argv[i]);
+ if (tmp) {
+ int64_t tmpEpoch;
+ if (strchr(tmp, ':') && strchr(tmp, '-')) {
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) {
+ fprintf(stderr, "Input end time error!\n");
+ free(tmp);
+ return;
+ }
+ } else {
+ tmpEpoch = atoll(tmp);
+ }
+
+ sprintf(argv[i], "%"PRId64"", tmpEpoch);
+ debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
+ __func__, __LINE__, tmp, i, argv[i]);
+
+ free(tmp);
+ } else {
+ errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__);
+ exit(-1);
+ }
} else {
- fprintf(stderr, "Input end time error!\n");
- return;
+ errorPrint("%s() LN%d, -E need a valid value following!\n", __func__, __LINE__);
+ exit(-1);
}
} else if (strcmp(argv[i], "-g") == 0) {
arguments->debug_print = true;
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index 80473ba5ae..85d9f94b88 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -437,14 +437,14 @@ static int32_t mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
return TAOS_DN_OFF_TIME_ZONE_NOT_MATCH;
}
- if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) {
- mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale);
- return TAOS_DN_OFF_LOCALE_NOT_MATCH;
- }
- if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) {
- mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset);
- return TAOS_DN_OFF_CHARSET_NOT_MATCH;
- }
+ // if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) {
+ // mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale);
+ // return TAOS_DN_OFF_LOCALE_NOT_MATCH;
+ // }
+ // if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) {
+ // mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset);
+ // return TAOS_DN_OFF_CHARSET_NOT_MATCH;
+ // }
if (clusterCfg->enableBalance != tsEnableBalance) {
mError("\"balance\"[%d - %d] cfg parameters inconsistent", clusterCfg->enableBalance, tsEnableBalance);
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index 381cb11952..505d3c519c 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -315,6 +315,10 @@ void sdbUpdateAsync() {
taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr);
}
+static int node_cmp(const void *l, const void *r) {
+ return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId;
+}
+
int32_t sdbUpdateSync(void *pMnodes) {
SMInfos *pMinfos = pMnodes;
if (!mnodeIsRunning()) {
@@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) {
return TSDB_CODE_SUCCESS;
}
+ qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp);
+
sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica);
for (int32_t i = 0; i < syncCfg.replica; ++i) {
sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn,
@@ -1019,7 +1025,7 @@ static int32_t sdbWriteToQueue(SSdbRow *pRow, int32_t qtype) {
int32_t queued = atomic_add_fetch_32(&tsSdbMgmt.queuedMsg, 1);
if (queued > MAX_QUEUED_MSG_NUM) {
- sdbDebug("vgId:1, too many msg:%d in sdb queue, flow control", queued);
+ sdbInfo("vgId:1, too many msg:%d in sdb queue, flow control", queued);
taosMsleep(1);
}
@@ -1131,4 +1137,4 @@ static void *sdbWorkerFp(void *pWorker) {
int32_t sdbGetReplicaNum() {
return tsSdbMgmt.cfg.replica;
-}
\ No newline at end of file
+}
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 39eca8819d..2a8e941fcb 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1037,6 +1037,19 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
SCreateTableMsg* pCreate = (SCreateTableMsg*)((char*)pCreate1 + sizeof(SCMCreateTableMsg));
+ int16_t numOfTags = htons(pCreate->numOfTags);
+ if (numOfTags > TSDB_MAX_TAGS) {
+ mError("msg:%p, app:%p table:%s, failed to create, too many tags", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
+ return TSDB_CODE_MND_TOO_MANY_TAGS;
+ }
+
+ int16_t numOfColumns = htons(pCreate->numOfColumns);
+ int32_t numOfCols = numOfColumns + numOfTags;
+ if (numOfCols > TSDB_MAX_COLUMNS) {
+ mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
+ return TSDB_CODE_MND_TOO_MANY_COLUMNS;
+ }
+
SSTableObj * pStable = calloc(1, sizeof(SSTableObj));
if (pStable == NULL) {
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
@@ -1050,10 +1063,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
pStable->sversion = 0;
pStable->tversion = 0;
- pStable->numOfColumns = htons(pCreate->numOfColumns);
- pStable->numOfTags = htons(pCreate->numOfTags);
+ pStable->numOfColumns = numOfColumns;
+ pStable->numOfTags = numOfTags;
- int32_t numOfCols = pStable->numOfColumns + pStable->numOfTags;
int32_t schemaSize = numOfCols * sizeof(SSchema);
pStable->schema = (SSchema *)calloc(1, schemaSize);
if (pStable->schema == NULL) {
@@ -1064,11 +1076,6 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema));
- if (pStable->numOfColumns > TSDB_MAX_COLUMNS || pStable->numOfTags > TSDB_MAX_TAGS) {
- mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
- return TSDB_CODE_MND_INVALID_TABLE_NAME;
- }
-
pStable->nextColId = 0;
for (int32_t col = 0; col < numOfCols; col++) {
@@ -1340,6 +1347,11 @@ static int32_t mnodeAddSuperTableColumn(SMnodeMsg *pMsg, SSchema schema[], int32
return TSDB_CODE_MND_APP_ERROR;
}
+ if (pStable->numOfColumns + ncols + pStable->numOfTags > TSDB_MAX_COLUMNS) {
+ mError("msg:%p, app:%p stable:%s, add column, too many columns", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId);
+ return TSDB_CODE_MND_TOO_MANY_COLUMNS;
+ }
+
for (int32_t i = 0; i < ncols; i++) {
if (mnodeFindSuperTableColumnIndex(pStable, schema[i].name) > 0) {
mError("msg:%p, app:%p stable:%s, add column, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle,
diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c
index 7eb3122d83..7222c8d1a0 100644
--- a/src/mnode/src/mnodeVgroup.c
+++ b/src/mnode/src/mnodeVgroup.c
@@ -994,6 +994,7 @@ void mnodeSendSyncVgroupMsg(SVgObj *pVgroup) {
mDebug("vgId:%d, send sync all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes,
pVgroup->dbName);
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+ if (pVgroup->vnodeGid[i].role != TAOS_SYNC_ROLE_SLAVE) continue;
SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
mDebug("vgId:%d, index:%d, send sync vnode msg to dnode %s", pVgroup->vgId, i,
pVgroup->vnodeGid[i].pDnode->dnodeEp);
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index a02df1a10e..ef1b7a7602 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -191,6 +191,8 @@ typedef struct SQuery {
bool stabledev; // super table stddev query
int32_t interBufSize; // intermediate buffer sizse
+ int32_t havingNum; // having expr number
+
SOrderVal order;
int16_t numOfCols;
int16_t numOfTags;
@@ -287,6 +289,7 @@ enum OPERATOR_TYPE_E {
OP_Fill = 13,
OP_MultiTableAggregate = 14,
OP_MultiTableTimeInterval = 15,
+ OP_Having = 16,
};
typedef struct SOperatorInfo {
@@ -405,6 +408,11 @@ typedef struct SOffsetOperatorInfo {
int64_t offset;
} SOffsetOperatorInfo;
+typedef struct SHavingOperatorInfo {
+ SArray* fp;
+} SHavingOperatorInfo;
+
+
typedef struct SFillOperatorInfo {
SFillInfo *pFillInfo;
SSDataBlock *pRes;
diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h
index 313e656cbd..a360b0218a 100644
--- a/src/query/inc/qSqlparser.h
+++ b/src/query/inc/qSqlparser.h
@@ -98,6 +98,7 @@ typedef struct SQuerySqlNode {
SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional]
SStrToken sqlstr; // sql string in select clause
+ struct tSqlExpr *pHaving; // having clause [optional]
} SQuerySqlNode;
typedef struct STableNamePair {
@@ -262,6 +263,11 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder);
SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index);
SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder);
+tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType);
+
+int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);
+
+tSqlExpr *tSqlExprClone(tSqlExpr *pSrc);
SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias);
SFromInfo *setSubquery(SFromInfo* pFromInfo, SQuerySqlNode *pSqlNode);
void *destroyFromInfo(SFromInfo* pFromInfo);
@@ -281,7 +287,7 @@ void tSqlExprListDestroy(SArray *pList);
SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps,
- SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit);
+ SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type);
diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h
index 87f2458658..b9d52da39b 100644
--- a/src/query/inc/qUtil.h
+++ b/src/query/inc/qUtil.h
@@ -52,11 +52,20 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int
return pResultRowInfo->pResult[slot];
}
-static FORCE_INLINE char *getPosInResultPage(SQuery *pQuery, tFilePage* page, int32_t rowOffset, int16_t offset) {
- assert(rowOffset >= 0 && pQuery != NULL);
+static FORCE_INLINE char* getPosInResultPage(SQueryRuntimeEnv* pRuntimeEnv, tFilePage* page, int32_t rowOffset,
+ int16_t offset, int32_t size) {
+ assert(rowOffset >= 0 && pRuntimeEnv != NULL);
+
+ SQuery* pQuery = pRuntimeEnv->pQuery;
+ int64_t pageSize = pRuntimeEnv->pResultBuf->pageSize;
int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery);
- return ((char *)page->data) + rowOffset + offset * numOfRows;
+
+ // buffer overflow check
+ int64_t bufEnd = (rowOffset + offset * numOfRows + size);
+ assert(page->num <= pageSize && bufEnd <= page->num);
+
+ return ((char*)page->data) + rowOffset + offset * numOfRows;
}
bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type);
diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y
index d39c938854..b31597bdda 100644
--- a/src/query/inc/sql.y
+++ b/src/query/inc/sql.y
@@ -457,7 +457,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
%type select {SQuerySqlNode*}
%destructor select {destroyQuerySqlNode($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
- A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G);
+ A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N);
}
select(A) ::= LP select(B) RP. {A = B;}
@@ -475,7 +475,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
// select client_version()
// select server_state()
select(A) ::= SELECT(T) selcollist(W). {
- A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
// selcollist is a list of expressions that are to become the return
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index e32ef2479f..29f15f7acc 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -2773,14 +2773,16 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
+ pInfo->stage += 1;
+
// all data are null, set it completed
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
+
+ return;
} else {
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
}
-
- pInfo->stage += 1;
}
// the first stage, only acquire the min/max value
@@ -2859,14 +2861,16 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
+ pInfo->stage += 1;
+
// all data are null, set it completed
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
+
+ return;
} else {
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
}
-
- pInfo->stage += 1;
}
if (pInfo->stage == 0) {
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index e11459dede..65b19be0cd 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -98,6 +98,30 @@ int32_t getMaximumIdleDurationSec() {
return tsShellActivityTimer * 2;
}
+
+int64_t genQueryId(void) {
+ int64_t uid = 0;
+ int64_t did = tsDnodeId;
+
+ uid = did << 54;
+
+ int64_t pid = ((int64_t)taosGetPId()) & 0x3FF;
+
+ uid |= pid << 44;
+
+ int64_t ts = taosGetTimestampMs() & 0x1FFFFFFFF;
+
+ uid |= ts << 11;
+
+ int64_t sid = atomic_add_fetch_64(&queryHandleId, 1) & 0x7FF;
+
+ uid |= sid;
+
+ return uid;
+}
+
+
+
static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) {
int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (pQuery->interval.intervalUnit != 'n' && pQuery->interval.intervalUnit != 'y') {
@@ -175,6 +199,7 @@ static SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntime
static SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
static SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput);
static SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
+static SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
@@ -1863,6 +1888,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
}
+ if (pQuery->havingNum > 0) {
+ pRuntimeEnv->proot = createHavingOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr1, pQuery->numOfOutput);
+ }
+
if (pQuery->limit.offset > 0) {
pRuntimeEnv->proot = createOffsetOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot);
}
@@ -1893,6 +1922,17 @@ static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) {
assert(pMemRef->ref == 0 && pMemRef->snapshot.imem == NULL && pMemRef->snapshot.mem == NULL);
}
+static void destroyTsComp(SQueryRuntimeEnv *pRuntimeEnv, SQuery *pQuery) {
+ if (isTsCompQuery(pQuery)) {
+ SColumnInfoData* pColInfoData = taosArrayGet(pRuntimeEnv->outputBuf->pDataBlock, 0);
+ FILE *f = *(FILE **)pColInfoData->pData; // TODO refactor
+ if (f) {
+ fclose(f);
+ *(FILE **)pColInfoData->pData = NULL;
+ }
+ }
+}
+
static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
SQInfo* pQInfo = (SQInfo*) pRuntimeEnv->qinfo;
@@ -1913,6 +1953,8 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
destroyResultBuf(pRuntimeEnv->pResultBuf);
doFreeQueryHandle(pRuntimeEnv);
+ destroyTsComp(pRuntimeEnv, pQuery);
+
pRuntimeEnv->pTsBuf = tsBufDestroy(pRuntimeEnv->pTsBuf);
tfree(pRuntimeEnv->keyBuf);
@@ -2160,6 +2202,40 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD
static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); }
+static int32_t updateBlockLoadStatus(SQuery *pQuery, int32_t status) {
+ bool hasFirstLastFunc = false;
+ bool hasOtherFunc = false;
+
+ if (status == BLK_DATA_ALL_NEEDED || status == BLK_DATA_DISCARD) {
+ return status;
+ }
+
+ for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
+ int32_t functionId = pQuery->pExpr1[i].base.functionId;
+
+ if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG ||
+ functionId == TSDB_FUNC_TAG_DUMMY) {
+ continue;
+ }
+
+ if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) {
+ hasFirstLastFunc = true;
+ } else {
+ hasOtherFunc = true;
+ }
+ }
+
+ if (hasFirstLastFunc && status == BLK_DATA_NO_NEEDED) {
+ if(!hasOtherFunc) {
+ return BLK_DATA_DISCARD;
+ } else{
+ return BLK_DATA_ALL_NEEDED;
+ }
+ }
+
+ return status;
+}
+
static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) {
SQuery* pQuery = &pQInfo->query;
size_t t = taosArrayGetSize(pQuery->tableGroupInfo.pGroupList);
@@ -2606,11 +2682,12 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
pBlock->pDataBlock = NULL;
pBlock->pBlockStatis = NULL;
+ SQInfo* pQInfo = pRuntimeEnv->qinfo;
SQuery* pQuery = pRuntimeEnv->pQuery;
+
int64_t groupId = pQuery->current->groupIndex;
bool ascQuery = QUERY_IS_ASC_QUERY(pQuery);
- SQInfo* pQInfo = pRuntimeEnv->qinfo;
SQueryCostInfo* pCost = &pQInfo->summary;
if (pRuntimeEnv->pTsBuf != NULL) {
@@ -2667,7 +2744,9 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
}
SDataBlockInfo* pBlockInfo = &pBlock->info;
- if ((*status) == BLK_DATA_NO_NEEDED) {
+ *status = updateBlockLoadStatus(pRuntimeEnv->pQuery, *status);
+
+ if ((*status) == BLK_DATA_NO_NEEDED || (*status) == BLK_DATA_DISCARD) {
qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey,
pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->discardBlocks += 1;
@@ -3334,7 +3413,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
continue;
}
- pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, bufPage, pResult->offset, offset);
+ pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, bufPage, pResult->offset, offset, pCtx[i].outputBytes);
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
@@ -3396,7 +3475,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
int16_t offset = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
- pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, page, pResult->offset, offset);
+ pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, page, pResult->offset, offset, pCtx[i].outputBytes);
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
@@ -3604,8 +3683,6 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) {
*/
static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock) {
- SQuery *pQuery = pRuntimeEnv->pQuery;
-
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t numOfResult = pBlock->info.rows; // there are already exists result rows
@@ -3640,7 +3717,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
int32_t bytes = pColInfoData->info.bytes;
char *out = pColInfoData->pData + numOfResult * bytes;
- char *in = getPosInResultPage(pQuery, page, pRow->offset, offset);
+ char *in = getPosInResultPage(pRuntimeEnv, page, pRow->offset, offset, bytes);
memcpy(out, in, bytes * numOfRowsToCopy);
offset += bytes;
@@ -4110,7 +4187,7 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in
return pFillCol;
}
-int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *tsdb, int32_t vgId, bool isSTableQuery) {
+int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
@@ -4121,8 +4198,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts
pQuery->timeWindowInterpo = timeWindowInterpoRequired(pQuery);
pQuery->stabledev = isStabledev(pQuery);
- pRuntimeEnv->prevResult = prevResult;
-
setScanLimitationByResultBuffer(pQuery);
int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
@@ -4764,6 +4839,111 @@ static SSDataBlock* doOffset(void* param) {
}
}
+
+bool doFilterData(SColumnInfoData* p, int32_t rid, SColumnFilterElem *filterElem, __filter_func_t fp) {
+ char* input = p->pData + p->info.bytes * rid;
+ bool isnull = isNull(input, p->info.type);
+ if (isnull) {
+ return (fp == isNullOperator) ? true : false;
+ } else {
+ if (fp == notNullOperator) {
+ return true;
+ } else if (fp == isNullOperator) {
+ return false;
+ }
+ }
+
+ if (fp(filterElem, input, input, p->info.type)) {
+ return true;
+ }
+
+ return false;
+}
+
+
+void doHavingImpl(SOperatorInfo *pOperator, SSDataBlock *pBlock) {
+ SHavingOperatorInfo* pInfo = pOperator->info;
+ int32_t f = 0;
+ int32_t allQualified = 1;
+ int32_t exprQualified = 0;
+
+ for (int32_t r = 0; r < pBlock->info.rows; ++r) {
+ allQualified = 1;
+
+ for (int32_t i = 0; i < pOperator->numOfOutput; ++i) {
+ SExprInfo* pExprInfo = &(pOperator->pExpr[i]);
+ if (pExprInfo->pFilter == NULL) {
+ continue;
+ }
+
+ SArray* es = taosArrayGetP(pInfo->fp, i);
+ assert(es);
+
+ size_t fpNum = taosArrayGetSize(es);
+
+ exprQualified = 0;
+ for (int32_t m = 0; m < fpNum; ++m) {
+ __filter_func_t fp = taosArrayGetP(es, m);
+
+ assert(fp);
+
+ //SColIndex* colIdx = &pExprInfo->base.colInfo;
+ SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i);
+
+ SColumnFilterElem filterElem = {.filterInfo = pExprInfo->pFilter[m]};
+
+ if (doFilterData(p, r, &filterElem, fp)) {
+ exprQualified = 1;
+ break;
+ }
+ }
+
+ if (exprQualified == 0) {
+ allQualified = 0;
+ break;
+ }
+ }
+
+ if (allQualified == 0) {
+ continue;
+ }
+
+ for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
+ SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
+
+ int16_t bytes = pColInfoData->info.bytes;
+ memmove(pColInfoData->pData + f * bytes, pColInfoData->pData + bytes * r, bytes);
+ }
+
+ ++f;
+ }
+
+ pBlock->info.rows = f;
+}
+
+static SSDataBlock* doHaving(void* param) {
+ SOperatorInfo *pOperator = (SOperatorInfo *)param;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
+
+ while (1) {
+ SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream);
+ if (pBlock == NULL) {
+ setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
+ pOperator->status = OP_EXEC_DONE;
+ return NULL;
+ }
+
+ doHavingImpl(pOperator, pBlock);
+
+ return pBlock;
+ }
+}
+
+
static SSDataBlock* doIntervalAgg(void* param) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@@ -5114,6 +5294,13 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
}
+static void destroyHavingOperatorInfo(void* param, int32_t numOfOutput) {
+ SHavingOperatorInfo* pInfo = (SHavingOperatorInfo*) param;
+ if (pInfo->fp) {
+ taosArrayDestroy(pInfo->fp);
+ }
+}
+
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo));
@@ -5170,6 +5357,83 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
return pOperator;
}
+
+int32_t initFilterFp(SExprInfo* pExpr, int32_t numOfOutput, SArray** fps) {
+ __filter_func_t fp = NULL;
+
+ *fps = taosArrayInit(numOfOutput, sizeof(SArray*));
+ if (*fps == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ for (int32_t i = 0; i < numOfOutput; ++i) {
+ SExprInfo* pExprInfo = &(pExpr[i]);
+ SColIndex* colIdx = &pExprInfo->base.colInfo;
+
+ if (pExprInfo->pFilter == NULL || !TSDB_COL_IS_NORMAL_COL(colIdx->flag)) {
+ taosArrayPush(*fps, &fp);
+
+ continue;
+ }
+
+ int32_t filterNum = pExprInfo->base.filterNum;
+ SColumnFilterInfo *filterInfo = pExprInfo->pFilter;
+
+ SArray* es = taosArrayInit(filterNum, sizeof(__filter_func_t));
+
+ for (int32_t j = 0; j < filterNum; ++j) {
+ int32_t lower = filterInfo->lowerRelOptr;
+ int32_t upper = filterInfo->upperRelOptr;
+ if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
+ qError("invalid rel optr");
+ taosArrayDestroy(es);
+ return TSDB_CODE_QRY_APP_ERROR;
+ }
+
+ __filter_func_t ffp = getFilterOperator(lower, upper);
+ if (ffp == NULL) {
+ qError("invalid filter info");
+ taosArrayDestroy(es);
+ return TSDB_CODE_QRY_APP_ERROR;
+ }
+
+ taosArrayPush(es, &ffp);
+
+ filterInfo += 1;
+ }
+
+ taosArrayPush(*fps, &es);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
+ SHavingOperatorInfo* pInfo = calloc(1, sizeof(SHavingOperatorInfo));
+
+ initFilterFp(pExpr, numOfOutput, &pInfo->fp);
+
+ assert(pInfo->fp);
+
+ SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
+
+ pOperator->name = "HavingOperator";
+ pOperator->operatorType = OP_Having;
+ pOperator->blockingOptr = false;
+ pOperator->status = OP_IN_EXECUTING;
+ pOperator->numOfOutput = numOfOutput;
+ pOperator->pExpr = pExpr;
+ pOperator->upstream = upstream;
+ pOperator->exec = doHaving;
+ pOperator->info = pInfo;
+ pOperator->pRuntimeEnv = pRuntimeEnv;
+ pOperator->cleanup = destroyHavingOperatorInfo;
+
+ return pOperator;
+}
+
+
+
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) {
SLimitOperatorInfo* pInfo = calloc(1, sizeof(SLimitOperatorInfo));
pInfo->limit = pRuntimeEnv->pQuery->limit.limit;
@@ -5744,9 +6008,35 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pExprMsg->functionId = htons(pExprMsg->functionId);
pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
pExprMsg->resColId = htons(pExprMsg->resColId);
+ pExprMsg->filterNum = htonl(pExprMsg->filterNum);
pMsg += sizeof(SSqlFuncMsg);
+ SColumnFilterInfo* pExprFilterInfo = pExprMsg->filterInfo;
+
+ pMsg += sizeof(SColumnFilterInfo) * pExprMsg->filterNum;
+
+ for (int32_t f = 0; f < pExprMsg->filterNum; ++f) {
+ SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pExprFilterInfo;
+
+ pFilterMsg->filterstr = htons(pFilterMsg->filterstr);
+
+ if (pFilterMsg->filterstr) {
+ pFilterMsg->len = htobe64(pFilterMsg->len);
+
+ pFilterMsg->pz = (int64_t)pMsg;
+ pMsg += (pFilterMsg->len + 1);
+ } else {
+ pFilterMsg->lowerBndi = htobe64(pFilterMsg->lowerBndi);
+ pFilterMsg->upperBndi = htobe64(pFilterMsg->upperBndi);
+ }
+
+ pFilterMsg->lowerRelOptr = htons(pFilterMsg->lowerRelOptr);
+ pFilterMsg->upperRelOptr = htons(pFilterMsg->upperRelOptr);
+
+ pExprFilterInfo++;
+ }
+
for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType);
pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes);
@@ -5955,6 +6245,42 @@ _cleanup:
return code;
}
+int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
+ if (filterNum <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ *dst = calloc(filterNum, sizeof(*src));
+ if (*dst == NULL) {
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
+
+ memcpy(*dst, src, sizeof(*src) * filterNum);
+
+ for (int32_t i = 0; i < filterNum; i++) {
+    if ((*dst)[i].filterstr && (*dst)[i].len > 0) {
+ void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
+
+ if (pz == NULL) {
+ if (i == 0) {
+ free(*dst);
+ } else {
+ freeColumnFilterInfo(*dst, i);
+ }
+
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
+
+      memcpy(pz, (void *)src[i].pz, (size_t)src[i].len + 1);
+
+ (*dst)[i].pz = (int64_t)pz;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) {
qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg);
@@ -6187,6 +6513,13 @@ int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutpu
type = s->type;
bytes = s->bytes;
}
+
+ if (pExprs[i].base.filterNum > 0) {
+ int32_t ret = cloneExprFilterInfo(&pExprs[i].pFilter, pExprMsg[i]->filterInfo, pExprMsg[i]->filterNum);
+ if (ret) {
+ return ret;
+ }
+ }
}
int32_t param = (int32_t)pExprs[i].base.arg[0].argValue.i64;
@@ -6432,6 +6765,8 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
goto _cleanup_qinfo;
}
+ pQInfo->qId = *qId;
+
// to make sure third party won't overwrite this structure
pQInfo->signature = pQInfo;
SQuery* pQuery = &pQInfo->query;
@@ -6482,6 +6817,10 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) {
pQuery->tagLen += pExprs[col].bytes;
}
+
+ if (pExprs[col].pFilter) {
+ ++pQuery->havingNum;
+ }
}
if (pSecExprs != NULL) {
@@ -6579,8 +6918,6 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
// todo refactor
pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX);
- pQInfo->qId = atomic_add_fetch_64(&queryHandleId, 1);
- *qId = pQInfo->qId;
qDebug("qmsg:%p QInfo:%" PRIu64 "-%p created", pQueryMsg, pQInfo->qId, pQInfo);
return pQInfo;
@@ -6599,6 +6936,10 @@ _cleanup_qinfo:
tExprTreeDestroy(pExprInfo->pExpr, NULL);
pExprInfo->pExpr = NULL;
}
+
+ if (pExprInfo->pFilter) {
+ freeColumnFilterInfo(pExprInfo->pFilter, pExprInfo->base.filterNum);
+ }
}
tfree(pExprs);
@@ -6644,6 +6985,8 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
SArray* prevResult = NULL;
if (pQueryMsg->prevResultLen > 0) {
prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen);
+
+ pRuntimeEnv->prevResult = prevResult;
}
pQuery->precision = tsdbGetCfg(tsdb)->precision;
@@ -6665,7 +7008,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
}
// filter the qualified
- if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
+ if ((code = doInitQInfo(pQInfo, pTsBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -6683,7 +7026,7 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
}
for (int32_t i = 0; i < numOfFilters; i++) {
- if (pFilter[i].filterstr) {
+ if (pFilter[i].filterstr && pFilter[i].pz) {
free((void*)(pFilter[i].pz));
}
}
@@ -6725,6 +7068,10 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) {
if (pExprInfo[i].pExpr != NULL) {
tExprTreeDestroy(pExprInfo[i].pExpr, NULL);
}
+
+ if (pExprInfo[i].pFilter) {
+ freeColumnFilterInfo(pExprInfo[i].pFilter, pExprInfo[i].base.filterNum);
+ }
}
tfree(pExprInfo);
@@ -6827,6 +7174,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
}
fclose(f);
+ *(FILE **)pColInfoData->pData = NULL;
}
// all data returned, set query over
diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c
index a73f385282..73b5b81e52 100644
--- a/src/query/src/qExtbuffer.c
+++ b/src/query/src/qExtbuffer.c
@@ -266,6 +266,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) {
size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file);
if (retVal <= 0) { // failed to write to buffer, may be not enough space
ret = TAOS_SYSTEM_ERROR(errno);
+ pMemBuffer->pHead = first;
return ret;
}
diff --git a/src/query/src/qFilterfunc.c b/src/query/src/qFilterfunc.c
index 884f7e653f..dabce88423 100644
--- a/src/query/src/qFilterfunc.c
+++ b/src/query/src/qFilterfunc.c
@@ -124,7 +124,7 @@ bool greaterEqualOperator(SColumnFilterElem *pFilter, const char *minval, const
bool equalOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) {
SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo;
- if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
+ if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) {
int64_t minv = -1, maxv = -1;
GET_TYPED_DATA(minv, int64_t, type, minval);
GET_TYPED_DATA(maxv, int64_t, type, maxval);
@@ -202,7 +202,7 @@ bool likeOperator(SColumnFilterElem *pFilter, const char *minval, const char *ma
bool notEqualOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) {
SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo;
- if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
+ if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) {
int64_t minv = -1, maxv = -1;
GET_TYPED_DATA(minv, int64_t, type, minval);
GET_TYPED_DATA(maxv, int64_t, type, maxval);
diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c
index c5dd6b3cac..f83caf2d8f 100644
--- a/src/query/src/qResultbuf.c
+++ b/src/query/src/qResultbuf.c
@@ -287,6 +287,10 @@ static void lruListMoveToFront(SList *pList, SPageInfo* pi) {
tdListPrependNode(pList, pi->pn);
}
+static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) {
+ return pageSize + POINTER_BYTES + 2 + sizeof(tFilePage);
+}
+
tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t* pageId) {
pResultBuf->statis.getPages += 1;
@@ -311,7 +315,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32
// allocate buf
if (availablePage == NULL) {
- pi->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES + 2); // add extract bytes in case of zipped buffer increased.
+ pi->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize)); // add extract bytes in case of zipped buffer increased.
} else {
pi->pData = availablePage;
}
@@ -355,7 +359,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) {
}
if (availablePage == NULL) {
- (*pi)->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES);
+ (*pi)->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize));
} else {
(*pi)->pData = availablePage;
}
diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c
index 438aa89c46..e8bea98eb7 100644
--- a/src/query/src/qSqlParser.c
+++ b/src/query/src/qSqlParser.c
@@ -310,6 +310,77 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
return pExpr;
}
+static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) {
+ return (left->type == right->type && left->n == right->n && strncasecmp(left->z, right->z, left->n) == 0) ? 0 : 1;
+}
+
+
+int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
+  if (left == NULL || right == NULL) {
+    return (left == right) ? 0 : 1;
+  }
+
+ if (left->type != right->type) {
+ return 1;
+ }
+
+ if (left->tokenId != right->tokenId) {
+ return 1;
+ }
+
+ if (left->functionId != right->functionId) {
+ return 1;
+ }
+
+ if ((left->pLeft && right->pLeft == NULL)
+ || (left->pLeft == NULL && right->pLeft)
+ || (left->pRight && right->pRight == NULL)
+ || (left->pRight == NULL && right->pRight)
+ || (left->pParam && right->pParam == NULL)
+ || (left->pParam == NULL && right->pParam)) {
+ return 1;
+ }
+
+ if (tVariantCompare(&left->value, &right->value)) {
+ return 1;
+ }
+
+ if (tStrTokenCompare(&left->colInfo, &right->colInfo)) {
+ return 1;
+ }
+
+
+ if (right->pParam && left->pParam) {
+ size_t size = taosArrayGetSize(right->pParam);
+ if (left->pParam && taosArrayGetSize(left->pParam) != size) {
+ return 1;
+ }
+
+ for (int32_t i = 0; i < size; i++) {
+ tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i);
+ tSqlExpr* pSubLeft = pLeftElem->pNode;
+      tSqlExprItem* pRightElem = taosArrayGet(right->pParam, i);
+ tSqlExpr* pSubRight = pRightElem->pNode;
+
+ if (tSqlExprCompare(pSubLeft, pSubRight)) {
+ return 1;
+ }
+ }
+ }
+
+ if (left->pLeft && tSqlExprCompare(left->pLeft, right->pLeft)) {
+ return 1;
+ }
+
+ if (left->pRight && tSqlExprCompare(left->pRight, right->pRight)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+
+
tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) {
tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));
@@ -640,7 +711,7 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
SSessionWindowVal *pSession, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit,
- SLimitVal *psLimit) {
+ SLimitVal *psLimit, tSqlExpr *pHaving) {
assert(pSelectList != NULL);
SQuerySqlNode *pSqlNode = calloc(1, sizeof(SQuerySqlNode));
@@ -655,6 +726,7 @@ SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SF
pSqlNode->pSortOrder = pSortOrder;
pSqlNode->pWhere = pWhere;
pSqlNode->fillType = pFill;
+ pSqlNode->pHaving = pHaving;
if (pLimit != NULL) {
pSqlNode->limit = *pLimit;
@@ -717,6 +789,9 @@ void destroyQuerySqlNode(SQuerySqlNode *pQuerySql) {
tSqlExprDestroy(pQuerySql->pWhere);
pQuerySql->pWhere = NULL;
+
+ tSqlExprDestroy(pQuerySql->pHaving);
+ pQuerySql->pHaving = NULL;
taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant);
pQuerySql->pSortOrder = NULL;
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 9b0046fda0..aa793add84 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -140,7 +140,7 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i];
int16_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes;
- char * s = getPosInResultPage(pRuntimeEnv->pQuery, page, pResultRow->offset, offset);
+ char * s = getPosInResultPage(pRuntimeEnv, page, pResultRow->offset, offset, size);
memset(s, 0, size);
offset += size;
diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c
index 7eb80e200d..153c7cb0cb 100644
--- a/src/query/src/queryMain.c
+++ b/src/query/src/queryMain.c
@@ -200,6 +200,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
return code;
}
+
bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
SQInfo *pQInfo = (SQInfo *)qinfo;
assert(pQInfo && pQInfo->signature == pQInfo);
@@ -478,7 +479,7 @@ void qCleanupQueryMgmt(void* pQMgmt) {
qDebug("vgId:%d, queryMgmt cleanup completed", vgId);
}
-void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) {
+void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) {
if (pMgmt == NULL) {
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
return NULL;
@@ -519,8 +520,7 @@ void** qAcquireQInfo(void* pMgmt, uint64_t _key) {
return NULL;
}
- TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key;
- void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE));
+ void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &_key, sizeof(_key));
if (handle == NULL || *handle == NULL) {
terrno = TSDB_CODE_QRY_INVALID_QHANDLE;
return NULL;
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index 7dca72b937..f9f9d624d3 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -2925,7 +2925,7 @@ static YYACTIONTYPE yy_reduce(
break;
case 160: /* select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */
{
- yylhsminor.yy286 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy193, yymsp[-10].minor.yy370, yymsp[-9].minor.yy454, yymsp[-4].minor.yy193, yymsp[-3].minor.yy193, &yymsp[-8].minor.yy392, &yymsp[-7].minor.yy447, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy193, &yymsp[0].minor.yy482, &yymsp[-1].minor.yy482);
+ yylhsminor.yy286 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy193, yymsp[-10].minor.yy370, yymsp[-9].minor.yy454, yymsp[-4].minor.yy193, yymsp[-3].minor.yy193, &yymsp[-8].minor.yy392, &yymsp[-7].minor.yy447, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy193, &yymsp[0].minor.yy482, &yymsp[-1].minor.yy482, yymsp[-2].minor.yy454);
}
yymsp[-12].minor.yy286 = yylhsminor.yy286;
break;
@@ -2945,7 +2945,7 @@ static YYACTIONTYPE yy_reduce(
break;
case 165: /* select ::= SELECT selcollist */
{
- yylhsminor.yy286 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy193, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ yylhsminor.yy286 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy193, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
yymsp[-1].minor.yy286 = yylhsminor.yy286;
break;
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index 133ae6d0ab..db3c72c2fc 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) {
return NULL;
}
} else {
- pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime);
+ pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30);
if ( pRpc->pCache == NULL ) {
tError("%s failed to init connection cache", pRpc->label);
rpcClose(pRpc);
@@ -470,7 +470,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
taosTmrStopA(&pConn->pTimer);
// set the idle timer to monitor the activity
- taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
+ taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime * 30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
rpcSendMsgToPeer(pConn, msg, msgLen);
// if not set to secured, set it expcet NOT_READY case, since client wont treat it as secured
@@ -997,8 +997,8 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont
}
if ( rpcIsReq(pHead->msgType) ) {
- terrno = rpcProcessReqHead(pConn, pHead);
pConn->connType = pRecv->connType;
+ terrno = rpcProcessReqHead(pConn, pHead);
// stop idle timer
taosTmrStopA(&pConn->pIdleTimer);
@@ -1367,7 +1367,7 @@ static void rpcProcessConnError(void *param, void *id) {
tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle);
- if (pContext->numOfTry >= pContext->epSet.numOfEps) {
+ if (pContext->numOfTry >= pContext->epSet.numOfEps || pContext->msgType == TSDB_MSG_TYPE_FETCH) {
rpcMsg.msgType = pContext->msgType+1;
rpcMsg.ahandle = pContext->ahandle;
rpcMsg.code = pContext->code;
diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h
index b4d9315a8e..ec6dfcbc82 100644
--- a/src/sync/inc/syncInt.h
+++ b/src/sync/inc/syncInt.h
@@ -35,7 +35,7 @@ extern "C" {
#define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16)
#define SYNC_RECV_BUFFER_SIZE (5*1024*1024)
-#define SYNC_MAX_FWDS 1024
+#define SYNC_MAX_FWDS 4096
#define SYNC_FWD_TIMER 300
#define SYNC_ROLE_TIMER 15000 // ms
#define SYNC_CHECK_INTERVAL 1000 // ms
diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c
index 72442eee6c..e5f2d94c4a 100644
--- a/src/sync/src/syncMain.c
+++ b/src/sync/src/syncMain.c
@@ -409,23 +409,22 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code, bool force)
syncReleaseNode(pNode);
}
-#if 1
void syncRecover(int64_t rid) {
SSyncPeer *pPeer;
SSyncNode *pNode = syncAcquireNode(rid);
if (pNode == NULL) return;
- // to do: add a few lines to check if recover is OK
- // if take this node to unsync state, the whole system may not work
-
nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
(*pNode->notifyRoleFp)(pNode->vgId, nodeRole);
- nodeVersion = 0;
pthread_mutex_lock(&pNode->mutex);
+ nodeVersion = 0;
+
for (int32_t i = 0; i < pNode->replica; ++i) {
+ if (i == pNode->selfIndex) continue;
+
pPeer = pNode->peerInfo[i];
if (pPeer->peerFd >= 0) {
syncRestartConnection(pPeer);
@@ -436,7 +435,6 @@ void syncRecover(int64_t rid) {
syncReleaseNode(pNode);
}
-#endif
int32_t syncGetNodesRole(int64_t rid, SNodesRole *pNodesRole) {
SSyncNode *pNode = syncAcquireNode(rid);
@@ -551,7 +549,10 @@ static void syncClosePeerConn(SSyncPeer *pPeer) {
if (pPeer->peerFd >= 0) {
pPeer->peerFd = -1;
void *pConn = pPeer->pConn;
- if (pConn != NULL) syncFreeTcpConn(pPeer->pConn);
+ if (pConn != NULL) {
+ syncFreeTcpConn(pPeer->pConn);
+ pPeer->pConn = NULL;
+ }
}
}
@@ -997,17 +998,24 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) {
sTrace("%s, forward is received, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len);
+ int32_t code = 0;
if (nodeRole == TAOS_SYNC_ROLE_SLAVE) {
// nodeVersion = pHead->version;
- (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
+ code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
} else {
if (nodeSStatus != TAOS_SYNC_STATUS_INIT) {
- syncSaveIntoBuffer(pPeer, pHead);
+ code = syncSaveIntoBuffer(pPeer, pHead);
} else {
sError("%s, forward discarded since sstatus:%s, hver:%" PRIu64, pPeer->id, syncStatus[nodeSStatus],
pHead->version);
+ code = -1;
}
}
+
+ if (code != 0) {
+ sError("%s, failed to process fwd msg, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len);
+ syncRestartConnection(pPeer);
+ }
}
static void syncProcessPeersStatusMsg(SPeersStatus *pPeersStatus, SSyncPeer *pPeer) {
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index ea72760568..cd97b2a9d6 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -2861,12 +2861,6 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL
pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows;
}
-
- SColumnInfo* pColInfo = taosArrayGet(pHandle->pColumns, i);
- if (pColInfo->type == TSDB_DATA_TYPE_TIMESTAMP) {
- pHandle->statis[i].min = pBlockInfo->compBlock->keyFirst;
- pHandle->statis[i].max = pBlockInfo->compBlock->keyLast;
- }
}
int64_t elapsed = taosGetTimestampUs() - stime;
diff --git a/src/util/src/terror.c b/src/util/src/terror.c
index fc600c8260..4fc7c510b0 100644
--- a/src/util/src/terror.c
+++ b/src/util/src/terror.c
@@ -176,6 +176,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, "Table name too long")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, "Table does not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, "Invalid table type in tsdb")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, "Too many tags")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_COLUMNS, "Too many columns")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, "Too many time series")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, "Not super table") // operation only available for super table
TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, "Tag name too long")
diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h
index 4aa07196a7..d770a38e37 100644
--- a/src/vnode/inc/vnodeInt.h
+++ b/src/vnode/inc/vnodeInt.h
@@ -37,6 +37,7 @@ extern int32_t vDebugFlag;
typedef struct {
int32_t vgId; // global vnode group ID
int32_t refCount; // reference count
+ int64_t queuedWMsgSize;
int32_t queuedWMsg;
int32_t queuedRMsg;
int32_t flowctrlLevel;
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index 69d94aff87..0921c5ce48 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -99,8 +99,13 @@ int32_t vnodeSync(int32_t vgId) {
return TSDB_CODE_VND_INVALID_VGROUP_ID;
}
- if (pVnode->role != TAOS_SYNC_ROLE_MASTER) {
+ if (pVnode->role == TAOS_SYNC_ROLE_SLAVE) {
vInfo("vgId:%d, vnode will sync, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
+
+ pVnode->version = 0;
+ pVnode->fversion = 0;
+ walResetVersion(pVnode->wal, pVnode->fversion);
+
syncRecover(pVnode->sync);
}
@@ -227,9 +232,28 @@ int32_t vnodeAlter(void *vparam, SCreateVnodeMsg *pVnodeCfg) {
return code;
}
+static void vnodeFindWalRootDir(int32_t vgId, char *walRootDir) {
+ char vnodeDir[TSDB_FILENAME_LEN] = "\0";
+ snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d/wal", vgId);
+
+ TDIR *tdir = tfsOpendir(vnodeDir);
+ if (!tdir) return;
+
+ const TFILE *tfile = tfsReaddir(tdir);
+ if (!tfile) {
+ tfsClosedir(tdir);
+ return;
+ }
+
+ sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(tfile->level, tfile->id), vgId);
+
+ tfsClosedir(tdir);
+}
+
int32_t vnodeOpen(int32_t vgId) {
char temp[TSDB_FILENAME_LEN * 3];
char rootDir[TSDB_FILENAME_LEN * 2];
+ char walRootDir[TSDB_FILENAME_LEN * 2] = {0};
snprintf(rootDir, TSDB_FILENAME_LEN * 2, "%s/vnode%d", tsVnodeDir, vgId);
SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1);
@@ -316,7 +340,21 @@ int32_t vnodeOpen(int32_t vgId) {
}
}
- sprintf(temp, "%s/wal", rootDir);
+ // walRootDir for wal & syncInfo.path (not empty dir of /vnode/vnode{pVnode->vgId}/wal)
+ vnodeFindWalRootDir(pVnode->vgId, walRootDir);
+ if (walRootDir[0] == 0) {
+ int level = -1, id = -1;
+
+ tfsAllocDisk(TFS_PRIMARY_LEVEL, &level, &id);
+ if (level < 0 || id < 0) {
+ vnodeCleanUp(pVnode);
+ return terrno;
+ }
+
+ sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(level, id), vgId);
+ }
+
+ sprintf(temp, "%s/wal", walRootDir);
pVnode->walCfg.vgId = pVnode->vgId;
pVnode->wal = walOpen(temp, &pVnode->walCfg);
if (pVnode->wal == NULL) {
@@ -348,7 +386,7 @@ int32_t vnodeOpen(int32_t vgId) {
pVnode->events = NULL;
- vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode);
+ vDebug("vgId:%d, vnode is opened in %s - %s, pVnode:%p", pVnode->vgId, rootDir, walRootDir, pVnode);
vnodeAddIntoHash(pVnode);
@@ -356,7 +394,7 @@ int32_t vnodeOpen(int32_t vgId) {
syncInfo.vgId = pVnode->vgId;
syncInfo.version = pVnode->version;
syncInfo.syncCfg = pVnode->syncCfg;
- tstrncpy(syncInfo.path, rootDir, TSDB_FILENAME_LEN);
+ tstrncpy(syncInfo.path, walRootDir, TSDB_FILENAME_LEN);
syncInfo.getWalInfoFp = vnodeGetWalInfo;
syncInfo.writeToCacheFp = vnodeWriteToCache;
syncInfo.confirmForward = vnodeConfirmForard;
diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c
index 0836ade77f..2448fada50 100644
--- a/src/vnode/src/vnodeRead.c
+++ b/src/vnode/src/vnodeRead.c
@@ -208,6 +208,7 @@ static void vnodeBuildNoResultQueryRsp(SRspRet *pRet) {
pRsp->completed = true;
}
+
static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
void * pCont = pRead->pCont;
int32_t contLen = pRead->contLen;
@@ -226,7 +227,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
if (contLen != 0) {
qinfo_t pQInfo = NULL;
- uint64_t qId = 0;
+ uint64_t qId = genQueryId();
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo, &qId);
SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp));
@@ -239,7 +240,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
// current connect is broken
if (code == TSDB_CODE_SUCCESS) {
- handle = qRegisterQInfo(pVnode->qMgmt, qId, (uint64_t)pQInfo);
+ handle = qRegisterQInfo(pVnode->qMgmt, qId, pQInfo);
if (handle == NULL) { // failed to register qhandle
pRsp->code = terrno;
terrno = 0;
diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c
index a0be52db7a..aab685e678 100644
--- a/src/vnode/src/vnodeWrite.c
+++ b/src/vnode/src/vnodeWrite.c
@@ -25,6 +25,7 @@
#include "vnodeStatus.h"
#define MAX_QUEUED_MSG_NUM 100000
+#define MAX_QUEUED_MSG_SIZE 1024*1024*1024 //1GB
extern void * tsDnodeTmr;
static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *);
@@ -269,6 +270,13 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
}
}
+ if (tsAvailDataDirGB <= tsMinimalDataDirGB) {
+ vError("vgId:%d, failed to write into vwqueue since no diskspace, avail:%fGB", pVnode->vgId, tsAvailDataDirGB);
+ taosFreeQitem(pWrite);
+ vnodeRelease(pVnode);
+ return TSDB_CODE_VND_NO_DISKSPACE;
+ }
+
if (!vnodeInReadyOrUpdatingStatus(pVnode)) {
vError("vgId:%d, failed to write into vwqueue, vstatus is %s, refCount:%d pVnode:%p", pVnode->vgId,
vnodeStatus[pVnode->status], pVnode->refCount, pVnode);
@@ -278,14 +286,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
}
int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1);
- if (queued > MAX_QUEUED_MSG_NUM) {
+ int64_t queuedSize = atomic_add_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len);
+
+ if (queued > MAX_QUEUED_MSG_NUM || queuedSize > MAX_QUEUED_MSG_SIZE) {
int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3;
if (ms > 100) ms = 100;
vDebug("vgId:%d, too many msg:%d in vwqueue, flow control %dms", pVnode->vgId, queued, ms);
taosMsleep(ms);
}
- vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg);
+ vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d size:%" PRId64, pVnode->vgId, pVnode->refCount,
+ pVnode->queuedWMsg, pVnode->queuedWMsgSize);
taosWriteQitem(pVnode->wqueue, pWrite->qtype, pWrite);
return TSDB_CODE_SUCCESS;
@@ -308,7 +319,10 @@ void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) {
SVnodeObj *pVnode = vparam;
int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1);
- vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d", pVnode->vgId, pWrite, pWrite->rpcMsg.ahandle, queued);
+ int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len);
+
+ vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite,
+ pWrite->rpcMsg.ahandle, queued, queuedSize);
taosFreeQitem(pWrite);
vnodeRelease(pVnode);
@@ -344,7 +358,9 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) {
SVnodeObj *pVnode = pWrite->pVnode;
if (pWrite->qtype != TAOS_QTYPE_RPC) return 0;
- if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->flowctrlLevel <= 0) return 0;
+ if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->queuedWMsgSize < MAX_QUEUED_MSG_SIZE &&
+ pVnode->flowctrlLevel <= 0)
+ return 0;
if (tsEnableFlowCtrl == 0) {
int32_t ms = (int32_t)pow(2, pVnode->flowctrlLevel + 2);
diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c
index 39ce2657aa..55ab9b031b 100644
--- a/src/wal/src/walMgmt.c
+++ b/src/wal/src/walMgmt.c
@@ -104,7 +104,7 @@ int32_t walAlter(void *handle, SWalCfg *pCfg) {
pWal->level = pCfg->walLevel;
pWal->fsyncPeriod = pCfg->fsyncPeriod;
- pWal->fsyncSeq = pCfg->fsyncPeriod % 1000;
+ pWal->fsyncSeq = pCfg->fsyncPeriod / 1000;
if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1;
return TSDB_CODE_SUCCESS;
diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile
index 178a0446c3..afd0b25338 100644
--- a/tests/Jenkinsfile
+++ b/tests/Jenkinsfile
@@ -227,11 +227,11 @@ pipeline {
}
}
- post {
+ post {
success {
emailext (
- subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
- body: '''
+ subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
+ body: """
@@ -247,29 +247,29 @@ pipeline {
- - 构建名称>>分支:${PROJECT_NAME}
+ - 构建名称>>分支:${env.BRANCH_NAME}
- 构建结果: Successful
- 构建编号:${BUILD_NUMBER}
- - 触发用户:${CAUSE}
- - 变更概要:${CHANGES}
+ - 触发用户:${env.CHANGE_AUTHOR}
+ - 提交信息:${env.CHANGE_TITLE}
- 构建地址:${BUILD_URL}
- 构建日志:${BUILD_URL}console
- - 变更集:${JELLY_SCRIPT}
+
|
- ''',
+