commit 53bba6113c
Merge branch 'master' of github.com:taosdata/TDengine into test/chr
@@ -363,7 +363,7 @@ typedef struct SDbs_S {
 
 typedef struct SpecifiedQueryInfo_S {
   uint64_t     queryInterval;  // 0: unlimit  > 0   loop/s
-  uint64_t     concurrent;
+  uint32_t     concurrent;
   uint64_t     sqlCount;
   uint32_t     asyncMode; // 0: sync, 1: async
   uint64_t     subscribeInterval; // ms
@@ -374,6 +374,9 @@ typedef struct SpecifiedQueryInfo_S {
   char         result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
   int          resubAfterConsume[MAX_QUERY_SQL_COUNT];
   TAOS_SUB*    tsub[MAX_QUERY_SQL_COUNT];
+  char         topic[MAX_QUERY_SQL_COUNT][32];
+  int          consumed[MAX_QUERY_SQL_COUNT];
+  TAOS_RES*    res[MAX_QUERY_SQL_COUNT];
   uint64_t     totalQueried;
 } SpecifiedQueryInfo;
 
@@ -418,7 +421,8 @@ typedef struct SThreadInfo_S {
   int       threadID;
   char      db_name[MAX_DB_NAME_SIZE+1];
   uint32_t  time_precision;
-  char      fp[4096];
+  char      filePath[4096];
+  FILE      *fp;
   char      tb_prefix[MAX_TB_NAME_SIZE];
   uint64_t  start_table_from;
   uint64_t  end_table_to;
@@ -451,6 +455,7 @@ typedef struct SThreadInfo_S {
 
   // seq of query or subscribe
   uint64_t  querySeq;   // sequence number of sql command
+  TAOS_SUB*  tsub;
 
 } threadInfo;
 
@@ -532,7 +537,7 @@ static int createDatabasesAndStables();
 static void createChildTables();
 static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
 static int postProceSql(char *host, struct sockaddr_in *pServAddr,
-        uint16_t port, char* sqlstr, char *resultFile);
+        uint16_t port, char* sqlstr, threadInfo *pThreadInfo);
 
 /* ************ Global variables ************ */
 
@@ -1112,24 +1117,22 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
   return 0;
 }
 
-static void appendResultBufToFile(char *resultBuf, char *resultFile)
+static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
 {
-  FILE *fp = NULL;
-  if (resultFile[0] != 0) {
-    fp = fopen(resultFile, "at");
-    if (fp == NULL) {
-      errorPrint(
-          "%s() LN%d, failed to open result file: %s, result will not save to file\n",
-          __func__, __LINE__, resultFile);
-      return;
-    }
-  }
-  fprintf(fp, "%s", resultBuf);
-  tmfclose(fp);
+  pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
+  if (pThreadInfo->fp == NULL) {
+    errorPrint(
+        "%s() LN%d, failed to open result file: %s, result will not save to file\n",
+        __func__, __LINE__, pThreadInfo->filePath);
+    return;
+  }
+
+  fprintf(pThreadInfo->fp, "%s", resultBuf);
+  tmfclose(pThreadInfo->fp);
+  pThreadInfo->fp = NULL;
 }
 
-static void appendResultToFile(TAOS_RES *res, char* resultFile) {
+static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
   TAOS_ROW row = NULL;
   int num_rows = 0;
   int num_fields = taos_field_count(res);
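Note: the refactor above routes all result-file output through the per-thread `threadInfo` (its new `filePath` buffer plus a `FILE *fp` handle) instead of passing bare file-name strings around. A minimal sketch of the same open-append-close pattern, using a hypothetical stand-in struct rather than taosdemo's real `threadInfo`:

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the per-thread state; field names mirror the diff. */
typedef struct {
    char  filePath[4096];
    FILE *fp;
} thread_info_t;

/* Open the thread's result file in append mode, write one buffer, close again.
 * Skipping the write (rather than aborting) when the file cannot be opened is
 * the behavior the new appendResultBufToFile() follows. */
static void append_result_buf(const char *resultBuf, thread_info_t *info) {
    if (info->filePath[0] == '\0') return;      /* no result file configured */
    info->fp = fopen(info->filePath, "at");
    if (info->fp == NULL) {
        fprintf(stderr, "failed to open %s, result not saved\n", info->filePath);
        return;
    }
    fprintf(info->fp, "%s", resultBuf);
    fclose(info->fp);
    info->fp = NULL;                            /* leave no dangling handle */
}

int main(void) {
    thread_info_t info = { .fp = NULL };
    snprintf(info.filePath, sizeof(info.filePath), "query-result-0.txt");
    append_result_buf("1 row returned\n", &info);
    return 0;
}
```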
@@ -1147,10 +1150,11 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
 
   // fetch the records row by row
   while((row = taos_fetch_row(res))) {
-    if (totalLen >= 100*1024*1024 - 32000) {
-      appendResultBufToFile(databuf, resultFile);
+    if ((strlen(pThreadInfo->filePath) > 0)
+        && (totalLen >= 100*1024*1024 - 32000)) {
+      appendResultBufToFile(databuf, pThreadInfo);
       totalLen = 0;
       memset(databuf, 0, 100*1024*1024);
     }
     num_rows++;
     int len = taos_print_row(temp, row, fields, num_fields);
@@ -1161,8 +1165,10 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
   }
 
   verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
-      __func__, __LINE__, databuf, resultFile);
-  appendResultBufToFile(databuf, resultFile);
+      __func__, __LINE__, databuf, pThreadInfo->filePath);
+  if (strlen(pThreadInfo->filePath) > 0) {
+    appendResultBufToFile(databuf, pThreadInfo);
+  }
   free(databuf);
 }
 
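Note: `fetchResult()` above buffers printed rows in memory and only flushes to disk when a result file is configured and the buffer approaches its 100 MB cap. A rough sketch of that flush-when-nearly-full loop (buffer size shrunk for illustration; `flush_buf()` is a placeholder for the real file append):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_CAP   4096   /* taosdemo uses 100*1024*1024 */
#define HEADROOM  128    /* taosdemo keeps 32000 bytes of headroom */

static void flush_buf(const char *buf) {          /* placeholder for appendResultBufToFile() */
    fputs(buf, stdout);
}

int main(void) {
    char *databuf = calloc(1, BUF_CAP);
    if (databuf == NULL) return 1;
    size_t totalLen = 0;
    int haveResultFile = 1;                       /* stands in for strlen(filePath) > 0 */

    for (int row = 0; row < 1000; row++) {
        char temp[64];
        int len = snprintf(temp, sizeof(temp), "row %d\n", row);

        /* Flush before the buffer would overflow, but only if output is wanted. */
        if (haveResultFile && (totalLen >= BUF_CAP - HEADROOM)) {
            flush_buf(databuf);
            totalLen = 0;
            memset(databuf, 0, BUF_CAP);
        }
        memcpy(databuf + totalLen, temp, (size_t)len + 1);
        totalLen += (size_t)len;
    }
    if (haveResultFile) flush_buf(databuf);       /* final partial buffer */
    free(databuf);
    return 0;
}
```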
@@ -1178,16 +1184,14 @@ static void selectAndGetResult(
       return;
     }
 
-    if ((strlen(pThreadInfo->fp))) {
-      appendResultToFile(res, pThreadInfo->fp);
-    }
+    fetchResult(res, pThreadInfo);
     taos_free_result(res);
 
   } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
     int retCode = postProceSql(
             g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
             command,
-            pThreadInfo->fp);
+            pThreadInfo);
     if (0 != retCode) {
       printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
     }
@@ -1720,7 +1724,7 @@ static void printfQueryMeta() {
     printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
         g_queryInfo.specifiedQueryInfo.queryInterval);
     printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
-    printf("concurrent: \033[33m%"PRIu64"\033[0m\n",
+    printf("concurrent: \033[33m%d\033[0m\n",
         g_queryInfo.specifiedQueryInfo.concurrent);
     printf("mod: \033[33m%s\033[0m\n",
         (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
@@ -2017,13 +2021,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
 
   // show variables
   res = taos_query(taos, "show variables;");
-  //appendResultToFile(res, filename);
+  //fetchResult(res, filename);
   xDumpResultToFile(filename, res);
 
   // show dnodes
   res = taos_query(taos, "show dnodes;");
   xDumpResultToFile(filename, res);
-  //appendResultToFile(res, filename);
+  //fetchResult(res, filename);
 
   // show databases
   res = taos_query(taos, "show databases;");
@@ -2059,7 +2063,7 @@ static void printfQuerySystemInfo(TAOS * taos) {
 }
 
 static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
-        char* sqlstr, char *resultFile)
+        char* sqlstr, threadInfo *pThreadInfo)
 {
     char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
 
@@ -2195,8 +2199,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
     response_buf[RESP_BUF_LEN - 1] = '\0';
     printf("Response:\n%s\n", response_buf);
 
-    if (resultFile) {
-        appendResultBufToFile(response_buf, resultFile);
+    if (strlen(pThreadInfo->filePath) > 0) {
+        appendResultBufToFile(response_buf, pThreadInfo);
     }
 
     free(request_buf);
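Note: the RESTful path above still builds its HTTP request from the same `req_fmt` template; only the result sink changed from a file name to the `threadInfo`. A small sketch of filling that template with `snprintf` (host, port, auth token and SQL below are made-up example values, not configuration from the diff):

```c
#include <stdio.h>
#include <string.h>

int main(void) {
    const char *req_fmt =
        "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\n"
        "Authorization: Basic %s\r\nContent-Length: %d\r\n"
        "Content-Type: application/x-www-form-urlencoded\r\n\r\n%s";

    const char *url    = "/rest/sql";           /* example endpoint */
    const char *host   = "127.0.0.1";           /* made-up values below */
    int         port   = 6041;
    const char *auth   = "dXNlcjpwYXNz";        /* base64("user:pass"), dummy token */
    const char *sqlstr = "show databases;";

    char request_buf[1024];
    int n = snprintf(request_buf, sizeof(request_buf), req_fmt,
                     url, host, port, auth, (int)strlen(sqlstr), sqlstr);
    if (n < 0 || n >= (int)sizeof(request_buf)) {
        fprintf(stderr, "request too long\n");
        return 1;
    }
    fputs(request_buf, stdout);                 /* would be written to the socket */
    return 0;
}
```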
@@ -2959,18 +2963,18 @@ static int startMultiThreadCreateChildTable(
   b = ntables % threads;
 
   for (int64_t i = 0; i < threads; i++) {
-    threadInfo *t_info = infos + i;
-    t_info->threadID = i;
-    tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
-    t_info->superTblInfo = superTblInfo;
+    threadInfo *pThreadInfo = infos + i;
+    pThreadInfo->threadID = i;
+    tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+    pThreadInfo->superTblInfo = superTblInfo;
     verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
-    t_info->taos = taos_connect(
+    pThreadInfo->taos = taos_connect(
             g_Dbs.host,
             g_Dbs.user,
             g_Dbs.password,
             db_name,
             g_Dbs.port);
-    if (t_info->taos == NULL) {
+    if (pThreadInfo->taos == NULL) {
       errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n",
         __func__, __LINE__, taos_errstr(NULL));
       free(pids);
@@ -2978,14 +2982,14 @@ static int startMultiThreadCreateChildTable(
       return -1;
     }
 
-    t_info->start_table_from = startFrom;
-    t_info->ntables = i<b?a+1:a;
-    t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
-    startFrom = t_info->end_table_to + 1;
-    t_info->use_metric = true;
-    t_info->cols = cols;
-    t_info->minDelay = UINT64_MAX;
-    pthread_create(pids + i, NULL, createTable, t_info);
+    pThreadInfo->start_table_from = startFrom;
+    pThreadInfo->ntables = i<b?a+1:a;
+    pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
+    startFrom = pThreadInfo->end_table_to + 1;
+    pThreadInfo->use_metric = true;
+    pThreadInfo->cols = cols;
+    pThreadInfo->minDelay = UINT64_MAX;
+    pthread_create(pids + i, NULL, createTable, pThreadInfo);
   }
 
   for (int i = 0; i < threads; i++) {
@@ -2993,8 +2997,8 @@ static int startMultiThreadCreateChildTable(
   }
 
   for (int i = 0; i < threads; i++) {
-    threadInfo *t_info = infos + i;
-    taos_close(t_info->taos);
+    threadInfo *pThreadInfo = infos + i;
+    taos_close(pThreadInfo->taos);
   }
 
   free(pids);
@@ -4199,7 +4203,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
     if (concurrent && concurrent->type == cJSON_Number) {
       if (concurrent->valueint <= 0) {
         errorPrint(
-            "%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
+            "%s() LN%d, query sqlCount %"PRIu64" or concurrent %d is not correct.\n",
             __func__, __LINE__,
             g_queryInfo.specifiedQueryInfo.sqlCount,
             g_queryInfo.specifiedQueryInfo.concurrent);
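Note: the format-string edits above (`%"PRIu64"` to `%d`) track the `concurrent` field shrinking from `uint64_t` to `uint32_t`; printf conversion specifiers have to match the promoted width of the argument actually passed. A tiny illustration with placeholder values:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
    uint64_t sqlCount   = 3;   /* stays 64-bit, keeps PRIu64 */
    uint32_t concurrent = 4;   /* narrowed field, promoted to int for printf */

    printf("query sqlCount %" PRIu64 " concurrent %d\n", sqlCount, concurrent);
    return 0;
}
```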
@@ -4266,24 +4270,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
   }
 
   // sqls
-  cJSON* superSqls = cJSON_GetObjectItem(specifiedQuery, "sqls");
-  if (!superSqls) {
+  cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls");
+  if (!specifiedSqls) {
     g_queryInfo.specifiedQueryInfo.sqlCount = 0;
-  } else if (superSqls->type != cJSON_Array) {
+  } else if (specifiedSqls->type != cJSON_Array) {
     errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
         __func__, __LINE__);
     goto PARSE_OVER;
   } else {
-    int superSqlSize = cJSON_GetArraySize(superSqls);
-    if (superSqlSize > MAX_QUERY_SQL_COUNT) {
-      errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
-          __func__, __LINE__, MAX_QUERY_SQL_COUNT);
+    int superSqlSize = cJSON_GetArraySize(specifiedSqls);
+    if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent
+            > MAX_QUERY_SQL_COUNT) {
+      errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
+          __func__, __LINE__,
+          superSqlSize,
+          g_queryInfo.specifiedQueryInfo.concurrent,
+          MAX_QUERY_SQL_COUNT);
       goto PARSE_OVER;
     }
 
     g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize;
     for (int j = 0; j < superSqlSize; ++j) {
-      cJSON* sql = cJSON_GetArrayItem(superSqls, j);
+      cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j);
       if (sql == NULL) continue;
 
       cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
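Note: the tightened check above exists because each (sql, concurrent-thread) pair later gets its own slot, `seq = i * concurrent + j`, in arrays sized `MAX_QUERY_SQL_COUNT`, so it is the product that must stay within the bound, not the SQL count alone. A hedged sketch of the same guard (the macro value here is illustrative, not taken from the diff):

```c
#include <stdio.h>

#define MAX_QUERY_SQL_COUNT 100   /* illustrative size for the per-slot arrays */

/* Return 0 when every seq = i * concurrent + j stays below the array size. */
static int check_specified_query_slots(int sqlCount, int concurrent) {
    if (sqlCount <= 0 || concurrent <= 0) return -1;
    if (sqlCount * concurrent > MAX_QUERY_SQL_COUNT) {
        fprintf(stderr, "query sql(%d) * concurrent(%d) overflow, max is %d\n",
                sqlCount, concurrent, MAX_QUERY_SQL_COUNT);
        return -1;
    }
    return 0;
}

int main(void) {
    printf("%d\n", check_specified_query_slots(10, 4));   /* 40 slots: ok -> 0   */
    printf("%d\n", check_specified_query_slots(30, 5));   /* 150 slots: fail -> -1 */
    return 0;
}
```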
@@ -4459,16 +4467,16 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
       g_queryInfo.superQueryInfo.resubAfterConsume = 1;
   }
 
-  // sqls
-  cJSON* subsqls = cJSON_GetObjectItem(superQuery, "sqls");
-  if (!subsqls) {
+  // supert table sqls
+  cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
+  if (!superSqls) {
     g_queryInfo.superQueryInfo.sqlCount = 0;
-  } else if (subsqls->type != cJSON_Array) {
+  } else if (superSqls->type != cJSON_Array) {
     errorPrint("%s() LN%d: failed to read json, super sqls not found\n",
         __func__, __LINE__);
     goto PARSE_OVER;
   } else {
-    int superSqlSize = cJSON_GetArraySize(subsqls);
+    int superSqlSize = cJSON_GetArraySize(superSqls);
     if (superSqlSize > MAX_QUERY_SQL_COUNT) {
       errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
           __func__, __LINE__, MAX_QUERY_SQL_COUNT);
@@ -4477,7 +4485,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
 
     g_queryInfo.superQueryInfo.sqlCount = superSqlSize;
     for (int j = 0; j < superSqlSize; ++j) {
-      cJSON* sql = cJSON_GetArrayItem(subsqls, j);
+      cJSON* sql = cJSON_GetArrayItem(superSqls, j);
       if (sql == NULL) continue;
 
       cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
@@ -5823,49 +5831,49 @@ static void startMultiThreadInsertData(int threads, char* db_name,
   }
 
   for (int i = 0; i < threads; i++) {
-    threadInfo *t_info = infos + i;
-    t_info->threadID = i;
-    tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
-    t_info->time_precision = timePrec;
-    t_info->superTblInfo = superTblInfo;
+    threadInfo *pThreadInfo = infos + i;
+    pThreadInfo->threadID = i;
+    tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+    pThreadInfo->time_precision = timePrec;
+    pThreadInfo->superTblInfo = superTblInfo;
 
-    t_info->start_time = start_time;
-    t_info->minDelay = UINT64_MAX;
+    pThreadInfo->start_time = start_time;
+    pThreadInfo->minDelay = UINT64_MAX;
 
     if ((NULL == superTblInfo) ||
             (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) {
-      //t_info->taos = taos;
-      t_info->taos = taos_connect(
+      //pThreadInfo->taos = taos;
+      pThreadInfo->taos = taos_connect(
               g_Dbs.host, g_Dbs.user,
               g_Dbs.password, db_name, g_Dbs.port);
-      if (NULL == t_info->taos) {
+      if (NULL == pThreadInfo->taos) {
         errorPrint(
                 "connect to server fail from insert sub thread, reason: %s\n",
                 taos_errstr(NULL));
        exit(-1);
      }
     } else {
-      t_info->taos = NULL;
+      pThreadInfo->taos = NULL;
     }
 
 /*  if ((NULL == superTblInfo)
             || (0 == superTblInfo->multiThreadWriteOneTbl)) {
 */
-      t_info->start_table_from = startFrom;
-      t_info->ntables = i<b?a+1:a;
-      t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
-      startFrom = t_info->end_table_to + 1;
+      pThreadInfo->start_table_from = startFrom;
+      pThreadInfo->ntables = i<b?a+1:a;
+      pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
+      startFrom = pThreadInfo->end_table_to + 1;
 /*  } else {
-      t_info->start_table_from = 0;
-      t_info->ntables = superTblInfo->childTblCount;
-      t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint();
+      pThreadInfo->start_table_from = 0;
+      pThreadInfo->ntables = superTblInfo->childTblCount;
+      pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint();
     }
 */
-    tsem_init(&(t_info->lock_sem), 0, 0);
+    tsem_init(&(pThreadInfo->lock_sem), 0, 0);
     if (ASYNC_MODE == g_Dbs.asyncMode) {
-      pthread_create(pids + i, NULL, asyncWrite, t_info);
+      pthread_create(pids + i, NULL, asyncWrite, pThreadInfo);
     } else {
-      pthread_create(pids + i, NULL, syncWrite, t_info);
+      pthread_create(pids + i, NULL, syncWrite, pThreadInfo);
     }
   }
 
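Note: the renamed loop above keeps taosdemo's usual work-splitting scheme: with `a = ntables / threads` and `b = ntables % threads`, the first `b` threads take `a + 1` tables and the rest take `a`, so the per-thread ranges tile `[0, ntables)` exactly. A standalone sketch of just that partitioning:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
    int64_t ntables = 10, threads = 4;
    int64_t a = ntables / threads;       /* base share per thread */
    int64_t b = ntables % threads;       /* first b threads get one extra table */

    uint64_t startFrom = 0;
    for (int64_t i = 0; i < threads; i++) {
        int64_t  cnt          = (i < b) ? a + 1 : a;
        uint64_t end_table_to = (i < b) ? startFrom + (uint64_t)a
                                        : startFrom + (uint64_t)a - 1;
        printf("thread %" PRId64 ": tables [%" PRIu64 ", %" PRIu64 "] (%" PRId64 " tables)\n",
               i, startFrom, end_table_to, cnt);
        startFrom = end_table_to + 1;    /* next thread continues where this one ends */
    }
    return 0;
}
```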
@@ -5880,27 +5888,27 @@ static void startMultiThreadInsertData(int threads, char* db_name,
   double  avgDelay = 0;
 
   for (int i = 0; i < threads; i++) {
-    threadInfo *t_info = infos + i;
+    threadInfo *pThreadInfo = infos + i;
 
-    tsem_destroy(&(t_info->lock_sem));
-    taos_close(t_info->taos);
+    tsem_destroy(&(pThreadInfo->lock_sem));
+    taos_close(pThreadInfo->taos);
 
     debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
         __func__, __LINE__,
-        t_info->threadID, t_info->totalInsertRows,
-        t_info->totalAffectedRows);
+        pThreadInfo->threadID, pThreadInfo->totalInsertRows,
+        pThreadInfo->totalAffectedRows);
     if (superTblInfo) {
-      superTblInfo->totalAffectedRows += t_info->totalAffectedRows;
-      superTblInfo->totalInsertRows += t_info->totalInsertRows;
+      superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows;
+      superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows;
     } else {
-      g_args.totalAffectedRows += t_info->totalAffectedRows;
-      g_args.totalInsertRows += t_info->totalInsertRows;
+      g_args.totalAffectedRows += pThreadInfo->totalAffectedRows;
+      g_args.totalInsertRows += pThreadInfo->totalInsertRows;
     }
 
-    totalDelay += t_info->totalDelay;
-    cntDelay += t_info->cntDelay;
-    if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay;
-    if (t_info->minDelay < minDelay) minDelay = t_info->minDelay;
+    totalDelay += pThreadInfo->totalDelay;
+    cntDelay += pThreadInfo->cntDelay;
+    if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay;
+    if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay;
   }
   cntDelay -= 1;
 
@@ -5956,26 +5964,26 @@ static void startMultiThreadInsertData(int threads, char* db_name,
 
 static void *readTable(void *sarg) {
 #if 1
-  threadInfo *rinfo = (threadInfo *)sarg;
-  TAOS *taos = rinfo->taos;
+  threadInfo *pThreadInfo = (threadInfo *)sarg;
+  TAOS *taos = pThreadInfo->taos;
   char command[BUFFER_SIZE] = "\0";
-  uint64_t sTime = rinfo->start_time;
-  char *tb_prefix = rinfo->tb_prefix;
-  FILE *fp = fopen(rinfo->fp, "a");
+  uint64_t sTime = pThreadInfo->start_time;
+  char *tb_prefix = pThreadInfo->tb_prefix;
+  FILE *fp = fopen(pThreadInfo->filePath, "a");
   if (NULL == fp) {
-    errorPrint( "fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
+    errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
     return NULL;
   }
 
   int64_t num_of_DPT;
-/*  if (rinfo->superTblInfo) {
-    num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table;
+/*  if (pThreadInfo->superTblInfo) {
+    num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table;
   } else {
 */
       num_of_DPT = g_args.num_of_DPT;
 // }
 
-  int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
+  int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
   int64_t totalData = num_of_DPT * num_of_tables;
   bool do_aggreFunc = g_Dbs.do_aggreFunc;
 
@@ -6028,17 +6036,17 @@ static void *readTable(void *sarg) {
 
 static void *readMetric(void *sarg) {
 #if 1
-  threadInfo *rinfo = (threadInfo *)sarg;
-  TAOS *taos = rinfo->taos;
+  threadInfo *pThreadInfo = (threadInfo *)sarg;
+  TAOS *taos = pThreadInfo->taos;
   char command[BUFFER_SIZE] = "\0";
-  FILE *fp = fopen(rinfo->fp, "a");
+  FILE *fp = fopen(pThreadInfo->filePath, "a");
   if (NULL == fp) {
-    printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
+    printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
     return NULL;
   }
 
-  int64_t num_of_DPT = rinfo->superTblInfo->insertRows;
-  int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
+  int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows;
+  int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
   int64_t totalData = num_of_DPT * num_of_tables;
   bool do_aggreFunc = g_Dbs.do_aggreFunc;
 
@@ -6237,8 +6245,8 @@ static void *specifiedTableQuery(void *sarg) {
   uint64_t lastPrintTime = taosGetTimestampMs();
   uint64_t startTs = taosGetTimestampMs();
 
-  if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-    sprintf(pThreadInfo->fp, "%s-%d",
+  if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
+    sprintf(pThreadInfo->filePath, "%s-%d",
         g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
         pThreadInfo->threadID);
   }
@@ -6338,8 +6346,8 @@ static void *superTableQuery(void *sarg) {
       for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
         memset(sqlstr,0,sizeof(sqlstr));
         replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
-        if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
-          sprintf(pThreadInfo->fp, "%s-%d",
+        if (g_queryInfo.superQueryInfo.result[j] != NULL) {
+          sprintf(pThreadInfo->filePath, "%s-%d",
              g_queryInfo.superQueryInfo.result[j],
              pThreadInfo->threadID);
        }
@@ -6429,9 +6437,9 @@ static int queryTestProcess() {
     for (uint64_t i = 0; i < nSqlCount; i++) {
       for (int j = 0; j < nConcurrent; j++) {
         uint64_t seq = i * nConcurrent + j;
-        threadInfo *t_info = infos + seq;
-        t_info->threadID = seq;
-        t_info->querySeq = i;
+        threadInfo *pThreadInfo = infos + seq;
+        pThreadInfo->threadID = seq;
+        pThreadInfo->querySeq = i;
 
         if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
 
@@ -6448,10 +6456,10 @@ static int queryTestProcess() {
           }
         }
 
-        t_info->taos = NULL;// TODO: workaround to use separate taos connection;
+        pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
 
         pthread_create(pids + seq, NULL, specifiedTableQuery,
-            t_info);
+            pThreadInfo);
       }
     }
   } else {
@@ -6491,15 +6499,15 @@ static int queryTestProcess() {
 
   uint64_t startFrom = 0;
   for (int i = 0; i < threads; i++) {
-    threadInfo *t_info = infosOfSub + i;
-    t_info->threadID = i;
+    threadInfo *pThreadInfo = infosOfSub + i;
+    pThreadInfo->threadID = i;
 
-    t_info->start_table_from = startFrom;
-    t_info->ntables = i<b?a+1:a;
-    t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
-    startFrom = t_info->end_table_to + 1;
-    t_info->taos = NULL; // TODO: workaround to use separate taos connection;
-    pthread_create(pidsOfSub + i, NULL, superTableQuery, t_info);
+    pThreadInfo->start_table_from = startFrom;
+    pThreadInfo->ntables = i<b?a+1:a;
+    pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
+    startFrom = pThreadInfo->end_table_to + 1;
+    pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+    pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
   }
 
   g_queryInfo.superQueryInfo.threadCnt = threads;
@@ -6546,7 +6554,7 @@ static void stable_sub_callback(
   }
 
   if (param)
-    appendResultToFile(res, ((threadInfo *)param)->fp);
+    fetchResult(res, (threadInfo *)param);
   // tao_unscribe() will free result.
 }
 
@@ -6559,7 +6567,7 @@ static void specified_sub_callback(
   }
 
   if (param)
-    appendResultToFile(res, ((threadInfo *)param)->fp);
+    fetchResult(res, (threadInfo *)param);
   // tao_unscribe() will free result.
 }
 
@@ -6613,18 +6621,15 @@ static void *superSubscribe(void *sarg) {
   }
 
   if (pThreadInfo->taos == NULL) {
-    TAOS * taos = NULL;
-    taos = taos_connect(g_queryInfo.host,
+    pThreadInfo->taos = taos_connect(g_queryInfo.host,
           g_queryInfo.user,
           g_queryInfo.password,
           g_queryInfo.dbName,
          g_queryInfo.port);
-    if (taos == NULL) {
+    if (pThreadInfo->taos == NULL) {
       errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
             pThreadInfo->threadID, taos_errstr(NULL));
       return NULL;
-    } else {
-      pThreadInfo->taos = taos;
     }
   }
 
@@ -6654,7 +6659,7 @@ static void *superSubscribe(void *sarg) {
             g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
             subSqlstr, i);
     if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-      sprintf(pThreadInfo->fp, "%s-%d",
+      sprintf(pThreadInfo->filePath, "%s-%d",
          g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
          pThreadInfo->threadID);
     }
@@ -6697,16 +6702,16 @@ static void *superSubscribe(void *sarg) {
 
       if (res) {
           if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-            sprintf(pThreadInfo->fp, "%s-%d",
+            sprintf(pThreadInfo->filePath, "%s-%d",
                 g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
                 pThreadInfo->threadID);
-            appendResultToFile(res, pThreadInfo->fp);
+            fetchResult(res, pThreadInfo);
           }
           if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-            sprintf(pThreadInfo->fp, "%s-%d",
+            sprintf(pThreadInfo->filePath, "%s-%d",
                 g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
                 pThreadInfo->threadID);
-            appendResultToFile(res, pThreadInfo->fp);
+            fetchResult(res, pThreadInfo);
           }
           consumed[tsubSeq] ++;
 
@@ -6747,21 +6752,18 @@ static void *superSubscribe(void *sarg) {
 
 static void *specifiedSubscribe(void *sarg) {
   threadInfo *pThreadInfo = (threadInfo *)sarg;
-  TAOS_SUB* tsub = NULL;
+//  TAOS_SUB* tsub = NULL;
 
   if (pThreadInfo->taos == NULL) {
-    TAOS * taos = NULL;
-    taos = taos_connect(g_queryInfo.host,
+    pThreadInfo->taos = taos_connect(g_queryInfo.host,
          g_queryInfo.user,
          g_queryInfo.password,
          g_queryInfo.dbName,
          g_queryInfo.port);
-    if (taos == NULL) {
+    if (pThreadInfo->taos == NULL) {
       errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
            pThreadInfo->threadID, taos_errstr(NULL));
      return NULL;
-    } else {
-      pThreadInfo->taos = taos;
     }
   }
 
@@ -6773,69 +6775,70 @@ static void *specifiedSubscribe(void *sarg) {
       return NULL;
   }
 
-  char topic[32] = {0};
-  sprintf(topic, "taosdemo-subscribe-%"PRIu64"", pThreadInfo->querySeq);
-  if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-    sprintf(pThreadInfo->fp, "%s-%d",
+  sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+          "taosdemo-subscribe-%"PRIu64"-%d",
+          pThreadInfo->querySeq,
+          pThreadInfo->threadID);
+  if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
+    sprintf(pThreadInfo->filePath, "%s-%d",
        g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
        pThreadInfo->threadID);
  }
-  tsub = subscribeImpl(
+  g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
      SPECIFIED_CLASS, pThreadInfo,
      g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
-      topic,
+      g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
      g_queryInfo.specifiedQueryInfo.subscribeRestart,
      g_queryInfo.specifiedQueryInfo.subscribeInterval);
-  if (NULL == tsub) {
+  if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
      taos_close(pThreadInfo->taos);
      return NULL;
  }
 
   // start loop to consume result
-  TAOS_RES* res = NULL;
 
-  int consumed;
 
+  g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
   while(1) {
     if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
       continue;
     }
 
-    res = taos_consume(tsub);
-    if (res) {
+    g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume(
+        g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]);
+    if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) {
      if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-        sprintf(pThreadInfo->fp, "%s-%d",
+        sprintf(pThreadInfo->filePath, "%s-%d",
            g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
            pThreadInfo->threadID);
-        appendResultToFile(res, pThreadInfo->fp);
+        fetchResult(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], pThreadInfo);
      }
 
-      consumed ++;
+      g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++;
      if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress)
-            && (consumed >=
+            && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >=
                g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
          printf("keepProgress:%d, resub specified query: %"PRIu64"\n",
                g_queryInfo.specifiedQueryInfo.subscribeKeepProgress,
                pThreadInfo->querySeq);
-          consumed = 0;
-          taos_unsubscribe(tsub,
+          g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
+          taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID],
                g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
-          tsub = subscribeImpl(
+          g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
                SPECIFIED_CLASS,
                pThreadInfo,
                g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
-                topic,
+                g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
                g_queryInfo.specifiedQueryInfo.subscribeRestart,
                g_queryInfo.specifiedQueryInfo.subscribeInterval);
-          if (NULL == tsub) {
+          if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
            taos_close(pThreadInfo->taos);
            return NULL;
          }
      }
    }
  }
-  taos_free_result(res);
-  taos_unsubscribe(tsub, 0);
+  taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
+  taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->querySeq], 0);
   taos_close(pThreadInfo->taos);
 
   return NULL;
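Note: `specifiedSubscribe()` above stops keeping its `TAOS_SUB`, consumed counter, and last result in locals and instead parks them in the `tsub[]`, `topic[]`, `consumed[]`, and `res[]` arrays added to `SpecifiedQueryInfo_S`, all indexed by `threadID`. A minimal sketch of that "parallel arrays keyed by thread id" layout, with opaque placeholder handles standing in for the TDengine types:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_SLOTS 8              /* stands in for MAX_QUERY_SQL_COUNT */

/* Opaque placeholders for TAOS_SUB / TAOS_RES in this sketch. */
typedef void sub_handle_t;
typedef void res_handle_t;

typedef struct {
    sub_handle_t *tsub[MAX_SLOTS];      /* one subscription per worker thread   */
    char          topic[MAX_SLOTS][32]; /* per-thread topic name                */
    int           consumed[MAX_SLOTS];  /* messages consumed since last resub   */
    res_handle_t *res[MAX_SLOTS];       /* last result handle per thread        */
} specified_query_info_t;

static specified_query_info_t g_info;

/* Each worker touches only the slot matching its own threadID, so this
 * bookkeeping needs no locking. */
static void worker_step(int threadID, uint64_t querySeq) {
    snprintf(g_info.topic[threadID], sizeof(g_info.topic[threadID]),
             "taosdemo-subscribe-%llu-%d", (unsigned long long)querySeq, threadID);
    g_info.consumed[threadID]++;
}

int main(void) {
    worker_step(3, 1);
    printf("%s consumed=%d\n", g_info.topic[3], g_info.consumed[3]);
    return 0;
}
```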
@@ -6905,18 +6908,18 @@ static int subscribeTestProcess() {
     for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
       for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
         uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
-        threadInfo *t_info = infos + seq;
-        t_info->threadID = seq;
-        t_info->querySeq = i;
-        t_info->taos = NULL; // TODO: workaround to use separate taos connection;
-        pthread_create(pids + seq, NULL, specifiedSubscribe, t_info);
+        threadInfo *pThreadInfo = infos + seq;
+        pThreadInfo->threadID = seq;
+        pThreadInfo->querySeq = i;
+        pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+        pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
       }
     }
   }
 
   //==== create threads for super table query
   if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
-    printf("%s() LN%d, super table query sqlCount %"PRIu64".\n",
+    debugPrint("%s() LN%d, super table query sqlCount %"PRIu64".\n",
           __func__, __LINE__,
           g_queryInfo.superQueryInfo.sqlCount);
   } else {
@@ -6955,17 +6958,17 @@ static int subscribeTestProcess() {
       uint64_t startFrom = 0;
       for (int j = 0; j < threads; j++) {
           uint64_t seq = i * threads + j;
-          threadInfo *t_info = infosOfStable + seq;
-          t_info->threadID = seq;
-          t_info->querySeq = i;
+          threadInfo *pThreadInfo = infosOfStable + seq;
+          pThreadInfo->threadID = seq;
+          pThreadInfo->querySeq = i;
 
-          t_info->start_table_from = startFrom;
-          t_info->ntables = j<b?a+1:a;
-          t_info->end_table_to = j<b?startFrom+a:startFrom+a-1;
-          startFrom = t_info->end_table_to + 1;
-          t_info->taos = NULL; // TODO: workaround to use separate taos connection;
+          pThreadInfo->start_table_from = startFrom;
+          pThreadInfo->ntables = j<b?a+1:a;
+          pThreadInfo->end_table_to = j<b?startFrom+a:startFrom+a-1;
+          startFrom = pThreadInfo->end_table_to + 1;
+          pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
          pthread_create(pidsOfStable + seq,
-                  NULL, superSubscribe, t_info);
+                  NULL, superSubscribe, pThreadInfo);
      }
  }
 
@@ -7244,47 +7247,47 @@ static void queryResult() {
   // query data
 
   pthread_t read_id;
-  threadInfo *rInfo = malloc(sizeof(threadInfo));
-  assert(rInfo);
-  rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
-  rInfo->start_table_from = 0;
+  threadInfo *pThreadInfo = malloc(sizeof(threadInfo));
+  assert(pThreadInfo);
+  pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
+  pThreadInfo->start_table_from = 0;
 
-  //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
+  //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
   if (g_args.use_metric) {
-    rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
-    rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
-    rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
-    tstrncpy(rInfo->tb_prefix,
+    pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
+    pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
+    pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
+    tstrncpy(pThreadInfo->tb_prefix,
          g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
   } else {
-    rInfo->ntables = g_args.num_of_tables;
-    rInfo->end_table_to = g_args.num_of_tables -1;
-    tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
+    pThreadInfo->ntables = g_args.num_of_tables;
+    pThreadInfo->end_table_to = g_args.num_of_tables -1;
+    tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
   }
 
-  rInfo->taos = taos_connect(
+  pThreadInfo->taos = taos_connect(
          g_Dbs.host,
          g_Dbs.user,
          g_Dbs.password,
          g_Dbs.db[0].dbName,
          g_Dbs.port);
-  if (rInfo->taos == NULL) {
+  if (pThreadInfo->taos == NULL) {
     errorPrint( "Failed to connect to TDengine, reason:%s\n",
            taos_errstr(NULL));
-    free(rInfo);
+    free(pThreadInfo);
     exit(-1);
   }
 
-  tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
+  tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
 
   if (!g_Dbs.use_metric) {
-    pthread_create(&read_id, NULL, readTable, rInfo);
+    pthread_create(&read_id, NULL, readTable, pThreadInfo);
   } else {
-    pthread_create(&read_id, NULL, readMetric, rInfo);
+    pthread_create(&read_id, NULL, readMetric, pThreadInfo);
   }
   pthread_join(read_id, NULL);
-  taos_close(rInfo->taos);
-  free(rInfo);
+  taos_close(pThreadInfo->taos);
+  free(pThreadInfo);
 }
 
 static void testCmdLine() {
 
@@ -22,7 +22,7 @@ from queue import Queue, Empty
 from .shared.config import Config
 from .shared.db import DbTarget, DbConn
 from .shared.misc import Logging, Helper, CrashGenError, Status, Progress, Dice
-from .shared.types import DirPath
+from .shared.types import DirPath, IpcStream
 
 # from crash_gen.misc import CrashGenError, Dice, Helper, Logging, Progress, Status
 # from crash_gen.db import DbConn, DbTarget
@@ -177,13 +177,12 @@ quorum 2
         return "127.0.0.1"
 
     def getServiceCmdLine(self): # to start the instance
-        cmdLine = []
         if Config.getConfig().track_memory_leaks:
             Logging.info("Invoking VALGRIND on service...")
-            cmdLine = ['valgrind', '--leak-check=yes']
-        # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
-        cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
-        return cmdLine
+            return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
+        else:
+            # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
+            return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
 
     def _getDnodes(self, dbc):
         dbc.query("show dnodes")
@@ -281,16 +280,16 @@ class TdeSubProcess:
         return '[TdeSubProc: pid = {}, status = {}]'.format(
             self.getPid(), self.getStatus() )
 
-    def getStdOut(self) -> BinaryIO :
+    def getIpcStdOut(self) -> IpcStream :
         if self._popen.universal_newlines : # alias of text_mode
             raise CrashGenError("We need binary mode for STDOUT IPC")
         # Logging.info("Type of stdout is: {}".format(type(self._popen.stdout)))
-        return typing.cast(BinaryIO, self._popen.stdout)
+        return typing.cast(IpcStream, self._popen.stdout)
 
-    def getStdErr(self) -> BinaryIO :
+    def getIpcStdErr(self) -> IpcStream :
         if self._popen.universal_newlines : # alias of text_mode
             raise CrashGenError("We need binary mode for STDERR IPC")
-        return typing.cast(BinaryIO, self._popen.stderr)
+        return typing.cast(IpcStream, self._popen.stderr)
 
     # Now it's always running, since we matched the life cycle
     # def isRunning(self):
@@ -301,11 +300,6 @@ class TdeSubProcess:
 
     def _start(self, cmdLine) -> Popen :
         ON_POSIX = 'posix' in sys.builtin_module_names
 
-        # Sanity check
-        # if self.subProcess: # already there
-        #     raise RuntimeError("Corrupt process state")
-
-
         # Prepare environment variables for coverage information
         # Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment
@@ -314,9 +308,8 @@ class TdeSubProcess:
 
         # print(myEnv)
         # print("Starting TDengine with env: ", myEnv.items())
-        # print("Starting TDengine via Shell: {}".format(cmdLineStr))
+        print("Starting TDengine: {}".format(cmdLine))
 
-        # useShell = True # Needed to pass environments into it
         return Popen(
             ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine,
             shell=True, # Always use shell, since we need to pass ENV vars
@@ -732,19 +725,19 @@ class ServiceManagerThread:
         self._ipcQueue = Queue() # type: Queue
         self._thread = threading.Thread( # First thread captures server OUTPUT
             target=self.svcOutputReader,
-            args=(subProc.getStdOut(), self._ipcQueue, logDir))
+            args=(subProc.getIpcStdOut(), self._ipcQueue, logDir))
         self._thread.daemon = True # thread dies with the program
         self._thread.start()
         time.sleep(0.01)
         if not self._thread.is_alive(): # What happened?
-            Logging.info("Failed to started process to monitor STDOUT")
+            Logging.info("Failed to start process to monitor STDOUT")
             self.stop()
             raise CrashGenError("Failed to start thread to monitor STDOUT")
         Logging.info("Successfully started process to monitor STDOUT")
 
         self._thread2 = threading.Thread( # 2nd thread captures server ERRORs
             target=self.svcErrorReader,
-            args=(subProc.getStdErr(), self._ipcQueue, logDir))
+            args=(subProc.getIpcStdErr(), self._ipcQueue, logDir))
         self._thread2.daemon = True # thread dies with the program
         self._thread2.start()
         time.sleep(0.01)
@@ -887,14 +880,19 @@ class ServiceManagerThread:
             print("\nNon-UTF8 server output: {}\n".format(bChunk.decode('cp437')))
             return None
 
-    def _textChunkGenerator(self, streamIn: BinaryIO, logDir: str, logFile: str
+    def _textChunkGenerator(self, streamIn: IpcStream, logDir: str, logFile: str
                             ) -> Generator[TextChunk, None, None]:
         '''
-        Take an input stream with binary data, produced a generator of decoded
-        "text chunks", and also save the original binary data in a log file.
+        Take an input stream with binary data (likely from Popen), produced a generator of decoded
+        "text chunks".
+
+        Side effect: it also save the original binary data in a log file.
         '''
         os.makedirs(logDir, exist_ok=True)
         logF = open(os.path.join(logDir, logFile), 'wb')
+        if logF is None:
+            Logging.error("Failed to open log file (binary write): {}/{}".format(logDir, logFile))
+            return
         for bChunk in iter(streamIn.readline, b''):
             logF.write(bChunk) # Write to log file immediately
             tChunk = self._decodeBinaryChunk(bChunk) # decode
@@ -902,14 +900,14 @@ class ServiceManagerThread:
             yield tChunk # TODO: split into actual text lines
 
         # At the end...
-        streamIn.close() # Close the stream
-        logF.close() # Close the output file
+        streamIn.close() # Close the incoming stream
+        logF.close() # Close the log file
 
-    def svcOutputReader(self, stdOut: BinaryIO, queue, logDir: str):
+    def svcOutputReader(self, ipcStdOut: IpcStream, queue, logDir: str):
         '''
         The infinite routine that processes the STDOUT stream for the sub process being managed.
 
-        :param stdOut: the IO stream object used to fetch the data from
+        :param ipcStdOut: the IO stream object used to fetch the data from
         :param queue: the queue where we dump the roughly parsed chunk-by-chunk text data
         :param logDir: where we should dump a verbatim output file
         '''
@@ -917,7 +915,7 @@ class ServiceManagerThread:
         # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
        # print("This is the svcOutput Reader...")
        # stdOut.readline() # Skip the first output? TODO: remove?
-        for tChunk in self._textChunkGenerator(stdOut, logDir, 'stdout.log') :
+        for tChunk in self._textChunkGenerator(ipcStdOut, logDir, 'stdout.log') :
             queue.put(tChunk) # tChunk garanteed not to be None
             self._printProgress("_i")
 
@@ -940,12 +938,12 @@ class ServiceManagerThread:
             Logging.info("EOF found TDengine STDOUT, marking the process as terminated")
             self.setStatus(Status.STATUS_STOPPED)
 
-    def svcErrorReader(self, stdErr: BinaryIO, queue, logDir: str):
+    def svcErrorReader(self, ipcStdErr: IpcStream, queue, logDir: str):
         # os.makedirs(logDir, exist_ok=True)
        # logFile = os.path.join(logDir,'stderr.log')
        # fErr = open(logFile, 'wb')
        # for line in iter(err.readline, b''):
-        for tChunk in self._textChunkGenerator(stdErr, logDir, 'stderr.log') :
+        for tChunk in self._textChunkGenerator(ipcStdErr, logDir, 'stderr.log') :
             queue.put(tChunk) # tChunk garanteed not to be None
            # fErr.write(line)
             Logging.info("TDengine STDERR: {}".format(tChunk))
|
@ -1,4 +1,4 @@
|
||||||
from typing import Any, List, Dict, NewType
|
from typing import Any, BinaryIO, List, Dict, NewType
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
|
|
||||||
DirPath = NewType('DirPath', str)
|
DirPath = NewType('DirPath', str)
|
||||||
|
@ -26,3 +26,5 @@ class TdDataType(Enum):
|
||||||
|
|
||||||
TdColumns = Dict[str, TdDataType]
|
TdColumns = Dict[str, TdDataType]
|
||||||
TdTags = Dict[str, TdDataType]
|
TdTags = Dict[str, TdDataType]
|
||||||
|
|
||||||
|
IpcStream = NewType('IpcStream', BinaryIO)
|