From e620e2e83a26c8e8784d203010ef914172607a95 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 17 May 2021 12:53:35 +0800 Subject: [PATCH 01/14] [td-4209]: add the uid check for super table metadata that is already cached in local buffer. --- src/client/inc/tscUtil.h | 2 +- src/client/inc/tsclient.h | 6 ++++-- src/client/src/tscSchemaUtil.c | 1 + src/client/src/tscServer.c | 28 +++++++++++++--------------- src/client/src/tscUtil.c | 21 ++++++++++----------- 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index b80dcfb577..47b2865313 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -298,7 +298,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild); uint32_t tscGetTableMetaSize(STableMeta* pTableMeta); CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta); uint32_t tscGetTableMetaMaxSize(); -int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf); +int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf); STableMeta* tscTableMetaDup(STableMeta* pTableMeta); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 6d94d270a7..4bfd3bc88f 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -67,14 +67,16 @@ typedef struct CChildTableMeta { int32_t vgId; STableId id; uint8_t tableType; - char sTableName[TSDB_TABLE_FNAME_LEN]; //super table name, not full name + char sTableName[TSDB_TABLE_FNAME_LEN]; // TODO: refactor super table name, not full name + uint64_t suid; // super table id } CChildTableMeta; typedef struct STableMeta { int32_t vgId; STableId id; uint8_t tableType; - char sTableName[TSDB_TABLE_FNAME_LEN]; + char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name + uint64_t suid; // super table id int16_t sversion; int16_t tversion; STableComInfo tableInfo; diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index 2ea382132b..114fc8ee73 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -94,6 +94,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) { pTableMeta->tableType = pTableMetaMsg->tableType; pTableMeta->vgId = pTableMetaMsg->vgroup.vgId; + pTableMeta->suid = pTableMetaMsg->suid; pTableMeta->tableInfo = (STableComInfo) { .numOfTags = pTableMetaMsg->numOfTags, diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 33325c7fd7..664f2989a8 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1920,13 +1920,13 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscProcessTableMetaRsp(SSqlObj *pSql) { STableMetaMsg *pMetaMsg = (STableMetaMsg *)pSql->res.pRsp; - pMetaMsg->tid = htonl(pMetaMsg->tid); - pMetaMsg->sversion = htons(pMetaMsg->sversion); - pMetaMsg->tversion = htons(pMetaMsg->tversion); + pMetaMsg->tid = htonl(pMetaMsg->tid); + pMetaMsg->sversion = htons(pMetaMsg->sversion); + pMetaMsg->tversion = htons(pMetaMsg->tversion); pMetaMsg->vgroup.vgId = htonl(pMetaMsg->vgroup.vgId); - - pMetaMsg->uid = htobe64(pMetaMsg->uid); - pMetaMsg->contLen = htons(pMetaMsg->contLen); + pMetaMsg->uid = htobe64(pMetaMsg->uid); + pMetaMsg->suid = htobe64(pMetaMsg->suid); + pMetaMsg->contLen = htons(pMetaMsg->contLen); pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns); if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) && @@ -2537,19 +2537,16 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { pTableMetaInfo->pTableMeta = 
calloc(1, size); pTableMetaInfo->tableMetaSize = size; } else if (pTableMetaInfo->tableMetaSize < size) { - char *tmp = realloc(pTableMetaInfo->pTableMeta, size); - if (tmp == NULL) { + char *tmp = realloc(pTableMetaInfo->pTableMeta, size); + if (tmp == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } pTableMetaInfo->pTableMeta = (STableMeta *)tmp; - memset(pTableMetaInfo->pTableMeta, 0, size); - pTableMetaInfo->tableMetaSize = size; - } else { - //uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta); - memset(pTableMetaInfo->pTableMeta, 0, size); - pTableMetaInfo->tableMetaSize = size; } + memset(pTableMetaInfo->pTableMeta, 0, size); + pTableMetaInfo->tableMetaSize = size; + pTableMetaInfo->pTableMeta->tableType = -1; pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1; @@ -2565,8 +2562,9 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { STableMeta* pMeta = pTableMetaInfo->pTableMeta; if (pMeta->id.uid > 0) { + // in case of child table, here only get the if (pMeta->tableType == TSDB_CHILD_TABLE) { - int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf); + int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, buf); if (code != TSDB_CODE_SUCCESS) { return getTableMetaFromMnode(pSql, pTableMetaInfo); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3d2cf44560..89749202ef 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2941,22 +2941,25 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) { assert(pTableMeta != NULL); CChildTableMeta* cMeta = calloc(1, sizeof(CChildTableMeta)); + cMeta->tableType = TSDB_CHILD_TABLE; - cMeta->vgId = pTableMeta->vgId; - cMeta->id = pTableMeta->id; + cMeta->vgId = pTableMeta->vgId; + cMeta->id = pTableMeta->id; + cMeta->suid = pTableMeta->suid; tstrncpy(cMeta->sTableName, pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN); return cMeta; } -int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) { +int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf) { assert(pChild != NULL && buf != NULL); -// uint32_t size = tscGetTableMetaMaxSize(); - STableMeta* p = buf;//calloc(1, size); - + STableMeta* p = buf; taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1); - if (p->id.uid > 0) { // tableMeta exists, build child table meta and return + + // tableMeta exists, build child table meta according to the super table meta + // the uid need to be checked in addition to the general name of the super table. + if (p->id.uid > 0 && pChild->suid == p->id.uid) { pChild->sversion = p->sversion; pChild->tversion = p->tversion; @@ -2964,13 +2967,9 @@ int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, v int32_t total = pChild->tableInfo.numOfColumns + pChild->tableInfo.numOfTags; memcpy(pChild->schema, p->schema, sizeof(SSchema) *total); - -// tfree(p); return TSDB_CODE_SUCCESS; } else { // super table has been removed, current tableMeta is also expired. 
remove it here taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); - -// tfree(p); return -1; } } From 07aed4fa708386e96b441ea1becfb89af80bea50 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 17 May 2021 15:23:44 +0800 Subject: [PATCH 02/14] [td-4209] --- src/client/src/tscServer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 664f2989a8..0e16369cad 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1925,7 +1925,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { pMetaMsg->tversion = htons(pMetaMsg->tversion); pMetaMsg->vgroup.vgId = htonl(pMetaMsg->vgroup.vgId); pMetaMsg->uid = htobe64(pMetaMsg->uid); - pMetaMsg->suid = htobe64(pMetaMsg->suid); + pMetaMsg->suid = pMetaMsg->suid; pMetaMsg->contLen = htons(pMetaMsg->contLen); pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns); From c824330bb5548f963f27255799c7f73d10765dcb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 17 May 2021 15:51:49 +0800 Subject: [PATCH 03/14] [td-225] refactor a sim script. --- tests/script/general/parser/commit.sim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/script/general/parser/commit.sim b/tests/script/general/parser/commit.sim index dfe521b92b..7c4c883fb1 100644 --- a/tests/script/general/parser/commit.sim +++ b/tests/script/general/parser/commit.sim @@ -68,7 +68,7 @@ while $loop <= $loops while $i < 10 sql select count(*) from $stb where t1 = $i if $data00 != $rowNum then - print expect $rowNum, actual: $data00 + print expect $rowNum , actual: $data00 return -1 endi $i = $i + 1 From 4cb5bedfaeaeeec84a79e7a72e1a279f4f9111f6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 18 May 2021 13:48:45 +0800 Subject: [PATCH 04/14] [TD-4232]: fix vnode queueWMsg & queuedWMsgSize counting under flow ctrl --- src/vnode/src/vnodeWrite.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 56ea32ccc0..16089c8e91 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -340,8 +340,11 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) { if (pWrite->processedCount >= 100) { vError("vgId:%d, msg:%p, failed to process since %s, retry:%d", pVnode->vgId, pWrite, tstrerror(code), pWrite->processedCount); - pWrite->processedCount = 1; - dnodeSendRpcVWriteRsp(pWrite->pVnode, pWrite, code); + void *handle = pWrite->rpcMsg.handle; + taosFreeQitem(pWrite); + vnodeRelease(pVnode); + SRpcMsg rpcRsp = {.handle = handle, .code = code}; + rpcSendResponse(&rpcRsp); } else { code = vnodePerformFlowCtrl(pWrite); if (code == 0) { From 6d448dcac1e522e3496c133d7d6d2a72dc2b7d5f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 18 May 2021 16:48:17 +0800 Subject: [PATCH 05/14] Hotfix/sangshuduo/td 4136 taosdemo records morethan 32767 (#6147) * [TD-4136]: taosdemo max records per req < 32767 * [TD-4136]: taosdemo check insert rows not more than 32767. check insert rows for progressive. * fix with answer_yes. 
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 48 ++++++++++++---------- tests/pytest/tools/taosdemoTestWithJson.py | 22 +++++----- 2 files changed, 37 insertions(+), 33 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 12262f7a32..f6ac4a75ba 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -516,6 +516,8 @@ static int taosRandom() #endif // ifdef Windows +static void prompt(); +static void prompt2(); static int createDatabasesAndStables(); static void createChildTables(); static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); @@ -1031,10 +1033,8 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { printf("# Print debug info: %d\n", arguments->debug_print); printf("# Print verbose info: %d\n", arguments->verbose_print); printf("###################################################################\n"); - if (!arguments->answer_yes) { - printf("Press enter key to continue\n\n"); - (void) getchar(); - } + + prompt(); } } @@ -3410,10 +3410,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_args.interlace_rows, g_args.num_of_RPR); printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n", g_args.num_of_RPR); - if (!g_args.answer_yes) { - printf(" press Enter key to continue or Ctrl-C to stop."); - (void)getchar(); - } + prompt2(); g_args.interlace_rows = g_args.num_of_RPR; } } else if (!interlaceRows) { @@ -3470,9 +3467,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_args.answer_yes = false; } } else if (!answerPrompt) { - g_args.answer_yes = false; + g_args.answer_yes = true; // default is no, mean answer_yes. } else { - printf("ERROR: failed to read json, confirm_parameter_prompt not found\n"); + errorPrint("%s", "failed to read json, confirm_parameter_prompt input mistake\n"); goto PARSE_OVER; } @@ -5342,6 +5339,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { int64_t start_time = pThreadInfo->start_time; int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; + verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); for (uint64_t i = 0; i < insertRows;) { @@ -6022,6 +6020,21 @@ static void *readMetric(void *sarg) { return NULL; } +static void prompt() +{ + if (!g_args.answer_yes) { + printf("Press enter key to continue\n\n"); + (void)getchar(); + } +} + +static void prompt2() +{ + if (!g_args.answer_yes) { + printf(" press Enter key to continue or Ctrl-C to stop."); + (void)getchar(); + } +} static int insertTestProcess() { @@ -6042,10 +6055,7 @@ static int insertTestProcess() { if (g_fpOfInsertResult) printfInsertMetaToFile(g_fpOfInsertResult); - if (!g_args.answer_yes) { - printf("Press enter key to continue\n\n"); - (void)getchar(); - } + prompt(); init_rand_data(); @@ -6317,10 +6327,7 @@ static int queryTestProcess() { &g_queryInfo.superQueryInfo.childTblCount); } - if (!g_args.answer_yes) { - printf("Press enter key to continue\n\n"); - (void)getchar(); - } + prompt(); if (g_args.debug_print || g_args.verbose_print) { printfQuerySystemInfo(taos); @@ -6673,10 +6680,7 @@ static int subscribeTestProcess() { printfQueryMeta(); resetAfterAnsiEscape(); - if (!g_args.answer_yes) { - printf("Press enter key to continue\n\n"); - (void) getchar(); - } + prompt(); TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, diff --git a/tests/pytest/tools/taosdemoTestWithJson.py b/tests/pytest/tools/taosdemoTestWithJson.py index f57af9ce5c..b2ecd54976 100644 --- 
a/tests/pytest/tools/taosdemoTestWithJson.py +++ b/tests/pytest/tools/taosdemoTestWithJson.py @@ -23,32 +23,32 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] else: - projPath = selfPath[:selfPath.find("tests")] + projPath = selfPath[: selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if "taosd" in files: rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] + if "packaging" not in rootRealPath: + buildPath = root[: len(root) - len("/build/bin")] break return buildPath - + def run(self): tdSql.prepare() buildPath = self.getBuildPath() - if (buildPath == ""): + if buildPath == "": tdLog.exit("taosd not found!") else: tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" - os.system("yes | %staosdemo -f tools/insert.json" % binPath) + binPath = buildPath + "/build/bin/" + os.system("%staosdemo -f tools/insert.json -y" % binPath) tdSql.execute("use db01") tdSql.query("select count(*) from stb01") From 4412c52f27bca02a0b645f24858d94010d1d1039 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 18 May 2021 21:36:07 +0800 Subject: [PATCH 06/14] Hotfix/sangshuduo/td 4187 taosdemo keepprogress (#6149) * [TD-4187]: taosdemo keepProgress. * add consumed. * [TD-4187]: taosdemo keepProgress. add resubAfterConsume process. * print resub msg for test. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/subscribe.json | 40 +++-- src/kit/taosdemo/taosdemo.c | 281 +++++++++++++++++++++++--------- 2 files changed, 234 insertions(+), 87 deletions(-) diff --git a/src/kit/taosdemo/subscribe.json b/src/kit/taosdemo/subscribe.json index fd33a2e2e2..9faf03a03d 100644 --- a/src/kit/taosdemo/subscribe.json +++ b/src/kit/taosdemo/subscribe.json @@ -1,17 +1,37 @@ { - "filetype":"subscribe", + "filetype": "subscribe", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030, "user": "root", "password": "taosdata", - "databases": "dbx", - "specified_table_query": - {"concurrent":1, "mode":"sync", "interval":5000, "restart":"yes", "keepProgress":"yes", - "sqls": [{"sql": "select avg(col1) from stb01 where col1 > 1;", "result": "./subscribe_res0.txt"}] - }, - "super_table_query": - {"stblname": "stb", "threads":1, "mode":"sync", "interval":10000, "restart":"yes", "keepProgress":"yes", - "sqls": [{"sql": "select col1 from xxxx where col1 > 10;", "result": "./subscribe_res1.txt"}] - } + "databases": "test", + "specified_table_query": { + "concurrent": 1, + "mode": "sync", + "interval": 1000, + "restart": "yes", + "keepProgress": "yes", + "resubAfterConsume": 10, + "sqls": [ + { + "sql": "select avg(col1) from meters where col1 > 1;", + "result": "./subscribe_res0.txt" + } + ] + }, + "super_table_query": { + "stblname": "meters", + "threads": 1, + "mode": "sync", + "interval": 1000, + "restart": "yes", + "keepProgress": "yes", + "sqls": [ + { + "sql": "select col1 from xxxx where col1 > 10;", + "result": "./subscribe_res1.txt" + } + ] + } } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f6ac4a75ba..08c1142070 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -227,7 +227,7 @@ typedef 
struct SColumn_S { char field[TSDB_COL_NAME_LEN + 1]; char dataType[MAX_TB_NAME_SIZE]; uint32_t dataLen; - char note[128]; + char note[128]; } StrColumn; typedef struct SSuperTable_S { @@ -360,10 +360,11 @@ typedef struct SpecifiedQueryInfo_S { uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms uint64_t queryTimes; - int subscribeRestart; + bool subscribeRestart; int subscribeKeepProgress; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; + int resubAfterConsume[MAX_QUERY_SQL_COUNT]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; uint64_t totalQueried; } SpecifiedQueryInfo; @@ -374,7 +375,7 @@ typedef struct SuperQueryInfo_S { uint32_t threadCnt; uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms - int subscribeRestart; + bool subscribeRestart; int subscribeKeepProgress; uint64_t queryTimes; int64_t childTblCount; @@ -382,6 +383,7 @@ typedef struct SuperQueryInfo_S { uint64_t sqlCount; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; + int resubAfterConsume[MAX_QUERY_SQL_COUNT]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char* childTblName; @@ -399,7 +401,7 @@ typedef struct SQueryMetaInfo_S { char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest SpecifiedQueryInfo specifiedQueryInfo; - SuperQueryInfo superQueryInfo; + SuperQueryInfo superQueryInfo; uint64_t totalQueried; } SQueryMetaInfo; @@ -521,8 +523,8 @@ static void prompt2(); static int createDatabasesAndStables(); static void createChildTables(); static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); -static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, - char* sqlstr, char *resultFile); +static int postProceSql(char *host, struct sockaddr_in *pServAddr, + uint16_t port, char* sqlstr, char *resultFile); /* ************ Global variables ************ */ @@ -1141,12 +1143,14 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) { totalLen += len; } - verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", __func__, __LINE__, databuf, resultFile); + verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", + __func__, __LINE__, databuf, resultFile); appendResultBufToFile(databuf, resultFile); free(databuf); } -static void selectAndGetResult(threadInfo *pThreadInfo, char *command, char* resultFile) +static void selectAndGetResult( + threadInfo *pThreadInfo, char *command, char* resultFile) { if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { TAOS_RES *res = taos_query(pThreadInfo->taos, command); @@ -1291,13 +1295,15 @@ static void init_rand_data() { static int printfInsertMeta() { SHOW_PARSE_RESULT_START(); - printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); + printf("host: \033[33m%s:%u\033[0m\n", + g_Dbs.host, g_Dbs.port); printf("user: \033[33m%s\033[0m\n", g_Dbs.user); printf("password: \033[33m%s\033[0m\n", g_Dbs.password); printf("configDir: \033[33m%s\033[0m\n", configDir); printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); - printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); + printf("thread num of create table: \033[33m%d\033[0m\n", + g_Dbs.threadCountByCreateTbl); printf("top insert interval: \033[33m%"PRIu64"\033[0m\n", g_args.insert_interval); printf("number of records per req: \033[33m%"PRIu64"\033[0m\n", @@ -1309,7 +1315,8 @@ static int printfInsertMeta() { 
for (int i = 0; i < g_Dbs.dbCount; i++) { printf("database[\033[33m%d\033[0m]:\n", i); - printf(" database[%d] name: \033[33m%s\033[0m\n", i, g_Dbs.db[i].dbName); + printf(" database[%d] name: \033[33m%s\033[0m\n", + i, g_Dbs.db[i].dbName); if (0 == g_Dbs.db[i].drop) { printf(" drop: \033[33mno\033[0m\n"); } else { @@ -1317,40 +1324,51 @@ static int printfInsertMeta() { } if (g_Dbs.db[i].dbCfg.blocks > 0) { - printf(" blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks); + printf(" blocks: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.blocks); } if (g_Dbs.db[i].dbCfg.cache > 0) { - printf(" cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache); + printf(" cache: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.cache); } if (g_Dbs.db[i].dbCfg.days > 0) { - printf(" days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days); + printf(" days: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.days); } if (g_Dbs.db[i].dbCfg.keep > 0) { - printf(" keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep); + printf(" keep: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.keep); } if (g_Dbs.db[i].dbCfg.replica > 0) { - printf(" replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica); + printf(" replica: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.replica); } if (g_Dbs.db[i].dbCfg.update > 0) { - printf(" update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update); + printf(" update: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.update); } if (g_Dbs.db[i].dbCfg.minRows > 0) { - printf(" minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows); + printf(" minRows: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.minRows); } if (g_Dbs.db[i].dbCfg.maxRows > 0) { - printf(" maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows); + printf(" maxRows: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.maxRows); } if (g_Dbs.db[i].dbCfg.comp > 0) { printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp); } if (g_Dbs.db[i].dbCfg.walLevel > 0) { - printf(" walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel); + printf(" walLevel: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.walLevel); } if (g_Dbs.db[i].dbCfg.fsync > 0) { - printf(" fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync); + printf(" fsync: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.fsync); } if (g_Dbs.db[i].dbCfg.quorum > 0) { - printf(" quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum); + printf(" quorum: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.quorum); } if (g_Dbs.db[i].dbCfg.precision[0] != 0) { if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) @@ -1543,21 +1561,26 @@ static void printfInsertMetaToFile(FILE* fp) { if (g_Dbs.db[i].dbCfg.precision[0] != 0) { if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision); + fprintf(fp, " precision: %s\n", + g_Dbs.db[i].dbCfg.precision); } else { - fprintf(fp, " precision error: %s\n", g_Dbs.db[i].dbCfg.precision); + fprintf(fp, " precision error: %s\n", + g_Dbs.db[i].dbCfg.precision); } } - fprintf(fp, " super table count: %"PRIu64"\n", g_Dbs.db[i].superTblCount); + fprintf(fp, " super table count: %"PRIu64"\n", + g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { fprintf(fp, " super table[%d]:\n", j); - fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName); + fprintf(fp, " stbName: %s\n", + g_Dbs.db[i].superTbls[j].sTblName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { fprintf(fp, " autoCreateTable: %s\n", "no"); - } else if 
(AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + } else if (AUTO_CREATE_SUBTBL + == g_Dbs.db[i].superTbls[j].autoCreateTable) { fprintf(fp, " autoCreateTable: %s\n", "yes"); } else { fprintf(fp, " autoCreateTable: %s\n", "error"); @@ -1565,7 +1588,8 @@ static void printfInsertMetaToFile(FILE* fp) { if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { fprintf(fp, " childTblExists: %s\n", "no"); - } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + } else if (TBL_ALREADY_EXISTS + == g_Dbs.db[i].superTbls[j].childTblExists) { fprintf(fp, " childTblExists: %s\n", "yes"); } else { fprintf(fp, " childTblExists: %s\n", "error"); @@ -1596,8 +1620,10 @@ static void printfInsertMetaToFile(FILE* fp) { */ fprintf(fp, " interlaceRows: %"PRIu64"\n", g_Dbs.db[i].superTbls[j].interlaceRows); - fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); - fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); + fprintf(fp, " disorderRange: %d\n", + g_Dbs.db[i].superTbls[j].disorderRange); + fprintf(fp, " disorderRatio: %d\n", + g_Dbs.db[i].superTbls[j].disorderRatio); fprintf(fp, " maxSqlLen: %"PRIu64"\n", g_Dbs.db[i].superTbls[j].maxSqlLen); @@ -1605,23 +1631,29 @@ static void printfInsertMetaToFile(FILE* fp) { g_Dbs.db[i].superTbls[j].timeStampStep); fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp); - fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat); - fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile); - fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile); + fprintf(fp, " sampleFormat: %s\n", + g_Dbs.db[i].superTbls[j].sampleFormat); + fprintf(fp, " sampleFile: %s\n", + g_Dbs.db[i].superTbls[j].sampleFile); + fprintf(fp, " tagsFile: %s\n", + g_Dbs.db[i].superTbls[j].tagsFile); - fprintf(fp, " columnCount: %d\n ", g_Dbs.db[i].superTbls[j].columnCount); + fprintf(fp, " columnCount: %d\n ", + g_Dbs.db[i].superTbls[j].columnCount); for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); if ((0 == strncasecmp( g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", strlen("binary"))) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, + || (0 == strncasecmp( + g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", strlen("nchar")))) { fprintf(fp, "column[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); } else { - fprintf(fp, "column[%d]:%s ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType); + fprintf(fp, "column[%d]:%s ", + k, g_Dbs.db[i].superTbls[j].columns[k].dataType); } } fprintf(fp, "\n"); @@ -1634,7 +1666,8 @@ static void printfInsertMetaToFile(FILE* fp) { "binary", strlen("binary"))) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", strlen("nchar")))) { - fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, + fprintf(fp, "tag[%d]:%s(%d) ", + k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); } else { fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); @@ -4128,7 +4161,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { "query_times"); if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { if (specifiedQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, 
query_times: %"PRId64", need be a valid (>0) number\n", + errorPrint( + "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", __func__, __LINE__, specifiedQueryTimes->valueint); goto PARSE_OVER; @@ -4145,7 +4179,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); if (concurrent && concurrent->type == cJSON_Number) { if (concurrent->valueint <= 0) { - errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n", + errorPrint( + "%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); @@ -4184,15 +4219,15 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart"); if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { if (0 == strcmp("yes", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = 1; + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; } else if (0 == strcmp("no", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = 0; + g_queryInfo.specifiedQueryInfo.subscribeRestart = false; } else { printf("ERROR: failed to read json, subscribe restart error\n"); goto PARSE_OVER; } } else { - g_queryInfo.specifiedQueryInfo.subscribeRestart = 1; + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; } cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress"); @@ -4237,13 +4272,29 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { printf("ERROR: failed to read json, sql not found\n"); goto PARSE_OVER; } - tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], + sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + + cJSON* resubAfterConsume = + cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); + if (resubAfterConsume + && resubAfterConsume->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] + = resubAfterConsume->valueint; + } else if (!resubAfterConsume) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = 1; + } cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) { - tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); + if ((NULL != result) && (result->type == cJSON_String) + && (result->valuestring != NULL)) { + tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); } else if (NULL == result) { - memset(g_queryInfo.specifiedQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); + memset(g_queryInfo.specifiedQueryInfo.result[j], + 0, MAX_FILE_NAME_LEN); } else { printf("ERROR: failed to read json, super query result file not found\n"); goto PARSE_OVER; @@ -4350,27 +4401,27 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) { if (0 == strcmp("yes", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = 1; + g_queryInfo.superQueryInfo.subscribeRestart = true; } else if (0 == strcmp("no", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = 0; + g_queryInfo.superQueryInfo.subscribeRestart 
= false; } else { printf("ERROR: failed to read json, subscribe restart error\n"); goto PARSE_OVER; } } else { - g_queryInfo.superQueryInfo.subscribeRestart = 1; + g_queryInfo.superQueryInfo.subscribeRestart = true; } - cJSON* subkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); - if (subkeepProgress && - subkeepProgress->type == cJSON_String - && subkeepProgress->valuestring != NULL) { - if (0 == strcmp("yes", subkeepProgress->valuestring)) { + cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); + if (superkeepProgress && + superkeepProgress->type == cJSON_String + && superkeepProgress->valuestring != NULL) { + if (0 == strcmp("yes", superkeepProgress->valuestring)) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", subkeepProgress->valuestring)) { + } else if (0 == strcmp("no", superkeepProgress->valuestring)) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; } else { - printf("ERROR: failed to read json, subscribe keepProgress error\n"); + printf("ERROR: failed to read json, subscribe super table keepProgress error\n"); goto PARSE_OVER; } } else { @@ -4408,6 +4459,18 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + cJSON* superResubAfterConsume = + cJSON_GetObjectItem(sql, "resubAfterConsume"); + if (superResubAfterConsume + && superResubAfterConsume->type == cJSON_Number) { + g_queryInfo.superQueryInfo.resubAfterConsume[j] = + superResubAfterConsume->valueint; + } else if (!superResubAfterConsume) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.superQueryInfo.resubAfterConsume[j] = 1; + } + cJSON *result = cJSON_GetObjectItem(sql, "result"); if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ @@ -5506,7 +5569,8 @@ static void callBack(void *param, TAOS_RES *res, int code) { int rand_num = taosRandom() % 100; if (0 != pThreadInfo->superTblInfo->disorderRatio && rand_num < pThreadInfo->superTblInfo->disorderRatio) { - int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); + int64_t d = pThreadInfo->lastTs + - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); generateRowData(data, d, pThreadInfo->superTblInfo); } else { generateRowData(data, pThreadInfo->lastTs += 1000, pThreadInfo->superTblInfo); @@ -6480,17 +6544,18 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c } static TAOS_SUB* subscribeImpl( - TAOS *taos, char *sql, char* topic, char* resultFileName) { + TAOS *taos, char *sql, char* topic, bool restart, + char* resultFileName) { TAOS_SUB* tsub = NULL; if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { tsub = taos_subscribe(taos, - g_queryInfo.specifiedQueryInfo.subscribeRestart, + restart, topic, sql, subscribe_callback, (void*)resultFileName, g_queryInfo.specifiedQueryInfo.subscribeInterval); } else { tsub = taos_subscribe(taos, - g_queryInfo.specifiedQueryInfo.subscribeRestart, + restart, topic, sql, NULL, NULL, 0); } @@ -6551,7 +6616,9 @@ static void *superSubscribe(void *sarg) { uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; debugPrint("%s() LN%d, subSeq=%"PRIu64" subSqlstr: %s\n", __func__, __LINE__, subSeq, subSqlstr); - tsub[subSeq] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile); + tsub[subSeq] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, + 
g_queryInfo.superQueryInfo.subscribeRestart, + tmpFile); if (NULL == tsub[subSeq]) { taos_close(pThreadInfo->taos); return NULL; @@ -6559,17 +6626,22 @@ static void *superSubscribe(void *sarg) { } } + int consumed[MAX_QUERY_SQL_COUNT]; + for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) + consumed[i] = 0; + // start loop to consume result TAOS_RES* res = NULL; while(1) { - for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { continue; } uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; - taosMsleep(100); // ms + taosMsleep(g_queryInfo.superQueryInfo.subscribeInterval); // ms res = taos_consume(tsub[subSeq]); if (res) { char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; @@ -6579,6 +6651,28 @@ static void *superSubscribe(void *sarg) { pThreadInfo->threadID); appendResultToFile(res, tmpFile); } + consumed[j] ++; + + if ((g_queryInfo.superQueryInfo.subscribeKeepProgress) + && (consumed[j] >= + g_queryInfo.superQueryInfo.resubAfterConsume[j])) { + printf("keepProgress:%d, resub super table query: %d\n", + g_queryInfo.superQueryInfo.subscribeKeepProgress, j); + taos_unsubscribe(tsub[subSeq], + g_queryInfo.superQueryInfo.subscribeKeepProgress); + consumed[j]= 0; + uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; + debugPrint("%s() LN%d, subSeq=%"PRIu64" subSqlstr: %s\n", + __func__, __LINE__, subSeq, subSqlstr); + tsub[subSeq] = subscribeImpl( + pThreadInfo->taos, subSqlstr, topic, + g_queryInfo.superQueryInfo.subscribeRestart, + tmpFile); + if (NULL == tsub[subSeq]) { + taos_close(pThreadInfo->taos); + return NULL; + } + } } } } @@ -6589,8 +6683,7 @@ static void *superSubscribe(void *sarg) { i <= pThreadInfo->end_table_to; i++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; - taos_unsubscribe(tsub[subSeq], - g_queryInfo.superQueryInfo.subscribeKeepProgress); + taos_unsubscribe(tsub[subSeq], 0); } } @@ -6635,40 +6728,68 @@ static void *specifiedSubscribe(void *sarg) { char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID); + g_queryInfo.specifiedQueryInfo.result[i], + pThreadInfo->threadID); } tsub[i] = subscribeImpl(pThreadInfo->taos, - g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); + g_queryInfo.specifiedQueryInfo.sql[i], topic, + g_queryInfo.specifiedQueryInfo.subscribeRestart, + tmpFile); if (NULL == tsub[i]) { taos_close(pThreadInfo->taos); return NULL; } } + // start loop to consume result TAOS_RES* res = NULL; + + int consumed[MAX_QUERY_SQL_COUNT]; + for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) + consumed[i] = 0; + while(1) { for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { continue; } - taosMsleep(1000); // ms + taosMsleep(g_queryInfo.specifiedQueryInfo.subscribeInterval); // ms res = taos_consume(tsub[i]); if (res) { char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID); + g_queryInfo.specifiedQueryInfo.result[i], + pThreadInfo->threadID); appendResultToFile(res, tmpFile); } + consumed[i] ++; + 
+ if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress) + && (consumed[i] >= + g_queryInfo.specifiedQueryInfo.resubAfterConsume[i])) { + printf("keepProgress:%d, resub specified query: %d\n", + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, i); + consumed[i] = 0; + taos_unsubscribe(tsub[i], + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + tsub[i] = subscribeImpl(pThreadInfo->taos, + g_queryInfo.specifiedQueryInfo.sql[i], topic, + g_queryInfo.specifiedQueryInfo.subscribeRestart, + tmpFile); + if (NULL == tsub[i]) { + taos_close(pThreadInfo->taos); + return NULL; + } + } } } } taos_free_result(res); for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - taos_unsubscribe(tsub[i], - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + taos_unsubscribe(tsub[i], 0); } taos_close(pThreadInfo->taos); @@ -6719,8 +6840,10 @@ static int subscribeTestProcess() { exit(-1); } - pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); - infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); + pids = malloc( + g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); + infos = malloc( + g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); exit(-1); @@ -6905,17 +7028,21 @@ static void setParaFromArg(){ if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) { g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR; } else { - for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) { - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE); + for (int i = g_Dbs.db[0].superTbls[0].columnCount; + i < g_args.num_of_CPR; i++) { + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, + "INT", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; g_Dbs.db[0].superTbls[0].columnCount++; } } - tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, + "INT", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; - tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, + "BINARY", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; g_Dbs.db[0].superTbls[0].tagCount = 2; } else { From 8e5681f1b004cb8b5d3982fb36f0ffd4d712e2ca Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 18 May 2021 21:36:35 +0800 Subject: [PATCH 07/14] [TD-4240]: taosdemo subscribe more than max query sql count. 
(#6154) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 08c1142070..92b5f6abff 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -6575,6 +6575,15 @@ static void *superSubscribe(void *sarg) { if (g_queryInfo.superQueryInfo.sqlCount == 0) return NULL; + if (g_queryInfo.superQueryInfo.sqlCount * pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { + errorPrint("The number %"PRId64" of sql count(%"PRIu64") multiple the table number(%"PRId64") of the thread is more than max query sql count: %d\n", + g_queryInfo.superQueryInfo.sqlCount * pThreadInfo->ntables, + g_queryInfo.superQueryInfo.sqlCount, + pThreadInfo->ntables, + MAX_QUERY_SQL_COUNT); + exit(-1); + } + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, From 5c03dc9ae0f3f2a8efac6d4978878cd69eecd26b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 19 May 2021 16:20:38 +0800 Subject: [PATCH 08/14] Hotfix/sangshuduo/td 4238 taosdemo async subscribe (#6161) * [TD-4238]: taosdemo async subscribe. * [TD-4238]: taosdemo async subscribe subsribe sql command do not use aggregation functions. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/async-sub.json | 41 +++ src/kit/taosdemo/taosdemo.c | 430 +++++++++++++++++--------------- 2 files changed, 267 insertions(+), 204 deletions(-) create mode 100644 src/kit/taosdemo/async-sub.json diff --git a/src/kit/taosdemo/async-sub.json b/src/kit/taosdemo/async-sub.json new file mode 100644 index 0000000000..a30a1be45c --- /dev/null +++ b/src/kit/taosdemo/async-sub.json @@ -0,0 +1,41 @@ +{ + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "test", + "specified_table_query": { + "concurrent": 1, + "mode": "async", + "interval": 1000, + "restart": "yes", + "keepProgress": "yes", + "resubAfterConsume": 10, + "sqls": [ + { + "sql": "select col1 from meters where col1 > 1;", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select col2 from meters where col2 > 1;", + "result": "./subscribe_res2.txt" + } + ] + }, + "super_table_query": { + "stblname": "meters", + "threads": 1, + "mode": "sync", + "interval": 1000, + "restart": "yes", + "keepProgress": "yes", + "sqls": [ + { + "sql": "select col1 from xxxx where col1 > 10;", + "result": "./subscribe_res1.txt" + } + ] + } +} diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 92b5f6abff..0831555d22 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -441,8 +441,9 @@ typedef struct SThreadInfo_S { uint64_t maxDelay; uint64_t minDelay; - // query + // seq of query or subscribe uint64_t querySeq; // sequence number of sql command + } threadInfo; #ifdef WINDOWS @@ -1107,7 +1108,6 @@ static void appendResultBufToFile(char *resultBuf, char *resultFile) } } - fprintf(fp, "%s", resultBuf); tmfclose(fp); } @@ -1150,7 +1150,7 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) { } static void selectAndGetResult( - threadInfo *pThreadInfo, char *command, char* resultFile) + threadInfo *pThreadInfo, char *command) { if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { TAOS_RES *res = taos_query(pThreadInfo->taos, command); @@ -1161,8 +1161,8 @@ static void selectAndGetResult( return; } - if ((resultFile) && (strlen(resultFile))) { - appendResultToFile(res, resultFile); + if 
((strlen(pThreadInfo->fp))) { + appendResultToFile(res, pThreadInfo->fp); } taos_free_result(res); @@ -1170,7 +1170,7 @@ static void selectAndGetResult( int retCode = postProceSql( g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port, command, - resultFile); + pThreadInfo->fp); if (0 != retCode) { printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); } @@ -6230,23 +6230,22 @@ static void *specifiedTableQuery(void *sarg) { uint64_t lastPrintTime = taosGetTimestampMs(); uint64_t startTs = taosGetTimestampMs(); + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->fp, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + } + while(queryTimes --) { if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms } - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - st = taosGetTimestampMs(); selectAndGetResult(pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); et = taosGetTimestampMs(); printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n", @@ -6332,13 +6331,12 @@ static void *superTableQuery(void *sarg) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { memset(sqlstr,0,sizeof(sqlstr)); replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.superQueryInfo.result[j][0] != 0) { - sprintf(tmpFile, "%s-%d", + sprintf(pThreadInfo->fp, "%s-%d", g_queryInfo.superQueryInfo.result[j], pThreadInfo->threadID); } - selectAndGetResult(pThreadInfo, sqlstr, tmpFile); + selectAndGetResult(pThreadInfo, sqlstr); totalQueried++; g_queryInfo.superQueryInfo.totalQueried ++; @@ -6407,7 +6405,7 @@ static int queryTestProcess() { threadInfo *infos = NULL; //==== create sub threads for query from specify table int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent; - int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; + uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; uint64_t startTs = taosGetTimestampMs(); @@ -6421,32 +6419,33 @@ static int queryTestProcess() { ERROR_EXIT("memory allocation failed for create threads\n"); } - for (int i = 0; i < nConcurrent; i++) { - for (int j = 0; j < nSqlCount; j++) { - threadInfo *t_info = infos + i * nSqlCount + j; - t_info->threadID = i * nSqlCount + j; - t_info->querySeq = j; + for (uint64_t i = 0; i < nSqlCount; i++) { + for (int j = 0; j < nConcurrent; j++) { + uint64_t seq = i * nConcurrent + j; + threadInfo *t_info = infos + seq; + t_info->threadID = seq; + t_info->querySeq = i; - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); - if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(taos); - free(infos); - free(pids); - errorPrint( "use database %s failed!\n\n", - g_queryInfo.dbName); - return -1; - } + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", 
g_queryInfo.dbName); + verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); + if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(taos); + free(infos); + free(pids); + errorPrint( "use database %s failed!\n\n", + g_queryInfo.dbName); + return -1; + } + } + + t_info->taos = NULL;// TODO: workaround to use separate taos connection; + + pthread_create(pids + seq, NULL, specifiedTableQuery, + t_info); } - - t_info->taos = NULL;// TODO: workaround to use separate taos connection; - - pthread_create(pids + i * nSqlCount + j, NULL, specifiedTableQuery, - t_info); - } } } else { g_queryInfo.specifiedQueryInfo.concurrent = 0; @@ -6531,7 +6530,8 @@ static int queryTestProcess() { return 0; } -static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { +static void stable_sub_callback( + TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { if (res == NULL || taos_errno(res) != 0) { errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", __func__, __LINE__, code, taos_errstr(res)); @@ -6539,22 +6539,44 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c } if (param) - appendResultToFile(res, (char*)param); + appendResultToFile(res, ((threadInfo *)param)->fp); + // tao_unscribe() will free result. +} + +static void specified_sub_callback( + TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { + if (res == NULL || taos_errno(res) != 0) { + errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", + __func__, __LINE__, code, taos_errstr(res)); + return; + } + + if (param) + appendResultToFile(res, ((threadInfo *)param)->fp); // tao_unscribe() will free result. } static TAOS_SUB* subscribeImpl( - TAOS *taos, char *sql, char* topic, bool restart, - char* resultFileName) { + threadInfo *pThreadInfo, + char *sql, char* topic, bool restart) +{ TAOS_SUB* tsub = NULL; if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { - tsub = taos_subscribe(taos, + tsub = taos_subscribe( + pThreadInfo->taos, restart, - topic, sql, subscribe_callback, (void*)resultFileName, + topic, sql, specified_sub_callback, (void*)pThreadInfo, g_queryInfo.specifiedQueryInfo.subscribeInterval); + } else if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { + tsub = taos_subscribe( + pThreadInfo->taos, + restart, + topic, sql, stable_sub_callback, (void*)pThreadInfo, + g_queryInfo.superQueryInfo.subscribeInterval); } else { - tsub = taos_subscribe(taos, + tsub = taos_subscribe( + pThreadInfo->taos, restart, topic, sql, NULL, NULL, 0); } @@ -6572,13 +6594,8 @@ static void *superSubscribe(void *sarg) { char subSqlstr[MAX_QUERY_SQL_LENGTH]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; - if (g_queryInfo.superQueryInfo.sqlCount == 0) - return NULL; - - if (g_queryInfo.superQueryInfo.sqlCount * pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { - errorPrint("The number %"PRId64" of sql count(%"PRIu64") multiple the table number(%"PRId64") of the thread is more than max query sql count: %d\n", - g_queryInfo.superQueryInfo.sqlCount * pThreadInfo->ntables, - g_queryInfo.superQueryInfo.sqlCount, + if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { + errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); exit(-1); @@ -6612,88 +6629,80 @@ static void *superSubscribe(void *sarg) { char topic[32] = {0}; for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { - for (int j = 0; j < 
g_queryInfo.superQueryInfo.sqlCount; j++) { - sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%d", i, j); + sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", + i, pThreadInfo->querySeq); memset(subSqlstr,0,sizeof(subSqlstr)); - replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], subSqlstr, i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[j][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.superQueryInfo.result[j], pThreadInfo->threadID); + replaceChildTblName( + g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], + subSqlstr, i); + if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->fp, "%s-%d", + g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); } - uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; - debugPrint("%s() LN%d, subSeq=%"PRIu64" subSqlstr: %s\n", - __func__, __LINE__, subSeq, subSqlstr); - tsub[subSeq] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - tmpFile); - if (NULL == tsub[subSeq]) { + debugPrint("%s() LN%d, [%d] subSqlstr: %s\n", + __func__, __LINE__, pThreadInfo->threadID, subSqlstr); + tsub[i] = subscribeImpl( + pThreadInfo, subSqlstr, topic, + g_queryInfo.superQueryInfo.subscribeRestart); + if (NULL == tsub[i]) { taos_close(pThreadInfo->taos); return NULL; } - } } - int consumed[MAX_QUERY_SQL_COUNT]; - for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) - consumed[i] = 0; - // start loop to consume result + int consumed[MAX_QUERY_SQL_COUNT]; + for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) { + consumed[i] = 0; + } TAOS_RES* res = NULL; + while(1) { for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { - for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { continue; } - uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; taosMsleep(g_queryInfo.superQueryInfo.subscribeInterval); // ms - res = taos_consume(tsub[subSeq]); + res = taos_consume(tsub[i]); if (res) { - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[j][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.superQueryInfo.result[j], + if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->fp, "%s-%d", + g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); - appendResultToFile(res, tmpFile); + appendResultToFile(res, pThreadInfo->fp); } - consumed[j] ++; + consumed[i] ++; if ((g_queryInfo.superQueryInfo.subscribeKeepProgress) - && (consumed[j] >= - g_queryInfo.superQueryInfo.resubAfterConsume[j])) { - printf("keepProgress:%d, resub super table query: %d\n", - g_queryInfo.superQueryInfo.subscribeKeepProgress, j); - taos_unsubscribe(tsub[subSeq], + && (consumed[i] >= + g_queryInfo.superQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { + printf("keepProgress:%d, resub super table query: %"PRIu64"\n", + g_queryInfo.superQueryInfo.subscribeKeepProgress, + pThreadInfo->querySeq); + taos_unsubscribe(tsub, g_queryInfo.superQueryInfo.subscribeKeepProgress); - consumed[j]= 0; - uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; - debugPrint("%s() LN%d, subSeq=%"PRIu64" subSqlstr: %s\n", - __func__, __LINE__, subSeq, subSqlstr); - tsub[subSeq] = subscribeImpl( - pThreadInfo->taos, subSqlstr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - tmpFile); - if (NULL == tsub[subSeq]) { + consumed[i]= 0; + 
tsub[i] = subscribeImpl( + pThreadInfo, subSqlstr, topic, + g_queryInfo.superQueryInfo.subscribeRestart + ); + if (NULL == tsub[i]) { taos_close(pThreadInfo->taos); return NULL; } } } - } } } taos_free_result(res); for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { - for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { - uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j; - taos_unsubscribe(tsub[subSeq], 0); - } + taos_unsubscribe(tsub[i], 0); } taos_close(pThreadInfo->taos); @@ -6702,10 +6711,7 @@ static void *superSubscribe(void *sarg) { static void *specifiedSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; - - if (g_queryInfo.specifiedQueryInfo.sqlCount == 0) - return NULL; + TAOS_SUB* tsub = NULL; if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; @@ -6732,76 +6738,61 @@ static void *specifiedSubscribe(void *sarg) { } char topic[32] = {0}; - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - sprintf(topic, "taosdemo-subscribe-%d", i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], + sprintf(topic, "taosdemo-subscribe-%"PRIu64"", pThreadInfo->querySeq); + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->fp, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); - } - tsub[i] = subscribeImpl(pThreadInfo->taos, - g_queryInfo.specifiedQueryInfo.sql[i], topic, - g_queryInfo.specifiedQueryInfo.subscribeRestart, - tmpFile); - if (NULL == tsub[i]) { - taos_close(pThreadInfo->taos); - return NULL; - } + } + tsub = subscribeImpl(pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], + topic, + g_queryInfo.specifiedQueryInfo.subscribeRestart); + if (NULL == tsub) { + taos_close(pThreadInfo->taos); + return NULL; } // start loop to consume result TAOS_RES* res = NULL; - int consumed[MAX_QUERY_SQL_COUNT]; - for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) - consumed[i] = 0; + int consumed; while(1) { - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { continue; } taosMsleep(g_queryInfo.specifiedQueryInfo.subscribeInterval); // ms - res = taos_consume(tsub[i]); + res = taos_consume(tsub); if (res) { - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], - pThreadInfo->threadID); - appendResultToFile(res, tmpFile); - } - consumed[i] ++; - - if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress) - && (consumed[i] >= - g_queryInfo.specifiedQueryInfo.resubAfterConsume[i])) { - printf("keepProgress:%d, resub specified query: %d\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, i); - consumed[i] = 0; - taos_unsubscribe(tsub[i], - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - tsub[i] = subscribeImpl(pThreadInfo->taos, - g_queryInfo.specifiedQueryInfo.sql[i], topic, - g_queryInfo.specifiedQueryInfo.subscribeRestart, - tmpFile); - if (NULL == tsub[i]) { + consumed ++; + if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress) + && (consumed >= + g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { + printf("keepProgress:%d, resub specified query: %"PRIu64"\n", + 
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, + pThreadInfo->querySeq); + consumed = 0; + taos_unsubscribe(tsub, + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + tsub = subscribeImpl( + pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], + topic, + g_queryInfo.specifiedQueryInfo.subscribeRestart + ); + if (NULL == tsub) { taos_close(pThreadInfo->taos); return NULL; - } - } + } + } } - } } taos_free_result(res); - - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - taos_unsubscribe(tsub[i], 0); - } - + taos_unsubscribe(tsub, 0); taos_close(pThreadInfo->taos); + return NULL; } @@ -6836,7 +6827,11 @@ static int subscribeTestProcess() { pthread_t *pids = NULL; threadInfo *infos = NULL; - //==== create sub threads for query for specified table + + pthread_t *pidsOfStable = NULL; + threadInfo *infosOfStable = NULL; + + //==== create threads for query for specified table if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { printf("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", __func__, __LINE__, @@ -6850,81 +6845,108 @@ static int subscribeTestProcess() { } pids = malloc( - g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); + g_queryInfo.specifiedQueryInfo.sqlCount * + g_queryInfo.specifiedQueryInfo.concurrent * + sizeof(pthread_t)); infos = malloc( - g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); + g_queryInfo.specifiedQueryInfo.sqlCount * + g_queryInfo.specifiedQueryInfo.concurrent * + sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); exit(-1); } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; - t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pids + i, NULL, specifiedSubscribe, t_info); + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { + uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; + threadInfo *t_info = infos + seq; + t_info->threadID = seq; + t_info->querySeq = i; + t_info->taos = NULL; // TODO: workaround to use separate taos connection; + pthread_create(pids + seq, NULL, specifiedSubscribe, t_info); + } } } - //==== create sub threads for super table query - pthread_t *pidsOfSub = NULL; - threadInfo *infosOfSub = NULL; - if ((g_queryInfo.superQueryInfo.sqlCount > 0) + //==== create threads for super table query + if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { + printf("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", + __func__, __LINE__, + g_queryInfo.specifiedQueryInfo.sqlCount); + } else { + if ((g_queryInfo.superQueryInfo.sqlCount > 0) && (g_queryInfo.superQueryInfo.threadCnt > 0)) { - pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * + pidsOfStable = malloc( + g_queryInfo.superQueryInfo.sqlCount * + g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t)); - infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * + infosOfStable = malloc( + g_queryInfo.superQueryInfo.sqlCount * + g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); - if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { - errorPrint("%s() LN%d, malloc failed for create threads\n", + if ((NULL == pidsOfStable) || (NULL == infosOfStable)) { + errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); - // taos_close(taos); - exit(-1); - } + // 
taos_close(taos); + exit(-1); + } - int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; - int threads = g_queryInfo.superQueryInfo.threadCnt; + int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; + int threads = g_queryInfo.superQueryInfo.threadCnt; - int64_t a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } + int64_t a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; - } + int64_t b = 0; + if (threads != 0) { + b = ntables % threads; + } - uint64_t startFrom = 0; - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infosOfSub + i; - t_info->threadID = i; + uint64_t startFrom = 0; + for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + for (int j = 0; j < threads; j++) { + uint64_t seq = i * threads + j; + threadInfo *t_info = infosOfStable + seq; + t_info->threadID = seq; + t_info->querySeq = i; - t_info->start_table_from = startFrom; - t_info->ntables = iend_table_to = i < b ? startFrom + a : startFrom + a - 1; - startFrom = t_info->end_table_to + 1; - t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pidsOfSub + i, NULL, superSubscribe, t_info); - } + t_info->start_table_from = startFrom; + t_info->ntables = jend_table_to = jend_table_to + 1; + t_info->taos = NULL; // TODO: workaround to use separate taos connection; + pthread_create(pidsOfStable + seq, + NULL, superSubscribe, t_info); + } + } - g_queryInfo.superQueryInfo.threadCnt = threads; + g_queryInfo.superQueryInfo.threadCnt = threads; - for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) { - pthread_join(pidsOfSub[i], NULL); + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + for (int j = 0; j < threads; j++) { + uint64_t seq = i * threads + j; + pthread_join(pidsOfStable[seq], NULL); + } + } } } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { - pthread_join(pids[i], NULL); + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { + uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; + pthread_join(pids[seq], NULL); + } } tmfree((char*)pids); tmfree((char*)infos); - tmfree((char*)pidsOfSub); - tmfree((char*)infosOfSub); + tmfree((char*)pidsOfStable); + tmfree((char*)infosOfStable); // taos_close(taos); return 0; } From 78b8473e03670ec434fe7fe30fbeaf7730beace2 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 19 May 2021 20:12:19 +0800 Subject: [PATCH 09/14] Hotfix/sangshuduo/td 4238 taosdemo async subscribe (#6162) * [TD-4238]: taosdemo async subscribe. * [TD-4238]: taosdemo async subscribe subsribe sql command do not use aggregation functions. * [TD-4238]: taosdemo async subscribe. interval. * [TD-4238]: taosdemo async subscribe. fix super table sub result file. 
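The async-subscribe rework above funnels both query flavors through a single subscribeImpl() helper: in ASYNC_MODE it hands taos_subscribe() a callback and lets the client library deliver result batches, while in sync mode it passes a NULL callback and the worker thread polls taos_consume() itself — the split that the new QUERY_CLASS argument selects. Below is a minimal stand-alone sketch of that pattern, not the taosdemo code; the callback signature and the helper names are assumptions for illustration, and error handling is trimmed.

    /* Sketch of the sync-vs-async subscription split described above.
     * Assumes the TDengine C client header <taos.h>; everything except the
     * taos_* calls is illustrative. */
    #include <taos.h>

    /* Assumed async callback shape: the library invokes it once per batch. */
    static void on_rows(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code) {
        (void)tsub; (void)param;
        if (code != 0 || res == NULL) return;
        TAOS_ROW row;
        while ((row = taos_fetch_row(res)) != NULL) {
            /* handle one row, e.g. append it to a per-thread result file */
        }
    }

    /* restart != 0 discards saved progress; interval is the consume period (ms). */
    static TAOS_SUB *subscribe_one(TAOS *taos, const char *topic, const char *sql,
                                   int restart, int interval, int async_mode) {
        if (async_mode) {
            /* async: the library drives on_rows, no explicit consume loop */
            return taos_subscribe(taos, restart, topic, sql, on_rows, NULL, interval);
        }
        /* sync: NULL callback, the caller polls taos_consume() itself */
        return taos_subscribe(taos, restart, topic, sql, NULL, NULL, interval);
    }

    /* Sync-mode loop, roughly what superSubscribe()/specifiedSubscribe() do;
     * taos_consume() is assumed to honor the subscribe interval, so no extra
     * sleep is added here (compare the follow-up hotfix that drops taosMsleep). */
    static void poll_sync(TAOS_SUB *tsub, int batches) {
        for (int i = 0; i < batches; i++) {
            TAOS_RES *res = taos_consume(tsub);
            if (res == NULL) continue;
            TAOS_ROW row;
            while ((row = taos_fetch_row(res)) != NULL) {
                /* handle one row */
            }
        }
        taos_unsubscribe(tsub, 0 /* do not keep progress */);
    }
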
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 61 +++++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 20 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 0831555d22..f8d3f83d47 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -114,12 +114,18 @@ typedef enum TALBE_EXISTS_EN { TBL_EXISTS_BUTT } TALBE_EXISTS_EN; -enum MODE { +enum enumSYNC_MODE { SYNC_MODE, ASYNC_MODE, MODE_BUT }; +typedef enum enumQUERY_CLASS { + SPECIFIED_CLASS, + STABLE_CLASS, + CLASS_BUT +} QUERY_CLASS; + typedef enum enum_INSERT_MODE { PROGRESSIVE_INSERT_MODE, INTERLACE_INSERT_MODE, @@ -6557,18 +6563,21 @@ static void specified_sub_callback( } static TAOS_SUB* subscribeImpl( + QUERY_CLASS class, threadInfo *pThreadInfo, - char *sql, char* topic, bool restart) + char *sql, char* topic, bool restart, uint64_t interval) { TAOS_SUB* tsub = NULL; - if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { + if ((SPECIFIED_CLASS == class) + && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) { tsub = taos_subscribe( pThreadInfo->taos, restart, topic, sql, specified_sub_callback, (void*)pThreadInfo, g_queryInfo.specifiedQueryInfo.subscribeInterval); - } else if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { + } else if ((STABLE_CLASS == class) + && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) { tsub = taos_subscribe( pThreadInfo->taos, restart, @@ -6578,7 +6587,7 @@ static TAOS_SUB* subscribeImpl( tsub = taos_subscribe( pThreadInfo->taos, restart, - topic, sql, NULL, NULL, 0); + topic, sql, NULL, NULL, interval); } if (tsub == NULL) { @@ -6629,9 +6638,14 @@ static void *superSubscribe(void *sarg) { char topic[32] = {0}; for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { + verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n", + __func__, __LINE__, + pThreadInfo->threadID, + pThreadInfo->start_table_from, + pThreadInfo->end_table_to, i); sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", i, pThreadInfo->querySeq); - memset(subSqlstr,0,sizeof(subSqlstr)); + memset(subSqlstr, 0, sizeof(subSqlstr)); replaceChildTblName( g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], subSqlstr, i); @@ -6644,8 +6658,10 @@ static void *superSubscribe(void *sarg) { debugPrint("%s() LN%d, [%d] subSqlstr: %s\n", __func__, __LINE__, pThreadInfo->threadID, subSqlstr); tsub[i] = subscribeImpl( + STABLE_CLASS, pThreadInfo, subSqlstr, topic, - g_queryInfo.superQueryInfo.subscribeRestart); + g_queryInfo.superQueryInfo.subscribeRestart, + g_queryInfo.superQueryInfo.subscribeInterval); if (NULL == tsub[i]) { taos_close(pThreadInfo->taos); return NULL; @@ -6687,8 +6703,10 @@ static void *superSubscribe(void *sarg) { g_queryInfo.superQueryInfo.subscribeKeepProgress); consumed[i]= 0; tsub[i] = subscribeImpl( + STABLE_CLASS, pThreadInfo, subSqlstr, topic, - g_queryInfo.superQueryInfo.subscribeRestart + g_queryInfo.superQueryInfo.subscribeRestart, + g_queryInfo.superQueryInfo.subscribeInterval ); if (NULL == tsub[i]) { taos_close(pThreadInfo->taos); @@ -6744,10 +6762,12 @@ static void *specifiedSubscribe(void *sarg) { g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); } - tsub = subscribeImpl(pThreadInfo, + tsub = subscribeImpl( + SPECIFIED_CLASS, pThreadInfo, g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], topic, - g_queryInfo.specifiedQueryInfo.subscribeRestart); + g_queryInfo.specifiedQueryInfo.subscribeRestart, + 
g_queryInfo.specifiedQueryInfo.subscribeInterval); if (NULL == tsub) { taos_close(pThreadInfo->taos); return NULL; @@ -6777,11 +6797,12 @@ static void *specifiedSubscribe(void *sarg) { taos_unsubscribe(tsub, g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); tsub = subscribeImpl( - pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - topic, - g_queryInfo.specifiedQueryInfo.subscribeRestart - ); + SPECIFIED_CLASS, + pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], + topic, + g_queryInfo.specifiedQueryInfo.subscribeRestart, + g_queryInfo.specifiedQueryInfo.subscribeInterval); if (NULL == tsub) { taos_close(pThreadInfo->taos); return NULL; @@ -6833,7 +6854,7 @@ static int subscribeTestProcess() { //==== create threads for query for specified table if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - printf("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", + debugPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); } else { @@ -6870,10 +6891,10 @@ static int subscribeTestProcess() { } //==== create threads for super table query - if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - printf("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", + if (g_queryInfo.superQueryInfo.sqlCount <= 0) { + printf("%s() LN%d, super table query sqlCount %"PRIu64".\n", __func__, __LINE__, - g_queryInfo.specifiedQueryInfo.sqlCount); + g_queryInfo.superQueryInfo.sqlCount); } else { if ((g_queryInfo.superQueryInfo.sqlCount > 0) && (g_queryInfo.superQueryInfo.threadCnt > 0)) { @@ -6906,8 +6927,8 @@ static int subscribeTestProcess() { b = ntables % threads; } - uint64_t startFrom = 0; for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + uint64_t startFrom = 0; for (int j = 0; j < threads; j++) { uint64_t seq = i * threads + j; threadInfo *t_info = infosOfStable + seq; From 4697cdfdafbc3e639a253f65fbae6bae4ea5c1a8 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 20 May 2021 14:01:32 +0800 Subject: [PATCH 10/14] Hotfix/sangshuduo/td 4238 taosdemo async subscribe (#6170) * [TD-4238]: taosdemo async subscribe. * [TD-4238]: taosdemo async subscribe subsribe sql command do not use aggregation functions. * [TD-4238]: taosdemo async subscribe. interval. * [TD-4238]: taosdemo async subscribe. fix super table sub result file. * fix specified sub sync mode. 
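This hotfix and the preceding rework both rely on the subscriber-thread layout set up in subscribeTestProcess(): one thread per (SQL statement, concurrency slot) pair, addressed as seq = sqlIndex * concurrent + slot, with each thread carrying its querySeq so it knows which statement and result file it owns. The following is a compact, self-contained sketch of that fan-out; the struct and function names are simplified stand-ins rather than the taosdemo definitions.

    /* One worker per (sql, slot) pair, indexed as seq = i * concurrent + j. */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct {
        int      threadID;   /* equals seq */
        uint64_t querySeq;   /* which SQL statement this thread subscribes to */
    } workerInfo;

    static void *worker(void *arg) {
        workerInfo *t = (workerInfo *)arg;
        /* ... subscribe to sql[t->querySeq], consume, write result-<threadID> ... */
        (void)t;
        return NULL;
    }

    static int launchSubscribers(uint64_t sqlCount, int concurrent) {
        pthread_t  *pids  = malloc(sqlCount * concurrent * sizeof(pthread_t));
        workerInfo *infos = malloc(sqlCount * concurrent * sizeof(workerInfo));
        if (pids == NULL || infos == NULL) return -1;

        for (uint64_t i = 0; i < sqlCount; i++) {
            for (int j = 0; j < concurrent; j++) {
                uint64_t seq = i * concurrent + j;
                infos[seq].threadID = (int)seq;
                infos[seq].querySeq = i;
                pthread_create(&pids[seq], NULL, worker, &infos[seq]);
            }
        }
        for (uint64_t s = 0; s < sqlCount * (uint64_t)concurrent; s++) {
            pthread_join(pids[s], NULL);
        }
        free(pids);
        free(infos);
        return 0;
    }
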
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f8d3f83d47..ae7ffd16b9 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -6682,13 +6682,18 @@ static void *superSubscribe(void *sarg) { continue; } - taosMsleep(g_queryInfo.superQueryInfo.subscribeInterval); // ms res = taos_consume(tsub[i]); if (res) { if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(pThreadInfo->fp, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); + g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + appendResultToFile(res, pThreadInfo->fp); + } + if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->fp, "%s-%d", + g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); appendResultToFile(res, pThreadInfo->fp); } consumed[i] ++; @@ -6783,9 +6788,15 @@ static void *specifiedSubscribe(void *sarg) { continue; } - taosMsleep(g_queryInfo.specifiedQueryInfo.subscribeInterval); // ms res = taos_consume(tsub); if (res) { + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->fp, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + appendResultToFile(res, pThreadInfo->fp); + } + consumed ++; if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress) && (consumed >= From f0b77fa3c3af7564054fe1c4be9210be0d5e8805 Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Thu, 20 May 2021 17:16:09 +0800 Subject: [PATCH 11/14] [TD-4258]: speedup nodejs fetchall() with taos_fetch_block (#6165) --- src/connector/nodejs/nodetaos/cinterface.js | 254 ++++++++--------- src/connector/nodejs/nodetaos/cursor.js | 121 ++++----- src/connector/nodejs/package-lock.json | 285 -------------------- src/connector/nodejs/package.json | 2 +- 4 files changed, 188 insertions(+), 474 deletions(-) delete mode 100644 src/connector/nodejs/package-lock.json diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 43a08a800a..f3961e3787 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -9,7 +9,7 @@ const ffi = require('ffi-napi'); const ArrayType = require('ref-array-napi'); const Struct = require('ref-struct-napi'); const FieldTypes = require('./constants'); -const errors = require ('./error'); +const errors = require('./error'); const TaosObjects = require('./taosobjects'); const { NULL_POINTER } = require('ref-napi'); @@ -22,7 +22,7 @@ function convertMicrosecondsToDatetime(time) { return new TaosObjects.TaosTimestamp(time * 0.001, true); } -function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { timestampConverter = convertMillisecondsToDatetime; if (micro == true) { timestampConverter = convertMicrosecondsToDatetime; @@ -44,14 +44,14 @@ function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false } return res; } -function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = new Array(data.length); 
for (let i = 0; i < data.length; i++) { if (data[i] == 0) { res[i] = false; } - else if (data[i] == 1){ + else if (data[i] == 1) { res[i] = true; } else if (data[i] == FieldTypes.C_BOOL_NULL) { @@ -60,29 +60,29 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { } return res; } -function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; while (currOffset < data.length) { - let d = data.readIntLE(currOffset,1); + let d = data.readIntLE(currOffset, 1); res.push(d == FieldTypes.C_TINYINT_NULL ? null : d); currOffset += nbytes; } return res; } -function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; while (currOffset < data.length) { - let d = data.readIntLE(currOffset,2); + let d = data.readIntLE(currOffset, 2); res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d); currOffset += nbytes; } return res; } -function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -93,7 +93,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { } return res; } -function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -104,7 +104,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { } return res; } -function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -115,7 +115,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { } return res; } -function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -126,7 +126,7 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { } return res; } -function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -142,7 +142,7 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { } return res; } -function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { +function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let dataEntry = data.slice(0, nbytes); //one entry in a row under a column; @@ -153,23 +153,23 @@ function convertNchar(data, 
num_of_rows, nbytes = 0, offset = 0, micro=false) { // Object with all the relevant converters from pblock data to javascript readable data let convertFunctions = { - [FieldTypes.C_BOOL] : convertBool, - [FieldTypes.C_TINYINT] : convertTinyint, - [FieldTypes.C_SMALLINT] : convertSmallint, - [FieldTypes.C_INT] : convertInt, - [FieldTypes.C_BIGINT] : convertBigint, - [FieldTypes.C_FLOAT] : convertFloat, - [FieldTypes.C_DOUBLE] : convertDouble, - [FieldTypes.C_BINARY] : convertBinary, - [FieldTypes.C_TIMESTAMP] : convertTimestamp, - [FieldTypes.C_NCHAR] : convertNchar + [FieldTypes.C_BOOL]: convertBool, + [FieldTypes.C_TINYINT]: convertTinyint, + [FieldTypes.C_SMALLINT]: convertSmallint, + [FieldTypes.C_INT]: convertInt, + [FieldTypes.C_BIGINT]: convertBigint, + [FieldTypes.C_FLOAT]: convertFloat, + [FieldTypes.C_DOUBLE]: convertDouble, + [FieldTypes.C_BINARY]: convertBinary, + [FieldTypes.C_TIMESTAMP]: convertTimestamp, + [FieldTypes.C_NCHAR]: convertNchar } // Define TaosField structure var char_arr = ArrayType(ref.types.char); var TaosField = Struct({ - 'name': char_arr, - }); + 'name': char_arr, +}); TaosField.fields.name.type.size = 65; TaosField.defineProperty('type', ref.types.char); TaosField.defineProperty('bytes', ref.types.short); @@ -183,7 +183,7 @@ TaosField.defineProperty('bytes', ref.types.short); * @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to * access this class directly and use it unless you understand what these functions do. */ -function CTaosInterface (config = null, pass = false) { +function CTaosInterface(config = null, pass = false) { ref.types.char_ptr = ref.refType(ref.types.char); ref.types.void_ptr = ref.refType(ref.types.void); ref.types.void_ptr2 = ref.refType(ref.types.void_ptr); @@ -196,64 +196,65 @@ function CTaosInterface (config = null, pass = false) { taoslibname = 'libtaos'; } this.libtaos = ffi.Library(taoslibname, { - 'taos_options': [ ref.types.int, [ ref.types.int , ref.types.void_ptr ] ], - 'taos_init': [ ref.types.void, [ ] ], + 'taos_options': [ref.types.int, [ref.types.int, ref.types.void_ptr]], + 'taos_init': [ref.types.void, []], //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port) - 'taos_connect': [ ref.types.void_ptr, [ ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int ] ], + 'taos_connect': [ref.types.void_ptr, [ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int]], //void taos_close(TAOS *taos) - 'taos_close': [ ref.types.void, [ ref.types.void_ptr ] ], - //int *taos_fetch_lengths(TAOS_RES *taos); - 'taos_fetch_lengths': [ ref.types.void_ptr, [ ref.types.void_ptr ] ], + 'taos_close': [ref.types.void, [ref.types.void_ptr]], + //int *taos_fetch_lengths(TAOS_RES *res); + 'taos_fetch_lengths': [ref.types.void_ptr, [ref.types.void_ptr]], //int taos_query(TAOS *taos, char *sqlstr) - 'taos_query': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.char_ptr ] ], - //int taos_affected_rows(TAOS *taos) - 'taos_affected_rows': [ ref.types.int, [ ref.types.void_ptr] ], + 'taos_query': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr]], + //int taos_affected_rows(TAOS_RES *res) + 'taos_affected_rows': [ref.types.int, [ref.types.void_ptr]], //int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) - 'taos_fetch_block': [ ref.types.int, [ ref.types.void_ptr, ref.types.void_ptr] ], + 'taos_fetch_block': [ref.types.int, [ref.types.void_ptr, 
ref.types.void_ptr]], //int taos_num_fields(TAOS_RES *res); - 'taos_num_fields': [ ref.types.int, [ ref.types.void_ptr] ], + 'taos_num_fields': [ref.types.int, [ref.types.void_ptr]], //TAOS_ROW taos_fetch_row(TAOS_RES *res) //TAOS_ROW is void **, but we set the return type as a reference instead to get the row - 'taos_fetch_row': [ ref.refType(ref.types.void_ptr2), [ ref.types.void_ptr ] ], + 'taos_fetch_row': [ref.refType(ref.types.void_ptr2), [ref.types.void_ptr]], + 'taos_print_row': [ref.types.int, [ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]], //int taos_result_precision(TAOS_RES *res) - 'taos_result_precision': [ ref.types.int, [ ref.types.void_ptr ] ], + 'taos_result_precision': [ref.types.int, [ref.types.void_ptr]], //void taos_free_result(TAOS_RES *res) - 'taos_free_result': [ ref.types.void, [ ref.types.void_ptr] ], + 'taos_free_result': [ref.types.void, [ref.types.void_ptr]], //int taos_field_count(TAOS *taos) - 'taos_field_count': [ ref.types.int, [ ref.types.void_ptr ] ], + 'taos_field_count': [ref.types.int, [ref.types.void_ptr]], //TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) - 'taos_fetch_fields': [ ref.refType(TaosField), [ ref.types.void_ptr ] ], + 'taos_fetch_fields': [ref.refType(TaosField), [ref.types.void_ptr]], //int taos_errno(TAOS *taos) - 'taos_errno': [ ref.types.int, [ ref.types.void_ptr] ], + 'taos_errno': [ref.types.int, [ref.types.void_ptr]], //char *taos_errstr(TAOS *taos) - 'taos_errstr': [ ref.types.char_ptr, [ ref.types.void_ptr] ], + 'taos_errstr': [ref.types.char_ptr, [ref.types.void_ptr]], //void taos_stop_query(TAOS_RES *res); - 'taos_stop_query': [ ref.types.void, [ ref.types.void_ptr] ], + 'taos_stop_query': [ref.types.void, [ref.types.void_ptr]], //char *taos_get_server_info(TAOS *taos); - 'taos_get_server_info': [ ref.types.char_ptr, [ ref.types.void_ptr ] ], + 'taos_get_server_info': [ref.types.char_ptr, [ref.types.void_ptr]], //char *taos_get_client_info(); - 'taos_get_client_info': [ ref.types.char_ptr, [ ] ], + 'taos_get_client_info': [ref.types.char_ptr, []], // ASYNC // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) - 'taos_query_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr ] ], + 'taos_query_a': [ref.types.void, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr]], // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); - 'taos_fetch_rows_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr ]], + 'taos_fetch_rows_a': [ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr]], // Subscription //TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) - 'taos_subscribe': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int] ], + 'taos_subscribe': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]], // TAOS_RES *taos_consume(TAOS_SUB *tsub) - 'taos_consume': [ ref.types.void_ptr, [ref.types.void_ptr] ], + 'taos_consume': [ref.types.void_ptr, [ref.types.void_ptr]], //void taos_unsubscribe(TAOS_SUB *tsub); - 'taos_unsubscribe': [ ref.types.void, [ ref.types.void_ptr ] ], + 'taos_unsubscribe': 
[ref.types.void, [ref.types.void_ptr]], // Continuous Query //TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), // int64_t stime, void *param, void (*callback)(void *)); - 'taos_open_stream': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr ] ], + 'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]], //void taos_close_stream(TAOS_STREAM *tstr); - 'taos_close_stream': [ ref.types.void, [ ref.types.void_ptr ] ] + 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]] }); if (pass == false) { @@ -264,7 +265,7 @@ function CTaosInterface (config = null, pass = false) { try { this._config = ref.allocCString(config); } - catch(err){ + catch (err) { throw "Attribute Error: config is expected as a str"; } } @@ -276,38 +277,38 @@ function CTaosInterface (config = null, pass = false) { return this; } CTaosInterface.prototype.config = function config() { - return this._config; - } -CTaosInterface.prototype.connect = function connect(host=null, user="root", password="taosdata", db=null, port=0) { - let _host,_user,_password,_db,_port; - try { + return this._config; +} +CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) { + let _host, _user, _password, _db, _port; + try { _host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL); } - catch(err) { + catch (err) { throw "Attribute Error: host is expected as a str"; } try { _user = ref.allocCString(user) } - catch(err) { + catch (err) { throw "Attribute Error: user is expected as a str"; } try { _password = ref.allocCString(password); } - catch(err) { + catch (err) { throw "Attribute Error: password is expected as a str"; } try { _db = db != null ? 
ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL); } - catch(err) { + catch (err) { throw "Attribute Error: db is expected as a str"; } try { _port = ref.alloc(ref.types.int, port); } - catch(err) { + catch (err) { throw TypeError("port is expected as an int") } let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port); @@ -324,10 +325,10 @@ CTaosInterface.prototype.close = function close(connection) { console.log("Connection is closed"); } CTaosInterface.prototype.query = function query(connection, sql) { - return this.libtaos.taos_query(connection, ref.allocCString(sql)); + return this.libtaos.taos_query(connection, ref.allocCString(sql)); } -CTaosInterface.prototype.affectedRows = function affectedRows(connection) { - return this.libtaos.taos_affected_rows(connection); +CTaosInterface.prototype.affectedRows = function affectedRows(result) { + return this.libtaos.taos_affected_rows(result); } CTaosInterface.prototype.useResult = function useResult(result) { @@ -337,8 +338,8 @@ CTaosInterface.prototype.useResult = function useResult(result) { pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0); for (let i = 0; i < pfields.length; i += 68) { //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type - fields.push( { - name: ref.readCString(ref.reinterpret(pfields,65,i)), + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 65, i)), type: pfields[i + 65], bytes: pfields[i + 66] }) @@ -347,11 +348,10 @@ CTaosInterface.prototype.useResult = function useResult(result) { return fields; } CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { - //let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data - let pblock = this.libtaos.taos_fetch_row(result); - let num_of_rows = 1; - if (ref.isNull(pblock) == true) { - return {block:null, num_of_rows:0}; + let pblock = ref.NULL_POINTER; + let num_of_rows = this.libtaos.taos_fetch_block(result, pblock); + if (ref.isNull(pblock.deref()) == true) { + return { block: null, num_of_rows: 0 }; } var fieldL = this.libtaos.taos_fetch_lengths(result); @@ -359,10 +359,10 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO); var fieldlens = []; - + if (ref.isNull(fieldL) == false) { - for (let i = 0; i < fields.length; i ++) { - let plen = ref.reinterpret(fieldL, 4, i*4); + for (let i = 0; i < fields.length; i++) { + let plen = ref.reinterpret(fieldL, 4, i * 4); let len = plen.readInt32LE(0); fieldlens.push(len); } @@ -370,21 +370,23 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { let blocks = new Array(fields.length); blocks.fill(null); - //num_of_rows = Math.abs(num_of_rows); + num_of_rows = Math.abs(num_of_rows); let offset = 0; + let ptr = pblock.deref(); + for (let i = 0; i < fields.length; i++) { - pdata = ref.reinterpret(pblock,8,i*8); - if(ref.isNull(pdata.readPointer())){ - blocks[i] = new Array(); - }else{ - pdata = ref.ref(pdata.readPointer()); - if (!convertFunctions[fields[i]['type']] ) { - throw new errors.DatabaseError("Invalid data type returned from database"); - } - blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro); - } + pdata = ref.reinterpret(ptr, 8, i * 8); + if (ref.isNull(pdata.readPointer())) { + blocks[i] = new Array(); + } else { + pdata = ref.ref(pdata.readPointer()); + if (!convertFunctions[fields[i]['type']]) { + throw new errors.DatabaseError("Invalid data type 
returned from database"); + } + blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro); + } } - return {blocks: blocks, num_of_rows:Math.abs(num_of_rows)} + return { blocks: blocks, num_of_rows } } CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) { let row = this.libtaos.taos_fetch_row(result); @@ -414,7 +416,7 @@ CTaosInterface.prototype.errStr = function errStr(result) { // Async CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) { // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param) - callback = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.int ], callback); + callback = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], callback); this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param); return param; } @@ -439,46 +441,46 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback, var fieldL = cti.libtaos.taos_fetch_lengths(result); var fieldlens = []; if (ref.isNull(fieldL) == false) { - - for (let i = 0; i < fields.length; i ++) { - let plen = ref.reinterpret(fieldL, 8, i*8); - let len = ref.get(plen,0,ref.types.int32); + + for (let i = 0; i < fields.length; i++) { + let plen = ref.reinterpret(fieldL, 8, i * 8); + let len = ref.get(plen, 0, ref.types.int32); fieldlens.push(len); } } - if (numOfRows2 > 0){ + if (numOfRows2 > 0) { for (let i = 0; i < fields.length; i++) { - if(ref.isNull(pdata.readPointer())){ - blocks[i] = new Array(); - }else{ - if (!convertFunctions[fields[i]['type']] ) { - throw new errors.DatabaseError("Invalid data type returned from database"); - } - let prow = ref.reinterpret(row,8,i*8); - prow = prow.readPointer(); - prow = ref.ref(prow); - blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro); - //offset += fields[i]['bytes'] * numOfRows2; - } + if (ref.isNull(pdata.readPointer())) { + blocks[i] = new Array(); + } else { + if (!convertFunctions[fields[i]['type']]) { + throw new errors.DatabaseError("Invalid data type returned from database"); + } + let prow = ref.reinterpret(row, 8, i * 8); + prow = prow.readPointer(); + prow = ref.ref(prow); + blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro); + //offset += fields[i]['bytes'] * numOfRows2; + } } } callback(param2, result2, numOfRows2, blocks); } - asyncCallbackWrapper = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.int ], asyncCallbackWrapper); + asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], asyncCallbackWrapper); this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param); return param; } // Fetch field meta data by result handle -CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) { +CTaosInterface.prototype.fetchFields_a = function fetchFields_a(result) { let pfields = this.fetchFields(result); let pfieldscount = this.numFields(result); let fields = []; if (ref.isNull(pfields) == false) { - pfields = ref.reinterpret(pfields, 68 * pfieldscount , 0); + pfields = ref.reinterpret(pfields, 68 * pfieldscount, 0); for (let i = 0; i < pfields.length; i += 68) { //0 - 64 = name //65 = type, 66 - 67 = bytes - fields.push( { - name: ref.readCString(ref.reinterpret(pfields,65,i)), + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 
65, i)), type: pfields[i + 65], bytes: pfields[i + 66] }) @@ -488,7 +490,7 @@ CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) { } // Stop a query by result handle CTaosInterface.prototype.stopQuery = function stopQuery(result) { - if (result != null){ + if (result != null) { this.libtaos.taos_stop_query(result); } else { @@ -509,13 +511,13 @@ CTaosInterface.prototype.subscribe = function subscribe(connection, restart, top try { sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL); } - catch(err) { + catch (err) { throw "Attribute Error: sql is expected as a str"; } try { topic = topic != null ? ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL); } - catch(err) { + catch (err) { throw TypeError("topic is expected as a str"); } @@ -539,8 +541,8 @@ CTaosInterface.prototype.consume = function consume(subscription) { pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0); for (let i = 0; i < pfields.length; i += 68) { //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type - fields.push( { - name: ref.readCString(ref.reinterpret(pfields,64,i)), + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 64, i)), bytes: pfields[i + 64], type: pfields[i + 66] }) @@ -548,7 +550,7 @@ CTaosInterface.prototype.consume = function consume(subscription) { } let data = []; - while(true) { + while (true) { let { blocks, num_of_rows } = this.fetchBlock(result, fields); if (num_of_rows == 0) { break; @@ -559,7 +561,7 @@ CTaosInterface.prototype.consume = function consume(subscription) { for (let j = 0; j < fields.length; j++) { rowBlock[j] = blocks[j][i]; } - data[data.length-1] = (rowBlock); + data[data.length - 1] = (rowBlock); } } return { data: data, fields: fields, result: result }; @@ -570,11 +572,11 @@ CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) { } // Continuous Query -CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime,stoppingCallback, param = ref.ref(ref.NULL)) { +CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) { try { sql = ref.allocCString(sql); } - catch(err) { + catch (err) { throw "Attribute Error: sql string is expected as a str"; } var cti = this; @@ -587,7 +589,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb let offset = 0; if (numOfRows2 > 0) { for (let i = 0; i < fields.length; i++) { - if (!convertFunctions[fields[i]['type']] ) { + if (!convertFunctions[fields[i]['type']]) { throw new errors.DatabaseError("Invalid data type returned from database"); } blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro); @@ -596,8 +598,8 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb } callback(param2, result2, blocks, fields); } - asyncCallbackWrapper = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2) ], asyncCallbackWrapper); - asyncStoppingCallbackWrapper = ffi.Callback( ref.types.void, [ ref.types.void_ptr ], stoppingCallback); + asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper); + asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback); let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, 
asyncStoppingCallbackWrapper); if (ref.isNull(streamHandle)) { throw new errors.TDError('Failed to open a stream with TDengine'); diff --git a/src/connector/nodejs/nodetaos/cursor.js b/src/connector/nodejs/nodetaos/cursor.js index e18e6c2500..f879d89d48 100644 --- a/src/connector/nodejs/nodetaos/cursor.js +++ b/src/connector/nodejs/nodetaos/cursor.js @@ -1,7 +1,7 @@ const ref = require('ref-napi'); require('./globalfunc.js') const CTaosInterface = require('./cinterface') -const errors = require ('./error') +const errors = require('./error') const TaosQuery = require('./taosquery') const { PerformanceObserver, performance } = require('perf_hooks'); module.exports = TDengineCursor; @@ -22,7 +22,7 @@ module.exports = TDengineCursor; * @property {fields} - Array of the field objects in order from left to right of the latest data retrieved * @since 1.0.0 */ -function TDengineCursor(connection=null) { +function TDengineCursor(connection = null) { //All parameters are store for sync queries only. this._rowcount = -1; this._connection = null; @@ -91,7 +91,7 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback return null; } - if (typeof options == 'function') { + if (typeof options == 'function') { callback = options; } if (typeof options != 'object') options = {} @@ -144,10 +144,10 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback } TDengineCursor.prototype._createAffectedResponse = function (num, time) { - return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)"; + return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)"; } TDengineCursor.prototype._createSetResponse = function (num, time) { - return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)"; + return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)"; } TDengineCursor.prototype.executemany = function executemany() { @@ -176,27 +176,22 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) { throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. 
First execute a query first"); } - let data = []; + let num_of_rows = this._chandle.affectedRows(this._result); + let data = new Array(num_of_rows); + this._rowcount = 0; - //let nodetime = 0; + let time = 0; const obs = new PerformanceObserver((items) => { time += items.getEntries()[0].duration; performance.clearMarks(); }); - /* - const obs2 = new PerformanceObserver((items) => { - nodetime += items.getEntries()[0].duration; - performance.clearMarks(); - }); - obs2.observe({ entryTypes: ['measure'] }); - performance.mark('nodea'); - */ obs.observe({ entryTypes: ['measure'] }); performance.mark('A'); - while(true) { - + while (true) { let blockAndRows = this._chandle.fetchBlock(this._result, this._fields); + // console.log(blockAndRows); + // break; let block = blockAndRows.blocks; let num_of_rows = blockAndRows.num_of_rows; if (num_of_rows == 0) { @@ -205,22 +200,24 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) { this._rowcount += num_of_rows; let numoffields = this._fields.length; for (let i = 0; i < num_of_rows; i++) { - data.push([]); - + // data.push([]); + let rowBlock = new Array(numoffields); for (let j = 0; j < numoffields; j++) { rowBlock[j] = block[j][i]; } - data[data.length-1] = (rowBlock); + data[this._rowcount - num_of_rows + i] = (rowBlock); + // data.push(rowBlock); } } + performance.mark('B'); performance.measure('query', 'A', 'B'); let response = this._createSetResponse(this._rowcount, time) console.log(response); - // this._connection._clearResultSet(); + // this._connection._clearResultSet(); let fields = this.fields; this._reset_result(); this.data = data; @@ -239,12 +236,12 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) { * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query * @since 1.0.0 */ -TDengineCursor.prototype.execute_a = function execute_a (operation, options, callback, param) { +TDengineCursor.prototype.execute_a = function execute_a(operation, options, callback, param) { if (operation == undefined) { throw new errors.ProgrammingError('No operation passed as argument'); return null; } - if (typeof options == 'function') { + if (typeof options == 'function') { //we expect the parameter after callback to be param param = callback; callback = options; @@ -265,14 +262,14 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal } if (resCode >= 0) { -// let fieldCount = cr._chandle.numFields(res2); -// if (fieldCount == 0) { -// //cr._chandle.freeResult(res2); -// return res2; -// } -// else { -// return res2; -// } + // let fieldCount = cr._chandle.numFields(res2); + // if (fieldCount == 0) { + // //cr._chandle.freeResult(res2); + // return res2; + // } + // else { + // return res2; + // } return res2; } @@ -317,7 +314,7 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal * }) */ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) { - if (typeof options == 'function') { + if (typeof options == 'function') { //we expect the parameter after callback to be param param = callback; callback = options; @@ -360,17 +357,17 @@ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callb for (let k = 0; k < fields.length; k++) { rowBlock[k] = block[k][j]; } - data[data.length-1] = rowBlock; + data[data.length - 1] = rowBlock; } } cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks! 
- callback(param2, result2, numOfRows2, {data:data,fields:fields}); + callback(param2, result2, numOfRows2, { data: data, fields: fields }); } } ref.writeObject(buf, 0, param); param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param - return {param:param,result:result}; + return { param: param, result: result }; } /** * Stop a query given the result handle. @@ -428,7 +425,7 @@ TDengineCursor.prototype.subscribe = function subscribe(config) { */ TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) { while (true) { - let { data, fields, result} = this._chandle.consume(subscription); + let { data, fields, result } = this._chandle.consume(subscription); callback(data, fields, result); } } @@ -450,30 +447,30 @@ TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) { * @return {Buffer} A buffer pointing to the stream handle * @since 1.3.0 */ - TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) { - let buf = ref.alloc('Object'); - ref.writeObject(buf, 0, param); +TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) { + let buf = ref.alloc('Object'); + ref.writeObject(buf, 0, param); - let asyncCallbackWrapper = function (param2, result2, blocks, fields) { - let data = []; - let num_of_rows = blocks[0].length; - for (let j = 0; j < num_of_rows; j++) { - data.push([]); - let rowBlock = new Array(fields.length); - for (let k = 0; k < fields.length; k++) { - rowBlock[k] = blocks[k][j]; - } - data[data.length-1] = rowBlock; - } - callback(param2, result2, blocks, fields); - } - return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf); - } - /** - * Close a stream - * @param {Buffer} - A buffer pointing to the handle of the stream to be closed - * @since 1.3.0 - */ - TDengineCursor.prototype.closeStream = function closeStream(stream) { - this._chandle.closeStream(stream); - } + let asyncCallbackWrapper = function (param2, result2, blocks, fields) { + let data = []; + let num_of_rows = blocks[0].length; + for (let j = 0; j < num_of_rows; j++) { + data.push([]); + let rowBlock = new Array(fields.length); + for (let k = 0; k < fields.length; k++) { + rowBlock[k] = blocks[k][j]; + } + data[data.length - 1] = rowBlock; + } + callback(param2, result2, blocks, fields); + } + return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf); +} +/** + * Close a stream + * @param {Buffer} - A buffer pointing to the handle of the stream to be closed + * @since 1.3.0 + */ +TDengineCursor.prototype.closeStream = function closeStream(stream) { + this._chandle.closeStream(stream); +} diff --git a/src/connector/nodejs/package-lock.json b/src/connector/nodejs/package-lock.json deleted file mode 100644 index 9ca174ccd1..0000000000 --- a/src/connector/nodejs/package-lock.json +++ /dev/null @@ -1,285 +0,0 @@ -{ - "name": "td2.0-connector", - "version": "2.0.6", - "lockfileVersion": 1, - "requires": true, - "dependencies": { - "array-index": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/array-index/-/array-index-1.0.0.tgz", - "integrity": "sha1-7FanSe4QPk4Ix5C5w1PfFgVbl/k=", - "requires": { - "debug": "^2.2.0", - "es6-symbol": "^3.0.2" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - } - } - }, - "d": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", - "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", - "requires": { - "es5-ext": "^0.10.50", - "type": "^1.0.1" - } - }, - "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "requires": { - "ms": "2.1.2" - } - }, - "es5-ext": { - "version": "0.10.53", - "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", - "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", - "requires": { - "es6-iterator": "~2.0.3", - "es6-symbol": "~3.1.3", - "next-tick": "~1.0.0" - } - }, - "es6-iterator": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", - "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", - "requires": { - "d": "1", - "es5-ext": "^0.10.35", - "es6-symbol": "^3.1.1" - } - }, - "es6-symbol": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", - "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", - "requires": { - "d": "^1.0.1", - "ext": "^1.1.2" - } - }, - "ext": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", - "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==", - "requires": { - "type": "^2.0.0" - }, - "dependencies": { - "type": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/type/-/type-2.1.0.tgz", - "integrity": "sha512-G9absDWvhAWCV2gmF1zKud3OyC61nZDwWvBL2DApaVFogI07CprggiQAOOjvp2NRjYWFzPyu7vwtDrQFq8jeSA==" - } - } - }, - "ffi-napi": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/ffi-napi/-/ffi-napi-3.1.0.tgz", - "integrity": "sha512-EsHO+sP2p/nUC/3l/l8m9niee1BLm4asUFDzkkBGR4kYVgp2KqdAYUomZhkKtzim4Fq7mcYHjpUaIHsMqs+E1g==", - "requires": { - "debug": "^4.1.1", - "get-uv-event-loop-napi-h": "^1.0.5", - "node-addon-api": "^2.0.0", - "node-gyp-build": "^4.2.1", - "ref-napi": "^2.0.1", - "ref-struct-di": "^1.1.0" - }, - "dependencies": { - "ref-napi": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-2.1.2.tgz", - "integrity": "sha512-aFl+vrIuLWUXMUTQGAwGAuSNLX3Ub5W3iVP8b7KyFFZUdn4+i4U1TXXTop0kCTUfGNu8glBGVz4lowkwMcPVVA==", - "requires": { - "debug": "^4.1.1", - "get-symbol-from-current-process-h": "^1.0.2", - "node-addon-api": "^2.0.0", - "node-gyp-build": "^4.2.1" - } - } - } - }, - "get-symbol-from-current-process-h": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/get-symbol-from-current-process-h/-/get-symbol-from-current-process-h-1.0.2.tgz", - "integrity": "sha512-syloC6fsCt62ELLrr1VKBM1ggOpMdetX9hTrdW77UQdcApPHLmf7CI7OKcN1c9kYuNxKcDe4iJ4FY9sX3aw2xw==" - }, - "get-uv-event-loop-napi-h": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/get-uv-event-loop-napi-h/-/get-uv-event-loop-napi-h-1.0.6.tgz", - "integrity": 
"sha512-t5c9VNR84nRoF+eLiz6wFrEp1SE2Acg0wS+Ysa2zF0eROes+LzOfuTaVHxGy8AbS8rq7FHEJzjnCZo1BupwdJg==", - "requires": { - "get-symbol-from-current-process-h": "^1.0.1" - } - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "next-tick": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", - "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" - }, - "node-addon-api": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-2.0.2.tgz", - "integrity": "sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA==" - }, - "node-gyp-build": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz", - "integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg==" - }, - "ref-array-napi": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ref-array-napi/-/ref-array-napi-1.2.1.tgz", - "integrity": "sha512-jQp2WWSucmxkqVfoNfm7yDlDeGu3liAbzqfwjNybL80ooLOCnCZpAK2woDInY+lxNOK/VlIVSqeDEYb4gVPuNQ==", - "requires": { - "array-index": "1", - "debug": "2", - "ref-napi": "^1.4.2" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "ref-napi": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz", - "integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==", - "requires": { - "debug": "^3.1.0", - "node-addon-api": "^2.0.0", - "node-gyp-build": "^4.2.1" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "requires": { - "ms": "^2.1.1" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - } - } - } - } - }, - "ref-napi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-3.0.1.tgz", - "integrity": "sha512-W3rcb0E+tlO9u9ySFnX5vifInwwPGToOfFgTZUHJBNiOBsW0NNvgHz2zJN7ctABo/2yIlgdPQUvuqqfORIF4LA==", - "requires": { - "debug": "^4.1.1", - "get-symbol-from-current-process-h": "^1.0.2", - "node-addon-api": "^2.0.0", - "node-gyp-build": "^4.2.1" - } - }, - "ref-struct-di": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ref-struct-di/-/ref-struct-di-1.1.1.tgz", - "integrity": "sha512-2Xyn/0Qgz89VT+++WP0sTosdm9oeowLP23wRJYhG4BFdMUrLj3jhwHZNEytYNYgtPKLNTP3KJX4HEgBvM1/Y2g==", - "requires": { - "debug": "^3.1.0" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "requires": { 
- "ms": "^2.1.1" - } - } - } - }, - "ref-struct-napi": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ref-struct-napi/-/ref-struct-napi-1.1.1.tgz", - "integrity": "sha512-YgS5/d7+kT5zgtySYI5ieH0hREdv+DabgDvoczxsui0f9VLm0rrDcWEj4DHKehsH+tJnVMsLwuyctWgvdEcVRw==", - "requires": { - "debug": "2", - "ref-napi": "^1.4.2" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "ref-napi": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz", - "integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==", - "requires": { - "debug": "^3.1.0", - "node-addon-api": "^2.0.0", - "node-gyp-build": "^4.2.1" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "requires": { - "ms": "^2.1.1" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - } - } - } - } - }, - "type": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", - "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==" - } - } -} diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index b39ce2c17d..d21b62108b 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,6 +1,6 @@ { "name": "td2.0-connector", - "version": "2.0.6", + "version": "2.0.7", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "directories": { From 608e67826de2c23cddab295f5fc9c7498c4bcc7a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 20 May 2021 18:41:55 +0800 Subject: [PATCH 12/14] [TD-4272]: taosdemo specify columns. (#6173) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 70 ++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index ae7ffd16b9..34302ac2b0 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -189,6 +189,8 @@ typedef struct { } SColDes; /* Used by main to communicate with parse_opt. */ +static char *g_dupstr = NULL; + typedef struct SArguments_S { char * metaFile; uint32_t test_mode; @@ -526,7 +528,6 @@ static int taosRandom() #endif // ifdef Windows static void prompt(); -static void prompt2(); static int createDatabasesAndStables(); static void createChildTables(); static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); @@ -679,8 +680,9 @@ static void printHelp() { "The data_type of columns, default: INT,INT,INT,INT."); printf("%s%s%s%s\n", indent, "-w", indent, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16"); - printf("%s%s%s%s\n", indent, "-l", indent, - "The number of columns per record. 
Default is 4."); + printf("%s%s%s%s%d\n", indent, "-l", indent, + "The number of columns per record. Default is 4. Max values is ", + MAX_NUM_DATATYPE); printf("%s%s%s%s\n", indent, "-T", indent, "The number of threads. Default is 10."); printf("%s%s%s%s\n", indent, "-i", indent, @@ -724,7 +726,6 @@ static bool isStringNumber(char *input) } static void parse_args(int argc, char *argv[], SArguments *arguments) { - char **sptr; for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-f") == 0) { @@ -851,20 +852,31 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->database = argv[++i]; } else if (strcmp(argv[i], "-l") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-l need a number following!\n"); - exit(EXIT_FAILURE); + if (argc == i+1) { + if (!isStringNumber(argv[i+1])) { + printHelp(); + errorPrint("%s", "\n\t-l need a number following!\n"); + exit(EXIT_FAILURE); + } } arguments->num_of_CPR = atoi(argv[++i]); + + if (arguments->num_of_CPR > MAX_NUM_DATATYPE) { + printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_DATATYPE); + prompt(); + arguments->num_of_CPR = MAX_NUM_DATATYPE; + } + + for (int col = arguments->num_of_CPR; col < MAX_NUM_DATATYPE; col++) { + arguments->datatype[col] = NULL; + } + } else if (strcmp(argv[i], "-b") == 0) { if (argc == i+1) { printHelp(); errorPrint("%s", "\n\t-b need valid string following!\n"); exit(EXIT_FAILURE); } - sptr = arguments->datatype; ++i; if (strstr(argv[i], ",") == NULL) { // only one col @@ -881,12 +893,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrint("%s", "-b: Invalid data_type!\n"); exit(EXIT_FAILURE); } - sptr[0] = argv[i]; + arguments->datatype[0] = argv[i]; } else { // more than one col int index = 0; - char *dupstr = strdup(argv[i]); - char *running = dupstr; + g_dupstr = strdup(argv[i]); + char *running = g_dupstr; char *token = strsep(&running, ","); while(token != NULL) { if (strcasecmp(token, "INT") @@ -899,16 +911,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) { printHelp(); - free(dupstr); + free(g_dupstr); errorPrint("%s", "-b: Invalid data_type!\n"); exit(EXIT_FAILURE); } - sptr[index++] = token; + arguments->datatype[index++] = token; token = strsep(&running, ","); if (index >= MAX_NUM_DATATYPE) break; } - free(dupstr); - sptr[index] = NULL; + arguments->datatype[index] = NULL; } } else if (strcmp(argv[i], "-w") == 0) { if ((argc == i+1) || @@ -3449,7 +3460,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_args.interlace_rows, g_args.num_of_RPR); printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n", g_args.num_of_RPR); - prompt2(); + prompt(); g_args.interlace_rows = g_args.num_of_RPR; } } else if (!interlaceRows) { @@ -5759,19 +5770,13 @@ static void startMultiThreadInsertData(int threads, char* db_name, && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) > superTblInfo->childTblCount)) { printf("WARNING: specified offset + limit > child table count!\n"); - if (!g_args.answer_yes) { - printf(" Press enter key to continue or Ctrl-C to stop\n\n"); - (void)getchar(); - } + prompt(); } if ((superTblInfo->childTblExists != TBL_NO_EXISTS) && (0 == superTblInfo->childTblLimit)) { printf("WARNING: specified limit = 0, which cannot find table name to insert or query! 
\n"); - if (!g_args.answer_yes) { - printf(" Press enter key to continue or Ctrl-C to stop\n\n"); - (void)getchar(); - } + prompt(); } superTblInfo->childTblName = (char*)calloc(1, @@ -6093,19 +6098,11 @@ static void *readMetric(void *sarg) { static void prompt() { if (!g_args.answer_yes) { - printf("Press enter key to continue\n\n"); + printf(" Press enter key to continue or Ctrl-C to stop\n\n"); (void)getchar(); } } -static void prompt2() -{ - if (!g_args.answer_yes) { - printf(" press Enter key to continue or Ctrl-C to stop."); - (void)getchar(); - } -} - static int insertTestProcess() { setupForAnsiEscape(); @@ -7325,6 +7322,9 @@ int main(int argc, char *argv[]) { } else { testCmdLine(); } + + if (g_dupstr) + free(g_dupstr); } return 0; From 4b016b75bb026f86bd5db56910409d6e2c17fa95 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 20 May 2021 19:08:42 +0800 Subject: [PATCH 13/14] [TD-4263]: update performance test script --- tests/perftest-scripts/perftest-query.sh | 2 +- tests/pytest/tools/taosdemoPerformance.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 9a16084683..c6d4687ed7 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -74,7 +74,7 @@ function runQueryPerfTest { python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT - python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT + #python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT } diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index b50180e2b3..a32cba167e 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -104,7 +104,7 @@ class taosdemoPerformace: return output def insertData(self): - os.system("taosdemo -f %s > taosdemoperf.txt" % self.generateJson()) + os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson()) self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'") self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'") self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'") From d324edadfe03a70a77cf73523802e8caf5e1086e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 21 May 2021 06:24:17 +0800 Subject: [PATCH 14/14] Hotfix/sangshuduo/td 4136 taosdemo records morethan 32767 (#6177) * [TD-4136]: taosdemo max records per req < 32767 * [TD-4136]: taosdemo check insert rows not more than 32767. check insert rows for progressive. * fix with answer_yes. * add extra prompt. 
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 34302ac2b0..34f5c87c44 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -3494,6 +3494,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { __func__, __LINE__); goto PARSE_OVER; } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { + printf("NOTICE: number of records per request value %"PRIu64" > %d\n\n", + numRecPerReq->valueint, MAX_RECORDS_PER_REQ); + printf(" number of records per request value will be set to %d\n\n", + MAX_RECORDS_PER_REQ); + prompt(); numRecPerReq->valueint = MAX_RECORDS_PER_REQ; } g_args.num_of_RPR = numRecPerReq->valueint; @@ -3979,10 +3984,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n", g_args.num_of_RPR); - if (!g_args.answer_yes) { - printf(" press Enter key to continue or Ctrl-C to stop."); - (void)getchar(); - } + prompt(); g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR; } } else if (!interlaceRows) {