From 5eca4ebd278330a56ba2c9479a2d6107e9c02f39 Mon Sep 17 00:00:00 2001 From: lichuang Date: Fri, 18 Jun 2021 11:33:32 +0800 Subject: [PATCH 001/239] [TD-4352]refactor duplicate insert new table actions to function --- src/tsdb/src/tsdbMeta.c | 45 ++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 621be04e21..6cf4ef28d2 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -44,6 +44,7 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable); static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable); static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid); static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema); +static int tsdbInsertNewTableAction(STsdbRepo *pRepo, STable* pTable); // ------------------ OUTER FUNCTIONS ------------------ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { @@ -127,21 +128,16 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { tsdbUnlockRepoMeta(pRepo); // Write to memtable action - // TODO: refactor duplicate codes - int tlen = 0; - void *pBuf = NULL; if (newSuper || superChanged) { - tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, super); - pBuf = tsdbAllocBytes(pRepo, tlen); - if (pBuf == NULL) goto _err; - void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, super); - ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen); + // add insert new super table action + if (tsdbInsertNewTableAction(pRepo, super) != 0) { + goto _err; + } + } + // add insert new table action + if (tsdbInsertNewTableAction(pRepo, table) != 0) { + goto _err; } - tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, table); - pBuf = tsdbAllocBytes(pRepo, tlen); - if (pBuf == NULL) goto _err; - void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, table); - ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen); if (tsdbCheckCommit(pRepo) < 0) return -1; @@ -382,7 +378,7 @@ int 
tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) { tdDestroyTSchemaBuilder(&schemaBuilder); } - // Chage in memory + // Change in memory if (pNewSchema != NULL) { // change super table tag schema TSDB_WLOCK_TABLE(pTable->pSuper); STSchema *pOldSchema = pTable->pSuper->tagSchema; @@ -425,6 +421,21 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) { } // ------------------ INTERNAL FUNCTIONS ------------------ +static int tsdbInsertNewTableAction(STsdbRepo *pRepo, STable* pTable) { + int tlen = 0; + void *pBuf = NULL; + + tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable); + pBuf = tsdbAllocBytes(pRepo, tlen); + if (pBuf == NULL) { + return -1; + } + void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, pTable); + ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen); + + return 0; +} + STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) { STsdbMeta *pMeta = (STsdbMeta *)calloc(1, sizeof(*pMeta)); if (pMeta == NULL) { @@ -616,6 +627,7 @@ int16_t tsdbGetLastColumnsIndexByColId(STable* pTable, int16_t colId) { if (pTable->lastCols == NULL) { return -1; } + // TODO: use binary search instead for (int16_t i = 0; i < pTable->maxColNum; ++i) { if (pTable->lastCols[i].colId == colId) { return i; @@ -740,10 +752,7 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema, TSDB_WUNLOCK_TABLE(pCTable); if (insertAct) { - int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pCTable); - void *buf = tsdbAllocBytes(pRepo, tlen); - ASSERT(buf != NULL); - tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pCTable); + ASSERT(tsdbInsertNewTableAction(pRepo, pCTable) == 0); } } From 5f5a802bb939c31c21cd800156adbff81600de33 Mon Sep 17 00:00:00 2001 From: lichuang Date: Fri, 18 Jun 2021 11:35:06 +0800 Subject: [PATCH 002/239] [TD-4352]update meta maxCols and maxRowBytes after remove table --- src/tsdb/src/tsdbMeta.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 
6cf4ef28d2..9d5df9fb32 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -1043,6 +1043,8 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema)); } } + pMeta->maxCols = maxCols; + pMeta->maxRowBytes = maxRowBytes; } if (lock) tsdbUnlockRepoMeta(pRepo); From b50154343ddc1cc144ffb507151a9d8929da11f7 Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 22 Jun 2021 16:41:11 +0800 Subject: [PATCH 003/239] [TD-4352]compact tsdb meta data implementation --- src/tsdb/inc/tsdbFile.h | 5 +- src/tsdb/src/tsdbCommit.c | 107 ++++++++++++++++++++++++++++++++++++-- src/tsdb/src/tsdbFS.c | 2 +- src/tsdb/src/tsdbFile.c | 31 ++++++++--- src/tsdb/src/tsdbSync.c | 2 +- 5 files changed, 132 insertions(+), 15 deletions(-) diff --git a/src/tsdb/inc/tsdbFile.h b/src/tsdb/inc/tsdbFile.h index dcb5eadfab..9ebf1253cb 100644 --- a/src/tsdb/inc/tsdbFile.h +++ b/src/tsdb/inc/tsdbFile.h @@ -38,7 +38,7 @@ #define TSDB_FILE_IS_OK(tf) (TSDB_FILE_STATE(tf) == TSDB_FILE_STATE_OK) #define TSDB_FILE_IS_BAD(tf) (TSDB_FILE_STATE(tf) == TSDB_FILE_STATE_BAD) -typedef enum { TSDB_FILE_HEAD = 0, TSDB_FILE_DATA, TSDB_FILE_LAST, TSDB_FILE_MAX, TSDB_FILE_META } TSDB_FILE_T; +typedef enum { TSDB_FILE_HEAD = 0, TSDB_FILE_DATA, TSDB_FILE_LAST, TSDB_FILE_MAX, TSDB_FILE_META, TSDB_FILE_META_TMP} TSDB_FILE_T; // =============== SMFile typedef struct { @@ -56,7 +56,8 @@ typedef struct { uint8_t state; } SMFile; -void tsdbInitMFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver); +void tsdbInitMFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver, bool tmp); +void tsdbRenameOrDeleleTempMetaFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver, int code); void tsdbInitMFileEx(SMFile* pMFile, const SMFile* pOMFile); int tsdbEncodeSMFile(void** buf, SMFile* pMFile); void* tsdbDecodeSMFile(void* buf, SMFile* pMFile); diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 82cc6f07f7..6c42311abc 100644 
--- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -55,8 +55,9 @@ typedef struct { #define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch))) static int tsdbCommitMeta(STsdbRepo *pRepo); -static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen); +static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool updateMeta); static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid); +static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile); static int tsdbCommitTSData(STsdbRepo *pRepo); static void tsdbStartCommit(STsdbRepo *pRepo); static void tsdbEndCommit(STsdbRepo *pRepo, int eno); @@ -283,7 +284,7 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) { // Create a new meta file did.level = TFS_PRIMARY_LEVEL; did.id = TFS_PRIMARY_ID; - tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo))); + tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), false); if (tsdbCreateMFile(&mf, true) < 0) { tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno)); @@ -305,7 +306,7 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) { pAct = (SActObj *)pNode->data; if (pAct->act == TSDB_UPDATE_META) { pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj)); - if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) { + if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len, true) < 0) { tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, tstrerror(terrno)); tsdbCloseMFile(&mf); @@ -338,6 +339,10 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) { tsdbCloseMFile(&mf); tsdbUpdateMFile(pfs, &mf); + if (tsdbCompactMetaFile(pRepo, pfs, &mf) < 0) { + tsdbError("compact meta file error"); + } + return 0; } @@ -375,7 +380,7 @@ void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn 
*pRtn) { pRtn->minFid, pRtn->midFid, pRtn->maxFid); } -static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen) { +static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool updateMeta) { char buf[64] = "\0"; void * pBuf = buf; SKVRecord rInfo; @@ -401,6 +406,11 @@ static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void } tsdbUpdateMFileMagic(pMFile, POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM))); + if (!updateMeta) { + pMFile->info.nRecords++; + return 0; + } + SKVRecord *pRecord = taosHashGet(pfs->metaCache, (void *)&uid, sizeof(uid)); if (pRecord != NULL) { pMFile->info.tombSize += (pRecord->size + sizeof(SKVRecord)); @@ -442,6 +452,95 @@ static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid) { return 0; } +static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { + float delPercent = pMFile->info.nDels * 1.0 / pMFile->info.nRecords; + float tombPercent = pMFile->info.tombSize * 1.0 / pMFile->info.size; + + if (delPercent < 0.33 && tombPercent < 0.33) { + return 0; + } + + tsdbInfo("begin compact tsdb meta file, nDels:%" PRId64 ",nRecords:%" PRId64 ",tombSize:%" PRId64 ",size:%" PRId64, + pMFile->info.nDels,pMFile->info.nRecords,pMFile->info.tombSize,pMFile->info.size); + + SMFile mf; + SDiskID did; + + // first create tmp meta file + did.level = TFS_PRIMARY_LEVEL; + did.id = TFS_PRIMARY_ID; + tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), true); + + if (tsdbCreateMFile(&mf, true) < 0) { + tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + tsdbInfo("vgId:%d meta file %s is created to compact meta data", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf)); + + // second iterator metaCache + int code = -1; + int64_t maxBufSize = 1024; + SKVRecord *pRecord; + void *pBuf = NULL; + + pBuf = malloc((size_t)maxBufSize); + if (pBuf == 
NULL) { + goto _err; + } + + pRecord = taosHashIterate(pfs->metaCache, NULL); + while (pRecord) { + if (tsdbSeekMFile(pMFile, pRecord->offset + sizeof(SKVRecord), SEEK_SET) < 0) { + tsdbError("vgId:%d failed to seek file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), + tstrerror(terrno)); + break; + } + if (pRecord->size > maxBufSize) { + maxBufSize = pRecord->size; + void* tmp = realloc(pBuf, maxBufSize); + if (tmp == NULL) { + break; + } + pBuf = tmp; + } + int nread = (int)tsdbReadMFile(pMFile, pBuf, pRecord->size); + if (nread < 0) { + tsdbError("vgId:%d failed to read file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), + tstrerror(terrno)); + break; + } + + if (nread < pRecord->size) { + tsdbError("vgId:%d failed to read file %s since file corrupted, expected read:%" PRId64 " actual read:%d", + REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), pRecord->size, nread); + break; + } + + if (tsdbUpdateMetaRecord(pfs, &mf, pRecord->uid, pBuf, pRecord->size, false) < 0) { + tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pRecord->uid, + tstrerror(terrno)); + break; + } + + pRecord = taosHashIterate(pfs->metaCache, pRecord); + } + code = 0; + +_err: + TSDB_FILE_FSYNC(&mf); + tsdbCloseMFile(&mf); + + tsdbRenameOrDeleleTempMetaFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), code); + if (code == 0) { + tsdbUpdateMFile(pfs, &mf); + } + + tfree(pBuf); + tsdbInfo("end compact tsdb meta file, code:%d", code); + return code; +} + // =================== Commit Time-Series Data static int tsdbCommitTSData(STsdbRepo *pRepo) { SMemTable *pMem = pRepo->imem; diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index 54372ae8c2..35e9998323 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -272,7 +272,7 @@ static int tsdbCreateMeta(STsdbRepo *pRepo) { // Create a new meta file did.level = TFS_PRIMARY_LEVEL; did.id = TFS_PRIMARY_ID; - tsdbInitMFile(&mf, did, REPO_ID(pRepo), 
FS_TXN_VERSION(REPO_FS(pRepo))); + tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), false); if (tsdbCreateMFile(&mf, true) < 0) { tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno)); diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 50fa393e9f..b1ff79bfef 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -16,11 +16,12 @@ #include "tsdbint.h" static const char *TSDB_FNAME_SUFFIX[] = { - "head", // TSDB_FILE_HEAD - "data", // TSDB_FILE_DATA - "last", // TSDB_FILE_LAST - "", // TSDB_FILE_MAX - "meta" // TSDB_FILE_META + "head", // TSDB_FILE_HEAD + "data", // TSDB_FILE_DATA + "last", // TSDB_FILE_LAST + "", // TSDB_FILE_MAX + "meta" // TSDB_FILE_META + "meta.tmp" // TSDB_FILE_META_TMP }; static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname); @@ -30,7 +31,7 @@ static void *tsdbDecodeDFInfo(void *buf, SDFInfo *pInfo); static int tsdbRollBackDFile(SDFile *pDFile); // ============== SMFile -void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver) { +void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver, bool tmp) { char fname[TSDB_FILENAME_LEN]; TSDB_FILE_SET_STATE(pMFile, TSDB_FILE_STATE_OK); @@ -38,10 +39,26 @@ void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver) { memset(&(pMFile->info), 0, sizeof(pMFile->info)); pMFile->info.magic = TSDB_FILE_INIT_MAGIC; - tsdbGetFilename(vid, 0, ver, TSDB_FILE_META, fname); + tsdbGetFilename(vid, 0, ver, tmp ? 
TSDB_FILE_META_TMP : TSDB_FILE_META, fname); tfsInitFile(TSDB_FILE_F(pMFile), did.level, did.id, fname); } +void tsdbRenameOrDeleleTempMetaFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver, int code) { + char mfname[TSDB_FILENAME_LEN] = {'\0'}; + char tfname[TSDB_FILENAME_LEN] = {'\0'}; + + tsdbGetFilename(vid, 0, ver, TSDB_FILE_META_TMP, tfname); + + if (code != 0) { + remove(tfname); + return; + } + + tsdbGetFilename(vid, 0, ver, TSDB_FILE_META, mfname); + + (void)taosRename(tfname, mfname); +} + void tsdbInitMFileEx(SMFile *pMFile, const SMFile *pOMFile) { *pMFile = *pOMFile; TSDB_FILE_SET_CLOSED(pMFile); diff --git a/src/tsdb/src/tsdbSync.c b/src/tsdb/src/tsdbSync.c index edcb84d091..f06943bc37 100644 --- a/src/tsdb/src/tsdbSync.c +++ b/src/tsdb/src/tsdbSync.c @@ -209,7 +209,7 @@ static int32_t tsdbSyncRecvMeta(SSyncH *pSynch) { // Recv from remote SMFile mf; SDiskID did = {.level = TFS_PRIMARY_LEVEL, .id = TFS_PRIMARY_ID}; - tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo))); + tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), false); if (tsdbCreateMFile(&mf, false) < 0) { tsdbError("vgId:%d, failed to create file while recv metafile since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; From ea75e443ed80905718e01a61b091f4fc6589f944 Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 22 Jun 2021 16:43:57 +0800 Subject: [PATCH 004/239] [TD-4352]compact tsdb meta data implementation --- src/tsdb/src/tsdbCommit.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 6c42311abc..a9a2b2ded3 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -537,7 +537,9 @@ _err: } tfree(pBuf); - tsdbInfo("end compact tsdb meta file, code:%d", code); + + tsdbInfo("end compact tsdb meta file, code:%d, nDels:%" PRId64 ",nRecords:%" PRId64 ",tombSize:%" PRId64 ",size:%" PRId64, + code, 
mf.info.nDels,mf.info.nRecords,mf.info.tombSize,mf.info.size); return code; } From dd5642260737f250ea472ab1ed2ecf7c23aeadff Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 22 Jun 2021 16:47:13 +0800 Subject: [PATCH 005/239] [TD-4352]compact tsdb meta data implementation --- src/tsdb/src/tsdbCommit.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index a9a2b2ded3..68d0fb9ae1 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -538,8 +538,10 @@ _err: tfree(pBuf); - tsdbInfo("end compact tsdb meta file, code:%d, nDels:%" PRId64 ",nRecords:%" PRId64 ",tombSize:%" PRId64 ",size:%" PRId64, - code, mf.info.nDels,mf.info.nRecords,mf.info.tombSize,mf.info.size); + ASSERT(mf.info.nDels == 0); + ASSERT(mf.info.tombSize == 0); + tsdbInfo("end compact tsdb meta file,code:%d,nRecords:%" PRId64 ",size:%" PRId64, + code,mf.info.nRecords,mf.info.size); return code; } From 9b6edbe7e75814325c8482cb3264677cdc383ff4 Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 22 Jun 2021 21:03:31 +0800 Subject: [PATCH 006/239] [TD-4352]fix compile error --- src/tsdb/src/tsdbCommit.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 68d0fb9ae1..bb08c159b8 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -453,8 +453,8 @@ static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid) { } static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { - float delPercent = pMFile->info.nDels * 1.0 / pMFile->info.nRecords; - float tombPercent = pMFile->info.tombSize * 1.0 / pMFile->info.size; + float delPercent = (float)(pMFile->info.nDels) / (float)(pMFile->info.nRecords); + float tombPercent = (float)(pMFile->info.tombSize) / (float)(pMFile->info.size); if (delPercent < 0.33 && tombPercent < 0.33) { return 0; @@ -540,6 +540,7 @@ _err: ASSERT(mf.info.nDels == 0); 
ASSERT(mf.info.tombSize == 0); + tsdbInfo("end compact tsdb meta file,code:%d,nRecords:%" PRId64 ",size:%" PRId64, code,mf.info.nRecords,mf.info.size); return code; From 64ade71d7504c0e680ab8637919a7e353dd5117d Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 22 Jun 2021 21:11:48 +0800 Subject: [PATCH 007/239] [TD-4352]fix compile error --- src/tsdb/src/tsdbCommit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index bb08c159b8..9ff4f673da 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -517,7 +517,7 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { break; } - if (tsdbUpdateMetaRecord(pfs, &mf, pRecord->uid, pBuf, pRecord->size, false) < 0) { + if (tsdbUpdateMetaRecord(pfs, &mf, pRecord->uid, pBuf, (int)pRecord->size, false) < 0) { tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pRecord->uid, tstrerror(terrno)); break; From 3237ee5c5b0a6fd3587d6e2e09b87e2bc18fcca8 Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 22 Jun 2021 22:30:19 +0800 Subject: [PATCH 008/239] [TD-4352]fix compile error --- src/tsdb/src/tsdbCommit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 9ff4f673da..df0f8b4d78 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -498,7 +498,7 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { } if (pRecord->size > maxBufSize) { maxBufSize = pRecord->size; - void* tmp = realloc(pBuf, maxBufSize); + void* tmp = realloc(pBuf, (size_t)maxBufSize); if (tmp == NULL) { break; } From eaf84f0fb7db5ba379c298e4c4dead8ae5d81312 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 14 Jul 2021 18:14:01 +0800 Subject: [PATCH 009/239] [TD-4840]add testcase of compacting sdb --- tests/pytest/tsdb/insertDataDb1.json | 87 +++++++++++ 
tests/pytest/tsdb/insertDataDb1Replica2.json | 87 +++++++++++ tests/pytest/tsdb/insertDataDb2.json | 86 +++++++++++ tests/pytest/tsdb/insertDataDb2Newstab.json | 86 +++++++++++ .../tsdb/insertDataDb2NewstabReplica2.json | 86 +++++++++++ tests/pytest/tsdb/insertDataDb2Replica2.json | 86 +++++++++++ tests/pytest/tsdb/sdbComp.py | 121 ++++++++++++++++ tests/pytest/tsdb/sdbCompCluster.py | 135 +++++++++++++++++ tests/pytest/tsdb/sdbCompClusterReplica2.py | 136 ++++++++++++++++++ 9 files changed, 910 insertions(+) create mode 100644 tests/pytest/tsdb/insertDataDb1.json create mode 100644 tests/pytest/tsdb/insertDataDb1Replica2.json create mode 100644 tests/pytest/tsdb/insertDataDb2.json create mode 100644 tests/pytest/tsdb/insertDataDb2Newstab.json create mode 100644 tests/pytest/tsdb/insertDataDb2NewstabReplica2.json create mode 100644 tests/pytest/tsdb/insertDataDb2Replica2.json create mode 100644 tests/pytest/tsdb/sdbComp.py create mode 100644 tests/pytest/tsdb/sdbCompCluster.py create mode 100644 tests/pytest/tsdb/sdbCompClusterReplica2.py diff --git a/tests/pytest/tsdb/insertDataDb1.json b/tests/pytest/tsdb/insertDataDb1.json new file mode 100644 index 0000000000..f01cc35a1b --- /dev/null +++ b/tests/pytest/tsdb/insertDataDb1.json @@ -0,0 +1,87 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + 
"childtable_count": 100000, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100000, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 1000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tsdb/insertDataDb1Replica2.json b/tests/pytest/tsdb/insertDataDb1Replica2.json new file mode 100644 index 0000000000..fec38bcdec --- /dev/null +++ b/tests/pytest/tsdb/insertDataDb1Replica2.json @@ -0,0 +1,87 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + 
"result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db1", + "drop": "yes", + "replica": 2, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, 
"count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tsdb/insertDataDb2.json b/tests/pytest/tsdb/insertDataDb2.json new file mode 100644 index 0000000000..89536418a2 --- /dev/null +++ b/tests/pytest/tsdb/insertDataDb2.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 200000, + "childtable_prefix": "stb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 1000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 500000, + "childtable_prefix": "stb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 1000, + "data_source": 
"rand", + "insert_mode": "taosc", + "insert_rows": 5, + "childtable_limit": 0, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tsdb/insertDataDb2Newstab.json b/tests/pytest/tsdb/insertDataDb2Newstab.json new file mode 100644 index 0000000000..f9d0713385 --- /dev/null +++ b/tests/pytest/tsdb/insertDataDb2Newstab.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db2", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 0, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 
00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"yes", + "childtable_count": 1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json b/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json new file mode 100644 index 0000000000..e052f2850f --- /dev/null +++ b/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db2", + "drop": "no", + "replica": 2, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + 
"update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 0, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"yes", + "childtable_count": 1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tsdb/insertDataDb2Replica2.json b/tests/pytest/tsdb/insertDataDb2Replica2.json new file mode 100644 index 0000000000..121f70956a --- /dev/null +++ b/tests/pytest/tsdb/insertDataDb2Replica2.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + 
"thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db2", + "drop": "yes", + "replica": 2, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 2000, + "childtable_prefix": "stb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 5, + "childtable_limit": 0, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": 
"BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tsdb/sdbComp.py b/tests/pytest/tsdb/sdbComp.py new file mode 100644 index 0000000000..6ee7a09fdb --- /dev/null +++ b/tests/pytest/tsdb/sdbComp.py @@ -0,0 +1,121 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from distutils.log import debug +import sys +import os +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + global selfPath + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + + # set path para + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath+ "/build/bin/" + testPath = selfPath+ "/../../../" + walFilePath = testPath + "/sim/dnode1/data/mnode_bak/wal/" + + #new db and insert data + 
tdSql.execute("drop database if exists db2") + os.system("%staosdemo -f tsdb/insertDataDb1.json -y " % binPath) + tdSql.execute("drop database if exists db1") + os.system("%staosdemo -f tsdb/insertDataDb2.json -y " % binPath) + tdSql.execute("drop table if exists db2.stb0") + os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath) + # query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + # print(query_pid1) + tdSql.execute("use db2") + tdSql.execute("drop table if exists stb1_0") + tdSql.execute("drop table if exists stb1_1") + tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')") + tdSql.execute("alter table db2.stb0 add column col4 int") + tdSql.execute("alter table db2.stb0 drop column col2") + tdSql.execute("alter table db2.stb0 add tag t3 int;") + tdSql.execute("alter table db2.stb0 drop tag t1") + tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ") + tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)") + tdSql.execute("alter table stb2_0 add column col2 binary(4)") + tdSql.execute("alter table stb2_0 drop column col1") + tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") + + # stop taosd and compact wal file + tdDnodes.stop(1) + sleep(10) + # assert os.path.exists(walFilePath) , "%s is not generated, compact didn't take effect " % walFilePath + + # use new wal file to start taosd + tdDnodes.start(1) + sleep(5) + tdSql.execute("reset query cache") + query_pid2 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + print(query_pid2) + + # verify that the data is correct + tdSql.execute("use db2") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1) + tdSql.query("select count (tbname) from stb1") + tdSql.checkRows(0) + tdSql.query("select count(*) from stb0_0") + tdSql.checkData(0, 0, 2) + 
tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 2) + tdSql.query("select count(*) from stb2_0") + tdSql.checkData(0, 0, 2) + + # delete useless file + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf wal/%s.sql" % testcaseFilename ) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tsdb/sdbCompCluster.py b/tests/pytest/tsdb/sdbCompCluster.py new file mode 100644 index 0000000000..4fa84817ec --- /dev/null +++ b/tests/pytest/tsdb/sdbCompCluster.py @@ -0,0 +1,135 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +sys.path.insert(0, os.getcwd()) +from util.log import * +from util.sql import * +from util.dnodes import * +import taos +import threading + + +class TwoClients: + def initConnection(self): + self.host = "chenhaoran02" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taos/" + self.port =6030 + self.rowNum = 10 + self.ts = 1537146000000 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath 
= root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + walFilePath = "/var/lib/taos/mnode_bak/wal/" + + # new taos client + conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config ) + print(conn1) + cur1 = conn1.cursor() + tdSql.init(cur1, True) + + # new db and insert data + os.system("rm -rf /var/lib/taos/mnode_bak/") + os.system("rm -rf /var/lib/taos/mnode_temp/") + tdSql.execute("drop database if exists db2") + os.system("%staosdemo -f wal/insertDataDb1.json -y " % binPath) + tdSql.execute("drop database if exists db1") + os.system("%staosdemo -f wal/insertDataDb2.json -y " % binPath) + tdSql.execute("drop table if exists db2.stb0") + os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath) + query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + print(query_pid1) + tdSql.execute("use db2") + tdSql.execute("drop table if exists stb1_0") + tdSql.execute("drop table if exists stb1_1") + tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')") + tdSql.execute("alter table db2.stb0 add column col4 int") + tdSql.execute("alter table db2.stb0 drop column col2") + tdSql.execute("alter table db2.stb0 add tag t3 int") + tdSql.execute("alter table db2.stb0 drop tag t1") + tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ") + tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)") + tdSql.execute("alter table stb2_0 add column col2 binary(4)") + tdSql.execute("alter table stb2_0 drop column col1") + tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") + + # stop taosd and compact wal file + os.system("ps -ef |grep taosd |grep -v 
'grep' |awk '{print $2}'|xargs kill -2") + sleep(10) + os.system("nohup taosd --compact-mnode-wal -c /etc/taos & ") + sleep(10) + os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &") + sleep(4) + tdSql.execute("reset query cache") + query_pid2 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + print(query_pid2) + assert os.path.exists(walFilePath) , "%s is not generated " % walFilePath + + # new taos connecting to server + conn2 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config ) + print(conn2) + cur2 = conn2.cursor() + tdSql.init(cur2, True) + + # use new wal file to start up tasod + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0]=="db2": + assert tdSql.queryResult[i][4]==1 , "replica is wrong" + tdSql.execute("use db2") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1) + tdSql.query("select count (tbname) from stb1") + tdSql.checkRows(0) + tdSql.query("select count(*) from stb0_0") + tdSql.checkData(0, 0, 2) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 2) + tdSql.query("select count(*) from stb2_0") + tdSql.checkData(0, 0, 2) + tdSql.query("select * from stb2_0") + tdSql.checkData(1, 2, 'R') + + # delete useless file + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf wal/%s.sql" % testcaseFilename ) + +clients = TwoClients() +clients.initConnection() +# clients.getBuildPath() +clients.run() \ No newline at end of file diff --git a/tests/pytest/tsdb/sdbCompClusterReplica2.py b/tests/pytest/tsdb/sdbCompClusterReplica2.py new file mode 100644 index 0000000000..117da8ca2f --- /dev/null +++ b/tests/pytest/tsdb/sdbCompClusterReplica2.py @@ -0,0 +1,136 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +sys.path.insert(0, os.getcwd()) +from util.log import * +from util.sql import * +from util.dnodes import * +import taos +import threading + + +class TwoClients: + def initConnection(self): + self.host = "chenhaoran02" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taos/" + self.port =6030 + self.rowNum = 10 + self.ts = 1537146000000 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + walFilePath = "/var/lib/taos/mnode_bak/wal/" + + # new taos client + conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config ) + print(conn1) + cur1 = conn1.cursor() + tdSql.init(cur1, True) + + # new db and insert data + os.system("rm -rf /var/lib/taos/mnode_bak/") + os.system("rm -rf /var/lib/taos/mnode_temp/") + tdSql.execute("drop database if exists db2") + os.system("%staosdemo -f wal/insertDataDb1Replica2.json -y " % binPath) + tdSql.execute("drop database if exists db1") + os.system("%staosdemo -f 
wal/insertDataDb2Replica2.json -y " % binPath) + tdSql.execute("drop table if exists db2.stb0") + os.system("%staosdemo -f wal/insertDataDb2NewstabReplica2.json -y " % binPath) + query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + print(query_pid1) + tdSql.execute("use db2") + tdSql.execute("drop table if exists stb1_0") + tdSql.execute("drop table if exists stb1_1") + tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')") + tdSql.execute("alter table db2.stb0 add column col4 int") + tdSql.execute("alter table db2.stb0 drop column col2") + tdSql.execute("alter table db2.stb0 add tag t3 int") + tdSql.execute("alter table db2.stb0 drop tag t1") + tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ") + tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)") + tdSql.execute("alter table stb2_0 add column col2 binary(4)") + tdSql.execute("alter table stb2_0 drop column col1") + tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") + + + # stop taosd and compact wal file + os.system("ps -ef |grep taosd |grep -v 'grep' |awk '{print $2}'|xargs kill -2") + sleep(10) + os.system("nohup taosd --compact-mnode-wal -c /etc/taos & ") + sleep(10) + os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &") + sleep(4) + tdSql.execute("reset query cache") + query_pid2 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + print(query_pid2) + assert os.path.exists(walFilePath) , "%s is not generated " % walFilePath + + # new taos connecting to server + conn2 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config ) + print(conn2) + cur2 = conn2.cursor() + tdSql.init(cur2, True) + + # use new wal file to start up tasod + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0]=="db2": + assert 
tdSql.queryResult[i][4]==2 , "replica is wrong" + tdSql.execute("use db2") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1) + tdSql.query("select count (tbname) from stb1") + tdSql.checkRows(0) + tdSql.query("select count(*) from stb0_0") + tdSql.checkData(0, 0, 2) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 2) + tdSql.query("select count(*) from stb2_0") + tdSql.checkData(0, 0, 2) + tdSql.query("select * from stb2_0") + tdSql.checkData(1, 2, 'R') + + # delete useless file + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf wal/%s.sql" % testcaseFilename ) + +clients = TwoClients() +clients.initConnection() +# clients.getBuildPath() +clients.run() \ No newline at end of file From 2d0bca4290c9329508cef5bdefcbd99461f2cb59 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 14 Jul 2021 18:36:59 +0800 Subject: [PATCH 010/239] [TD-4840]add testcase of compacting sdb --- tests/pytest/tsdb/insertDataDb2.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/tsdb/insertDataDb2.json b/tests/pytest/tsdb/insertDataDb2.json index 89536418a2..4cde724a82 100644 --- a/tests/pytest/tsdb/insertDataDb2.json +++ b/tests/pytest/tsdb/insertDataDb2.json @@ -38,7 +38,7 @@ "childtable_count": 200000, "childtable_prefix": "stb0_", "auto_create_table": "no", - "batch_create_tbl_num": 1000, + "batch_create_tbl_num": 100, "data_source": "rand", "insert_mode": "taosc", "insert_rows": 2000, From 543b17cb84d7689be334b8173b9a06f10ad99fbd Mon Sep 17 00:00:00 2001 From: lichuang Date: Thu, 15 Jul 2021 10:17:58 +0800 Subject: [PATCH 011/239] [TD-4352]fix tsdb meta compact bug --- src/tsdb/inc/tsdbFile.h | 1 - src/tsdb/src/tsdbCommit.c | 16 +++++++++++++++- src/tsdb/src/tsdbFile.c | 20 ++------------------ 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/src/tsdb/inc/tsdbFile.h b/src/tsdb/inc/tsdbFile.h index 9ebf1253cb..a2be99e279 100644 --- 
a/src/tsdb/inc/tsdbFile.h +++ b/src/tsdb/inc/tsdbFile.h @@ -57,7 +57,6 @@ typedef struct { } SMFile; void tsdbInitMFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver, bool tmp); -void tsdbRenameOrDeleleTempMetaFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver, int code); void tsdbInitMFileEx(SMFile* pMFile, const SMFile* pOMFile); int tsdbEncodeSMFile(void** buf, SMFile* pMFile); void* tsdbDecodeSMFile(void* buf, SMFile* pMFile); diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index df0f8b4d78..98aef13948 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -460,6 +460,11 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { return 0; } + if (tsdbOpenMFile(pMFile, O_RDONLY) < 0) { + tsdbError("open meta file %s compact fail", pMFile->f.rname); + return -1; + } + tsdbInfo("begin compact tsdb meta file, nDels:%" PRId64 ",nRecords:%" PRId64 ",tombSize:%" PRId64 ",size:%" PRId64, pMFile->info.nDels,pMFile->info.nRecords,pMFile->info.tombSize,pMFile->info.size); @@ -530,10 +535,19 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { _err: TSDB_FILE_FSYNC(&mf); tsdbCloseMFile(&mf); + tsdbCloseMFile(pMFile); - tsdbRenameOrDeleleTempMetaFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), code); if (code == 0) { + // rename meta.tmp -> meta + taosRename(mf.f.aname,pMFile->f.aname); + tstrncpy(mf.f.aname, pMFile->f.aname, TSDB_FILENAME_LEN); + tstrncpy(mf.f.rname, pMFile->f.rname, TSDB_FILENAME_LEN); + // update current meta file info + pfs->nstatus->pmf = NULL; tsdbUpdateMFile(pfs, &mf); + } else { + // remove meta.tmp file + remove(mf.f.aname); } tfree(pBuf); diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index b1ff79bfef..cc6fbb632f 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -20,8 +20,8 @@ static const char *TSDB_FNAME_SUFFIX[] = { "data", // TSDB_FILE_DATA "last", // TSDB_FILE_LAST "", // TSDB_FILE_MAX - 
"meta" // TSDB_FILE_META - "meta.tmp" // TSDB_FILE_META_TMP + "meta", // TSDB_FILE_META + "meta.tmp", // TSDB_FILE_META_TMP }; static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname); @@ -43,22 +43,6 @@ void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver, bool tmp) tfsInitFile(TSDB_FILE_F(pMFile), did.level, did.id, fname); } -void tsdbRenameOrDeleleTempMetaFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver, int code) { - char mfname[TSDB_FILENAME_LEN] = {'\0'}; - char tfname[TSDB_FILENAME_LEN] = {'\0'}; - - tsdbGetFilename(vid, 0, ver, TSDB_FILE_META_TMP, tfname); - - if (code != 0) { - remove(tfname); - return; - } - - tsdbGetFilename(vid, 0, ver, TSDB_FILE_META, mfname); - - (void)taosRename(tfname, mfname); -} - void tsdbInitMFileEx(SMFile *pMFile, const SMFile *pOMFile) { *pMFile = *pOMFile; TSDB_FILE_SET_CLOSED(pMFile); From 674992e016e9e2149ad0001d13d4c5f8868f5087 Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 27 Jul 2021 11:26:40 +0800 Subject: [PATCH 012/239] [TD-4352]fix code bug --- src/tsdb/src/tsdbCommit.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 98aef13948..289bb518e9 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -499,13 +499,13 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { if (tsdbSeekMFile(pMFile, pRecord->offset + sizeof(SKVRecord), SEEK_SET) < 0) { tsdbError("vgId:%d failed to seek file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), tstrerror(terrno)); - break; + goto _err; } if (pRecord->size > maxBufSize) { maxBufSize = pRecord->size; void* tmp = realloc(pBuf, (size_t)maxBufSize); if (tmp == NULL) { - break; + goto _err; } pBuf = tmp; } @@ -513,19 +513,19 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { if (nread < 0) { tsdbError("vgId:%d failed to read file %s since %s", 
REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), tstrerror(terrno)); - break; + goto _err; } if (nread < pRecord->size) { tsdbError("vgId:%d failed to read file %s since file corrupted, expected read:%" PRId64 " actual read:%d", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), pRecord->size, nread); - break; + goto _err; } if (tsdbUpdateMetaRecord(pfs, &mf, pRecord->uid, pBuf, (int)pRecord->size, false) < 0) { tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pRecord->uid, tstrerror(terrno)); - break; + goto _err; } pRecord = taosHashIterate(pfs->metaCache, pRecord); From 5602fef8f62dfa059dee00c9cda7751e4d8a2039 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 27 Jul 2021 14:59:32 +0800 Subject: [PATCH 013/239] [TD-4840]: add testcase of compressing tsdb meta data --- tests/pytest/tsdb/insertDataDb1.json | 6 +- tests/pytest/tsdb/insertDataDb2.json | 6 +- tests/pytest/tsdb/{sdbComp.py => tsdbComp.py} | 62 ++++++++++++++++--- 3 files changed, 60 insertions(+), 14 deletions(-) rename tests/pytest/tsdb/{sdbComp.py => tsdbComp.py} (64%) diff --git a/tests/pytest/tsdb/insertDataDb1.json b/tests/pytest/tsdb/insertDataDb1.json index f01cc35a1b..60c6def92c 100644 --- a/tests/pytest/tsdb/insertDataDb1.json +++ b/tests/pytest/tsdb/insertDataDb1.json @@ -35,13 +35,13 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 100000, + "childtable_count": 1000, "childtable_prefix": "stb00_", "auto_create_table": "no", "batch_create_tbl_num": 1000, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 100, + "insert_rows": 1000, "childtable_limit": 0, "childtable_offset":0, "interlace_rows": 0, @@ -60,7 +60,7 @@ { "name": "stb1", "child_table_exists":"no", - "childtable_count": 100000, + "childtable_count": 10000, "childtable_prefix": "stb01_", "auto_create_table": "no", "batch_create_tbl_num": 1000, diff --git a/tests/pytest/tsdb/insertDataDb2.json b/tests/pytest/tsdb/insertDataDb2.json index 
4cde724a82..ead5f19716 100644 --- a/tests/pytest/tsdb/insertDataDb2.json +++ b/tests/pytest/tsdb/insertDataDb2.json @@ -38,10 +38,10 @@ "childtable_count": 200000, "childtable_prefix": "stb0_", "auto_create_table": "no", - "batch_create_tbl_num": 100, + "batch_create_tbl_num": 1000, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 2000, + "insert_rows": 0, "childtable_limit": 0, "childtable_offset":0, "interlace_rows": 0, @@ -60,7 +60,7 @@ { "name": "stb1", "child_table_exists":"no", - "childtable_count": 500000, + "childtable_count": 2, "childtable_prefix": "stb1_", "auto_create_table": "no", "batch_create_tbl_num": 1000, diff --git a/tests/pytest/tsdb/sdbComp.py b/tests/pytest/tsdb/tsdbComp.py similarity index 64% rename from tests/pytest/tsdb/sdbComp.py rename to tests/pytest/tsdb/tsdbComp.py index 6ee7a09fdb..b2ea36d239 100644 --- a/tests/pytest/tsdb/sdbComp.py +++ b/tests/pytest/tsdb/tsdbComp.py @@ -20,13 +20,13 @@ from util.cases import * from util.sql import * from util.dnodes import * import subprocess - +from random import choice class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): global selfPath selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -63,7 +63,7 @@ class TDTestCase: tdSql.execute("drop database if exists db1") os.system("%staosdemo -f tsdb/insertDataDb2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") - os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath) + os.system("%staosdemo -f tsdb/insertDataDb2Newstab.json -y " % binPath) # query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) # print(query_pid1) tdSql.execute("use db2") @@ -80,12 +80,54 @@ class TDTestCase: tdSql.execute("alter table stb2_0 drop column col1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") - # stop taosd and compact wal file + # create db 
utest + + + dataType= [ "tinyint", "smallint", "int", "bigint", "float", "double", "bool", " binary(20)", "nchar(20)", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"] + + tdSql.execute("drop database if exists utest") + tdSql.execute("create database utest keep 3650") + tdSql.execute("use utest") + tdSql.execute('''create table test(ts timestamp, col0 tinyint, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col10 tinyint unsigned, col11 smallint unsigned, col12 int unsigned, col13 bigint unsigned) tags(loc nchar(200), tag1 int)''') + + # rowNum1 = 13 + # for i in range(rowNum1): + # columnName= "col" + str(i+1) + # tdSql.execute("alter table test drop column %s ;" % columnName ) + + rowNum2= 988 + for i in range(rowNum2): + tdSql.execute("alter table test add column col%d %s ;" %( i+14, choice(dataType)) ) + + rowNum3= 988 + for i in range(rowNum3): + tdSql.execute("alter table test drop column col%d ;" %( i+14) ) + + + self.rowNum = 1 + self.rowNum2 = 100 + self.rowNum3 = 20 + self.ts = 1537146000000 + self.ts1 = 1537146000000000 + self.ts2 = 1597146000000 + # tdSql.execute("create table test1 using test tags('beijing', 10)") + # tdSql.execute("create table test2 using test tags('tianjing', 20)") + # tdSql.execute("create table test3 using test tags('shanghai', 20)") + + for j in range(self.rowNum2): + tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j,j) ) + for i in range(self.rowNum): + tdSql.execute("insert into test%d values(%d, %d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (j, self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + for j in range(self.rowNum2): + tdSql.execute("drop table if exists test%d" % (j+1)) + + + # stop taosd and restart taosd tdDnodes.stop(1) sleep(10) - # assert os.path.exists(walFilePath) , "%s is not 
generated, compact didn't take effect " % walFilePath - - # use new wal file to start taosd tdDnodes.start(1) sleep(5) tdSql.execute("reset query cache") @@ -104,7 +146,11 @@ class TDTestCase: tdSql.checkData(0, 0, 2) tdSql.query("select count(*) from stb2_0") tdSql.checkData(0, 0, 2) - + + tdSql.execute("use utest") + tdSql.query("select count (tbname) from test") + tdSql.checkData(0, 0, 1) + # delete useless file testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") From 705d6e3eb3d2a70d097d33a1c830cf2d6c4ee3bd Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 28 Jul 2021 16:11:09 +0800 Subject: [PATCH 014/239] [TD-4840]: update testcase of tsdb meta compressed --- tests/pytest/tsdb/tsdbComp.py | 10 +-- .../{sdbCompCluster.py => tsdbCompCluster.py} | 63 +++++++++----- ...Replica2.py => tsdbCompClusterReplica2.py} | 84 +++++++++++++------ 3 files changed, 105 insertions(+), 52 deletions(-) rename tests/pytest/tsdb/{sdbCompCluster.py => tsdbCompCluster.py} (65%) rename tests/pytest/tsdb/{sdbCompClusterReplica2.py => tsdbCompClusterReplica2.py} (57%) diff --git a/tests/pytest/tsdb/tsdbComp.py b/tests/pytest/tsdb/tsdbComp.py index b2ea36d239..a46b3280fb 100644 --- a/tests/pytest/tsdb/tsdbComp.py +++ b/tests/pytest/tsdb/tsdbComp.py @@ -64,8 +64,7 @@ class TDTestCase: os.system("%staosdemo -f tsdb/insertDataDb2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") os.system("%staosdemo -f tsdb/insertDataDb2Newstab.json -y " % binPath) - # query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) - # print(query_pid1) + tdSql.execute("use db2") tdSql.execute("drop table if exists stb1_0") tdSql.execute("drop table if exists stb1_1") @@ -109,11 +108,6 @@ class TDTestCase: self.rowNum2 = 100 self.rowNum3 = 20 self.ts = 1537146000000 - self.ts1 = 1537146000000000 - self.ts2 = 1597146000000 - # tdSql.execute("create table test1 using test tags('beijing', 10)") - # tdSql.execute("create 
table test2 using test tags('tianjing', 20)") - # tdSql.execute("create table test3 using test tags('shanghai', 20)") for j in range(self.rowNum2): tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j,j) ) @@ -154,7 +148,7 @@ class TDTestCase: # delete useless file testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") - os.system("rm -rf wal/%s.sql" % testcaseFilename ) + os.system("rm -rf tsdb/%s.sql" % testcaseFilename ) diff --git a/tests/pytest/tsdb/sdbCompCluster.py b/tests/pytest/tsdb/tsdbCompCluster.py similarity index 65% rename from tests/pytest/tsdb/sdbCompCluster.py rename to tests/pytest/tsdb/tsdbCompCluster.py index 4fa84817ec..39c8b563c5 100644 --- a/tests/pytest/tsdb/sdbCompCluster.py +++ b/tests/pytest/tsdb/tsdbCompCluster.py @@ -19,6 +19,8 @@ from util.sql import * from util.dnodes import * import taos import threading +import subprocess +from random import choice class TwoClients: @@ -62,17 +64,15 @@ class TwoClients: cur1 = conn1.cursor() tdSql.init(cur1, True) - # new db and insert data - os.system("rm -rf /var/lib/taos/mnode_bak/") - os.system("rm -rf /var/lib/taos/mnode_temp/") + # new db ,new super tables , child tables, and insert data tdSql.execute("drop database if exists db2") - os.system("%staosdemo -f wal/insertDataDb1.json -y " % binPath) + os.system("%staosdemo -f tsdb/insertDataDb1.json -y " % binPath) tdSql.execute("drop database if exists db1") - os.system("%staosdemo -f wal/insertDataDb2.json -y " % binPath) + os.system("%staosdemo -f tsdb/insertDataDb2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") - os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath) - query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) - print(query_pid1) + os.system("%staosdemo -f tsdb/insertDataDb2Newstab.json -y " % binPath) + + # new general tables and modify general tables; tdSql.execute("use db2") 
tdSql.execute("drop table if exists stb1_0") tdSql.execute("drop table if exists stb1_1") @@ -87,17 +87,41 @@ class TwoClients: tdSql.execute("alter table stb2_0 drop column col1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") - # stop taosd and compact wal file + # create db utest and modify super tables; + dataType= [ "tinyint", "smallint", "int", "bigint", "float", "double", "bool", " binary(20)", "nchar(20)", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"] + tdSql.execute("drop database if exists utest") + tdSql.execute("create database utest keep 3650") + tdSql.execute("use utest") + tdSql.execute('''create table test(ts timestamp, col0 tinyint, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col10 tinyint unsigned, col11 smallint unsigned, col12 int unsigned, col13 bigint unsigned) tags(loc nchar(200), tag1 int)''') + rowNum2= 988 + for i in range(rowNum2): + tdSql.execute("alter table test add column col%d %s ;" %( i+14, choice(dataType)) ) + rowNum3= 988 + for i in range(rowNum3): + tdSql.execute("alter table test drop column col%d ;" %( i+14) ) + + self.rowNum = 1 + self.rowNum2 = 100 + self.ts = 1537146000000 + for j in range(self.rowNum2): + tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j,j) ) + for i in range(self.rowNum): + tdSql.execute("insert into test%d values(%d, %d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (j, self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + # delete child tables; + for j in range(self.rowNum2): + tdSql.execute("drop table if exists test%d" % (j+1)) + + #restart taosd os.system("ps -ef |grep taosd |grep -v 'grep' |awk '{print $2}'|xargs kill -2") - sleep(10) - os.system("nohup taosd --compact-mnode-wal -c /etc/taos & ") - sleep(10) + sleep(20) + print("123") os.system("nohup 
/usr/bin/taosd > /dev/null 2>&1 &") sleep(4) tdSql.execute("reset query cache") query_pid2 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) print(query_pid2) - assert os.path.exists(walFilePath) , "%s is not generated " % walFilePath # new taos connecting to server conn2 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config ) @@ -105,11 +129,9 @@ class TwoClients: cur2 = conn2.cursor() tdSql.init(cur2, True) - # use new wal file to start up tasod + + # check data correct tdSql.query("show databases") - for i in range(tdSql.queryRows): - if tdSql.queryResult[i][0]=="db2": - assert tdSql.queryResult[i][4]==1 , "replica is wrong" tdSql.execute("use db2") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 1) @@ -123,11 +145,14 @@ class TwoClients: tdSql.checkData(0, 0, 2) tdSql.query("select * from stb2_0") tdSql.checkData(1, 2, 'R') - + tdSql.execute("use utest") + tdSql.query("select count (tbname) from test") + tdSql.checkData(0, 0, 1) + # delete useless file testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") - os.system("rm -rf wal/%s.sql" % testcaseFilename ) + os.system("rm -rf tsdb/%s.sql" % testcaseFilename ) clients = TwoClients() clients.initConnection() diff --git a/tests/pytest/tsdb/sdbCompClusterReplica2.py b/tests/pytest/tsdb/tsdbCompClusterReplica2.py similarity index 57% rename from tests/pytest/tsdb/sdbCompClusterReplica2.py rename to tests/pytest/tsdb/tsdbCompClusterReplica2.py index 117da8ca2f..9b43b7ce80 100644 --- a/tests/pytest/tsdb/sdbCompClusterReplica2.py +++ b/tests/pytest/tsdb/tsdbCompClusterReplica2.py @@ -19,7 +19,8 @@ from util.sql import * from util.dnodes import * import taos import threading - +import subprocess +from random import choice class TwoClients: def initConnection(self): @@ -62,17 +63,15 @@ class TwoClients: cur1 = conn1.cursor() tdSql.init(cur1, True) - # new db and insert data - os.system("rm 
-rf /var/lib/taos/mnode_bak/") - os.system("rm -rf /var/lib/taos/mnode_temp/") + # new db ,new super tables , child tables, and insert data tdSql.execute("drop database if exists db2") - os.system("%staosdemo -f wal/insertDataDb1Replica2.json -y " % binPath) + os.system("%staosdemo -f tsdb/insertDataDb1Replica2.json -y " % binPath) tdSql.execute("drop database if exists db1") - os.system("%staosdemo -f wal/insertDataDb2Replica2.json -y " % binPath) + os.system("%staosdemo -f tsdb/insertDataDb2Replica2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") - os.system("%staosdemo -f wal/insertDataDb2NewstabReplica2.json -y " % binPath) - query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) - print(query_pid1) + os.system("%staosdemo -f tsdb/insertDataDb2NewstabReplica2.json -y " % binPath) + + # new general tables and modify general tables; tdSql.execute("use db2") tdSql.execute("drop table if exists stb1_0") tdSql.execute("drop table if exists stb1_1") @@ -86,19 +85,53 @@ class TwoClients: tdSql.execute("alter table stb2_0 add column col2 binary(4)") tdSql.execute("alter table stb2_0 drop column col1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") - - # stop taosd and compact wal file - os.system("ps -ef |grep taosd |grep -v 'grep' |awk '{print $2}'|xargs kill -2") - sleep(10) - os.system("nohup taosd --compact-mnode-wal -c /etc/taos & ") - sleep(10) + + # create db utest replica 2 and modify super tables; + dataType= [ "tinyint", "smallint", "int", "bigint", "float", "double", "bool", " binary(20)", "nchar(20)", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"] + tdSql.execute("drop database if exists utest") + tdSql.execute("create database utest keep 3650 replica 2 ") + tdSql.execute("use utest") + tdSql.execute('''create table test(ts timestamp, col0 tinyint, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 
binary(20), col9 nchar(20), col10 tinyint unsigned, col11 smallint unsigned, col12 int unsigned, col13 bigint unsigned) tags(loc nchar(200), tag1 int)''') + rowNum2= 988 + for i in range(rowNum2): + tdSql.execute("alter table test add column col%d %s ;" %( i+14, choice(dataType)) ) + rowNum3= 988 + for i in range(rowNum3): + tdSql.execute("alter table test drop column col%d ;" %( i+14) ) + self.rowNum = 1 + self.rowNum2 = 100 + self.ts = 1537146000000 + for j in range(self.rowNum2): + tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j,j) ) + for i in range(self.rowNum): + tdSql.execute("insert into test%d values(%d, %d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (j, self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + # delete child tables; + for j in range(self.rowNum2): + tdSql.execute("drop table if exists test%d" % (j+1)) + + # drop dnodes and restart taosd; + sleep(3) + tdSql.execute(" drop dnode 'chenhaoran02:6030'; ") + sleep(20) + os.system("rm -rf /var/lib/taos/*") + print("clear dnode chenhaoran02'data files") os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &") - sleep(4) - tdSql.execute("reset query cache") - query_pid2 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) - print(query_pid2) - assert os.path.exists(walFilePath) , "%s is not generated " % walFilePath + print("start taosd") + sleep(10) + tdSql.execute("reset query cache ;") + tdSql.execute("create dnode chenhaoran02 ;") + + # # + # os.system("ps -ef |grep taosd |grep -v 'grep' |awk '{print $2}'|xargs kill -2") + # sleep(20) + # os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &") + # sleep(4) + # tdSql.execute("reset query cache") + # query_pid2 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + # print(query_pid2) # new taos connecting to server conn2 = taos.connect(host=self.host, 
user=self.user, password=self.password, config=self.config ) @@ -106,11 +139,8 @@ class TwoClients: cur2 = conn2.cursor() tdSql.init(cur2, True) - # use new wal file to start up tasod + # check data correct tdSql.query("show databases") - for i in range(tdSql.queryRows): - if tdSql.queryResult[i][0]=="db2": - assert tdSql.queryResult[i][4]==2 , "replica is wrong" tdSql.execute("use db2") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 1) @@ -125,10 +155,14 @@ class TwoClients: tdSql.query("select * from stb2_0") tdSql.checkData(1, 2, 'R') + tdSql.execute("use utest") + tdSql.query("select count (tbname) from test") + tdSql.checkData(0, 0, 1) + # delete useless file testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") - os.system("rm -rf wal/%s.sql" % testcaseFilename ) + os.system("rm -rf tsdb/%s.sql" % testcaseFilename ) clients = TwoClients() clients.initConnection() From 3fa6787382082cbb37227a5abf5c86c01d33aa49 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 28 Jul 2021 18:30:00 +0800 Subject: [PATCH 015/239] [TD-4840]: update testcase of tsdb meta compressed --- tests/pytest/tsdb/tsdbComp.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/pytest/tsdb/tsdbComp.py b/tests/pytest/tsdb/tsdbComp.py index a46b3280fb..3563655efe 100644 --- a/tests/pytest/tsdb/tsdbComp.py +++ b/tests/pytest/tsdb/tsdbComp.py @@ -69,14 +69,14 @@ class TDTestCase: tdSql.execute("drop table if exists stb1_0") tdSql.execute("drop table if exists stb1_1") tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')") - tdSql.execute("alter table db2.stb0 add column col4 int") - tdSql.execute("alter table db2.stb0 drop column col2") + tdSql.execute("alter table db2.stb0 add column c4 int") + tdSql.execute("alter table db2.stb0 drop column c2") tdSql.execute("alter table db2.stb0 add tag t3 int;") tdSql.execute("alter table db2.stb0 drop tag t1") 
- tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ") + tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ") tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)") - tdSql.execute("alter table stb2_0 add column col2 binary(4)") - tdSql.execute("alter table stb2_0 drop column col1") + tdSql.execute("alter table stb2_0 add column c2 binary(4)") + tdSql.execute("alter table stb2_0 drop column c1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") # create db utest From 7346cdeba5c1880f90132a710eb878064b072741 Mon Sep 17 00:00:00 2001 From: tomchon Date: Thu, 5 Aug 2021 18:22:50 +0800 Subject: [PATCH 016/239] [TD-4840]:modify testcase of tsdb meta compressed --- tests/pytest/tsdb/tsdbCompCluster.py | 10 +++++----- tests/pytest/tsdb/tsdbCompClusterReplica2.py | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/pytest/tsdb/tsdbCompCluster.py b/tests/pytest/tsdb/tsdbCompCluster.py index 39c8b563c5..3df4c9a9d4 100644 --- a/tests/pytest/tsdb/tsdbCompCluster.py +++ b/tests/pytest/tsdb/tsdbCompCluster.py @@ -77,14 +77,14 @@ class TwoClients: tdSql.execute("drop table if exists stb1_0") tdSql.execute("drop table if exists stb1_1") tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')") - tdSql.execute("alter table db2.stb0 add column col4 int") - tdSql.execute("alter table db2.stb0 drop column col2") + tdSql.execute("alter table db2.stb0 add column c4 int") + tdSql.execute("alter table db2.stb0 drop column c2") tdSql.execute("alter table db2.stb0 add tag t3 int") tdSql.execute("alter table db2.stb0 drop tag t1") - tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ") + tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ") tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)") - 
tdSql.execute("alter table stb2_0 add column col2 binary(4)") - tdSql.execute("alter table stb2_0 drop column col1") + tdSql.execute("alter table stb2_0 add column c2 binary(4)") + tdSql.execute("alter table stb2_0 drop column c1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") # create db utest and modify super tables; diff --git a/tests/pytest/tsdb/tsdbCompClusterReplica2.py b/tests/pytest/tsdb/tsdbCompClusterReplica2.py index 9b43b7ce80..2e016deea0 100644 --- a/tests/pytest/tsdb/tsdbCompClusterReplica2.py +++ b/tests/pytest/tsdb/tsdbCompClusterReplica2.py @@ -76,14 +76,14 @@ class TwoClients: tdSql.execute("drop table if exists stb1_0") tdSql.execute("drop table if exists stb1_1") tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')") - tdSql.execute("alter table db2.stb0 add column col4 int") - tdSql.execute("alter table db2.stb0 drop column col2") + tdSql.execute("alter table db2.stb0 add column c4 int") + tdSql.execute("alter table db2.stb0 drop column c2") tdSql.execute("alter table db2.stb0 add tag t3 int") tdSql.execute("alter table db2.stb0 drop tag t1") - tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ") + tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ") tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)") - tdSql.execute("alter table stb2_0 add column col2 binary(4)") - tdSql.execute("alter table stb2_0 drop column col1") + tdSql.execute("alter table stb2_0 add column c2 binary(4)") + tdSql.execute("alter table stb2_0 drop column c1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") From 67040b64cc83973faff2361677ad5ba16b31ce66 Mon Sep 17 00:00:00 2001 From: tomchon Date: Thu, 5 Aug 2021 18:23:54 +0800 Subject: [PATCH 017/239] [TD-5114]: add testcase of rollUpgrading --- .../manualTest/TD-5114/continueCreateDn.py | 97 ++++++ 
.../TD-5114/insertDataDb3Replica2.json | 61 ++++ .../manualTest/TD-5114/rollingUpgrade.py | 275 ++++++++++++++++++ 3 files changed, 433 insertions(+) create mode 100644 tests/pytest/manualTest/TD-5114/continueCreateDn.py create mode 100644 tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json create mode 100644 tests/pytest/manualTest/TD-5114/rollingUpgrade.py diff --git a/tests/pytest/manualTest/TD-5114/continueCreateDn.py b/tests/pytest/manualTest/TD-5114/continueCreateDn.py new file mode 100644 index 0000000000..4b724f0587 --- /dev/null +++ b/tests/pytest/manualTest/TD-5114/continueCreateDn.py @@ -0,0 +1,97 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +sys.path.insert(0, os.getcwd()) +from util.log import * +from util.sql import * +from util.dnodes import * +import taos +import threading +import subprocess +from random import choice + +class TwoClients: + def initConnection(self): + self.host = "chr03" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taos/" + self.port =6030 + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + + # new taos client + conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config ) + print(conn1) + cur1 = conn1.cursor() + tdSql.init(cur1, True) + + tdSql.execute("drop database if exists db3") + + # insert data with taosc + for i in range(10): + os.system("taosdemo -f manualTest/TD-5114/insertDataDb3Replica2.json -y ") + # # check data correct + tdSql.execute("show databases") + 
tdSql.execute("use db3") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 20000) + tdSql.query("select count (*) from stb0") + tdSql.checkData(0, 0, 4000000) + + # insert data with python connector , if you want to use this case ,cancel note. + + # for x in range(10): + # dataType= [ "tinyint", "smallint", "int", "bigint", "float", "double", "bool", " binary(20)", "nchar(20)", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"] + # tdSql.execute("drop database if exists db3") + # tdSql.execute("create database db3 keep 3650 replica 2 ") + # tdSql.execute("use db3") + # tdSql.execute('''create table test(ts timestamp, col0 tinyint, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + # col7 bool, col8 binary(20), col9 nchar(20), col10 tinyint unsigned, col11 smallint unsigned, col12 int unsigned, col13 bigint unsigned) tags(loc nchar(3000), tag1 int)''') + # rowNum2= 988 + # for i in range(rowNum2): + # tdSql.execute("alter table test add column col%d %s ;" %( i+14, choice(dataType)) ) + # rowNum3= 988 + # for i in range(rowNum3): + # tdSql.execute("alter table test drop column col%d ;" %( i+14) ) + # self.rowNum = 50 + # self.rowNum2 = 2000 + # self.ts = 1537146000000 + # for j in range(self.rowNum2): + # tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j,j) ) + # for i in range(self.rowNum): + # tdSql.execute("insert into test%d values(%d, %d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + # % (j, self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + # # check data correct + # tdSql.execute("show databases") + # tdSql.execute("use db3") + # tdSql.query("select count (tbname) from test") + # tdSql.checkData(0, 0, 200) + # tdSql.query("select count (*) from test") + # tdSql.checkData(0, 0, 200000) + + + # delete useless file + testcaseFilename = os.path.split(__file__)[-1] + 
os.system("rm -rf ./insert_res.txt") + os.system("rm -rf manualTest/TD-5114/%s.sql" % testcaseFilename ) + +clients = TwoClients() +clients.initConnection() +# clients.getBuildPath() +clients.run() \ No newline at end of file diff --git a/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json b/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json new file mode 100644 index 0000000000..b2755823ef --- /dev/null +++ b/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json @@ -0,0 +1,61 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db3", + "drop": "yes", + "replica": 2, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 20000, + "childtable_prefix": "stb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 1000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff 
--git a/tests/pytest/manualTest/TD-5114/rollingUpgrade.py b/tests/pytest/manualTest/TD-5114/rollingUpgrade.py new file mode 100644 index 0000000000..f634eb1208 --- /dev/null +++ b/tests/pytest/manualTest/TD-5114/rollingUpgrade.py @@ -0,0 +1,275 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from sys import version +from fabric import Connection +import random +import time +import datetime +import logging +import subprocess +import os +import sys + +class Node: + def __init__(self, index, username, hostIP, password, version): + self.index = index + self.username = username + self.hostIP = hostIP + # self.hostName = hostName + # self.homeDir = homeDir + self.version = version + self.verName = "TDengine-enterprise-server-%s-Linux-x64.tar.gz" % self.version + self.installPath = "TDengine-enterprise-server-%s" % self.version + # self.corePath = '/coredump' + self.conn = Connection("{}@{}".format(username, hostIP), connect_kwargs={"password": "{}".format(password)}) + + + def buildTaosd(self): + try: + print(self.conn) + # self.conn.run('echo "1234" > /home/chr/installtest/test.log') + self.conn.run("cd /home/chr/installtest/ && tar -xvf %s " %self.verName) + self.conn.run("cd /home/chr/installtest/%s && ./install.sh " % self.installPath) + except Exception as e: + print("Build Taosd error for node %d " % self.index) + logging.exception(e) + pass + + def rebuildTaosd(self): + try: + print(self.conn) + # self.conn.run('echo "1234" > /home/chr/installtest/test.log') + self.conn.run("cd /home/chr/installtest/%s && 
./install.sh " % self.installPath) + except Exception as e: + print("Build Taosd error for node %d " % self.index) + logging.exception(e) + pass + + def startTaosd(self): + try: + self.conn.run("sudo systemctl start taosd") + except Exception as e: + print("Start Taosd error for node %d " % self.index) + logging.exception(e) + + def restartTarbi(self): + try: + self.conn.run("sudo systemctl restart tarbitratord ") + except Exception as e: + print("Start Taosd error for node %d " % self.index) + logging.exception(e) + + def clearData(self): + timeNow = datetime.datetime.now() + # timeYes = datetime.datetime.now() + datetime.timedelta(days=-1) + timStr = timeNow.strftime('%Y%m%d%H%M%S') + # timStr = timeNow.strftime('%Y%m%d%H%M%S') + try: + # self.conn.run("mv /var/lib/taos/ /var/lib/taos%s " % timStr) + self.conn.run("rm -rf /home/chr/data/taos*") + except Exception as e: + print("rm -rf /var/lib/taos error %d " % self.index) + logging.exception(e) + + def stopTaosd(self): + try: + self.conn.run("sudo systemctl stop taosd") + except Exception as e: + print("Stop Taosd error for node %d " % self.index) + logging.exception(e) + + def restartTaosd(self): + try: + self.conn.run("sudo systemctl restart taosd") + except Exception as e: + print("Stop Taosd error for node %d " % self.index) + logging.exception(e) + +class oneNode: + + def FirestStartNode(self, id, username, IP, passwd, version): + # get installPackage + verName = "TDengine-enterprise-server-%s-Linux-x64.tar.gz" % version + # installPath = "TDengine-enterprise-server-%s" % self.version + node131 = Node(131, 'ubuntu', '192.168.1.131', 'tbase125!', '2.0.20.0') + node131.conn.run('sshpass -p tbase125! 
scp /nas/TDengine/v%s/enterprise/%s root@192.168.1.%d:/home/chr/installtest/' % (version,verName,id)) + node131.conn.close() + # install TDengine at 192.168.103/104/141 + try: + node = Node(id, username, IP, passwd, version) + node.conn.run('echo "start taosd"') + node.buildTaosd() + # clear DataPath , if need clear data + node.clearData() + node.startTaosd() + if id == 103 : + node.restartTarbi() + print("start taosd ver:%s node:%d successfully " % (version,id)) + node.conn.close() + + # query_pid = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + # assert query_pid == 1 , "node %d: start taosd failed " % id + except Exception as e: + print("Stop Taosd error for node %d " % id) + logging.exception(e) + + def startNode(self, id, username, IP, passwd, version): + # start TDengine + try: + node = Node(id, username, IP, passwd, version) + node.conn.run('echo "restart taosd"') + # clear DataPath , if need clear data + node.clearData() + node.restartTaosd() + time.sleep(5) + if id == 103 : + node.restartTarbi() + print("start taosd ver:%s node:%d successfully " % (version,id)) + node.conn.close() + + # query_pid = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + # assert query_pid == 1 , "node %d: start taosd failed " % id + except Exception as e: + print("Stop Taosd error for node %d " % id) + logging.exception(e) + + def firstUpgradeNode(self, id, username, IP, passwd, version): + # get installPackage + verName = "TDengine-enterprise-server-%s-Linux-x64.tar.gz" % version + # installPath = "TDengine-enterprise-server-%s" % self.version + node131 = Node(131, 'ubuntu', '192.168.1.131', 'tbase125!', '2.0.20.0') + node131.conn.run('echo "upgrade cluster"') + node131.conn.run('sshpass -p tbase125! 
scp /nas/TDengine/v%s/enterprise/%s root@192.168.1.%d:/home/chr/installtest/' % (version,verName,id)) + node131.conn.close() + # upgrade TDengine at 192.168.103/104/141 + try: + node = Node(id, username, IP, passwd, version) + node.conn.run('echo "start taosd"') + node.conn.run('echo "1234" > /home/chr/test.log') + node.buildTaosd() + time.sleep(5) + node.startTaosd() + if id == 103 : + node.restartTarbi() + print("start taosd ver:%s node:%d successfully " % (version,id)) + node.conn.close() + + # query_pid = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) + # assert query_pid == 1 , "node %d: start taosd failed " % id + except Exception as e: + print("Stop Taosd error for node %d " % id) + logging.exception(e) + + def upgradeNode(self, id, username, IP, passwd, version): + + # backCluster TDengine at 192.168.103/104/141 + try: + node = Node(id, username, IP, passwd, version) + node.conn.run('echo "rollback taos"') + node.rebuildTaosd() + time.sleep(5) + node.startTaosd() + if id == 103 : + node.restartTarbi() + print("start taosd ver:%s node:%d successfully " % (version,id)) + node.conn.close() + except Exception as e: + print("Stop Taosd error for node %d " % id) + logging.exception(e) + + +# how to use : cd TDinternal/commumity/test/pytest && python3 manualTest/rollingUpgrade.py ,when inserting data, we can start " python3 manualTest/rollingUpagrade.py". 
add example "oneNode().FirestStartNode(103,'root','192.168.1.103','tbase125!','2.0.20.0')" + + +# node103=oneNode().FirestStartNode(103,'root','192.168.1.103','tbase125!','2.0.20.0') +# node104=oneNode().FirestStartNode(104,'root','192.168.1.104','tbase125!','2.0.20.0') +# node141=oneNode().FirestStartNode(141,'root','192.168.1.141','tbase125!','2.0.20.0') + +# node103=oneNode().startNode(103,'root','192.168.1.103','tbase125!','2.0.20.0') +# time.sleep(30) +# node141=oneNode().startNode(141,'root','192.168.1.141','tbase125!','2.0.20.0') +# time.sleep(30) +# node104=oneNode().startNode(104,'root','192.168.1.104','tbase125!','2.0.20.0') +# time.sleep(30) + +# node103=oneNode().firstUpgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.5') +# time.sleep(30) +# node104=oneNode().firstUpgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.5') +# time.sleep(30) +# node141=oneNode().firstUpgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.5') +# time.sleep(30) + +# node141=oneNode().firstUpgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.10') +# time.sleep(30) +# node103=oneNode().firstUpgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.10') +# time.sleep(30) +# node104=oneNode().firstUpgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.10') +# time.sleep(30) + +# node141=oneNode().firstUpgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.12') +# time.sleep(30) +# node103=oneNode().firstUpgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.12') +# time.sleep(30) +# node104=oneNode().firstUpgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.12') +# time.sleep(30) + + + +# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.0') +# time.sleep(120) +# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.0') +# time.sleep(180) +# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.0') +# time.sleep(240) + +# 
node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.5') +# time.sleep(120) +# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.5') +# time.sleep(120) +# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.5') +# time.sleep(180) + +# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.10') +# time.sleep(120) +# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.10') +# time.sleep(120) +# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.10') +# time.sleep(180) + +# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.12') +# time.sleep(180) +# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.12') +# time.sleep(180) +# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.12') + + +# node141=oneNode().firstUpgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.9') +# time.sleep(5) +# node103=oneNode().firstUpgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.9') +# time.sleep(5) +# node104=oneNode().firstUpgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.9') +# time.sleep(30) + +# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.10') +# time.sleep(12) +# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.10') +# time.sleep(12) +# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.10') +# time.sleep(180) + +# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.12') +# time.sleep(120) +# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.12') +# time.sleep(120) +# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.12') From 3c566750aa5e16c75a82ec81130dd8be21a32663 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 6 Aug 2021 10:23:39 +0800 Subject: [PATCH 018/239] 
[TD-5765]:check max length when alter tag value --- src/client/src/tscSQLParser.c | 5 + tests/pytest/fulltest.sh | 1 + tests/pytest/tag_lite/TestModifyTag.py | 125 +++++++++++++++++++++++++ 3 files changed, 131 insertions(+) create mode 100644 tests/pytest/tag_lite/TestModifyTag.py diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 7adc0812ae..8d89b7c3cc 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5884,6 +5884,11 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pItem = taosArrayGet(pVarList, 1); SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); + + if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) { + return invalidOperationMsg(pMsg, msg14); + } + pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 783ee98da3..3cbc80b69d 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -76,6 +76,7 @@ python3 ./test.py -f tag_lite/set.py python3 ./test.py -f tag_lite/smallint.py python3 ./test.py -f tag_lite/tinyint.py python3 ./test.py -f tag_lite/timestamp.py +python3 ./test.py -f tag_lite/TestModifyTag.py #python3 ./test.py -f dbmgmt/database-name-boundary.py python3 test.py -f dbmgmt/nanoSecondCheck.py diff --git a/tests/pytest/tag_lite/TestModifyTag.py b/tests/pytest/tag_lite/TestModifyTag.py new file mode 100644 index 0000000000..acf63695f6 --- /dev/null +++ b/tests/pytest/tag_lite/TestModifyTag.py @@ -0,0 +1,125 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00" + self.numberOfTables = 10 + self.numberOfRecords = 100 + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + + def run(self): + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # basic test for alter tags + tdSql.execute("create database tagdb ") + tdSql.execute(" use tagdb") + tdSql.execute("create table st (ts timestamp , a int) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))") + tdSql.execute("insert into t using st (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)") + tdSql.execute("alter table t set tag tg1='newtg1'") + res = tdSql.getResult("select tg1,tg2,tg3 from t") + + if res == [('newtg1', 'tg2', 'tg3')]: 
+ tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + tdSql.error("alter stable st modify tag tg2 binary(2)") + tdSql.execute("alter stable st modify tag tg2 binary(30) ") + tdSql.execute("alter table t set tag tg2 = 'abcdefghijklmnopqrstuvwxyz1234'") + res = tdSql.getResult("select tg1,tg2,tg3 from t") + if res == [('newtg1', 'abcdefghijklmnopqrstuvwxyz1234', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + # test boundary about tags + tdSql.execute("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16374))") + tdSql.error("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16375))") + bound_sql = "create stable stb2 (ts timestamp , a int) tags (tg1 binary(10)," + for i in range(127): + bound_sql+="tag"+str(i)+" binary(10)," + sql1 = bound_sql[:-1]+")" + tdSql.execute(sql1) + sql2 = bound_sql[:-1]+"tag127 binary(10))" + tdSql.error(sql2) + tdSql.execute("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4093))") + tdSql.error("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4094))") + tdSql.execute("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(8))") + tdSql.error("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(9))") + tdSql.execute("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(4),tag3 binary(2))") + tdSql.error("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(4),tag3 binary(3))") + + tdSql.execute("create table stt (ts timestamp , a binary(100)) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))") + tdSql.execute("insert into tt using stt (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)") + tags = "t"*16337 + sql3 = "alter table tt set tag tg1=" +"'"+tags+"'" + tdSql.error(sql3) + tdSql.execute("alter stable stt modify tag tg1 binary(16337)") + tdSql.execute(sql3) + res = 
tdSql.getResult("select tg1,tg2,tg3 from tt") + if res == [(tags, 'tg2', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + os.system("rm -rf ./tag_lite/TestModifyTag.py.sql") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From cc69fcc49ced0ad13e3be106af5e99b88a996e4f Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 9 Aug 2021 11:01:43 +0800 Subject: [PATCH 019/239] [TD-5784]: fixed a wrong check --- src/plugins/http/src/httpUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index 27b95a4934..ade50bdad6 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -188,7 +188,7 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (cmdSize <= 0 && cmdSize > HTTP_MAX_CMD_SIZE) { + if (cmdSize <= 0 || cmdSize > HTTP_MAX_CMD_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); return false; From e554181cfb0f2641bcfcdbf4dee02ff8968099f1 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 10 Aug 2021 14:13:00 +0800 Subject: [PATCH 020/239] [TD-5921]:fix case problem in 'tbname in' syntax --- src/tsdb/src/tsdbRead.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e1d40aa7d0..260c3d67dc 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -3468,6 +3468,7 @@ void filterPrepare(void* expr, void* param) { SArray *arr = (SArray *)(pCond->arr); for (size_t i = 0; i < taosArrayGetSize(arr); i++) { char* p = taosArrayGetP(arr, 
i); + strtolower(varDataVal(p), varDataVal(p)); taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy)); } } else { From fbf76a3705bcb6014a8f413d0a18a553aa4f5a2e Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 10 Aug 2021 18:46:55 +0800 Subject: [PATCH 021/239] [TD-5922]: add unitest cases --- tests/pytest/query/tbname.py | 3 +++ .../script/general/parser/tbnameIn_query.sim | 24 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/tests/pytest/query/tbname.py b/tests/pytest/query/tbname.py index 08416ba3ed..30d90b1f9d 100644 --- a/tests/pytest/query/tbname.py +++ b/tests/pytest/query/tbname.py @@ -53,6 +53,9 @@ class TDTestCase: "select * from cars where id=0 and tbname in ('carzero', 'cartwo')") tdSql.checkRows(1) + tdSql.query("select * from cars where tbname in ('carZero', 'CARONE')") + tdSql.checkRows(2) + """ tdSql.query("select * from cars where tbname like 'car%'") tdSql.checkRows(2) diff --git a/tests/script/general/parser/tbnameIn_query.sim b/tests/script/general/parser/tbnameIn_query.sim index 65bb89d549..db27886bbf 100644 --- a/tests/script/general/parser/tbnameIn_query.sim +++ b/tests/script/general/parser/tbnameIn_query.sim @@ -101,6 +101,30 @@ if $data11 != 2 then return -1 endi +## tbname in can accpet Upper case table name +sql select count(*) from $stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') group by t1 order by t1 +if $rows != 3 then + return -1 +endi +if $data00 != 10 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data10 != 10 then + return -1 +endi +if $data11 != 1 then + return -1 +endi +if $data20 != 10 then + return -1 +endi +if $data21 != 2 then + return -1 +endi + # multiple tbname in is not allowed NOW sql_error select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc #if $rows != 4 then From 9bafb6436eb14c6b9fbbaf4c706a42142c0be87b Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 11 Aug 2021 09:04:04 
+0800 Subject: [PATCH 022/239] trigger CI --- src/client/src/tscSQLParser.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 8d89b7c3cc..afbecd3a10 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5888,7 +5888,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) { return invalidOperationMsg(pMsg, msg14); } - pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { From 52848c4de5ac5d6415267e8a08fb7860acb8cc43 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 11 Aug 2021 14:55:45 +0800 Subject: [PATCH 023/239] [TD-5923]finish test--->where tbname in UPPER/lower/mIXed add query/queryTbnameUpperLower.py add util/common.py for adding common function in case of duplicate use --- tests/pytest/fulltest.sh | 1 + tests/pytest/query/queryTbnameUpperLower.py | 78 +++++++++++++++++++++ tests/pytest/util/common.py | 53 ++++++++++++++ 3 files changed, 132 insertions(+) create mode 100644 tests/pytest/query/queryTbnameUpperLower.py create mode 100644 tests/pytest/util/common.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index fdcd0a3c42..17c2fc6915 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -381,6 +381,7 @@ python3 test.py -f alter/alter_create_exception.py python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f insert/schemalessInsert.py python3 ./test.py -f alter/alterColMultiTimes.py +python3 ./test.py -f query/queryTbnameUpperLower.py #======================p4-end=============== diff --git a/tests/pytest/query/queryTbnameUpperLower.py b/tests/pytest/query/queryTbnameUpperLower.py new file mode 100644 index 0000000000..bd4e85c5ca --- /dev/null +++ 
b/tests/pytest/query/queryTbnameUpperLower.py @@ -0,0 +1,78 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.common import tdCom + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def checkStbWhereIn(self): + ''' + where in ---> upper lower mixed + ''' + tdCom.cleanTb() + table_name = tdCom.getLongName(8, "letters_mixed") + table_name_sub = f'{table_name}_sub' + tb_name_lower = table_name_sub.lower() + tb_name_upper = table_name_sub.upper() + + ## create stb and tb + tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, id int, bi1 binary(20)) tags (si1 binary(20))') + tdSql.execute(f'create table {table_name_sub}1 using {table_name} tags ("{table_name_sub}1")') + tdSql.execute(f'create table {tb_name_lower}2 using {table_name} tags ("{tb_name_lower}2")') + tdSql.execute(f'create table {tb_name_upper}3 using {table_name} tags ("{tb_name_upper}3")') + + ## insert values + tdSql.execute(f'insert into {table_name_sub}1 values (now-1s, 1, "{table_name_sub}1")') + tdSql.execute(f'insert into {tb_name_lower}2 values (now-2s, 2, "{tb_name_lower}21")') + tdSql.execute(f'insert into {tb_name_lower}2 values (now-3s, 3, "{tb_name_lower}22")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-4s, 4, "{tb_name_upper}31")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-5s, 5, "{tb_name_upper}32")') + 
tdSql.execute(f'insert into {tb_name_upper}3 values (now-6s, 6, "{tb_name_upper}33")') + + ## query where tbname in single + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.upper()}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.lower()}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower}2")') + tdSql.checkRows(2) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower.upper()}2")') + tdSql.checkRows(2) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper}3")') + tdSql.checkRows(3) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper.lower()}3")') + tdSql.checkRows(3) + + ## query where tbname in multi + tdSql.query(f'select * from {table_name} where id=5 and tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")') + tdSql.checkRows(6) + + def run(self): + tdSql.prepare() + self.checkStbWhereIn() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py new file mode 100644 index 0000000000..1c7d94a8a4 --- /dev/null +++ b/tests/pytest/util/common.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +from util.sql import tdSql + +class TDCom: + def init(self, conn, logSql): + tdSql.init(conn.cursor(), logSql) + + def cleanTb(self): + query_sql = "show stables" + res_row_list = tdSql.query(query_sql, True) + stb_list = map(lambda x: x[0], res_row_list) + for stb in stb_list: + tdSql.execute(f'drop table if exists {stb}') + + query_sql = "show tables" + res_row_list = tdSql.query(query_sql, True) + tb_list = map(lambda x: x[0], res_row_list) + for tb in tb_list: + tdSql.execute(f'drop table if exists {tb}') + + def getLongName(self, len, mode = "mixed"): + """ + generate long name + mode could be numbers/letters/letters_mixed/mixed + """ + if mode == "numbers": + chars = ''.join(random.choice(string.digits) for i in range(len)) + elif mode == "letters": + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + elif mode == "letters_mixed": + chars = ''.join(random.choice(string.ascii_letters.upper() + string.ascii_letters.lower()) for i in range(len)) + else: + chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len)) + return chars + + def close(self): + self.cursor.close() + +tdCom = TDCom() \ No newline at end of file From 24eda742e42011331d69fc0c183e4d20a7f6246a Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 11 Aug 2021 18:50:07 +0800 Subject: [PATCH 024/239] [TD-5918] add the configuration of max length wild cards --- packaging/cfg/taos.cfg | 3 +++ src/client/src/tscSQLParser.c | 16 ++++++++++------ src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 13 +++++++++++++ src/util/inc/tcompare.h | 2 +- src/util/src/tcompare.c | 19 ++++++++++--------- 
.../functions/showOfflineThresholdIs864000.py | 2 +- 7 files changed, 39 insertions(+), 17 deletions(-) diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index bbe6eae419..3ae4e9941e 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -144,6 +144,9 @@ keepColumnName 1 # max length of an SQL # maxSQLLength 65480 +# max length of WildCards +# maxWildCardsLength 100 + # the maximum number of records allowed for super table time sorting # maxNumOfOrderedRes 100000 diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6df724881e..490c8be468 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -3218,7 +3218,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { pCmd->command = TSDB_SQL_SHOW; const char* msg1 = "invalid name"; - const char* msg2 = "pattern filter string too long"; + const char* msg2 = "wildcard string should be less than %d characters"; const char* msg3 = "database name too long"; const char* msg4 = "invalid ip address"; const char* msg5 = "database name is empty"; @@ -3262,8 +3262,10 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - if (!tscValidateTableNameLength(pCmd->payloadLen)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + if (pPattern->n > tsMaxWildCardsLen){ + char tmp[64] = {0}; + sprintf(tmp, msg2, tsMaxWildCardsLen); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), tmp); } } } else if (showType == TSDB_MGMT_TABLE_VNODES) { @@ -4394,15 +4396,17 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, char* msgBuf) { // check for like expression static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { - const char* msg1 = "wildcard string should be less than 20 characters"; + const char* msg1 = "wildcard string should be less than %d characters"; const char* msg2 = "illegal column name"; tSqlExpr* 
pLeft = pExpr->pLeft; tSqlExpr* pRight = pExpr->pRight; if (pExpr->tokenId == TK_LIKE) { - if (pRight->value.nLen > TSDB_PATTERN_STRING_MAX_LEN) { - return invalidOperationMsg(msgBuf, msg1); + if (pRight->value.nLen > tsMaxWildCardsLen) { + char tmp[64] = {0}; + sprintf(tmp, msg1, tsMaxWildCardsLen); + return invalidOperationMsg(msgBuf, tmp); } SSchema* pSchema = tscGetTableSchema(pTableMeta); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 7290db6ec9..25d1c90ec5 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -70,6 +70,7 @@ extern int8_t tsKeepOriginalColumnName; // client extern int32_t tsMaxSQLStringLen; +extern int32_t tsMaxWildCardsLen; extern int8_t tsTscEnableRecordSql; extern int32_t tsMaxNumOfOrderedResults; extern int32_t tsMinSlidingTime; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index a58303e9fc..f9135605bb 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -25,6 +25,7 @@ #include "tutil.h" #include "tlocale.h" #include "ttimezone.h" +#include "tcompare.h" // cluster char tsFirst[TSDB_EP_LEN] = {0}; @@ -75,6 +76,7 @@ int32_t tsCompressMsgSize = -1; // client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; +int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN; int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from @@ -984,6 +986,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_BYTE; taosInitConfigOption(cfg); + cfg.option = "maxWildCardsLength"; + cfg.ptr = &tsMaxWildCardsLen; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = TSDB_MAX_FIELD_LEN; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_BYTE; + taosInitConfigOption(cfg); + cfg.option = "maxNumOfOrderedRes"; cfg.ptr = &tsMaxNumOfOrderedResults; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ 
-1531,6 +1543,7 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); #ifdef TD_TSZ // lossy compress cfg.option = "lossyColumns"; diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 612ce7ede0..fe46f00086 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -25,7 +25,7 @@ extern "C" { #define TSDB_PATTERN_MATCH 0 #define TSDB_PATTERN_NOMATCH 1 #define TSDB_PATTERN_NOWILDCARDMATCH 2 -#define TSDB_PATTERN_STRING_MAX_LEN 20 +#define TSDB_PATTERN_STRING_MAX_LEN 100 #define FLT_COMPAR_TOL_FACTOR 4 #define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON)) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index e953f4c464..ebcee2edff 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -264,18 +264,19 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; - - char pattern[128] = {0}; + + assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN); + char *pattern = calloc(varDataLen(pRight) + 1, sizeof(char)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); size_t sz = varDataLen(pLeft); - char *buf = malloc(sz + 1); - memcpy(buf, varDataVal(pLeft), sz); + char *buf = malloc(sz + 1); + memcpy(buf, varDataVal(pLeft), sz); buf[sz] = 0; int32_t ret = patternMatch(pattern, buf, sz, &pInfo); free(buf); + free(pattern); return (ret == TSDB_PATTERN_MATCH) ? 
0 : 1; } @@ -297,13 +298,13 @@ static int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; - wchar_t pattern[128] = {0}; - assert(TSDB_PATTERN_STRING_MAX_LEN < 128); + assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE); + wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); - + int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); + free(pattern); return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py index a7a1c2bf3f..57d0b1921b 100644 --- a/tests/pytest/functions/showOfflineThresholdIs864000.py +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -25,7 +25,7 @@ class TDTestCase: def run(self): tdSql.query("show variables") - tdSql.checkData(53, 1, 864000) + tdSql.checkData(54, 1, 864000) def stop(self): tdSql.close() From 70f76974067260a1e6a95760bd121829ad807269 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 11 Aug 2021 19:17:08 +0800 Subject: [PATCH 025/239] [TD-5616]like wildcard max_length test--merge to master add getVariable() to util/sql.py add 4 testcases to query/queryWildcardLength.py --- tests/pytest/fulltest.sh | 1 + tests/pytest/query/queryWildcardLength.py | 207 ++++++++++++++++++++++ tests/pytest/util/sql.py | 16 ++ 3 files changed, 224 insertions(+) create mode 100644 tests/pytest/query/queryWildcardLength.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 5df37bb182..8b015727ea 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -382,6 +382,7 @@ python3 test.py -f alter/alter_create_exception.py python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f 
insert/schemalessInsert.py python3 ./test.py -f alter/alterColMultiTimes.py +python3 ./test.py -f query/queryWildcardLength.py #======================p4-end=============== diff --git a/tests/pytest/query/queryWildcardLength.py b/tests/pytest/query/queryWildcardLength.py new file mode 100644 index 0000000000..1fc46fe7d6 --- /dev/null +++ b/tests/pytest/query/queryWildcardLength.py @@ -0,0 +1,207 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from copy import deepcopy +import string +import random +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def cleanTb(self): + query_sql = "show stables" + res_row_list = tdSql.query(query_sql, True) + stb_list = map(lambda x: x[0], res_row_list) + for stb in stb_list: + tdSql.execute(f'drop table if exists {stb}') + + query_sql = "show tables" + res_row_list = tdSql.query(query_sql, True) + tb_list = map(lambda x: x[0], res_row_list) + for tb in tb_list: + tdSql.execute(f'drop table if exists {tb}') + + def getLongWildcardStr(self, len=None): + """ + generate long wildcard str + """ + maxWildCardsLength = int(tdSql.getVariable('maxWildCardsLength')[0]) + if len: + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + else: + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(maxWildCardsLength+1)) + return chars + + def genTableName(self): + ''' + 
generate table name + hp_name--->'%str' + lp_name--->'str%' + ul_name--->'st_r' + ''' + table_name = self.getLongWildcardStr() + table_name_list = list(table_name) + table_name_list.pop(-1) + + if len(table_name_list) > 1: + lp_name = deepcopy(table_name_list) + lp_name[-1] = '%' + lp_name = ''.join(lp_name) + + ul_name = list(lp_name) + ul_name[int(len(ul_name)/2)] = '_' + ul_name = ''.join(ul_name) + + table_name_list = list(table_name) + hp_name = deepcopy(table_name_list) + hp_name.pop(1) + hp_name[0] = '%' + hp_name = ''.join(hp_name) + else: + hp_name = '%' + lp_name = '%' + ul_name = '_' + return table_name, hp_name, lp_name, ul_name + + def checkRegularTableWildcardLength(self): + ''' + check regular table wildcard length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, a1 int)") + sql_list = [f'show tables like "{hp_name}"', f'show tables like "{lp_name}"', f'show tables like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'show tables like "%{hp_name}"', f'show tables like "{lp_name}%"', f'show tables like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkSuperTableWildcardLength(self): + ''' + check super table wildcard length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, c1 int) tags (t1 int)") + sql_list = [f'show stables like "{hp_name}"', f'show stables like "{lp_name}"', f'show stables like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'show stables like "%{hp_name}"', f'show stables like "{lp_name}%"', f'show stables like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def 
checkRegularWildcardSelectLength(self): + ''' + check regular table wildcard select length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, bi1 binary(200), nc1 nchar(200))") + tdSql.execute(f'insert into {table_name} values (now, "{table_name}", "{table_name}")') + sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}"', + f'select * from {table_name} where bi1 like "{ul_name}"', + f'select * from {table_name} where nc1 like "{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}"', + f'select * from {table_name} where nc1 like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'select * from {table_name} where bi1 like "%{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}%"', + f'select * from {table_name} where bi1 like "{ul_name}%"', + f'select * from {table_name} where nc1 like "%{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}%"', + f'select * from {table_name} where nc1 like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkStbWildcardSelectLength(self): + ''' + check stb wildcard select length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + + tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, bi1 binary(200), nc1 nchar(200)) tags (si1 binary(200), sc1 nchar(200))') + tdSql.execute(f'create table {table_name}_sub1 using {table_name} tags ("{table_name}", "{table_name}")') + tdSql.execute(f'insert into {table_name}_sub1 values (now, "{table_name}", "{table_name}");') + + sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}"', + f'select * from {table_name} where bi1 like 
"{ul_name}"', + f'select * from {table_name} where nc1 like "{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}"', + f'select * from {table_name} where nc1 like "{ul_name}"', + f'select * from {table_name} where si1 like "{hp_name}"', + f'select * from {table_name} where si1 like "{lp_name}"', + f'select * from {table_name} where si1 like "{ul_name}"', + f'select * from {table_name} where sc1 like "{hp_name}"', + f'select * from {table_name} where sc1 like "{lp_name}"', + f'select * from {table_name} where sc1 like "{ul_name}"'] + + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + exceed_sql_list = [f'select * from {table_name} where bi1 like "%{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}%"', + f'select * from {table_name} where bi1 like "{ul_name}%"', + f'select * from {table_name} where nc1 like "%{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}%"', + f'select * from {table_name} where nc1 like "{ul_name}%"', + f'select * from {table_name} where si1 like "%{hp_name}"', + f'select * from {table_name} where si1 like "{lp_name}%"', + f'select * from {table_name} where si1 like "{ul_name}%"', + f'select * from {table_name} where sc1 like "%{hp_name}"', + f'select * from {table_name} where sc1 like "{lp_name}%"', + f'select * from {table_name} where sc1 like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def run(self): + tdSql.prepare() + self.checkRegularTableWildcardLength() + self.checkSuperTableWildcardLength() + self.checkRegularWildcardSelectLength() + self.checkStbWildcardSelectLength() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index b42af27d06..dfe1e4a582 100644 --- a/tests/pytest/util/sql.py +++ 
b/tests/pytest/util/sql.py @@ -81,6 +81,22 @@ class TDSql: return self.queryResult return self.queryRows + def getVariable(self, search_attr): + ''' + get variable of search_attr access "show variables" + ''' + try: + sql = 'show variables' + param_list = self.query(sql, row_tag=True) + for param in param_list: + if param[0] == search_attr: + return param[1], param_list + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + def getColNameList(self, sql, col_tag=None): self.sql = sql try: From ad01fcb88497bcbb42549d3471e8640bf871ef88 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Wed, 11 Aug 2021 08:54:43 +0800 Subject: [PATCH 026/239] [TD-5930]:_block_dist initialize client merge buffer with max steps --- src/query/src/qAggMain.c | 11 +++++++---- src/query/src/qExecutor.c | 3 +++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 9f081cfb2f..6eadedcaf3 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -4062,12 +4062,15 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD pDist->maxRows = pSrc->maxRows; pDist->minRows = pSrc->minRows; - int32_t numSteps = tsMaxRowsInFileBlock/TSDB_BLOCK_DIST_STEP_ROWS; - pDist->dataBlockInfos = taosArrayInit(numSteps, sizeof(SFileBlockInfo)); - taosArraySetSize(pDist->dataBlockInfos, numSteps); + int32_t maxSteps = TSDB_MAX_MAX_ROW_FBLOCK/TSDB_BLOCK_DIST_STEP_ROWS; + if (TSDB_MAX_MAX_ROW_FBLOCK % TSDB_BLOCK_DIST_STEP_ROWS != 0) { + ++maxSteps; + } + pDist->dataBlockInfos = taosArrayInit(maxSteps, sizeof(SFileBlockInfo)); + taosArraySetSize(pDist->dataBlockInfos, maxSteps); } - size_t steps = taosArrayGetSize(pDist->dataBlockInfos); + size_t steps = taosArrayGetSize(pSrc->dataBlockInfos); for (int32_t i = 0; i < steps; ++i) { int32_t srcNumBlocks = 
((SFileBlockInfo*)taosArrayGet(pSrc->dataBlockInfos, i))->numBlocksOfStep; SFileBlockInfo* blockInfo = (SFileBlockInfo*)taosArrayGet(pDist->dataBlockInfos, i); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 52e5dbb7f3..7b6dd86f22 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5024,6 +5024,9 @@ static SSDataBlock* doBlockInfoScan(void* param, bool* newgroup) { tableBlockDist.numOfTables = (int32_t)pOperator->pRuntimeEnv->tableqinfoGroupInfo.numOfTables; int32_t numRowSteps = tsMaxRowsInFileBlock / TSDB_BLOCK_DIST_STEP_ROWS; + if (tsMaxRowsInFileBlock % TSDB_BLOCK_DIST_STEP_ROWS != 0) { + ++numRowSteps; + } tableBlockDist.dataBlockInfos = taosArrayInit(numRowSteps, sizeof(SFileBlockInfo)); taosArraySetSize(tableBlockDist.dataBlockInfos, numRowSteps); tableBlockDist.maxRows = INT_MIN; From 6cc49faf82fa5d1d546128756561ab3f9e377a22 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 10 Aug 2021 14:37:19 +0800 Subject: [PATCH 027/239] [TD-5931]:invalidate time range when no tables in table groups of tsdb query --- src/tsdb/src/tsdbRead.c | 12 ++++++++++++ tests/script/general/parser/interp.sim | 13 ++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e1d40aa7d0..9cc9b7224c 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -691,6 +691,18 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) { STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); + if (pNew->numOfTables == 0) { + tsdbDebug("update query time range to invalidate time window"); + + assert(taosArrayGetSize(pNew->pGroupList) == 0); + bool asc = ASCENDING_TRAVERSE(pCond->order); + if (asc) { + pCond->twindow.ekey = pCond->twindow.skey - 1; + } else { + pCond->twindow.skey 
= pCond->twindow.ekey - 1; + } + } + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef); pQueryHandle->loadExternalRow = true; pQueryHandle->currentLoadExternalRows = true; diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim index 55c5701985..7626b15647 100644 --- a/tests/script/general/parser/interp.sim +++ b/tests/script/general/parser/interp.sim @@ -68,4 +68,15 @@ print ================== server restart completed run general/parser/interp_test.sim -#system sh/exec.sh -n dnode1 -s stop -x SIGINT +print ================= TD-5931 +sql create stable st5931(ts timestamp, f int) tags(t int) +sql create table ct5931 using st5931 tags(1) +sql create table nt5931(ts timestamp, f int) +sql select interp(*) from nt5931 where ts=now +sql select interp(*) from st5931 where ts=now +sql select interp(*) from ct5931 where ts=now +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 5cb50f7922bfe7c6c38fa30e68e837763040060c Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Thu, 12 Aug 2021 10:25:06 +0800 Subject: [PATCH 028/239] fixed huffman tree memory leak --- deps/TSZ | 2 +- src/kit/taospack/taospack.c | 52 ++++++++++++++++++++++++++++++++++--- 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/deps/TSZ b/deps/TSZ index 0ca5b15a8e..ceda5bf9fc 160000 --- a/deps/TSZ +++ b/deps/TSZ @@ -1 +1 @@ -Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c +Subproject commit ceda5bf9fcd7836509ac97dcc0056b3f1dd48cc5 diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index 33d779dfcf..ad188c3010 100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -18,6 +18,7 @@ #include #include + #if defined(WINDOWS) int main(int argc, char *argv[]) { printf("welcome to use taospack tools v1.3 for windows.\n"); @@ -601,7 +602,6 @@ void test_threadsafe_double(int thread_count){ } - void unitTestFloat() { 
float ft1 [] = {1.11, 2.22, 3.333}; @@ -662,7 +662,50 @@ void unitTestFloat() { free(ft2); free(buff); free(output); - +} + +void leakFloat() { + + int cnt = sizeof(g_ft1)/sizeof(float); + float* floats = g_ft1; + int algorithm = 2; + + // compress + const char* input = (const char*)floats; + int input_len = cnt * sizeof(float); + int output_len = input_len + 1024; + char* output = (char*) malloc(output_len); + char* buff = (char*) malloc(input_len); + int buff_len = input_len; + + int ret_len = 0; + ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len); + + if(ret_len == 0) { + printf(" compress float error.\n"); + free(buff); + free(output); + return ; + } + + float* ft2 = (float*)malloc(input_len); + ret_len = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len); + if(ret_len == 0) { + printf(" decompress float error.\n"); + } + + free(ft2); + free(buff); + free(output); +} + + +void leakTest(){ + for(int i=0; i< 90000000000000; i++){ + if(i%10000==0) + printf(" ---------- %d ---------------- \n", i); + leakFloat(); + } } #define DB_CNT 500 @@ -689,7 +732,7 @@ extern char Compressor []; // ----------------- main ---------------------- // int main(int argc, char *argv[]) { - printf("welcome to use taospack tools v1.3\n"); + printf("welcome to use taospack tools v1.5\n"); //printf(" sizeof(int)=%d\n", (int)sizeof(int)); //printf(" sizeof(long)=%d\n", (int)sizeof(long)); @@ -753,6 +796,9 @@ int main(int argc, char *argv[]) { if(strcmp(argv[1], "-mem") == 0) { memTest(); } + else if(strcmp(argv[1], "-leak") == 0) { + leakTest(); + } } else{ unitTestFloat(); From 7573622bd2e2fb6a507b64d3d1926338caad0fd6 Mon Sep 17 00:00:00 2001 From: xywang Date: Thu, 12 Aug 2021 11:38:08 +0800 Subject: [PATCH 029/239] [TD-6012]: fixed no sql recordings via http while httpEnableRecordSql was on --- src/dnode/src/dnodeMain.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index eac04fe7bb..5291fb73a0 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -166,7 +166,6 @@ int32_t dnodeInitSystem() { taosInitGlobalCfg(); taosReadGlobalLogCfg(); taosSetCoreDump(); - taosInitNotes(); dnodeInitTmr(); if (dnodeCreateDir(tsLogDir) < 0) { @@ -188,6 +187,8 @@ int32_t dnodeInitSystem() { dInfo("start to initialize TDengine"); + taosInitNotes(); + if (dnodeInitComponents() != 0) { return -1; } From 5b9e1cff99ee931bfc65d61c6325f15d004d2004 Mon Sep 17 00:00:00 2001 From: xywang Date: Thu, 12 Aug 2021 11:44:14 +0800 Subject: [PATCH 030/239] [TD-6012]: fixed no sql recordings via http while httpEnableRecordSql was on --- src/dnode/src/dnodeMain.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 22ce6c995a..ab2fcbea6a 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -129,7 +129,6 @@ int32_t dnodeInitSystem() { taosInitGlobalCfg(); taosReadGlobalLogCfg(); taosSetCoreDump(); - taosInitNotes(); dnodeInitTmr(); if (dnodeCreateDir(tsLogDir) < 0) { @@ -151,6 +150,8 @@ int32_t dnodeInitSystem() { dInfo("start to initialize TDengine"); + taosInitNotes(); + if (dnodeInitComponents() != 0) { return -1; } From 735e6c1ea1720e0cd0f8156be797bfd48e050775 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 12 Aug 2021 11:53:53 +0800 Subject: [PATCH 031/239] fix: fix odr violation --- src/common/src/tglobal.c | 3 +++ src/tsdb/inc/tsdbFS.h | 5 ++++- src/tsdb/src/tsdbFS.c | 3 +-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index a58303e9fc..b9a4f4ffea 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -26,6 +26,9 @@ #include "tlocale.h" #include "ttimezone.h" +// TSDB +bool tsdbForceKeepFile = false; + // cluster char tsFirst[TSDB_EP_LEN] = {0}; char tsSecond[TSDB_EP_LEN] = {0}; diff --git 
a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h index d63aeb14ac..55db8d56ff 100644 --- a/src/tsdb/inc/tsdbFS.h +++ b/src/tsdb/inc/tsdbFS.h @@ -18,6 +18,9 @@ #define TSDB_FS_VERSION 0 +// ================== TSDB global config +extern bool tsdbForceKeepFile; + // ================== CURRENT file header info typedef struct { uint32_t version; // Current file system version (relating to code) @@ -109,4 +112,4 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS* pFs) { return 0; } -#endif /* _TD_TSDB_FS_H_ */ \ No newline at end of file +#endif /* _TD_TSDB_FS_H_ */ diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index 68450301d8..70a5262138 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -38,7 +38,6 @@ static int tsdbProcessExpiredFS(STsdbRepo *pRepo); static int tsdbCreateMeta(STsdbRepo *pRepo); // For backward compatibility -bool tsdbForceKeepFile = false; // ================== CURRENT file header info static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) { int tlen = 0; @@ -1354,4 +1353,4 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired) { tsdbCloseDFileSet(&fset); } -} \ No newline at end of file +} From 7a0d2c10c530aa215169e9f737026739840a2b3a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 14:05:38 +0800 Subject: [PATCH 032/239] [td-225]merge master. 
--- src/connector/go | 2 +- src/connector/grafanaplugin | 2 +- src/connector/hivemq-tdengine-extension | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/connector/go b/src/connector/go index b8f76da4a7..050667e5b4 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4 +Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index a44ec1ca49..32e2c97a4c 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206 +Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension index ce52010141..b62a26ecc1 160000 --- a/src/connector/hivemq-tdengine-extension +++ b/src/connector/hivemq-tdengine-extension @@ -1 +1 @@ -Subproject commit ce5201014136503d34fecbd56494b67b4961056c +Subproject commit b62a26ecc164a310104df57691691b237e091c89 From 8ecda5ba5404a21b755c0d6ecada820cbbe290ac Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 10 Aug 2021 14:37:19 +0800 Subject: [PATCH 033/239] [TD-5931]:invalidate time range when no tables in table groups of tsdb query --- src/tsdb/src/tsdbRead.c | 12 ++++++++++++ tests/script/general/parser/interp.sim | 17 ++++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e1d40aa7d0..9cc9b7224c 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -691,6 +691,18 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) { STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); + if (pNew->numOfTables == 0) { + tsdbDebug("update 
query time range to invalidate time window"); + + assert(taosArrayGetSize(pNew->pGroupList) == 0); + bool asc = ASCENDING_TRAVERSE(pCond->order); + if (asc) { + pCond->twindow.ekey = pCond->twindow.skey - 1; + } else { + pCond->twindow.skey = pCond->twindow.ekey - 1; + } + } + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef); pQueryHandle->loadExternalRow = true; pQueryHandle->currentLoadExternalRows = true; diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim index 55c5701985..4654913d1e 100644 --- a/tests/script/general/parser/interp.sim +++ b/tests/script/general/parser/interp.sim @@ -68,4 +68,19 @@ print ================== server restart completed run general/parser/interp_test.sim -#system sh/exec.sh -n dnode1 -s stop -x SIGINT +print ================= TD-5931 +sql create stable st5931(ts timestamp, f int) tags(t int) +sql create table ct5931 using st5931 tags(1) +sql create table nt5931(ts timestamp, f int) + +sql select interp(*) from nt5931 where ts=now + +sql select interp(*) from st5931 where ts=now + +sql select interp(*) from ct5931 where ts=now + +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From b874090eac8637cbfd8c03e4fbc15d590f830d2d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 14:22:49 +0800 Subject: [PATCH 034/239] [td-255] cherry pick order support. 
--- src/client/src/tscSQLParser.c | 85 ++++++++++++++++++++--------------- src/client/src/tscServer.c | 6 +-- src/query/inc/qExecutor.h | 2 +- src/query/src/qPlan.c | 15 ++++--- 4 files changed, 62 insertions(+), 46 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6df724881e..46292ceb91 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5482,14 +5482,19 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { pQueryInfo->order.order = TSDB_ORDER_ASC; if (isTopBottomQuery(pQueryInfo)) { pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; - } else { // in case of select tbname from super_table, the defualt order column can not be the primary ts column - pQueryInfo->order.orderColId = INT32_MIN; + } else { // in case of select tbname from super_table, the default order column can not be the primary ts column + pQueryInfo->order.orderColId = INT32_MIN; // todo define a macro } /* for super table query, set default ascending order for group output */ if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; } + + if (pQueryInfo->distinct) { + pQueryInfo->order.order = TSDB_ORDER_ASC; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } } int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { @@ -5501,17 +5506,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - if (pQueryInfo->distinct == true) { - pQueryInfo->order.order = TSDB_ORDER_ASC; - pQueryInfo->order.orderColId = 0; - return TSDB_CODE_SUCCESS; - } - if (pSqlNode->pSortOrder == NULL) { + if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) { return TSDB_CODE_SUCCESS; } - SArray* pSortorder = pSqlNode->pSortOrder; + char* pMsgBuf = tscGetErrorMsgPayload(pCmd); + SArray* 
pSortOrder = pSqlNode->pSortOrder; /* * for table query, there is only one or none order option is allowed, which is the @@ -5519,19 +5519,19 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq * * for super table query, the order option must be less than 3. */ - size_t size = taosArrayGetSize(pSortorder); - if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { + size_t size = taosArrayGetSize(pSortOrder); + if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) { if (size > 1) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0); + return invalidOperationMsg(pMsgBuf, msg0); } } else { if (size > 2) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidOperationMsg(pMsgBuf, msg3); } } // handle the first part of order by - tVariant* pVar = taosArrayGet(pSortorder, 0); + tVariant* pVar = taosArrayGet(pSortOrder, 0); // e.g., order by 1 asc, return directly with out further check. if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) { @@ -5543,7 +5543,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + return invalidOperationMsg(pMsgBuf, msg1); } bool orderByTags = false; @@ -5555,7 +5555,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // it is a tag column if (pQueryInfo->groupbyExpr.columnInfo == NULL) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); if (relTagIndex == pColIndex->colIndex) { @@ -5576,13 +5576,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* 
pQueryInfo, SSqlNode* pSq orderByGroupbyCol = true; } } + if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidOperationMsg(pMsgBuf, msg3); } else { // order by top/bottom result value column is not supported in case of interval query. assert(!(orderByTags && orderByTS && orderByGroupbyCol)); } - size_t s = taosArrayGetSize(pSortorder); + size_t s = taosArrayGetSize(pSortOrder); if (s == 1) { if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -5601,7 +5602,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, 1); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); @@ -5619,9 +5620,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq addPrimaryTsColIntoResult(pQueryInfo, pCmd); } } - } - - if (s == 2) { + } else { tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0); if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -5638,22 +5637,23 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq tVariant* pVar2 = &pItem->pVar; SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + return invalidOperationMsg(pMsgBuf, msg1); } if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return 
invalidOperationMsg(pMsgBuf, msg2); } else { - tVariantListItem* p1 = taosArrayGet(pSortorder, 1); + tVariantListItem* p1 = taosArrayGet(pSortOrder, 1); pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } - } else { // meter query - if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + } else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return invalidOperationMsg(pMsgBuf, msg1); } + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) { bool validOrder = false; SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; @@ -5661,13 +5661,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq SColIndex* pColIndex = taosArrayGet(columnInfo, 0); validOrder = (pColIndex->colIndex == index.columnIndex); } + if (!validOrder) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } + tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId; pQueryInfo->groupbyExpr.orderType = p1->sortOrder; - } if (isTopBottomQuery(pQueryInfo)) { @@ -5683,13 +5684,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, 1); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } + validOrder = true; } if (!validOrder) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return 
invalidOperationMsg(pMsgBuf, msg2); } tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); @@ -5699,6 +5701,18 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return TSDB_CODE_SUCCESS; } + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + } else { + // handle the temp table order by clause. You can order by any single column in case of the temp table, created by + // inner subquery. + assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1); + + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return invalidOperationMsg(pMsgBuf, msg1); + } + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; @@ -8458,8 +8472,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg8 = "condition missing for join query"; const char* msg9 = "not support 3 level select"; - int32_t code = TSDB_CODE_SUCCESS; - + int32_t code = TSDB_CODE_SUCCESS; SSqlCmd* pCmd = &pSql->cmd; STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -8754,8 +8767,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo); pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo); pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo); - //pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pQueryInfo, 0); - pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo); pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 3e8dfac1da..e809fe3137 100644 --- a/src/client/src/tscServer.c 
+++ b/src/client/src/tscServer.c @@ -880,16 +880,16 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; SQueryAttr query = {{0}}; tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql); + query.vgId = pTableMeta->vgId; SArray* tableScanOperator = createTableScanPlan(&query); SArray* queryOperator = createExecOperatorPlan(&query); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; - SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload; tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version)); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 5fe68e456f..29cfa5edf1 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -417,7 +417,6 @@ typedef struct STableScanInfo { int32_t *rowCellInfoOffset; SExprInfo *pExpr; SSDataBlock block; - bool loadExternalRows; // load external rows (prev & next rows) int32_t numOfOutput; int64_t elapsedTime; @@ -570,6 +569,7 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter); SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput); +SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); SSDataBlock* doGlobalAggregate(void* param, bool* newgroup); SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup); diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index b8a5ee7699..f72f70c911 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -557,10 +557,9 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { int32_t op = 0; if (onlyQueryTags(pQueryAttr)) { // do nothing 
for tags query - if (onlyQueryTags(pQueryAttr)) { - op = OP_TagScan; - taosArrayPush(plan, &op); - } + op = OP_TagScan; + taosArrayPush(plan, &op); + if (pQueryAttr->distinct) { op = OP_Distinct; taosArrayPush(plan, &op); @@ -651,8 +650,14 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); } } + + // outer query order by support + int32_t orderColId = pQueryAttr->order.orderColId; + if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) { + op = OP_Order; + taosArrayPush(plan, &op); + } } - if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) { op = OP_Limit; From 6fafa5b1bef6eee56417995f85c2ce7a1d3ab17a Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Thu, 12 Aug 2021 14:59:57 +0800 Subject: [PATCH 035/239] [TD-6006] query where by column with no quotes --- tests/pytest/query/queryError.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py index ac78c0518f..e5c468600b 100644 --- a/tests/pytest/query/queryError.py +++ b/tests/pytest/query/queryError.py @@ -65,6 +65,10 @@ class TDTestCase: # TD-2208 tdSql.error("select diff(tagtype),top(tagtype,1) from dev_001") + # TD-6006 + tdSql.error("select * from dev_001 where 'name' is not null") + tdSql.error("select * from dev_001 where \"name\" = 'first'") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From f415a994fe7d6d1c0cc94ae3441b8a6709de888c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 15:18:25 +0800 Subject: [PATCH 036/239] [td-255] --- src/client/src/tscUtil.c | 1 - src/query/inc/qExecutor.h | 8 ++ src/query/inc/qExtbuffer.h | 2 + src/query/src/qExtbuffer.c | 54 +++++++ src/query/src/qPercentile.c | 2 +- src/util/inc/tcompare.h | 2 +- src/util/src/tcompare.c | 244 +++++++++++++++++++++----------- src/util/src/tskiplist.c | 2 +- src/util/tests/skiplistTest.cpp | 2 +- 9 files changed, 232 
insertions(+), 85 deletions(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3c35795b0d..b393459c52 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1206,7 +1206,6 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo); SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols); - pOutput->precision = pSqlObjList[0]->res.precision; SSchema* schema = NULL; diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 29cfa5edf1..6dca502838 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -333,6 +333,7 @@ enum OPERATOR_TYPE_E { OP_StateWindow = 22, OP_AllTimeWindow = 23, OP_AllMultiTableTimeInterval = 24, + OP_Order = 25, }; typedef struct SOperatorInfo { @@ -540,6 +541,13 @@ typedef struct SMultiwayMergeInfo { SArray *udfInfo; } SMultiwayMergeInfo; +// todo support the disk-based sort +typedef struct SOrderOperatorInfo { + int32_t colIndex; + int32_t order; + SSDataBlock *pDataBlock; +} SOrderOperatorInfo; + void appendUpstream(SOperatorInfo* p, SOperatorInfo* pUpstream); SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime); diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index cf0e8ce31a..c06fae298c 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder void tOrderDescDestroy(tOrderDescriptor *pDesc); +void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn); + void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows, int32_t numOfRowsToWrite, int32_t srcCapacity); diff --git 
a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index cc47cc824b..91004fe707 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -1102,3 +1102,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) { destroyColumnModel(pDesc->pColumnModel); tfree(pDesc); } + +void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) { + assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols); + + int32_t bytes = pSchema[index].bytes; + int32_t size = bytes + sizeof(int32_t); + + char* buf = calloc(1, size * numOfRows); + + for(int32_t i = 0; i < numOfRows; ++i) { + char* dest = buf + size * i; + memcpy(dest, pCols[index] + bytes * i, bytes); + *(int32_t*)(dest+bytes) = i; + } + + qsort(buf, numOfRows, size, compareFn); + + int32_t prevLength = 0; + char* p = NULL; + + for(int32_t i = 0; i < numOfCols; ++i) { + int32_t bytes1 = pSchema[i].bytes; + + if (i == index) { + for(int32_t j = 0; j < numOfRows; ++j){ + char* src = buf + (j * size); + char* dest = pCols[i] + (j * bytes1); + memcpy(dest, src, bytes1); + } + } else { + // make sure memory buffer is enough + if (prevLength < bytes1) { + char *tmp = realloc(p, bytes1 * numOfRows); + assert(tmp); + + p = tmp; + prevLength = bytes1; + } + + memcpy(p, pCols[i], bytes1 * numOfRows); + + for(int32_t j = 0; j < numOfRows; ++j){ + char* dest = pCols[i] + bytes1 * j; + + int32_t newPos = *(int32_t*)(buf + (j * size) + bytes); + char* src = p + (newPos * bytes1); + memcpy(dest, src, bytes1); + } + } + } + + tfree(buf); + tfree(p); +} \ No newline at end of file diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index e3326cc26b..e9022db503 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -237,7 +237,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, } pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes; - 
pBucket->comparFn = getKeyComparFunc(pBucket->type); + pBucket->comparFn = getKeyComparFunc(pBucket->type, TSDB_ORDER_ASC); pBucket->hashFunc = getHashFunc(pBucket->type); if (pBucket->hashFunc == NULL) { diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 612ce7ede0..4861779acd 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -47,7 +47,7 @@ int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, con int32_t doCompare(const char* a, const char* b, int32_t type, size_t size); -__compar_fn_t getKeyComparFunc(int32_t keyType); +__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order); __compar_fn_t getComparFunc(int32_t type, int32_t optr); diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index e953f4c464..6e135878c1 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -16,28 +16,22 @@ #include "os.h" #include "ttype.h" #include "tcompare.h" -#include "tarray.h" #include "hash.h" -int32_t compareInt32Val(const void *pLeft, const void *pRight) { - int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes1(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0; } -int32_t compareInt64Val(const void *pLeft, const void *pRight) { - int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes2(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 2) ? 
1 : 0; } -int32_t compareInt16Val(const void *pLeft, const void *pRight) { - int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes4(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 4) ? 1 : 0; +} + +int32_t setCompareBytes8(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 8) ? 1 : 0; } int32_t compareInt8Val(const void *pLeft, const void *pRight) { @@ -47,27 +41,76 @@ int32_t compareInt8Val(const void *pLeft, const void *pRight) { return 0; } -int32_t compareUint32Val(const void *pLeft, const void *pRight) { - int32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight); +int32_t compareInt8ValDesc(const void *pLeft, const void *pRight) { + return compareInt8Val(pRight, pLeft); +} + +int32_t compareInt16Val(const void *pLeft, const void *pRight) { + int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareInt16ValDesc(const void* pLeft, const void* pRight) { + return compareInt16Val(pRight, pLeft); +} + +int32_t compareInt32Val(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32ValDesc(const void* pLeft, const void* pRight) { + return compareInt32Val(pRight, pLeft); +} + +int32_t compareInt64Val(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64ValDesc(const void* pLeft, const void* pRight) { + return compareInt64Val(pRight, pLeft); +} + +int32_t compareUint32Val(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft), 
right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32ValDesc(const void* pLeft, const void* pRight) { + return compareUint32Val(pRight, pLeft); +} + int32_t compareUint64Val(const void *pLeft, const void *pRight) { - int64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight); + uint64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareUint64ValDesc(const void* pLeft, const void* pRight) { + return compareUint64Val(pRight, pLeft); +} + int32_t compareUint16Val(const void *pLeft, const void *pRight) { - int16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight); + uint16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareUint16ValDesc(const void* pLeft, const void* pRight) { + return compareUint16Val(pRight, pLeft); +} + int32_t compareUint8Val(const void* pLeft, const void* pRight) { uint8_t left = GET_UINT8_VAL(pLeft), right = GET_UINT8_VAL(pRight); if (left > right) return 1; @@ -75,6 +118,10 @@ int32_t compareUint8Val(const void* pLeft, const void* pRight) { return 0; } +int32_t compareUint8ValDesc(const void* pLeft, const void* pRight) { + return compareUint8Val(pRight, pLeft); +} + int32_t compareFloatVal(const void *pLeft, const void *pRight) { float p1 = GET_FLOAT_VAL(pLeft); float p2 = GET_FLOAT_VAL(pRight); @@ -92,8 +139,12 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) { } if (FLT_EQUAL(p1, p2)) { return 0; - } - return FLT_GREATER(p1, p2) ? 1: -1; + } + return FLT_GREATER(p1, p2) ? 
1: -1; +} + +int32_t compareFloatValDesc(const void* pLeft, const void* pRight) { + return compareFloatVal(pRight, pLeft); } int32_t compareDoubleVal(const void *pLeft, const void *pRight) { @@ -113,14 +164,18 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) { } if (FLT_EQUAL(p1, p2)) { return 0; - } - return FLT_GREATER(p1, p2) ? 1: -1; + } + return FLT_GREATER(p1, p2) ? 1: -1; +} + +int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) { + return compareDoubleVal(pRight, pLeft); } int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - + if (len1 != len2) { return len1 > len2? 1:-1; } else { @@ -133,10 +188,14 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { } } +int32_t compareLenPrefixedStrDesc(const void* pLeft, const void* pRight) { + return compareLenPrefixedStr(pRight, pLeft); +} + int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - + if (len1 != len2) { return len1 > len2? 
1:-1; } else { @@ -149,6 +208,10 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { } } +int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) { + return compareLenPrefixedWStr(pRight, pLeft); +} + /* * Compare two strings * TSDB_MATCH: Match @@ -161,33 +224,33 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { */ int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) { char c, c1; - + int32_t i = 0; int32_t j = 0; - + while ((c = patterStr[i++]) != 0) { if (c == pInfo->matchAll) { /* Match "*" */ - + while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) { if (c == pInfo->matchOne && (j > size || str[j++] == 0)) { // empty string, return not match return TSDB_PATTERN_NOWILDCARDMATCH; } } - + if (c == 0) { return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */ } - + char next[3] = {toupper(c), tolower(c), 0}; while (1) { size_t n = strcspn(str, next); str += n; - + if (str[0] == 0 || (n >= size)) { break; } - + int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo); if (ret != TSDB_PATTERN_NOMATCH) { return ret; @@ -195,18 +258,18 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat } return TSDB_PATTERN_NOWILDCARDMATCH; } - + c1 = str[j++]; - + if (j <= size) { if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) { continue; } } - + return TSDB_PATTERN_NOMATCH; } - + return (str[j] == 0 || j >= size) ? 
TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; } @@ -214,13 +277,13 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c wchar_t c, c1; wchar_t matchOne = L'_'; // "_" wchar_t matchAll = L'%'; // "%" - + int32_t i = 0; int32_t j = 0; - + while ((c = patterStr[i++]) != 0) { if (c == matchAll) { /* Match "%" */ - + while ((c = patterStr[i++]) == matchAll || c == matchOne) { if (c == matchOne && (j > size || str[j++] == 0)) { return TSDB_PATTERN_NOWILDCARDMATCH; @@ -229,40 +292,40 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c if (c == 0) { return TSDB_PATTERN_MATCH; } - + wchar_t accept[3] = {towupper(c), towlower(c), 0}; while (1) { size_t n = wcscspn(str, accept); - + str += n; if (str[0] == 0 || (n >= size)) { break; } - + int32_t ret = WCSPatternMatch(&patterStr[i], ++str, size - n - 1, pInfo); if (ret != TSDB_PATTERN_NOMATCH) { return ret; } } - + return TSDB_PATTERN_NOWILDCARDMATCH; } - + c1 = str[j++]; - + if (j <= size) { if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) { continue; } } - + return TSDB_PATTERN_NOMATCH; } return (str[j] == 0 || j >= size) ? 
TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; } -static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { +int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; char pattern[128] = {0}; @@ -270,8 +333,8 @@ static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { assert(varDataLen(pRight) < 128); size_t sz = varDataLen(pLeft); - char *buf = malloc(sz + 1); - memcpy(buf, varDataVal(pLeft), sz); + char *buf = malloc(sz + 1); + memcpy(buf, varDataVal(pLeft), sz); buf[sz] = 0; int32_t ret = patternMatch(pattern, buf, sz, &pInfo); @@ -282,19 +345,15 @@ static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { int32_t taosArrayCompareString(const void* a, const void* b) { const char* x = *(const char**)a; const char* y = *(const char**)b; - + return compareLenPrefixedStr(x, y); } -//static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) { -// const SArray* arr = (const SArray*) pRight; -// return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 0 : 1; -//} -static int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { - return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0; +int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { + return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 
1 : 0; } -static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { +int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; wchar_t pattern[128] = {0}; @@ -302,14 +361,37 @@ static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); assert(varDataLen(pRight) < 128); - + int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } __compar_fn_t getComparFunc(int32_t type, int32_t optr) { __compar_fn_t comparFn = NULL; - + + if (optr == TSDB_RELATION_IN && (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR)) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + return setCompareBytes1; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + return setCompareBytes2; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_FLOAT: + return setCompareBytes4; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + return setCompareBytes8; + default: + assert(0); + } + } + switch (type) { case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: comparFn = compareInt8Val; break; @@ -327,13 +409,15 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { } else { /* normal relational comparFn */ comparFn = compareLenPrefixedStr; } - + break; } - + case TSDB_DATA_TYPE_NCHAR: { if (optr == TSDB_RELATION_LIKE) { comparFn = compareWStrPatternComp; + } else if (optr == TSDB_RELATION_IN) { + comparFn = compareFindItemInSet; } else { comparFn = compareLenPrefixedWStr; } @@ -349,57 +433,57 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { comparFn = compareInt32Val; break; } - + return comparFn; } -__compar_fn_t getKeyComparFunc(int32_t keyType) { +__compar_fn_t 
getKeyComparFunc(int32_t keyType, int32_t order) { __compar_fn_t comparFn = NULL; - + switch (keyType) { case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BOOL: - comparFn = compareInt8Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt8Val:compareInt8ValDesc; break; case TSDB_DATA_TYPE_SMALLINT: - comparFn = compareInt16Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt16Val:compareInt16ValDesc; break; case TSDB_DATA_TYPE_INT: - comparFn = compareInt32Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc; break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - comparFn = compareInt64Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt64Val:compareInt64ValDesc; break; case TSDB_DATA_TYPE_FLOAT: - comparFn = compareFloatVal; + comparFn = (order == TSDB_ORDER_ASC)? compareFloatVal:compareFloatValDesc; break; case TSDB_DATA_TYPE_DOUBLE: - comparFn = compareDoubleVal; + comparFn = (order == TSDB_ORDER_ASC)? compareDoubleVal:compareDoubleValDesc; break; case TSDB_DATA_TYPE_UTINYINT: - comparFn = compareUint8Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint8Val:compareUint8ValDesc; break; case TSDB_DATA_TYPE_USMALLINT: - comparFn = compareUint16Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint16Val:compareUint16ValDesc; break; case TSDB_DATA_TYPE_UINT: - comparFn = compareUint32Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint32Val:compareUint32ValDesc; break; case TSDB_DATA_TYPE_UBIGINT: - comparFn = compareUint64Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint64Val:compareUint64ValDesc; break; case TSDB_DATA_TYPE_BINARY: - comparFn = compareLenPrefixedStr; + comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedStr:compareLenPrefixedStrDesc; break; case TSDB_DATA_TYPE_NCHAR: - comparFn = compareLenPrefixedWStr; + comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedWStr:compareLenPrefixedWStrDesc; break; default: - comparFn = compareInt32Val; + comparFn = (order == TSDB_ORDER_ASC)? 
compareInt32Val:compareInt32ValDesc; break; } - + return comparFn; } @@ -433,7 +517,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { default: { // todo refactor tstr* t1 = (tstr*) f1; tstr* t2 = (tstr*) f2; - + if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } else { diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index b464519ba6..98fd9c094c 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -54,7 +54,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _ pSkipList->keyFn = fn; pSkipList->seed = rand(); if (comparFn == NULL) { - pSkipList->comparFn = getKeyComparFunc(keyType); + pSkipList->comparFn = getKeyComparFunc(keyType, TSDB_ORDER_ASC); } else { pSkipList->comparFn = comparFn; } diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp index dfbe0f6716..df4c5af5e2 100644 --- a/src/util/tests/skiplistTest.cpp +++ b/src/util/tests/skiplistTest.cpp @@ -70,7 +70,7 @@ void doubleSkipListTest() { } void randKeyTest() { - SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT), + SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC), false, getkey); int32_t size = 200000; From 799c642cecdcc1a660f5c6c82384806a5c51f45c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 13:50:29 +0800 Subject: [PATCH 037/239] [td-5881]: Sort the result according to any single column in the outer query result is allowed. 
--- src/client/inc/tscUtil.h | 11 +-- src/query/src/qExecutor.c | 158 ++++++++++++++++++++++++++++++++------ 2 files changed, 140 insertions(+), 29 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index b3674a7bf5..0b2d4fd115 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -29,15 +29,16 @@ extern "C" { #include "tsched.h" #include "tsclient.h" -#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \ +#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE)) + #define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE)) - -#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\ - (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo))) -#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ +#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \ + (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo))) + +#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE)) #pragma pack(push,1) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index e31792398d..c124bd20fc 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -224,6 +224,7 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput); static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput); static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput); static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput); +static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput); static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyAggOperatorInfo(void* param, int32_t 
numOfOutput); @@ -1622,12 +1623,12 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe } startPos = pSDataBlock->info.rows - 1; - + // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); } - + break; } setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); @@ -2213,6 +2214,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } break; } + case OP_StateWindow: { pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; @@ -2229,24 +2231,20 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_Filter: { // todo refactor int32_t numOfFilterCols = 0; -// if (pQueryAttr->numOfFilterCols > 0) { -// pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, -// pQueryAttr->numOfOutput, pQueryAttr->tableCols, pQueryAttr->numOfFilterCols); -// } else { - if (pQueryAttr->stableQuery) { - SColumnInfo* pColInfo = - extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols); - pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, - pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols); - freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3); - } else { - SColumnInfo* pColInfo = - extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols); - pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, - pQueryAttr->numOfOutput, pColInfo, numOfFilterCols); - freeColumnInfo(pColInfo, pQueryAttr->numOfOutput); - } -// } + if (pQueryAttr->stableQuery) { + SColumnInfo* pColInfo = 
+ extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols); + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, + pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols); + freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3); + } else { + SColumnInfo* pColInfo = + extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols); + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, + pQueryAttr->numOfOutput, pColInfo, numOfFilterCols); + freeColumnInfo(pColInfo, pQueryAttr->numOfOutput); + } + break; } @@ -2258,11 +2256,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_MultiwayMergeSort: { bool groupMix = true; - if(pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) { + if (pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) { groupMix = false; } + pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, - 4096, merger, groupMix); // TODO hack it + 4096, merger, groupMix); // TODO hack it break; } @@ -2283,6 +2282,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf break; } + case OP_Order: { + pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + break; + } + default: { assert(0); } @@ -3092,7 +3096,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa // check if this data block is required to load if ((*status) != BLK_DATA_ALL_NEEDED) { bool needFilter = true; - + // the pCtx[i] result is belonged to previous time window since the outputBuf has not been set yet, // the filter result may be incorrect. 
So in case of interval query, we need to set the correct time output buffer if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) { @@ -5404,6 +5408,108 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx return pOperator; } +static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) { + assert(pSrc != NULL && pDest != NULL && pDest->info.numOfCols == pSrc->info.numOfCols); + + int32_t numOfCols = pSrc->info.numOfCols; + for(int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i); + SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i); + + int32_t newSize = (pDest->info.rows + pSrc->info.rows) * pCol2->info.bytes; + char* tmp = realloc(pCol2->pData, newSize); + if (tmp != NULL) { + pCol2->pData = tmp; + int32_t offset = pCol2->info.bytes * pDest->info.rows; + memcpy(pCol2->pData + offset, pCol1->pData, pSrc->info.rows * pCol2->info.bytes); + } else { + return TSDB_CODE_VND_OUT_OF_MEMORY; + } + } + + pDest->info.rows += pSrc->info.rows; + return TSDB_CODE_SUCCESS; +} + +static SSDataBlock* doSort(void* param, bool* newgroup) { + SOperatorInfo* pOperator = (SOperatorInfo*) param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SOrderOperatorInfo* pInfo = pOperator->info; + + SSDataBlock* pBlock = NULL; + while(1) { + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); + pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup); + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC); + + // start to flush data into disk and try do multiway merge sort + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + break; + } + + int32_t code = doMergeSDatablock(pInfo->pDataBlock, pBlock); + if (code != TSDB_CODE_SUCCESS) { + // todo handle error + } + } + + int32_t numOfCols = pInfo->pDataBlock->info.numOfCols; + void** pCols = 
calloc(numOfCols, POINTER_BYTES); + SSchema* pSchema = calloc(numOfCols, sizeof(SSchema)); + + for(int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* p1 = taosArrayGet(pInfo->pDataBlock->pDataBlock, i); + pCols[i] = p1->pData; + pSchema[i].colId = p1->info.colId; + pSchema[i].bytes = p1->info.bytes; + pSchema[i].type = (uint8_t) p1->info.type; + } + + __compar_fn_t comp = getKeyComparFunc(pSchema[pInfo->colIndex].type, pInfo->order); + taoscQSort(pCols, pSchema, numOfCols, pInfo->pDataBlock->info.rows, pInfo->colIndex, comp); + + tfree(pCols); + tfree(pSchema); + return (pInfo->pDataBlock->info.rows > 0)? pInfo->pDataBlock:NULL; +} + +SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { + SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo)); + + { + SSDataBlock* pDataBlock = calloc(1, sizeof(SSDataBlock)); + pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); + for(int32_t i = 0; i < numOfOutput; ++i) { + SColumnInfoData col = {{0}}; + col.info.bytes = pExpr->base.resBytes; + col.info.colId = pExpr->base.resColId; + col.info.type = pExpr->base.resType; + taosArrayPush(pDataBlock->pDataBlock, &col); + } + + pDataBlock->info.numOfCols = numOfOutput; + pInfo->pDataBlock = pDataBlock; + } + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + pOperator->name = "InMemoryOrder"; + pOperator->operatorType = OP_Order; + pOperator->blockingOptr = true; + pOperator->status = OP_IN_EXECUTING; + pOperator->info = pInfo; + pOperator->exec = doSort; + pOperator->cleanup = destroyOrderOperatorInfo; + pOperator->pRuntimeEnv = pRuntimeEnv; + + appendUpstream(pOperator, upstream); + return pOperator; +} + static int32_t getTableScanOrder(STableScanInfo* pTableScanInfo) { return pTableScanInfo->order; } @@ -6404,6 +6510,11 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { pInfo->pRes = destroyOutputBuf(pInfo->pRes); } 
+static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) { + SOrderOperatorInfo* pInfo = (SOrderOperatorInfo*) param; + pInfo->pDataBlock = destroyOutputBuf(pInfo->pDataBlock); +} + static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { SFilterOperatorInfo* pInfo = (SFilterOperatorInfo*) param; doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols); @@ -6752,7 +6863,6 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn pOperator->numOfOutput = numOfOutput; pOperator->info = pInfo; pOperator->pRuntimeEnv = pRuntimeEnv; - pOperator->exec = doFill; pOperator->cleanup = destroySFillOperatorInfo; From c0e81449bfc86d24b2d77cef25c5a7a417c7924e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 12 Aug 2021 15:36:29 +0800 Subject: [PATCH 038/239] [TD-6013]: taosdemo buffer overflow. (#7317) --- src/kit/taosdemo/taosdemo.c | 49 +++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 27b26e7364..bd0feaeb92 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5101,21 +5101,27 @@ static int64_t generateStbRowData( int64_t dataLen = 0; char *pstr = recBuf; int64_t maxLen = MAX_DATA_SIZE; + int tmpLen; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", strlen("BINARY"))) + "BINARY", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", strlen("NCHAR")))) { + "NCHAR", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } + if ((stbInfo->columns[i].dataLen + 1) > + /* need count 3 extra chars \', \', and , */ + (remainderBufLen - dataLen - 3)) { + return 0; + } char* buf = 
(char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen); @@ -5129,19 +5135,20 @@ static int64_t generateStbRowData( char *tmp; if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", strlen("INT"))) { + "INT", 3)) { if ((g_args.demo_mode) && (i == 1)) { tmp = demo_voltage_int_str(); } else { tmp = rand_int_str(); } - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", strlen("BIGINT"))) { + "BIGINT", 6)) { tmp = rand_bigint_str(); tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", strlen("FLOAT"))) { + "FLOAT", 5)) { if (g_args.demo_mode) { if (i == 0) { tmp = demo_current_float_str(); @@ -5151,27 +5158,33 @@ static int64_t generateStbRowData( } else { tmp = rand_float_str(); } - tstrncpy(pstr + dataLen, tmp, FLOAT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", strlen("DOUBLE"))) { + "DOUBLE", 6)) { tmp = rand_double_str(); - tstrncpy(pstr + dataLen, tmp, DOUBLE_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", strlen("SMALLINT"))) { + "SMALLINT", 8)) { tmp = rand_smallint_str(); - tstrncpy(pstr + dataLen, tmp, SMALLINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", strlen("TINYINT"))) { + "TINYINT", 7)) { tmp = rand_tinyint_str(); - tstrncpy(pstr + dataLen, tmp, TINYINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); } else if (0 == 
strncasecmp(stbInfo->columns[i].dataType, - "BOOL", strlen("BOOL"))) { + "BOOL", 4)) { tmp = rand_bool_str(); - tstrncpy(pstr + dataLen, tmp, BOOL_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { + "TIMESTAMP", 9)) { tmp = rand_int_str(); - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); } else { errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -5182,7 +5195,7 @@ static int64_t generateStbRowData( dataLen += 1; } - if (dataLen > (remainderBufLen - (DOUBLE_BUFF_LEN + 1))) + if (dataLen > (remainderBufLen - (128))) return 0; } From bf3477e7c3d16eda61785080124498fe47297f24 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 16:03:49 +0800 Subject: [PATCH 039/239] [td-5881]: Sort the result according to any single column in the outer query result is allowed. 
--- src/query/inc/qExecutor.h | 2 +- src/query/src/qExecutor.c | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 6dca502838..d30971ab47 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -577,7 +577,7 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter); SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput); -SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal); SSDataBlock* doGlobalAggregate(void* param, bool* newgroup); SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index c124bd20fc..55a762d809 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2283,7 +2283,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } case OP_Order: { - pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &pQueryAttr->order); break; } @@ -5428,6 +5428,7 @@ static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) { } pDest->info.rows += pSrc->info.rows; + return TSDB_CODE_SUCCESS; } @@ -5478,7 +5479,7 @@ static SSDataBlock* doSort(void* param, bool* newgroup) { return (pInfo->pDataBlock->info.rows > 0)? 
pInfo->pDataBlock:NULL; } -SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { +SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal) { SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo)); { @@ -5486,10 +5487,14 @@ SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); for(int32_t i = 0; i < numOfOutput; ++i) { SColumnInfoData col = {{0}}; - col.info.bytes = pExpr->base.resBytes; - col.info.colId = pExpr->base.resColId; - col.info.type = pExpr->base.resType; + col.info.colId = pExpr[i].base.colInfo.colId; + col.info.bytes = pExpr[i].base.colBytes; + col.info.type = pExpr[i].base.colType; taosArrayPush(pDataBlock->pDataBlock, &col); + + if (col.info.colId == pOrderVal->orderColId) { + pInfo->colIndex = i; + } } pDataBlock->info.numOfCols = numOfOutput; From 977b2202eaf775b0d33d95b0ea9f33887b14028e Mon Sep 17 00:00:00 2001 From: cpwu Date: Thu, 12 Aug 2021 16:21:14 +0800 Subject: [PATCH 040/239] [TD-5935] add case for TD-5935 --- tests/pytest/functions/queryTestCases.py | 394 ++++++++++++++++++++++- 1 file changed, 392 insertions(+), 2 deletions(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index b7480fdbd5..ae2bbc4a81 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -13,6 +13,8 @@ import sys import subprocess +import random +import math from util.log import * from util.cases import * @@ -106,6 +108,9 @@ class TDTestCase: tdSql.execute("drop database if exists db1") tdSql.execute("create database if not exists db keep 3650") tdSql.execute("create database if not exists db1 keep 3650") + tdSql.execute("create database if not exists new keep 3650") + 
tdSql.execute("create database if not exists private keep 3650") + tdSql.execute("create database if not exists db2 keep 3650") tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") @@ -122,6 +127,14 @@ class TDTestCase: # p1 不进入指定数据库 tdSql.query("show create database db") tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) tdSql.error("show create database ") tdSql.error("show create databases db ") tdSql.error("show create database db.stb1") @@ -340,17 +353,394 @@ class TDTestCase: pass + def td4889(self): + tdLog.printNoPrefix("==========TD-4889==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + + for i in range(1000): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + for j in range(100): + tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})") + + tdSql.query("show vgroups") + index = tdSql.getData(0,0) + tdSql.checkData(0, 6, 0) + tdSql.execute(f"compact vnodes in({index})") + for i in range(3): + tdSql.query("show vgroups") + if tdSql.getData(0, 6) == 1: + tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1") + break + if i == 3: + tdLog.exit("compacting not occured") + time.sleep(0.5) + + pass + + def td5168insert(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)") + tdSql.execute("create table db.t1 using db.stb1 tags(1)") + + for i 
in range(5): + c1 = 1001.11 + i*0.1 + c2 = 1001.11 + i*0.1 + 1*0.01 + c3 = 1001.11 + i*0.1 + 2*0.01 + c4 = 1001.11 + i*0.1 + 3*0.01 + tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})") + + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)") + + # for i in range(1000000): + for i in range(1000000): + random1 = random.uniform(1000,1001) + random2 = random.uniform(1000,1001) + random3 = random.uniform(1000,1001) + random4 = random.uniform(1000,1001) + tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})") + + pass + + def td5168(self): + tdLog.printNoPrefix("==========TD-5168==========") + # 插入小范围内的随机数 + tdLog.printNoPrefix("=====step0: 默认情况下插入数据========") + self.td5168insert() + + # 获取五个时间点的数据作为基准数值,未压缩情况下精准匹配 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # tdSql.query("select * from db.t1 limit 100,1") + # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000,1") + # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), 
tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 10000,1") + # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 100000,1") + # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000000,1") + # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + + # 关闭服务并获取未开启压缩情况下的数据容量 + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + + cfgdir = self.getCfgDir() + cfgfile = self.getCfgFile() + + lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'" + data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'" + dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}") + + ################################################### + float_lossy = "float" + double_lossy = "double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, 
shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") 
+ for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.exit("压缩未生效") + else: + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.printNoPrefix("压缩生效") + + pass + + def td5433(self): + tdLog.printNoPrefix("==========TD-5433==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))") + numtab=2000000 + for i in range(numtab): + sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i})") + tdSql.execute(f"insert into db.t{i} values 
(now-9d, {i*2})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})") + + tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')") + tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')") + tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')") + tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')") + tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')") + + tdSql.query("select distinct t1 from stb1 where t1 != '150'") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 != 150") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 = 150") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1 where t1 = '150'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(numtab) + + tdSql.query("select distinct t0 from stb1 where t0 != '2'") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 != 2") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1") + tdSql.checkRows(128) + + tdSql.query("select distinct t1 from stb2 where t1 != '200'") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 != 200") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 = 200") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2 where t1 = '200'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2") + tdSql.checkRows(5) + + tdSql.query("select distinct t0 from stb2 where t0 != '2'") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 != 2") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2 where t0 = '2'") + tdSql.checkRows(1) + 
tdSql.query("select distinct t0 from stb2") + tdSql.checkRows(5) + + pass + + def td5798(self): + tdLog.printNoPrefix("==========TD-5798==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 int) tags(t2 binary(16), t3 binary(16), t4 int)") + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i%7}, {(i-1)%7}, {i%2})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3}, {(i-2)%3})") + + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i}', '{100-i}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, {(i+1)%3})") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i*2}, {(i+2)%3})") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i*3}, {(i)%3})") + + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t0, t1 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t1, t0 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t1, t2 from stb1") + tdSql.checkRows(14) + tdSql.query("select distinct t0, t1, t2 from stb1") + tdSql.checkRows(14) + tdSql.query("select distinct t0 t1, t1 t2 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t0, t0, t0 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t0, t1 from t1") + tdSql.checkRows(1) + + + ########## should be error ######### + tdSql.error("select distinct from stb1") + tdSql.error("select distinct t2 from ") + tdSql.error("distinct t2 from stb1") + tdSql.error("select distinct stb1") + tdSql.error("select distinct t0, t1, t2, t3 from stb1") + 
tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1") + + tdSql.error("select dist t0 from stb1") + tdSql.error("select distinct stb2.t2, stb2.t3 from stb1") + tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1") + + tdSql.error("select distinct t0, t1 from t1 where t0 < 7") + + ########## add where condition ########## + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3") + tdSql.checkRows(3) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2") + tdSql.checkRows(2) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2") + tdSql.checkRows(3) + tdSql.error("select distinct t0, t1 from stb1 where c1 > 2") + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") + tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") + # tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + # tdSql.error(" select 
distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.query("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5798==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + + def run(self): # master branch # self.td3690() # self.td4082() # self.td4288() - self.td4724() + # self.td4724() + # self.td5798() + self.td5935() # develop branch # self.td4097() - + # self.td4889() + # self.td5168() + # self.td5433() def stop(self): tdSql.close() From 4943f4ebc47261f4a2f53812fcd01fdd152903d1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 16:21:48 +0800 Subject: [PATCH 041/239] [td-5881]fix bug in TD-5881 --- src/query/src/qExecutor.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 55a762d809..7088d58830 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5498,6 +5498,7 @@ SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, 
SOperatorI } pDataBlock->info.numOfCols = numOfOutput; + pInfo->order = pOrderVal->order; pInfo->pDataBlock = pDataBlock; } From 932d065da5de3df129a02813852493dc56d50ac8 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Thu, 12 Aug 2021 16:45:41 +0800 Subject: [PATCH 042/239] [TD-6032]: fix nanosecond 999999999 error for nodejs [ci skip] (#7329) --- src/connector/nodejs/nodetaos/taosobjects.js | 3 ++- src/connector/nodejs/package.json | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/connector/nodejs/nodetaos/taosobjects.js b/src/connector/nodejs/nodetaos/taosobjects.js index 0fc8dc8ef1..3bc0fe0aca 100644 --- a/src/connector/nodejs/nodetaos/taosobjects.js +++ b/src/connector/nodejs/nodetaos/taosobjects.js @@ -47,7 +47,8 @@ class TaosTimestamp extends Date { super(Math.floor(date / 1000)); this.precisionExtras = date % 1000; } else if (precision === 2) { - super(parseInt(date / 1000000)); + // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000 which not expected + super(parseInt(BigInt(date) / 1000000n)); // use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405) this.precisionExtras = parseInt(BigInt(date) % 1000000n); } else { diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index db37318a16..6a2c66100b 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,6 +1,6 @@ { "name": "td2.0-connector", - "version": "2.0.9", + "version": "2.0.10", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "directories": { From ea16e9c73c1ed0e8038bbc6042e46e0d760602b7 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 12 Aug 2021 17:11:29 +0800 Subject: [PATCH 043/239] [TD-5835]: add test case for daily performance test --- tests/perftest-scripts/perftest-query.sh | 7 +++++++ tests/pytest/tools/taosdemoPerformance.py | 3 +-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git 
a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 5b2c860122..d4853c0825 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -101,7 +101,14 @@ function runQueryPerfTest { python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + + echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT + + echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT } diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index c28f94b3db..4a5abd49d8 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -63,7 +63,7 @@ class taosdemoPerformace: "batch_create_tbl_num": 10, "insert_mode": "taosc", "insert_rows": self.numOfRows, - "interlace_rows": 100, + "interlace_rows": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, @@ -172,7 +172,6 @@ class taosdemoPerformace: cursor.execute("create database if not exists %s" % self.dbName) cursor.execute("use %s" % self.dbName) cursor.execute("create table if not exists taosdemo_perf 
(ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20), numoftables int, numofrows int, numofint int, numofdouble int, numofbinary int)") - print("==================== taosdemo performance ====================") print("create tables time: %f" % float(self.createTableTime)) print("insert records time: %f" % float(self.insertRecordsTime)) print("records per second: %f" % float(self.recordsPerSecond)) From 7e9be7c5635d7c8d728b8796f609bdfab4b7309a Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Thu, 12 Aug 2021 17:33:45 +0800 Subject: [PATCH 044/239] add version to trigger pr --- src/kit/taospack/taospack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index ad188c3010..aa0bf4d485 100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -732,7 +732,7 @@ extern char Compressor []; // ----------------- main ---------------------- // int main(int argc, char *argv[]) { - printf("welcome to use taospack tools v1.5\n"); + printf("welcome to use taospack tools v1.6\n"); //printf(" sizeof(int)=%d\n", (int)sizeof(int)); //printf(" sizeof(long)=%d\n", (int)sizeof(long)); From 1b7861f8fd30b3171e9ce1781aeab82e2eaabee5 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 12 Aug 2021 18:06:51 +0800 Subject: [PATCH 045/239] [TD-6013]: taosdemo buffer overflow. 
(#7319) --- src/kit/taosdemo/taosdemo.c | 49 +++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index c895545b81..c255ea6841 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5101,21 +5101,27 @@ static int64_t generateStbRowData( int64_t dataLen = 0; char *pstr = recBuf; int64_t maxLen = MAX_DATA_SIZE; + int tmpLen; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", strlen("BINARY"))) + "BINARY", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", strlen("NCHAR")))) { + "NCHAR", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } + if ((stbInfo->columns[i].dataLen + 1) > + /* need count 3 extra chars \', \', and , */ + (remainderBufLen - dataLen - 3)) { + return 0; + } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); @@ -5129,19 +5135,20 @@ static int64_t generateStbRowData( char *tmp; if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", strlen("INT"))) { + "INT", 3)) { if ((g_args.demo_mode) && (i == 1)) { tmp = demo_voltage_int_str(); } else { tmp = rand_int_str(); } - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", strlen("BIGINT"))) { + "BIGINT", 6)) { tmp = rand_bigint_str(); tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", strlen("FLOAT"))) { + "FLOAT", 5)) { if (g_args.demo_mode) { if (i == 0) { tmp = demo_current_float_str(); @@ -5151,27 +5158,33 @@ static int64_t generateStbRowData( } else { tmp = rand_float_str(); } - tstrncpy(pstr + dataLen, tmp, FLOAT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", strlen("DOUBLE"))) { + "DOUBLE", 6)) { tmp = rand_double_str(); - tstrncpy(pstr + dataLen, tmp, DOUBLE_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", strlen("SMALLINT"))) { + "SMALLINT", 8)) { tmp = rand_smallint_str(); - tstrncpy(pstr + dataLen, tmp, SMALLINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", strlen("TINYINT"))) { + "TINYINT", 7)) { tmp = rand_tinyint_str(); - tstrncpy(pstr + dataLen, tmp, TINYINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BOOL", strlen("BOOL"))) { + "BOOL", 4)) { tmp = rand_bool_str(); - 
tstrncpy(pstr + dataLen, tmp, BOOL_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { + "TIMESTAMP", 9)) { tmp = rand_int_str(); - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); } else { errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -5182,7 +5195,7 @@ static int64_t generateStbRowData( dataLen += 1; } - if (dataLen > (remainderBufLen - (DOUBLE_BUFF_LEN + 1))) + if (dataLen > (remainderBufLen - (128))) return 0; } From b61eaa10402dfb6a966ac63647914f04ec5ee7ec Mon Sep 17 00:00:00 2001 From: wpan Date: Fri, 13 Aug 2021 09:34:39 +0800 Subject: [PATCH 046/239] fix stddev query condition column issue and add test case --- src/client/src/tscSubquery.c | 4 ++++ tests/script/general/parser/function.sim | 22 ++++++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 2e7e02cfd5..293fe18c77 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2397,6 +2397,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { } else { SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex}; tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss); + int32_t ti = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid); + assert(ti >= 0); + SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti); + tscColumnCopy(x, pCol); } } } diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 0c93fe919a..e9e907f97a 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -1149,9 +1149,11 @@ endi sql select 
derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s)); -sql create table smeters (ts timestamp, current float, voltage int); -sql insert into smeters values ('2021-08-08 10:10:10', 10, 1); -sql insert into smeters values ('2021-08-08 10:10:12', 10, 2); +sql create table smeters (ts timestamp, current float, voltage int) tags (t1 int); +sql create table smeter1 using smeters tags (1); +sql insert into smeter1 values ('2021-08-08 10:10:10', 10, 2); +sql insert into smeter1 values ('2021-08-08 10:10:12', 10, 2); +sql insert into smeter1 values ('2021-08-08 10:10:14', 20, 1); sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a); if $rows != 2 then @@ -1160,9 +1162,21 @@ endi if $data00 != @21-08-08 10:10:10.000@ then return -1 endi +if $data01 != 0.000000000 then + return -1 +endi if $data10 != @21-08-08 10:10:12.000@ then return -1 endi +if $data11 != 0.000000000 then + return -1 +endi - +sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10; +if $rows != 1 then + return -1 +endi +if $data00 != 0.000000000 then + return -1 +endi From 356018a99057a3deedfd3d4b366ce1f4e0a61e87 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 13 Aug 2021 09:38:01 +0800 Subject: [PATCH 047/239] Hotfix/sangshuduo/td 5844 cmdline parameters align for master (#7337) * [TD-5844]: make cmd line parameter similar. * fix test case align with taosdemo change. * fix windows stack overflow issue. * fix mac compile error. 
* fix taosdemo cmdline parameter in tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py --- src/kit/taosdemo/taosdemo.c | 125 ++++++++++++------ .../taosdemoTestSupportNanoInsert.py | 73 +++++----- .../taosdemoTestSupportNanoInsert.py | 2 +- tests/pytest/tools/taosdemoTestInterlace.py | 2 +- 4 files changed, 126 insertions(+), 76 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index bd0feaeb92..aad2fe95fa 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -77,7 +77,7 @@ extern char configDir[]; #define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) #define MAX_USERNAME_SIZE 64 -#define MAX_PASSWORD_SIZE 64 +#define MAX_PASSWORD_SIZE 16 #define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space @@ -216,7 +216,7 @@ typedef struct SArguments_S { uint16_t port; uint16_t iface; char * user; - char * password; + char password[MAX_PASSWORD_SIZE]; char * database; int replica; char * tb_prefix; @@ -709,24 +709,24 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-u", indent, "The TDengine user name to use when connecting to the server. Default is 'root'."); #ifdef _TD_POWER_ - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'powerdb'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/power/'."); #elif (_TD_TQ_ == true) - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'tqueue'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. 
Default is '/etc/tq/'."); #else - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'taosdata'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/taos/'."); #endif printf("%s%s%s%s\n", indent, "-h", indent, "The host to connect to TDengine. Default is localhost."); - printf("%s%s%s%s\n", indent, "-p", indent, + printf("%s%s%s%s\n", indent, "-P", indent, "The TCP/IP port number to use for the connection. Default is 0."); printf("%s%s%s%s\n", indent, "-I", indent, #if STMT_IFACE_ENABLED == 1 @@ -826,11 +826,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->host = argv[++i]; - } else if (strcmp(argv[i], "-p") == 0) { + } else if (strcmp(argv[i], "-P") == 0) { if ((argc == i+1) || (!isStringNumber(argv[i+1]))) { printHelp(); - errorPrint("%s", "\n\t-p need a number following!\n"); + errorPrint("%s", "\n\t-P need a number following!\n"); exit(EXIT_FAILURE); } arguments->port = atoi(argv[++i]); @@ -860,13 +860,13 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->user = argv[++i]; - } else if (strcmp(argv[i], "-P") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-P need a valid string following!\n"); - exit(EXIT_FAILURE); + } else if (strncmp(argv[i], "-p", 2) == 0) { + if (strlen(argv[i]) == 2) { + printf("Enter password:"); + scanf("%s", arguments->password); + } else { + tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); } - arguments->password = argv[++i]; } else if (strcmp(argv[i], "-o") == 0) { if (argc == i+1) { printHelp(); @@ -1065,7 +1065,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->debug_print = true; } else if (strcmp(argv[i], "-gg") == 0) { arguments->verbose_print = true; - } else if (strcmp(argv[i], "-pp") == 0) { + } 
else if (strcmp(argv[i], "-PP") == 0) { arguments->performance_print = true; } else if (strcmp(argv[i], "-O") == 0) { if ((argc == i+1) || @@ -2319,15 +2319,15 @@ static void printfDbInfoForQueryToFile( } static void printfQuerySystemInfo(TAOS * taos) { - char filename[BUFFER_SIZE+1] = {0}; - char buffer[BUFFER_SIZE+1] = {0}; + char filename[MAX_FILE_NAME_LEN] = {0}; + char buffer[1024] = {0}; TAOS_RES* res; time_t t; struct tm* lt; time(&t); lt = localtime(&t); - snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d", + snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); @@ -2359,12 +2359,12 @@ static void printfQuerySystemInfo(TAOS * taos) { printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups - snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name); + snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); // show db.stables - snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name); + snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); free(dbInfos[i]); @@ -2713,7 +2713,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { - char command[BUFFER_SIZE] = "\0"; + char command[1024] = "\0"; char limitBuf[100] = "\0"; TAOS_RES * res; @@ -2727,7 +2727,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } //get all child table name use cmd: select tbname from superTblName; - snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s", + snprintf(command, 1024, "select tbname from %s.%s %s", dbName, sTblName, limitBuf); res = taos_query(taos, command); @@ -2805,13 +2805,13 @@ static int 
getAllChildNameOfSuperTable(TAOS * taos, char* dbName, static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) { - char command[BUFFER_SIZE] = "\0"; + char command[1024] = "\0"; TAOS_RES * res; TAOS_ROW row = NULL; int count = 0; //get schema use cmd: describe superTblName; - snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName); + snprintf(command, 1024, "describe %s.%s", dbName, superTbls->sTblName); res = taos_query(taos, command); int32_t code = taos_errno(res); if (code != 0) { @@ -2891,7 +2891,8 @@ static int createSuperTable( TAOS * taos, char* dbName, SSuperTable* superTbl) { - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); char cols[COL_BUFFER_LEN] = "\0"; int colIndex; @@ -2902,6 +2903,7 @@ static int createSuperTable( if (superTbl->columnCount == 0) { errorPrint("%s() LN%d, super table column count is %d\n", __func__, __LINE__, superTbl->columnCount); + free(command); return -1; } @@ -2964,6 +2966,7 @@ static int createSuperTable( taos_close(taos); errorPrint("%s() LN%d, config error data type : %s\n", __func__, __LINE__, dataType); + free(command); exit(-1); } } @@ -2976,6 +2979,7 @@ static int createSuperTable( errorPrint("%s() LN%d, Failed when calloc, size:%d", __func__, __LINE__, len+1); taos_close(taos); + free(command); exit(-1); } @@ -2986,6 +2990,7 @@ static int createSuperTable( if (superTbl->tagCount == 0) { errorPrint("%s() LN%d, super table tag count is %d\n", __func__, __LINE__, superTbl->tagCount); + free(command); return -1; } @@ -3051,6 +3056,7 @@ static int createSuperTable( taos_close(taos); errorPrint("%s() LN%d, config error tag type : %s\n", __func__, __LINE__, dataType); + free(command); exit(-1); } } @@ -3066,13 +3072,16 @@ static int createSuperTable( if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { errorPrint( "create supertable %s failed!\n\n", superTbl->sTblName); + free(command); return -1; } + debugPrint("create 
supertable %s success!\n\n", superTbl->sTblName); + free(command); return 0; } -static int createDatabasesAndStables() { +int createDatabasesAndStables(char *command) { TAOS * taos = NULL; int ret = 0; taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); @@ -3080,8 +3089,7 @@ static int createDatabasesAndStables() { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } - char command[BUFFER_SIZE] = "\0"; - + for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.db[i].drop) { sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); @@ -7145,7 +7153,8 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } - char buffer[BUFFER_SIZE]; + char *buffer = calloc(1, BUFFER_SIZE); + assert(buffer); char *pstr = buffer; if ((superTblInfo) @@ -7174,8 +7183,11 @@ static void startMultiThreadInsertData(int threads, char* db_name, ret, taos_stmt_errstr(pThreadInfo->stmt)); free(pids); free(infos); + free(buffer); exit(-1); } + + free(buffer); } #endif } else { @@ -7310,12 +7322,15 @@ static void *readTable(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; setThreadName("readTable"); - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); + uint64_t sTime = pThreadInfo->start_time; char *tb_prefix = pThreadInfo->tb_prefix; FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); return NULL; } @@ -7354,6 +7369,7 @@ static void *readTable(void *sarg) { taos_free_result(pSql); taos_close(taos); fclose(fp); + free(command); return NULL; } @@ -7374,6 +7390,7 @@ static void *readTable(void *sarg) { } fprintf(fp, "\n"); fclose(fp); + free(command); #endif return NULL; } @@ -7383,10 +7400,13 @@ static void *readMetric(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = 
pThreadInfo->taos; setThreadName("readMetric"); - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); + FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); return NULL; } @@ -7431,6 +7451,7 @@ static void *readMetric(void *sarg) { taos_free_result(pSql); taos_close(taos); fclose(fp); + free(command); return NULL; } int count = 0; @@ -7448,6 +7469,7 @@ static void *readMetric(void *sarg) { fprintf(fp, "\n"); } fclose(fp); + free(command); #endif return NULL; } @@ -7484,11 +7506,16 @@ static int insertTestProcess() { init_rand_data(); // create database and super tables - if(createDatabasesAndStables() != 0) { + char *cmdBuffer = calloc(1, BUFFER_SIZE); + assert(cmdBuffer); + + if(createDatabasesAndStables(cmdBuffer) != 0) { if (g_fpOfInsertResult) fclose(g_fpOfInsertResult); + free(cmdBuffer); return -1; } + free(cmdBuffer); // pretreatement if (prepareSampleData() != 0) { @@ -7657,7 +7684,9 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { } static void *superTableQuery(void *sarg) { - char sqlstr[BUFFER_SIZE]; + char *sqlstr = calloc(1, BUFFER_SIZE); + assert(sqlstr); + threadInfo *pThreadInfo = (threadInfo *)sarg; setThreadName("superTableQuery"); @@ -7672,6 +7701,7 @@ static void *superTableQuery(void *sarg) { if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); + free(sqlstr); return NULL; } else { pThreadInfo->taos = taos; @@ -7696,7 +7726,7 @@ static void *superTableQuery(void *sarg) { st = taosGetTimestampMs(); for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { - memset(sqlstr,0,sizeof(sqlstr)); + memset(sqlstr, 0, BUFFER_SIZE); replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); if 
(g_queryInfo.superQueryInfo.result[j][0] != '\0') { sprintf(pThreadInfo->filePath, "%s-%d", @@ -7727,6 +7757,7 @@ static void *superTableQuery(void *sarg) { (double)(et - st)/1000.0); } + free(sqlstr); return NULL; } @@ -7960,7 +7991,9 @@ static TAOS_SUB* subscribeImpl( static void *superSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[BUFFER_SIZE]; + char *subSqlStr = calloc(1, BUFFER_SIZE); + assert(subSqlStr); + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; uint64_t tsubSeq; @@ -7969,6 +8002,7 @@ static void *superSubscribe(void *sarg) { if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); + free(subSqlStr); exit(-1); } @@ -7981,6 +8015,7 @@ static void *superSubscribe(void *sarg) { if (pThreadInfo->taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); + free(subSqlStr); return NULL; } } @@ -7991,6 +8026,7 @@ static void *superSubscribe(void *sarg) { taos_close(pThreadInfo->taos); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); + free(subSqlStr); return NULL; } @@ -8005,25 +8041,26 @@ static void *superSubscribe(void *sarg) { pThreadInfo->end_table_to, i); sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", i, pThreadInfo->querySeq); - memset(subSqlstr, 0, sizeof(subSqlstr)); + memset(subSqlStr, 0, BUFFER_SIZE); replaceChildTblName( g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], - subSqlstr, i); + subSqlStr, i); if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); } - verbosePrint("%s() LN%d, [%d] subSqlstr: %s\n", - __func__, __LINE__, pThreadInfo->threadID, subSqlstr); + verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n", + __func__, __LINE__, 
pThreadInfo->threadID, subSqlStr); tsub[tsubSeq] = subscribeImpl( STABLE_CLASS, - pThreadInfo, subSqlstr, topic, + pThreadInfo, subSqlStr, topic, g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeInterval); if (NULL == tsub[tsubSeq]) { taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } } @@ -8080,12 +8117,13 @@ static void *superSubscribe(void *sarg) { consumed[tsubSeq]= 0; tsub[tsubSeq] = subscribeImpl( STABLE_CLASS, - pThreadInfo, subSqlstr, topic, + pThreadInfo, subSqlStr, topic, g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeInterval ); if (NULL == tsub[tsubSeq]) { taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } } @@ -8105,6 +8143,7 @@ static void *superSubscribe(void *sarg) { } taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } @@ -8407,9 +8446,7 @@ static void setParaFromArg() { tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); } - if (g_args.password) { - tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); - } + tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); if (g_args.port) { g_Dbs.port = g_args.port; diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py index 8fcb263125..f069bb8f70 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py @@ -36,7 +36,7 @@ class TDTestCase: if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] + buildPath = root[:len(root) - len("/build/bin")] break return buildPath @@ -46,14 +46,15 @@ class TDTestCase: tdLog.exit("taosd not found!") else: tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" + binPath = 
buildPath + "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql # insert data from a special timestamp # check stable stb0 - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath) + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % + binPath) tdSql.execute("use nsdb") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -64,9 +65,9 @@ class TDTestCase: tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 10000) tdSql.query("describe stb0") - tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1, "TIMESTAMP") tdSql.query("select last(ts) from stb0") - tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") + tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") # check stable stb1 which is insert with disord @@ -78,16 +79,18 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb1") - tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1, "TIMESTAMP") # check insert timestamp_step is nano_second tdSql.query("select last(ts) from stb1") - tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") - + tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") + # insert data from now time # check stable stb0 - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % binPath) - + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % + binPath) + tdSql.execute("use nsdb2") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -99,11 +102,14 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb0") - tdSql.checkDataType(9,1,"TIMESTAMP") + tdSql.checkDataType(9, 1, "TIMESTAMP") - # insert by csv files and timetamp is long int , strings in ts and cols - - os.system("%staosdemo -f 
tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath) + # insert by csv files and timetamp is long int , strings in ts and + # cols + + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % + binPath) tdSql.execute("use nsdbcsv") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -111,29 +117,37 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) tdSql.query("describe stb0") tdSql.checkDataType(3, 1, "TIMESTAMP") - tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") + tdSql.query( + "select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") tdSql.checkData(0, 0, 5000) tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") tdSql.checkData(0, 0, 10000) - - os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql") - # taosdemo test insert with command and parameter , detals show taosdemo --help - os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("rm -rf ./insert_res.txt") + os.system( + "rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql") + + # taosdemo test insert with command and parameter , detals show + # taosdemo --help + os.system( + "%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % + binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) # check taosdemo -s - sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 3600 days 6 update 1;', - 'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', - 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', - 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', - 'INSERT INTO d1001 
USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] + sqls_ls = [ + 'drop database if exists nsdbsql;', + 'create database nsdbsql precision "ns" keep 3600 days 6 update 1;', + 'use nsdbsql;', + 'CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', + 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] - with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files: + with open("./taosdemoTestNanoCreateDB.sql", mode="a") as sql_files: for sql in sqls_ls: - sql_files.write(sql+"\n") + sql_files.write(sql + "\n") sql_files.close() sleep(10) @@ -141,11 +155,10 @@ class TDTestCase: os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) tdSql.query("select count(*) from nsdbsql.meters") tdSql.checkData(0, 0, 2) - + os.system("rm -rf ./res.txt") os.system("rm -rf ./*.py.sql") os.system("rm -rf ./taosdemoTestNanoCreateDB.sql") - def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 266a8fa712..c3fdff00ec 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -120,7 +120,7 @@ class TDTestCase: os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") # taosdemo test insert with command and parameter , detals show taosdemo --help - os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) # check 
taosdemo -s diff --git a/tests/pytest/tools/taosdemoTestInterlace.py b/tests/pytest/tools/taosdemoTestInterlace.py index 4c551f327a..30c04729b7 100644 --- a/tests/pytest/tools/taosdemoTestInterlace.py +++ b/tests/pytest/tools/taosdemoTestInterlace.py @@ -49,7 +49,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath + "/build/bin/" - taosdemoCmd = "%staosdemo -f tools/insert-interlace.json -pp 2>&1 | grep sleep | wc -l" % binPath + taosdemoCmd = "%staosdemo -f tools/insert-interlace.json -PP 2>&1 | grep sleep | wc -l" % binPath sleepTimes = subprocess.check_output( taosdemoCmd, shell=True).decode("utf-8") print("sleep times: %d" % int(sleepTimes)) From 0309fd44126e3ff6ce44cd516f6b87163a5b46ef Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Aug 2021 10:17:47 +0800 Subject: [PATCH 048/239] [td-255] fix compiler error on windows. --- src/query/src/qExtbuffer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index 91004fe707..5152d17264 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -1113,7 +1113,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf for(int32_t i = 0; i < numOfRows; ++i) { char* dest = buf + size * i; - memcpy(dest, pCols[index] + bytes * i, bytes); + memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes); *(int32_t*)(dest+bytes) = i; } @@ -1128,7 +1128,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf if (i == index) { for(int32_t j = 0; j < numOfRows; ++j){ char* src = buf + (j * size); - char* dest = pCols[i] + (j * bytes1); + char* dest = ((char*)pCols[i]) + (j * bytes1); memcpy(dest, src, bytes1); } } else { @@ -1144,7 +1144,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf memcpy(p, pCols[i], bytes1 * numOfRows); for(int32_t j = 0; j < numOfRows; ++j){ - char* dest = pCols[i] + bytes1 * j; 
+ char* dest = ((char*)pCols[i]) + bytes1 * j; int32_t newPos = *(int32_t*)(buf + (j * size) + bytes); char* src = p + (newPos * bytes1); From 7fc6dc17fec04d4fce2babe002ce865bee0aca89 Mon Sep 17 00:00:00 2001 From: cpwu Date: Fri, 13 Aug 2021 10:27:27 +0800 Subject: [PATCH 049/239] [TD-5933] add case for TD-5933 --- tests/pytest/functions/queryTestCases.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index ae2bbc4a81..8523c6310e 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -714,6 +714,12 @@ class TDTestCase: tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) fill(next) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" tdSql.query(fillsql) fillResult=False @@ -724,7 +730,7 @@ class TDTestCase: else: tdLog.exit("fill(next) is wrong") - + pass def run(self): From ecea1ce747a00ef859e3ec3241eb803358dac1a8 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 13 Aug 2021 11:17:55 +0800 Subject: [PATCH 050/239] fixed realloc can cause memory leak --- deps/MsvcLibX/src/iconv.c | 6 ++++-- deps/MsvcLibX/src/main.c | 10 +++++++--- deps/MsvcLibX/src/realpath.c | 16 ++++++++++++---- src/balance/src/bnScore.c | 3 ++- src/client/src/tscPrepare.c | 5 +++-- src/client/src/tscSql.c | 4 +++- src/client/src/tscUtil.c | 12 +++++++++--- 
src/common/inc/tdataformat.h | 10 ++++++---- src/common/src/tdataformat.c | 5 +++-- src/kit/shell/src/shellCheck.c | 5 +++-- src/kit/taospack/taospack.c | 5 ++++- src/mnode/src/mnodeTable.c | 5 +++-- src/os/src/detail/osMemory.c | 5 +++-- src/os/src/windows/wGetline.c | 6 ++++-- src/query/src/qTsbuf.c | 7 +++++-- 15 files changed, 71 insertions(+), 33 deletions(-) diff --git a/deps/MsvcLibX/src/iconv.c b/deps/MsvcLibX/src/iconv.c index 40b6e6462d..1ec0dc7354 100644 --- a/deps/MsvcLibX/src/iconv.c +++ b/deps/MsvcLibX/src/iconv.c @@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) { int nBytes; char *pBuf; + char *pBuf1; nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */ pBuf = (char *)malloc(nBytes); if (!pBuf) { @@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault free(pBuf); return NULL; } - pBuf = realloc(pBuf, nBytes+1); - return pBuf; + pBuf1 = realloc(pBuf, nBytes+1); + if(pBuf1 == NULL && pBuf != NULL) free(pBuf); + return pBuf1; } int CountCharacters(const char *string, UINT cp) { diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c index f366b081ad..9b9b4a46af 100644 --- a/deps/MsvcLibX/src/main.c +++ b/deps/MsvcLibX/src/main.c @@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) { int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */ int nBackslash = 0; char **ppszArg; + char **ppszArg1; int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */ ppszArg = (char **)malloc((argc+1)*sizeof(char *)); @@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) { if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */ iArg = TRUE; ppszArg[argc++] = pszCopy+j; - ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *)); + ppszArg1 = (char **)realloc(ppszArg, 
(argc+1)*sizeof(char *)); + if(ppszArg1 == NULL && ppszArg != NULL) + free(ppszArg); + ppszArg = ppszArg1; if (!ppszArg) return -1; pszCopy[j] = c0 = '\0'; } @@ -212,14 +216,14 @@ int _initU(void) { fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n"); _acmdln[0] = '\0'; } - realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */ + //realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */ /* Should not fail since we make it smaller */ /* Record the console code page, to allow converting the output accordingly */ codePage = GetConsoleOutputCP(); return 0; -} +}ß #endif /* defined(_WIN32) */ diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 5fbcf773a2..ec1b606bf6 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,6 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; + char *pOutbuf1; int iErr; const char *pc; @@ -242,8 +243,11 @@ realpath_failed: return NULL; } - if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); - return pOutbuf; + if (!outbuf) { + pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1); + if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); + } + return pOutbuf1; } #endif @@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. 
Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; + char *pOutbuf1; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; @@ -590,10 +595,13 @@ realpathU_failed: } DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf)); - if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); + if (!outbuf) { + pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1); + if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); + } free(pPath1); free(pPath2); - return pOutbuf; + return pOutbuf1; } #endif /* defined(_WIN32) */ diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c index 7d94df1c23..a2b9ba8e09 100644 --- a/src/balance/src/bnScore.c +++ b/src/balance/src/bnScore.c @@ -117,7 +117,8 @@ void bnCleanupDnodes() { static void bnCheckDnodesSize(int32_t dnodesNum) { if (tsBnDnodes.maxSize <= dnodesNum) { tsBnDnodes.maxSize = dnodesNum * 2; - tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); + SDnodeObj** list1 = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); + if(list1) tsBnDnodes.list = list1; } } diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 40664241c1..06a9505086 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1527,8 +1527,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT; pCmd->insertParam.objectId = pSql->self; - pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); - + char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr); + pSql->sqlstr = sqlstr; if (pSql->sqlstr == NULL) { tscError("%p failed to malloc sql string buffer", pSql); STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 6f3d5c3a63..026c65a595 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -887,7 +887,9 @@ 
int taos_validate_sql(TAOS *taos, const char *sql) { return TSDB_CODE_TSC_EXCEED_SQL_LIMIT; } - pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr); + pSql->sqlstr = sqlstr; if (pSql->sqlstr == NULL) { tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self); tfree(pSql); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3c35795b0d..ca7a5cbf77 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -625,8 +625,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo } else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) { // convert unicode to native code in a temporary buffer extra one byte for terminated symbol - pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); - + char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + if(buffer == NULL) + return ; + pRes->buffer[i] = buffer; // string terminated char for binary data memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows); @@ -4364,6 +4366,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, STableMeta* p = NULL; size_t sz = 0; STableMeta* pChild = *ppChild; + STableMeta* pChild1; taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz); @@ -4374,7 +4377,10 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema); int32_t tableMetaSize = sizeof(STableMeta) + totalBytes; if (*tableMetaCapacity < tableMetaSize) { - pChild = realloc(pChild, tableMetaSize); + pChild1 = realloc(pChild, tableMetaSize); + if(pChild1 == NULL) + return -1; + pChild = pChild1; *tableMetaCapacity = (size_t)tableMetaSize; } diff --git 
a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 46259c8488..a01c377539 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -547,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; - pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); - if (pBuilder->pColIdx == NULL) return -1; + SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); + if (pColIdx == NULL) return -1; + pBuilder->pColIdx = pColIdx; } pBuilder->pColIdx[pBuilder->nCols].colId = colId; @@ -561,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, while (tlen > pBuilder->alloc - pBuilder->size) { pBuilder->alloc *= 2; } - pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc); - if (pBuilder->buf == NULL) return -1; + void* buf = realloc(pBuilder->buf, pBuilder->alloc); + if (buf == NULL) return -1; + pBuilder->buf = buf; } memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen); diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index a3a6c0fed4..181afa225d 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1 if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; - pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols); - if (pBuilder->columns == NULL) return -1; + STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols); + if (columns == NULL) return -1; + pBuilder->columns = columns; } STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]); diff --git a/src/kit/shell/src/shellCheck.c 
b/src/kit/shell/src/shellCheck.c index d78f1a6b99..7fc8b1409a 100644 --- a/src/kit/shell/src/shellCheck.c +++ b/src/kit/shell/src/shellCheck.c @@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) { int32_t tbIndex = tbNum++; if (tbMallocNum < tbNum) { tbMallocNum = (tbMallocNum * 2 + 1); - tbNames = realloc(tbNames, tbMallocNum * sizeof(char *)); - if (tbNames == NULL) { + char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *)); + if (tbNames1 == NULL) { fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum); code = TSDB_CODE_TSC_OUT_OF_MEMORY; break; } + tbNames = tbNames1; } tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN); diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index 33d779dfcf..1b22c16ee0 100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -148,7 +148,10 @@ float* read_float(const char* inFile, int* pcount){ //printf(" buff=%s float=%.50f \n ", buf, floats[fi]); if ( ++fi == malloc_cnt ) { malloc_cnt += 100000; - floats = realloc(floats, malloc_cnt*sizeof(float)); + float* floats1 = realloc(floats, malloc_cnt*sizeof(float)); + if(floats1 == NULL) + break; + floats = floats1; } memset(buf, 0, sizeof(buf)); } diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 0bc114ffdf..a4ecf5d6f3 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -2921,10 +2921,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray (*totalMallocLen) *= 2; } - pMultiMeta = realloc(pMultiMeta, *totalMallocLen); - if (pMultiMeta == NULL) { + SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen); + if (pMultiMeta1 == NULL) { return NULL; } + pMultiMeta = pMultiMeta1; } return pMultiMeta; diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c index d8194feab4..22954f1523 100644 --- a/src/os/src/detail/osMemory.c +++ b/src/os/src/detail/osMemory.c @@ -504,8 +504,9 @@ void * taosTRealloc(void 
*ptr, size_t size) { void * tptr = (void *)((char *)ptr - sizeof(size_t)); size_t tsize = size + sizeof(size_t); - tptr = realloc(tptr, tsize); - if (tptr == NULL) return NULL; + void* tptr1 = realloc(tptr, tsize); + if (tptr1 == NULL) return NULL; + tptr = tptr1; *(size_t *)tptr = size; diff --git a/src/os/src/windows/wGetline.c b/src/os/src/windows/wGetline.c index 553aecaf0a..aa45854884 100644 --- a/src/os/src/windows/wGetline.c +++ b/src/os/src/windows/wGetline.c @@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t *n += MIN_CHUNK; nchars_avail = (int32_t)(*n + *lineptr - read_pos); - *lineptr = realloc(*lineptr, *n); - if (!*lineptr) { + char* lineptr1 = realloc(*lineptr, *n); + if (!lineptr1) { errno = ENOMEM; return -1; } + *lineptr = lineptr1; + read_pos = *n - nchars_avail + *lineptr; assert((*lineptr + *n) == (read_pos + nchars_avail)); } diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index 825b7960de..4cf05dd2c7 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -223,8 +223,11 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) { static void shrinkBuffer(STSList* ptsData) { // shrink tmp buffer size if it consumes too many memory compared to the pre-defined size if (ptsData->allocSize >= ptsData->threshold * 2) { - ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE); - ptsData->allocSize = MEM_BUF_SIZE; + char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE); + if(rawBuf) { + ptsData->rawBuf = rawBuf; + ptsData->allocSize = MEM_BUF_SIZE; + } } } From 92652f449186e766e3306dbeba28d0e7d6d52501 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 13 Aug 2021 11:47:39 +0800 Subject: [PATCH 051/239] bnScore.c modify ok --- src/balance/src/bnScore.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c index a2b9ba8e09..04a14357c9 100644 --- 
a/src/balance/src/bnScore.c +++ b/src/balance/src/bnScore.c @@ -116,9 +116,17 @@ void bnCleanupDnodes() { static void bnCheckDnodesSize(int32_t dnodesNum) { if (tsBnDnodes.maxSize <= dnodesNum) { - tsBnDnodes.maxSize = dnodesNum * 2; - SDnodeObj** list1 = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); - if(list1) tsBnDnodes.list = list1; + int32_t maxSize = dnodesNum * 2; + SDnodeObj** list1 = NULL; + int32_t retry = 0; + + while(list1 == NULL && retry++ < 3) { + list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *)); + } + if(list1) { + tsBnDnodes.list = list1; + tsBnDnodes.maxSize = maxSize; + } } } From 490702c941b0ebb29bd6471dce58d587eea37e23 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 13 Aug 2021 13:26:32 +0800 Subject: [PATCH 052/239] msvclibX main.c error append a charater --- deps/MsvcLibX/src/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c index 9b9b4a46af..85f4c83f24 100644 --- a/deps/MsvcLibX/src/main.c +++ b/deps/MsvcLibX/src/main.c @@ -223,7 +223,7 @@ int _initU(void) { codePage = GetConsoleOutputCP(); return 0; -}ß +} #endif /* defined(_WIN32) */ From 16c429760665d8486e9a3089617bb9b8de28fd5d Mon Sep 17 00:00:00 2001 From: wu champion Date: Fri, 13 Aug 2021 14:47:54 +0800 Subject: [PATCH 053/239] Update queryTestCases.py --- tests/pytest/functions/queryTestCases.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index 8523c6310e..8cd3ef6b3a 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -715,7 +715,7 @@ class TDTestCase: tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## - stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} 
interval(10a) fill(next) limit 10" + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" tdSql.query(stddevAndIntervalSql) tdSql.checkRows(10) From 9867ea1c156ce8a46a1dadf3fcc4ec029a29cf11 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 13 Aug 2021 14:55:56 +0800 Subject: [PATCH 054/239] [ci skip]: reduce data insertion time --- tests/perftest-scripts/perftest-query.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index d4853c0825..68b64fd4e0 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -105,10 +105,10 @@ function runQueryPerfTest { python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT } From 3948b97e85e19288859c857a3a8bbaee542ac4ad Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 15:14:17 +0800 Subject: [PATCH 055/239] [TD-6044]: WAL 
compatibility since v2.1.5.0 --- src/inc/taoserror.h | 1 + src/inc/twal.h | 2 +- src/util/src/terror.c | 1 + src/wal/src/walWrite.c | 97 +++++++++++++++++++++++++++++++++++++++--- 4 files changed, 94 insertions(+), 7 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 2214078f55..d7e1592911 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -306,6 +306,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal") #define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted") #define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit") +#define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1003) //"WAL out of memory") // http #define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not onlin") diff --git a/src/inc/twal.h b/src/inc/twal.h index bce398d6f9..868a1fbd78 100644 --- a/src/inc/twal.h +++ b/src/inc/twal.h @@ -32,7 +32,7 @@ typedef enum { typedef struct { int8_t msgType; - int8_t sver; + int8_t sver; // sver 2 for WAL SDataRow/SMemRow compatibility int8_t reserved[2]; int32_t len; uint64_t version; diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 42fc76e6c9..8d2ef29c8c 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -314,6 +314,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGTYPE, "Invalid msg type") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, "Unexpected generic error in wal") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, "WAL file is corrupted") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, "WAL size exceeds limit") +TAOS_DEFINE_ERROR(TSDB_CODE_WAL_OUT_OF_MEMORY, "WAL out of memory") // http TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, "http server is not onlin") diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 4774998799..c1b3c1ac3f 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ 
-17,6 +17,7 @@ #define TAOS_RANDOM_FILE_FAIL_TEST #include "os.h" #include "taoserror.h" +#include "taosmsg.h" #include "tchecksum.h" #include "tfile.h" #include "twal.h" @@ -114,7 +115,7 @@ void walRemoveAllOldFiles(void *handle) { #if defined(WAL_CHECKSUM_WHOLE) static void walUpdateChecksum(SWalHead *pHead) { - pHead->sver = 1; + pHead->sver = 2; pHead->cksum = 0; pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len); } @@ -122,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) { static int walValidateChecksum(SWalHead *pHead) { if (pHead->sver == 0) { // for compatible with wal before sver 1 return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead)); - } else if (pHead->sver == 1) { + } else if (pHead->sver == 2 || pHead->sver == 1) { uint32_t cksum = pHead->cksum; pHead->cksum = 0; return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum); @@ -281,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_SUCCESS; } - if (pHead->sver == 1) { + if (pHead->sver == 2 || pHead->sver == 1) { if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) { wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos); return TSDB_CODE_WAL_FILE_CORRUPTED; @@ -306,7 +307,85 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_WAL_FILE_CORRUPTED; } +// Add SMemRowType ahead of SDataRow +static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) { + memcpy(pDest, pSrc, sizeof(SSubmitBlk)); + int nRows = htons(pSrc->numOfRows); + if (nRows <= 0) { + return; + } + char *pDestData = pDest->data; + char *pSrcData = pSrc->data; + for (int i = 0; i < nRows; ++i) { + memRowSetType(pDestData, SMEM_ROW_DATA); + memcpy(memRowDataBody(pDestData), pSrcData, dataRowLen(pSrcData)); + pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData)); + pSrcData = POINTER_SHIFT(pSrcData, 
dataRowLen(pSrcData)); + ++(*lenExpand); + } + int32_t dataLen = htonl(pDest->dataLen); + pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t)); +} +static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { + int32_t len = 0; + for (int i = 0; i < nRows; ++i) { + len += dataRowLen(pBlkData); + if (len > dataLen) { + return false; + } + pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData)); + } + if (len != dataLen) { + return false; + } + return true; +} +// for WAL SMemRow/SDataRow compatibility +static int walSMemRowCheck(SWalHead *pHead) { + if ((pHead->sver < 2) && (pHead->msgType == TSDB_MSG_TYPE_SUBMIT)) { + SSubmitMsg *pMsg = (SSubmitMsg *)pHead->cont; + int32_t numOfBlocks = htonl(pMsg->numOfBlocks); + if (numOfBlocks <= 0) { + return 0; + } + + int32_t nTotalRows = 0; + SSubmitBlk *pBlk = (SSubmitBlk *)pMsg->blocks; + for (int32_t i = 0; i < numOfBlocks; ++i) { + int32_t dataLen = htonl(pBlk->dataLen); + int32_t nRows = htons(pBlk->numOfRows); + nTotalRows += nRows; + if (!walIsSDataRow(pBlk->data, nRows, dataLen)) { + return 0; + } + pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen); + } + + SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); + if (pWalHead == NULL) { + return TSDB_CODE_WAL_OUT_OF_MEMORY; + } + // len should be updated + memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); + + SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont; + SSubmitBlk *pDestBlks = (SSubmitBlk *)pDestMsg->blocks; + SSubmitBlk *pSrcBlks = (SSubmitBlk *)pMsg->blocks; + int32_t lenExpand = 0; + for (int32_t i = 0; i < numOfBlocks; ++i) { + expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand); + } + if (lenExpand > 0) { + pDestMsg->length = htonl(htonl(pDestMsg->length) + lenExpand); + pWalHead->len = pWalHead->len + lenExpand; + } + + memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); + tfree(pWalHead); + } + return 0; +} static int32_t walRestoreWalFile(SWal 
*pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) { int32_t size = WAL_MAX_SIZE; @@ -346,7 +425,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch } #if defined(WAL_CHECKSUM_WHOLE) - if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 1) { + if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 2) { wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -379,7 +458,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch continue; } - if (pHead->sver == 1 && !walValidateChecksum(pHead)) { + if ((pHead->sver == 2 || pHead->sver == 1) && !walValidateChecksum(pHead)) { wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -431,7 +510,13 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch pWal->version = pHead->version; - //wInfo("writeFp: %ld", offset); + // wInfo("writeFp: %ld", offset); + + if (0 != walSMemRowCheck(pHead)) { + wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, + pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + } + (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } From 56d0d69a487bd91bfe5ce293bcbc8187e2aba048 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 13 Aug 2021 17:05:30 +0800 Subject: [PATCH 056/239] [TD-5538]: add --force-keep-file option --- src/common/src/tglobal.c | 3 +++ src/dnode/src/dnodeSystem.c | 2 ++ src/tsdb/inc/tsdbFS.h | 3 +++ src/tsdb/src/tsdbFS.c | 38 +++++++++++++++++++++++++++++++++++++ src/util/inc/tconfig.h | 1 + 5 files changed, 47 
insertions(+) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index ec7249cef5..198d57b6b9 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -26,6 +26,9 @@ #include "tlocale.h" #include "ttimezone.h" +// TSDB +bool tsdbForceKeepFile = false; + // cluster char tsFirst[TSDB_EP_LEN] = {0}; char tsSecond[TSDB_EP_LEN] = {0}; diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index e49b3eba99..c3e4dae206 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) { } } else if (strcmp(argv[i], "-C") == 0) { dump_config = 1; + } else if (strcmp(argv[i], "--force-keep-file") == 0) { + tsdbForceKeepFile = true; } else if (strcmp(argv[i], "-V") == 0) { #ifdef _ACCT char *versionStr = "enterprise"; diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h index d63aeb14ac..367d905522 100644 --- a/src/tsdb/inc/tsdbFS.h +++ b/src/tsdb/inc/tsdbFS.h @@ -18,6 +18,9 @@ #define TSDB_FS_VERSION 0 +// ================== TSDB global config +extern bool tsdbForceKeepFile; + // ================== CURRENT file header info typedef struct { uint32_t version; // Current file system version (relating to code) diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index c9f087a5cf..37f9f0af98 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -982,6 +982,26 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pfs->cstatus->pmf->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbCloseMFile(pfs->cstatus->pmf); + tfsClosedir(tdir); + regfree(®ex); + return -1; + } + + if (pfs->cstatus->pmf->info.size != tfstat.st_size) { + int64_t tfsize = pfs->cstatus->pmf->info.size; + pfs->cstatus->pmf->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + 
TSDB_FILE_FULL_NAME(pfs->cstatus->pmf), tfsize, pfs->cstatus->pmf->info.size); + } + } + tsdbCloseMFile(pfs->cstatus->pmf); } } else if (code == REG_NOMATCH) { @@ -1141,6 +1161,24 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pDFile->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + taosArrayDestroy(fArray); + return -1; + } + + if (pDFile->info.size != tfstat.st_size) { + int64_t tfsize = pDFile->info.size; + pDFile->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size); + } + } + tsdbCloseDFile(pDFile); index++; } diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index fdb2595fd8..d6e0f4ca46 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -77,6 +77,7 @@ typedef struct { extern SGlobalCfg tsGlobalConfig[]; extern int32_t tsGlobalConfigNum; extern char * tsCfgStatusStr[]; +extern bool tsdbForceKeepFile; void taosReadGlobalLogCfg(); bool taosReadGlobalCfg(); From 124be91c397bd2889961b4e8dd7d4475e7c9effd Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 13 Aug 2021 17:19:19 +0800 Subject: [PATCH 057/239] speed up drone in master --- .drone.yml | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/.drone.yml b/.drone.yml index b520f308ba..085a07acf9 100644 --- a/.drone.yml +++ b/.drone.yml @@ -15,7 +15,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -23,6 +23,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: test_arm64_bionic @@ -39,7 +40,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -66,7 +67,7 @@ steps: - mkdir debug - cd debug - cmake .. 
-DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -91,7 +92,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -116,7 +117,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -142,7 +143,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch32 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -150,6 +151,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_trusty @@ -168,7 +170,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -176,6 +178,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_xenial @@ -193,7 +196,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -201,7 +204,7 @@ steps: branch: - develop - master - + - 2.0 --- kind: pipeline name: build_bionic @@ -218,7 +221,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -226,6 +229,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_centos7 @@ -241,7 +245,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -249,4 +253,4 @@ steps: branch: - develop - master - + - 2.0 \ No newline at end of file From 7d0e9c515e1902df66fb4a97f8234a100fae7883 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 13 Aug 2021 17:21:36 +0800 Subject: [PATCH 058/239] speed up drone in 2.0 --- .drone.yml | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/.drone.yml b/.drone.yml index b520f308ba..085a07acf9 100644 --- a/.drone.yml +++ b/.drone.yml @@ -15,7 +15,7 @@ steps: - mkdir debug - cd debug - cmake .. 
- - make + - make -j4 trigger: event: - pull_request @@ -23,6 +23,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: test_arm64_bionic @@ -39,7 +40,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -66,7 +67,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -91,7 +92,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -116,7 +117,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -142,7 +143,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch32 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -150,6 +151,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_trusty @@ -168,7 +170,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -176,6 +178,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_xenial @@ -193,7 +196,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -201,7 +204,7 @@ steps: branch: - develop - master - + - 2.0 --- kind: pipeline name: build_bionic @@ -218,7 +221,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -226,6 +229,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_centos7 @@ -241,7 +245,7 @@ steps: - mkdir debug - cd debug - cmake .. 
- - make + - make -j4 trigger: event: - pull_request @@ -249,4 +253,4 @@ steps: branch: - develop - master - + - 2.0 \ No newline at end of file From d8622364aafb84b2030e37f94b6266909ff195fa Mon Sep 17 00:00:00 2001 From: wpan Date: Fri, 13 Aug 2021 17:22:19 +0800 Subject: [PATCH 059/239] fix interp stable query issue --- src/client/src/tscSubquery.c | 1 - src/query/src/qAggMain.c | 18 +- src/query/src/qExecutor.c | 10 +- src/query/src/qPlan.c | 2 +- tests/script/general/parser/interp_test.sim | 246 ++++++++++++++++++++ 5 files changed, 263 insertions(+), 14 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 2e7e02cfd5..1a138f2b48 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2812,7 +2812,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows); SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd); - tscClearInterpInfo(pPQueryInfo); code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self); pParentSql->res.code = code; diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 6eadedcaf3..14d3f4e417 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3670,6 +3670,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { return; } + bool ascQuery = (pCtx->order == TSDB_ORDER_ASC); + if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { *(TSKEY *)pCtx->pOutput = pCtx->startTs; } else if (type == TSDB_FILL_NULL) { @@ -3677,7 +3679,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { } else if (type == TSDB_FILL_SET_VALUE) { tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true); } else { - if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) { + if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= 
pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) { if (type == TSDB_FILL_PREV) { if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) { SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val); @@ -3716,13 +3718,14 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY skey = GET_TS_DATA(pCtx, 0); if (type == TSDB_FILL_PREV) { - if (skey > pCtx->startTs) { + if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) { return; } if (pCtx->size > 1) { TSKEY ekey = GET_TS_DATA(pCtx, 1); - if (ekey > skey && ekey <= pCtx->startTs) { + if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) || + ((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){ skey = ekey; } } @@ -3731,10 +3734,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY ekey = skey; char* val = NULL; - if (ekey < pCtx->startTs) { + if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) { if (pCtx->size > 1) { ekey = GET_TS_DATA(pCtx, 1); - if (ekey < pCtx->startTs) { + if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) { return; } @@ -3755,12 +3758,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY ekey = GET_TS_DATA(pCtx, 1); // no data generated yet - if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) { + if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs)) + || ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) { return; } - assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs); - char *start = GET_INPUT_DATA(pCtx, 0); char *end = GET_INPUT_DATA(pCtx, 1); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index b1fb2add1b..bb16b74d3d 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1595,6 +1595,7 @@ static void hashAllIntervalAgg(SOperatorInfo* 
pOperatorInfo, SResultRowInfo* pRe SResultRow* pResult = NULL; int32_t forwardStep = 0; int32_t ret = 0; + STimeWindow preWin = win; while (1) { // null data, failed to allocate more memory buffer @@ -1609,12 +1610,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); - + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + preWin = win; + int32_t prevEndPos = (forwardStep - 1) * step + startPos; startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos); if (startPos < 0) { - if (win.skey <= pQueryAttr->window.ekey) { + if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) { int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { @@ -1625,7 +1627,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? 
&win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); } break; diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index b8a5ee7699..ad286f7afa 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -693,7 +693,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) { } // fill operator - if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) { + if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) { op = OP_Fill; taosArrayPush(plan, &op); } diff --git a/tests/script/general/parser/interp_test.sim b/tests/script/general/parser/interp_test.sim index 845afb0173..5a2021dcfc 100644 --- a/tests/script/general/parser/interp_test.sim +++ b/tests/script/general/parser/interp_test.sim @@ -930,8 +930,254 @@ if $data44 != @18-11-25 19:06:00.000@ then endi +sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear); +if $rows != 8 then + return -1 +endi +if $data00 != @18-09-17 20:35:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:37:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data51 != 0 then + return -1 +endi +if $data60 != @18-09-17 20:41:00.000@ then + return -1 +endi +if $data61 != NULL then + return -1 +endi +if $data70 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data71 != NULL then + return -1 +endi +sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) 
fill(linear) order by ts desc; +if $rows != 8 then + return -1 +endi +if $data00 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:41:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data21 != 0 then + return -1 +endi +if $data30 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:37:00.000@ then + return -1 +endi +if $data51 != NULL then + return -1 +endi +if $data60 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data61 != NULL then + return -1 +endi +if $data70 != @18-09-17 20:35:00.000@ then + return -1 +endi +if $data71 != NULL then + return -1 +endi + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(2m) fill(linear) order by ts; +if $rows != 9 then + return -1 +endi +if $data00 != @18-09-17 20:34:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data31 != 0.00000 then + return -1 +endi +if $data40 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data41 != 0.20000 then + return -1 +endi +if $data50 != @18-09-17 20:44:00.000@ then + return -1 +endi +if $data51 != 0.40000 then + return -1 +endi +if $data60 != @18-09-17 20:46:00.000@ then + return -1 +endi +if $data61 != 0.60000 then + return -1 +endi +if $data70 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data71 != 0.80000 then + return -1 +endi +if $data80 != @18-09-17 20:50:00.000@ then + 
return -1 +endi +if $data81 != 1.00000 then + return -1 +endi + + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts; +if $rows != 6 then + return -1 +endi +if $data00 != @18-09-17 20:33:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data31 != 0.20000 then + return -1 +endi +if $data40 != @18-09-17 20:45:00.000@ then + return -1 +endi +if $data41 != 0.50000 then + return -1 +endi +if $data50 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data51 != 0.80000 then + return -1 +endi + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts desc; +if $rows != 6 then + return -1 +endi +if $data00 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data01 != 0.80000 then + return -1 +endi +if $data10 != @18-09-17 20:45:00.000@ then + return -1 +endi +if $data11 != 0.50000 then + return -1 +endi +if $data20 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data21 != 0.20000 then + return -1 +endi +if $data30 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:33:00.000@ then + return -1 +endi +if $data51 != NULL then + return -1 +endi From eebc5d40000f793a6cf85778425b5d0f763c17c1 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 18:04:43 +0800 Subject: [PATCH 060/239] bug fix --- src/wal/src/walWrite.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git 
a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index c1b3c1ac3f..3b1eb55a72 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -375,15 +375,18 @@ static int walSMemRowCheck(SWalHead *pHead) { int32_t lenExpand = 0; for (int32_t i = 0; i < numOfBlocks; ++i) { expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand); + pDestBlks = POINTER_SHIFT(pDestBlks, htonl(pDestBlks->dataLen) + sizeof(SSubmitBlk)); + pSrcBlks = POINTER_SHIFT(pSrcBlks, htonl(pSrcBlks->dataLen) + sizeof(SSubmitBlk)); } if (lenExpand > 0) { - pDestMsg->length = htonl(htonl(pDestMsg->length) + lenExpand); + pDestMsg->header.contLen = htonl(pDestMsg->length) + lenExpand; + pDestMsg->length = htonl(pDestMsg->header.contLen); pWalHead->len = pWalHead->len + lenExpand; } memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); tfree(pWalHead); - } + } return 0; } @@ -511,12 +514,10 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch pWal->version = pHead->version; // wInfo("writeFp: %ld", offset); - if (0 != walSMemRowCheck(pHead)) { wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); } - (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } From c06123053faac12b3d8a5452d177e6b601c38668 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 18:17:29 +0800 Subject: [PATCH 061/239] code optimization --- src/wal/src/walWrite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 3b1eb55a72..8d311d5e3e 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -366,7 +366,7 @@ static int walSMemRowCheck(SWalHead *pHead) { if (pWalHead == NULL) { return TSDB_CODE_WAL_OUT_OF_MEMORY; } - // len should be updated + memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont; @@ -386,7 +386,7 @@ static 
int walSMemRowCheck(SWalHead *pHead) { memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); tfree(pWalHead); - } + } return 0; } From 9c73bb0dc5c84d022f9526acd6efcc62a6a1da9d Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 18:43:25 +0800 Subject: [PATCH 062/239] code optimization --- src/inc/taoserror.h | 1 - src/util/src/terror.c | 1 - src/wal/src/walWrite.c | 9 +++++---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index d7e1592911..2214078f55 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -306,7 +306,6 @@ int32_t* taosGetErrno(); #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal") #define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted") #define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit") -#define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1003) //"WAL out of memory") // http #define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not onlin") diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 8d2ef29c8c..42fc76e6c9 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -314,7 +314,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGTYPE, "Invalid msg type") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, "Unexpected generic error in wal") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, "WAL file is corrupted") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, "WAL size exceeds limit") -TAOS_DEFINE_ERROR(TSDB_CODE_WAL_OUT_OF_MEMORY, "WAL out of memory") // http TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, "http server is not onlin") diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 8d311d5e3e..2dfdb84818 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -123,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) { static int walValidateChecksum(SWalHead 
*pHead) { if (pHead->sver == 0) { // for compatible with wal before sver 1 return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead)); - } else if (pHead->sver == 2 || pHead->sver == 1) { + } else if (pHead->sver >= 1) { uint32_t cksum = pHead->cksum; pHead->cksum = 0; return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum); @@ -282,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_SUCCESS; } - if (pHead->sver == 2 || pHead->sver == 1) { + if (pHead->sver >= 1) { if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) { wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos); return TSDB_CODE_WAL_FILE_CORRUPTED; @@ -364,7 +364,7 @@ static int walSMemRowCheck(SWalHead *pHead) { SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); if (pWalHead == NULL) { - return TSDB_CODE_WAL_OUT_OF_MEMORY; + return -1; } memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); @@ -461,7 +461,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch continue; } - if ((pHead->sver == 2 || pHead->sver == 1) && !walValidateChecksum(pHead)) { + if ((pHead->sver >= 1) && !walValidateChecksum(pHead)) { wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -517,6 +517,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch if (0 != walSMemRowCheck(pHead)) { wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + return TAOS_SYSTEM_ERROR(errno); } (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } From 57d5f22a13c5996a8489260c1f5b7329f391b698 Mon Sep 17 00:00:00 2001 From: Shuduo 
Sang Date: Fri, 13 Aug 2021 19:17:29 +0800 Subject: [PATCH 063/239] Hotfix/sangshuduo/td 5872 taosdemo stmt improve for master (#7338) * cherry pick from develop branch. * cherry pick 548131751fad2b0e11e15b75af6c254164f28eea * [TD-5872]: taosdemo stmt csv perf improve. * rand func back to early impl. * fix windows/mac compile error. * cherry pick from develop branch. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 1412 +++++++++++++++++++++++------------ 1 file changed, 932 insertions(+), 480 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index aad2fe95fa..3b91de32b0 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -295,6 +295,9 @@ typedef struct SSuperTable_S { uint64_t lenOfTagOfOneRow; char* sampleDataBuf; +#if STMT_IFACE_ENABLED == 1 + char* sampleBindArray; +#endif //int sampleRowCount; //int sampleUsePos; @@ -441,6 +444,7 @@ typedef struct SQueryMetaInfo_S { typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; + int64_t *bind_ts; int threadID; char db_name[TSDB_DB_NAME_LEN]; uint32_t time_precision; @@ -454,7 +458,7 @@ typedef struct SThreadInfo_S { int64_t start_time; char* cols; bool use_metric; - SSuperTable* superTblInfo; + SSuperTable* stbInfo; char *buffer; // sql cmd buffer // for async insert @@ -674,7 +678,7 @@ static FILE * g_fpOfInsertResult = NULL; /////////////////////////////////////////////////// -static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); } +static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(-1); } #ifndef TAOSDEMO_COMMIT_SHA1 #define TAOSDEMO_COMMIT_SHA1 "unknown" @@ -1136,8 +1140,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } if (0 == columnCount) { - perror("data type error!"); - exit(-1); + ERROR_EXIT("data type error!"); } g_args.num_of_CPR = columnCount; @@ -2425,14 +2428,14 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port #endif 
debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); free(request_buf); - ERROR_EXIT("ERROR opening socket"); + ERROR_EXIT("opening socket"); } int retConn = connect(sockfd, (struct sockaddr *)pServAddr, sizeof(struct sockaddr)); debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); if (retConn < 0) { free(request_buf); - ERROR_EXIT("ERROR connecting"); + ERROR_EXIT("connecting"); } memset(base64_buf, 0, INPUT_BUF_LEN); @@ -2465,7 +2468,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port auth, strlen(sqlstr), sqlstr); if (r >= req_buf_len) { free(request_buf); - ERROR_EXIT("ERROR too long request"); + ERROR_EXIT("too long request"); } verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); @@ -2478,7 +2481,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port bytes = write(sockfd, request_buf + sent, req_str_len - sent); #endif if (bytes < 0) - ERROR_EXIT("ERROR writing message to socket"); + ERROR_EXIT("writing message to socket"); if (bytes == 0) break; sent+=bytes; @@ -2495,7 +2498,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port #endif if (bytes < 0) { free(request_buf); - ERROR_EXIT("ERROR reading response from socket"); + ERROR_EXIT("reading response from socket"); } if (bytes == 0) break; @@ -2504,7 +2507,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port if (received == resp_len) { free(request_buf); - ERROR_EXIT("ERROR storing complete response from socket"); + ERROR_EXIT("storing complete response from socket"); } response_buf[RESP_BUF_LEN - 1] = '\0'; @@ -2667,8 +2670,8 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { lenOfOneRow += TIMESTAMP_BUFF_LEN; } else { - printf("get error data type : %s\n", dataType); - exit(-1); + errorPrint("get error data type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -2698,8 
+2701,8 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { - printf("get error tag type : %s\n", dataType); - exit(-1); + errorPrint("get error tag type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -2737,7 +2740,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_close(taos); errorPrint("%s() LN%d, failed to run command %s\n", __func__, __LINE__, command); - exit(-1); + exit(EXIT_FAILURE); } int64_t childTblCount = (limit < 0)?10000:limit; @@ -2748,7 +2751,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_free_result(res); taos_close(taos); errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } } @@ -2759,7 +2762,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, if (0 == strlen((char *)row[0])) { errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n", __func__, __LINE__, count); - exit(-1); + exit(EXIT_FAILURE); } tstrncpy(pTblName, (char *)row[0], len[0]+1); @@ -2775,12 +2778,12 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); } else { // exit, if allocate more memory failed - errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", - __func__, __LINE__, dbName, sTblName); tmfree(childTblName); taos_free_result(res); taos_close(taos); - exit(-1); + errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", + __func__, __LINE__, dbName, sTblName); + exit(EXIT_FAILURE); } } pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; @@ -2964,10 +2967,10 @@ static int createSuperTable( lenOfOneRow += TIMESTAMP_BUFF_LEN; } else { taos_close(taos); + free(command); errorPrint("%s() LN%d, config error data type : %s\n", __func__, __LINE__, dataType); - free(command); - exit(-1); + 
exit(EXIT_FAILURE); } } @@ -2976,11 +2979,11 @@ static int createSuperTable( // save for creating child table superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1); if (NULL == superTbl->colsOfCreateChildTable) { - errorPrint("%s() LN%d, Failed when calloc, size:%d", - __func__, __LINE__, len+1); taos_close(taos); free(command); - exit(-1); + errorPrint("%s() LN%d, Failed when calloc, size:%d", + __func__, __LINE__, len+1); + exit(EXIT_FAILURE); } snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); @@ -3054,10 +3057,10 @@ static int createSuperTable( lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { taos_close(taos); + free(command); errorPrint("%s() LN%d, config error tag type : %s\n", __func__, __LINE__, dataType); - free(command); - exit(-1); + exit(EXIT_FAILURE); } } @@ -3089,7 +3092,7 @@ int createDatabasesAndStables(char *command) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } - + for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.db[i].drop) { sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); @@ -3100,35 +3103,43 @@ int createDatabasesAndStables(char *command) { int dataLen = 0; dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); + BUFFER_SIZE - dataLen, "create database if not exists %s", + g_Dbs.db[i].dbName); if (g_Dbs.db[i].dbCfg.blocks > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); + BUFFER_SIZE - dataLen, " blocks %d", + g_Dbs.db[i].dbCfg.blocks); } if (g_Dbs.db[i].dbCfg.cache > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); + BUFFER_SIZE - dataLen, " cache %d", + g_Dbs.db[i].dbCfg.cache); } if (g_Dbs.db[i].dbCfg.days > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days); + 
BUFFER_SIZE - dataLen, " days %d", + g_Dbs.db[i].dbCfg.days); } if (g_Dbs.db[i].dbCfg.keep > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); + BUFFER_SIZE - dataLen, " keep %d", + g_Dbs.db[i].dbCfg.keep); } if (g_Dbs.db[i].dbCfg.quorum > 1) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); + BUFFER_SIZE - dataLen, " quorum %d", + g_Dbs.db[i].dbCfg.quorum); } if (g_Dbs.db[i].dbCfg.replica > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); + BUFFER_SIZE - dataLen, " replica %d", + g_Dbs.db[i].dbCfg.replica); } if (g_Dbs.db[i].dbCfg.update > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); + BUFFER_SIZE - dataLen, " update %d", + g_Dbs.db[i].dbCfg.update); } //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { // dataLen += snprintf(command + dataLen, @@ -3136,42 +3147,48 @@ int createDatabasesAndStables(char *command) { //} if (g_Dbs.db[i].dbCfg.minRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); + BUFFER_SIZE - dataLen, " minrows %d", + g_Dbs.db[i].dbCfg.minRows); } if (g_Dbs.db[i].dbCfg.maxRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); + BUFFER_SIZE - dataLen, " maxrows %d", + g_Dbs.db[i].dbCfg.maxRows); } if (g_Dbs.db[i].dbCfg.comp > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); + BUFFER_SIZE - dataLen, " comp %d", + g_Dbs.db[i].dbCfg.comp); } if (g_Dbs.db[i].dbCfg.walLevel > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel); + BUFFER_SIZE - dataLen, " wal %d", + g_Dbs.db[i].dbCfg.walLevel); } if (g_Dbs.db[i].dbCfg.cacheLast > 0) { dataLen += snprintf(command + dataLen, - 
BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); + BUFFER_SIZE - dataLen, " cachelast %d", + g_Dbs.db[i].dbCfg.cacheLast); } if (g_Dbs.db[i].dbCfg.fsync > 0) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " fsync %d", g_Dbs.db[i].dbCfg.fsync); } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms"))) + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) #if NANO_SECOND_ENABLED == 1 || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "ns", strlen("ns"))) + "ns", 2)) #endif || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "us", strlen("us")))) { + "us", 2))) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); } if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { taos_close(taos); - errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); + errorPrint( "\ncreate database %s failed!\n\n", + g_Dbs.db[i].dbName); return -1; } printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); @@ -3218,7 +3235,7 @@ int createDatabasesAndStables(char *command) { static void* createTable(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("createTable"); @@ -3229,7 +3246,7 @@ static void* createTable(void *sarg) pThreadInfo->buffer = calloc(buff_len, 1); if (pThreadInfo->buffer == NULL) { errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } int len = 0; @@ -3248,11 +3265,11 @@ static void* createTable(void *sarg) g_args.tb_prefix, i, pThreadInfo->cols); } else { - if (superTblInfo == NULL) { + if (stbInfo == NULL) { + free(pThreadInfo->buffer); errorPrint("%s() LN%d, use metric, but super table info is NULL\n", __func__, __LINE__); - free(pThreadInfo->buffer); - exit(-1); + exit(EXIT_FAILURE); } else { if (0 == len) { batchNum = 0; @@ -3260,29 
+3277,35 @@ static void* createTable(void *sarg) len += snprintf(pThreadInfo->buffer + len, buff_len - len, "create table "); } + char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(superTblInfo, i); + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, i); } else { + if (0 == stbInfo->tagSampleCount) { + free(pThreadInfo->buffer); + ERROR_EXIT("use sample file for tag, but has no content!\n"); + } tagsValBuf = getTagValueFromTagSample( - superTblInfo, - i % superTblInfo->tagSampleCount); + stbInfo, + i % stbInfo->tagSampleCount); } + if (NULL == tagsValBuf) { free(pThreadInfo->buffer); - return NULL; + ERROR_EXIT("use metric, but tag buffer is NULL\n"); } len += snprintf(pThreadInfo->buffer + len, buff_len - len, "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", - pThreadInfo->db_name, superTblInfo->childTblPrefix, + pThreadInfo->db_name, stbInfo->childTblPrefix, i, pThreadInfo->db_name, - superTblInfo->sTblName, tagsValBuf); + stbInfo->sTblName, tagsValBuf); free(tagsValBuf); batchNum++; - if ((batchNum < superTblInfo->batchCreateTableNum) + if ((batchNum < stbInfo->batchCreateTableNum) && ((buff_len - len) - >= (superTblInfo->lenOfTagOfOneRow + 256))) { + >= (stbInfo->lenOfTagOfOneRow + 256))) { continue; } } @@ -3317,14 +3340,13 @@ static void* createTable(void *sarg) static int startMultiThreadCreateChildTable( char* cols, int threads, uint64_t tableFrom, int64_t ntables, - char* db_name, SSuperTable* superTblInfo) { + char* db_name, SSuperTable* stbInfo) { pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed\n"); - exit(-1); + ERROR_EXIT("createChildTable malloc failed\n"); } if (threads < 1) { @@ -3344,7 +3366,7 @@ static int startMultiThreadCreateChildTable( threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; 
tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->superTblInfo = superTblInfo; + pThreadInfo->stbInfo = stbInfo; verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); pThreadInfo->taos = taos_connect( g_Dbs.host, @@ -3451,26 +3473,26 @@ static void createChildTables() { /* Read 10000 lines at most. If more than 10000 lines, continue to read after using */ -static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { +static int readTagFromCsvFileToMem(SSuperTable * stbInfo) { size_t n = 0; ssize_t readLen = 0; char * line = NULL; - FILE *fp = fopen(superTblInfo->tagsFile, "r"); + FILE *fp = fopen(stbInfo->tagsFile, "r"); if (fp == NULL) { printf("Failed to open tags file: %s, reason:%s\n", - superTblInfo->tagsFile, strerror(errno)); + stbInfo->tagsFile, strerror(errno)); return -1; } - if (superTblInfo->tagDataBuf) { - free(superTblInfo->tagDataBuf); - superTblInfo->tagDataBuf = NULL; + if (stbInfo->tagDataBuf) { + free(stbInfo->tagDataBuf); + stbInfo->tagDataBuf = NULL; } int tagCount = 10000; int count = 0; - char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount); + char* tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount); if (tagDataBuf == NULL) { printf("Failed to calloc, reason:%s\n", strerror(errno)); fclose(fp); @@ -3486,20 +3508,20 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { continue; } - memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen); + memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen); count++; if (count >= tagCount - 1) { char *tmp = realloc(tagDataBuf, - (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); + (size_t)tagCount*1.5*stbInfo->lenOfTagOfOneRow); if (tmp != NULL) { tagDataBuf = tmp; tagCount = (int)(tagCount*1.5); - memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, - 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); + memset(tagDataBuf + count*stbInfo->lenOfTagOfOneRow, + 0, 
(size_t)((tagCount-count)*stbInfo->lenOfTagOfOneRow)); } else { // exit, if allocate more memory failed - printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); + printf("realloc fail for save tag val from %s\n", stbInfo->tagsFile); tmfree(tagDataBuf); free(line); fclose(fp); @@ -3508,8 +3530,8 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { } } - superTblInfo->tagDataBuf = tagDataBuf; - superTblInfo->tagSampleCount = count; + stbInfo->tagDataBuf = tagDataBuf; + stbInfo->tagSampleCount = count; free(line); fclose(fp); @@ -3520,28 +3542,28 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { Read 10000 lines at most. If more than 10000 lines, continue to read after using */ static int readSampleFromCsvFileToMem( - SSuperTable* superTblInfo) { + SSuperTable* stbInfo) { size_t n = 0; ssize_t readLen = 0; char * line = NULL; int getRows = 0; - FILE* fp = fopen(superTblInfo->sampleFile, "r"); + FILE* fp = fopen(stbInfo->sampleFile, "r"); if (fp == NULL) { errorPrint( "Failed to open sample file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); + stbInfo->sampleFile, strerror(errno)); return -1; } - assert(superTblInfo->sampleDataBuf); - memset(superTblInfo->sampleDataBuf, 0, - MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); + assert(stbInfo->sampleDataBuf); + memset(stbInfo->sampleDataBuf, 0, + MAX_SAMPLES_ONCE_FROM_FILE * stbInfo->lenOfOneRow); while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { errorPrint( "Failed to fseek file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); + stbInfo->sampleFile, strerror(errno)); fclose(fp); return -1; } @@ -3556,13 +3578,13 @@ static int readSampleFromCsvFileToMem( continue; } - if (readLen > superTblInfo->lenOfOneRow) { + if (readLen > stbInfo->lenOfOneRow) { printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n", - (int32_t)readLen, 
superTblInfo->lenOfOneRow); + (int32_t)readLen, stbInfo->lenOfOneRow); continue; } - memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow, + memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow, line, readLen); getRows++; @@ -5047,6 +5069,23 @@ static void postFreeResource() { free(g_Dbs.db[i].superTbls[j].sampleDataBuf); g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; } +#if STMT_IFACE_ENABLED == 1 + if (g_Dbs.db[i].superTbls[j].sampleBindArray) { + for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) { + uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( + g_Dbs.db[i].superTbls[j].sampleBindArray + + sizeof(uintptr_t *) * k)); + for (int c = 1; c < g_Dbs.db[i].superTbls[j].columnCount + 1; c++) { + TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); + if (bind) + tmfree(bind->buffer); + } + tmfree((char *)tmp); + } + } + tmfree((char *)g_Dbs.db[i].superTbls[j].sampleBindArray); +#endif + if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { free(g_Dbs.db[i].superTbls[j].tagDataBuf); g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; @@ -5067,21 +5106,14 @@ static void postFreeResource() { tmfree(g_randfloat_buff); tmfree(g_rand_current_buff); tmfree(g_rand_phase_buff); - tmfree(g_randdouble_buff); + } static int getRowDataFromSample( char* dataBuf, int64_t maxLen, int64_t timestamp, - SSuperTable* superTblInfo, int64_t* sampleUsePos) + SSuperTable* stbInfo, int64_t* sampleUsePos) { if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { - /* int ret = readSampleFromCsvFileToMem(superTblInfo); - if (0 != ret) { - tmfree(superTblInfo->sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; - return -1; - } - */ *sampleUsePos = 0; } @@ -5091,8 +5123,8 @@ static int getRowDataFromSample( "(%" PRId64 ", ", timestamp); dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%s", - superTblInfo->sampleDataBuf - + superTblInfo->lenOfOneRow * (*sampleUsePos)); + stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * (*sampleUsePos)); dataLen += 
snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); (*sampleUsePos)++; @@ -5248,7 +5280,7 @@ static int64_t generateData(char *recBuf, char **data_type, if (s == NULL) { errorPrint("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); - exit(-1); + exit(EXIT_FAILURE); } rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); @@ -5258,7 +5290,7 @@ static int64_t generateData(char *recBuf, char **data_type, if (s == NULL) { errorPrint("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); - exit(-1); + exit(EXIT_FAILURE); } rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); @@ -5266,8 +5298,7 @@ static int64_t generateData(char *recBuf, char **data_type, } if (strlen(recBuf) > MAX_DATA_SIZE) { - perror("column length too long, abort"); - exit(-1); + ERROR_EXIT("column length too long, abort"); } } @@ -5278,27 +5309,27 @@ static int64_t generateData(char *recBuf, char **data_type, return (int32_t)strlen(recBuf); } -static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { +static int prepareSampleDataForSTable(SSuperTable *stbInfo) { char* sampleDataBuf = NULL; sampleDataBuf = calloc( - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); + stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); return -1; } - superTblInfo->sampleDataBuf = sampleDataBuf; - int ret = readSampleFromCsvFileToMem(superTblInfo); + stbInfo->sampleDataBuf = sampleDataBuf; + int ret = readSampleFromCsvFileToMem(stbInfo); if (0 != ret) { errorPrint("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__); tmfree(sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; + stbInfo->sampleDataBuf = NULL; return -1; } @@ -5308,14 +5339,14 
@@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) { int32_t affectedRows; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); uint16_t iface; - if (superTblInfo) - iface = superTblInfo->iface; + if (stbInfo) + iface = stbInfo->iface; else { if (g_args.iface == INTERFACE_BUT) iface = TAOSC_IFACE; @@ -5355,7 +5386,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n"); - exit(-1); + exit(EXIT_FAILURE); } affectedRows = k; break; @@ -5363,7 +5394,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) default: errorPrint("%s() LN%d: unknown insert mode: %d\n", - __func__, __LINE__, superTblInfo->iface); + __func__, __LINE__, stbInfo->iface); affectedRows = 0; } @@ -5373,24 +5404,24 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq) { - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - if (superTblInfo) { - if (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable) { - if (superTblInfo->childTblLimit > 0) { + SSuperTable* stbInfo = pThreadInfo->stbInfo; + if (stbInfo) { + if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) { + if (stbInfo->childTblLimit > 0) { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + - (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); + stbInfo->childTblName + + (tableSeq - stbInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, 
pThreadInfo->ntables, tableSeq); snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); + stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); } } else { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", - superTblInfo->childTblPrefix, tableSeq); + stbInfo->childTblPrefix, tableSeq); } } else { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", @@ -5471,7 +5502,7 @@ static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, } static int32_t generateStbDataTail( - SSuperTable* superTblInfo, + SSuperTable* stbInfo, uint32_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows, uint64_t recordFrom, int64_t startTime, @@ -5481,7 +5512,7 @@ static int32_t generateStbDataTail( char *pstr = buffer; bool tsRand; - if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { + if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { tsRand = true; } else { tsRand = false; @@ -5496,26 +5527,26 @@ static int32_t generateStbDataTail( int64_t lenOfRow = 0; if (tsRand) { - if (superTblInfo->disorderRatio > 0) { - lenOfRow = generateStbRowData(superTblInfo, data, + if (stbInfo->disorderRatio > 0) { + lenOfRow = generateStbRowData(stbInfo, data, remainderBufLen, startTime + getTSRandTail( - superTblInfo->timeStampStep, k, - superTblInfo->disorderRatio, - superTblInfo->disorderRange) + stbInfo->timeStampStep, k, + stbInfo->disorderRatio, + stbInfo->disorderRange) ); } else { - lenOfRow = generateStbRowData(superTblInfo, data, + lenOfRow = generateStbRowData(stbInfo, data, remainderBufLen, - startTime + superTblInfo->timeStampStep * k + startTime + stbInfo->timeStampStep * k ); } } else { lenOfRow = getRowDataFromSample( data, (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE, - startTime + superTblInfo->timeStampStep * k, - superTblInfo, + startTime + stbInfo->timeStampStep * k, + stbInfo, pSamplePos); } @@ -5571,7 +5602,7 @@ static int 
generateSQLHeadWithoutStb(char *tableName, } static int generateStbSQLHead( - SSuperTable* superTblInfo, + SSuperTable* stbInfo, char *tableName, int64_t tableSeq, char *dbName, char *buffer, int remainderBufLen) @@ -5580,14 +5611,14 @@ static int generateStbSQLHead( char headBuf[HEAD_BUFF_LEN]; - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(superTblInfo, tableSeq); + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { tagsValBuf = getTagValueFromTagSample( - superTblInfo, - tableSeq % superTblInfo->tagSampleCount); + stbInfo, + tableSeq % stbInfo->tagSampleCount); } if (NULL == tagsValBuf) { errorPrint("%s() LN%d, tag buf failed to allocate memory\n", @@ -5602,10 +5633,10 @@ static int generateStbSQLHead( dbName, tableName, dbName, - superTblInfo->sTblName, + stbInfo->sTblName, tagsValBuf); tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { + } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { len = snprintf( headBuf, HEAD_BUFF_LEN, @@ -5630,12 +5661,12 @@ static int generateStbSQLHead( } static int32_t generateStbInterlaceData( - SSuperTable *superTblInfo, + threadInfo *pThreadInfo, char *tableName, uint32_t batchPerTbl, uint64_t i, uint32_t batchPerTblTimes, uint64_t tableSeq, - threadInfo *pThreadInfo, char *buffer, + char *buffer, int64_t insertRows, int64_t startTime, uint64_t *pRemainderBufLen) @@ -5643,8 +5674,9 @@ static int32_t generateStbInterlaceData( assert(buffer); char *pstr = buffer; + SSuperTable *stbInfo = pThreadInfo->stbInfo; int headLen = generateStbSQLHead( - superTblInfo, + stbInfo, tableName, tableSeq, pThreadInfo->db_name, pstr, *pRemainderBufLen); @@ -5664,12 +5696,12 @@ static int32_t generateStbInterlaceData( pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, 
batchPerTbl); - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { startTime = taosGetTimestamp(pThreadInfo->time_precision); } int32_t k = generateStbDataTail( - superTblInfo, + stbInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); @@ -5732,8 +5764,206 @@ static int64_t generateInterlaceDataWithoutStb( } #if STMT_IFACE_ENABLED == 1 -static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, - char *dataType, int32_t dataLen, char **ptr, char *value) +static int32_t prepareStmtBindArrayByType( + TAOS_BIND *bind, + char *dataType, int32_t dataLen, + int32_t timePrec, + char *value) +{ + if (0 == strncasecmp(dataType, + "BINARY", strlen("BINARY"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary; + + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + bind_binary = calloc(1, strlen(value) + 1); + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + bind_binary = calloc(1, dataLen + 1); + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } + + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "NCHAR", strlen("NCHAR"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar; + + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + bind_nchar = calloc(1, strlen(value) + 1); + strncpy(bind_nchar, value, strlen(value)); + } else { + bind_nchar = calloc(1, dataLen + 1); + rand_string(bind_nchar, dataLen); + } + + bind->buffer_length = strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if 
(0 == strncasecmp(dataType, + "INT", strlen("INT"))) { + int32_t *bind_int = malloc(sizeof(int32_t)); + + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "BIGINT", strlen("BIGINT"))) { + int64_t *bind_bigint = malloc(sizeof(int64_t)); + + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "FLOAT", strlen("FLOAT"))) { + float *bind_float = malloc(sizeof(float)); + + if (value) { + *bind_float = (float)atof(value); + } else { + *bind_float = rand_float(); + } + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "DOUBLE", strlen("DOUBLE"))) { + double *bind_double = malloc(sizeof(double)); + + if (value) { + *bind_double = atof(value); + } else { + *bind_double = rand_double(); + } + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "SMALLINT", strlen("SMALLINT"))) { + int16_t *bind_smallint = malloc(sizeof(int16_t)); + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, 
+ "TINYINT", strlen("TINYINT"))) { + int8_t *bind_tinyint = malloc(sizeof(int8_t)); + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "BOOL", strlen("BOOL"))) { + int8_t *bind_bool = malloc(sizeof(int8_t)); + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + } else if (0 == strncasecmp(dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + int64_t *bind_ts2 = malloc(sizeof(int64_t)); + + if (value) { + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } + } else { + *bind_ts2 = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + errorPrint( "No support data type: %s\n", dataType); + return -1; + } + + return 0; +} + +static int32_t prepareStmtBindArrayByTypeForRand( + TAOS_BIND *bind, + char *dataType, int32_t dataLen, + int32_t timePrec, + char **ptr, + char *value) { if (0 == strncasecmp(dataType, "BINARY", strlen("BINARY"))) { @@ -5814,7 +6044,7 @@ static int32_t 
prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "FLOAT", strlen("FLOAT"))) { - float *bind_float = (float *) *ptr; + float *bind_float = (float *)*ptr; if (value) { *bind_float = (float)atof(value); @@ -5830,7 +6060,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "DOUBLE", strlen("DOUBLE"))) { - double *bind_double = (double *)*ptr; + double *bind_double = (double *)*ptr; if (value) { *bind_double = atof(value); @@ -5862,7 +6092,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "TINYINT", strlen("TINYINT"))) { - int8_t *bind_tinyint = (int8_t *)*ptr; + int8_t *bind_tinyint = (int8_t *)*ptr; if (value) { *bind_tinyint = (int8_t)atoi(value); @@ -5874,12 +6104,21 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, bind->buffer = bind_tinyint; bind->length = &bind->buffer_length; bind->is_null = NULL; + *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "BOOL", strlen("BOOL"))) { - int8_t *bind_bool = (int8_t *)*ptr; + int8_t *bind_bool = (int8_t *)*ptr; - *bind_bool = rand_bool(); + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } bind->buffer_type = TSDB_DATA_TYPE_BOOL; bind->buffer_length = sizeof(int8_t); bind->buffer = bind_bool; @@ -5889,10 +6128,28 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "TIMESTAMP", strlen("TIMESTAMP"))) { - int64_t *bind_ts2 = (int64_t *) *ptr; + int64_t *bind_ts2 = (int64_t *)*ptr; if (value) { - *bind_ts2 = atoll(value); + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t 
tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } } else { *bind_ts2 = rand_bigint(); } @@ -5912,13 +6169,14 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, } static int32_t prepareStmtWithoutStb( - TAOS_STMT *stmt, + threadInfo *pThreadInfo, char *tableName, uint32_t batch, int64_t insertRows, int64_t recordFrom, int64_t startTime) { + TAOS_STMT *stmt = pThreadInfo->stmt; int ret = taos_stmt_set_tbname(stmt, tableName); if (ret != 0) { errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n", @@ -5938,15 +6196,11 @@ static int32_t prepareStmtWithoutStb( int32_t k = 0; for (k = 0; k < batch;) { /* columnCount + 1 (ts) */ - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - char *ptr = data; TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); - int64_t *bind_ts; + int64_t *bind_ts = pThreadInfo->bind_ts; - bind_ts = (int64_t *)ptr; bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; if (g_args.disorderRatio) { @@ -5962,8 +6216,6 @@ static int32_t prepareStmtWithoutStb( bind->length = &bind->buffer_length; bind->is_null = NULL; - ptr += bind->buffer_length; - for (int i = 0; i < g_args.num_of_CPR; i ++) { bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); @@ -5971,7 +6223,8 @@ static int32_t prepareStmtWithoutStb( bind, data_type[i], g_args.len_of_binary, - &ptr, NULL)) { + pThreadInfo->time_precision, + NULL)) { return -1; } } @@ -5998,10 +6251,42 @@ static int32_t prepareStmtWithoutStb( return k; } -static int32_t prepareStbStmtBind( - char *bindArray, SSuperTable *stbInfo, bool sourceRand, +static int32_t prepareStbStmtBindTag( + char *bindArray, SSuperTable *stbInfo, + char *tagsVal, + int32_t timePrec) +{ + char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); + if (bindBuffer == NULL) { + 
errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, g_args.len_of_binary); + return -1; + } + + TAOS_BIND *tag; + + for (int t = 0; t < stbInfo->tagCount; t ++) { + tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); + if ( -1 == prepareStmtBindArrayByType( + tag, + stbInfo->tags[t].dataType, + stbInfo->tags[t].dataLen, + timePrec, + NULL)) { + free(bindBuffer); + return -1; + } + } + + free(bindBuffer); + return 0; +} + +static int32_t prepareStbStmtBindRand( + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, int64_t startTime, int32_t recSeq, - bool isColumn) + int32_t timePrec) { char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { @@ -6016,121 +6301,92 @@ static int32_t prepareStbStmtBind( TAOS_BIND *bind; - if (isColumn) { - int cursor = 0; + for (int i = 0; i < stbInfo->columnCount + 1; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); - for (int i = 0; i < stbInfo->columnCount + 1; i ++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); + if (i == 0) { + int64_t *bind_ts = ts; - if (i == 0) { - int64_t *bind_ts; - - bind_ts = (int64_t *)ptr; - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - if (stbInfo->disorderRatio) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, recSeq, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * recSeq; - } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - ptr += bind->buffer_length; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); } else { - - if (sourceRand) { - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i-1].dataType, - 
stbInfo->columns[i-1].dataLen, - &ptr, - NULL)) { - free(bindBuffer); - return -1; - } - } else { - char *restStr = stbInfo->sampleDataBuf + cursor; - int lengthOfRest = strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index ++) { - if (restStr[index] == ',') { - break; - } - } - - memset(bindBuffer, 0, g_args.len_of_binary); - strncpy(bindBuffer, restStr, index); - cursor += index + 1; // skip ',' too - - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i-1].dataType, - stbInfo->columns[i-1].dataLen, - &ptr, - bindBuffer)) { - free(bindBuffer); - return -1; - } - } + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; } - } - } else { - TAOS_BIND *tag; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - for (int t = 0; t < stbInfo->tagCount; t ++) { - tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); - if ( -1 == prepareStmtBindArrayByType( - tag, - stbInfo->tags[t].dataType, - stbInfo->tags[t].dataLen, - &ptr, - NULL)) { - free(bindBuffer); - return -1; - } + ptr += bind->buffer_length; + } else if ( -1 == prepareStmtBindArrayByTypeForRand( + bind, + stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].dataLen, + timePrec, + &ptr, + NULL)) { + tmfree(bindBuffer); + return -1; } - } - free(bindBuffer); + tmfree(bindBuffer); return 0; } -static int32_t prepareStbStmt( - SSuperTable *stbInfo, - TAOS_STMT *stmt, +static int32_t prepareStbStmtBindWithSample( + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, + int64_t startTime, int32_t recSeq, + int32_t timePrec, + int64_t samplePos) +{ + TAOS_BIND *bind; + + bind = (TAOS_BIND *)bindArray; + + int64_t *bind_ts = ts; + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + 
stbInfo->timeStampStep * recSeq; + } + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + return 0; +} + +static int32_t prepareStbStmtRand( + threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, uint32_t batch, uint64_t insertRows, uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) + int64_t startTime) { int ret; - - bool sourceRand; - if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { - sourceRand = true; - } else { - sourceRand = false; // from sample data file - } + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { char* tagsValBuf = NULL; - bool tagRand; if (0 == stbInfo->tagSource) { - tagRand = true; tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { - tagRand = false; tagsValBuf = getTagValueFromTagSample( stbInfo, tableSeq % stbInfo->tagSampleCount); @@ -6150,8 +6406,9 @@ static int32_t prepareStbStmt( return -1; } - if (-1 == prepareStbStmtBind( - tagsArray, stbInfo, tagRand, -1, -1, false /* is tag */)) { + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { tmfree(tagsValBuf); tmfree(tagsArray); return -1; @@ -6186,8 +6443,12 @@ static int32_t prepareStbStmt( uint32_t k; for (k = 0; k < batch;) { /* columnCount + 1 (ts) */ - if (-1 == prepareStbStmtBind(bindArray, stbInfo, sourceRand, - startTime, k, true /* is column */)) { + if (-1 == prepareStbStmtBindRand( + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k, + pThreadInfo->time_precision + /* is column */)) { free(bindArray); return -1; } @@ -6210,10 +6471,6 @@ static int32_t prepareStbStmt( k++; recordFrom ++; - if (!sourceRand) { - (*pSamplePos) ++; - } - if (recordFrom >= insertRows) { break; } @@ -6223,9 +6480,8 @@ static int32_t prepareStbStmt( return k; } -static int32_t prepareStbStmtInterlace( - 
SSuperTable *stbInfo, - TAOS_STMT *stmt, +static int32_t prepareStbStmtWithSample( + threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, uint32_t batch, @@ -6234,41 +6490,109 @@ static int32_t prepareStbStmtInterlace( int64_t startTime, int64_t *pSamplePos) { - return prepareStbStmt( - stbInfo, - stmt, - tableName, - tableSeq, - batch, - insertRows, 0, startTime, - pSamplePos); -} + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; -static int32_t prepareStbStmtProgressive( - SSuperTable *stbInfo, - TAOS_STMT *stmt, - char *tableName, - int64_t tableSeq, - uint32_t batch, - uint64_t insertRows, - uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) -{ - return prepareStbStmt( - stbInfo, - stmt, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, recordFrom, startTime, - pSamplePos); -} + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { + tmfree(tagsValBuf); + tmfree(tagsArray); + return -1; + } + + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree(tagsArray); + + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } + + uint32_t k; + for (k = 0; k < batch;) { + char *bindArray = (char *)(*((uintptr_t *) + (stbInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + /* columnCount + 1 (ts) */ + if (-1 == prepareStbStmtBindWithSample( + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k, + pThreadInfo->time_precision, + *pSamplePos + /* is column */)) { + return -1; + } + ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + k++; + recordFrom ++; + + (*pSamplePos) ++; + if ((*pSamplePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + *pSamplePos = 0; + } + + if (recordFrom >= insertRows) { + break; + } + } + + return k; +} #endif static int32_t generateStbProgressiveData( - SSuperTable *superTblInfo, + SSuperTable *stbInfo, char *tableName, int64_t tableSeq, char *dbName, char *buffer, @@ -6282,7 +6606,7 @@ static int32_t generateStbProgressiveData( memset(pstr, 0, *pRemainderBufLen); int64_t headLen = generateStbSQLHead( - superTblInfo, + stbInfo, tableName, tableSeq, dbName, buffer, *pRemainderBufLen); @@ -6294,7 +6618,7 @@ static int32_t generateStbProgressiveData( int64_t dataLen; - return generateStbDataTail(superTblInfo, + return generateStbDataTail(stbInfo, g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom, startTime, @@ -6354,26 +6678,34 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t 
nTimeStampStep; uint64_t insert_interval; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + bool sourceRand; - if (superTblInfo) { - insertRows = superTblInfo->insertRows; + SSuperTable* stbInfo = pThreadInfo->stbInfo; - if ((superTblInfo->interlaceRows == 0) + if (stbInfo) { + insertRows = stbInfo->insertRows; + + if ((stbInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) { interlaceRows = g_args.interlace_rows; } else { - interlaceRows = superTblInfo->interlaceRows; + interlaceRows = stbInfo->interlaceRows; + } + maxSqlLen = stbInfo->maxSqlLen; + nTimeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { + sourceRand = true; + } else { + sourceRand = false; // from sample data file } - maxSqlLen = superTblInfo->maxSqlLen; - nTimeStampStep = superTblInfo->timeStampStep; - insert_interval = superTblInfo->insertInterval; } else { insertRows = g_args.num_of_DPT; interlaceRows = g_args.interlace_rows; maxSqlLen = g_args.max_sql_len; nTimeStampStep = g_args.timestamp_step; insert_interval = g_args.insert_interval; + sourceRand = true; } debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", @@ -6456,28 +6788,38 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { uint64_t oldRemainderLen = remainderBufLen; int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + if (stbInfo) { + if (stbInfo->iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtInterlace( - superTblInfo, - pThreadInfo->stmt, - tableName, - tableSeq, - batchPerTbl, - insertRows, i, - startTime, - &(pThreadInfo->samplePos)); + if (sourceRand) { + generated = prepareStbStmtRand( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime + ); + } else { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + 
&(pThreadInfo->samplePos)); + } #else generated = -1; #endif } else { generated = generateStbInterlaceData( - superTblInfo, + pThreadInfo, tableName, batchPerTbl, i, batchPerTblTimes, tableSeq, - pThreadInfo, pstr, + pstr, insertRows, startTime, &remainderBufLen); @@ -6490,7 +6832,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { tableName, batchPerTbl, startTime); #if STMT_IFACE_ENABLED == 1 generated = prepareStmtWithoutStb( - pThreadInfo->stmt, tableName, + pThreadInfo, + tableName, batchPerTbl, insertRows, i, startTime); @@ -6639,12 +6982,12 @@ free_of_interlace: static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + SSuperTable* stbInfo = pThreadInfo->stbInfo; + uint64_t maxSqlLen = stbInfo?stbInfo->maxSqlLen:g_args.max_sql_len; int64_t timeStampStep = - superTblInfo?superTblInfo->timeStampStep:g_args.timestamp_step; + stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; int64_t insertRows = - (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; + (stbInfo)?stbInfo->insertRows:g_args.num_of_DPT; verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); @@ -6663,6 +7006,17 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; + bool sourceRand; + if (stbInfo) { + if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { + sourceRand = true; + } else { + sourceRand = false; // from sample data file + } + } else { + sourceRand = true; + } + pThreadInfo->samplePos = 0; int percentComplete = 0; @@ -6696,24 +7050,35 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { remainderBufLen -= len; int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + if (stbInfo) { + if (stbInfo->iface == 
STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtProgressive( - superTblInfo, - pThreadInfo->stmt, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, i, start_time, - &(pThreadInfo->samplePos)); + if (sourceRand) { + generated = prepareStbStmtRand( + pThreadInfo, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, + i, start_time + ); + } else { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); + } #else generated = -1; #endif } else { generated = generateStbProgressiveData( - superTblInfo, - tableName, tableSeq, pThreadInfo->db_name, pstr, + stbInfo, + tableName, tableSeq, + pThreadInfo->db_name, pstr, insertRows, i, start_time, &(pThreadInfo->samplePos), &remainderBufLen); @@ -6722,7 +7087,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (g_args.iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 generated = prepareStmtWithoutStb( - pThreadInfo->stmt, + pThreadInfo, tableName, g_args.num_of_RPR, insertRows, i, @@ -6792,9 +7157,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { } // num_of_DPT if ((g_args.verbose_print) && - (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) + (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) && (0 == strncasecmp( - superTblInfo->dataSource, + stbInfo->dataSource, "sample", strlen("sample")))) { verbosePrint("%s() LN%d samplePos=%"PRId64"\n", __func__, __LINE__, pThreadInfo->samplePos); @@ -6812,18 +7177,18 @@ free_of_progressive: static void* syncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("syncWrite"); uint32_t interlaceRows; - if (superTblInfo) { - if ((superTblInfo->interlaceRows == 0) + if (stbInfo) { + if ((stbInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) { interlaceRows = 
g_args.interlace_rows; } else { - interlaceRows = superTblInfo->interlaceRows; + interlaceRows = stbInfo->interlaceRows; } } else { interlaceRows = g_args.interlace_rows; @@ -6840,10 +7205,10 @@ static void* syncWrite(void *sarg) { static void callBack(void *param, TAOS_RES *res, int code) { threadInfo* pThreadInfo = (threadInfo*)param; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + stbInfo?stbInfo->insertInterval:g_args.insert_interval; if (insert_interval) { pThreadInfo->et = taosGetTimestampMs(); if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { @@ -6851,13 +7216,13 @@ static void callBack(void *param, TAOS_RES *res, int code) { } } - char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); + char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", pThreadInfo->db_name, pThreadInfo->tb_prefix, pThreadInfo->start_table_from); - // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { if (pThreadInfo->counter >= g_args.num_of_RPR) { pThreadInfo->start_table_from++; pThreadInfo->counter = 0; @@ -6871,15 +7236,15 @@ static void callBack(void *param, TAOS_RES *res, int code) { for (int i = 0; i < g_args.num_of_RPR; i++) { int rand_num = taosRandom() % 100; - if (0 != pThreadInfo->superTblInfo->disorderRatio - && rand_num < pThreadInfo->superTblInfo->disorderRatio) { + if (0 != pThreadInfo->stbInfo->disorderRatio + && rand_num < pThreadInfo->stbInfo->disorderRatio) { int64_t d = pThreadInfo->lastTs - - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); - generateStbRowData(pThreadInfo->superTblInfo, data, + - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1); + 
generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, d); } else { - generateStbRowData(pThreadInfo->superTblInfo, + generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, pThreadInfo->lastTs += 1000); @@ -6887,7 +7252,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { pstr += sprintf(pstr, "%s", data); pThreadInfo->counter++; - if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { break; } } @@ -6903,7 +7268,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { static void *asyncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("asyncWrite"); @@ -6912,7 +7277,7 @@ static void *asyncWrite(void *sarg) { pThreadInfo->lastTs = pThreadInfo->start_time; int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + stbInfo?stbInfo->insertInterval:g_args.insert_interval; if (insert_interval) { pThreadInfo->st = taosGetTimestampMs(); } @@ -6949,8 +7314,81 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * return 0; } +#if STMT_IFACE_ENABLED == 1 +static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) +{ + stbInfo->sampleBindArray = calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + if (stbInfo->sampleBindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", + __func__, __LINE__, (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + return -1; + } + + + for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) { + char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); + if (bindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (stbInfo->columnCount + 1)); + return -1; + } + + + TAOS_BIND *bind; + int cursor = 0; + + for (int c 
= 0; c < stbInfo->columnCount + 1; c++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c)); + + if (c == 0) { + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = NULL; //bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + char *restStr = stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *bindBuffer = calloc(1, index + 1); + if (bindBuffer == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, DOUBLE_BUFF_LEN); + return -1; + } + + strncpy(bindBuffer, restStr, index); + cursor += index + 1; // skip ',' too + + if (-1 == prepareStmtBindArrayByType( + bind, + stbInfo->columns[c-1].dataType, + stbInfo->columns[c-1].dataLen, + timePrec, + bindBuffer)) { + free(bindBuffer); + return -1; + } + free(bindBuffer); + } + } + *((uintptr_t *)(stbInfo->sampleBindArray + (sizeof(char *)) * i)) = (uintptr_t)bindArray; + } + + return 0; +} +#endif + static void startMultiThreadInsertData(int threads, char* db_name, - char* precision, SSuperTable* superTblInfo) { + char* precision, SSuperTable* stbInfo) { int32_t timePrec = TSDB_TIME_PRECISION_MILLI; if (0 != precision[0]) { @@ -6964,19 +7402,19 @@ static void startMultiThreadInsertData(int threads, char* db_name, #endif } else { errorPrint("Not support precision: %s\n", precision); - exit(-1); + exit(EXIT_FAILURE); } } int64_t start_time; - if (superTblInfo) { - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + if (stbInfo) { + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { start_time = taosGetTimestamp(timePrec); } else { if (TSDB_CODE_SUCCESS != taosParseTime( - superTblInfo->startTimestamp, + stbInfo->startTimestamp, &start_time, - strlen(superTblInfo->startTimestamp), + 
strlen(stbInfo->startTimestamp), timePrec, 0)) { ERROR_EXIT("failed to parse time!\n"); } @@ -6990,12 +7428,12 @@ static void startMultiThreadInsertData(int threads, char* db_name, int64_t start = taosGetTimestampMs(); // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, + if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { - if (0 != prepareSampleDataForSTable(superTblInfo)) { + if (0 != prepareSampleDataForSTable(stbInfo)) { errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } } @@ -7005,68 +7443,68 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (NULL == taos0) { errorPrint("%s() LN%d, connect to server fail , reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } int64_t ntables = 0; uint64_t tableFrom; - if (superTblInfo) { + if (stbInfo) { int64_t limit; uint64_t offset; if ((NULL != g_args.sqlFile) - && (superTblInfo->childTblExists == TBL_NO_EXISTS) - && ((superTblInfo->childTblOffset != 0) - || (superTblInfo->childTblLimit >= 0))) { + && (stbInfo->childTblExists == TBL_NO_EXISTS) + && ((stbInfo->childTblOffset != 0) + || (stbInfo->childTblLimit >= 0))) { printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); } - if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) { - if ((superTblInfo->childTblLimit < 0) - || ((superTblInfo->childTblOffset - + superTblInfo->childTblLimit) - > (superTblInfo->childTblCount))) { - superTblInfo->childTblLimit = - superTblInfo->childTblCount - superTblInfo->childTblOffset; + if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) { + if ((stbInfo->childTblLimit < 0) + || ((stbInfo->childTblOffset + + stbInfo->childTblLimit) + > (stbInfo->childTblCount))) { + stbInfo->childTblLimit = + stbInfo->childTblCount - stbInfo->childTblOffset; } - offset = superTblInfo->childTblOffset; - 
limit = superTblInfo->childTblLimit; + offset = stbInfo->childTblOffset; + limit = stbInfo->childTblLimit; } else { - limit = superTblInfo->childTblCount; + limit = stbInfo->childTblCount; offset = 0; } ntables = limit; tableFrom = offset; - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) - > superTblInfo->childTblCount)) { + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && ((stbInfo->childTblOffset + stbInfo->childTblLimit) + > stbInfo->childTblCount)) { printf("WARNING: specified offset + limit > child table count!\n"); prompt(); } - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && (0 == superTblInfo->childTblLimit)) { + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && (0 == stbInfo->childTblLimit)) { printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n"); prompt(); } - superTblInfo->childTblName = (char*)calloc(1, + stbInfo->childTblName = (char*)calloc(1, limit * TSDB_TABLE_NAME_LEN); - if (superTblInfo->childTblName == NULL) { - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + if (stbInfo->childTblName == NULL) { taos_close(taos0); - exit(-1); + errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + exit(EXIT_FAILURE); } int64_t childTblCount; getChildNameOfSuperTableWithLimitAndOffset( taos0, - db_name, superTblInfo->sTblName, - &superTblInfo->childTblName, &childTblCount, + db_name, stbInfo->sTblName, + &stbInfo->childTblName, &childTblCount, limit, offset); } else { @@ -7087,11 +7525,11 @@ static void startMultiThreadInsertData(int threads, char* db_name, b = ntables % threads; } - if ((superTblInfo) - && (superTblInfo->iface == REST_IFACE)) { + if ((stbInfo) + && (stbInfo->iface == REST_IFACE)) { if (convertHostToServAddr( g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) { - exit(-1); + ERROR_EXIT("convert host to server address"); } } @@ -7104,98 +7542,110 @@ static void 
startMultiThreadInsertData(int threads, char* db_name, memset(pids, 0, threads * sizeof(pthread_t)); memset(infos, 0, threads * sizeof(threadInfo)); +#if STMT_IFACE_ENABLED == 1 + char *stmtBuffer = calloc(1, BUFFER_SIZE); + assert(stmtBuffer); + if ((g_args.iface == STMT_IFACE) + || ((stbInfo) + && (stbInfo->iface == STMT_IFACE))) { + char *pstr = stmtBuffer; + + if ((stbInfo) + && (AUTO_CREATE_SUBTBL + == stbInfo->autoCreateTable)) { + pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", + stbInfo->sTblName); + for (int tag = 0; tag < (stbInfo->tagCount - 1); + tag ++ ) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ") VALUES(?"); + } else { + pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); + } + + int columnCount; + if (stbInfo) { + columnCount = stbInfo->columnCount; + } else { + columnCount = g_args.num_of_CPR; + } + + for (int col = 0; col < columnCount; col ++) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ")"); + + debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer); + + if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, + "sample", strlen("sample")))) { + parseSampleFileToStmt(stbInfo, timePrec); + } + } +#endif + for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); pThreadInfo->time_precision = timePrec; - pThreadInfo->superTblInfo = superTblInfo; + pThreadInfo->stbInfo = stbInfo; pThreadInfo->start_time = start_time; pThreadInfo->minDelay = UINT64_MAX; - if ((NULL == superTblInfo) || - (superTblInfo->iface != REST_IFACE)) { + if ((NULL == stbInfo) || + (stbInfo->iface != REST_IFACE)) { //t_info->taos = taos; pThreadInfo->taos = taos_connect( g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); if (NULL == pThreadInfo->taos) { + free(infos); errorPrint( "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); - free(infos); - 
exit(-1); + exit(EXIT_FAILURE); } #if STMT_IFACE_ENABLED == 1 if ((g_args.iface == STMT_IFACE) - || ((superTblInfo) - && (superTblInfo->iface == STMT_IFACE))) { + || ((stbInfo) + && (stbInfo->iface == STMT_IFACE))) { - int columnCount; - if (superTblInfo) { - columnCount = superTblInfo->columnCount; - } else { - columnCount = g_args.num_of_CPR; - } pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); if (NULL == pThreadInfo->stmt) { + free(pids); + free(infos); errorPrint( "%s() LN%d, failed init stmt, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); + exit(EXIT_FAILURE); + } + + int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0); + if (ret != 0){ free(pids); free(infos); - exit(-1); - } - - char *buffer = calloc(1, BUFFER_SIZE); - assert(buffer); - char *pstr = buffer; - - if ((superTblInfo) - && (AUTO_CREATE_SUBTBL - == superTblInfo->autoCreateTable)) { - pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", - superTblInfo->sTblName); - for (int tag = 0; tag < (superTblInfo->tagCount - 1); - tag ++ ) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ") VALUES(?"); - } else { - pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); - } - - for (int col = 0; col < columnCount; col ++) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ")"); - - debugPrint("%s() LN%d, buffer: %s", __func__, __LINE__, buffer); - int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0); - if (ret != 0){ + free(stmtBuffer); errorPrint("failed to execute taos_stmt_prepare. return 0x%x. 
reason: %s\n", ret, taos_stmt_errstr(pThreadInfo->stmt)); - free(pids); - free(infos); - free(buffer); - exit(-1); + exit(EXIT_FAILURE); } - - free(buffer); + pThreadInfo->bind_ts = malloc(sizeof(int64_t)); } #endif } else { pThreadInfo->taos = NULL; } - /* if ((NULL == superTblInfo) - || (0 == superTblInfo->multiThreadWriteOneTbl)) { + /* if ((NULL == stbInfo) + || (0 == stbInfo->multiThreadWriteOneTbl)) { */ pThreadInfo->start_table_from = tableFrom; pThreadInfo->ntables = iend_table_to + 1; /* } else { pThreadInfo->start_table_from = 0; - pThreadInfo->ntables = superTblInfo->childTblCount; + pThreadInfo->ntables = stbInfo->childTblCount; pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint(); } */ @@ -7215,6 +7665,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, } } +#if STMT_IFACE_ENABLED == 1 + free(stmtBuffer); +#endif + for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); } @@ -7228,11 +7682,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; - tsem_destroy(&(pThreadInfo->lock_sem)); - #if STMT_IFACE_ENABLED == 1 if (pThreadInfo->stmt) { taos_stmt_close(pThreadInfo->stmt); + tmfree((char *)pThreadInfo->bind_ts); } #endif tsem_destroy(&(pThreadInfo->lock_sem)); @@ -7242,9 +7695,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, __func__, __LINE__, pThreadInfo->threadID, pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); - if (superTblInfo) { - superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; - superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows; + if (stbInfo) { + stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; + stbInfo->totalInsertRows += pThreadInfo->totalInsertRows; } else { g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; g_args.totalInsertRows += pThreadInfo->totalInsertRows; @@ -7265,22 +7718,22 @@ static 
void startMultiThreadInsertData(int threads, char* db_name, double tInMs = t/1000.0; - if (superTblInfo) { + if (stbInfo) { fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->sTblName, (tInMs)? - (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); if (g_fpOfInsertResult) { fprintf(g_fpOfInsertResult, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->sTblName, (tInMs)? 
- (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); } } else { fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", @@ -7335,8 +7788,8 @@ static void *readTable(void *sarg) { } int64_t num_of_DPT; - /* if (pThreadInfo->superTblInfo) { - num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table; + /* if (pThreadInfo->stbInfo) { + num_of_DPT = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; } else { */ num_of_DPT = g_args.num_of_DPT; @@ -7410,7 +7863,7 @@ static void *readMetric(void *sarg) { return NULL; } - int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows; + int64_t num_of_DPT = pThreadInfo->stbInfo->insertRows; int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; int64_t totalData = num_of_DPT * num_of_tables; bool do_aggreFunc = g_Dbs.do_aggreFunc; @@ -7549,14 +8002,14 @@ static int insertTestProcess() { if (g_Dbs.db[i].superTblCount > 0) { for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; + SSuperTable* stbInfo = &g_Dbs.db[i].superTbls[j]; - if (superTblInfo && (superTblInfo->insertRows > 0)) { + if (stbInfo && (stbInfo->insertRows > 0)) { startMultiThreadInsertData( g_Dbs.threadCount, g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, - superTblInfo); + stbInfo); } } } @@ -7776,7 +8229,7 @@ static int queryTestProcess() { if (taos == NULL) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } if (0 != g_queryInfo.superQueryInfo.sqlCount) { @@ -7796,7 +8249,7 @@ static int queryTestProcess() { if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { if (convertHostToServAddr( g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0) - exit(-1); + ERROR_EXIT("convert host to server address"); } 
pthread_t *pids = NULL; @@ -8000,10 +8453,10 @@ static void *superSubscribe(void *sarg) { setThreadName("superSub"); if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { + free(subSqlStr); errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); - free(subSqlStr); - exit(-1); + exit(EXIT_FAILURE); } if (pThreadInfo->taos == NULL) { @@ -8269,7 +8722,7 @@ static int subscribeTestProcess() { if (taos == NULL) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } if (0 != g_queryInfo.superQueryInfo.sqlCount) { @@ -8298,7 +8751,7 @@ static int subscribeTestProcess() { errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); - exit(-1); + exit(EXIT_FAILURE); } pids = calloc( @@ -8313,7 +8766,7 @@ static int subscribeTestProcess() { sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { @@ -8350,7 +8803,7 @@ static int subscribeTestProcess() { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); // taos_close(taos); - exit(-1); + exit(EXIT_FAILURE); } int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; @@ -8553,8 +9006,7 @@ static int regexMatch(const char *s, const char *reg, int cflags) { /* Compile regular expression */ if (regcomp(®ex, reg, cflags) != 0) { - printf("Fail to compile regex\n"); - exit(-1); + ERROR_EXIT("Fail to compile regex\n"); } /* Execute regular expression */ @@ -8567,9 +9019,9 @@ static int regexMatch(const char *s, const char *reg, int cflags) { return 0; } else { regerror(reti, ®ex, msgbuf, sizeof(msgbuf)); - printf("Regex match failed: %s\n", msgbuf); regfree(®ex); - exit(-1); + printf("Regex match failed: %s\n", msgbuf); 
+ exit(EXIT_FAILURE); } return 0; @@ -8671,7 +9123,7 @@ static void queryResult() { if (g_args.use_metric) { pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0]; tstrncpy(pThreadInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); } else { @@ -8687,10 +9139,10 @@ static void queryResult() { g_Dbs.db[0].dbName, g_Dbs.port); if (pThreadInfo->taos == NULL) { + free(pThreadInfo); errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - free(pThreadInfo); - exit(-1); + exit(EXIT_FAILURE); } tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); From d5f38ac9c92f452386771fa7bfefcf29b2c94520 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Sat, 14 Aug 2021 08:57:03 +0800 Subject: [PATCH 064/239] do further check for blk with len 0 and SKVRow --- src/wal/src/walWrite.c | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 2dfdb84818..7523369dc2 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -309,36 +309,63 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, } // Add SMemRowType ahead of SDataRow static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) { + // copy the header firstly memcpy(pDest, pSrc, sizeof(SSubmitBlk)); - int nRows = htons(pSrc->numOfRows); - if (nRows <= 0) { + + int32_t nRows = htons(pDest->numOfRows); + int32_t dataLen = htonl(pDest->dataLen); + + if ((nRows <= 0) || (dataLen <= 0)) { return; } + char *pDestData = pDest->data; char *pSrcData = pSrc->data; - for (int i = 0; i < nRows; ++i) { + for (int32_t i = 0; i < nRows; ++i) { memRowSetType(pDestData, SMEM_ROW_DATA); memcpy(memRowDataBody(pDestData), pSrcData, 
dataRowLen(pSrcData)); pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData)); pSrcData = POINTER_SHIFT(pSrcData, dataRowLen(pSrcData)); ++(*lenExpand); } - int32_t dataLen = htonl(pDest->dataLen); pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t)); } +// Check SDataRow by comparing the SDataRow len and SSubmitBlk dataLen static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { - int32_t len = 0; + if ((nRows <= 0) || (dataLen <= 0)) { + return true; + } + int32_t len = 0, kvLen = 0; for (int i = 0; i < nRows; ++i) { len += dataRowLen(pBlkData); if (len > dataLen) { return false; } + + /** + * For SDataRow between version [2.1.5.0 and 2.1.6.X], it would never conflict. + * For SKVRow between version [2.1.5.0 and 2.1.6.X], it may conflict in below scenario + * - with 1st type byte 0x01 and sversion 0x0101(257), thus do further check + */ + if (dataRowLen(pBlkData) == 257) { + SMemRow memRow = pBlkData; + SKVRow kvRow = memRowKvBody(memRow); + int nCols = kvRowNCols(kvRow); + uint16_t calcTsOffset = (uint16_t)(TD_MEM_ROW_KV_HEAD_SIZE + sizeof(SColIdx) * nCols); + uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset; + if (calcTsOffset == realTsOffset) { + kvLen += memRowKvTLen(memRow); + } + } pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData)); } if (len != dataLen) { return false; } + if (kvLen == dataLen) { + return false; + } return true; } // for WAL SMemRow/SDataRow compatibility From 4227a8c19ca430e2fdfe840177d3c331b091bb4e Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Sat, 14 Aug 2021 09:41:39 +0800 Subject: [PATCH 065/239] update header --- src/wal/src/walWrite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 7523369dc2..9590aba224 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -352,7 +352,7 @@ static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { SMemRow memRow = pBlkData; SKVRow kvRow = memRowKvBody(memRow); int 
nCols = kvRowNCols(kvRow); - uint16_t calcTsOffset = (uint16_t)(TD_MEM_ROW_KV_HEAD_SIZE + sizeof(SColIdx) * nCols); + uint16_t calcTsOffset = (uint16_t)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nCols); uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset; if (calcTsOffset == realTsOffset) { kvLen += memRowKvTLen(memRow); From 7ed460bd035031a5ebc84b236d5b974d705ff385 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Sat, 14 Aug 2021 11:09:14 +0800 Subject: [PATCH 066/239] code optimization --- src/wal/src/walWrite.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 9590aba224..e991bf02aa 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -388,7 +388,7 @@ static int walSMemRowCheck(SWalHead *pHead) { } pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen); } - + ASSERT(nTotalRows >= 0); SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); if (pWalHead == NULL) { return -1; @@ -544,6 +544,8 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch if (0 != walSMemRowCheck(pHead)) { wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + tfClose(tfd); + tfree(buffer); return TAOS_SYSTEM_ERROR(errno); } (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); From 9976d7a0931a43e3ab7f2785acae24c09f6013a1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 14 Aug 2021 11:10:36 +0800 Subject: [PATCH 067/239] Hotfix/sangshuduo/td 5702 taosdemo remove memop for master (#7359) * [TD-5702]: taosdemo remove memory operation. * [TD-5702]: taosdemo remove memory operation. * add remainderBufLen to check row data generation. * row data generation with remainder buffer length checking. 
* git checkout --patch hotfix/sangshuduo/TD-5702-taosdemo-remove-memop taosdemo.c Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 38 +++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 3b91de32b0..f3270a22f2 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1340,8 +1340,8 @@ static char *rand_bool_str(){ static int32_t rand_bool(){ static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 2; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 2; } static char *rand_tinyint_str() @@ -1356,8 +1356,8 @@ static int32_t rand_tinyint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 128; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 128; } static char *rand_smallint_str() @@ -1372,8 +1372,8 @@ static int32_t rand_smallint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 32767; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 32767; } static char *rand_int_str() @@ -1388,8 +1388,8 @@ static int32_t rand_int() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND]; } static char *rand_bigint_str() @@ -1404,8 +1404,8 @@ static int64_t rand_bigint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randbigint[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randbigint[cursor % MAX_PREPARED_RAND]; } static char *rand_float_str() @@ -1421,8 +1421,8 @@ static float rand_float() { static int cursor; cursor++; - cursor = cursor % 
MAX_PREPARED_RAND; - return g_randfloat[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randfloat[cursor % MAX_PREPARED_RAND]; } static char *demo_current_float_str() @@ -1437,8 +1437,9 @@ static float UNUSED_FUNC demo_current_float() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return (float)(9.8 + 0.04 * (g_randint[cursor] % 10) + g_randfloat[cursor]/1000000000); + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return (float)(9.8 + 0.04 * (g_randint[cursor % MAX_PREPARED_RAND] % 10) + + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000); } static char *demo_voltage_int_str() @@ -1453,8 +1454,8 @@ static int32_t UNUSED_FUNC demo_voltage_int() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return 215 + g_randint[cursor] % 10; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return 215 + g_randint[cursor % MAX_PREPARED_RAND] % 10; } static char *demo_phase_float_str() { @@ -1467,8 +1468,9 @@ static char *demo_phase_float_str() { static float UNUSED_FUNC demo_phase_float(){ static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return (float)((115 + g_randint[cursor] % 10 + g_randfloat[cursor]/1000000000)/360); + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return (float)((115 + g_randint[cursor % MAX_PREPARED_RAND] % 10 + + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000)/360); } #if 0 From 9161244c22aa9584e3690c7158ac1ccce54e0d64 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Sat, 14 Aug 2021 12:04:12 +0800 Subject: [PATCH 068/239] [TD-6078]: fix binary/nchar null error in node.js [ci skip] (#7366) --- src/connector/nodejs/nodetaos/cinterface.js | 26 ++++++++++++++-- src/connector/nodejs/test/testnchar.js | 33 +++++++++++++++++++++ 2 files changed, 57 insertions(+), 2 deletions(-) create mode 100644 src/connector/nodejs/test/testnchar.js diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 
03d27e5593..5ba2739c35 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -109,6 +109,24 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) return res; } +function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + + let currOffset = 0; + while (currOffset < data.length) { + let len = data.readIntLE(currOffset, 2); + let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; + if (dataEntry[0] == 255) { + res.push(null) + } else { + res.push(dataEntry.toString("utf-8")); + } + currOffset += nbytes; + } + return res; +} + function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; @@ -117,7 +135,11 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) while (currOffset < data.length) { let len = data.readIntLE(currOffset, 2); let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; - res.push(dataEntry.toString("utf-8")); + if (dataEntry[0] == 255 && dataEntry[1] == 255) { + res.push(null) + } else { + res.push(dataEntry.toString("utf-8")); + } currOffset += nbytes; } return res; @@ -132,7 +154,7 @@ let convertFunctions = { [FieldTypes.C_BIGINT]: convertBigint, [FieldTypes.C_FLOAT]: convertFloat, [FieldTypes.C_DOUBLE]: convertDouble, - [FieldTypes.C_BINARY]: convertNchar, + [FieldTypes.C_BINARY]: convertBinary, [FieldTypes.C_TIMESTAMP]: convertTimestamp, [FieldTypes.C_NCHAR]: convertNchar } diff --git a/src/connector/nodejs/test/testnchar.js b/src/connector/nodejs/test/testnchar.js new file mode 100644 index 0000000000..68fad89c22 --- /dev/null +++ b/src/connector/nodejs/test/testnchar.js @@ -0,0 +1,33 @@ +const taos = require('../tdengine'); +var 
conn = taos.connect({ host: "localhost" }); +var c1 = conn.cursor(); + + +function checkData(data, row, col, expect) { + let checkdata = data[row][col]; + if (checkdata == expect) { + // console.log('check pass') + } + else { + console.log('check failed, expect ' + expect + ', but is ' + checkdata) + } +} + +c1.execute('drop database if exists testnodejsnchar') +c1.execute('create database testnodejsnchar') +c1.execute('use testnodejsnchar'); +c1.execute('create table tb (ts timestamp, value float, text binary(200))') +c1.execute("insert into tb values('2021-06-10 00:00:00', 24.7, '中文10000000000000000000000');") - +c1.execute('insert into tb values(1623254400150, 24.7, NULL);') +c1.execute('import into tb values(1623254400300, 24.7, "中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000");') +sql = 'select * from tb;' + +console.log('*******************************************') + +c1.execute(sql); +data = c1.fetchall(); +console.log(data) +//check data about insert data +checkData(data, 0, 2, '中文10000000000000000000000') +checkData(data, 1, 2, null) +checkData(data, 2, 2, '中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000') \ No newline at end of file From f1c6cc11e64ce62ab17ca4a875dc218103e46ee6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 14 Aug 2021 13:45:45 +0800 Subject: [PATCH 069/239] [td-255] Fix error found by regression test, and rename a macro. 
--- src/common/src/tglobal.c | 2 +- src/util/inc/tcompare.h | 8 ++++---- src/util/src/tcompare.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index b4ccdf6e3b..d880a73e84 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -79,7 +79,7 @@ int32_t tsCompressMsgSize = -1; // client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; -int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN; +int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN; int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 624d92e36a..84346cc79c 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -22,10 +22,10 @@ extern "C" { #include "os.h" -#define TSDB_PATTERN_MATCH 0 -#define TSDB_PATTERN_NOMATCH 1 -#define TSDB_PATTERN_NOWILDCARDMATCH 2 -#define TSDB_PATTERN_STRING_MAX_LEN 100 +#define TSDB_PATTERN_MATCH 0 +#define TSDB_PATTERN_NOMATCH 1 +#define TSDB_PATTERN_NOWILDCARDMATCH 2 +#define TSDB_PATTERN_STRING_DEFAULT_LEN 100 #define FLT_COMPAR_TOL_FACTOR 4 #define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON)) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 5461b4b689..309aa55925 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -358,13 +358,13 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE); - wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); + wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); 
free(pattern); + return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } From 8b2b278fb02d844c830509c56126bd85f4227e13 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 15 Aug 2021 05:49:16 +0000 Subject: [PATCH 070/239] [TD-6087] merge distinct to master --- src/client/src/tscSQLParser.c | 54 ++++++++---- src/client/src/tscServer.c | 2 +- src/client/src/tscUtil.c | 4 +- src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 13 +++ src/query/inc/qExecutor.h | 10 ++- src/query/src/qExecutor.c | 152 +++++++++++++++++++++------------- 7 files changed, 157 insertions(+), 79 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index fab7e8fa3b..f25ea70f32 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1940,20 +1940,6 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) { pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; } -bool isValidDistinctSql(SQueryInfo* pQueryInfo) { - if (pQueryInfo == NULL) { - return false; - } - if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY - && (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) { - return false; - } - if (tscNumOfExprs(pQueryInfo) == 1){ - return true; - } - return false; -} - static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) { size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < numOfExprs; ++i) { @@ -2043,9 +2029,11 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS const char* msg1 = "too many items in selection clause"; const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "not support query expression"; - const char* msg4 = "only support distinct one column or tag"; + const char* msg4 = "not support distinct mixed with proj/agg func"; const char* msg5 = "invalid function name"; - const char* msg6 = "_block_dist not support subquery, only support stable/table"; 
+ const char* msg6 = "not support distinct mixed with join"; + const char* msg7 = "not support distinct mixed with groupby"; + const char* msg8 = "not support distinct in nest query"; // too many result columns not support order by in query if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { @@ -2056,18 +2044,25 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); } + bool hasDistinct = false; + bool hasAgg = false; size_t numOfExpr = taosArrayGetSize(pSelNodeList); + int32_t distIdx = -1; for (int32_t i = 0; i < numOfExpr; ++i) { int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo); tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i); if (hasDistinct == false) { hasDistinct = (pItem->distinct == true); + distIdx = hasDistinct ? i : -1; } int32_t type = pItem->pNode->type; if (type == SQL_NODE_SQLFUNCTION) { + hasAgg = true; + if (hasDistinct) break; + pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { @@ -2108,10 +2103,22 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS } } + //TODO(dengyihao), refactor as function + //handle distinct func mixed with other func if (hasDistinct == true) { - if (!isValidDistinctSql(pQueryInfo) ) { + if (distIdx != 0 || hasAgg) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + if (joinQuery) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } + if (pQueryInfo->groupbyExpr.numOfGroupCols != 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); + } + if (pQueryInfo->pDownstream != NULL) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + } + pQueryInfo->distinct = true; } @@ -5512,6 +5519,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* 
msg1 = "invalid column name"; const char* msg2 = "order by primary timestamp, first tag or groupby column in groupby clause allowed"; const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed"; + const char* msg4 = "orderby column must projected in subquery"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -5627,6 +5635,17 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // orderby ts query on super table if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + bool found = false; + for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + found = true; + break; + } + } + if (!found && pQueryInfo->pDownstream) { + return invalidOperationMsg(pMsgBuf, msg4); + } addPrimaryTsColIntoResult(pQueryInfo, pCmd); } } @@ -8418,6 +8437,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS pSub->pUdfInfo = pUdfInfo; pSub->udfCopy = true; + pSub->pDownstream = pQueryInfo; int32_t code = validateSqlNode(pSql, p, pSub); if (code != TSDB_CODE_SUCCESS) { return code; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index e809fe3137..c0723e210a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -409,7 +409,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) { if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) || - (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY))) { + (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) 
&& pQueryInfo->distinct)) { // do nothing in case of super table subquery } else { pSql->retry += 1; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f54827911a..f0a46d3e48 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3533,8 +3533,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pNewQueryInfo->numOfTables = 0; pNewQueryInfo->pTableMetaInfo = NULL; pNewQueryInfo->bufLen = pQueryInfo->bufLen; - pNewQueryInfo->buf = malloc(pQueryInfo->bufLen); + + + pNewQueryInfo->distinct = pQueryInfo->distinct; if (pNewQueryInfo->buf == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 25d1c90ec5..ed35168457 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -59,6 +59,7 @@ extern char tsLocale[]; extern char tsCharset[]; // default encode string extern int8_t tsEnableCoreFile; extern int32_t tsCompressMsgSize; +extern int32_t tsMaxNumOfDistinctResults; extern char tsTempDir[]; //query buffer management diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index d880a73e84..e4ff353787 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -89,6 +89,9 @@ int32_t tsMaxNumOfOrderedResults = 100000; // 10 ms for sliding time, the value will changed in case of time precision changed int32_t tsMinSlidingTime = 10; +// the maxinum number of distict query result +int32_t tsMaxNumOfDistinctResults = 1000 * 10000; + // 1 us for interval time range, changed accordingly int32_t tsMinIntervalTime = 1; @@ -546,6 +549,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "maxNumOfDistinctRes"; + cfg.ptr = &tsMaxNumOfDistinctResults; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 10*10000; + cfg.maxValue = 10000*10000; + 
cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "numOfMnodes"; cfg.ptr = &tsNumOfMnodes; cfg.valType = TAOS_CFG_VTYPE_INT32; diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index d30971ab47..e8b7855769 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -510,13 +510,21 @@ typedef struct SStateWindowOperatorInfo { bool reptScan; } SStateWindowOperatorInfo ; +typedef struct SDistinctDataInfo { + int32_t index; + int32_t type; + int32_t bytes; +} SDistinctDataInfo; + typedef struct SDistinctOperatorInfo { SHashObj *pSet; SSDataBlock *pRes; bool recordNullVal; //has already record the null value, no need to try again int64_t threshold; int64_t outputCapacity; - int32_t colIndex; + int32_t totalBytes; + char* buf; + SArray* pDistinctDataInfo; } SDistinctOperatorInfo; struct SGlobalMerger; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bae62f1c82..cf77820b1f 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -44,6 +44,10 @@ #define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0} +#define MULTI_KEY_DELIM "-" + +#define HASH_CAPACITY_LIMIT 10000000 + #define TIME_WINDOW_COPY(_dst, _src) do {\ (_dst).skey = (_src).skey;\ (_dst).ekey = (_src).ekey;\ @@ -3581,6 +3585,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo; int64_t tid = 0; + pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES); SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid); for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { @@ -6534,6 +6539,8 @@ static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { static void destroyDistinctOperatorInfo(void* param, int32_t numOfOutput) { SDistinctOperatorInfo* pInfo = (SDistinctOperatorInfo*) param; 
taosHashCleanup(pInfo->pSet); + tfree(pInfo->buf); + taosArrayDestroy(pInfo->pDistinctDataInfo); pInfo->pRes = destroyOutputBuf(pInfo->pRes); } @@ -7075,6 +7082,52 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf return pOperator; } +static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* pOperator, SSDataBlock *pBlock) { + if (taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput) { + // distinct info already inited + return true; + } + for (int i = 0; i < pOperator->numOfOutput; i++) { + pInfo->totalBytes += pOperator->pExpr[i].base.colBytes; + } + for (int i = 0; i < pOperator->numOfOutput; i++) { + int numOfBlock = (int)taosArrayGetSize(pBlock->pDataBlock); + assert(i < numOfBlock); + for (int j = 0; j < numOfBlock; j++) { + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, j); + if (pColDataInfo->info.colId == pOperator->pExpr[i].base.resColId) { + SDistinctDataInfo item = {.index = j, .type = pColDataInfo->info.type, .bytes = pColDataInfo->info.bytes}; + taosArrayInsert(pInfo->pDistinctDataInfo, i, &item); + } + } + } + pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * (pOperator->numOfOutput); + pInfo->buf = calloc(1, pInfo->totalBytes); + return taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput ? 
true : false; +} +static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBlock, int32_t rowId) { + char *p = pInfo->buf; + memset(p, 0, pInfo->totalBytes); + + for (int i = 0; i < taosArrayGetSize(pInfo->pDistinctDataInfo); i++) { + SDistinctDataInfo* pDistDataInfo = (SDistinctDataInfo *)taosArrayGet(pInfo->pDistinctDataInfo, i); + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); + char *val = ((char *)pColDataInfo->pData) + pColDataInfo->info.bytes * rowId; + if (isNull(val, pDistDataInfo->type)) { + p += pDistDataInfo->bytes; + continue; + } + if (IS_VAR_DATA_TYPE(pDistDataInfo->type)) { + memcpy(p, varDataVal(val), varDataLen(val)); + p += varDataLen(val); + } else { + memcpy(p, val, pDistDataInfo->bytes); + p += pDistDataInfo->bytes; + } + memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM)); + p += strlen(MULTI_KEY_DELIM); + } +} static SSDataBlock* hashDistinct(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; @@ -7082,11 +7135,9 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { return NULL; } - SDistinctOperatorInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; - pRes->info.rows = 0; SSDataBlock* pBlock = NULL; while(1) { @@ -7099,77 +7150,60 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { pOperator->status = OP_EXEC_DONE; break; } - if (pInfo->colIndex == -1) { - for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) { - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i); - if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) { - pInfo->colIndex = i; - break; - } - } - } - if (pInfo->colIndex == -1) { + if (!initMultiDistinctInfo(pInfo, pOperator, pBlock)) { setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); pOperator->status = OP_EXEC_DONE; - return NULL; - } - SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex); - - int16_t bytes = 
pColInfoData->info.bytes; - int16_t type = pColInfoData->info.type; - - // ensure the output buffer size - SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, 0); - if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) { - int32_t newSize = pRes->info.rows + pBlock->info.rows; - char* tmp = realloc(pResultColInfoData->pData, newSize * bytes); - if (tmp == NULL) { - return NULL; - } else { - pResultColInfoData->pData = tmp; + break; + } + + // ensure result output buf + if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) { + int32_t newSize = pRes->info.rows + pBlock->info.rows; + for (int i = 0; i < taosArrayGetSize(pRes->pDataBlock); i++) { + SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, i); + SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, i); + char* tmp = realloc(pResultColInfoData->pData, newSize * pDistDataInfo->bytes); + if (tmp == NULL) { + return NULL; + } else { + pResultColInfoData->pData = tmp; + } + } pInfo->outputCapacity = newSize; - } - } - - for(int32_t i = 0; i < pBlock->info.rows; ++i) { - char* val = ((char*)pColInfoData->pData) + bytes * i; - if (isNull(val, type)) { - continue; - } - char* p = val; - size_t keyLen = 0; - if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) { - tstr* var = (tstr*)(val); - p = var->data; - keyLen = varDataLen(var); - } else { - keyLen = bytes; - } - - int dummy; - void* res = taosHashGet(pInfo->pSet, p, keyLen); - if (res == NULL) { - taosHashPut(pInfo->pSet, p, keyLen, &dummy, sizeof(dummy)); - char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows; - memcpy(start, val, bytes); + } + for (int32_t i = 0; i < pBlock->info.rows; i++) { + buildMultiDistinctKey(pInfo, pBlock, i); + if (taosHashGet(pInfo->pSet, pInfo->buf, pInfo->totalBytes) == NULL) { + int32_t dummy; + taosHashPut(pInfo->pSet, pInfo->buf, pInfo->totalBytes, &dummy, sizeof(dummy)); + for (int j = 0; j < taosArrayGetSize(pRes->pDataBlock); 
j++) { + SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, j); // distinct meta info + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); //src + SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, j); // dist + char* val = ((char*)pColInfoData->pData) + pDistDataInfo->bytes * i; + char *start = pResultColInfoData->pData + pDistDataInfo->bytes * pInfo->pRes->info.rows; + memcpy(start, val, pDistDataInfo->bytes); + } pRes->info.rows += 1; - } - } + } + } if (pRes->info.rows >= pInfo->threshold) { break; } } - return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL; } SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo)); - pInfo->colIndex = -1; - pInfo->threshold = 10000000; // distinct result threshold - pInfo->outputCapacity = 4096; - pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK); + + pInfo->totalBytes = 0; + pInfo->buf = NULL; + pInfo->threshold = tsMaxNumOfDistinctResults; // distinct result threshold + pInfo->outputCapacity = 4096; + pInfo->pDistinctDataInfo = taosArrayInit(numOfOutput, sizeof(SDistinctDataInfo)); + pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); From 3aa24b6b235c75627b7bf95cb0f4bfd6827bd6aa Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 15 Aug 2021 13:52:07 +0800 Subject: [PATCH 071/239] Hotfix/sangshuduo/td 3197 taosdemo coverity scan for master (#7375) * [TD-3197]: taosdemo and taosdump coverity scan issues. * exit if read sample file failed. * fix converity scan issue. * fix coverity scan issue. * fix coverity scan memory leak. 
* fix resource leak reported by coverity scan. * fix taosdemo coverity scan issue. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f3270a22f2..18f5877e09 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5820,6 +5820,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "INT", strlen("INT"))) { int32_t *bind_int = malloc(sizeof(int32_t)); + assert(bind_int); if (value) { *bind_int = atoi(value); @@ -5834,6 +5835,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "BIGINT", strlen("BIGINT"))) { int64_t *bind_bigint = malloc(sizeof(int64_t)); + assert(bind_bigint); if (value) { *bind_bigint = atoll(value); @@ -5848,6 +5850,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "FLOAT", strlen("FLOAT"))) { float *bind_float = malloc(sizeof(float)); + assert(bind_float); if (value) { *bind_float = (float)atof(value); @@ -5862,6 +5865,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "DOUBLE", strlen("DOUBLE"))) { double *bind_double = malloc(sizeof(double)); + assert(bind_double); if (value) { *bind_double = atof(value); @@ -5876,6 +5880,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "SMALLINT", strlen("SMALLINT"))) { int16_t *bind_smallint = malloc(sizeof(int16_t)); + assert(bind_smallint); if (value) { *bind_smallint = (int16_t)atoi(value); @@ -5890,6 +5895,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "TINYINT", strlen("TINYINT"))) { int8_t *bind_tinyint = malloc(sizeof(int8_t)); + assert(bind_tinyint); if (value) { *bind_tinyint = (int8_t)atoi(value); @@ -5904,6 +5910,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "BOOL", 
strlen("BOOL"))) { int8_t *bind_bool = malloc(sizeof(int8_t)); + assert(bind_bool); if (value) { if (strncasecmp(value, "true", 4)) { @@ -5923,6 +5930,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "TIMESTAMP", strlen("TIMESTAMP"))) { int64_t *bind_ts2 = malloc(sizeof(int64_t)); + assert(bind_ts2); if (value) { if (strchr(value, ':') && strchr(value, '-')) { @@ -5937,6 +5945,7 @@ static int32_t prepareStmtBindArrayByType( if (TSDB_CODE_SUCCESS != taosParseTime( value, &tmpEpoch, strlen(value), timePrec, 0)) { + free(bind_ts2); errorPrint("Input %s, time format error!\n", value); return -1; } @@ -6261,7 +6270,7 @@ static int32_t prepareStbStmtBindTag( char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, g_args.len_of_binary); + __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -6293,7 +6302,7 @@ static int32_t prepareStbStmtBindRand( char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, g_args.len_of_binary); + __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } From 6a18e3d3d512ed6eea96f04b717abfb359139167 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 15 Aug 2021 15:45:30 +0000 Subject: [PATCH 072/239] [TD-6097] Crash occurs when the select function in the subquery is used with group by --- src/client/src/tscSQLParser.c | 9 +++------ src/client/src/tscUtil.c | 1 - 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index c7db7b5d4b..4e5245742c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2643,7 +2643,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO); } else if 
(info.precision == TSDB_TIME_PRECISION_MICRO) { tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI); - } + } if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10); @@ -2677,8 +2677,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col assert(ids.num == 1); tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); } - tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid); + return TSDB_CODE_SUCCESS; } @@ -3060,7 +3060,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s); } } - tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); return TSDB_CODE_SUCCESS; } @@ -8418,7 +8417,7 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) { n += 1; } - + info->numOfColumns = n; return meta; } @@ -8447,8 +8446,6 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS return code; } - pSub->pDownstream = pQueryInfo; - // create dummy table meta info STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo)); if (pTableMetaInfo1 == NULL) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f0a46d3e48..c2cf24520d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1503,7 +1503,6 @@ void tscFreeSqlObj(SSqlObj* pSql) { tscFreeSqlResult(pSql); tscResetSqlCmd(pCmd, false); - memset(pCmd->payload, 0, (size_t)pCmd->allocSize); tfree(pCmd->payload); pCmd->allocSize = 0; From d8897baa9cb3b9e8024889df16fdcbe6de697970 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 16 Aug 2021 08:51:02 +0800 Subject: [PATCH 073/239] [TD-5998]:_block_dist() only support tables, not subqueries --- src/client/src/tscSQLParser.c | 6 ++++++ tests/script/general/compute/block_dist.sim | 6 +++++- 2 files changed, 
11 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6df724881e..7b936487f0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2045,6 +2045,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS const char* msg3 = "not support query expression"; const char* msg4 = "only support distinct one column or tag"; const char* msg5 = "invalid function name"; + const char* msg6 = "_block_dist not support subquery, only support stable/table"; // too many result columns not support order by in query if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { @@ -2068,6 +2069,11 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS int32_t type = pItem->pNode->type; if (type == SQL_NODE_SQLFUNCTION) { pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); + + if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } + SUdfInfo* pUdfInfo = NULL; if (pItem->pNode->functionId < 0) { pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); diff --git a/tests/script/general/compute/block_dist.sim b/tests/script/general/compute/block_dist.sim index 51cf903654..5343c1db28 100644 --- a/tests/script/general/compute/block_dist.sim +++ b/tests/script/general/compute/block_dist.sim @@ -84,6 +84,10 @@ if $rows != 1 then return -1 endi +print ============== TD-5998 +sql_error select _block_dist() from (select * from $nt) +sql_error select _block_dist() from (select * from $mt) + print =============== clear sql drop database $db sql show databases @@ -91,4 +95,4 @@ if $rows != 0 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 
43bd6c65871c498842b940386facd421b2335aac Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Mon, 16 Aug 2021 09:37:13 +0800 Subject: [PATCH 074/239] [ci skip] update daily performance script --- tests/pytest/tools/taosdemoPerformance.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 4a5abd49d8..1d28a2708f 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -145,26 +145,26 @@ class taosdemoPerformace: binPath = buildPath + "/build/bin/" os.system( - "%staosdemo -f %s > taosdemoperf.txt 2>&1" % + "%staosdemo -f %s > /dev/null 2>&1" % (binPath, self.generateJson())) self.createTableTime = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'") + "grep 'Spent' insert_res.txt | awk 'NR==1{print $2}'") self.insertRecordsTime = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'") + "grep 'Spent' insert_res.txt | awk 'NR==2{print $2}'") self.recordsPerSecond = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'") + "grep 'Spent' insert_res.txt | awk 'NR==2{print $16}'") self.commitID = self.getCMDOutput("git rev-parse --short HEAD") delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $4}'") + "grep 'delay' insert_res.txt | awk '{print $4}'") self.avgDelay = delay[:-4] delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $6}'") + "grep 'delay' insert_res.txt | awk '{print $6}'") self.maxDelay = delay[:-4] delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $8}'") + "grep 'delay' insert_res.txt | awk '{print $8}'") self.minDelay = delay[:-3] - os.system("[ -f taosdemoperf.txt ] && rm taosdemoperf.txt") + os.system("[ -f insert_res.txt ] && rm insert_res.txt") def createTablesAndStoreData(self): cursor = self.conn2.cursor() @@ -185,7 +185,7 @@ class 
taosdemoPerformace: cursor.close() cursor1 = self.conn.cursor() - # cursor1.execute("drop database if exists %s" % self.insertDB) + cursor1.execute("drop database if exists %s" % self.insertDB) cursor1.close() if __name__ == '__main__': From 2db8feb7448e299e14a25f5247cf7f4942b1f4b9 Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 16 Aug 2021 13:23:54 +0800 Subject: [PATCH 075/239] fix binary interp function --- src/query/src/qAggMain.c | 34 ++++++++++++++++++++++++++++++---- src/query/src/qExecutor.c | 10 ++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 14d3f4e417..1587746587 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3790,11 +3790,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { static void interp_function(SQLFunctionCtx *pCtx) { // at this point, the value is existed, return directly if (pCtx->size > 0) { - // impose the timestamp check - TSKEY key = GET_TS_DATA(pCtx, 0); + bool ascQuery = (pCtx->order == TSDB_ORDER_ASC); + TSKEY key; + char *pData; + int32_t typedData = 0; + + if (ascQuery) { + key = GET_TS_DATA(pCtx, 0); + pData = GET_INPUT_DATA(pCtx, 0); + } else { + key = pCtx->start.key; + if (key == INT64_MIN) { + key = GET_TS_DATA(pCtx, 0); + pData = GET_INPUT_DATA(pCtx, 0); + } else { + if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) { + pData = pCtx->start.ptr; + } else { + typedData = 1; + pData = (char *)&pCtx->start.val; + } + } + } + + //if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) { if (key == pCtx->startTs) { - char *pData = GET_INPUT_DATA(pCtx, 0); - assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); + if (typedData) { + SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData); + } else { + assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); + } + SET_VAL(pCtx, 1, 1); } else 
{ interp_function_impl(pCtx); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bae62f1c82..a6a85b370e 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1327,6 +1327,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, pCtx[k].end.key = curTs; pCtx[k].end.val = v2; + + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + if (prevRowIndex == -1) { + pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index]; + } else { + pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes; + } + + pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes; + } } } else if (functionId == TSDB_FUNC_TWA) { SPoint point1 = (SPoint){.key = prevTs, .val = &v1}; From b077af04a6e1c4006005a045fdc8a6423f6859d4 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 16 Aug 2021 13:26:53 +0800 Subject: [PATCH 076/239] [6046] fix ts always in first output index using derivative function --- src/client/src/tscSQLParser.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 17b693faf2..781b1be76f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2594,13 +2594,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // set the first column ts for diff query if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { - colIndex += 1; SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false); SColumnList ids = createColumnList(1, 0, 0); - insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr); + insertResultField(pQueryInfo, colIndex, &ids, 
TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr); } SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false); From 7e56ba09e9624a2530efd93ee59efc43c9fd4047 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 13:26:59 +0800 Subject: [PATCH 077/239] obtain coredump execfn in script[ci skip] --- tests/test-all.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index 6e7963e787..8dd1ade9be 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -24,9 +24,9 @@ function stopTaosd { function dohavecore(){ corefile=`find $corepath -mmin 1` - core_file=`echo $corefile|cut -d " " -f2` - proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then + core_file=`echo $corefile|cut -d " " -f2` + proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,` echo 'taosd or taos has generated core' rm case.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then @@ -46,7 +46,7 @@ function dohavecore(){ fi fi if [[ $1 == 1 ]];then - echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit + echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit exit 8 fi fi From 059576a3a50baabbc68971bf6ba87cd98c00ea54 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 16 Aug 2021 13:28:07 +0800 Subject: [PATCH 078/239] [6046] fix ts always in first output index using derivative function --- src/query/src/qExecutor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 9000bcdf77..af7408880a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3622,7 +3622,7 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf // re-estabilish output buffer pointer. 
int32_t functionId = pBInfo->pCtx[i].functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { - pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; + if(i > 0) pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; } } } From 4b9c461c542e6e0e71beaf9d4b677e46ef89a822 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 16 Aug 2021 15:16:44 +0800 Subject: [PATCH 079/239] [6046] fix ts is null --- src/client/src/tscSQLParser.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 781b1be76f..bb2b8f3c52 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7052,6 +7052,10 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { continue; } + if(functionId == TSDB_FUNC_DERIVATIVE){ // to avoid ts function id was modufied below + tagTsColExists = false; + } + if (functionId < 0) { SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1); if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) { From 7759df3b0b5389857ce5461d4d804c2e7f70c6fc Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 16 Aug 2021 15:29:38 +0800 Subject: [PATCH 080/239] [TD-6054]: Filtered by tag with nchar value not as expected --- src/util/src/tcompare.c | 22 ++++++++- tests/pytest/query/filterWithinMultiNchar.py | 51 ++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 tests/pytest/query/filterWithinMultiNchar.py diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 309aa55925..921da82d44 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -199,7 +199,16 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 
1:-1; } else { - int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE); + char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + memcpy(pLeftTerm, varDataVal(pLeft), len1); + memcpy(pRightTerm, varDataVal(pRight), len2); + + int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); + + tfree(pLeftTerm); + tfree(pRightTerm); + if (ret == 0) { return 0; } else { @@ -510,7 +519,16 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { return t1->len > t2->len? 1:-1; } - int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE); + char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); + char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); + memcpy(t1_term, t1->data, t1->len); + memcpy(t2_term, t2->data, t2->len); + + int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); + + tfree(t1_term); + tfree(t2_term); + if (ret == 0) { return ret; } diff --git a/tests/pytest/query/filterWithinMultiNchar.py b/tests/pytest/query/filterWithinMultiNchar.py new file mode 100644 index 0000000000..15fcf4e24b --- /dev/null +++ b/tests/pytest/query/filterWithinMultiNchar.py @@ -0,0 +1,51 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create stable t6 (ts timestamp,val int,flow nchar(36)) tags(dev nchar(36),dev1 nchar(36),dev2 nchar(36))") + tdSql.execute("insert into t6004 using t6 (dev,dev1,dev2) tags ('b50c79bc-b102-48e6-bda1-4212263e46d0','b50c79bc-b102-48e6-bda1-4212263e46d0', 'b50c79bc-b102-48e6-bda1-4212263e46d0') values(now,1,'b50c79bc-b102-48e6-bda1-4212263e46d0')") + + + print("==============step2") + tdSql.query("select * from t6 where dev='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev1='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev2='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 28a50c47b96dcd3dcc684099050553349b443791 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 16 Aug 2021 15:29:38 +0800 Subject: [PATCH 081/239] [TD-6054]: Filtered by tag with nchar value not as expected --- src/util/src/tcompare.c | 22 ++++++++- tests/pytest/query/filterWithinMultiNchar.py | 51 ++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 tests/pytest/query/filterWithinMultiNchar.py diff --git a/src/util/src/tcompare.c 
b/src/util/src/tcompare.c index 354e7899c2..3619dad83b 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -129,7 +129,16 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE); + char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + memcpy(pLeftTerm, varDataVal(pLeft), len1); + memcpy(pRightTerm, varDataVal(pRight), len2); + + int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); + + tfree(pLeftTerm); + tfree(pRightTerm); + if (ret == 0) { return 0; } else { @@ -410,7 +419,16 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { return t1->len > t2->len? 1:-1; } - int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE); + char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); + char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); + memcpy(t1_term, t1->data, t1->len); + memcpy(t2_term, t2->data, t2->len); + + int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); + + tfree(t1_term); + tfree(t2_term); + if (ret == 0) { return ret; } diff --git a/tests/pytest/query/filterWithinMultiNchar.py b/tests/pytest/query/filterWithinMultiNchar.py new file mode 100644 index 0000000000..15fcf4e24b --- /dev/null +++ b/tests/pytest/query/filterWithinMultiNchar.py @@ -0,0 +1,51 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create stable t6 (ts timestamp,val int,flow nchar(36)) tags(dev nchar(36),dev1 nchar(36),dev2 nchar(36))") + tdSql.execute("insert into t6004 using t6 (dev,dev1,dev2) tags ('b50c79bc-b102-48e6-bda1-4212263e46d0','b50c79bc-b102-48e6-bda1-4212263e46d0', 'b50c79bc-b102-48e6-bda1-4212263e46d0') values(now,1,'b50c79bc-b102-48e6-bda1-4212263e46d0')") + + + print("==============step2") + tdSql.query("select * from t6 where dev='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev1='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev2='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 010d416f4a7ad5be907fa8672653f6a53414d99f Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 16 Aug 2021 16:07:43 +0800 Subject: [PATCH 082/239] fix crash issue --- src/client/src/tscUtil.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f54827911a..a193921af2 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3831,6 +3831,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { 
pNew->sqlstr = strdup(pSql->sqlstr); pNew->fp = tscSubqueryCompleteCallback; pNew->maxRetry = pSql->maxRetry; + + pNew->cmd.resColumnId = TSDB_RES_COL_ID; + tsem_init(&pNew->rspSem, 0, 0); SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id From d7eb5a22fec0e02884a8d1b93cb2f68a9dc7fa18 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 16:42:27 +0800 Subject: [PATCH 083/239] [TD-6035]add connector test in CI --- Jenkinsfile | 21 ++++++++++++++++++++- tests/gotest/case001/case001.sh | 5 +++-- tests/test-all.sh | 8 ++++---- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0eb9a1aa95..882d224407 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -160,7 +160,6 @@ pipeline { skipbuild='2' skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) println skipbuild - } sh''' rm -rf ${WORKSPACE}.tes @@ -225,6 +224,26 @@ pipeline { steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() + sh ''' + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + nohup taosd >/dev/null & + sleep 10 + ''' + sh ''' + cd ${WKC}/tests/examples/nodejs + npm install td2.0-connector > /dev/null 2>&1 + node nodejsChecker.js host=localhost + ''' + sh ''' + cd ${WKC}/tests/examples/C#/taosdemo + mcs -out:taosdemo *.cs > /dev/null 2>&1 + echo '' |./taosdemo + ''' + sh ''' + cd ${WKC}/tests/gotest + bash batchtest.sh + ''' sh ''' cd ${WKC}/tests ./test-all.sh b1fq diff --git a/tests/gotest/case001/case001.sh b/tests/gotest/case001/case001.sh index 831e9f83ac..94e5bb44e0 100644 --- a/tests/gotest/case001/case001.sh +++ b/tests/gotest/case001/case001.sh @@ -15,7 +15,8 @@ script_dir="$(dirname $(readlink -f $0))" ###### step 3: start build cd $script_dir rm -f go.* -go mod init demotest -go build +go mod init demotest > /dev/null 2>&1 +go mod tidy > /dev/null 2>&1 +go build > /dev/null 2>&1 sleep 1s ./demotest -h $1 -p $2 diff --git a/tests/test-all.sh 
b/tests/test-all.sh index 6e7963e787..c18951b8ac 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -12,7 +12,7 @@ IN_TDINTERNAL="community" function stopTaosd { echo "Stop taosd" - sudo systemctl stop taosd + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` while [ -n "$PID" ] do @@ -24,9 +24,9 @@ function stopTaosd { function dohavecore(){ corefile=`find $corepath -mmin 1` - core_file=`echo $corefile|cut -d " " -f2` - proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then + core_file=`echo $corefile|cut -d " " -f2` + proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,` echo 'taosd or taos has generated core' rm case.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then @@ -46,7 +46,7 @@ function dohavecore(){ fi fi if [[ $1 == 1 ]];then - echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit + echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit exit 8 fi fi From 01f42064f26d4c4a4c2d470d1b43f1bbf893f5f8 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 16:58:43 +0800 Subject: [PATCH 084/239] make ci happy From 3f62a65b67c7723f6aee41eeb00917a342970670 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 17:06:05 +0800 Subject: [PATCH 085/239] test --- tests/test-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index c18951b8ac..2578030165 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -12,7 +12,7 @@ IN_TDINTERNAL="community" function stopTaosd { echo "Stop taosd" - sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail ' PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` while [ -n "$PID" ] do From 7c36f3de4a6014e00671758574948ba00f780d95 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 16 Aug 
2021 17:15:56 +0800 Subject: [PATCH 086/239] [TD-6046] fix ts,derivative() output error for top/bottom/diff/derivative --- src/client/src/tscSQLParser.c | 5 +++-- src/query/inc/qAggMain.h | 1 + src/query/src/qAggMain.c | 14 ++++++++++++++ src/query/src/qExecutor.c | 10 ++++++++-- 4 files changed, 26 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index bb2b8f3c52..b9172f5c8c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2872,7 +2872,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX); - insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, + insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].name, pExpr); colIndex += 1; // the first column is ts @@ -7052,7 +7052,8 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { continue; } - if(functionId == TSDB_FUNC_DERIVATIVE){ // to avoid ts function id was modufied below + if(functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_DIFF || + functionId == TSDB_FUNC_TOP ||functionId == TSDB_FUNC_BOTTOM ){ // to avoid ts function id was modufied below tagTsColExists = false; } diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index d4116fbfb2..4b2de758ac 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -186,6 +186,7 @@ typedef struct SQLFunctionCtx { tVariant param[4]; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param int64_t *ptsList; // corresponding timestamp array list void *ptsOutputBuf; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ + void *ptsOriOutputBuf; SQLPreAggVal preAggVals; tVariant tag; diff --git 
a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index c19628eb37..89e91cb856 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -2789,6 +2789,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) { int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1; TSKEY *pTimestamp = pCtx->ptsOutputBuf; + TSKEY *pTimestampOri = pCtx->ptsOriOutputBuf; TSKEY *tsList = GET_TS_LIST(pCtx); double *pOutput = (double *)pCtx->pOutput; @@ -2808,6 +2809,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; + if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2835,6 +2837,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; + if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2861,6 +2864,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; + if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2888,6 +2892,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; + if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2914,6 +2919,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; + if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2940,6 +2946,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; + if 
(pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; @@ -2982,6 +2989,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1; TSKEY* pTimestamp = pCtx->ptsOutputBuf; + TSKEY* pTimestampOri = pCtx->ptsOriOutputBuf; TSKEY* tsList = GET_TS_LIST(pCtx); switch (pCtx->inputType) { @@ -2997,6 +3005,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = (int32_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; + if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3019,6 +3028,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = pData[i] - pCtx->param[1].i64; // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; + if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3041,6 +3051,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet SET_DOUBLE_VAL(pOutput, pData[i] - pCtx->param[1].dKey); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; + if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3063,6 +3074,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = (float)(pData[i] - pCtx->param[1].dKey); // direct previous may be null *pTimestamp = (tsList != NULL)? 
tsList[i]:0; + if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3085,6 +3097,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = (int16_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; + if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3108,6 +3121,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = (int8_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; + if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index af7408880a..4ce9a1d849 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3615,14 +3615,20 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf } } + char *tsbuf = NULL; for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { SColumnInfoData *pColInfo = taosArrayGet(pDataBlock->pDataBlock, i); pBInfo->pCtx[i].pOutput = pColInfo->pData + pColInfo->info.bytes * pDataBlock->info.rows; // re-estabilish output buffer pointer. 
int32_t functionId = pBInfo->pCtx[i].functionId; - if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { - if(i > 0) pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; + if (functionId == TSDB_FUNC_PRJ && pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP){ + tsbuf = pBInfo->pCtx[i].pOutput; + } + else if ((i > 0) && + (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) { + pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; + pBInfo->pCtx[i].ptsOriOutputBuf = tsbuf; } } } From fbd648b5f2a4b91527f1ee351b1817dc6a329fe1 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 16 Aug 2021 17:57:08 +0800 Subject: [PATCH 087/239] [TD-6046] fix ts,derivative() output error for top/bottom/diff/derivative --- src/query/src/qExecutor.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 4ce9a1d849..caa6620b73 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3616,19 +3616,32 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf } char *tsbuf = NULL; + int16_t tsFuncIndex = -1; + for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { + SColumnInfoData* pColInfo = taosArrayGet(pDataBlock->pDataBlock, i); + + // find the ts output data pointer + int32_t functionId = pBInfo->pCtx[i].functionId; + if (functionId == TSDB_FUNC_PRJ && pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + tsbuf = pColInfo->pData + pColInfo->info.bytes * pDataBlock->info.rows; + tsFuncIndex = i; + break; + } + } + for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { SColumnInfoData *pColInfo = taosArrayGet(pDataBlock->pDataBlock, i); pBInfo->pCtx[i].pOutput = pColInfo->pData + pColInfo->info.bytes * pDataBlock->info.rows; // re-estabilish output buffer pointer. 
int32_t functionId = pBInfo->pCtx[i].functionId; - if (functionId == TSDB_FUNC_PRJ && pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP){ - tsbuf = pBInfo->pCtx[i].pOutput; - } - else if ((i > 0) && + if ((i > 0) && (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) { pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; pBInfo->pCtx[i].ptsOriOutputBuf = tsbuf; + if(tsFuncIndex != -1) { + pBInfo->pCtx[tsFuncIndex].functionId = TSDB_FUNC_TS_DUMMY; // to avoid query data + } } } } From a6011d55a20fe9542480ccb66635a01d4d8f214c Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 16 Aug 2021 18:03:17 +0800 Subject: [PATCH 088/239] change version number --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index d34080aa43..84ef060196 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.20.12") + SET(TD_VER_NUMBER "2.0.20.13") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 7538765d2c..b08ae53ecd 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.20.12' +version: '2.0.20.13' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.20.12 + - usr/lib/libtaos.so.2.0.20.13 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From 203f5c7078645e990b7f2d015dcf84543be3959f Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 16 Aug 2021 18:14:21 +0800 Subject: [PATCH 089/239] [TD-6046] fix ts,derivative() output error for top/bottom/diff/derivative --- src/query/src/qExecutor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index caa6620b73..dfaf60d1d7 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2024,7 +2024,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { int32_t f = pExpr[0].base.functionId; - assert(f == TSDB_FUNC_TS || f == TSDB_FUNC_TS_DUMMY); + assert(f == TSDB_FUNC_TS || f == TSDB_FUNC_TS_DUMMY || f == TSDB_FUNC_PRJ); pCtx->param[2].i64 = pQueryAttr->order.order; pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; From da702897976e2d7a0ee84fe369a97c5eac1b7cf2 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 16 Aug 2021 18:18:39 +0800 Subject: [PATCH 090/239] [TD-6046] fix ts,derivative() output error for top/bottom/diff/derivative --- src/client/src/tscSQLParser.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b9172f5c8c..62f1843ee5 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7052,8 +7052,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { continue; } - if(functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_DIFF || - functionId == TSDB_FUNC_TOP ||functionId == TSDB_FUNC_BOTTOM ){ // to avoid ts function id was modufied below + if(functionId == TSDB_FUNC_DERIVATIVE || functionId == 
TSDB_FUNC_DIFF){ // to avoid ts function id was modufied below tagTsColExists = false; } From 296376df1fb92f5bbcd137d759c00689d56215e4 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 16 Aug 2021 18:36:09 +0800 Subject: [PATCH 091/239] [TD-6086]:num of tags taken from output cols instead of groupby expr --- src/client/src/tscLocalMerge.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 97d52cc684..5fe020bb33 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -396,7 +396,16 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde if (pQueryInfo->fillType != TSDB_FILL_NONE) { SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); - pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, + // support sql like: select selective_function, tag1... where ... group by tag3... 
fill(not fill none) + // the group by expr columns and select tags are different + int32_t numOfCols = tscNumOfFields(pQueryInfo); + int32_t numOfTags = 0; + for (int32_t i = 0; i < numOfCols; ++i) { + if (TSDB_COL_IS_TAG(pFillCol[i].flag)) { + numOfTags++; + } + } + pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, numOfTags, 4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, tinfo.precision, pQueryInfo->fillType, pFillCol, pSql); } From abff364c1e204c9040447f53179023f3033e3178 Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 16 Aug 2021 19:57:56 +0800 Subject: [PATCH 092/239] change version number --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index ffceecf492..12dc2b1b16 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.1.6.0") + SET(TD_VER_NUMBER "2.1.7.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index c04fa3298b..260af6f21b 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.1.6.0' +version: '2.1.7.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.1.6.0 + - usr/lib/libtaos.so.2.1.7.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From bdc6409e7740e6a711688fb25d7530fd43509e08 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 00:47:42 +0800 Subject: [PATCH 093/239] [TD-6046] fix ts derivative error --- src/client/src/tscSQLParser.c | 6 +--- src/query/inc/qAggMain.h | 1 - src/query/inc/qExecutor.h | 1 + src/query/src/qAggMain.c | 14 ---------- src/query/src/qExecutor.c | 52 +++++++++++++++++++++-------------- 5 files changed, 33 insertions(+), 41 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 62f1843ee5..781b1be76f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2872,7 +2872,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX); - insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, + insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].name, pExpr); colIndex += 1; // the first column is ts @@ -7052,10 +7052,6 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { continue; } - if(functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_DIFF){ // to avoid ts function id was modufied below - tagTsColExists = false; - } - if (functionId < 0) { SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1); if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) { diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index 4b2de758ac..d4116fbfb2 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -186,7 +186,6 @@ typedef struct SQLFunctionCtx { tVariant param[4]; // input 
parameter, e.g., top(k, 20), the number of results for top query is kept in param int64_t *ptsList; // corresponding timestamp array list void *ptsOutputBuf; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ - void *ptsOriOutputBuf; SQLPreAggVal preAggVals; tVariant tag; diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 56fab57e26..5b810e217e 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -595,6 +595,7 @@ int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int3 void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset); void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOfInputRows); void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity); +void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput); void freeParam(SQueryParam *param); int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param); diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 89e91cb856..c19628eb37 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -2789,7 +2789,6 @@ static void deriv_function(SQLFunctionCtx *pCtx) { int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 
0 : pCtx->size - 1; TSKEY *pTimestamp = pCtx->ptsOutputBuf; - TSKEY *pTimestampOri = pCtx->ptsOriOutputBuf; TSKEY *tsList = GET_TS_LIST(pCtx); double *pOutput = (double *)pCtx->pOutput; @@ -2809,7 +2808,6 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; - if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2837,7 +2835,6 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; - if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2864,7 +2861,6 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; - if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2892,7 +2888,6 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; - if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2919,7 +2914,6 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; - if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; notNullElems++; @@ -2946,7 +2940,6 @@ static void deriv_function(SQLFunctionCtx *pCtx) { if (pDerivInfo->ignoreNegative && *pOutput < 0) { } else { *pTimestamp = tsList[i]; - if (pTimestampOri) {*pTimestampOri = tsList[i]; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; @@ -2989,7 +2982,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 
0 : pCtx->size - 1; TSKEY* pTimestamp = pCtx->ptsOutputBuf; - TSKEY* pTimestampOri = pCtx->ptsOriOutputBuf; TSKEY* tsList = GET_TS_LIST(pCtx); switch (pCtx->inputType) { @@ -3005,7 +2997,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = (int32_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; - if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3028,7 +3019,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = pData[i] - pCtx->param[1].i64; // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; - if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3051,7 +3041,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet SET_DOUBLE_VAL(pOutput, pData[i] - pCtx->param[1].dKey); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; - if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3074,7 +3063,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = (float)(pData[i] - pCtx->param[1].dKey); // direct previous may be null *pTimestamp = (tsList != NULL)? 
tsList[i]:0; - if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3097,7 +3085,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = (int16_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; - if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } @@ -3121,7 +3108,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet *pOutput = (int8_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; - if (pTimestampOri) {*pTimestampOri = *pTimestamp; pTimestampOri += 1;} pOutput += 1; pTimestamp += 1; } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index dfaf60d1d7..3e1fac8fd3 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2024,7 +2024,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { int32_t f = pExpr[0].base.functionId; - assert(f == TSDB_FUNC_TS || f == TSDB_FUNC_TS_DUMMY || f == TSDB_FUNC_PRJ); + assert(f == TSDB_FUNC_TS || f == TSDB_FUNC_TS_DUMMY); pCtx->param[2].i64 = pQueryAttr->order.order; pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; @@ -3615,19 +3615,6 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf } } - char *tsbuf = NULL; - int16_t tsFuncIndex = -1; - for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { - SColumnInfoData* pColInfo = taosArrayGet(pDataBlock->pDataBlock, i); - - // find the ts output data pointer - int32_t functionId = pBInfo->pCtx[i].functionId; - if (functionId == TSDB_FUNC_PRJ && pColInfo->info.type == 
TSDB_DATA_TYPE_TIMESTAMP) { - tsbuf = pColInfo->pData + pColInfo->info.bytes * pDataBlock->info.rows; - tsFuncIndex = i; - break; - } - } for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { SColumnInfoData *pColInfo = taosArrayGet(pDataBlock->pDataBlock, i); @@ -3635,13 +3622,8 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf // re-estabilish output buffer pointer. int32_t functionId = pBInfo->pCtx[i].functionId; - if ((i > 0) && - (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) { - pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; - pBInfo->pCtx[i].ptsOriOutputBuf = tsbuf; - if(tsFuncIndex != -1) { - pBInfo->pCtx[tsFuncIndex].functionId = TSDB_FUNC_TS_DUMMY; // to avoid query data - } + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE){ + if (i > 0) pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; } } } @@ -3659,7 +3641,34 @@ void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity) { } } +void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput) { + bool needCopyTs = false; + int32_t tsNum = 0; + for (int32_t i = 0; i < numOfOutput; i++) { + int32_t functionId = pCtx[i].functionId; + if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { + needCopyTs = true; + }else if(functionId == TSDB_FUNC_TS_COMP) { + tsNum++; + } + } + char *src = NULL; + for (int32_t col = 0; col < numOfOutput; ++col) { + SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); + if (strlen(pColRes->pData) != 0) { + src = pColRes->pData; // find ts data + } + } + if (!needCopyTs) return; + if (tsNum < 2) return; + if (src == NULL) return; + + for (int32_t col = 0; col < numOfOutput; ++col) { + SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); + memcpy(pColRes->pData, src, pColRes->info.bytes * 
pRes->info.rows); + } +} void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size) { for (int32_t j = 0; j < size; ++j) { @@ -5635,6 +5644,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) { if (pRes->info.rows >= 1000/*pRuntimeEnv->resultInfo.threshold*/) { break; } + copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput); } clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput); From 43a6843dba067997a2db892f60e4abbd1f9a36e8 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 00:57:24 +0800 Subject: [PATCH 094/239] [TD-6046] fix ts derivative error --- src/query/src/qExecutor.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 3e1fac8fd3..6fd9f70fe6 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3644,22 +3644,20 @@ void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity) { void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput) { bool needCopyTs = false; int32_t tsNum = 0; + char *src = NULL; for (int32_t i = 0; i < numOfOutput; i++) { int32_t functionId = pCtx[i].functionId; if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { needCopyTs = true; - }else if(functionId == TSDB_FUNC_TS_COMP) { + }else if(functionId == TSDB_FUNC_TS_DUMMY) { + SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i); + if (strlen(pColRes->pData) != 0) { + src = pColRes->pData; // find ts data + } tsNum++; } } - - char *src = NULL; - for (int32_t col = 0; col < numOfOutput; ++col) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); - if (strlen(pColRes->pData) != 0) { - src = pColRes->pData; // find ts data - } - } + if (!needCopyTs) return; if (tsNum < 2) return; if (src == NULL) return; From dff67958b1ba5c7fa9d3aed1185a7ecbc2ed4131 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 01:02:53 +0800 Subject: [PATCH 095/239] [TD-6046] fix ts 
derivative error --- src/query/src/qExecutor.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 6fd9f70fe6..7ccaeb7ac9 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3657,14 +3657,17 @@ void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput) tsNum++; } } - + if (!needCopyTs) return; if (tsNum < 2) return; if (src == NULL) return; - for (int32_t col = 0; col < numOfOutput; ++col) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); - memcpy(pColRes->pData, src, pColRes->info.bytes * pRes->info.rows); + for (int32_t i = 0; i < numOfOutput; i++) { + int32_t functionId = pCtx[i].functionId; + if(functionId == TSDB_FUNC_TS_DUMMY) { + SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i); + memcpy(pColRes->pData, src, pColRes->info.bytes * pRes->info.rows); + } } } From 2bd11ebfd66bf9525124b4c316674b365b2a1c11 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 17 Aug 2021 09:12:16 +0800 Subject: [PATCH 096/239] [TD-6086]:add test case --- tests/script/general/parser/function.sim | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 65058333fb..c00f6fc898 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -310,6 +310,12 @@ if $rows != 6 then return -1 endi +print =============================> TD-6086 +sql create stable td6086st(ts timestamp, d double) tags(t nchar(50)); +sql create table td6086ct1 using td6086st tags("ct1"); +sql create table td6086ct2 using td6086st tags("ct2"); +sql SELECT LAST(d),t FROM td6086st WHERE tbname in ('td6086ct1', 'td6086ct2') and ts>="2019-07-30 00:00:00" and ts<="2021-08-31 00:00:00" interval(1800s) fill(prev) GROUP BY tbname; + print ==================> td-2624 sql create table tm2(ts timestamp, k int, b binary(12)); sql insert 
into tm2 values('2011-01-02 18:42:45.326', -1,'abc'); From 497159f3aa53b605585378fc4d5804a573cf1a4f Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 09:40:56 +0800 Subject: [PATCH 097/239] fix subquery reparse sql issue --- src/client/src/tscSubquery.c | 13 +++++++++---- src/client/src/tscUtil.c | 18 ++++++++++++------ 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 553d2c0804..04b10caa4e 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2735,18 +2735,23 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - code = tsParseSql(pParentSql, true); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - pParentSql->res.code = code; - tscAsyncResultOnError(pParentSql); + userSql->res.code = code; + tscAsyncResultOnError(userSql); return; } - executeQuery(pParentSql, pQueryInfo); + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + + pQueryInfo = tscGetQueryInfo(&userSql->cmd); + executeQuery(userSql, pQueryInfo); } else { (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 8c2165cadf..ae78c7a81e 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3771,19 +3771,24 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - code = tsParseSql(pParentSql, true); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + code = tsParseSql(userSql, 
true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - pParentSql->res.code = code; - tscAsyncResultOnError(pParentSql); + userSql->res.code = code; + tscAsyncResultOnError(userSql); return; } - SQueryInfo *pQueryInfo = tscGetQueryInfo(pParentCmd); - executeQuery(pParentSql, pQueryInfo); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); + + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + + executeQuery(userSql, pQueryInfo); return; } @@ -3805,8 +3810,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { } if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly + assert(pSql->subState.numOfSub == 0); pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); - + assert(pSql->pSubs == NULL); pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); code = pthread_mutex_init(&pSql->subState.mutex, NULL); From 762284c409276108e781c3599b3f55fa973eb00b Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 09:52:39 +0800 Subject: [PATCH 098/239] fix bug --- src/client/inc/tscSubquery.h | 3 +++ src/client/src/tscSubquery.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index f0349c2b3d..f21ce67e9e 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -50,6 +50,9 @@ void tscUnlockByThread(int64_t *lockedBy); int tsInsertInitialCheck(SSqlObj *pSql); +void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); + + #ifdef __cplusplus } #endif diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 04b10caa4e..52bf5d9bfe 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2034,7 +2034,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { tscAsyncResultOnError(pSql); } -static void doCleanupSubqueries(SSqlObj *pSql, 
int32_t numOfSubs) { +void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0); for(int32_t i = 0; i < numOfSubs; ++i) { From 0a969d042c7dfaed35f130d2a77168b0a4870238 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:06:01 +0800 Subject: [PATCH 099/239] fix bug --- src/client/src/tscSubquery.c | 1 + src/client/src/tscUtil.c | 1 + 2 files changed, 2 insertions(+) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 52bf5d9bfe..32955bef3c 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2749,6 +2749,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO } doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; pQueryInfo = tscGetQueryInfo(&userSql->cmd); executeQuery(userSql, pQueryInfo); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ae78c7a81e..098204605a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4934,3 +4934,4 @@ void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) { taosHashRemove(tscTableMetaMap, fname, len); tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap)); } + \ No newline at end of file From af641899eb93157f586ab5e3bb47c602d8027822 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:07:24 +0800 Subject: [PATCH 100/239] fix bug --- src/client/src/tscUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 098204605a..75473c7baf 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3787,6 +3787,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 
0; executeQuery(userSql, pQueryInfo); return; @@ -4934,4 +4935,3 @@ void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) { taosHashRemove(tscTableMetaMap, fname, len); tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap)); } - \ No newline at end of file From 44be3ee34df9b406f00cf1da458ce22649d14928 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:11:11 +0800 Subject: [PATCH 101/239] fix bug --- src/client/src/tscSubquery.c | 6 +++--- src/client/src/tscUtil.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 32955bef3c..07ea4bf77f 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2737,6 +2737,9 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; @@ -2748,9 +2751,6 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO return; } - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; - pQueryInfo = tscGetQueryInfo(&userSql->cmd); executeQuery(userSql, pQueryInfo); } else { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 75473c7baf..9623b247a5 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3773,6 +3773,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; @@ -3786,9 +3789,6 @@ 
static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; - executeQuery(userSql, pQueryInfo); return; } From 6adcba9dd78f3d25cbde2f9a435d62e0d8600d07 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:23:56 +0800 Subject: [PATCH 102/239] fix bug --- src/client/src/tscSubquery.c | 10 +++++----- src/client/src/tscUtil.c | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 07ea4bf77f..f709be3767 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2729,17 +2729,17 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; - code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 9623b247a5..3583c8140c 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3765,16 +3765,16 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { pParentCmd->pTableMetaMap = 
tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { From c180e69290124fac84468b5dae096804f33b98df Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 17 Aug 2021 10:31:11 +0800 Subject: [PATCH 103/239] [TD-4352]add tsdbMetaCompactRatio config item --- packaging/cfg/taos.cfg | 2 ++ src/common/src/tglobal.c | 11 +++++++++++ src/inc/taosdef.h | 1 + src/tsdb/src/tsdbCommit.c | 11 +++++++---- 4 files changed, 21 insertions(+), 4 deletions(-) diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 3ae4e9941e..fdbb5829f8 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -284,3 +284,5 @@ keepColumnName 1 # 0 no query allowed, queries are disabled # queryBufferSize -1 +# percent of redundant data in tsdb meta will compact meta data,0 means donot compact +# tsdbMetaCompactRatio 30 diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 44b3e87e7d..6c51ba22a1 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -150,6 +150,7 @@ int32_t tsMaxVgroupsPerDb = 0; int32_t tsMinTablePerVnode = TSDB_TABLES_STEP; int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES; int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP; +int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO; // balance int8_t 
tsEnableBalance = 1; @@ -1576,6 +1577,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "tsdbMetaCompactRatio"; + cfg.ptr = &tsTsdbMetaCompactRatio; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 0; + cfg.maxValue = 100; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); #ifdef TD_TSZ // lossy compress diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index fceeaea0ae..66811326ee 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -275,6 +275,7 @@ do { \ #define TSDB_MAX_TABLES 10000000 #define TSDB_DEFAULT_TABLES 1000000 #define TSDB_TABLES_STEP 1000 +#define TSDB_META_COMPACT_RATIO 30 #define TSDB_MIN_DAYS_PER_FILE 1 #define TSDB_MAX_DAYS_PER_FILE 3650 diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 9e441c1770..6bd47247a2 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -14,6 +14,8 @@ */ #include "tsdbint.h" +extern int32_t tsTsdbMetaCompactRatio; + #define TSDB_MAX_SUBBLOCKS 8 static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) { if (key < 0) { @@ -339,7 +341,7 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) { tsdbCloseMFile(&mf); tsdbUpdateMFile(pfs, &mf); - if (tsdbCompactMetaFile(pRepo, pfs, &mf) < 0) { + if (tsTsdbMetaCompactRatio > 0 && tsdbCompactMetaFile(pRepo, pfs, &mf) < 0) { tsdbError("compact meta file error"); } @@ -455,8 +457,9 @@ static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid) { static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { float delPercent = (float)(pMFile->info.nDels) / (float)(pMFile->info.nRecords); float tombPercent = (float)(pMFile->info.tombSize) / (float)(pMFile->info.size); + float compactRatio = (float)(tsTsdbMetaCompactRatio)/100; - if (delPercent < 0.33 && tombPercent < 0.33) { + if 
(delPercent < compactRatio && tombPercent < compactRatio) { return 0; } @@ -465,8 +468,8 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { return -1; } - tsdbInfo("begin compact tsdb meta file, nDels:%" PRId64 ",nRecords:%" PRId64 ",tombSize:%" PRId64 ",size:%" PRId64, - pMFile->info.nDels,pMFile->info.nRecords,pMFile->info.tombSize,pMFile->info.size); + tsdbInfo("begin compact tsdb meta file, ratio:%d, nDels:%" PRId64 ",nRecords:%" PRId64 ",tombSize:%" PRId64 ",size:%" PRId64, + tsTsdbMetaCompactRatio, pMFile->info.nDels,pMFile->info.nRecords,pMFile->info.tombSize,pMFile->info.size); SMFile mf; SDiskID did; From fa6f3c2fdb470452830d35c5008d87f27aa46d60 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 17 Aug 2021 11:13:03 +0800 Subject: [PATCH 104/239] change version number --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index 12dc2b1b16..148c33106a 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.1.7.0") + SET(TD_VER_NUMBER "2.1.7.1") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 260af6f21b..859d40cf69 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.1.7.0' +version: '2.1.7.1' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.1.7.0 + - usr/lib/libtaos.so.2.1.7.1 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From 6dac76a70d5de3c54832389a5ed57206f73cc9de Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 16:53:42 +0800 Subject: [PATCH 105/239] stop taosd using kill -9 --- tests/pytest/util/dnodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index ae4ba97eb3..698c3caa55 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -556,7 +556,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( From 9f0bbd0cb3a3148144a831306a2025b1a0e90b53 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 18:03:24 +0800 Subject: [PATCH 106/239] test --- tests/pytest/util/dnodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 698c3caa55..0f4919ba96 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -436,7 +436,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( @@ -445,7 +445,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" processID = subprocess.check_output(psCmd, 
shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( From 81cf139d0eef263e3a312ec7d0079ccade23f2b1 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 17 Aug 2021 11:32:03 +0800 Subject: [PATCH 107/239] [TD-6101]reduce python case execution time --- tests/pytest/fulltest.sh | 3 ++- tests/pytest/test.py | 3 +-- tests/script/jenkins/basic.txt | 14 ++++++++------ tests/test-all.sh | 3 +++ 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 600582098e..e444a0a318 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -152,6 +152,7 @@ python3 ./test.py -f import_merge/importCSV.py python3 ./test.py -f import_merge/import_update_0.py python3 ./test.py -f import_merge/import_update_1.py python3 ./test.py -f import_merge/import_update_2.py +python3 ./test.py -f update/merge_commit_data.py #======================p1-end=============== #======================p2-start=============== # tools @@ -180,7 +181,7 @@ python3 ./test.py -f update/allow_update-0.py python3 ./test.py -f update/append_commit_data.py python3 ./test.py -f update/append_commit_last-0.py python3 ./test.py -f update/append_commit_last.py -python3 ./test.py -f update/merge_commit_data.py + python3 ./test.py -f update/merge_commit_data2.py python3 ./test.py -f update/merge_commit_data2_update0.py diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 65abd3ef93..97dca6be18 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -87,7 +87,7 @@ if __name__ == "__main__": else: toBeKilled = "valgrind.bin" - killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled psCmd = "ps 
-ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled processID = subprocess.check_output(psCmd, shell=True) @@ -110,7 +110,6 @@ if __name__ == "__main__": time.sleep(2) tdLog.info('stop All dnodes') - sys.exit(0) tdDnodes.init(deployPath) tdDnodes.setTestCluster(testCluster) diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 6ad6a74eed..b4aad278d8 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -90,6 +90,14 @@ cd ../../../debug; make ./test.sh -f general/parser/function.sim ./test.sh -f unique/cluster/vgroup100.sim +./test.sh -f unique/http/admin.sim +./test.sh -f unique/http/opentsdb.sim + +./test.sh -f unique/import/replica2.sim +./test.sh -f unique/import/replica3.sim + +./test.sh -f general/alter/cached_schema_after_alter.sim + #======================b1-end=============== #======================b2-start=============== @@ -198,13 +206,7 @@ cd ../../../debug; make #======================b3-end=============== #======================b4-start=============== -./test.sh -f unique/http/admin.sim -./test.sh -f unique/http/opentsdb.sim -./test.sh -f unique/import/replica2.sim -./test.sh -f unique/import/replica3.sim - -./test.sh -f general/alter/cached_schema_after_alter.sim ./test.sh -f general/alter/count.sim ./test.sh -f general/alter/dnode.sim ./test.sh -f general/alter/import.sim diff --git a/tests/test-all.sh b/tests/test-all.sh index 2578030165..b54857cf88 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -179,6 +179,9 @@ function runPyCaseOneByOnefq() { start_time=`date +%s` date +%F\ %T | tee -a pytest-out.log echo -n $case + if [[ $1 =~ full ]] ; then + line=$line" -s" + fi $line > case.log 2>&1 && \ echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \ echo -e "${RED} failed${NC}" | tee -a pytest-out.log From 2a45714d95d48c5001281427bdb8215f1dc2bacb Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 11:32:06 +0800 Subject: [PATCH 108/239] [TD-6046] fix ts 
derivative error --- src/query/src/qExecutor.c | 7 +++---- tests/pytest/functions/function_derivative.py | 13 +++++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 7ccaeb7ac9..0a092f1c2c 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5590,6 +5590,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) { pRes->info.rows = getNumOfResult(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); if (pRes->info.rows >= pRuntimeEnv->resultInfo.threshold) { + copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput); clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput); return pRes; } @@ -5615,8 +5616,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) { if (*newgroup) { if (pRes->info.rows > 0) { pProjectInfo->existDataBlock = pBlock; - clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput); - return pInfo->pRes; + break; } else { // init output buffer for a new group data for (int32_t j = 0; j < pOperator->numOfOutput; ++j) { aAggs[pInfo->pCtx[j].functionId].xFinalize(&pInfo->pCtx[j]); @@ -5645,9 +5645,8 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) { if (pRes->info.rows >= 1000/*pRuntimeEnv->resultInfo.threshold*/) { break; } - copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput); } - + copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput); clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput); return (pInfo->pRes->info.rows > 0)? 
pInfo->pRes:NULL; } diff --git a/tests/pytest/functions/function_derivative.py b/tests/pytest/functions/function_derivative.py index 9d60129672..f701379f5e 100644 --- a/tests/pytest/functions/function_derivative.py +++ b/tests/pytest/functions/function_derivative.py @@ -54,6 +54,19 @@ class TDTestCase: tdSql.query("select derivative(col, 10s, 0) from stb group by tbname") tdSql.checkRows(10) + tdSql.query("select ts,derivative(col, 10s, 1),ts from stb group by tbname") + tdSql.checkRows(4) + tdSql.checkData(0, 0, self.ts + 10000) + tdSql.checkData(0, 1, self.ts + 10000) + tdSql.checkData(0, 3, self.ts + 10000) + tdSql.checkData(3, 0, self.ts + 70000) + tdSql.checkData(3, 1, self.ts + 70000) + tdSql.checkData(3, 3, self.ts + 70000) + + tdSql.query("select ts from(select ts,derivative(col, 10s, 0) from stb group by tbname") + + tdSql.checkData(0, 1, 1) + tdSql.error("select derivative(col, 10s, 0) from tb1 group by tbname") tdSql.query("select derivative(col, 10s, 1) from tb1") From b65bc4dc18ac8e346c61948a6a5d2d62600f36bd Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 11:35:55 +0800 Subject: [PATCH 109/239] fix bug --- src/client/src/tscSubquery.c | 3 +-- src/client/src/tscUtil.c | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index f709be3767..dfdbe9fd0b 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2731,8 +2731,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; + tscFreeSubobj(userSql); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3583c8140c..bc55357ddf 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,8 +3767,7 @@ static 
void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; + tscFreeSubobj(userSql); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; @@ -3815,6 +3814,7 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); assert(pSql->pSubs == NULL); pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); + assert(pSql->subState.states == NULL); pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); code = pthread_mutex_init(&pSql->subState.mutex, NULL); From 53aa829a8629c0098261d2c54ffdec1ad3867af5 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 11:40:14 +0800 Subject: [PATCH 110/239] fix bug --- src/client/src/tscSubquery.c | 3 ++- src/client/src/tscUtil.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index dfdbe9fd0b..4576af6e81 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2731,7 +2731,8 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeSubobj(userSql); + tscFreeSubobj(userSql); + tfree(pSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index bc55357ddf..cb797b5ceb 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,7 +3767,8 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeSubobj(userSql); + tscFreeSubobj(userSql); + tfree(pSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; From 
83ec49107996a241ff3bd234e0a303bbb603eac3 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 11:48:54 +0800 Subject: [PATCH 111/239] fix bug --- src/client/src/tscSubquery.c | 2 +- src/client/src/tscUtil.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 4576af6e81..a27a8a41a1 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2732,7 +2732,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeSubobj(userSql); - tfree(pSql->pSubs); + tfree(userSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index cb797b5ceb..ba533bb03b 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3768,7 +3768,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeSubobj(userSql); - tfree(pSql->pSubs); + tfree(userSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; From b37281d5d754dd10e4cc10c1b1cf7c409847945e Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 12:08:58 +0800 Subject: [PATCH 112/239] [TD-6046] fix ts derivative error --- src/query/src/qExecutor.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 0a092f1c2c..7b320d9f58 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3645,15 +3645,16 @@ void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput) bool needCopyTs = false; int32_t tsNum = 0; char *src = NULL; + int32_t preFunctionId = TSDB_FUNC_TS_DUMMY; for (int32_t i = 0; i < numOfOutput; i++) { int32_t functionId = pCtx[i].functionId; if (functionId == 
TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { needCopyTs = true; - }else if(functionId == TSDB_FUNC_TS_DUMMY) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i); - if (strlen(pColRes->pData) != 0) { - src = pColRes->pData; // find ts data + if (i > 0 && pCtx[i-1].functionId == TSDB_FUNC_TS_DUMMY){ + SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i - 1); // find ts data + src = pColRes->pData; } + }else if(functionId == TSDB_FUNC_TS_DUMMY) { tsNum++; } } From 989299e59f8c3584cc78f69d23197eaede98ae5e Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 12:11:31 +0800 Subject: [PATCH 113/239] [TD-6046] fix ts derivative error --- src/query/src/qExecutor.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 7b320d9f58..08146b6200 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3645,7 +3645,6 @@ void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput) bool needCopyTs = false; int32_t tsNum = 0; char *src = NULL; - int32_t preFunctionId = TSDB_FUNC_TS_DUMMY; for (int32_t i = 0; i < numOfOutput; i++) { int32_t functionId = pCtx[i].functionId; if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { From ee27f5758f08ff68a2d00f86aff936509a5c2cf0 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 12:24:34 +0800 Subject: [PATCH 114/239] [TD-6046] fix ts derivative error --- tests/pytest/functions/function_derivative.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/pytest/functions/function_derivative.py b/tests/pytest/functions/function_derivative.py index f701379f5e..0832977c3d 100644 --- a/tests/pytest/functions/function_derivative.py +++ b/tests/pytest/functions/function_derivative.py @@ -56,12 +56,12 @@ class TDTestCase: tdSql.query("select ts,derivative(col, 10s, 1),ts from stb group by tbname") tdSql.checkRows(4) - tdSql.checkData(0, 0, self.ts + 
10000) - tdSql.checkData(0, 1, self.ts + 10000) - tdSql.checkData(0, 3, self.ts + 10000) - tdSql.checkData(3, 0, self.ts + 70000) - tdSql.checkData(3, 1, self.ts + 70000) - tdSql.checkData(3, 3, self.ts + 70000) + tdSql.checkData(0, 0, "2018-09-17 09:00:10.000") + tdSql.checkData(0, 1, "2018-09-17 09:00:10.000") + tdSql.checkData(0, 3, "2018-09-17 09:00:10.000") + tdSql.checkData(3, 0, "2018-09-17 09:01:20.000") + tdSql.checkData(3, 1, "2018-09-17 09:01:20.000") + tdSql.checkData(3, 3, "2018-09-17 09:01:20.000") tdSql.query("select ts from(select ts,derivative(col, 10s, 0) from stb group by tbname") From ea981c627de14f9b2fb76d1505327d3336b289ca Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 12:28:52 +0800 Subject: [PATCH 115/239] [TD-6046] fix ts derivative error --- tests/pytest/functions/function_derivative.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pytest/functions/function_derivative.py b/tests/pytest/functions/function_derivative.py index 0832977c3d..7d34b3ce60 100644 --- a/tests/pytest/functions/function_derivative.py +++ b/tests/pytest/functions/function_derivative.py @@ -63,9 +63,9 @@ class TDTestCase: tdSql.checkData(3, 1, "2018-09-17 09:01:20.000") tdSql.checkData(3, 3, "2018-09-17 09:01:20.000") - tdSql.query("select ts from(select ts,derivative(col, 10s, 0) from stb group by tbname") + tdSql.query("select ts from(select ts,derivative(col, 10s, 0) from stb group by tbname)") - tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 0, "2018-09-17 09:00:10.000") tdSql.error("select derivative(col, 10s, 0) from tb1 group by tbname") From f85b81e0560b4ba9e4c82b2c8ff30be3a89ee7d3 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 12:47:48 +0800 Subject: [PATCH 116/239] fix bug --- src/client/src/tscSubquery.c | 1 + src/client/src/tscUtil.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index a27a8a41a1..b4fb832b14 100644 --- 
a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2740,6 +2740,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); + tscResetSqlCmd(&userSql->cmd, false); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ba533bb03b..d5323625da 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3775,6 +3775,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); + + + tscResetSqlCmd(&userSql->cmd, false); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { From 176af30aecb94b3698e65211ae542fbab304d619 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 12:59:30 +0800 Subject: [PATCH 117/239] fix mem leak --- src/client/inc/tscSubquery.h | 2 ++ src/client/src/tscSubquery.c | 15 +++++++++++++++ src/client/src/tscUtil.c | 2 ++ 3 files changed, 19 insertions(+) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index f21ce67e9e..9e16f3f900 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -52,6 +52,8 @@ int tsInsertInitialCheck(SSqlObj *pSql); void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); +void tscFreeRetrieveSupporters(SSqlObj *pSql); + #ifdef __cplusplus } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b4fb832b14..9761c5b4ca 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2050,6 +2050,19 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { } } +void tscFreeRetrieveSupporters(SSqlObj *pSql) { + for(int32_t i = 0; i < 
pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + assert(pSub != NULL); + + SRetrieveSupport* pSupport = pSub->param; + + tfree(pSupport->localBuffer); + tfree(pSub->param); + } +} + + void tscLockByThread(int64_t *lockedBy) { int64_t tid = taosGetSelfPthreadId(); int i = 0; @@ -2731,6 +2744,8 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + tscFreeRetrieveSupporters(pParentSql); + tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index d5323625da..67b1a1c199 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,6 +3767,8 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + tscFreeRetrieveSupporters(pParentSql); + tscFreeSubobj(userSql); tfree(userSql->pSubs); From 3885f10f8e2c6a1a62610be172dd32dfa2f5f7e1 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 16 Aug 2021 09:41:50 +0800 Subject: [PATCH 118/239] []:run all ci tests first --- src/client/inc/tscUtil.h | 1 + src/client/src/tscSQLParser.c | 12 ++++++++---- src/client/src/tscUtil.c | 21 +++++++++++++++++++++ 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 0b2d4fd115..e11748efbe 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -143,6 +143,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo); bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo); bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo); bool tscGroupbyColumn(SQueryInfo* pQueryInfo); +int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo); bool tscIsTopBotQuery(SQueryInfo* pQueryInfo); bool hasTagValOutput(SQueryInfo* pQueryInfo); bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo); diff --git 
a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4e5245742c..968d62d939 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5613,11 +5613,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pQueryInfo->groupbyExpr.orderType = p1->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } else if (isTopBottomQuery(pQueryInfo)) { + int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo); + assert(topBotIndex >= 1); /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, topBotIndex); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg2); } @@ -5706,11 +5708,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq SColIndex* pColIndex = taosArrayGet(columnInfo, 0); validOrder = (pColIndex->colIndex == index.columnIndex); } else { + int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo); + assert(topBotIndex >= 1); /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, topBotIndex); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg2); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 612d6a5642..6e0bf6f7d2 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -369,6 +369,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) { return false; } 
+int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) { + size_t numOfExprs = tscNumOfExprs(pQueryInfo); + + for (int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr == NULL) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TS) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) { + return i; + } + } + + return -1; +} + bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) { size_t numOfExprs = tscNumOfExprs(pQueryInfo); From e9ffa8b13a24f6136e7f1b4c104da471ab675db4 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 17 Aug 2021 11:48:42 +0800 Subject: [PATCH 119/239] [TD-6017]:add test case that top/bottom expr is not first expression --- tests/script/general/parser/limit.sim | 5 +++++ tests/script/general/parser/limit_tb.sim | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim index 23b85095c5..3af2cb3018 100644 --- a/tests/script/general/parser/limit.sim +++ b/tests/script/general/parser/limit.sim @@ -75,4 +75,9 @@ sleep 100 run general/parser/limit_tb.sim run general/parser/limit_stb.sim +print ========> TD-6017 +sql use $db +sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 order by ts desc limit 3 offset 1) +sql select * from (select ts, top(c1, 5) from $stb where ts >= $ts0 order by ts desc limit 3 offset 1) + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/limit_tb.sim b/tests/script/general/parser/limit_tb.sim index 0c987d88c9..4a93797d40 100644 --- a/tests/script/general/parser/limit_tb.sim +++ b/tests/script/general/parser/limit_tb.sim @@ -355,6 +355,10 @@ sql select top(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1 if $rows != 0 then return -1 endi + +print ========> TD-6017 +sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= 
$tsu order by ts desc limit 3 offset 1) + sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1 print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1 print $data00 $data01 From b9d5df6761cfedcf8a6dec148f5470840cb98c2d Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 13:54:15 +0800 Subject: [PATCH 120/239] fix crash issue --- src/client/src/tscSubquery.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 9761c5b4ca..5464d4168a 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2054,11 +2054,8 @@ void tscFreeRetrieveSupporters(SSqlObj *pSql) { for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { SSqlObj* pSub = pSql->pSubs[i]; assert(pSub != NULL); - - SRetrieveSupport* pSupport = pSub->param; - - tfree(pSupport->localBuffer); - tfree(pSub->param); + + tscFreeRetrieveSup(pSub); } } From e43b3c330ad47f447295e6ce789b3c84c3a4d214 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 13:56:11 +0800 Subject: [PATCH 121/239] fix issue --- src/client/src/tscSubquery.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 5464d4168a..04267421cf 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2050,16 +2050,6 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { } } -void tscFreeRetrieveSupporters(SSqlObj *pSql) { - for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - SSqlObj* pSub = pSql->pSubs[i]; - assert(pSub != NULL); - - tscFreeRetrieveSup(pSub); - } -} - - void tscLockByThread(int64_t *lockedBy) { int64_t tid = taosGetSelfPthreadId(); int i = 0; @@ -2586,6 +2576,18 @@ static void tscFreeRetrieveSup(SSqlObj *pSql) { tfree(trsupport); } +void tscFreeRetrieveSupporters(SSqlObj *pSql) { + for(int32_t i = 0; 
i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + assert(pSub != NULL); + + tscFreeRetrieveSup(pSub); + } +} + + + + static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfRows); static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows); From 829055102fa12bce12cb33eb8ad2d97676fc9449 Mon Sep 17 00:00:00 2001 From: cpwu Date: Tue, 17 Aug 2021 13:57:47 +0800 Subject: [PATCH 122/239] [TD-5798] add case for td5798 --- tests/pytest/functions/queryTestCases.py | 214 +++++++++++++++++++---- 1 file changed, 183 insertions(+), 31 deletions(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index 8cd3ef6b3a..f320db43af 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -58,7 +58,7 @@ class TDTestCase: def td3690(self): tdLog.printNoPrefix("==========TD-3690==========") tdSql.query("show variables") - tdSql.checkData(51, 1, 864000) + tdSql.checkData(53, 1, 864000) def td4082(self): tdLog.printNoPrefix("==========TD-4082==========") @@ -268,7 +268,7 @@ class TDTestCase: tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db") tdSql.query("show variables") - tdSql.checkData(36, 1, 3650) + tdSql.checkData(38, 1, 3650) tdSql.query("show databases") tdSql.checkData(0,7,"3650,3650,3650") @@ -296,7 +296,7 @@ class TDTestCase: tdSql.query("show databases") tdSql.checkData(0, 7, "3650,3650,3650") tdSql.query("show variables") - tdSql.checkData(36, 1, 3650) + tdSql.checkData(38, 1, 3650) tdSql.execute("alter database db1 keep 365") tdSql.execute("drop database if exists db1") @@ -613,45 +613,189 @@ class TDTestCase: pass def td5798(self): - tdLog.printNoPrefix("==========TD-5798==========") + tdLog.printNoPrefix("==========TD-5798 + TD-5810==========") tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db keep 3650") 
tdSql.execute("use db") tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") - tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 int) tags(t2 binary(16), t3 binary(16), t4 int)") - for i in range(100): - sql = f"create table db.t{i} using db.stb1 tags({i%7}, {(i-1)%7}, {i%2})" + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)") + maxRemainderNum=7 + tbnum=101 + for i in range(tbnum-1): + sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})" tdSql.execute(sql) tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") - tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2}, {(i-1)%3})") - tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)") - tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i}', '{100-i}', {i%3})") - tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, {(i+1)%3})") - tdSql.execute(f"insert into db.t0{i} values (now-9d, {i*2}, {(i+2)%3})") - tdSql.execute(f"insert into db.t0{i} values (now-8d, {i*3}, {(i)%3})") + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')") + tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)") + tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)") + tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)") + tdSql.execute(f"insert into db.t100num values (now-10d, 
{tbnum-1}, 1)") + tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t100num (ts )values (now-7d)") + tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)") + #========== TD-5810 suport distinct multi-data-coloumn ========== + tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c2 from stb1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}") + tdSql.checkRows(2) + + tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c2 from t1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c1 from t1 ") + tdSql.checkRows(2) + tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2") + tdSql.checkRows(1) + + tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2") + tdSql.checkRows(2) + + tdSql.error("select distinct c5 from stb1") + tdSql.error("select 
distinct c5 from t1") + tdSql.error("select distinct c1 from db.*") + tdSql.error("select c2, distinct c1 from stb1") + tdSql.error("select c2, distinct c1 from t1") + tdSql.error("select distinct c2 from ") + tdSql.error("distinct c2 from stb1") + tdSql.error("distinct c2 from t1") + tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query("select distinct c1, c2 from stb1 order by ts") + tdSql.checkRows(tbnum*3+1) + tdSql.query("select distinct c1, c2 from t1 order by ts") + tdSql.checkRows(4) + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) 
fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(4) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts 
and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ") + + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)") + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)") + + + + #========== TD-5798 suport distinct multi-tags-coloumn ========== tdSql.query("select distinct t1 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t0, t1 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t1, t0 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t1, t2 from stb1") - tdSql.checkRows(14) + tdSql.checkRows(maxRemainderNum*2+1) tdSql.query("select distinct t0, t1, t2 from stb1") - tdSql.checkRows(14) + tdSql.checkRows(maxRemainderNum*2+1) tdSql.query("select distinct t0 t1, t1 t2 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t0, t0, t0 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t0, t1 from t1") tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from t100num") + tdSql.checkRows(1) + + tdSql.query("select distinct t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t4, t2 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2, t3, t4 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2 t1, t3 t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t3, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from t01") + tdSql.checkRows(1) + tdSql.query("select 
distinct t3, t4 from t0100num") + tdSql.checkRows(1) ########## should be error ######### tdSql.error("select distinct from stb1") + tdSql.error("select distinct t3 from stb1") + tdSql.error("select distinct t1 from db.*") tdSql.error("select distinct t2 from ") tdSql.error("distinct t2 from stb1") tdSql.error("select distinct stb1") @@ -678,29 +822,37 @@ class TDTestCase: tdSql.checkRows(1) tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") - # tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") tdSql.checkRows(5) tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") tdSql.checkRows(4) - tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") - tdSql.checkRows(1) - tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") - tdSql.checkRows(1) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") tdSql.checkRows(1) tdSql.error("select distinct t1, t0 from (select t1 from 
stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") tdSql.checkRows(1) - # tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") - tdSql.query("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") - + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") pass def td5935(self): - tdLog.printNoPrefix("==========TD-5798==========") + tdLog.printNoPrefix("==========TD-5935==========") tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db keep 3650") @@ -739,8 +891,8 @@ class TDTestCase: # self.td4082() # self.td4288() # self.td4724() - # self.td5798() - self.td5935() + self.td5798() + # self.td5935() # develop branch # self.td4097() From 2fde105908837521e34cc03b048d1276466df1b6 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 14:04:27 +0800 Subject: [PATCH 123/239] fix mem leak --- src/client/src/tscSubquery.c | 2 +- src/client/src/tscUtil.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 04267421cf..9ee735de45 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2743,7 +2743,7 @@ void tscHandleSubqueryError(SRetrieveSupport 
*trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeRetrieveSupporters(pParentSql); + tscFreeRetrieveSup(pParentSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 67b1a1c199..ad04550e77 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,7 +3767,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeRetrieveSupporters(pParentSql); + tscFreeRetrieveSup(pParentSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); From a299c05c96c0e8cef4e150626dc364143ca03f7e Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 14:07:04 +0800 Subject: [PATCH 124/239] fix bug --- src/client/inc/tscSubquery.h | 3 +++ src/client/src/tscSubquery.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index 9e16f3f900..99215551c3 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -54,6 +54,9 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); void tscFreeRetrieveSupporters(SSqlObj *pSql); +void tscFreeRetrieveSup(SSqlObj *pSql); + + #ifdef __cplusplus } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 9ee735de45..56cee02f4d 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2562,7 +2562,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return TSDB_CODE_SUCCESS; } -static void tscFreeRetrieveSup(SSqlObj *pSql) { +void tscFreeRetrieveSup(SSqlObj *pSql) { SRetrieveSupport *trsupport = pSql->param; void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0); From 1a3448ffbcb3db1afa9441e407435f68eb10d993 Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 17 Aug 2021 14:08:50 +0800 Subject: [PATCH 125/239] [TD-4352]disable 
tsdbMetaCompactRatio by default --- packaging/cfg/taos.cfg | 2 +- src/inc/taosdef.h | 2 +- src/tsdb/src/tsdbCommit.c | 2 +- src/tsdb/src/tsdbMeta.c | 5 ++++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index fdbb5829f8..06e7cb7da0 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -285,4 +285,4 @@ keepColumnName 1 # queryBufferSize -1 # percent of redundant data in tsdb meta will compact meta data,0 means donot compact -# tsdbMetaCompactRatio 30 +# tsdbMetaCompactRatio 0 diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 66811326ee..4f70fd71f0 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -275,7 +275,7 @@ do { \ #define TSDB_MAX_TABLES 10000000 #define TSDB_DEFAULT_TABLES 1000000 #define TSDB_TABLES_STEP 1000 -#define TSDB_META_COMPACT_RATIO 30 +#define TSDB_META_COMPACT_RATIO 0 // disable tsdb meta compact by default #define TSDB_MIN_DAYS_PER_FILE 1 #define TSDB_MAX_DAYS_PER_FILE 3650 diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 6bd47247a2..dd5dec81e0 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -536,7 +536,7 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { code = 0; _err: - TSDB_FILE_FSYNC(&mf); + if (code == 0) TSDB_FILE_FSYNC(&mf); tsdbCloseMFile(&mf); tsdbCloseMFile(pMFile); diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 62c3a06321..eba14cbb87 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -746,7 +746,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema, TSDB_WUNLOCK_TABLE(pCTable); if (insertAct) { - ASSERT(tsdbInsertNewTableAction(pRepo, pCTable) == 0); + if (tsdbInsertNewTableAction(pRepo, pCTable) != 0) { + tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " tsdbInsertNewTableAction fail", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), + TABLE_TID(pTable), TABLE_UID(pTable)); + } } } From 
c2cf58938bdd59c51a64e0957ab629039b574394 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 14:48:17 +0800 Subject: [PATCH 126/239] [TD-6046] fix ts top/bottom error --- src/client/src/tscSQLParser.c | 2 +- src/query/src/qExecutor.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 781b1be76f..857d1a4419 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2872,7 +2872,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX); - insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, + insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].name, pExpr); colIndex += 1; // the first column is ts diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 08146b6200..086b3d073c 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3851,7 +3851,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe } if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { - pCtx[i].ptsOutputBuf = pCtx[0].pOutput; + if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput; } if (!pResInfo->initialized) { From 010db435a49f6936b5768ae12af765b0f61ebd05 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 14:50:19 +0800 Subject: [PATCH 127/239] fix crash issue --- src/query/src/qExecutor.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 844b8ec683..f59270d40a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1604,6 +1604,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, 
SResultRowInfo* pRe int32_t startPos = ascQuery? 0 : (pSDataBlock->info.rows - 1); TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); + int32_t startInterp = (pResultRowInfo->curPos == -1); STimeWindow win = getCurrentActiveTimeWindow(pResultRowInfo, ts, pQueryAttr); bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); @@ -1625,7 +1626,10 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + if (!startInterp) { + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + } + startInterp = 0; preWin = win; int32_t prevEndPos = (forwardStep - 1) * step + startPos; From ecb66a1598a10c6ca11afaf73df0730009fd9adc Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 15:18:37 +0800 Subject: [PATCH 128/239] fix bug --- src/client/src/tscSQLParser.c | 4 ++++ src/query/src/qExecutor.c | 6 +----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4e5245742c..9bda1458f4 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5382,6 +5382,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo const char* msg3 = "top/bottom not support fill"; const char* msg4 = "illegal value or data overflow"; const char* msg5 = "fill only available for interval query"; + const char* msg6 = "not supported function now"; if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); @@ -5420,6 +5421,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, 
SQueryInfo* pQueryInfo, SSqlNode* pSqlNo } } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_PREV; + if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } } else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_NEXT; } else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f59270d40a..844b8ec683 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1604,7 +1604,6 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe int32_t startPos = ascQuery? 0 : (pSDataBlock->info.rows - 1); TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); - int32_t startInterp = (pResultRowInfo->curPos == -1); STimeWindow win = getCurrentActiveTimeWindow(pResultRowInfo, ts, pQueryAttr); bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); @@ -1626,10 +1625,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - if (!startInterp) { - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); - } - startInterp = 0; + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? 
&win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); preWin = win; int32_t prevEndPos = (forwardStep - 1) * step + startPos; From dc778057deed2073916321f9b9c2609618ccebe1 Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 17 Aug 2021 15:22:45 +0800 Subject: [PATCH 129/239] run ci again From 89028123997938fa5ccf07280207feacea58f894 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 16:04:48 +0800 Subject: [PATCH 130/239] fix bug --- src/client/src/tscSubquery.c | 1 + src/client/src/tscUtil.c | 1 + 2 files changed, 2 insertions(+) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 56cee02f4d..c5afbe6dba 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2744,6 +2744,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeRetrieveSup(pParentSql); + tscFreeRetrieveSup(userSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ad04550e77..a79f50b63d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3768,6 +3768,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeRetrieveSup(pParentSql); + tscFreeRetrieveSup(userSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); From bd0b2e4b484da282f7b81e580c94ce327b3a5aa7 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Tue, 17 Aug 2021 16:17:39 +0800 Subject: [PATCH 131/239] [TD-6070] add test case --- tests/pytest/functions/function_interp.py | 70 ++++++++++++++++++++--- 1 file changed, 61 insertions(+), 9 deletions(-) diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py index 810c90279c..41215f15eb 100644 --- a/tests/pytest/functions/function_interp.py +++ 
b/tests/pytest/functions/function_interp.py @@ -26,18 +26,70 @@ class TDTestCase: self.rowNum = 10 self.ts = 1537146000000 - + def run(self): tdSql.prepare() - tdSql.execute("create table t(ts timestamp, k int)") - tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);") - - tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'") - tdSql.checkRows(1) - tdSql.checkData(0, 1, 12) + tdSql.execute("create table ap1 (ts timestamp, pav float)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.119', 2.90799)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.317', 3.07399)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.517', 0.58117)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.717', 0.16150)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.918', 1.47885)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:56.569', 1.76472)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.381', 2.13722)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.574', 4.10256)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.776', 3.55345)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.976', 1.46624)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.187', 0.17943)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.372', 2.04101)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.573', 3.20924)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.768', 1.71807)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.964', 4.60900)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.155', 4.33907)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.359', 0.76940)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.553', 0.06458)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.742', 4.59857)") + tdSql.execute("insert into ap1 values 
('2021-07-25 02:19:59.938', 1.55081)") + + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)") + tdSql.checkRows(6) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT)") + tdSql.checkRows(6) + tdSql.checkData(0,1,2.90799) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV)") + tdSql.checkRows(7) + tdSql.checkData(1,1,1.47885) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)") + tdSql.checkRows(7) + + # check desc order + tdSql.error("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV) order by ts desc") + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT) order by ts desc") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR) order by ts desc") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc") + tdSql.checkRows(6) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT) order by ts desc") + tdSql.checkRows(6) + tdSql.checkData(0,1,4.60900) + tdSql.error("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV) order by ts desc") + 
tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc") + tdSql.checkRows(7) + + # check exception + tdSql.error("select interp(*) from ap1") + tdSql.error("select interp(*) from ap1 FILL(NEXT)") + tdSql.error("select interp(*) from ap1 ts >= '2021-07-25 02:19:54' FILL(NEXT)") + tdSql.error("select interp(*) from ap1 ts <= '2021-07-25 02:19:54' FILL(NEXT)") + tdSql.error("select interp(*) from ap1 where ts >'2021-07-25 02:19:59.938' and ts < now interval(1s) fill(next)") - tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)") - def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From 55c8c15742aa29e09494bbfbf19b9ee4e52010e0 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 16:32:38 +0800 Subject: [PATCH 132/239] fix msg null issue --- src/client/src/tscSQLParser.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 968d62d939..e580232f01 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4524,7 +4524,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql */ tSqlExprDestroy(*pExpr); } else { - ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pCmd->payload); } *pExpr = NULL; // remove this expression @@ -4562,7 +4562,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; - ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload); *pExpr = NULL; } else { // do nothing @@ -4580,7 +4580,7 @@ static int32_t 
handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } - ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pCmd->payload); *pExpr = NULL; // remove it from expr tree } From f98b41bb4f07fe8ad2f66e02091dde9cdb73b6e3 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 17 Aug 2021 17:04:10 +0800 Subject: [PATCH 133/239] [TD-6165]: use memcpy to replace wcsncmp for nchar type comparision --- src/util/src/tcompare.c | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 921da82d44..1b980a4a1d 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -199,16 +199,7 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - memcpy(pLeftTerm, varDataVal(pLeft), len1); - memcpy(pRightTerm, varDataVal(pRight), len2); - - int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); - - tfree(pLeftTerm); - tfree(pRightTerm); - + int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1); if (ret == 0) { return 0; } else { @@ -518,17 +509,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { if (t1->len != t2->len) { return t1->len > t2->len? 
1:-1; } - - char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); - char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); - memcpy(t1_term, t1->data, t1->len); - memcpy(t2_term, t2->data, t2->len); - - int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); - - tfree(t1_term); - tfree(t2_term); - + int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len); if (ret == 0) { return ret; } From 7e7b67146a4b91b33f3590efe39d20dc4c21801b Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 17 Aug 2021 17:04:10 +0800 Subject: [PATCH 134/239] [TD-6165]: use memcpy to replace wcsncmp for nchar type comparision --- src/util/src/tcompare.c | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 3619dad83b..f8deb65d48 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -129,16 +129,7 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - memcpy(pLeftTerm, varDataVal(pLeft), len1); - memcpy(pRightTerm, varDataVal(pRight), len2); - - int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); - - tfree(pLeftTerm); - tfree(pRightTerm); - + int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1); if (ret == 0) { return 0; } else { @@ -418,17 +409,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { if (t1->len != t2->len) { return t1->len > t2->len? 
1:-1; } - - char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); - char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); - memcpy(t1_term, t1->data, t1->len); - memcpy(t2_term, t2->data, t2->len); - - int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); - - tfree(t1_term); - tfree(t2_term); - + int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len); if (ret == 0) { return ret; } From 1bd580402a508c69cd9e841054452952ad14dea3 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 18:11:47 +0800 Subject: [PATCH 135/239] [TD-6046] fix ts top/bottom error --- src/client/src/tscGlobalmerge.c | 2 +- src/query/src/qExecutor.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c index e696d54abd..ced81ff2f0 100644 --- a/src/client/src/tscGlobalmerge.c +++ b/src/client/src/tscGlobalmerge.c @@ -643,7 +643,7 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD for(int32_t j = 0; j < numOfExpr; ++j) { pCtx[j].pOutput += (pCtx[j].outputBytes * numOfRows); if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM) { - pCtx[j].ptsOutputBuf = pCtx[0].pOutput; + if(j > 0)pCtx[j].ptsOutputBuf = pCtx[j - 1].pOutput; } } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 086b3d073c..74c7f70437 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3587,7 +3587,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i // set the timestamp output buffer for top/bottom/diff query int32_t fid = pCtx[i].functionId; if (fid == TSDB_FUNC_TOP || fid == TSDB_FUNC_BOTTOM || fid == TSDB_FUNC_DIFF || fid == TSDB_FUNC_DERIVATIVE) { - pCtx[i].ptsOutputBuf = pCtx[0].pOutput; + if (i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput; } } @@ -3912,7 +3912,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF 
int32_t functionId = pCtx[i].functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { - pCtx[i].ptsOutputBuf = pCtx[0].pOutput; + if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput; } /* From 2798480b00151a6b92a53f97d0e5a265412d163c Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 18:34:34 +0800 Subject: [PATCH 136/239] [TD-6046] fix ts top/bottom error --- tests/pytest/functions/function_bottom.py | 15 +++++++++++++++ tests/pytest/functions/function_top.py | 15 +++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/tests/pytest/functions/function_bottom.py b/tests/pytest/functions/function_bottom.py index abb9ac48e7..e9e5003f6f 100644 --- a/tests/pytest/functions/function_bottom.py +++ b/tests/pytest/functions/function_bottom.py @@ -104,6 +104,21 @@ class TDTestCase: tdSql.checkRows(2) tdSql.checkData(0, 1, 1) tdSql.checkData(1, 1, 2) + + tdSql.query("select ts,bottom(col1, 2),ts from test1") + tdSql.checkRows(2) + tdSql.checkData(0, 0, "2018-09-17 09:00:00.000") + tdSql.checkData(0, 1, "2018-09-17 09:00:00.000") + tdSql.checkData(1, 0, "2018-09-17 09:00:00.001") + tdSql.checkData(1, 3, "2018-09-17 09:00:00.001") + + + tdSql.query("select ts,bottom(col1, 2),ts from test group by tbname") + tdSql.checkRows(2) + tdSql.checkData(0, 0, "2018-09-17 09:00:00.000") + tdSql.checkData(0, 1, "2018-09-17 09:00:00.000") + tdSql.checkData(1, 0, "2018-09-17 09:00:00.001") + tdSql.checkData(1, 3, "2018-09-17 09:00:00.001") #TD-2457 bottom + interval + order by tdSql.error('select top(col2,1) from test interval(1y) order by col2;') diff --git a/tests/pytest/functions/function_top.py b/tests/pytest/functions/function_top.py index f8318402b5..9824afc19f 100644 --- a/tests/pytest/functions/function_top.py +++ b/tests/pytest/functions/function_top.py @@ -117,6 +117,21 @@ class TDTestCase: tdSql.checkRows(2) tdSql.checkData(0, 1, 8.1) tdSql.checkData(1, 1, 9.1) + + 
tdSql.query("select ts,top(col1, 2),ts from test1") + tdSql.checkRows(2) + tdSql.checkData(0, 0, "2018-09-17 09:00:00.008") + tdSql.checkData(0, 1, "2018-09-17 09:00:00.008") + tdSql.checkData(1, 0, "2018-09-17 09:00:00.009") + tdSql.checkData(1, 3, "2018-09-17 09:00:00.009") + + + tdSql.query("select ts,top(col1, 2),ts from test group by tbname") + tdSql.checkRows(2) + tdSql.checkData(0, 0, "2018-09-17 09:00:00.008") + tdSql.checkData(0, 1, "2018-09-17 09:00:00.008") + tdSql.checkData(1, 0, "2018-09-17 09:00:00.009") + tdSql.checkData(1, 3, "2018-09-17 09:00:00.009") #TD-2563 top + super_table + interval tdSql.execute("create table meters(ts timestamp, c int) tags (d int)") From 2bed48b40274b9b69d494664a1f37ba3017575f9 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 18:38:33 +0800 Subject: [PATCH 137/239] [TD-6166]: pointer not initialized led taos.exe exit on windows. (#7425) --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index ec1b606bf6..62e701bf3a 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -521,7 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; From 0c7cf3a918f01c51b8d8992fb64def668db0b338 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 18:39:02 +0800 Subject: [PATCH 138/239] [TD-6166]: pointer not initialized led taos.exe exit on windows. 
(#7424) --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index ec1b606bf6..62e701bf3a 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -521,7 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; From 045f8736369a7d53f092039b5f3ed8dd628a784b Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 18:45:56 +0800 Subject: [PATCH 139/239] [TD-6046] fix ts top/bottom error --- tests/pytest/functions/function_derivative.py | 10 ++++++++++ tests/pytest/functions/function_diff.py | 18 ++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/tests/pytest/functions/function_derivative.py b/tests/pytest/functions/function_derivative.py index 7d34b3ce60..8b77159ffa 100644 --- a/tests/pytest/functions/function_derivative.py +++ b/tests/pytest/functions/function_derivative.py @@ -63,6 +63,16 @@ class TDTestCase: tdSql.checkData(3, 1, "2018-09-17 09:01:20.000") tdSql.checkData(3, 3, "2018-09-17 09:01:20.000") + tdSql.query("select ts,derivative(col1, 10, 1),ts from tb1") + tdSql.checkRows(2) + tdSql.checkData(0, 0, "2018-09-17 09:00:10.000") + tdSql.checkData(0, 1, "2018-09-17 09:00:10.000") + tdSql.checkData(0, 3, "2018-09-17 09:00:10.000") + tdSql.checkData(1, 0, "2018-09-17 09:00:20.009") + tdSql.checkData(1, 1, "2018-09-17 09:00:20.009") + tdSql.checkData(1, 3, "2018-09-17 09:00:20.009") + + tdSql.query("select ts from(select ts,derivative(col, 10s, 0) from stb group by tbname)") tdSql.checkData(0, 0, "2018-09-17 09:00:10.000") diff --git a/tests/pytest/functions/function_diff.py b/tests/pytest/functions/function_diff.py index fba3b4c0d4..4ef8ef7a98 100644 
--- a/tests/pytest/functions/function_diff.py +++ b/tests/pytest/functions/function_diff.py @@ -95,6 +95,24 @@ class TDTestCase: tdSql.error("select diff(col14) from test") + tdSql.query("select ts,diff(col1),ts from test1") + tdSql.checkRows(10) + tdSql.checkData(0, 0, "2018-09-17 09:00:00.000") + tdSql.checkData(0, 1, "2018-09-17 09:00:00.000") + tdSql.checkData(0, 3, "2018-09-17 09:00:00.000") + tdSql.checkData(9, 0, "2018-09-17 09:00:00.009") + tdSql.checkData(9, 1, "2018-09-17 09:00:00.009") + tdSql.checkData(9, 3, "2018-09-17 09:00:00.009") + + tdSql.query("select ts,diff(col1),ts from test group by tbname") + tdSql.checkRows(10) + tdSql.checkData(0, 0, "2018-09-17 09:00:00.000") + tdSql.checkData(0, 1, "2018-09-17 09:00:00.000") + tdSql.checkData(0, 3, "2018-09-17 09:00:00.000") + tdSql.checkData(9, 0, "2018-09-17 09:00:00.009") + tdSql.checkData(9, 1, "2018-09-17 09:00:00.009") + tdSql.checkData(9, 3, "2018-09-17 09:00:00.009") + tdSql.query("select diff(col1) from test1") tdSql.checkRows(10) From e98863b5fd6feec4d9aaf92264e40550146ec6e9 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 17 Aug 2021 18:48:01 +0800 Subject: [PATCH 140/239] [TD-6046] fix ts top/bottom error --- tests/pytest/functions/function_derivative.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/functions/function_derivative.py b/tests/pytest/functions/function_derivative.py index 8b77159ffa..61a3ed72fe 100644 --- a/tests/pytest/functions/function_derivative.py +++ b/tests/pytest/functions/function_derivative.py @@ -63,7 +63,7 @@ class TDTestCase: tdSql.checkData(3, 1, "2018-09-17 09:01:20.000") tdSql.checkData(3, 3, "2018-09-17 09:01:20.000") - tdSql.query("select ts,derivative(col1, 10, 1),ts from tb1") + tdSql.query("select ts,derivative(col, 10, 1),ts from tb1") tdSql.checkRows(2) tdSql.checkData(0, 0, "2018-09-17 09:00:10.000") tdSql.checkData(0, 1, "2018-09-17 09:00:10.000") From 0e4b683b6d55a4e9c5df07b135df571da7533de3 Mon Sep 17 00:00:00 2001 
From: wangmm0220 Date: Tue, 17 Aug 2021 18:48:50 +0800 Subject: [PATCH 141/239] [TD-6046] fix ts top/bottom error --- tests/pytest/functions/function_derivative.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/functions/function_derivative.py b/tests/pytest/functions/function_derivative.py index 61a3ed72fe..a97a041d0b 100644 --- a/tests/pytest/functions/function_derivative.py +++ b/tests/pytest/functions/function_derivative.py @@ -63,7 +63,7 @@ class TDTestCase: tdSql.checkData(3, 1, "2018-09-17 09:01:20.000") tdSql.checkData(3, 3, "2018-09-17 09:01:20.000") - tdSql.query("select ts,derivative(col, 10, 1),ts from tb1") + tdSql.query("select ts,derivative(col, 10s, 1),ts from tb1") tdSql.checkRows(2) tdSql.checkData(0, 0, "2018-09-17 09:00:10.000") tdSql.checkData(0, 1, "2018-09-17 09:00:10.000") From 2d62d2fe2e7e092e8c7bddf3510b304e6275358b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 19:22:48 +0800 Subject: [PATCH 142/239] Hotfix/sangshuduo/td 6166 pointer not init for 2171 (#7428) * [TD-6166]: pointer not initialized led taos.exe exit on windows. * another line. --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 62e701bf3a..e2ba755f2d 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,7 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; int iErr; const char *pc; From 5e15ac45fc1c9d341d3fcc18ed6c72b663872055 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 19:24:37 +0800 Subject: [PATCH 143/239] Hotfix/sangshuduo/td 6166 pointer not init (#7427) * [TD-6166]: pointer not initialized led taos.exe exit on windows. * another line. 
--- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 62e701bf3a..e2ba755f2d 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,7 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; int iErr; const char *pc; From 121e0bc0332695e137e80ee68673e316ade8668b Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 17 Aug 2021 19:34:43 +0000 Subject: [PATCH 144/239] [TD-6131] show vnodes crash --- src/mnode/src/mnodeDnode.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 335f9af528..89bc0169d5 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -1238,7 +1238,7 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo pIter = mnodeGetNextVgroup(pIter, &pVgroup); if (pVgroup == NULL) break; - for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { + for (int32_t i = pShow->index; i < pVgroup->numOfVnodes && numOfRows < rows; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; if (pVgid->pDnode == pDnode) { cols = 0; @@ -1250,9 +1250,12 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; STR_TO_VARSTR(pWrite, syncRole[pVgid->role]); cols++; - numOfRows++; } + pShow->index = i; + } + if (numOfRows >= rows) { + break; } mnodeDecVgroupRef(pVgroup); From c4136b1b39b0da6b340e4457a20a5f2a117f1af2 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 17 Aug 2021 20:11:47 +0000 Subject: [PATCH 145/239] [TD-6131] show vnodes crash --- src/mnode/inc/mnodeDef.h | 1 + src/mnode/src/mnodeDnode.c | 7 +++---- 2 files changed, 4 insertions(+), 4 
deletions(-) diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h index 5521267841..5acc8dd85e 100644 --- a/src/mnode/inc/mnodeDef.h +++ b/src/mnode/inc/mnodeDef.h @@ -274,6 +274,7 @@ typedef struct { int32_t rowSize; int32_t numOfRows; void * pIter; + void * pVgIter; void ** ppShow; int16_t offset[TSDB_MAX_COLUMNS]; int32_t bytes[TSDB_MAX_COLUMNS]; diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 89bc0169d5..320aa44ca6 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -1232,13 +1232,12 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo pDnode = (SDnodeObj *)(pShow->pIter); if (pDnode != NULL) { - void *pIter = NULL; SVgObj *pVgroup; while (1) { - pIter = mnodeGetNextVgroup(pIter, &pVgroup); + pShow->pVgIter = mnodeGetNextVgroup(pShow->pVgIter, &pVgroup); if (pVgroup == NULL) break; - for (int32_t i = pShow->index; i < pVgroup->numOfVnodes && numOfRows < rows; ++i) { + for (int32_t i = 0; i < pVgroup->numOfVnodes && numOfRows < rows; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; if (pVgid->pDnode == pDnode) { cols = 0; @@ -1251,8 +1250,8 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo STR_TO_VARSTR(pWrite, syncRole[pVgid->role]); cols++; numOfRows++; + } - pShow->index = i; } if (numOfRows >= rows) { break; From 584666056604327a5c3ecbf6fda1a02fc48a1a85 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 17 Aug 2021 21:13:03 +0000 Subject: [PATCH 146/239] [TD-6131] show vnodes crash --- src/mnode/src/mnodeDnode.c | 6 ++++++ src/mnode/src/mnodeShow.c | 9 +++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 320aa44ca6..7dd199cca4 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -196,14 +196,20 @@ int32_t mnodeInitDnodes() { mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CREATE_DNODE, mnodeProcessCreateDnodeMsg); 
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_DROP_DNODE, mnodeProcessDropDnodeMsg); mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CONFIG_DNODE, mnodeProcessCfgDnodeMsg); + mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP, mnodeProcessCfgDnodeMsgRsp); mnodeAddPeerMsgHandle(TSDB_MSG_TYPE_DM_STATUS, mnodeProcessDnodeStatusMsg); + mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_MODULE, mnodeGetModuleMeta); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_MODULE, mnodeRetrieveModules); + mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeGetConfigMeta); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeRetrieveConfigs); + mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VNODES, mnodeGetVnodeMeta); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VNODES, mnodeRetrieveVnodes); + mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_VNODES, mnodeCancelGetNextVgroup); + mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_DNODE, mnodeGetDnodeMeta); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_DNODE, mnodeRetrieveDnodes); mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_DNODE, mnodeCancelGetNextDnode); diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 570f5c344b..bbfdb52e05 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -422,8 +422,13 @@ static void* mnodePutShowObj(SShowObj *pShow) { static void mnodeFreeShowObj(void *data) { SShowObj *pShow = *(SShowObj **)data; - if (tsMnodeShowFreeIterFp[pShow->type] != NULL && pShow->pIter != NULL) { - (*tsMnodeShowFreeIterFp[pShow->type])(pShow->pIter); + if (tsMnodeShowFreeIterFp[pShow->type] != NULL) { + if (pShow->pVgIter != NULL) { + // only used in 'show vnodes "ep"' + (*tsMnodeShowFreeIterFp[pShow->type])(pShow->pVgIter); + } else { + if (pShow->pIter != NULL) (*tsMnodeShowFreeIterFp[pShow->type])(pShow->pIter); + } } mDebug("%p, show is destroyed, data:%p index:%d", pShow, data, pShow->index); From 11996d25768daadf15f95007c41b95482ff3cff9 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 17 Aug 2021 21:42:23 +0000 Subject: [PATCH 
147/239] [TD-6131] show vnodes crash --- src/common/inc/tglobal.h | 3 +++ src/common/src/tglobal.c | 5 +++++ src/tsdb/src/tsdbFS.c | 4 +--- src/util/inc/tconfig.h | 1 - 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 947fed60e4..4b8347ead0 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -107,6 +107,9 @@ extern int32_t tsQuorum; extern int8_t tsUpdate; extern int8_t tsCacheLastRow; +//tsdb +extern bool tsdbForceKeepFile; + // balance extern int8_t tsEnableBalance; extern int8_t tsAlternativeRole; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 44b3e87e7d..b7930dd43e 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -151,6 +151,11 @@ int32_t tsMinTablePerVnode = TSDB_TABLES_STEP; int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES; int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP; +// tsdb config + +// For backward compatibility +bool tsdbForceKeepFile = false; + // balance int8_t tsEnableBalance = 1; int8_t tsAlternativeRole = 0; diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index 68450301d8..63f89c1957 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -37,8 +37,6 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired); static int tsdbProcessExpiredFS(STsdbRepo *pRepo); static int tsdbCreateMeta(STsdbRepo *pRepo); -// For backward compatibility -bool tsdbForceKeepFile = false; // ================== CURRENT file header info static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) { int tlen = 0; @@ -1354,4 +1352,4 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired) { tsdbCloseDFileSet(&fset); } -} \ No newline at end of file +} diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index f146ec0b8b..d03ce6e0f1 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -81,7 +81,6 @@ typedef struct { extern SGlobalCfg 
tsGlobalConfig[]; extern int32_t tsGlobalConfigNum; extern char * tsCfgStatusStr[]; -extern bool tsdbForceKeepFile; void taosReadGlobalLogCfg(); bool taosReadGlobalCfg(); From 0e0e727eaf3a9cb8d0b189d717a6e955ed6fabcb Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 18 Aug 2021 01:08:22 +0000 Subject: [PATCH 148/239] [TD-6088] handle bigint sum --- src/common/src/ttypes.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 13c4160e7f..08bfc2e9aa 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -38,11 +38,7 @@ const int32_t TYPE_BYTES[15] = { #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ do { \ - if (_list[(_index)] >= (INT64_MAX - (__sum))) { \ - __sum = INT64_MAX; \ - } else { \ - (__sum) += (_list)[(_index)]; \ - } \ + (__sum) += (_list)[(_index)]; \ if ((__min) > (_list)[(_index)]) { \ (__min) = (_list)[(_index)]; \ (__minIndex) = (_index); \ From 19feac31c96a61210e592589b4f376d3ea4df710 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 18 Aug 2021 01:13:22 +0000 Subject: [PATCH 149/239] [TD-6198] fix bigint sum error --- src/common/src/ttypes.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index eeffe49adc..ee940531e6 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -38,11 +38,7 @@ const int32_t TYPE_BYTES[15] = { #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ do { \ - if (_list[(_index)] >= (INT64_MAX - (__sum))) { \ - __sum = INT64_MAX; \ - } else { \ - (__sum) += (_list)[(_index)]; \ - } \ + (__sum) += (_list)[(_index)]; \ if ((__min) > (_list)[(_index)]) { \ (__min) = (_list)[(_index)]; \ (__minIndex) = (_index); \ From 35b82a644f559a240f78cde78af37bd778dd4626 Mon Sep 17 00:00:00 2001 From: wpan Date: Wed, 18 Aug 2021 09:40:18 +0800 Subject: [PATCH 150/239] fix nested query bug --- 
src/client/inc/tscSubquery.h | 2 -- src/client/src/tscSubquery.c | 42 ++++++++---------------------------- src/client/src/tscUtil.c | 23 +++++++++----------- 3 files changed, 19 insertions(+), 48 deletions(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index 99215551c3..a012ca5a7f 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -52,8 +52,6 @@ int tsInsertInitialCheck(SSqlObj *pSql); void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); -void tscFreeRetrieveSupporters(SSqlObj *pSql); - void tscFreeRetrieveSup(SSqlObj *pSql); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index c5afbe6dba..d16d744393 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2040,11 +2040,8 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { for(int32_t i = 0; i < numOfSubs; ++i) { SSqlObj* pSub = pSql->pSubs[i]; assert(pSub != NULL); - - SRetrieveSupport* pSupport = pSub->param; - - tfree(pSupport->localBuffer); - tfree(pSupport); + + tscFreeRetrieveSup(pSub); taos_free_result(pSub); } @@ -2576,18 +2573,6 @@ void tscFreeRetrieveSup(SSqlObj *pSql) { tfree(trsupport); } -void tscFreeRetrieveSupporters(SSqlObj *pSql) { - for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - SSqlObj* pSub = pSql->pSubs[i]; - assert(pSub != NULL); - - tscFreeRetrieveSup(pSub); - } -} - - - - static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfRows); static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows); @@ -2732,30 +2717,21 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { int32_t code = pParentSql->res.code; - if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) { - // remove the cached tableMeta and vgroup id list, and 
then parse the sql again - SSqlCmd* pParentCmd = &pParentSql->cmd; - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0); - tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); - - pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); - pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) { tscFreeRetrieveSup(pParentSql); - tscFreeRetrieveSup(userSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); - pParentSql->res.code = TSDB_CODE_SUCCESS; - pParentSql->retry++; + userSql->res.code = TSDB_CODE_SUCCESS; + userSql->retry++; - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, - tstrerror(code), pParentSql->retry); + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, + tstrerror(code), userSql->retry); - tscResetSqlCmd(&userSql->cmd, false); + tscResetSqlCmd(&userSql->cmd, true); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a79f50b63d..06b61d443d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3758,29 +3758,26 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { // todo refactor tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self); - SSqlCmd* pParentCmd = &pParentSql->cmd; - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0); - tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); - - pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); - 
pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeRetrieveSup(pParentSql); - tscFreeRetrieveSup(userSql); + + if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry)) { + tscAsyncResultOnError(pParentSql); + return; + } tscFreeSubobj(userSql); tfree(userSql->pSubs); - pParentSql->res.code = TSDB_CODE_SUCCESS; - pParentSql->retry++; + userSql->res.code = TSDB_CODE_SUCCESS; + userSql->retry++; - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, - tstrerror(code), pParentSql->retry); + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, + tstrerror(code), userSql->retry); - tscResetSqlCmd(&userSql->cmd, false); + tscResetSqlCmd(&userSql->cmd, true); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { From cdac4b6d4029f06e64506bfe76ebda79a75ca91e Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 18 Aug 2021 02:53:55 +0000 Subject: [PATCH 151/239] [TD-6088] handel bigint sum --- src/common/src/ttypes.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index eeffe49adc..ee940531e6 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -38,11 +38,7 @@ const int32_t TYPE_BYTES[15] = { #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ do { \ - if (_list[(_index)] >= (INT64_MAX - (__sum))) { \ - __sum = INT64_MAX; \ - } else { \ - (__sum) += (_list)[(_index)]; \ - } \ + (__sum) += (_list)[(_index)]; \ if ((__min) > (_list)[(_index)]) { \ (__min) = (_list)[(_index)]; \ (__minIndex) = (_index); \ From 1c0e89de2bbcdbcf0ecb2f388d2b35d4917bb93c Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 18 Aug 2021 14:52:05 
+0800 Subject: [PATCH 152/239] enhance performacne when inserting from csv --- src/client/inc/tsclient.h | 8 +- src/client/src/tscParseInsert.c | 384 ++++++++++++++++++++++++++++++-- src/client/src/tscUtil.c | 1 + 3 files changed, 375 insertions(+), 18 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 4ead7d4180..99ed082236 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -123,17 +123,15 @@ typedef struct { int32_t kvLen; // len of SKVRow } SMemRowInfo; typedef struct { - uint8_t memRowType; - uint8_t compareStat; // 0 unknown, 1 need compare, 2 no need - TDRowTLenT dataRowInitLen; + uint8_t memRowType; // default is 0, that is SDataRow + uint8_t compareStat; // 0 no need, 1 need compare TDRowTLenT kvRowInitLen; SMemRowInfo *rowInfo; } SMemRowBuilder; typedef enum { - ROW_COMPARE_UNKNOWN = 0, + ROW_COMPARE_NO_NEED = 0, ROW_COMPARE_NEED = 1, - ROW_COMPARE_NO_NEED = 2, } ERowCompareStat; int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec); diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 89e3832007..dab0dff1fc 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -53,18 +53,18 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 if (nBoundCols == 0) { // file input pBuilder->memRowType = SMEM_ROW_DATA; - pBuilder->compareStat = ROW_COMPARE_NO_NEED; + // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else { float boundRatio = ((float)nBoundCols / (float)nCols); if (boundRatio < KVRatioKV) { pBuilder->memRowType = SMEM_ROW_KV; - pBuilder->compareStat = ROW_COMPARE_NO_NEED; + // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else if (boundRatio > KVRatioData) { pBuilder->memRowType = SMEM_ROW_DATA; - pBuilder->compareStat = ROW_COMPARE_NO_NEED; + // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } 
pBuilder->compareStat = ROW_COMPARE_NEED; @@ -76,7 +76,6 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } } - pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx); if (nRows > 0) { @@ -86,7 +85,7 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } for (int i = 0; i < nRows; ++i) { - (pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen; + (pBuilder->rowInfo + i)->dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; (pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen; } } @@ -449,6 +448,370 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) { return TSDB_CODE_SUCCESS; } +static int32_t tsParseOneColumnOld(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, + bool primaryKey, int16_t timePrec) { + int64_t iv; + int32_t ret; + char * endptr = NULL; + + if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { + return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); + } + + switch (pSchema->type) { + case TSDB_DATA_TYPE_BOOL: { // bool + if (isNullStr(pToken)) { + *((uint8_t *)payload) = TSDB_DATA_BOOL_NULL; + } else { + if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { + if (strncmp(pToken->z, "true", pToken->n) == 0) { + *(uint8_t *)payload = TSDB_TRUE; + } else if (strncmp(pToken->z, "false", pToken->n) == 0) { + *(uint8_t *)payload = TSDB_FALSE; + } else { + return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); + } + } else if (pToken->type == TK_INTEGER) { + iv = strtoll(pToken->z, NULL, 10); + *(uint8_t *)payload = (int8_t)((iv == 0) ? TSDB_FALSE : TSDB_TRUE); + } else if (pToken->type == TK_FLOAT) { + double dv = strtod(pToken->z, NULL); + *(uint8_t *)payload = (int8_t)((dv == 0) ? 
TSDB_FALSE : TSDB_TRUE); + } else { + return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); + } + } + break; + } + + case TSDB_DATA_TYPE_TINYINT: + if (isNullStr(pToken)) { + *((uint8_t *)payload) = TSDB_DATA_TINYINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); + } else if (!IS_VALID_TINYINT(iv)) { + return tscInvalidOperationMsg(msg, "data overflow", pToken->z); + } + + *((uint8_t *)payload) = (uint8_t)iv; + } + + break; + + case TSDB_DATA_TYPE_UTINYINT: + if (isNullStr(pToken)) { + *((uint8_t *)payload) = TSDB_DATA_UTINYINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); + } else if (!IS_VALID_UTINYINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); + } + + *((uint8_t *)payload) = (uint8_t)iv; + } + + break; + + case TSDB_DATA_TYPE_SMALLINT: + if (isNullStr(pToken)) { + *((int16_t *)payload) = TSDB_DATA_SMALLINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); + } else if (!IS_VALID_SMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); + } + + *((int16_t *)payload) = (int16_t)iv; + } + + break; + + case TSDB_DATA_TYPE_USMALLINT: + if (isNullStr(pToken)) { + *((uint16_t *)payload) = TSDB_DATA_USMALLINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); + } else if (!IS_VALID_USMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); + } + 
+ *((uint16_t *)payload) = (uint16_t)iv; + } + + break; + + case TSDB_DATA_TYPE_INT: + if (isNullStr(pToken)) { + *((int32_t *)payload) = TSDB_DATA_INT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); + } else if (!IS_VALID_INT(iv)) { + return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); + } + + *((int32_t *)payload) = (int32_t)iv; + } + + break; + + case TSDB_DATA_TYPE_UINT: + if (isNullStr(pToken)) { + *((uint32_t *)payload) = TSDB_DATA_UINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); + } else if (!IS_VALID_UINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); + } + + *((uint32_t *)payload) = (uint32_t)iv; + } + + break; + + case TSDB_DATA_TYPE_BIGINT: + if (isNullStr(pToken)) { + *((int64_t *)payload) = TSDB_DATA_BIGINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); + } else if (!IS_VALID_BIGINT(iv)) { + return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); + } + + *((int64_t *)payload) = iv; + } + break; + + case TSDB_DATA_TYPE_UBIGINT: + if (isNullStr(pToken)) { + *((uint64_t *)payload) = TSDB_DATA_UBIGINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); + } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { + return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); + } + + *((uint64_t *)payload) = iv; + } + break; + + case TSDB_DATA_TYPE_FLOAT: + if (isNullStr(pToken)) { + *((int32_t 
*)payload) = TSDB_DATA_FLOAT_NULL; + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || + isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + // *((float *)payload) = (float)dv; + SET_FLOAT_VAL(payload, dv); + } + break; + + case TSDB_DATA_TYPE_DOUBLE: + if (isNullStr(pToken)) { + *((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL; + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + *((double *)payload) = dv; + } + break; + + case TSDB_DATA_TYPE_BINARY: + // binary data cannot be null-terminated char string, otherwise the last char of the string is lost + if (pToken->type == TK_NULL) { + setVardataNull(payload, TSDB_DATA_TYPE_BINARY); + } else { // too long values will return invalid sql, not be truncated automatically + if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor + return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); + } + + STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); + } + + break; + + case TSDB_DATA_TYPE_NCHAR: + if (pToken->type == TK_NULL) { + setVardataNull(payload, TSDB_DATA_TYPE_NCHAR); + } else { + // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' + int32_t output = 0; + if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(payload), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) { + char buf[512] = {0}; + snprintf(buf, tListLen(buf), "%s", strerror(errno)); + return tscInvalidOperationMsg(msg, buf, pToken->z); + } + + 
varDataSetLen(payload, output); + } + break; + + case TSDB_DATA_TYPE_TIMESTAMP: { + if (pToken->type == TK_NULL) { + if (primaryKey) { + *((int64_t *)payload) = 0; + } else { + *((int64_t *)payload) = TSDB_DATA_BIGINT_NULL; + } + } else { + int64_t temp; + if (tsParseTime(pToken, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); + } + + *((int64_t *)payload) = temp; + } + + break; + } + } + + return TSDB_CODE_SUCCESS; +} + +static int tsParseOneRowOld(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, + char *tmpTokenBuf, SInsertStatementParam *pInsertParam) { + int32_t index = 0; + SStrToken sToken = {0}; + char * payload = pDataBlocks->pData + pDataBlocks->size; + + SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo; + SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta); + + // 1. set the parsed value from sql string + int32_t rowSize = 0; + for (int i = 0; i < spd->numOfBound; ++i) { + // the start position in data block buffer of current value in sql + int32_t colIndex = spd->boundedColumns[i]; + + char * start = payload + spd->cols[colIndex].offset; + SSchema *pSchema = &schema[colIndex]; + rowSize += pSchema->bytes; + + index = 0; + sToken = tStrGetToken(*str, &index, true); + *str += index; + + if (sToken.type == TK_QUESTION) { + if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) { + return tscSQLSyntaxErrMsg(pInsertParam->msg, "? 
only allowed in binding insertion", *str); + } + + uint32_t offset = (uint32_t)(start - pDataBlocks->pData); + if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) { + continue; + } + + strcpy(pInsertParam->msg, "client out of memory"); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + int16_t type = sToken.type; + if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL && + type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || + (sToken.n == 0) || (type == TK_RP)) { + return tscSQLSyntaxErrMsg(pInsertParam->msg, "invalid data or symbol", sToken.z); + } + + // Remove quotation marks + if (TK_STRING == sToken.type) { + // delete escape character: \\, \', \" + char delim = sToken.z[0]; + + int32_t cnt = 0; + int32_t j = 0; + if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) { + return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z); + } + + for (uint32_t k = 1; k < sToken.n - 1; ++k) { + if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) { + tmpTokenBuf[j] = sToken.z[k + 1]; + + cnt++; + j++; + k++; + continue; + } + + tmpTokenBuf[j] = sToken.z[k]; + j++; + } + + tmpTokenBuf[j] = 0; + sToken.z = tmpTokenBuf; + sToken.n -= 2 + cnt; + } + + bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); + int32_t ret = tsParseOneColumnOld(pSchema, &sToken, start, pInsertParam->msg, str, isPrimaryKey, timePrec); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) { + tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); + return TSDB_CODE_TSC_INVALID_TIME_STAMP; + } + } + + // 2. 
set the null value for the columns that do not assign values + if (spd->numOfBound < spd->numOfCols) { + char *ptr = payload; + + for (int32_t i = 0; i < spd->numOfCols; ++i) { + if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column do not have any value to insert, set it to null + if (schema[i].type == TSDB_DATA_TYPE_BINARY) { + varDataSetLen(ptr, sizeof(int8_t)); + *(uint8_t *)varDataVal(ptr) = TSDB_DATA_BINARY_NULL; + } else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) { + varDataSetLen(ptr, sizeof(int32_t)); + *(uint32_t *)varDataVal(ptr) = TSDB_DATA_NCHAR_NULL; + } else { + setNull(ptr, schema[i].type, schema[i].bytes); + } + } + + ptr += schema[i].bytes; + } + + rowSize = (int32_t)(ptr - payload); + } + + *len = rowSize; + return TSDB_CODE_SUCCESS; +} + int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf, SInsertStatementParam *pInsertParam) { int32_t index = 0; @@ -460,7 +823,7 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i STableMeta * pTableMeta = pDataBlocks->pTableMeta; SSchema * schema = tscGetTableSchema(pTableMeta); SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder; - int32_t dataLen = pBuilder->dataRowInitLen; + int32_t dataLen = spd->allNullLen + TD_MEM_ROW_DATA_HEAD_SIZE; int32_t kvLen = pBuilder->kvRowInitLen; bool isParseBindParam = false; @@ -1698,6 +2061,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow STableComInfo tinfo = tscGetTableInfo(pTableMeta); SInsertStatementParam* pInsertParam = &pCmd->insertParam; + pInsertParam->payloadType = PAYLOAD_TYPE_RAW; destroyTableNameList(pInsertParam); pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks); @@ -1726,12 +2090,6 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow goto _error; } - if (TSDB_CODE_SUCCESS != - (ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, 
pTableDataBlock->numOfParams, - pTableDataBlock->boundColumnInfo.allNullLen))) { - goto _error; - } - while ((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { line[--readLen] = 0; @@ -1745,7 +2103,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow strtolower(line, line); int32_t len = 0; - code = tsParseOneRow(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam); + code = tsParseOneRowOld(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam); if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) { pSql->res.code = code; break; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 612d6a5642..f3e30172ab 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1744,6 +1744,7 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff dataBuf->tsSource = -1; dataBuf->vgId = dataBuf->pTableMeta->vgId; + tNameAssign(&dataBuf->tableName, name); assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL); From ffcc93016a6a46fb21001e213d5ac26e10172afc Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 18 Aug 2021 16:12:36 +0800 Subject: [PATCH 153/239] Hotfix/sangshuduo/td 5136 taosdemo rework for master (#7433) * cherry pick from develop branch. * [TD-5136]: taosdemo simulate real senario. * update test case according to taosdemo change * adjust range of semi-random data. * make demo mode use different tag name and value. * change malloc to calloc for pid allocation. * fix typo. * fix binary length default value and group id logic. 
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 18f5877e09..016e27dc13 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -626,7 +626,7 @@ SArguments g_args = { "INT", // datatype "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3 }, - 16, // len_of_binary + 64, // len_of_binary 4, // num_of_CPR 10, // num_of_connections/thread 0, // insert_interval @@ -2598,7 +2598,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { if ((g_args.demo_mode) && (i == 0)) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64",", tableSeq % 10); + "%"PRId64",", (tableSeq % 10) + 1); } else { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, From 53b398c08d904f3c97dd1790747782af08185135 Mon Sep 17 00:00:00 2001 From: fastjrun Date: Wed, 18 Aug 2021 16:19:25 +0800 Subject: [PATCH 154/239] =?UTF-8?q?=E6=96=87=E6=A1=A3=E8=BF=87=E6=97=B6?= =?UTF-8?q?=EF=BC=9A1=E3=80=81tdsdemo=E4=B8=AD=E5=88=9D=E5=A7=8B=E5=8C=96?= =?UTF-8?q?=E7=9A=84=E8=A1=A8=E5=90=8D=E5=89=8D=E7=BC=80=E5=B7=B2=E7=BB=8F?= =?UTF-8?q?=E7=94=B1t=E6=94=B9=E4=B8=BAd=EF=BC=8C2=E3=80=81=E7=9B=B8?= =?UTF-8?q?=E5=85=B3=E8=A1=A8=E5=AD=97=E6=AE=B5=E4=B9=9F=E6=9B=B4=E6=96=B0?= =?UTF-8?q?=EF=BC=9B3=E3=80=81=E7=9B=AE=E5=89=8D=E9=95=9C=E5=83=8F?= =?UTF-8?q?=E5=B7=B2=E7=BB=8F=E6=94=AF=E6=8C=81=E5=A4=9A=E5=B9=B3=E5=8F=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cn/02.getting-started/01.docker/docs.md | 170 +++++++++++------- 1 file changed, 101 insertions(+), 69 deletions(-) diff --git a/documentation20/cn/02.getting-started/01.docker/docs.md b/documentation20/cn/02.getting-started/01.docker/docs.md index 30803d9777..4ac6d96ec1 100644 --- a/documentation20/cn/02.getting-started/01.docker/docs.md +++ 
b/documentation20/cn/02.getting-started/01.docker/docs.md @@ -1,6 +1,6 @@ # 通过 Docker 快速体验 TDengine -虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。 +虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。另外,从2.0.14.0版本开始,TDengine提供的镜像已经可以同时支持X86-64、X86、arm64、arm32平台,像NAS、树莓派、嵌入式开发板之类可以运行docker的非主流计算机也可以基于本文档轻松体验TDengine。 下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。 @@ -12,7 +12,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c ```bash $ docker -v -Docker version 20.10.5, build 55c4c88 +Docker version 20.10.3, build 48d30b5 ``` ## 在 Docker 容器中运行 TDengine @@ -20,21 +20,22 @@ Docker version 20.10.5, build 55c4c88 1,使用命令拉取 TDengine 镜像,并使它在后台运行。 ```bash -$ docker run -d tdengine/tdengine -cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316 +$ docker run -d --name tdengine tdengine/tdengine +7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292 ``` -- **docker run**:通过 Docker 运行一个容器。 -- **-d**:让容器在后台运行。 -- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。 -- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID,我们可以通过容器 ID 来查看对应的容器。 +- **docker run**:通过 Docker 运行一个容器 +- **--name tdengine**:设置容器名称,我们可以通过容器名称来查看对应的容器 +- **-d**:让容器在后台运行 +- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像 +- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**:这个返回的长字符是容器 ID,我们也可以通过容器 ID 来查看对应的容器 2,确认容器是否已经正确运行。 ```bash $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS ··· -cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· +c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· ``` - **docker ps**:列出所有正在运行状态的容器信息。 @@ -47,25 +48,25 @@ cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 
minutes · 3,进入 Docker 容器内,使用 TDengine。 ```bash -$ docker exec -it cdf548465318 /bin/bash -root@cdf548465318:~/TDengine-server-2.0.13.0# +$ docker exec -it tdengine /bin/bash +root@c452519b0f9b:~/TDengine-server-2.0.20.13# ``` - **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。 - **-i**:进入交互模式。 - **-t**:指定一个终端。 -- **cdf548465318**:容器 ID,需要根据 docker ps 指令返回的值进行修改。 +- **c452519b0f9b**:容器 ID,需要根据 docker ps 指令返回的值进行修改。 - **/bin/bash**:载入容器后运行 bash 来进行交互。 4,进入容器后,执行 taos shell 客户端程序。 ```bash -$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos +$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos -Welcome to the TDengine shell from Linux, Client Version:2.0.13.0 +Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. -taos> +taos> ``` TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。 @@ -78,45 +79,74 @@ TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息 ```bash $ taos> q -root@cdf548465318:~/TDengine-server-2.0.13.0# +root@c452519b0f9b:~/TDengine-server-2.0.20.13# ``` 2,在命令行界面执行 taosdemo。 ```bash -$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo -################################################################### -# Server IP: localhost:0 -# User: root -# Password: taosdata -# Use metric: true -# Datatype of Columns: int int int int int int int float -# Binary Length(If applicable): -1 -# Number of Columns per record: 3 -# Number of Threads: 10 -# Number of Tables: 10000 -# Number of Data per Table: 100000 -# Records/Request: 1000 -# Database name: test -# Table prefix: t -# Delete method: 0 -# Test time: 2021-04-13 02:05:20 -################################################################### +root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo + +taosdemo is simulating data generated by power equipments monitoring... 
+ +host: 127.0.0.1:6030 +user: root +password: taosdata +configDir: +resultFile: ./output.txt +thread num of insert data: 10 +thread num of create table: 10 +top insert interval: 0 +number of records per req: 30000 +max sql length: 1048576 +database count: 1 +database[0]: + database[0] name: test + drop: yes + replica: 1 + precision: ms + super table count: 1 + super table[0]: + stbName: meters + autoCreateTable: no + childTblExists: no + childTblCount: 10000 + childTblPrefix: d + dataSource: rand + iface: taosc + insertRows: 10000 + interlaceRows: 0 + disorderRange: 1000 + disorderRatio: 0 + maxSqlLen: 1048576 + timeStampStep: 1 + startTimestamp: 2017-07-14 10:40:00.000 + sampleFormat: + sampleFile: + tagsFile: + columnCount: 3 +column[0]:FLOAT column[1]:INT column[2]:FLOAT + tagCount: 2 + tag[0]:INT tag[1]:BINARY(16) + + Press enter key to continue or Ctrl-C to stop ``` -回车后,该命令将新建一个数据库 test,并且自动创建一张超级表 meters,并以超级表 meters 为模版创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1,f2,f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:41:39 999"。每张表带有 areaid 和 loc 两个标签 TAG,areaid 被设置为 1 到 10,loc 被设置为 "beijing" 或 "shanghai"。 +回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。 + +执行这条命令大概需要几分钟,最后共插入 1 亿条记录。 3,进入 TDengine 终端,查看 taosdemo 生成的数据。 - **进入命令行。** ```bash -$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos +$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos -Welcome to the TDengine shell from Linux, Client Version:2.0.13.0 +Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. 
-taos> +taos> ``` - **查看数据库。** @@ -124,8 +154,8 @@ taos> ```bash $ taos> show databases; name | created_time | ntables | vgroups | ··· - test | 2021-04-13 02:14:15.950 | 10000 | 6 | ··· - log | 2021-04-12 09:36:37.549 | 4 | 1 | ··· + test | 2021-08-18 06:01:11.021 | 10000 | 6 | ··· + log | 2021-08-18 05:51:51.065 | 4 | 1 | ··· ``` @@ -136,10 +166,10 @@ $ taos> use test; Database changed. $ taos> show stables; - name | created_time | columns | tags | tables | -===================================================================================== - meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 | -Query OK, 1 row(s) in set (0.001737s) + name | created_time | columns | tags | tables | +============================================================================================ + meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 | +Query OK, 1 row(s) in set (0.003259s) ``` @@ -147,42 +177,45 @@ Query OK, 1 row(s) in set (0.001737s) ```bash $ taos> select * from test.t0 limit 10; - ts | f1 | f2 | f3 | -==================================================================== - 2017-07-14 02:40:01.000 | 3 | 9 | 0 | - 2017-07-14 02:40:02.000 | 0 | 1 | 2 | - 2017-07-14 02:40:03.000 | 7 | 2 | 3 | - 2017-07-14 02:40:04.000 | 9 | 4 | 5 | - 2017-07-14 02:40:05.000 | 1 | 2 | 5 | - 2017-07-14 02:40:06.000 | 6 | 3 | 2 | - 2017-07-14 02:40:07.000 | 4 | 7 | 8 | - 2017-07-14 02:40:08.000 | 4 | 6 | 6 | - 2017-07-14 02:40:09.000 | 5 | 7 | 7 | - 2017-07-14 02:40:10.000 | 1 | 5 | 0 | -Query OK, 10 row(s) in set (0.003638s) + +DB error: Table does not exist (0.002857s) +taos> select * from test.d0 limit 10; + ts | current | voltage | phase | +====================================================================================== + 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 | + 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 | + 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 | + 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 | + 2017-07-14 10:40:00.004 | 9.84029 | 216 | 
0.32222 | + 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 | + 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 | + 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 | + 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 | + 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 | +Query OK, 10 row(s) in set (0.016791s) ``` -- **查看 t0 表的标签值。** +- **查看 d0 表的标签值。** ```bash -$ taos> select areaid, loc from test.t0; - areaid | loc | -=========================== - 10 | shanghai | -Query OK, 1 row(s) in set (0.002904s) +$ taos> select groupid, location from test.d0; + groupid | location | +================================= + 0 | shanghai | +Query OK, 1 row(s) in set (0.003490s) ``` ## 停止正在 Docker 中运行的 TDengine 服务 ```bash -$ docker stop cdf548465318 -cdf548465318 +$ docker stop tdengine +tdengine ``` - **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。 -- **cdf548465318**:容器 ID,根据 docker ps 指令返回的结果进行修改。 +- **tdengine**:容器名称。 ## 编程开发时连接在 Docker 中的 TDengine @@ -195,7 +228,7 @@ $ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine 526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql -{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0} 
+{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} ``` - 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。 @@ -206,6 +239,5 @@ $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql 2,直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。 ```bash -$ docker exec -it 526aa188da /bin/bash +$ docker exec -it tdengine /bin/bash ``` - From 16fe1e27bf5f5e62ab98527d36819e3818bcd102 Mon Sep 17 00:00:00 2001 From: wpan Date: Wed, 18 Aug 2021 16:45:59 +0800 Subject: [PATCH 155/239] fix nested query bug --- src/client/src/tscSubquery.c | 13 +++++++++++-- src/client/src/tscUtil.c | 33 ++++++++++++++------------------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index d16d744393..816347cecd 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2717,10 +2717,19 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { int32_t code = pParentSql->res.code; - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + SSqlObj *userSql = NULL; + if (pParentSql->param) { + userSql = 
((SRetrieveSupport*)pParentSql->param)->pParentSql; + } + + if (userSql == NULL) { + userSql = pParentSql; + } if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) { - tscFreeRetrieveSup(pParentSql); + if (userSql != pParentSql) { + tscFreeRetrieveSup(pParentSql); + } tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 06b61d443d..2ee6a85431 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3747,8 +3747,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { int32_t index = ps->subqueryIndex; bool ret = subAndCheckDone(pSql, pParentSql, index); - tfree(ps); - pSql->param = NULL; + tscFreeRetrieveSup(pSql); if (!ret) { tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index); @@ -3758,41 +3757,37 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { // todo refactor tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self); - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - - tscFreeRetrieveSup(pParentSql); - - if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry)) { + if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) { tscAsyncResultOnError(pParentSql); return; } - tscFreeSubobj(userSql); - tfree(userSql->pSubs); + tscFreeSubobj(pParentSql); + tfree(pParentSql->pSubs); - userSql->res.code = TSDB_CODE_SUCCESS; - userSql->retry++; + pParentSql->res.code = TSDB_CODE_SUCCESS; + pParentSql->retry++; - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, - tstrerror(code), userSql->retry); + tscDebug("0x%"PRIx64" retry 
parse sql and send query, prev error: %s, retry:%d", pParentSql->self, + tstrerror(code), pParentSql->retry); - tscResetSqlCmd(&userSql->cmd, true); + tscResetSqlCmd(&pParentSql->cmd, true); - code = tsParseSql(userSql, true); + code = tsParseSql(pParentSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - userSql->res.code = code; - tscAsyncResultOnError(userSql); + pParentSql->res.code = code; + tscAsyncResultOnError(pParentSql); return; } - SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd); - executeQuery(userSql, pQueryInfo); + executeQuery(pParentSql, pQueryInfo); return; } From 6dde4a6b36a78f71bbfa2308918be517d4bc31b5 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Wed, 18 Aug 2021 16:59:25 +0800 Subject: [PATCH 156/239] [TD-2639] : update parameter BLOCKS value in example to be 6. --- documentation20/cn/04.model/docs.md | 2 +- documentation20/cn/11.administrator/docs.md | 2 +- documentation20/en/04.model/docs.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation20/cn/04.model/docs.md b/documentation20/cn/04.model/docs.md index af4f85b5eb..e44de69024 100644 --- a/documentation20/cn/04.model/docs.md +++ b/documentation20/cn/04.model/docs.md @@ -11,7 +11,7 @@ TDengine采用关系型数据模型,需要建库、建表。因此对于一个 不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如: ```mysql -CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1; +CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; ``` 上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4,允许更新数据。详细的语法及参数请见 [TAOS SQL 的数据管理](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节。 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 
a5a916d346..ad4e17b7f0 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -230,7 +230,7 @@ taosd -C | 1 | days | 天 | 一个数据文件存储数据的时间跨度 | | 10 | | 2 | keep | 天 | (可通过 alter database 修改)数据库中数据保留的天数。 | 3650 | | 3 | cache | MB | 内存块的大小 | | 16 | -| 4 | blocks | | (可通过 alter database 修改)每个 VNODE(TSDB)中有多少个 cache 大小的内存块。因此一个 VNODE 使用的内存大小粗略为(cache * blocks)。 | | 4 | +| 4 | blocks | | (可通过 alter database 修改)每个 VNODE(TSDB)中有多少个 cache 大小的内存块。因此一个 VNODE 使用的内存大小粗略为(cache * blocks)。 | | 6 | | 5 | quorum | | (可通过 alter database 修改)多副本环境下指令执行的确认数要求 | 1-2 | 1 | | 6 | minRows | | 文件块中记录的最小条数 | | 100 | | 7 | maxRows | | 文件块中记录的最大条数 | | 4096 | diff --git a/documentation20/en/04.model/docs.md b/documentation20/en/04.model/docs.md index 2e69054fa1..1bf6c67878 100644 --- a/documentation20/en/04.model/docs.md +++ b/documentation20/en/04.model/docs.md @@ -9,7 +9,7 @@ Please watch the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1945. Different types of data collection points often have different data characteristics, including frequency of data collecting, length of data retention time, number of replicas, size of data blocks, whether to update data or not, and so on. To ensure TDengine working with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with different storage strategies. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time accuracy, max and min number of records in a file block, whether it is compressed or not, and number of days a data file will be overwritten. 
For example: ```mysql -CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1; +CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; ``` The above statement will create a database named “power”. The data of this database will be kept for 365 days (it will be automatically deleted 365 days later), one data file created per 10 days, and the number of memory blocks is 4 for data updating. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management). From 34f4f8ca012eb9ee712f33a71b3f40262ede5f7e Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 18 Aug 2021 17:31:20 +0800 Subject: [PATCH 157/239] [TD-6011]: where clause including 'bool' Keyword causes core dump --- src/common/src/tvariant.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index a491df6f98..ca3bb956a2 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -38,12 +38,12 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) { switch (token->type) { case TSDB_DATA_TYPE_BOOL: { - int32_t k = strncasecmp(token->z, "true", 4); - if (k == 0) { + if (strncasecmp(token->z, "true", 4) == 0) { pVar->i64 = TSDB_TRUE; - } else { - assert(strncasecmp(token->z, "false", 5) == 0); + } else if (strncasecmp(token->z, "false", 5) == 0) { pVar->i64 = TSDB_FALSE; + } else { + return; } break; From 4286f85718684d4e260615316ced9f49fe9caab4 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 18 Aug 2021 18:12:53 +0800 Subject: [PATCH 158/239] [TD-6165]: use memcpy to replace wcsncmp for nchar type comparision --- src/util/src/tcompare.c | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 1315f684bc..36480418c9 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -199,16 +199,7 @@ int32_t compareLenPrefixedWStr(const 
void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - memcpy(pLeftTerm, varDataVal(pLeft), len1); - memcpy(pRightTerm, varDataVal(pRight), len2); - - int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); - - tfree(pLeftTerm); - tfree(pRightTerm); - + int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1); if (ret == 0) { return 0; } else { @@ -517,17 +508,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } - - char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); - char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); - memcpy(t1_term, t1->data, t1->len); - memcpy(t2_term, t2->data, t2->len); - - int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); - - tfree(t1_term); - tfree(t2_term); - + int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len); if (ret == 0) { return ret; } From e41ae9090835b2db841bebac824333f2cc858534 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Wed, 18 Aug 2021 19:23:42 +0800 Subject: [PATCH 159/239] [TD-4181] : fix some minor format things. 
--- documentation20/cn/02.getting-started/docs.md | 6 +-- documentation20/cn/03.architecture/docs.md | 33 ++++++------ documentation20/cn/04.model/docs.md | 2 +- documentation20/cn/05.insert/docs.md | 32 ++++++------ .../cn/07.advanced-features/docs.md | 4 +- .../cn/08.connector/01.java/docs.md | 19 ++----- documentation20/cn/08.connector/docs.md | 52 +++++++++---------- documentation20/cn/10.cluster/docs.md | 4 +- documentation20/cn/11.administrator/docs.md | 12 ++--- documentation20/cn/12.taos-sql/docs.md | 34 ++++-------- documentation20/cn/13.faq/docs.md | 18 +++---- 11 files changed, 91 insertions(+), 125 deletions(-) diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index f376d1b30c..dd7c20fe18 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -105,7 +105,7 @@ $ taos -h h1.taos.com -s "use db; show tables;" **运行 SQL 命令脚本** -TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本. 
+TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本。 ```mysql taos> source ; @@ -166,14 +166,12 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); **Note:** taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。 - ## 客户端和报警模块 如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux 和 Windows 安装包可以在 [这里](https://www.taosdata.com/cn/getting-started/#客户端) 下载。 报警模块的 Linux 和 Windows 安装包请在 [所有下载链接](https://www.taosdata.com/cn/all-downloads/) 页面搜索“TDengine Alert Linux”章节或“TDengine Alert Windows”章节进行下载。使用方法请参考 [报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)。 - ## 支持平台列表 ### TDengine 服务器支持的平台列表 @@ -193,8 +191,6 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 - - ### TDengine 客户端和连接器支持的平台列表 目前 TDengine 的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。 diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md index b53938dbec..8adafc73c2 100644 --- a/documentation20/cn/03.architecture/docs.md +++ b/documentation20/cn/03.architecture/docs.md @@ -161,17 +161,17 @@ TDengine 分布式架构的逻辑结构图如下: 一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过taosc的API与TDengine集群进行互动。下面对每个逻辑单元进行简要介绍。 -**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文[《一篇文章说清楚TDengine的FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。 +**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文[《一篇文章说清楚TDengine的FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。 -**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(vnode),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point 
(EP)决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。 +**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(vnode),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP)决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。 -**虚拟节点(vnode)**: 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中V2, V3, V4等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个DB,但一个DB可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的schema、标签值等。一个虚拟节点由所属的数据节点的EP,以及所属的VGroup ID在系统内唯一标识,由管理节点创建并管理。 +**虚拟节点(vnode):** 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中V2, V3, V4等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个DB,但一个DB可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的schema、标签值等。一个虚拟节点由所属的数据节点的EP,以及所属的VGroup ID在系统内唯一标识,由管理节点创建并管理。 -**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(开源版最多不超过3个) mnode,它们自动构建成为一个虚拟管理节点组(图中M0, M1, M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步, 任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个dnode上至多有一个mnode,由所属的数据节点的EP来唯一标识。每个dnode通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的EP。 +**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(开源版最多不超过3个) mnode,它们自动构建成为一个虚拟管理节点组(图中M0, M1, M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步, 任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个dnode上至多有一个mnode,由所属的数据节点的EP来唯一标识。每个dnode通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的EP。 -**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vnode group)来保证系统的高可靠。虚拟节点组内采取master/slave的方式进行管理。写操作只能在 master vnode 
上进行,系统采用异步复制的方式将数据同步到 slave vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个DB的副本数为N,系统必须有至少N个数据节点。副本数在创建DB时通过参数 replica 可以指定,缺省为1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的ID,VGroup ID。如果两个虚拟节点的vnode group ID相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID是永远不变的,即使一个虚拟节点组被删除,它的ID也不会被收回重复利用。 +**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vnode group)来保证系统的高可靠。虚拟节点组内采取master/slave的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个DB的副本数为N,系统必须有至少N个数据节点。副本数在创建DB时通过参数 replica 可以指定,缺省为1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的ID,VGroup ID。如果两个虚拟节点的vnode group ID相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID是永远不变的,即使一个虚拟节点组被删除,它的ID也不会被收回重复利用。 -**TAOSC:** taosc是TDengine给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供C/C++语言原生接口,内嵌于JDBC、C#、Python、Go、Node.js语言连接库里。应用都是通过taosc而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于JDBC, C/C++/C#/Python/Go/Node.js接口而言,这个模块是在应用所处的物理节点上运行。同时,为支持全分布式的RESTful接口,taosc在TDengine集群的每个dnode上都有一运行实例。 +**TAOSC:** taosc是TDengine给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供C/C++语言原生接口,内嵌于JDBC、C#、Python、Go、Node.js语言连接库里。应用都是通过taosc而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于JDBC、C/C++、C#、Python、Go、Node.js接口而言,这个模块是在应用所处的物理节点上运行。同时,为支持全分布式的RESTful接口,taosc在TDengine集群的每个dnode上都有一运行实例。 ### 节点之间的通讯 @@ -181,11 +181,9 @@ TDengine 分布式架构的逻辑结构图如下: **端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。 -集群内数据节点之间的数据复制操作占用一个TCP端口,是serverPort+10。 - -集群数据节点对外提供RESTful服务占用一个TCP端口,是serverPort+11。 - -集群内数据节点与Arbitrator节点之间通讯占用一个TCP端口,是serverPort+12。 +- 集群内数据节点之间的数据复制操作占用一个TCP端口,是serverPort+10。 +- 集群数据节点对外提供RESTful服务占用一个TCP端口,是serverPort+11。 +- 集群内数据节点与Arbitrator节点之间通讯占用一个TCP端口,是serverPort+12。 
因此一个数据节点总的端口范围为serverPort到serverPort+12,总共13个TCP/UDP端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的serverPort。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port)) @@ -193,11 +191,9 @@ TDengine 分布式架构的逻辑结构图如下: **集群内部通讯:**各个数据节点之间通过TCP/UDP进行连接。一个数据节点启动时,将获取mnode所在的dnode的EP信息,然后与系统中的mnode建立起连接,交换信息。获取mnode的EP信息有三步: -1:检查mnodeEpSet.json文件是否存在,如果不存在或不能正常打开获得mnode EP信息,进入第二步; - -2:检查系统配置文件taos.cfg,获取节点配置参数firstEp、secondEp(这两个参数指定的节点可以是不带mnode的普通节点,这样的话,节点被连接时会尝试重定向到mnode节点),如果不存在或者taos.cfg里没有这两个配置参数,或无效,进入第三步; - -3:将自己的EP设为mnode EP,并独立运行起来。 +1. 检查mnodeEpSet.json文件是否存在,如果不存在或不能正常打开获得mnode EP信息,进入第二步; +2. 检查系统配置文件taos.cfg,获取节点配置参数firstEp、secondEp(这两个参数指定的节点可以是不带mnode的普通节点,这样的话,节点被连接时会尝试重定向到mnode节点),如果不存在或者taos.cfg里没有这两个配置参数,或无效,进入第三步; +3. 将自己的EP设为mnode EP,并独立运行起来。 获取mnode EP列表后,数据节点发起连接,如果连接成功,则成功加入进工作的集群,如果不成功,则尝试mnode EP列表中的下一个。如果都尝试了,但连接都仍然失败,则休眠几秒后,再进行尝试。 @@ -271,6 +267,7 @@ TDengine除vnode分片之外,还对时序数据按照时间段进行分区。 当新的数据节点被添加进集群,因为新的计算和存储被添加进来,系统也将自动启动负载均衡流程。 负载均衡过程无需任何人工干预,应用也无需重启,将自动连接新的节点,完全透明。 + **提示:负载均衡由参数balance控制,决定开启/关闭自动负载均衡。** ## 数据写入与复制流程 @@ -293,13 +290,13 @@ Master Vnode遵循下面的写入流程: ### Slave Vnode写入流程 -对于slave vnode, 写入流程是: +对于slave vnode,写入流程是: ![TDengine Slave写入流程](page://images/architecture/write_slave.png)
图 4 TDengine Slave写入流程
1. slave vnode收到Master vnode转发了的数据插入请求。检查last version是否与master一致,如果一致,进入下一步。如果不一致,需要进入同步状态。 -2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失; +2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失。 3. 写入内存,更新内存中的skip list。 与master vnode相比,slave vnode不存在转发环节,也不存在回复确认环节,少了两步。但写内存与WAL是完全一样的。 diff --git a/documentation20/cn/04.model/docs.md b/documentation20/cn/04.model/docs.md index e44de69024..ccdca64c10 100644 --- a/documentation20/cn/04.model/docs.md +++ b/documentation20/cn/04.model/docs.md @@ -65,7 +65,7 @@ TDengine建议将数据采集点的全局唯一ID作为表名(比如设备序列 INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); ``` -上述SQL语句将记录 (now, 10.2, 219, 0.32) 插入表d1001。如果表d1001还未创建,则使用超级表meters做模板自动创建,同时打上标签值“Beijing.Chaoyang", 2。 +上述SQL语句将记录 (now, 10.2, 219, 0.32) 插入表d1001。如果表d1001还未创建,则使用超级表meters做模板自动创建,同时打上标签值 `“Beijing.Chaoyang", 2`。 关于自动建表的详细语法请参见 [插入记录时自动建表](https://www.taosdata.com/cn/documentation/taos-sql#auto_create_table) 章节。 diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index b20b1e111d..f055b0c25b 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -35,7 +35,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - Linux操作系统的服务器 -- 安装好Golang, 1.10版本以上 +- 安装好Golang,1.10版本以上 - 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器(可以与TDengine在同一台服务器,或者不同服务器) Bailongma项目中有一个文件夹blm_prometheus,存放了prometheus的写入API程序。编译过程如下: @@ -48,13 +48,15 @@ go build ### 安装Prometheus -通过Prometheus的官网下载安装。[下载地址](https://prometheus.io/download/) +通过Prometheus的官网下载安装。具体请见:[下载地址](https://prometheus.io/download/)。 ### 配置Prometheus 
-参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的部分,增加以下配置 +参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的部分,增加以下配置: -- url: bailongma API服务提供的URL,参考下面的blm_prometheus启动示例章节 +``` + - url: "bailongma API服务提供的URL"(参考下面的blm_prometheus启动示例章节) +``` 启动Prometheus后,可以通过taos客户端查询确认数据是否成功写入。 @@ -62,7 +64,7 @@ go build blm_prometheus程序有以下选项,在启动blm_prometheus程序时可以通过设定这些选项来设定blm_prometheus的配置。 ```bash --tdengine-name -如果TDengine安装在一台具备域名的服务器上,也可以通过配置TDengine的域名来访问TDengine。在K8S环境下,可以配置成TDengine所运行的service name +如果TDengine安装在一台具备域名的服务器上,也可以通过配置TDengine的域名来访问TDengine。在K8S环境下,可以配置成TDengine所运行的service name。 --batch-size blm_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。 @@ -71,10 +73,10 @@ blm_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求 设置在TDengine中创建的数据库名称,blm_prometheus会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。 --dbuser -设置访问TDengine的用户名,缺省值是'root' +设置访问TDengine的用户名,缺省值是'root'。 --dbpassword -设置访问TDengine的密码,缺省值是'taosdata' +设置访问TDengine的密码,缺省值是'taosdata'。 --port blm_prometheus对prometheus提供服务的端口号。 @@ -125,7 +127,7 @@ select * from apiserver_request_latencies_bucket; 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - Linux操作系统的服务器 -- 安装好Golang, 1.10版本以上 +- 安装好Golang,1.10版本以上 - 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器(可以与TDengine在同一台服务器,或者不同服务器) Bailongma项目中有一个文件夹blm_telegraf,存放了Telegraf的写入API程序。编译过程如下: @@ -139,7 +141,7 @@ go build ### 安装Telegraf -目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads +目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads 。 ### 配置Telegraf @@ -153,7 +155,7 @@ go build 在agent部分: -- hostname: 区分不同采集设备的机器名称,需确保其唯一性 +- hostname: 
区分不同采集设备的机器名称,需确保其唯一性。 - metric_batch_size: 100,允许Telegraf每批次写入记录最大数量,增大其数量可以降低Telegraf的请求发送频率。 关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。 @@ -163,7 +165,7 @@ blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过 ```bash --host -TDengine服务端的IP地址,缺省值为空 +TDengine服务端的IP地址,缺省值为空。 --batch-size blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。 @@ -172,10 +174,10 @@ blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求, 设置在TDengine中创建的数据库名称,blm_telegraf会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。 --dbuser -设置访问TDengine的用户名,缺省值是'root' +设置访问TDengine的用户名,缺省值是'root'。 --dbpassword -设置访问TDengine的密码,缺省值是'taosdata' +设置访问TDengine的密码,缺省值是'taosdata'。 --port blm_telegraf对telegraf提供服务的端口号。 @@ -183,12 +185,12 @@ blm_telegraf对telegraf提供服务的端口号。 ### 启动示例 -通过以下命令启动一个blm_telegraf的API服务 +通过以下命令启动一个blm_telegraf的API服务: ```bash ./blm_telegraf -host 127.0.0.1 -port 8089 ``` -假设blm_telegraf所在服务器的IP地址为"10.1.2.3",则在telegraf的配置文件中, 在output plugins部分,增加[[outputs.http]]配置项: +假设blm_telegraf所在服务器的IP地址为"10.1.2.3",则在telegraf的配置文件中, 在output plugins部分,增加[[outputs.http]]配置项: ```yaml url = "http://10.1.2.3:8089/telegraf" diff --git a/documentation20/cn/07.advanced-features/docs.md b/documentation20/cn/07.advanced-features/docs.md index 3661d68427..32e7a2aabd 100644 --- a/documentation20/cn/07.advanced-features/docs.md +++ b/documentation20/cn/07.advanced-features/docs.md @@ -35,13 +35,13 @@ select avg(voltage) from meters interval(1m) sliding(30s); select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s); ``` -这样做没有问题,但TDengine提供了更简单的方法,只要在最初的查询语句前面加上 `create table {tableName} as ` 就可以了, 例如: +这样做没有问题,但TDengine提供了更简单的方法,只要在最初的查询语句前面加上 `create table {tableName} as ` 就可以了,例如: ```sql create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s); ``` -会自动创建一个名为 `avg_vol` 的新表,然后每隔30秒,TDengine会增量执行 `as` 后面的 SQL 语句,并将查询结果写入这个表中,用户程序后续只要从 `avg_vol` 中查询数据即可。 例如: +会自动创建一个名为 `avg_vol` 
的新表,然后每隔30秒,TDengine会增量执行 `as` 后面的 SQL 语句,并将查询结果写入这个表中,用户程序后续只要从 `avg_vol` 中查询数据即可。例如: ```mysql taos> select * from avg_vol; diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index e1a5654871..ab9af42cf8 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -52,7 +52,6 @@ JDBCConnectorChecker JDBC安装校验源程序及jar包 Springbootdemo springboot示例源程序 SpringJdbcTemplate SpringJDBC模板 - ### 安装验证 @@ -65,7 +64,6 @@ java -jar JDBCConnectorChecker.jar -host 验证通过将打印出成功信息。 - ## Java连接器的使用 `taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 @@ -85,7 +83,6 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 * 目前不支持嵌套查询(nested query)。 * 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。 - ### JDBC-JNI和JDBC-RESTful的对比 @@ -199,8 +196,6 @@ url中的配置参数如下: * locale:客户端语言环境,默认值系统当前 locale。 * timezone:客户端使用的时区,默认值为系统当前时区。 - - #### 指定URL和Properties获取连接 除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数,如下所示: @@ -229,8 +224,6 @@ properties 中的配置参数如下: * TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。 * TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。 - - #### 使用客户端配置文件建立连接 当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。如下所示: @@ -484,8 +477,6 @@ conn.close(); > `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 - - ## 与连接池使用 **HikariCP** @@ -530,7 +521,7 @@ conn.close(); ``` > 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 -> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP) +> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。 **Druid** @@ -571,9 +562,9 @@ public static void main(String[] args) throws Exception { } ``` -> 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid) +> 更多 druid 
使用问题请查看[官方说明](https://github.com/alibaba/druid)。 -**注意事项** +**注意事项:** * TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。 如下所示,`select server_status()` 执行成功会返回 `1`。 @@ -585,15 +576,11 @@ server_status()| Query OK, 1 row(s) in set (0.000141s) ``` - - ## 在框架中使用 * Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) * Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) - - ## 常见问题 * java.lang.UnsatisfiedLinkError: no taos in java.library.path diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 0397c61f75..364961ca63 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -58,7 +58,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 ​ *connector*: 各种编程语言连接器(go/grafanaplugin/nodejs/python/JDBC) ​ *examples*: 各种编程语言的示例程序(c/C#/go/JDBC/MATLAB/python/R) -运行install_client.sh进行安装 +运行install_client.sh进行安装。 **4. 配置taos.cfg** @@ -95,9 +95,8 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 **提示:** -**1. 如利用FQDN连接服务器,必须确认本机网络环境DNS已配置好,或在hosts文件中添加FQDN寻址记录,如编辑C:\Windows\system32\drivers\etc\hosts,添加如下的记录:** **192.168.1.99 h1.taos.com** - -**2.卸载:运行unins000.exe可卸载TDengine应用驱动。** +1. 
**如利用FQDN连接服务器,必须确认本机网络环境DNS已配置好,或在hosts文件中添加FQDN寻址记录,如编辑C:\Windows\system32\drivers\etc\hosts,添加如下的记录:`192.168.1.99 h1.taos.com` ** +2.**卸载:运行unins000.exe可卸载TDengine应用驱动。** ### 安装验证 @@ -408,11 +407,11 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 - `TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *))` 该API用来创建数据流,其中: - * taos:已经建立好的数据库连接 - * sql:SQL查询语句(仅能使用查询语句) + * taos:已经建立好的数据库连接。 + * sql:SQL查询语句(仅能使用查询语句)。 * fp:用户定义的回调函数指针,每次流式计算完成后,TDengine将查询的结果(TAOS_ROW)、查询状态(TAOS_RES)、用户定义参数(PARAM)传递给回调函数,在回调函数内,用户可以使用taos_num_fields获取结果集列数,taos_fetch_fields获取结果集每列数据的类型。 * stime:是流式计算开始的时间。如果是“64位整数最小值”,表示从现在开始;如果不为“64位整数最小值”,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数)。 - * param:是应用提供的用于回调的一个参数,回调时,提供给应用 + * param:是应用提供的用于回调的一个参数,回调时,提供给应用。 * callback: 第二个回调函数,会在连续查询自动停止时被调用。 返回值为NULL,表示创建失败;返回值不为空,表示成功。 @@ -458,7 +457,6 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 - ## Python Connector Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1963.html) @@ -513,13 +511,12 @@ python -m pip install . 
- 通过TDengineConnection对象的 .cursor()方法获取一个新的游标对象,这个游标对象必须保证每个线程独享。 -- 通过游标对象的execute()方法,执行写入或查询的SQL语句 +- 通过游标对象的execute()方法,执行写入或查询的SQL语句。 -- 如果执行的是写入语句,execute返回的是成功写入的行数信息affected rows +- 如果执行的是写入语句,execute返回的是成功写入的行数信息affected rows。 - 如果执行的是查询语句,则execute执行成功后,需要通过fetchall方法去拉取结果集。 具体方法可以参考示例代码。 - ### 安装验证 运行如下指令: @@ -531,7 +528,6 @@ python3 PythonChecker.py -host 验证通过将打印出成功信息。 - ### Python连接器的使用 #### 代码示例 @@ -649,8 +645,8 @@ conn.close() - 通过taos.connect获取TDengineConnection对象,这个对象可以一个程序只申请一个,在多线程中共享。 - 通过TDengineConnection对象的 .cursor()方法获取一个新的游标对象,这个游标对象必须保证每个线程独享。 -- 通过游标对象的execute()方法,执行写入或查询的SQL语句 -- 如果执行的是写入语句,execute返回的是成功写入的行数信息affected rows +- 通过游标对象的execute()方法,执行写入或查询的SQL语句。 +- 如果执行的是写入语句,execute返回的是成功写入的行数信息affected rows。 - 如果执行的是查询语句,则execute执行成功后,需要通过fetchall方法去拉取结果集。 具体方法可以参考示例代码。 @@ -888,7 +884,7 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 ### 重要配置项 -下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效 +下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。(注意:配置修改后,需要重启taosd服务才能生效) - 对外提供RESTful服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改) - httpMaxThreads: 启动的线程数量,默认为2(2.0.17.0版本开始,默认值改为CPU核数的一半向下取整) @@ -927,7 +923,7 @@ C#Checker.exe -h 在Windows系统上,C#应用程序可以使用TDengine的C#连接器接口来执行所有数据库的操作。使用的具体步骤如下所示: 1. 将接口文件TDengineDrivercs.cs加入到应用程序所在的项目空间中。 -2. 用户可以参考TDengineTest.cs来定义数据库连接参数,以及如何执行数据插入、查询等操作; +2. 
用户可以参考TDengineTest.cs来定义数据库连接参数,以及如何执行数据插入、查询等操作。 此接口需要用到taos.dll文件,所以在执行应用程序前,拷贝Windows客户端install_directory/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件,即可访问TDengine数据库并做插入、查询等操作。 @@ -960,13 +956,13 @@ Go连接器支持的系统有: 安装前准备: -- 已安装好TDengine应用驱动,参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) +- 已安装好TDengine应用驱动,参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 ### 示例程序 使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。 -示例程序源码也位于安装目录下的 examples/go/taosdemo.go 文件中 +示例程序源码也位于安装目录下的 examples/go/taosdemo.go 文件中。 **提示:建议Go版本是1.13及以上,并开启模块支持:** ```sh @@ -1035,7 +1031,7 @@ Node.js连接器支持的系统有: | **OS类型** | Linux | Win64 | Win32 | Linux | Linux | | **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** | -Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html) +Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html)。 ### 安装准备 @@ -1045,14 +1041,14 @@ Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020 用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下: -首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器. 
+首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器。 ```bash npm install td2.0-connector ``` -我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下 +我们建议用户使用npm 安装node.js连接器。如果您没有安装npm,可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下。 -我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件: +我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js连接器之前,还需要根据具体操作系统来安装下文提到的一些依赖工具。 ### Linux @@ -1065,17 +1061,17 @@ npm install td2.0-connector #### 安装方法1 -使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具 +使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具。 #### 安装方法2 -手动安装以下工具: +手动安装以下工具: - 安装Visual Studio相关:[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) - 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7` - 进入`cmd`命令行界面,`npm config set msvs_version 2017` -如果以上步骤不能成功执行,可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) +如果以上步骤不能成功执行,可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)。 如果在Windows 10 ARM 上使用ARM64 Node.js,还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64"。 @@ -1148,7 +1144,7 @@ TDengine目前还不支持update和delete语句。 var query = cursor.query('show databases;') ``` -查询的结果可以通过 `query.execute()` 函数获取并打印出来 +查询的结果可以通过 `query.execute()` 函数获取并打印出来。 ```javascript 
var promise = query.execute(); @@ -1196,6 +1192,6 @@ promise2.then(function(result) { ### 示例 -[node-example.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例 +[node-example.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例。 -[node-example-raw.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`. +[node-example-raw.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`。 diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index ecc9352ba6..f995597db0 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -12,7 +12,7 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预 **第零步**:规划集群所有物理节点的FQDN,将规划好的FQDN分别添加到每个物理节点的/etc/hostname;修改每个物理节点的/etc/hosts,将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS,请联系网络管理员在DNS上做好相关配置】 -**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html ) +**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 **注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(`rm -rf /var/lib/taos/*`); **注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是 Host 文件。 @@ -25,7 +25,7 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预 1. 每个物理节点上执行命令`hostname -f`,查看和确认所有节点的hostname是不相同的(应用驱动所在节点无需做此项检查); 2. 每个物理节点上执行`ping host`,其中host是其他物理节点的hostname,看能否ping通其它物理节点;如果不能ping通,需要检查网络设置,或/etc/hosts文件(Windows系统默认路径为C:\Windows\system32\drivers\etc\hosts),或DNS的配置。如果无法ping通,是无法组成集群的; 3. 
从应用运行的物理节点,ping taosd运行的数据节点,如果无法ping通,应用是无法连接taosd的,请检查应用所在物理节点的DNS设置或hosts文件; -4. 每个数据节点的End Point就是输出的hostname外加端口号,比如h1.taosdata.com:6030 +4. 每个数据节点的End Point就是输出的hostname外加端口号,比如`h1.taosdata.com:6030`。 **第五步**:修改TDengine的配置文件(所有节点的文件/etc/taos/taos.cfg都需要修改)。假设准备启动的第一个数据节点End Point为 h1.taosdata.com:6030,其与集群配置相关参数如下: diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index ad4e17b7f0..c4bdecf294 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -73,7 +73,7 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable 因为 TDengine 具有很好的水平扩展能力,根据总量,再根据单个物理机或虚拟机的资源,就可以轻松决定需要购置多少台物理机或虚拟机了。 -**立即计算 CPU、内存、存储,请参见:[资源估算方法](https://www.taosdata.com/config/config.html)** +**立即计算 CPU、内存、存储,请参见:[资源估算方法](https://www.taosdata.com/config/config.html)。** ## 容错和灾备 @@ -433,7 +433,7 @@ SHOW USERS; 显示所有用户 -**注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身 +**注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身。 ## 数据导入 @@ -445,7 +445,7 @@ TDengine的shell支持source filename命令,用于批量运行文件中的SQL **按数据文件导入** -TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。CSV文件只属于一张表且CSV文件中的数据格式需与要导入表的结构相同, 在导入的时候,其语法如下 +TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。CSV文件只属于一张表且CSV文件中的数据格式需与要导入表的结构相同,在导入的时候,其语法如下: ```mysql insert into tb1 file 'path/data.csv'; @@ -487,7 +487,7 @@ Query OK, 9 row(s) affected (0.004763s) **taosdump工具导入** -TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html) +TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html)。 ## 数据导出 @@ -627,7 +627,7 @@ Active: inactive (dead) ...... ``` -卸载 TDengine,只需要执行如下命令 +卸载 TDengine,只需要执行如下命令: ``` rmtaos ``` @@ -724,7 +724,7 @@ rmtaos 2. 服务端命令行输入:`taos -n server -P ` 以服务端身份启动对端口 port 为基准端口的监听 3. 
客户端命令行输入:`taos -n client -h -P ` 以客户端身份启动对指定的服务器、指定的端口发送测试包 -服务端运行正常的话会输出以下信息 +服务端运行正常的话会输出以下信息: ```bash # taos -n server -P 6000 diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 07ed1a5eae..372070d081 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -9,7 +9,7 @@ TAOS SQL 不支持关键字的缩写,例如 DESCRIBE 不能缩写为 DESC。 本章节 SQL 语法遵循如下约定: - < > 里的内容是用户需要输入的,但不要输入 <> 本身 -- [ ] 表示内容为可选项,但不能输入 [] 本身 +- \[ \] 表示内容为可选项,但不能输入 [] 本身 - | 表示多选一,选择其中一个即可,但不能输入 | 本身 - … 表示前面的项可重复多个 @@ -265,7 +265,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 ```mysql CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]); ``` - 创建 STable,与创建表的 SQL 语法相似,但需要指定 TAGS 字段的名称和类型 + 创建 STable,与创建表的 SQL 语法相似,但需要指定 TAGS 字段的名称和类型。 说明: @@ -728,18 +728,6 @@ Query OK, 1 row(s) in set (0.001091s) 4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 5. 
从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 - - ### UNION ALL 操作符 @@ -1025,9 +1013,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 1)如果要返回各个列的首个(时间戳最小)非NULL值,可以使用FIRST(\*); - 2) 如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL; + 2)如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL; - 3) 如果结果集中所有列全部为NULL值,则不返回结果。 + 3)如果结果集中所有列全部为NULL值,则不返回结果。 示例: ```mysql @@ -1187,7 +1175,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 适用于:**表、超级表**。 - 说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数 + 说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数。 ```mysql taos> SELECT APERCENTILE(current, 20) FROM d1001; @@ -1416,13 +1404,13 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P ## TAOS SQL 边界限制 -- 数据库名最大长度为 32 -- 表名最大长度为 192,每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) -- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳 -- 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符 -- SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M +- 数据库名最大长度为 32。 +- 表名最大长度为 192,每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 +- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。 +- 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符。 +- SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M。 - SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。 -- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 +- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。 ## TAOS SQL其他约定 diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index 300ff27fe4..d89b2adeb8 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -26,15 +26,15 @@ ## 2. Windows平台下JDBCDriver找不到动态链接库,怎么办? 
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html) +请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。 ## 3. 创建数据表时提示more dnodes are needed -请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html) +请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。 ## 4. 如何让TDengine crash时生成core文件? -请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html) +请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。 ## 5. 遇到错误“Unable to establish connection”, 我怎么办? @@ -49,7 +49,7 @@ 3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd* -4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 +4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name —— 可在服务器上执行Linux命令hostname -f获得),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件。如果部署的是TDengine集群,客户端需要能ping通所有集群节点的FQDN。 @@ -74,16 +74,16 @@ 产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查: -1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 -2. 如果网络配置有DNS server, 请检查是否正常工作 -3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。 +1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) +2. 如果网络配置有DNS server,请检查是否正常工作 +3. 如果网络没有配置DNS server,请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址 4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法连接服务器的 ## 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误 如果你确认语法正确,2.0之前版本,请检查SQL语句长度是否超过64K。如果超过,也会返回这个错误。 -## 8. 是否支持validation queries? +## 8. 是否支持validation queries? 
TDengine还没有一组专用的validation queries。然而建议你使用系统监测的数据库”log"来做。 @@ -137,7 +137,7 @@ Connection = DriverManager.getConnection(url, properties); TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A移动机器B时,注意如下两件事: -- 2.0.0.0 至 2.0.6.x 的版本,重新配置机器B的hostname为机器A的hostname +- 2.0.0.0 至 2.0.6.x 的版本,重新配置机器B的hostname为机器A的hostname。 - 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。 - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 From 3155e72501e69d7ff3936cdf4927cd5e9215b262 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 18 Aug 2021 19:43:44 +0800 Subject: [PATCH 160/239] enhance --- src/client/src/tscParseInsert.c | 398 +++----------------------------- 1 file changed, 29 insertions(+), 369 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index dab0dff1fc..793cee4ca2 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -448,370 +448,6 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) { return TSDB_CODE_SUCCESS; } -static int32_t tsParseOneColumnOld(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, - bool primaryKey, int16_t timePrec) { - int64_t iv; - int32_t ret; - char * endptr = NULL; - - if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { - return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); - } - - switch (pSchema->type) { - case TSDB_DATA_TYPE_BOOL: { // bool - if (isNullStr(pToken)) { - *((uint8_t *)payload) = TSDB_DATA_BOOL_NULL; - } else { - if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { - if (strncmp(pToken->z, "true", pToken->n) == 0) { - *(uint8_t *)payload = TSDB_TRUE; - } else if (strncmp(pToken->z, "false", pToken->n) == 0) { - *(uint8_t *)payload = TSDB_FALSE; - } else { - return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); - } - } else if (pToken->type == TK_INTEGER) { - iv = strtoll(pToken->z, NULL, 10); - *(uint8_t *)payload = 
(int8_t)((iv == 0) ? TSDB_FALSE : TSDB_TRUE); - } else if (pToken->type == TK_FLOAT) { - double dv = strtod(pToken->z, NULL); - *(uint8_t *)payload = (int8_t)((dv == 0) ? TSDB_FALSE : TSDB_TRUE); - } else { - return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); - } - } - break; - } - - case TSDB_DATA_TYPE_TINYINT: - if (isNullStr(pToken)) { - *((uint8_t *)payload) = TSDB_DATA_TINYINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); - } else if (!IS_VALID_TINYINT(iv)) { - return tscInvalidOperationMsg(msg, "data overflow", pToken->z); - } - - *((uint8_t *)payload) = (uint8_t)iv; - } - - break; - - case TSDB_DATA_TYPE_UTINYINT: - if (isNullStr(pToken)) { - *((uint8_t *)payload) = TSDB_DATA_UTINYINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); - } else if (!IS_VALID_UTINYINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); - } - - *((uint8_t *)payload) = (uint8_t)iv; - } - - break; - - case TSDB_DATA_TYPE_SMALLINT: - if (isNullStr(pToken)) { - *((int16_t *)payload) = TSDB_DATA_SMALLINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); - } else if (!IS_VALID_SMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); - } - - *((int16_t *)payload) = (int16_t)iv; - } - - break; - - case TSDB_DATA_TYPE_USMALLINT: - if (isNullStr(pToken)) { - *((uint16_t *)payload) = TSDB_DATA_USMALLINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, 
"invalid unsigned smallint data", pToken->z); - } else if (!IS_VALID_USMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); - } - - *((uint16_t *)payload) = (uint16_t)iv; - } - - break; - - case TSDB_DATA_TYPE_INT: - if (isNullStr(pToken)) { - *((int32_t *)payload) = TSDB_DATA_INT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); - } else if (!IS_VALID_INT(iv)) { - return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); - } - - *((int32_t *)payload) = (int32_t)iv; - } - - break; - - case TSDB_DATA_TYPE_UINT: - if (isNullStr(pToken)) { - *((uint32_t *)payload) = TSDB_DATA_UINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); - } else if (!IS_VALID_UINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); - } - - *((uint32_t *)payload) = (uint32_t)iv; - } - - break; - - case TSDB_DATA_TYPE_BIGINT: - if (isNullStr(pToken)) { - *((int64_t *)payload) = TSDB_DATA_BIGINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); - } else if (!IS_VALID_BIGINT(iv)) { - return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); - } - - *((int64_t *)payload) = iv; - } - break; - - case TSDB_DATA_TYPE_UBIGINT: - if (isNullStr(pToken)) { - *((uint64_t *)payload) = TSDB_DATA_UBIGINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); - } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { - return 
tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); - } - - *((uint64_t *)payload) = iv; - } - break; - - case TSDB_DATA_TYPE_FLOAT: - if (isNullStr(pToken)) { - *((int32_t *)payload) = TSDB_DATA_FLOAT_NULL; - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || - isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - // *((float *)payload) = (float)dv; - SET_FLOAT_VAL(payload, dv); - } - break; - - case TSDB_DATA_TYPE_DOUBLE: - if (isNullStr(pToken)) { - *((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL; - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - *((double *)payload) = dv; - } - break; - - case TSDB_DATA_TYPE_BINARY: - // binary data cannot be null-terminated char string, otherwise the last char of the string is lost - if (pToken->type == TK_NULL) { - setVardataNull(payload, TSDB_DATA_TYPE_BINARY); - } else { // too long values will return invalid sql, not be truncated automatically - if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor - return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); - } - - STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); - } - - break; - - case TSDB_DATA_TYPE_NCHAR: - if (pToken->type == TK_NULL) { - setVardataNull(payload, TSDB_DATA_TYPE_NCHAR); - } else { - // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' - int32_t output = 0; - if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(payload), 
pSchema->bytes - VARSTR_HEADER_SIZE, &output)) { - char buf[512] = {0}; - snprintf(buf, tListLen(buf), "%s", strerror(errno)); - return tscInvalidOperationMsg(msg, buf, pToken->z); - } - - varDataSetLen(payload, output); - } - break; - - case TSDB_DATA_TYPE_TIMESTAMP: { - if (pToken->type == TK_NULL) { - if (primaryKey) { - *((int64_t *)payload) = 0; - } else { - *((int64_t *)payload) = TSDB_DATA_BIGINT_NULL; - } - } else { - int64_t temp; - if (tsParseTime(pToken, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); - } - - *((int64_t *)payload) = temp; - } - - break; - } - } - - return TSDB_CODE_SUCCESS; -} - -static int tsParseOneRowOld(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, - char *tmpTokenBuf, SInsertStatementParam *pInsertParam) { - int32_t index = 0; - SStrToken sToken = {0}; - char * payload = pDataBlocks->pData + pDataBlocks->size; - - SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo; - SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta); - - // 1. set the parsed value from sql string - int32_t rowSize = 0; - for (int i = 0; i < spd->numOfBound; ++i) { - // the start position in data block buffer of current value in sql - int32_t colIndex = spd->boundedColumns[i]; - - char * start = payload + spd->cols[colIndex].offset; - SSchema *pSchema = &schema[colIndex]; - rowSize += pSchema->bytes; - - index = 0; - sToken = tStrGetToken(*str, &index, true); - *str += index; - - if (sToken.type == TK_QUESTION) { - if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) { - return tscSQLSyntaxErrMsg(pInsertParam->msg, "? 
only allowed in binding insertion", *str); - } - - uint32_t offset = (uint32_t)(start - pDataBlocks->pData); - if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) { - continue; - } - - strcpy(pInsertParam->msg, "client out of memory"); - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - - int16_t type = sToken.type; - if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL && - type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || - (sToken.n == 0) || (type == TK_RP)) { - return tscSQLSyntaxErrMsg(pInsertParam->msg, "invalid data or symbol", sToken.z); - } - - // Remove quotation marks - if (TK_STRING == sToken.type) { - // delete escape character: \\, \', \" - char delim = sToken.z[0]; - - int32_t cnt = 0; - int32_t j = 0; - if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) { - return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z); - } - - for (uint32_t k = 1; k < sToken.n - 1; ++k) { - if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) { - tmpTokenBuf[j] = sToken.z[k + 1]; - - cnt++; - j++; - k++; - continue; - } - - tmpTokenBuf[j] = sToken.z[k]; - j++; - } - - tmpTokenBuf[j] = 0; - sToken.z = tmpTokenBuf; - sToken.n -= 2 + cnt; - } - - bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); - int32_t ret = tsParseOneColumnOld(pSchema, &sToken, start, pInsertParam->msg, str, isPrimaryKey, timePrec); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) { - tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); - return TSDB_CODE_TSC_INVALID_TIME_STAMP; - } - } - - // 2. 
set the null value for the columns that do not assign values - if (spd->numOfBound < spd->numOfCols) { - char *ptr = payload; - - for (int32_t i = 0; i < spd->numOfCols; ++i) { - if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column do not have any value to insert, set it to null - if (schema[i].type == TSDB_DATA_TYPE_BINARY) { - varDataSetLen(ptr, sizeof(int8_t)); - *(uint8_t *)varDataVal(ptr) = TSDB_DATA_BINARY_NULL; - } else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) { - varDataSetLen(ptr, sizeof(int32_t)); - *(uint32_t *)varDataVal(ptr) = TSDB_DATA_NCHAR_NULL; - } else { - setNull(ptr, schema[i].type, schema[i].bytes); - } - } - - ptr += schema[i].bytes; - } - - rowSize = (int32_t)(ptr - payload); - } - - *len = rowSize; - return TSDB_CODE_SUCCESS; -} - int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf, SInsertStatementParam *pInsertParam) { int32_t index = 0; @@ -1036,6 +672,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn } } +#if 1 void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { pColInfo->numOfCols = numOfCols; pColInfo->numOfBound = numOfCols; @@ -1071,6 +708,30 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32 pColInfo->allNullLen += pColInfo->flen; pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT)); } +#endif + +#if 0 +void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { + pColInfo->numOfCols = numOfCols; + pColInfo->numOfBound = numOfCols; + pColInfo->orderStatus = ORDER_STATUS_ORDERED; // default is ORDERED for non-bound mode + + pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t)); + pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn)); + pColInfo->colIdxInfo = NULL; + pColInfo->flen = 0; + pColInfo->allNullLen = 0; + + for (int32_t i = 0; i < pColInfo->numOfCols; ++i) { + 
if (i > 0) { + pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset; + } + + pColInfo->cols[i].valStat = VAL_STAT_HAS; + pColInfo->boundedColumns[i] = i; + } +} +#endif int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) { size_t remain = pDataBlock->nAllocSize - pDataBlock->size; @@ -1172,13 +833,12 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk // allocate memory size_t nAlloc = nRows * sizeof(SBlockKeyTuple); if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) { - size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple); - char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc); + char *tmp = trealloc(pBlkKeyInfo->pKeyTuple, nAlloc); if (tmp == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp; - pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc; + pBlkKeyInfo->maxBytesAlloc = (int32_t)nAlloc; } memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc); @@ -2061,7 +1721,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow STableComInfo tinfo = tscGetTableInfo(pTableMeta); SInsertStatementParam* pInsertParam = &pCmd->insertParam; - pInsertParam->payloadType = PAYLOAD_TYPE_RAW; + // pInsertParam->payloadType = PAYLOAD_TYPE_RAW; destroyTableNameList(pInsertParam); pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks); @@ -2103,7 +1763,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow strtolower(line, line); int32_t len = 0; - code = tsParseOneRowOld(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam); + code = tsParseOneRow(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam); if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) { pSql->res.code = code; break; From 2cddc07d2b95f9dc189de49dee5f7bd5d8289ebf Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 18 
Aug 2021 19:48:30 +0800 Subject: [PATCH 161/239] [TD-6184]: optimize insert from imported file --- src/client/src/tscParseInsert.c | 32 ++------------------------------ src/client/src/tscUtil.c | 1 - 2 files changed, 2 insertions(+), 31 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 793cee4ca2..f5d9a6a17e 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -51,20 +51,18 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } } + // default compareStat is ROW_COMPARE_NO_NEED if (nBoundCols == 0) { // file input pBuilder->memRowType = SMEM_ROW_DATA; - // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else { float boundRatio = ((float)nBoundCols / (float)nCols); if (boundRatio < KVRatioKV) { pBuilder->memRowType = SMEM_ROW_KV; - // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else if (boundRatio > KVRatioData) { pBuilder->memRowType = SMEM_ROW_DATA; - // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } pBuilder->compareStat = ROW_COMPARE_NEED; @@ -672,7 +670,6 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn } } -#if 1 void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { pColInfo->numOfCols = numOfCols; pColInfo->numOfBound = numOfCols; @@ -708,30 +705,6 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32 pColInfo->allNullLen += pColInfo->flen; pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT)); } -#endif - -#if 0 -void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { - pColInfo->numOfCols = numOfCols; - pColInfo->numOfBound = numOfCols; - pColInfo->orderStatus = ORDER_STATUS_ORDERED; // default is ORDERED for non-bound mode - - pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t)); - pColInfo->cols = 
calloc(pColInfo->numOfCols, sizeof(SBoundColumn)); - pColInfo->colIdxInfo = NULL; - pColInfo->flen = 0; - pColInfo->allNullLen = 0; - - for (int32_t i = 0; i < pColInfo->numOfCols; ++i) { - if (i > 0) { - pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset; - } - - pColInfo->cols[i].valStat = VAL_STAT_HAS; - pColInfo->boundedColumns[i] = i; - } -} -#endif int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) { size_t remain = pDataBlock->nAllocSize - pDataBlock->size; @@ -1720,8 +1693,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; STableComInfo tinfo = tscGetTableInfo(pTableMeta); - SInsertStatementParam* pInsertParam = &pCmd->insertParam; - // pInsertParam->payloadType = PAYLOAD_TYPE_RAW; + SInsertStatementParam *pInsertParam = &pCmd->insertParam; destroyTableNameList(pInsertParam); pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f3e30172ab..612d6a5642 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1744,7 +1744,6 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff dataBuf->tsSource = -1; dataBuf->vgId = dataBuf->pTableMeta->vgId; - tNameAssign(&dataBuf->tableName, name); assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL); From 47625967d22a1f2f44dbeff87194360e5ba7b41e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 19 Aug 2021 10:02:07 +0800 Subject: [PATCH 162/239] Feature/sangshuduo/td 5844 cmdline parameters align (#7442) * [TD-5844]: make cmd line parameter similar. * fix test case align with taosdemo change. * fix windows stack overflow issue. * fix mac compile error. 
* fix taosdemo cmdline parameter in tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py * fix windows compiler options. * make taos.exe use mysql style password input. * make taos shell and taosdump use mysql style password input. * determine scanf return value. * make console echo off during password input. --- src/kit/taosdemo/taosdemo.c | 4 +++- src/kit/taosdump/taosdump.c | 2 ++ src/os/inc/osSystem.h | 2 ++ src/os/src/darwin/darwinSystem.c | 26 ++++++++++++++++++++++++++ src/os/src/linux/osSystem.c | 24 ++++++++++++++++++++++++ src/os/src/windows/wSystem.c | 14 ++++++++++++++ 6 files changed, 71 insertions(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 7cee96b54b..a1c68b2f3c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -866,10 +866,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->user = argv[++i]; } else if (strncmp(argv[i], "-p", 2) == 0) { if (strlen(argv[i]) == 2) { - printf("Enter password:"); + printf("Enter password: "); + taosSetConsoleEcho(false); if (scanf("%s", arguments->password) > 1) { fprintf(stderr, "password read error!\n"); } + taosSetConsoleEcho(true); } else { tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); } diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index b7073c28a7..c54b8da1b7 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -561,9 +561,11 @@ static void parse_password( if (strncmp(argv[i], "-p", 2) == 0) { if (strlen(argv[i]) == 2) { printf("Enter password: "); + taosSetConsoleEcho(false); if(scanf("%20s", arguments->password) > 1) { errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__); } + taosSetConsoleEcho(true); } else { tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); } diff --git a/src/os/inc/osSystem.h b/src/os/inc/osSystem.h index e7a3ec13ae..4b79250740 100644 --- 
a/src/os/inc/osSystem.h +++ b/src/os/inc/osSystem.h @@ -24,6 +24,8 @@ void* taosLoadDll(const char *filename); void* taosLoadSym(void* handle, char* name); void taosCloseDll(void *handle); +int taosSetConsoleEcho(bool on); + #ifdef __cplusplus } #endif diff --git a/src/os/src/darwin/darwinSystem.c b/src/os/src/darwin/darwinSystem.c index 17cafdd664..6f296c9fef 100644 --- a/src/os/src/darwin/darwinSystem.c +++ b/src/os/src/darwin/darwinSystem.c @@ -29,4 +29,30 @@ void* taosLoadSym(void* handle, char* name) { void taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ +#if 0 +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { + perror("Cannot set the attribution of the terminal"); + return -1; + } + +#endif + return 0; +} diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c index 052b7a22a8..0cdb20dbdb 100644 --- a/src/os/src/linux/osSystem.c +++ b/src/os/src/linux/osSystem.c @@ -51,4 +51,28 @@ void taosCloseDll(void *handle) { } } +int taosSetConsoleEcho(bool on) +{ +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { + perror("Cannot set the attribution of the terminal"); + return -1; + } + + return 0; +} diff --git a/src/os/src/windows/wSystem.c b/src/os/src/windows/wSystem.c index 17cafdd664..564005f79b 100644 --- a/src/os/src/windows/wSystem.c +++ b/src/os/src/windows/wSystem.c @@ -30,3 +30,17 @@ void 
taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ + HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); + DWORD mode = 0; + GetConsoleMode(hStdin, &mode ); + if (on) { + mode |= ENABLE_ECHO_INPUT; + } else { + mode &= ~ENABLE_ECHO_INPUT; + } + SetConsoleMode(hStdin, mode); + + return 0; +} From ffe99d41d9eede67d473380f84834ab68e829b6a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 19 Aug 2021 10:02:12 +0800 Subject: [PATCH 163/239] Hotfix/sangshuduo/td 5844 cmdline parameters align for master (#7444) * [TD-5844]: make cmd line parameter similar. * fix test case align with taosdemo change. * fix windows stack overflow issue. * fix mac compile error. * fix taosdemo cmdline parameter in tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py * make taos.exe use mysql style password input. * make taos shell and taosdump use mysql style password input. * determine scanf return value. * cherry pick from develop/feature branch. --- src/kit/shell/src/shellDarwin.c | 23 ++++++++++++++--- src/kit/shell/src/shellEngine.c | 42 ++++++++++++++++++-------------- src/kit/shell/src/shellLinux.c | 35 +++++++++++++++++++++++--- src/kit/shell/src/shellMain.c | 2 ++ src/kit/shell/src/shellWindows.c | 30 +++++++++++++++++------ src/kit/taosdemo/taosdemo.c | 8 ++++-- src/kit/taosdump/taosdump.c | 31 +++++++++++++++++++---- src/os/inc/osSystem.h | 2 ++ src/os/src/darwin/darwinSystem.c | 26 ++++++++++++++++++++ src/os/src/linux/osSystem.c | 24 ++++++++++++++++++ src/os/src/windows/wSystem.c | 14 +++++++++++ 11 files changed, 196 insertions(+), 41 deletions(-) diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 4dcd8b3d50..5ca4537aeb 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -64,6 +64,10 @@ void printHelp() { exit(EXIT_SUCCESS); } +char DARWINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; +char g_password[MAX_PASSWORD_SIZE]; + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { wordexp_t full_path; for (int i = 1; i < argc; i++) { @@ -77,10 +81,21 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strcmp(argv[i], "-p") == 0) { - arguments->is_use_passwd = true; + else if (strncmp(argv[i], "-p", 2) == 0) { + strcpy(tsOsName, "Darwin"); + printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%s", g_password) > 1) { + fprintf(stderr, "password read error\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; } - // for management port + // for management port else if (strcmp(argv[i], "-P") == 0) { if (i < argc - 1) { arguments->port = atoi(argv[++i]); @@ -98,7 +113,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-c") == 0) { - if (i < argc - 1) { + if (i < argc - 1) { if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1); exit(EXIT_FAILURE); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 58f4b7ff02..51a25d59c4 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -65,7 +65,15 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut */ TAOS *shellInit(SShellArguments *_args) { printf("\n"); - printf(CLIENT_VERSION, tsOsName, taos_get_client_info()); + if (!_args->is_use_passwd) { +#ifdef TD_WINDOWS + strcpy(tsOsName, "Windows"); +#elif defined(TD_DARWIN) + strcpy(tsOsName, "Darwin"); +#endif + printf(CLIENT_VERSION, tsOsName, taos_get_client_info()); + } + fflush(stdout); // set options before initializing @@ -73,9 
+81,7 @@ TAOS *shellInit(SShellArguments *_args) { taos_options(TSDB_OPTION_TIMEZONE, _args->timezone); } - if (_args->is_use_passwd) { - if (_args->password == NULL) _args->password = getpass("Enter password: "); - } else { + if (!_args->is_use_passwd) { _args->password = TSDB_DEFAULT_PASS; } @@ -170,7 +176,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { system("clear"); return 0; } - + if (regex_match(command, "^[\t ]*set[ \t]+max_binary_display_width[ \t]+(default|[1-9][0-9]*)[ \t;]*$", REG_EXTENDED | REG_ICASE)) { strtok(command, " \t"); strtok(NULL, " \t"); @@ -182,7 +188,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { } return 0; } - + if (regex_match(command, "^[ \t]*source[\t ]+[^ ]+[ \t;]*$", REG_EXTENDED | REG_ICASE)) { /* If source file. */ char *c_ptr = strtok(command, " ;"); @@ -247,7 +253,7 @@ int32_t shellRunCommand(TAOS* con, char* command) { esc = false; continue; } - + if (c == '\\') { esc = true; continue; @@ -336,8 +342,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { } if (!tscIsUpdateQuery(pSql)) { // select and show kinds of commands - int error_no = 0; - + int error_no = 0; + int numOfRows = shellDumpResult(pSql, fname, &error_no, printMode); if (numOfRows < 0) { atomic_store_64(&result, 0); @@ -530,7 +536,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) { fprintf(fp, "%s", fields[col].name); } fputc('\n', fp); - + int numOfRows = 0; do { int32_t* length = taos_fetch_lengths(tres); @@ -716,7 +722,7 @@ static int verticalPrintResult(TAOS_RES* tres) { int numOfRows = 0; int showMore = 1; - do { + do { if (numOfRows < resShowMaxNum) { printf("*************************** %d.row ***************************\n", numOfRows + 1); @@ -851,7 +857,7 @@ static int horizontalPrintResult(TAOS_RES* tres) { int numOfRows = 0; int showMore = 1; - + do { int32_t* length = taos_fetch_lengths(tres); if (numOfRows < resShowMaxNum) { @@ -867,7 +873,7 @@ static int 
horizontalPrintResult(TAOS_RES* tres) { printf("[You can add limit statement to show more or redirect results to specific file to get all.]\n"); showMore = 0; } - + numOfRows++; row = taos_fetch_row(tres); } while(row != NULL); @@ -909,7 +915,7 @@ void read_history() { if (errno != ENOENT) { fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno)); } -#endif +#endif return; } @@ -934,9 +940,9 @@ void write_history() { FILE *f = fopen(f_history, "w"); if (f == NULL) { -#ifndef WINDOWS +#ifndef WINDOWS fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno)); -#endif +#endif return; } @@ -982,13 +988,13 @@ void source_file(TAOS *con, char *fptr) { /* if (access(fname, F_OK) != 0) { fprintf(stderr, "ERROR: file %s is not exist\n", fptr); - + wordfree(&full_path); free(cmd); return; } */ - + FILE *f = fopen(fname, "r"); if (f == NULL) { fprintf(stderr, "ERROR: failed to open file %s\n", fname); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index dc74f6fcaa..d051d3535e 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -34,7 +34,7 @@ static char doc[] = ""; static char args_doc[] = ""; static struct argp_option options[] = { {"host", 'h', "HOST", 0, "TDengine server FQDN to connect. 
The default host is localhost."}, - {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."}, + {"password", 'p', 0, 0, "The password to use when connecting to the server."}, {"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."}, {"user", 'u', "USER", 0, "The user name to use when connecting to the server."}, {"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."}, @@ -63,8 +63,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { arguments->host = arg; break; case 'p': - arguments->is_use_passwd = true; - if (arg) arguments->password = arg; break; case 'P': if (arg) { @@ -160,12 +158,41 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Our argp parser. */ static struct argp argp = {options, parse_opt, args_doc, doc}; +char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; +char g_password[MAX_PASSWORD_SIZE]; + +static void parse_password( + int argc, char *argv[], SShellArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-p", 2) == 0) { + strcpy(tsOsName, "Linux"); + printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%20s", g_password) > 1) { + fprintf(stderr, "password reading error\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; + arguments->is_use_passwd = true; + } + } +} + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { static char verType[32] = {0}; sprintf(verType, "version: %s\n", version); argp_program_version = verType; - + + if (argc > 1) { + parse_password(argc, argv, arguments); + } + argp_parse(&argp, argc, argv, 0, 0, arguments); if (arguments->abort) { #ifndef _ALPINE diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 0c70386061..4e00b0d8ff 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -71,7 +71,9 @@ int checkVersion() { // Global configurations SShellArguments args = { .host = NULL, +#ifndef TD_WINDOWS .password = NULL, +#endif .user = NULL, .database = NULL, .timezone = NULL, diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 87d11a3516..bf9afe4b80 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -19,6 +19,9 @@ extern char configDir[]; +char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; + void printVersion() { printf("version: %s\n", version); } @@ -61,6 +64,8 @@ void printHelp() { exit(EXIT_SUCCESS); } +char g_password[MAX_PASSWORD_SIZE]; + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { for (int i = 1; i < argc; i++) { // for host @@ -73,11 +78,20 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strcmp(argv[i], "-p") == 0) { - arguments->is_use_passwd = true; - if (i < argc - 1 && argv[i + 1][0] != '-') { - arguments->password = argv[++i]; - } + else if (strncmp(argv[i], "-p", 2) == 0) { + arguments->is_use_passwd = true; + strcpy(tsOsName, "Windows"); + printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%s", g_password) > 1) { + fprintf(stderr, "password read error!\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; } // for management port else if (strcmp(argv[i], "-P") == 0) { @@ -104,7 +118,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-c") == 0) { - if (i < argc - 1) { + if (i < argc - 1) { char *tmp = argv[++i]; if (strlen(tmp) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", tmp, TSDB_FILENAME_LEN - 1); @@ -265,7 +279,7 @@ void *shellLoopQuery(void *arg) { if (command == NULL) return NULL; int32_t err = 0; - + do { memset(command, 0, MAX_COMMAND_SIZE); shellPrintPrompt(); @@ -274,7 +288,7 @@ void *shellLoopQuery(void *arg) { err = shellReadCommand(con, command); if (err) { break; - } + } } while (shellRunCommand(con, command) == 0); return NULL; diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 016e27dc13..9a7d3cd25c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -866,8 
+866,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->user = argv[++i]; } else if (strncmp(argv[i], "-p", 2) == 0) { if (strlen(argv[i]) == 2) { - printf("Enter password:"); - scanf("%s", arguments->password); + printf("Enter password: "); + taosSetConsoleEcho(false); + if (scanf("%s", arguments->password) > 1) { + fprintf(stderr, "password read error!\n"); + } + taosSetConsoleEcho(true); } else { tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); } diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index bea6e65106..c54b8da1b7 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -206,9 +206,9 @@ static struct argp_option options[] = { {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, #ifdef _TD_POWER_ - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, + {"password", 'p', 0, 0, "User password to connect to server. Default is powerdb.", 0}, #else - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + {"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0}, #endif {"port", 'P', "PORT", 0, "Port to connect", 0}, {"cversion", 'v', "CVERION", 0, "client version", 0}, @@ -248,12 +248,14 @@ static struct argp_option options[] = { {0} }; +#define MAX_PASSWORD_SIZE 20 + /* Used by main to communicate with parse_opt. 
*/ typedef struct arguments { // connection option char *host; char *user; - char *password; + char password[MAX_PASSWORD_SIZE]; uint16_t port; char cversion[12]; uint16_t mysqlFlag; @@ -376,7 +378,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { g_args.user = arg; break; case 'p': - g_args.password = arg; break; case 'P': g_args.port = atoi(arg); @@ -554,6 +555,25 @@ static void parse_precision_first( } } +static void parse_password( + int argc, char *argv[], SArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-p", 2) == 0) { + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + taosSetConsoleEcho(false); + if(scanf("%20s", arguments->password) > 1) { + errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__); + } + taosSetConsoleEcho(true); + } else { + tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + argv[i] = ""; + } + } +} + static void parse_timestamp( int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { @@ -616,9 +636,10 @@ int main(int argc, char *argv[]) { int ret = 0; /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. 
*/ - if (argc > 2) { + if (argc > 1) { parse_precision_first(argc, argv, &g_args); parse_timestamp(argc, argv, &g_args); + parse_password(argc, argv, &g_args); } argp_parse(&argp, argc, argv, 0, 0, &g_args); diff --git a/src/os/inc/osSystem.h b/src/os/inc/osSystem.h index e7a3ec13ae..4b79250740 100644 --- a/src/os/inc/osSystem.h +++ b/src/os/inc/osSystem.h @@ -24,6 +24,8 @@ void* taosLoadDll(const char *filename); void* taosLoadSym(void* handle, char* name); void taosCloseDll(void *handle); +int taosSetConsoleEcho(bool on); + #ifdef __cplusplus } #endif diff --git a/src/os/src/darwin/darwinSystem.c b/src/os/src/darwin/darwinSystem.c index 17cafdd664..6f296c9fef 100644 --- a/src/os/src/darwin/darwinSystem.c +++ b/src/os/src/darwin/darwinSystem.c @@ -29,4 +29,30 @@ void* taosLoadSym(void* handle, char* name) { void taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ +#if 0 +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { + perror("Cannot set the attribution of the terminal"); + return -1; + } + +#endif + return 0; +} diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c index 052b7a22a8..0cdb20dbdb 100644 --- a/src/os/src/linux/osSystem.c +++ b/src/os/src/linux/osSystem.c @@ -51,4 +51,28 @@ void taosCloseDll(void *handle) { } } +int taosSetConsoleEcho(bool on) +{ +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { 
+ perror("Cannot set the attribution of the terminal"); + return -1; + } + + return 0; +} diff --git a/src/os/src/windows/wSystem.c b/src/os/src/windows/wSystem.c index 17cafdd664..564005f79b 100644 --- a/src/os/src/windows/wSystem.c +++ b/src/os/src/windows/wSystem.c @@ -30,3 +30,17 @@ void taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ + HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); + DWORD mode = 0; + GetConsoleMode(hStdin, &mode ); + if (on) { + mode |= ENABLE_ECHO_INPUT; + } else { + mode &= ~ENABLE_ECHO_INPUT; + } + SetConsoleMode(hStdin, mode); + + return 0; +} From 9f0eb43164676fe8763dfeff90d63ced8416635e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 19 Aug 2021 10:57:23 +0800 Subject: [PATCH 164/239] add topicBianryLen config parameters for cenc --- src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index ed35168457..3c2069339a 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -158,6 +158,7 @@ extern char tsDataDir[]; extern char tsLogDir[]; extern char tsScriptDir[]; extern int64_t tsTickPerDay[3]; +extern int32_t tsTopicBianryLen; // system info extern char tsOsName[]; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index e4ff353787..38ae56eb24 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -199,6 +199,7 @@ char tsScriptDir[PATH_MAX] = {0}; char tsTempDir[PATH_MAX] = "/tmp/"; int32_t tsDiskCfgNum = 0; +int32_t tsTopicBianryLen = 16000; #ifndef _STORAGE SDiskCfg tsDiskCfg[1]; @@ -1216,6 +1217,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "topicBianryLen"; + cfg.ptr = &tsTopicBianryLen; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 16; + cfg.maxValue = 16000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; 
+ taosInitConfigOption(cfg); + cfg.option = "httpEnableRecordSql"; cfg.ptr = &tsHttpEnableRecordSql; cfg.valType = TAOS_CFG_VTYPE_INT8; From a1ac1c63488506db233641298719809f554dfbbb Mon Sep 17 00:00:00 2001 From: lichuang Date: Thu, 19 Aug 2021 11:10:10 +0800 Subject: [PATCH 165/239] [TD-4352]refactor code,use meta-ver instead of tmp file --- src/tsdb/inc/tsdbFile.h | 4 +-- src/tsdb/src/tsdbCommit.c | 63 ++++++++++++++++++++++++++------------- src/tsdb/src/tsdbFS.c | 2 +- src/tsdb/src/tsdbFile.c | 5 ++-- src/tsdb/src/tsdbSync.c | 2 +- 5 files changed, 48 insertions(+), 28 deletions(-) diff --git a/src/tsdb/inc/tsdbFile.h b/src/tsdb/inc/tsdbFile.h index 3913a97c3c..b9d5431de6 100644 --- a/src/tsdb/inc/tsdbFile.h +++ b/src/tsdb/inc/tsdbFile.h @@ -38,7 +38,7 @@ #define TSDB_FILE_IS_OK(tf) (TSDB_FILE_STATE(tf) == TSDB_FILE_STATE_OK) #define TSDB_FILE_IS_BAD(tf) (TSDB_FILE_STATE(tf) == TSDB_FILE_STATE_BAD) -typedef enum { TSDB_FILE_HEAD = 0, TSDB_FILE_DATA, TSDB_FILE_LAST, TSDB_FILE_MAX, TSDB_FILE_META, TSDB_FILE_META_TMP} TSDB_FILE_T; +typedef enum { TSDB_FILE_HEAD = 0, TSDB_FILE_DATA, TSDB_FILE_LAST, TSDB_FILE_MAX, TSDB_FILE_META } TSDB_FILE_T; // =============== SMFile typedef struct { @@ -56,7 +56,7 @@ typedef struct { uint8_t state; } SMFile; -void tsdbInitMFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver, bool tmp); +void tsdbInitMFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver); void tsdbInitMFileEx(SMFile* pMFile, const SMFile* pOMFile); int tsdbEncodeSMFile(void** buf, SMFile* pMFile); void* tsdbDecodeSMFile(void* buf, SMFile* pMFile); diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index dd5dec81e0..45fe6f6936 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -264,6 +264,35 @@ int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) { // =================== Commit Meta Data +static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) { + STsdbFS * pfs = REPO_FS(pRepo); + SMFile 
* pOMFile = pfs->cstatus->pmf; + SDiskID did; + + // Create/Open a meta file or open the existing file + if (pOMFile == NULL) { + // Create a new meta file + did.level = TFS_PRIMARY_LEVEL; + did.id = TFS_PRIMARY_ID; + tsdbInitMFile(pMf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo))); + + if (open && tsdbCreateMFile(pMf, true) < 0) { + tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMf)); + } else { + tsdbInitMFileEx(pMf, pOMFile); + if (open && tsdbOpenMFile(pMf, O_WRONLY) < 0) { + tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + } + + return 0; +} + static int tsdbCommitMeta(STsdbRepo *pRepo) { STsdbFS * pfs = REPO_FS(pRepo); SMemTable *pMem = pRepo->imem; @@ -272,34 +301,25 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) { SActObj * pAct = NULL; SActCont * pCont = NULL; SListNode *pNode = NULL; - SDiskID did; ASSERT(pOMFile != NULL || listNEles(pMem->actList) > 0); if (listNEles(pMem->actList) <= 0) { // no meta data to commit, just keep the old meta file tsdbUpdateMFile(pfs, pOMFile); + if (tsTsdbMetaCompactRatio > 0) { + if (tsdbInitCommitMetaFile(pRepo, &mf, false) < 0) { + return -1; + } + int ret = tsdbCompactMetaFile(pRepo, pfs, &mf); + if (ret < 0) tsdbError("compact meta file error"); + + return ret; + } return 0; } else { - // Create/Open a meta file or open the existing file - if (pOMFile == NULL) { - // Create a new meta file - did.level = TFS_PRIMARY_LEVEL; - did.id = TFS_PRIMARY_ID; - tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), false); - - if (tsdbCreateMFile(&mf, true) < 0) { - tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno)); - return -1; - } - - tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf)); - } else { - 
tsdbInitMFileEx(&mf, pOMFile); - if (tsdbOpenMFile(&mf, O_WRONLY) < 0) { - tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno)); - return -1; - } + if (tsdbInitCommitMetaFile(pRepo, &mf, true) < 0) { + return -1; } } @@ -477,7 +497,7 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { // first create tmp meta file did.level = TFS_PRIMARY_LEVEL; did.id = TFS_PRIMARY_ID; - tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), true); + tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)) + 1); if (tsdbCreateMFile(&mf, true) < 0) { tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno)); @@ -542,6 +562,7 @@ _err: if (code == 0) { // rename meta.tmp -> meta + tsdbInfo("vgId:%d meta file rename %s -> %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf), TSDB_FILE_FULL_NAME(pMFile)); taosRename(mf.f.aname,pMFile->f.aname); tstrncpy(mf.f.aname, pMFile->f.aname, TSDB_FILENAME_LEN); tstrncpy(mf.f.rname, pMFile->f.rname, TSDB_FILENAME_LEN); diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index 5f34764538..68450301d8 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -274,7 +274,7 @@ static int tsdbCreateMeta(STsdbRepo *pRepo) { // Create a new meta file did.level = TFS_PRIMARY_LEVEL; did.id = TFS_PRIMARY_ID; - tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), false); + tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo))); if (tsdbCreateMFile(&mf, true) < 0) { tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno)); diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index cc6fbb632f..0f13b6108f 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -21,7 +21,6 @@ static const char *TSDB_FNAME_SUFFIX[] = { "last", // TSDB_FILE_LAST "", // TSDB_FILE_MAX "meta", // TSDB_FILE_META - "meta.tmp", // TSDB_FILE_META_TMP }; 
static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname); @@ -31,7 +30,7 @@ static void *tsdbDecodeDFInfo(void *buf, SDFInfo *pInfo); static int tsdbRollBackDFile(SDFile *pDFile); // ============== SMFile -void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver, bool tmp) { +void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver) { char fname[TSDB_FILENAME_LEN]; TSDB_FILE_SET_STATE(pMFile, TSDB_FILE_STATE_OK); @@ -39,7 +38,7 @@ void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver, bool tmp) memset(&(pMFile->info), 0, sizeof(pMFile->info)); pMFile->info.magic = TSDB_FILE_INIT_MAGIC; - tsdbGetFilename(vid, 0, ver, tmp ? TSDB_FILE_META_TMP : TSDB_FILE_META, fname); + tsdbGetFilename(vid, 0, ver, TSDB_FILE_META, fname); tfsInitFile(TSDB_FILE_F(pMFile), did.level, did.id, fname); } diff --git a/src/tsdb/src/tsdbSync.c b/src/tsdb/src/tsdbSync.c index f06943bc37..edcb84d091 100644 --- a/src/tsdb/src/tsdbSync.c +++ b/src/tsdb/src/tsdbSync.c @@ -209,7 +209,7 @@ static int32_t tsdbSyncRecvMeta(SSyncH *pSynch) { // Recv from remote SMFile mf; SDiskID did = {.level = TFS_PRIMARY_LEVEL, .id = TFS_PRIMARY_ID}; - tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)), false); + tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo))); if (tsdbCreateMFile(&mf, false) < 0) { tsdbError("vgId:%d, failed to create file while recv metafile since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; From f0280f424fab2867391c363a895c78bac60bd70d Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 19 Aug 2021 11:13:27 +0800 Subject: [PATCH 166/239] [TD-6174] : remove links to Chinese pages in English "doc/getting-started". 
--- documentation20/en/02.getting-started/docs.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md index 3c9d9ac6af..7b7de202b7 100644 --- a/documentation20/en/02.getting-started/docs.md +++ b/documentation20/en/02.getting-started/docs.md @@ -16,9 +16,7 @@ Please visit our [TDengine Official Docker Image: Distribution, Downloading, and It’s extremely easy to install for TDengine, which takes only a few seconds from downloaded to successful installed. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs: -Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package. - -For more about installation process, please refer [TDengine Installation Packages: Install and Uninstall](https://www.taosdata.com/blog/2019/08/09/566.html), and [Video Tutorials](https://www.taosdata.com/blog/2020/11/11/1941.html). +Click [here](https://www.taosdata.com/en/getting-started/#Install-from-Package) to download the install package. ## Quick Launch From a17b3c9101b52c0e7b88a90702f2fd46c031b59c Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 19 Aug 2021 12:43:39 +0800 Subject: [PATCH 167/239] [TD-4181] : move intro part to be the beginning of Java doc. 
--- .../cn/08.connector/01.java/docs.md | 160 +++++++++--------- 1 file changed, 82 insertions(+), 78 deletions(-) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index ab9af42cf8..9015fde12d 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -1,5 +1,86 @@ # Java Connector +## 总体介绍 + +TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索下载。 + +`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 + +![tdengine-connector](page://images/tdengine-jdbc-connector.png) + +上图显示了 3 种 Java 应用使用连接器访问 TDengine 的方式: + +* JDBC-JNI:Java 应用在物理节点1(pnode1)上使用 JDBC-JNI 的 API ,直接调用客户端 API(libtaos.so 或 taos.dll)将写入和查询请求发送到位于物理节点2(pnode2)上的 taosd 实例。 +* RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。 +* JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。 + +TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: + +* TDengine 目前不支持针对单条数据记录的删除操作。 +* 目前不支持事务操作。 +* 目前不支持嵌套查询(nested query)。 +* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。 + +### JDBC-JNI和JDBC-RESTful的对比 + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
对比项JDBC-JNIJDBC-RESTful
支持的操作系统linux、windows全平台
是否需要安装 client需要不需要
server 升级后是否需要升级 client需要不需要
写入性能JDBC-RESTful 是 JDBC-JNI 的 50%~90%
查询性能JDBC-RESTful 与 JDBC-JNI 没有差别
+ +注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。 + +### TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 + +| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | +| -------------------- | ----------------- | -------- | +| 2.0.31 | 2.1.3.0 及以上 | 1.8.x | +| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | +| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | +| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | +| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | + +### TDengine DataType 和 Java DataType + +TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: + +| TDengine DataType | Java DataType | +| ----------------- | ------------------ | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT | java.lang.Short | +| TINYINT | java.lang.Byte | +| BOOL | java.lang.Boolean | +| BINARY | byte array | +| NCHAR | java.lang.String | + ## 安装 Java连接器支持的系统有: Linux 64/Windows x64/Windows x86。 @@ -9,7 +90,7 @@ Java连接器支持的系统有: Linux 64/Windows x64/Windows x86。 - 已安装TDengine服务器端 - 已安装好TDengine应用驱动,具体请参照 [安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) 章节 -TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索并下载。 +TDengine 为了方便 Java 应用使用,遵循 JDBC 标准(3.0)API 规范提供了 `taos-jdbcdriver` 实现。可以通过 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索并下载。 由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 @@ -66,83 +147,6 @@ java -jar JDBCConnectorChecker.jar -host ## Java连接器的使用 -`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 - -![tdengine-connector](page://images/tdengine-jdbc-connector.png) - -上图显示了 3 
种 Java 应用使用连接器访问 TDengine 的方式: - -* JDBC-JNI:Java 应用在物理节点1(pnode1)上使用 JDBC-JNI 的 API ,直接调用客户端 API(libtaos.so 或 taos.dll)将写入和查询请求发送到位于物理节点2(pnode2)上的 taosd 实例。 -* RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。 -* JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。 - -TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: - -* TDengine 目前不支持针对单条数据记录的删除操作。 -* 目前不支持事务操作。 -* 目前不支持嵌套查询(nested query)。 -* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。 - -### JDBC-JNI和JDBC-RESTful的对比 - - - - - - - - - - - - - - - - - - - - - - - - - - -
对比项JDBC-JNIJDBC-RESTful
支持的操作系统linux、windows全平台
是否需要安装 client需要不需要
server 升级后是否需要升级 client需要不需要
写入性能JDBC-RESTful 是 JDBC-JNI 的 50%~90%
查询性能JDBC-RESTful 与 JDBC-JNI 没有差别
- -注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。 - -### TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 - -| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | -| -------------------- | ----------------- | -------- | -| 2.0.31 | 2.1.3.0 及以上 | 1.8.x | -| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | -| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | -| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | -| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | - -### TDengine DataType 和 Java DataType - -TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: - -| TDengine DataType | Java DataType | -| ----------------- | ------------------ | -| TIMESTAMP | java.sql.Timestamp | -| INT | java.lang.Integer | -| BIGINT | java.lang.Long | -| FLOAT | java.lang.Float | -| DOUBLE | java.lang.Double | -| SMALLINT | java.lang.Short | -| TINYINT | java.lang.Byte | -| BOOL | java.lang.Boolean | -| BINARY | byte array | -| NCHAR | java.lang.String | - ### 获取连接 #### 指定URL获取连接 From 021fc6af21c76a9912220233ee73c4ff2c34e3cb Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Thu, 19 Aug 2021 13:02:34 +0800 Subject: [PATCH 168/239] [TD-6184]: optimize insert from imported file --- src/client/inc/tsclient.h | 8 +++----- src/client/src/tscParseInsert.c | 20 +++++--------------- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index b6821de87a..4ed7894931 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -123,17 +123,15 @@ typedef struct { int32_t kvLen; // len of SKVRow } SMemRowInfo; typedef struct { - uint8_t memRowType; - uint8_t compareStat; // 0 unknown, 1 need compare, 2 no need - TDRowTLenT dataRowInitLen; + uint8_t memRowType; // default is 0, that is SDataRow + uint8_t compareStat; // 0 no need, 1 need compare TDRowTLenT kvRowInitLen; SMemRowInfo *rowInfo; } SMemRowBuilder; typedef enum { - ROW_COMPARE_UNKNOWN = 0, + ROW_COMPARE_NO_NEED = 
0, ROW_COMPARE_NEED = 1, - ROW_COMPARE_NO_NEED = 2, } ERowCompareStat; int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec); diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index f1ca6b7754..35b37b7ae6 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -51,20 +51,18 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } } + // default compareStat is ROW_COMPARE_NO_NEED if (nBoundCols == 0) { // file input pBuilder->memRowType = SMEM_ROW_DATA; - pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else { float boundRatio = ((float)nBoundCols / (float)nCols); if (boundRatio < KVRatioKV) { pBuilder->memRowType = SMEM_ROW_KV; - pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else if (boundRatio > KVRatioData) { pBuilder->memRowType = SMEM_ROW_DATA; - pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } pBuilder->compareStat = ROW_COMPARE_NEED; @@ -76,7 +74,6 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } } - pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx); if (nRows > 0) { @@ -86,7 +83,7 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } for (int i = 0; i < nRows; ++i) { - (pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen; + (pBuilder->rowInfo + i)->dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; (pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen; } } @@ -460,7 +457,7 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i STableMeta * pTableMeta = pDataBlocks->pTableMeta; SSchema * schema = tscGetTableSchema(pTableMeta); SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder; - int32_t dataLen = pBuilder->dataRowInitLen; + int32_t dataLen = spd->allNullLen + TD_MEM_ROW_DATA_HEAD_SIZE; 
int32_t kvLen = pBuilder->kvRowInitLen; bool isParseBindParam = false; @@ -809,13 +806,12 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk // allocate memory size_t nAlloc = nRows * sizeof(SBlockKeyTuple); if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) { - size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple); - char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc); + char *tmp = trealloc(pBlkKeyInfo->pKeyTuple, nAlloc); if (tmp == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp; - pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc; + pBlkKeyInfo->maxBytesAlloc = (int32_t)nAlloc; } memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc); @@ -1726,12 +1722,6 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow goto _error; } - if (TSDB_CODE_SUCCESS != - (ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, pTableDataBlock->numOfParams, - pTableDataBlock->boundColumnInfo.allNullLen))) { - goto _error; - } - while ((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { line[--readLen] = 0; From 895bbd9aab5ab2d86b513c69766ac80306bc0e95 Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Thu, 19 Aug 2021 13:46:11 +0800 Subject: [PATCH 169/239] Update docs.md suggest use taos-jdbcdriver-2.0.34 --- documentation20/cn/08.connector/01.java/docs.md | 1 + 1 file changed, 1 insertion(+) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index 9015fde12d..308cc1f327 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -56,6 +56,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | -------------------- | ----------------- | -------- | +| 2.0.34(建议使用) | 2.0.3.0 及以上 | 
1.8.x | | 2.0.31 | 2.1.3.0 及以上 | 1.8.x | | 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | | 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | From a9aaa1c0998770097f52485be483cea114222a2e Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Thu, 19 Aug 2021 14:03:55 +0800 Subject: [PATCH 170/239] =?UTF-8?q?add=20jdbc=20driver=E2=80=99s=20compati?= =?UTF-8?q?bility=20description?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add jdbc driver’s compatibility description --- documentation20/cn/08.connector/01.java/docs.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index 308cc1f327..ca045eff87 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -56,8 +56,8 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | -------------------- | ----------------- | -------- | -| 2.0.34(建议使用) | 2.0.3.0 及以上 | 1.8.x | -| 2.0.31 | 2.1.3.0 及以上 | 1.8.x | +| 2.0.33 - 2.0.34 | 2.0.3.0 及以上 | 1.8.x | +| 2.0.31 - 2.0.32 | 2.1.3.0 及以上 | 1.8.x | | 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | | 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | | 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | From eb4e9067376eac12fd930719a0eb5825c4283113 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 19 Aug 2021 14:13:17 +0800 Subject: [PATCH 171/239] [ci skip] update scirpt for csv generator --- .../pytest/insert/insertFromCSVPerformance.py | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/tests/pytest/insert/insertFromCSVPerformance.py b/tests/pytest/insert/insertFromCSVPerformance.py index f3b9c2734d..487497631a 100644 --- a/tests/pytest/insert/insertFromCSVPerformance.py +++ b/tests/pytest/insert/insertFromCSVPerformance.py @@ -28,7 +28,7 @@ class 
insertFromCSVPerformace: self.tbName = tbName self.branchName = branchName self.type = buildType - self.ts = 1500074556514 + self.ts = 1500000000000 self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" @@ -46,13 +46,20 @@ class insertFromCSVPerformace: config = self.config) def writeCSV(self): - with open('test3.csv','w', encoding='utf-8', newline='') as csvFile: + tsset = set() + rows = 0 + with open('test4.csv','w', encoding='utf-8', newline='') as csvFile: writer = csv.writer(csvFile, dialect='excel') - for i in range(1000000): - newTimestamp = self.ts + random.randint(10000000, 10000000000) + random.randint(1000, 10000000) + random.randint(1, 1000) - d = datetime.datetime.fromtimestamp(newTimestamp / 1000) - dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f")) - writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)]) + while True: + newTimestamp = self.ts + random.randint(1, 10) * 10000000000 + random.randint(1, 10) * 1000000000 + random.randint(1, 10) * 100000000 + random.randint(1, 10) * 10000000 + random.randint(1, 10) * 1000000 + random.randint(1, 10) * 100000 + random.randint(1, 10) * 10000 + random.randint(1, 10) * 1000 + random.randint(1, 10) * 100 + random.randint(1, 10) * 10 + random.randint(1, 10) + if newTimestamp not in tsset: + tsset.add(newTimestamp) + d = datetime.datetime.fromtimestamp(newTimestamp / 1000) + dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f")) + writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)]) + rows += 1 + if rows == 2000000: + break def removCSVHeader(self): data = pd.read_csv("ordered.csv") @@ -71,7 +78,9 @@ class insertFromCSVPerformace: cursor.execute("create table if not exists t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)") startTime = time.time() cursor.execute("insert into t1 file 'outoforder.csv'") - totalTime += time.time() - startTime + totalTime += time.time() - 
startTime + time.sleep(1) + out_of_order_time = (float) (totalTime / 10) print("Out of Order - Insert time: %f" % out_of_order_time) @@ -81,7 +90,8 @@ class insertFromCSVPerformace: cursor.execute("create table if not exists t2(ts timestamp, c1 int, c2 float, c3 int, c4 int)") startTime = time.time() cursor.execute("insert into t2 file 'ordered.csv'") - totalTime += time.time() - startTime + totalTime += time.time() - startTime + time.sleep(1) in_order_time = (float) (totalTime / 10) print("In order - Insert time: %f" % in_order_time) From b6b5c037443421c81f48b107bc12d314633a569f Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 10 Aug 2021 20:02:44 +0800 Subject: [PATCH 172/239] small fix --- .gitignore | 1 + src/common/src/tdataformat.c | 1 + src/util/src/tfunctional.c | 5 +++-- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 50f4251320..2c37aa92f7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ build/ +.ycm_extra_conf.py .vscode/ .idea/ cmake-build-debug/ diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index a3a6c0fed4..a5aabbe1f6 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -517,6 +517,7 @@ void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, b } } +//TODO: refactor this function to eliminate additional memory copy int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(target->numOfCols == source->numOfCols); diff --git a/src/util/src/tfunctional.c b/src/util/src/tfunctional.c index c470a2b8ae..8b20f8fc0a 100644 --- a/src/util/src/tfunctional.c +++ b/src/util/src/tfunctional.c @@ -14,23 +14,24 @@ */ #include "tfunctional.h" -#include "tarray.h" - tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs) { tGenericSavedFunc* pSavedFunc = malloc(sizeof(tGenericSavedFunc) + numOfArgs * 
(sizeof(void*))); + if(pSavedFunc == NULL) return NULL; pSavedFunc->func = func; return pSavedFunc; } tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs) { tI32SavedFunc* pSavedFunc = malloc(sizeof(tI32SavedFunc) + numOfArgs * sizeof(void *)); + if(pSavedFunc == NULL) return NULL; pSavedFunc->func = func; return pSavedFunc; } tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs) { tVoidSavedFunc* pSavedFunc = malloc(sizeof(tVoidSavedFunc) + numOfArgs * sizeof(void*)); + if(pSavedFunc == NULL) return NULL; pSavedFunc->func = func; return pSavedFunc; } From 909e57325a160c019735b6b2d9139dcce8fdc56e Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 19 Aug 2021 10:26:50 +0800 Subject: [PATCH 173/239] [TD-6207]: fix altering schema too much corrupting meta file --- src/tsdb/src/tsdbMeta.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 96e86a6d99..8407a0519a 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -1250,8 +1250,14 @@ static int tsdbEncodeTable(void **buf, STable *pTable) { tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable)); tlen += tdEncodeKVRow(buf, pTable->tagVal); } else { - tlen += taosEncodeFixedU8(buf, (uint8_t)taosArrayGetSize(pTable->schema)); - for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) { + uint32_t arraySize = (uint32_t)taosArrayGetSize(pTable->schema); + if(arraySize > UINT8_MAX) { + tlen += taosEncodeFixedU8(buf, 0); + tlen += taosEncodeFixedU32(buf, arraySize); + } else { + tlen += taosEncodeFixedU8(buf, (uint8_t)arraySize); + } + for (uint32_t i = 0; i < arraySize; i++) { STSchema *pSchema = taosArrayGetP(pTable->schema, i); tlen += tdEncodeSchema(buf, pSchema); } @@ -1284,8 +1290,11 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) { buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable)); buf = tdDecodeKVRow(buf, &(pTable->tagVal)); } else { - uint8_t nSchemas; - buf = 
taosDecodeFixedU8(buf, &nSchemas); + uint32_t nSchemas = 0; + buf = taosDecodeFixedU8(buf, (uint8_t *)&nSchemas); + if(nSchemas == 0) { + buf = taosDecodeFixedU32(buf, &nSchemas); + } for (int i = 0; i < nSchemas; i++) { STSchema *pSchema; buf = tdDecodeSchema(buf, &pSchema); @@ -1485,4 +1494,4 @@ static void tsdbFreeTableSchema(STable *pTable) { taosArrayDestroy(pTable->schema); } -} \ No newline at end of file +} From 23cbcdd0b6d97055f40babeb0b3777ef2dd5b0c8 Mon Sep 17 00:00:00 2001 From: lichuang Date: Thu, 19 Aug 2021 15:11:11 +0800 Subject: [PATCH 174/239] [TD-4352]fix bug:save new file content in new meta cache --- src/tsdb/inc/tsdbFS.h | 5 +++-- src/tsdb/src/tsdbCommit.c | 33 +++++++++++++++++++++++---------- src/tsdb/src/tsdbFS.c | 1 + 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h index d63aeb14ac..3b6b6449f6 100644 --- a/src/tsdb/inc/tsdbFS.h +++ b/src/tsdb/inc/tsdbFS.h @@ -42,8 +42,9 @@ typedef struct { typedef struct { pthread_rwlock_t lock; - SFSStatus* cstatus; // current status - SHashObj* metaCache; // meta cache + SFSStatus* cstatus; // current status + SHashObj* metaCache; // meta cache + SHashObj* metaCacheComp; // meta cache for compact bool intxn; SFSStatus* nstatus; // new status } STsdbFS; diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 45fe6f6936..15fc3cc47d 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -57,7 +57,7 @@ typedef struct { #define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch))) static int tsdbCommitMeta(STsdbRepo *pRepo); -static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool updateMeta); +static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool compact); static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid); static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, 
SMFile *pMFile); static int tsdbCommitTSData(STsdbRepo *pRepo); @@ -328,7 +328,7 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) { pAct = (SActObj *)pNode->data; if (pAct->act == TSDB_UPDATE_META) { pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj)); - if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len, true) < 0) { + if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len, false) < 0) { tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, tstrerror(terrno)); tsdbCloseMFile(&mf); @@ -402,7 +402,7 @@ void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn) { pRtn->minFid, pRtn->midFid, pRtn->maxFid); } -static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool updateMeta) { +static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool compact) { char buf[64] = "\0"; void * pBuf = buf; SKVRecord rInfo; @@ -428,18 +428,18 @@ static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void } tsdbUpdateMFileMagic(pMFile, POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM))); - if (!updateMeta) { - pMFile->info.nRecords++; - return 0; - } - SKVRecord *pRecord = taosHashGet(pfs->metaCache, (void *)&uid, sizeof(uid)); + SHashObj* cache = compact ? 
pfs->metaCacheComp : pfs->metaCache; + + pMFile->info.nRecords++; + + SKVRecord *pRecord = taosHashGet(cache, (void *)&uid, sizeof(uid)); if (pRecord != NULL) { pMFile->info.tombSize += (pRecord->size + sizeof(SKVRecord)); } else { pMFile->info.nRecords++; } - taosHashPut(pfs->metaCache, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo)); + taosHashPut(cache, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo)); return 0; } @@ -517,6 +517,13 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { goto _err; } + // init Comp + assert(pfs->metaCacheComp == NULL); + pfs->metaCacheComp = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + if (pfs->metaCacheComp == NULL) { + goto _err; + } + pRecord = taosHashIterate(pfs->metaCache, NULL); while (pRecord) { if (tsdbSeekMFile(pMFile, pRecord->offset + sizeof(SKVRecord), SEEK_SET) < 0) { @@ -545,7 +552,7 @@ static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) { goto _err; } - if (tsdbUpdateMetaRecord(pfs, &mf, pRecord->uid, pBuf, (int)pRecord->size, false) < 0) { + if (tsdbUpdateMetaRecord(pfs, &mf, pRecord->uid, pBuf, (int)pRecord->size, true) < 0) { tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pRecord->uid, tstrerror(terrno)); goto _err; @@ -569,9 +576,15 @@ _err: // update current meta file info pfs->nstatus->pmf = NULL; tsdbUpdateMFile(pfs, &mf); + + taosHashCleanup(pfs->metaCache); + pfs->metaCache = pfs->metaCacheComp; + pfs->metaCacheComp = NULL; } else { // remove meta.tmp file remove(mf.f.aname); + taosHashCleanup(pfs->metaCacheComp); + pfs->metaCacheComp = NULL; } tfree(pBuf); diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index 63f89c1957..a3d6c59f72 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -215,6 +215,7 @@ STsdbFS *tsdbNewFS(STsdbCfg *pCfg) { } pfs->intxn = false; + pfs->metaCacheComp = NULL; pfs->nstatus = 
tsdbNewFSStatus(maxFSet); if (pfs->nstatus == NULL) { From e09367b01e379cb345a2e569df236de0776d4f21 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 19 Aug 2021 14:18:18 +0800 Subject: [PATCH 175/239] [TD-6209]add case for TD-6027 --- tests/pytest/alter/alter_table.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/tests/pytest/alter/alter_table.py b/tests/pytest/alter/alter_table.py index a5acb7a73e..33e0aec727 100644 --- a/tests/pytest/alter/alter_table.py +++ b/tests/pytest/alter/alter_table.py @@ -102,6 +102,20 @@ class TDTestCase: print("check2: i=%d colIdx=%d" % (i, colIdx)) tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3)) + def alter_table_255_times(self): # add case for TD-6207 + for i in range(255): + tdLog.info("alter table st add column cb%d int"%i) + tdSql.execute("alter table st add column cb%d int"%i) + tdSql.execute("insert into t0 (ts,c1) values(now,1)") + tdSql.execute("reset query cache") + tdSql.query("select * from st") + tdSql.execute("create table mt(ts timestamp, i int)") + tdSql.execute("insert into mt values(now,11)") + tdSql.query("select * from mt") + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("describe db.st") + def run(self): # Setup params db = "db" @@ -131,12 +145,14 @@ class TDTestCase: tdSql.checkData(0, i, self.rowNum * (size - i)) - tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float)") - tdSql.execute("create table t0 using st tags(null)") + tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float,t2 int,t3 double)") + tdSql.execute("create table t0 using st tags(null,1,2.3)") tdSql.execute("alter table t0 set tag t1=2.1") tdSql.query("show tables") tdSql.checkRows(2) + self.alter_table_255_times() + def stop(self): tdSql.close() From 7f9eca2a192e649f8c7f0f117483425198d303c9 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 19 Aug 2021 17:02:48 +0800 Subject: [PATCH 176/239] [TD-2639] : fix typo in parameters. 
--- documentation20/cn/02.getting-started/01.docker/docs.md | 2 +- documentation20/cn/04.model/docs.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/cn/02.getting-started/01.docker/docs.md b/documentation20/cn/02.getting-started/01.docker/docs.md index 30803d9777..32ac8fe7a3 100644 --- a/documentation20/cn/02.getting-started/01.docker/docs.md +++ b/documentation20/cn/02.getting-started/01.docker/docs.md @@ -191,7 +191,7 @@ cdf548465318 1,通过端口映射(-p),将容器内部开放的网络端口映射到宿主机的指定端口上。通过挂载本地目录(-v),可以实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。 ```bash -$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine +$ docker run -d -v /etc/taos:/etc/taos -P 6041:6041 tdengine/tdengine 526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql diff --git a/documentation20/cn/04.model/docs.md b/documentation20/cn/04.model/docs.md index ccdca64c10..ed1d2f7168 100644 --- a/documentation20/cn/04.model/docs.md +++ b/documentation20/cn/04.model/docs.md @@ -13,7 +13,7 @@ TDengine采用关系型数据模型,需要建库、建表。因此对于一个 ```mysql CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; ``` -上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4,允许更新数据。详细的语法及参数请见 [TAOS SQL 的数据管理](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节。 +上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为6,允许更新数据。详细的语法及参数请见 [TAOS SQL 的数据管理](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节。 创建库之后,需要使用SQL命令USE将当前库切换过来,例如: From dcb822e00c365e20a3a9d1ed508cb0d34847d932 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 19 Aug 2021 17:28:27 +0800 Subject: [PATCH 177/239] [TD-6046] fix ts top/bottom index error when using order by --- src/client/inc/tscUtil.h | 1 + src/client/src/tscSQLParser.c | 12 ++++++++---- src/client/src/tscUtil.c | 11 +++++++++++ 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 
f2d25c1e84..f14cd8f9e2 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -213,6 +213,7 @@ SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t function int16_t size); size_t tscNumOfExprs(SQueryInfo* pQueryInfo); +size_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo); SExprInfo *tscExprGet(SQueryInfo* pQueryInfo, int32_t index); int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy); int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 857d1a4419..63ce5ebf2c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5864,10 +5864,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } else if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + size_t pos = tscExprTopBottomIndex(pQueryInfo); + assert(pos > 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, pos); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } @@ -5949,10 +5951,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } } else { /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + size_t pos = tscExprTopBottomIndex(pQueryInfo); + assert(pos > 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, pos); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != 
PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 19a816faeb..72791568ba 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2427,6 +2427,17 @@ size_t tscNumOfExprs(SQueryInfo* pQueryInfo) { return taosArrayGetSize(pQueryInfo->exprList); } +size_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo){ + size_t numOfExprs = tscNumOfExprs(pQueryInfo); + for(int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) { + return i; + } + } + return -1; +} + // todo REFACTOR void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) { assert (pExpr != NULL || argument != NULL || bytes != 0); From 99d8d0333a3a335a1a21be21408e691d468f332f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 19 Aug 2021 18:53:53 +0800 Subject: [PATCH 178/239] [TD-6219]: remove few taosdemo macros. (#7468) --- src/kit/taosdemo/taosdemo.c | 62 ------------------------------------- 1 file changed, 62 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index a1c68b2f3c..ae1ce0e82a 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -53,14 +53,6 @@ #include "taoserror.h" #include "tutil.h" -#define STMT_IFACE_ENABLED 1 -#define NANO_SECOND_ENABLED 1 -#define SET_THREADNAME_ENABLED 1 - -#if SET_THREADNAME_ENABLED == 0 -#define setThreadName(name) -#endif - #define REQ_EXTRA_BUF_LEN 1024 #define RESP_BUF_LEN 4096 @@ -295,9 +287,7 @@ typedef struct SSuperTable_S { uint64_t lenOfTagOfOneRow; char* sampleDataBuf; -#if STMT_IFACE_ENABLED == 1 char* sampleBindArray; -#endif //int sampleRowCount; //int sampleUsePos; @@ -733,11 +723,7 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-P", indent, "The TCP/IP port number to use for the connection. 
Default is 0."); printf("%s%s%s%s\n", indent, "-I", indent, -#if STMT_IFACE_ENABLED == 1 "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'."); -#else - "The interface (taosc, rest) taosdemo uses. Default is 'taosc'."); -#endif printf("%s%s%s%s\n", indent, "-d", indent, "Destination database. Default is 'test'."); printf("%s%s%s%s\n", indent, "-a", indent, @@ -849,10 +835,8 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->iface = TAOSC_IFACE; } else if (0 == strcasecmp(argv[i], "rest")) { arguments->iface = REST_IFACE; -#if STMT_IFACE_ENABLED == 1 } else if (0 == strcasecmp(argv[i], "stmt")) { arguments->iface = STMT_IFACE; -#endif } else { errorPrint("%s", "\n\t-I need a valid string following!\n"); exit(EXIT_FAILURE); @@ -1694,9 +1678,7 @@ static int printfInsertMeta() { } if (g_Dbs.db[i].dbCfg.precision[0] != 0) { if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) -#if NANO_SECOND_ENABLED == 1 || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2)) -#endif || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))) { printf(" precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision); @@ -1887,9 +1869,7 @@ static void printfInsertMetaToFile(FILE* fp) { } if (g_Dbs.db[i].dbCfg.precision[0] != 0) { if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) -#if NANO_SECOND_ENABLED == 1 || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2)) -#endif || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision); @@ -2092,10 +2072,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { time_t tt; if (precision == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)(val / 1000000); -#if NANO_SECOND_ENABLED == 1 } if (precision == TSDB_TIME_PRECISION_NANO) { tt = (time_t)(val / 1000000000); -#endif } else { tt = (time_t)(val / 1000); } @@ -2117,10 +2095,8 @@ static char* formatTimestamp(char* buf, int64_t val, int 
precision) { if (precision == TSDB_TIME_PRECISION_MICRO) { sprintf(buf + pos, ".%06d", (int)(val % 1000000)); -#if NANO_SECOND_ENABLED == 1 } else if (precision == TSDB_TIME_PRECISION_NANO) { sprintf(buf + pos, ".%09d", (int)(val % 1000000000)); -#endif } else { sprintf(buf + pos, ".%03d", (int)(val % 1000)); } @@ -3181,10 +3157,8 @@ int createDatabasesAndStables(char *command) { " fsync %d", g_Dbs.db[i].dbCfg.fsync); } if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) -#if NANO_SECOND_ENABLED == 1 || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2)) -#endif || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, @@ -4263,10 +4237,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE; } else if (0 == strcasecmp(stbIface->valuestring, "rest")) { g_Dbs.db[i].superTbls[j].iface= REST_IFACE; -#if STMT_IFACE_ENABLED == 1 } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { g_Dbs.db[i].superTbls[j].iface= STMT_IFACE; -#endif } else { errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n", __func__, __LINE__, stbIface->valuestring); @@ -5075,7 +5047,6 @@ static void postFreeResource() { free(g_Dbs.db[i].superTbls[j].sampleDataBuf); g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; } -#if STMT_IFACE_ENABLED == 1 if (g_Dbs.db[i].superTbls[j].sampleBindArray) { for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) { uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( @@ -5090,7 +5061,6 @@ static void postFreeResource() { } } tmfree((char *)g_Dbs.db[i].superTbls[j].sampleBindArray); -#endif if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { free(g_Dbs.db[i].superTbls[j].tagDataBuf); @@ -5383,7 +5353,6 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) } break; -#if STMT_IFACE_ENABLED == 1 case STMT_IFACE: debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, pThreadInfo->stmt); @@ -5396,7 +5365,6 @@ static 
int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) } affectedRows = k; break; -#endif default: errorPrint("%s() LN%d: unknown insert mode: %d\n", @@ -5769,7 +5737,6 @@ static int64_t generateInterlaceDataWithoutStb( return k; } -#if STMT_IFACE_ENABLED == 1 static int32_t prepareStmtBindArrayByType( TAOS_BIND *bind, char *dataType, int32_t dataLen, @@ -6604,7 +6571,6 @@ static int32_t prepareStbStmtWithSample( return k; } -#endif static int32_t generateStbProgressiveData( SSuperTable *stbInfo, @@ -6805,7 +6771,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int32_t generated; if (stbInfo) { if (stbInfo->iface == STMT_IFACE) { -#if STMT_IFACE_ENABLED == 1 if (sourceRand) { generated = prepareStbStmtRand( pThreadInfo, @@ -6825,9 +6790,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { startTime, &(pThreadInfo->samplePos)); } -#else - generated = -1; -#endif } else { generated = generateStbInterlaceData( pThreadInfo, @@ -6845,16 +6807,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__, tableName, batchPerTbl, startTime); -#if STMT_IFACE_ENABLED == 1 generated = prepareStmtWithoutStb( pThreadInfo, tableName, batchPerTbl, insertRows, i, startTime); -#else - generated = -1; -#endif } else { generated = generateInterlaceDataWithoutStb( tableName, batchPerTbl, @@ -7067,7 +7025,6 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { int32_t generated; if (stbInfo) { if (stbInfo->iface == STMT_IFACE) { -#if STMT_IFACE_ENABLED == 1 if (sourceRand) { generated = prepareStbStmtRand( pThreadInfo, @@ -7086,9 +7043,6 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { insertRows, i, start_time, &(pThreadInfo->samplePos)); } -#else - generated = -1; -#endif } else { generated = generateStbProgressiveData( stbInfo, @@ -7100,16 +7054,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { } } else { if (g_args.iface == STMT_IFACE) { -#if STMT_IFACE_ENABLED == 1 
generated = prepareStmtWithoutStb( pThreadInfo, tableName, g_args.num_of_RPR, insertRows, i, start_time); -#else - generated = -1; -#endif } else { generated = generateProgressiveDataWithoutStb( tableName, @@ -7329,7 +7279,6 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * return 0; } -#if STMT_IFACE_ENABLED == 1 static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) { stbInfo->sampleBindArray = calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); @@ -7400,7 +7349,6 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) return 0; } -#endif static void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSuperTable* stbInfo) { @@ -7411,10 +7359,8 @@ static void startMultiThreadInsertData(int threads, char* db_name, timePrec = TSDB_TIME_PRECISION_MILLI; } else if (0 == strncasecmp(precision, "us", 2)) { timePrec = TSDB_TIME_PRECISION_MICRO; -#if NANO_SECOND_ENABLED == 1 } else if (0 == strncasecmp(precision, "ns", 2)) { timePrec = TSDB_TIME_PRECISION_NANO; -#endif } else { errorPrint("Not support precision: %s\n", precision); exit(EXIT_FAILURE); @@ -7557,7 +7503,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, memset(pids, 0, threads * sizeof(pthread_t)); memset(infos, 0, threads * sizeof(threadInfo)); -#if STMT_IFACE_ENABLED == 1 char *stmtBuffer = calloc(1, BUFFER_SIZE); assert(stmtBuffer); if ((g_args.iface == STMT_IFACE) @@ -7598,7 +7543,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, parseSampleFileToStmt(stbInfo, timePrec); } } -#endif for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; @@ -7626,7 +7570,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(EXIT_FAILURE); } -#if STMT_IFACE_ENABLED == 1 if ((g_args.iface == STMT_IFACE) || ((stbInfo) && (stbInfo->iface == STMT_IFACE))) { @@ -7654,7 +7597,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, } 
pThreadInfo->bind_ts = malloc(sizeof(int64_t)); } -#endif } else { pThreadInfo->taos = NULL; } @@ -7680,9 +7622,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } } -#if STMT_IFACE_ENABLED == 1 free(stmtBuffer); -#endif for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); @@ -7697,12 +7637,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; -#if STMT_IFACE_ENABLED == 1 if (pThreadInfo->stmt) { taos_stmt_close(pThreadInfo->stmt); tmfree((char *)pThreadInfo->bind_ts); } -#endif tsem_destroy(&(pThreadInfo->lock_sem)); taos_close(pThreadInfo->taos); From 3e64ce5c2d9eb1421cc63b0de178b522d94ead2d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 19 Aug 2021 21:29:36 +0800 Subject: [PATCH 179/239] Feature/sangshuduo/td 5136 taosdemo rework (#7472) * [TD-5136]: taosdemo simulate real senario. * update test case according to taosdemo change * adjust range of semi-random data. * make demo mode use different tag name and value. * change malloc to calloc for pid allocation. * fix typo. * fix binary length default value and group id logic. * fix help msg. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index ae1ce0e82a..48d2aecc1f 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -738,12 +738,13 @@ static void printHelp() { "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC."); printf("%s%s%s%s\n", indent, "-b", indent, "The data_type of columns, default: FLOAT, INT, FLOAT."); - printf("%s%s%s%s\n", indent, "-w", indent, - "The length of data_type 'BINARY' or 'NCHAR'. Default is 16"); + printf("%s%s%s%s%d\n", indent, "-w", indent, + "The length of data_type 'BINARY' or 'NCHAR'. 
Default is ", + g_args.len_of_binary); printf("%s%s%s%s%d%s%d\n", indent, "-l", indent, - "The number of columns per record. Default is ", + "The number of columns per record. Demo mode by default is ", DEFAULT_DATATYPE_NUM, - ". Max values is ", + " (float, int, float). Max values is ", MAX_NUM_COLUMNS); printf("%s%s%s%s\n", indent, indent, indent, "All of the new column(s) type is INT. If use -b to specify column type, -l will be ignored."); From f786c609bbca6f4d12a5001f69b5e6eadfeb9ba1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=8D=A1=E6=AF=94=E5=8D=A1=E6=AF=94?= <30525741+jackwener@users.noreply.github.com> Date: Thu, 19 Aug 2021 21:30:04 +0800 Subject: [PATCH 180/239] Update docs.md --- documentation20/cn/01.evaluation/docs.md | 27 ++++++++++++++---------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/documentation20/cn/01.evaluation/docs.md b/documentation20/cn/01.evaluation/docs.md index f7cdc31034..4aba82a54c 100644 --- a/documentation20/cn/01.evaluation/docs.md +++ b/documentation20/cn/01.evaluation/docs.md @@ -2,18 +2,18 @@ ## TDengine 简介 -TDengine是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。 +TDengine 是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL 数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。 -TDengine的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与Hadoop等典型的大数据平台相比,它具有如下鲜明的特点: +TDengine 的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine 还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与 Hadoop 等典型的大数据平台相比,它具有如下鲜明的特点: -* __10倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少2万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。 -* __硬件或云服务成本降至1/5__:由于超强性能,计算资源不到通用大数据方案的1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的1/10。 -* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件,大幅降低应用开发和维护的复杂度成本。 -* 
__强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, MATLAB随时进行。 -* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R等集成。后续将支持OPC, Hadoop, Spark等,BI工具也将无缝连接。 -* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类标准SQL,支持RESTful,支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似,零学习成本。 +* __10倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少 2 万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。 +* __硬件或云服务成本降至1/5__:由于超强性能,计算资源不到通用大数据方案的 1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的 1/10。 +* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成 Kafka/Redis/HBase/Spark/HDFS 等软件,大幅降低应用开发和维护的复杂度成本。 +* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过 Shell, Python, R, MATLAB 随时进行。 +* __与第三方工具无缝连接__:不用一行代码,即可与 Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R 等集成。后续将支持 OPC, Hadoop, Spark 等,BI 工具也将无缝连接。 +* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类标准 SQL,支持 RESTful,支持 Python/Java/C/C++/C#/Go/Node.js, 与 MySQL 相似,零学习成本。 -采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。 +采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。 ![TDengine技术生态图](page://images/eco_system.png)
图 1. TDengine技术生态图
@@ -21,11 +21,12 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的 ## TDengine 总体适用场景 -作为一个IOT大数据平台,TDengine的典型适用场景是在IOT范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如CRM,ERP等,不在本文讨论范围内。 +作为一个 IOT 大数据平台,TDengine 的典型适用场景是在 IOT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。 ### 数据源特点和需求 -从数据源角度,设计人员可以从下面几个角度分析TDengine在目标应用系统里面的适用性。 + +从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。 |数据源特点和需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| @@ -34,6 +35,7 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的 |数据源数目巨大| | | √ |TDengine设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。| ### 系统架构要求 + |系统架构要求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| |要求简单可靠的系统架构| | | √ |TDengine的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。| @@ -41,12 +43,14 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的 |标准化规范| | | √ |TDengine使用标准的SQL语言提供主要功能,遵守标准化规范。| ### 系统功能需求 + |系统功能需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| |要求完整的内置数据处理算法| | √ | |TDengine的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。| |需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑TDengine和关系型数据系统配合实现系统功能。| ### 系统性能需求 + |系统性能需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| |要求较大的总体处理能力| | | √ |TDengine的集群功能可以轻松地让多服务器配合达成处理能力的提升。| @@ -54,6 +58,7 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的 |要求快速处理小粒度数据| | | √ |这方面TDengine性能可以完全对标关系型和NoSQL型数据处理系统。| ### 系统维护需求 + |系统维护需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| |要求系统可靠运行| | | √ |TDengine的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。| From e9c28a630760bd728060fe3ab811fe7a5e1e303f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 19 Aug 2021 23:34:47 +0800 Subject: [PATCH 181/239] Feature/sangshuduo/td 5844 cmdline parameters align (#7458) * [TD-5844]: make cmd line parameter similar. * fix test case align with taosdemo change. * fix windows stack overflow issue. * fix mac compile error. * fix taosdemo cmdline parameter in tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py * fix windows compiler options. * make taos.exe use mysql style password input. 
* make taos shell and taosdump use mysql style password input. * determine scanf return value. * make console echo off during password input. * use one macro to define password length. * fix --password. change taos shell '-z' for timezone --- src/inc/taosdef.h | 2 ++ src/kit/shell/inc/shell.h | 1 - src/kit/shell/src/shellDarwin.c | 14 +++++++++---- src/kit/shell/src/shellLinux.c | 21 ++++++++++++-------- src/kit/shell/src/shellWindows.c | 14 +++++++++---- src/kit/taosdemo/taosdemo.c | 23 +++++++++++---------- src/kit/taosdump/taosdump.c | 34 +++++++++++++++++++++----------- 7 files changed, 68 insertions(+), 41 deletions(-) diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index fceeaea0ae..74849555c6 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -88,6 +88,8 @@ extern const int32_t TYPE_BYTES[15]; #define TSDB_DEFAULT_PASS "taosdata" #endif +#define SHELL_MAX_PASSWORD_LEN 20 + #define TSDB_TRUE 1 #define TSDB_FALSE 0 #define TSDB_OK 0 diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 4d72d36e2e..f207a866dd 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -25,7 +25,6 @@ #define MAX_USERNAME_SIZE 64 #define MAX_DBNAME_SIZE 64 #define MAX_IP_SIZE 20 -#define MAX_PASSWORD_SIZE 20 #define MAX_HISTORY_SIZE 1000 #define MAX_COMMAND_SIZE 1048586 #define HISTORY_FILE ".taos_history" diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 5ca4537aeb..9161860f07 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -66,7 +66,7 @@ void printHelp() { char DARWINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; -char g_password[MAX_PASSWORD_SIZE]; +char g_password[SHELL_MAX_PASSWORD_LEN]; void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { wordexp_t full_path; @@ -81,19 +81,25 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strncmp(argv[i], "-p", 2) == 0) { + else if ((strncmp(argv[i], "-p", 2) == 0) + || (strncmp(argv[i], "--password", 10) == 0)) { strcpy(tsOsName, "Darwin"); printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info()); - if (strlen(argv[i]) == 2) { + if ((strlen(argv[i]) == 2) + || (strncmp(argv[i], "--password", 10) == 0)) { printf("Enter password: "); + taosSetConsoleEcho(false); if (scanf("%s", g_password) > 1) { fprintf(stderr, "password read error\n"); } + taosSetConsoleEcho(true); getchar(); } else { - tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN); } arguments->password = g_password; + strcpy(argv[i], ""); + argc -= 1; } // for management port else if (strcmp(argv[i], "-P") == 0) { diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 610cbba5c1..27d678c6d8 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -47,7 +47,7 @@ static struct argp_option options[] = { {"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."}, {"check", 'k', "CHECK", 0, "Check tables."}, {"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."}, - {"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."}, + {"timezone", 'z', "TIMEZONE", 0, "Time zone of the shell, default is local."}, {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync|speen|fqdn."}, {"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."}, {"pktnum", 
'N', "PKTNUM", 0, "Packet numbers used for net test, default is 100."}, @@ -76,7 +76,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { } break; - case 't': + case 'z': arguments->timezone = arg; break; case 'u': @@ -173,22 +173,27 @@ static struct argp argp = {options, parse_opt, args_doc, doc}; char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; -char g_password[MAX_PASSWORD_SIZE]; +char g_password[SHELL_MAX_PASSWORD_LEN]; -static void parse_password( +static void parse_args( int argc, char *argv[], SShellArguments *arguments) { for (int i = 1; i < argc; i++) { - if (strncmp(argv[i], "-p", 2) == 0) { + if ((strncmp(argv[i], "-p", 2) == 0) + || (strncmp(argv[i], "--password", 10) == 0)) { strcpy(tsOsName, "Linux"); printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info()); - if (strlen(argv[i]) == 2) { + if ((strlen(argv[i]) == 2) + || (strncmp(argv[i], "--password", 10) == 0)) { printf("Enter password: "); + taosSetConsoleEcho(false); if (scanf("%20s", g_password) > 1) { fprintf(stderr, "password reading error\n"); } + taosSetConsoleEcho(true); getchar(); } else { - tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN); + strcpy(argv[i], "-p"); } arguments->password = g_password; arguments->is_use_passwd = true; @@ -203,7 +208,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { argp_program_version = verType; if (argc > 1) { - parse_password(argc, argv, arguments); + parse_args(argc, argv, arguments); } argp_parse(&argp, argc, argv, 0, 0, arguments); diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index dd9f49dd12..cb707d9331 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -68,7 +68,7 @@ void printHelp() { exit(EXIT_SUCCESS); } -char 
g_password[MAX_PASSWORD_SIZE]; +char g_password[SHELL_MAX_PASSWORD_LEN]; void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { for (int i = 1; i < argc; i++) { @@ -82,20 +82,26 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strncmp(argv[i], "-p", 2) == 0) { + else if ((strncmp(argv[i], "-p", 2) == 0) + || (strncmp(argv[i], "--password", 10) == 0)) { arguments->is_use_passwd = true; strcpy(tsOsName, "Windows"); printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info()); - if (strlen(argv[i]) == 2) { + if ((strlen(argv[i]) == 2) + || (strncmp(argv[i], "--password", 10) == 0)) { printf("Enter password: "); + taosSetConsoleEcho(false); if (scanf("%s", g_password) > 1) { fprintf(stderr, "password read error!\n"); } + taosSetConsoleEcho(true); getchar(); } else { - tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN); } arguments->password = g_password; + strcpy(argv[i], ""); + argc -= 1; } // for management port else if (strcmp(argv[i], "-P") == 0) { diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 48d2aecc1f..75da9d9f4b 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -69,7 +69,6 @@ extern char configDir[]; #define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) #define MAX_USERNAME_SIZE 64 -#define MAX_PASSWORD_SIZE 20 #define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space @@ -208,7 +207,7 @@ typedef struct SArguments_S { uint16_t port; uint16_t iface; char * user; - char password[MAX_PASSWORD_SIZE]; + char password[SHELL_MAX_PASSWORD_LEN]; char * database; int replica; char * tb_prefix; @@ -356,7 +355,7 @@ typedef struct SDbs_S { uint16_t port; 
char user[MAX_USERNAME_SIZE]; - char password[MAX_PASSWORD_SIZE]; + char password[SHELL_MAX_PASSWORD_LEN]; char resultFile[MAX_FILE_NAME_LEN]; bool use_metric; bool insert_only; @@ -422,7 +421,7 @@ typedef struct SQueryMetaInfo_S { uint16_t port; struct sockaddr_in serv_addr; char user[MAX_USERNAME_SIZE]; - char password[MAX_PASSWORD_SIZE]; + char password[SHELL_MAX_PASSWORD_LEN]; char dbName[TSDB_DB_NAME_LEN]; char queryMode[SMALL_BUFF_LEN]; // taosc, rest @@ -858,7 +857,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } taosSetConsoleEcho(true); } else { - tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + tstrncpy(arguments->password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN); } } else if (strcmp(argv[i], "-o") == 0) { if (argc == i+1) { @@ -3780,9 +3779,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* password = cJSON_GetObjectItem(root, "password"); if (password && password->type == cJSON_String && password->valuestring != NULL) { - tstrncpy(g_Dbs.password, password->valuestring, MAX_PASSWORD_SIZE); + tstrncpy(g_Dbs.password, password->valuestring, SHELL_MAX_PASSWORD_LEN); } else if (!password) { - tstrncpy(g_Dbs.password, "taosdata", MAX_PASSWORD_SIZE); + tstrncpy(g_Dbs.password, "taosdata", SHELL_MAX_PASSWORD_LEN); } cJSON* resultfile = cJSON_GetObjectItem(root, "result_file"); @@ -4516,9 +4515,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* password = cJSON_GetObjectItem(root, "password"); if (password && password->type == cJSON_String && password->valuestring != NULL) { - tstrncpy(g_queryInfo.password, password->valuestring, MAX_PASSWORD_SIZE); + tstrncpy(g_queryInfo.password, password->valuestring, SHELL_MAX_PASSWORD_LEN); } else if (!password) { - tstrncpy(g_queryInfo.password, "taosdata", MAX_PASSWORD_SIZE);; + tstrncpy(g_queryInfo.password, "taosdata", SHELL_MAX_PASSWORD_LEN);; } cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, 
no, @@ -8826,7 +8825,7 @@ static void initOfInsertMeta() { tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); g_Dbs.port = 6030; tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); - tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE); + tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN); g_Dbs.threadCount = 2; g_Dbs.use_metric = g_args.use_metric; @@ -8839,7 +8838,7 @@ static void initOfQueryMeta() { tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); g_queryInfo.port = 6030; tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); - tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE); + tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN); } static void setParaFromArg() { @@ -8853,7 +8852,7 @@ static void setParaFromArg() { tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); } - tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); + tstrncpy(g_Dbs.password, g_args.password, SHELL_MAX_PASSWORD_LEN); if (g_args.port) { g_Dbs.port = g_args.port; diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index c54b8da1b7..6b553b3824 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -243,19 +243,15 @@ static struct argp_option options[] = { {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, {"debug", 'g', 0, 0, "Print debug info.", 8}, - {"verbose", 'b', 0, 0, "Print verbose debug info.", 9}, - {"performanceprint", 'm', 0, 0, "Print performance debug info.", 10}, {0} }; -#define MAX_PASSWORD_SIZE 20 - /* Used by main to communicate with parse_opt. 
*/ typedef struct arguments { // connection option char *host; char *user; - char password[MAX_PASSWORD_SIZE]; + char password[SHELL_MAX_PASSWORD_LEN]; uint16_t port; char cversion[12]; uint16_t mysqlFlag; @@ -432,7 +428,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { break; // dump unit option case 'A': - g_args.all_databases = true; break; case 'D': g_args.databases = true; @@ -555,11 +550,14 @@ static void parse_precision_first( } } -static void parse_password( +static void parse_args( int argc, char *argv[], SArguments *arguments) { + for (int i = 1; i < argc; i++) { - if (strncmp(argv[i], "-p", 2) == 0) { - if (strlen(argv[i]) == 2) { + if ((strncmp(argv[i], "-p", 2) == 0) + || (strncmp(argv[i], "--password", 10) == 0)) { + if ((strlen(argv[i]) == 2) + || (strncmp(argv[i], "--password", 10) == 0)) { printf("Enter password: "); taosSetConsoleEcho(false); if(scanf("%20s", arguments->password) > 1) { @@ -567,10 +565,22 @@ static void parse_password( } taosSetConsoleEcho(true); } else { - tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + tstrncpy(arguments->password, (char *)(argv[i] + 2), + SHELL_MAX_PASSWORD_LEN); + strcpy(argv[i], "-p"); } - argv[i] = ""; + } else if (strcmp(argv[i], "-gg") == 0) { + arguments->verbose_print = true; + strcpy(argv[i], ""); + } else if (strcmp(argv[i], "-PP") == 0) { + arguments->performance_print = true; + strcpy(argv[i], ""); + } else if (strcmp(argv[i], "-A") == 0) { + g_args.all_databases = true; + } else { + continue; } + } } @@ -639,7 +649,7 @@ int main(int argc, char *argv[]) { if (argc > 1) { parse_precision_first(argc, argv, &g_args); parse_timestamp(argc, argv, &g_args); - parse_password(argc, argv, &g_args); + parse_args(argc, argv, &g_args); } argp_parse(&argp, argc, argv, 0, 0, &g_args); From bb28e3a5f8c6c696c705516b35ee400ff439f43b Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 19 Aug 2021 23:43:02 +0800 Subject: [PATCH 182/239] [TD-6233]: make column 
compression threshold user configurable --- src/client/src/tscServer.c | 7 +++++-- src/common/src/tglobal.c | 19 +++++++++---------- src/query/inc/qExecutor.h | 4 +--- src/query/src/queryMain.c | 8 ++++++-- 4 files changed, 21 insertions(+), 17 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 29c0eb693f..783c7a3dbe 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2678,7 +2678,7 @@ int tscProcessQueryRsp(SSqlObj *pSql) { return 0; } -static void decompressQueryColData(SSqlRes *pRes, SQueryInfo* pQueryInfo, char **data, int8_t compressed, int compLen) { +static void decompressQueryColData(SSqlObj *pSql, SSqlRes *pRes, SQueryInfo* pQueryInfo, char **data, int8_t compressed, int32_t compLen) { int32_t decompLen = 0; int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutput; int32_t *compSizes; @@ -2715,6 +2715,9 @@ static void decompressQueryColData(SSqlRes *pRes, SQueryInfo* pQueryInfo, char * pData = *data + compLen + numOfCols * sizeof(int32_t); } + tscDebug("0x%"PRIx64" decompress col data, compressed size:%d, decompressed size:%d", + pSql->self, (int32_t)(compLen + numOfCols * sizeof(int32_t)), decompLen); + int32_t tailLen = pRes->rspLen - sizeof(SRetrieveTableRsp) - decompLen; memmove(*data + decompLen, pData, tailLen); memmove(*data, outputBuf, decompLen); @@ -2749,7 +2752,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { //Decompress col data if compressed from server if (pRetrieve->compressed) { int32_t compLen = htonl(pRetrieve->compLen); - decompressQueryColData(pRes, pQueryInfo, &pRes->data, pRetrieve->compressed, compLen); + decompressQueryColData(pSql, pRes, pQueryInfo, &pRes->data, pRetrieve->compressed, compLen); } STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index b7930dd43e..3c84f16d4f 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -76,12 +76,11 @@ int32_t 
tsMaxBinaryDisplayWidth = 30; int32_t tsCompressMsgSize = -1; /* denote if server needs to compress the retrieved column data before adding to the rpc response message body. - * 0: disable column data compression - * 1: enable column data compression - * This option is default to disabled. Once enabled, compression will be conducted if any column has size more - * than QUERY_COMP_THRESHOLD. Otherwise, no further compression is needed. + * 0: all data are compressed + * -1: all data are not compressed + * other values: if the any retrieved column size is greater than the tsCompressColData, all data will be compressed. */ -int32_t tsCompressColData = 0; +int32_t tsCompressColData = -1; // client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; @@ -95,7 +94,7 @@ int32_t tsMaxNumOfOrderedResults = 100000; // 10 ms for sliding time, the value will changed in case of time precision changed int32_t tsMinSlidingTime = 10; -// the maxinum number of distict query result +// the maxinum number of distict query result int32_t tsMaxNumOfDistinctResults = 1000 * 10000; // 1 us for interval time range, changed accordingly @@ -1006,10 +1005,10 @@ static void doInitGlobalConfig(void) { cfg.option = "compressColData"; cfg.ptr = &tsCompressColData; - cfg.valType = TAOS_CFG_VTYPE_INT8; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; - cfg.minValue = 0; - cfg.maxValue = 1; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = -1; + cfg.maxValue = 100000000.0f; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 6e8eec2456..b54bead94a 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -43,9 +43,7 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int #define GET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 
0:((_r)->outputBuf)->info.rows) -//TODO: may need to fine tune this threshold -#define QUERY_COMP_THRESHOLD (1024 * 512) -#define NEEDTO_COMPRESS_QUERY(size) ((size) > QUERY_COMP_THRESHOLD ? 1 : 0) +#define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData? 1 : 0) enum { // when query starts to execute, this status will set diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index d25f5eab7a..64e3e67843 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -357,7 +357,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co } (*pRsp)->precision = htons(pQueryAttr->precision); - (*pRsp)->compressed = (int8_t)(tsCompressColData && checkNeedToCompressQueryCol(pQInfo)); + (*pRsp)->compressed = (int8_t)((tsCompressColData != -1) && checkNeedToCompressQueryCol(pQInfo)); if (GET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv)) > 0 && pQInfo->code == TSDB_CODE_SUCCESS) { doDumpQueryResult(pQInfo, (*pRsp)->data, (*pRsp)->compressed, &compLen); @@ -367,8 +367,12 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co if ((*pRsp)->compressed && compLen != 0) { int32_t numOfCols = pQueryAttr->pExpr2 ? 
pQueryAttr->numOfExpr2 : pQueryAttr->numOfOutput; - *contLen = *contLen - pQueryAttr->resultRowSize * s + compLen + numOfCols * sizeof(int32_t); + int32_t origSize = pQueryAttr->resultRowSize * s; + int32_t compSize = compLen + numOfCols * sizeof(int32_t); + *contLen = *contLen - origSize + compSize; *pRsp = (SRetrieveTableRsp *)rpcReallocCont(*pRsp, *contLen); + qDebug("QInfo:0x%"PRIx64" compress col data, uncompressed size:%d, compressed size:%d, ratio:%.2f\n", + pQInfo->qId, origSize, compSize, (float)origSize / (float)compSize); } (*pRsp)->compLen = htonl(compLen); From 63223c59c0555e11ce2dac9c1cda6ac10d31229f Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 19 Aug 2021 23:43:02 +0800 Subject: [PATCH 183/239] [TD-6233]: make column compression threshold user configurable --- src/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 3c84f16d4f..52e129928e 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -78,7 +78,7 @@ int32_t tsCompressMsgSize = -1; /* denote if server needs to compress the retrieved column data before adding to the rpc response message body. * 0: all data are compressed * -1: all data are not compressed - * other values: if the any retrieved column size is greater than the tsCompressColData, all data will be compressed. + * other values: if any retrieved column size is greater than the tsCompressColData, all data will be compressed. 
*/ int32_t tsCompressColData = -1; From 9cd910b96f485b1b0cd8977776e9e878a6cc21b3 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 19 Aug 2021 23:43:02 +0800 Subject: [PATCH 184/239] [TD-6233]: make column compression threshold user configurable --- src/query/src/queryMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 64e3e67843..d56c12ab87 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -371,7 +371,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co int32_t compSize = compLen + numOfCols * sizeof(int32_t); *contLen = *contLen - origSize + compSize; *pRsp = (SRetrieveTableRsp *)rpcReallocCont(*pRsp, *contLen); - qDebug("QInfo:0x%"PRIx64" compress col data, uncompressed size:%d, compressed size:%d, ratio:%.2f\n", + qDebug("QInfo:0x%"PRIx64" compress col data, uncompressed size:%d, compressed size:%d, ratio:%.2f", pQInfo->qId, origSize, compSize, (float)origSize / (float)compSize); } (*pRsp)->compLen = htonl(compLen); From c5cf06d2c6e912cc52552094016b9b1a6d417e85 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 20 Aug 2021 10:27:19 +0800 Subject: [PATCH 185/239] full test for feature/TD-6214 --- tests/pytest/query/nestedQuery/queryWithOrderLimit.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py index 692b5b7d36..aa16e8cc76 100644 --- a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py +++ b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py @@ -29,7 +29,6 @@ class TDTestCase: self.tables = 10 self.rowsPerTable = 100 - def run(self): # tdSql.execute("drop database db ") tdSql.prepare() From 938275238990e5c79f08d5c9ad14356bfd624260 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 20 Aug 2021 10:34:53 +0800 Subject: [PATCH 186/239] [TD-6046] fix ts top/bottom index error when using order by --- 
src/client/inc/tscUtil.h | 2 +- src/client/src/tscSQLParser.c | 4 ++-- src/client/src/tscUtil.c | 6 ++++-- tests/pytest/functions/function_top.py | 2 +- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index f14cd8f9e2..690ec2c277 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -213,7 +213,7 @@ SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t function int16_t size); size_t tscNumOfExprs(SQueryInfo* pQueryInfo); -size_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo); +int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo); SExprInfo *tscExprGet(SQueryInfo* pQueryInfo, int32_t index); int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy); int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 63ce5ebf2c..bb871488fc 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5864,7 +5864,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } else if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ - size_t pos = tscExprTopBottomIndex(pQueryInfo); + int32_t pos = tscExprTopBottomIndex(pQueryInfo); assert(pos > 0); SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1); assert(pExpr->base.functionId == TSDB_FUNC_TS); @@ -5951,7 +5951,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } } else { /* order of top/bottom query in interval is not valid */ - size_t pos = tscExprTopBottomIndex(pQueryInfo); + int32_t pos = tscExprTopBottomIndex(pQueryInfo); assert(pos > 0); SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1); assert(pExpr->base.functionId == TSDB_FUNC_TS); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 
72791568ba..c5aedf1b71 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2427,10 +2427,12 @@ size_t tscNumOfExprs(SQueryInfo* pQueryInfo) { return taosArrayGetSize(pQueryInfo->exprList); } -size_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo){ +int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo){ size_t numOfExprs = tscNumOfExprs(pQueryInfo); - for(int32_t i = 0; i < numOfExprs; ++i) { + for(size_t i = 0; i < numOfExprs; ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr == NULL) + continue; if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) { return i; } diff --git a/tests/pytest/functions/function_top.py b/tests/pytest/functions/function_top.py index 9824afc19f..03a00d918a 100644 --- a/tests/pytest/functions/function_top.py +++ b/tests/pytest/functions/function_top.py @@ -132,7 +132,7 @@ class TDTestCase: tdSql.checkData(0, 1, "2018-09-17 09:00:00.008") tdSql.checkData(1, 0, "2018-09-17 09:00:00.009") tdSql.checkData(1, 3, "2018-09-17 09:00:00.009") - + #TD-2563 top + super_table + interval tdSql.execute("create table meters(ts timestamp, c int) tags (d int)") tdSql.execute("create table t1 using meters tags (1)") From 033fcd857b223925774aa35ada1d083f7f7719e9 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 20 Aug 2021 10:46:21 +0800 Subject: [PATCH 187/239] [TD-6234]: add unit test case for TD-6011 --- tests/pytest/query/filterNoKeyword.py | 83 +++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 tests/pytest/query/filterNoKeyword.py diff --git a/tests/pytest/query/filterNoKeyword.py b/tests/pytest/query/filterNoKeyword.py new file mode 100644 index 0000000000..34d74efd82 --- /dev/null +++ b/tests/pytest/query/filterNoKeyword.py @@ -0,0 +1,83 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + print("======= Verify filter for bool, nchar and binary type =========") + tdLog.debug( + "create table st(ts timestamp, tbcol1 bool, tbcol2 binary(10), tbcol3 nchar(20), tbcol4 tinyint, tbcol5 smallint, tbcol6 int, tbcol7 bigint, tbcol8 float, tbcol9 double) tags(tagcol1 bool, tagcol2 binary(10), tagcol3 nchar(10))") + tdSql.execute( + "create table st(ts timestamp, tbcol1 bool, tbcol2 binary(10), tbcol3 nchar(20), tbcol4 tinyint, tbcol5 smallint, tbcol6 int, tbcol7 bigint, tbcol8 float, tbcol9 double) tags(tagcol1 bool, tagcol2 binary(10), tagcol3 nchar(10))") + + tdSql.execute("create table st1 using st tags(true, 'table1', '水表')") + for i in range(1, 6): + tdSql.execute( + "insert into st1 values(%d, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %f, %f)" % + (self.ts + i, i % + 2, i, i, + i, i, i, i, 1.0, 1.0)) + + # =============Data type keywords cannot be used in filter==================== + # timestamp + tdSql.error("select * from st where timestamp = 1629417600") + + # bool + tdSql.error("select * from st where bool = false") + + #binary + tdSql.error("select * from st where binary = 'taosdata'") + + # nchar + tdSql.error("select * from st where nchar = '涛思数据'") + + # tinyint + tdSql.error("select * from st where tinyint = 127") + + # smallint + tdSql.error("select * from st where smallint = 32767") + + # int + tdSql.error("select * from st where INTEGER = 2147483647") + 
tdSql.error("select * from st where int = 2147483647") + + # bigint + tdSql.error("select * from st where bigint = 2147483647") + + # float + tdSql.error("select * from st where float = 3.4E38") + + # double + tdSql.error("select * from st where double = 1.7E308") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From a2e5f6504cd1edc664926636b2eec35866732b8c Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Fri, 20 Aug 2021 14:00:45 +0800 Subject: [PATCH 188/239] [TD-6231]: fix all none error of stmt multibind in python connector (#7482) --- src/connector/python/taos/bind.py | 14 +++++-- src/connector/python/tests/test-td6231.py | 50 +++++++++++++++++++++++ 2 files changed, 60 insertions(+), 4 deletions(-) create mode 100644 src/connector/python/tests/test-td6231.py diff --git a/src/connector/python/taos/bind.py b/src/connector/python/taos/bind.py index ede6381628..2628a641f1 100644 --- a/src/connector/python/taos/bind.py +++ b/src/connector/python/taos/bind.py @@ -10,7 +10,8 @@ import sys _datetime_epoch = datetime.utcfromtimestamp(0) def _is_not_none(obj): - obj != None + return obj != None + class TaosBind(ctypes.Structure): _fields_ = [ ("buffer_type", c_int), @@ -320,6 +321,14 @@ class TaosMultiBind(ctypes.Structure): def nchar(self, values): # type: (list[str]) -> None + self.num = len(values) + self.buffer_type = FieldType.C_NCHAR + is_null = [1 if v == None else 0 for v in values] + self.is_null = cast((c_byte * self.num)(*is_null), c_char_p) + + if sum(is_null) == self.num: + self.length = (c_int32 * len(values))(0 * self.num) + return if sys.version_info < (3, 0): _bytes = [bytes(value) if value is not None else None for value in values] buffer_length = max(len(b) + 1 for b in _bytes if b is not None) @@ -347,9 +356,6 @@ class TaosMultiBind(ctypes.Structure): ) self.length = (c_int32 * len(values))(*[len(b) if b is not None else 
0 for b in _bytes]) self.buffer_length = buffer_length - self.num = len(values) - self.is_null = cast((c_byte * self.num)(*[1 if v == None else 0 for v in values]), c_char_p) - self.buffer_type = FieldType.C_NCHAR def tinyint_unsigned(self, values): self.buffer_type = FieldType.C_TINYINT_UNSIGNED diff --git a/src/connector/python/tests/test-td6231.py b/src/connector/python/tests/test-td6231.py new file mode 100644 index 0000000000..e55d22c107 --- /dev/null +++ b/src/connector/python/tests/test-td6231.py @@ -0,0 +1,50 @@ +from taos import * + +conn = connect() + +dbname = "pytest_taos_stmt_multi" +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s" % dbname) +conn.select_db(dbname) + +conn.execute( + "create table if not exists log(ts timestamp, bo bool, nil tinyint, \ + ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \ + su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)", +) + +stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + +params = new_multi_binds(16) +params[0].timestamp((1626861392589, 1626861392590, 1626861392591)) +params[1].bool((True, None, False)) +params[2].tinyint([-128, -128, None]) # -128 is tinyint null +params[3].tinyint([0, 127, None]) +params[4].smallint([3, None, 2]) +params[5].int([3, 4, None]) +params[6].bigint([3, 4, None]) +params[7].tinyint_unsigned([3, 4, None]) +params[8].smallint_unsigned([3, 4, None]) +params[9].int_unsigned([3, 4, None]) +params[10].bigint_unsigned([3, 4, None]) +params[11].float([3, None, 1]) +params[12].double([3, None, 1.2]) +params[13].binary(["abc", "dddafadfadfadfadfa", None]) +# params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) +params[14].nchar([None, None, None]) +params[15].timestamp([None, None, 1626861392591]) +stmt.bind_param_batch(params) +stmt.execute() + + +result = stmt.use_result() +assert result.affected_rows == 3 
+result.close() + +result = conn.query("select * from log") +for row in result: + print(row) +result.close() +stmt.close() +conn.close() From 3716370ab973e1fce8896b916323d714521b39fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=8D=A1=E6=AF=94=E5=8D=A1=E6=AF=94?= <30525741+jackwener@users.noreply.github.com> Date: Fri, 20 Aug 2021 15:33:37 +0800 Subject: [PATCH 189/239] Update docs.md --- documentation20/cn/01.evaluation/docs.md | 26 ++++++++++++------------ 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/documentation20/cn/01.evaluation/docs.md b/documentation20/cn/01.evaluation/docs.md index 4aba82a54c..88b591faf9 100644 --- a/documentation20/cn/01.evaluation/docs.md +++ b/documentation20/cn/01.evaluation/docs.md @@ -30,38 +30,38 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发 |数据源特点和需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| -|总体数据量巨大| | | √ |TDengine在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。| -|数据输入速度偶尔或者持续巨大| | | √ | TDengine的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。| -|数据源数目巨大| | | √ |TDengine设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。| +|总体数据量巨大| | | √ |TDengine 在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。| +|数据输入速度偶尔或者持续巨大| | | √ | TDengine 的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。| +|数据源数目巨大| | | √ | TDengine 设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。| ### 系统架构要求 |系统架构要求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| -|要求简单可靠的系统架构| | | √ |TDengine的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。| -|要求容错和高可靠| | | √ |TDengine的集群功能,自动提供容错灾备等高可靠功能。| -|标准化规范| | | √ |TDengine使用标准的SQL语言提供主要功能,遵守标准化规范。| +|要求简单可靠的系统架构| | | √ | TDengine 的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。| +|要求容错和高可靠| | | √ | TDengine 的集群功能,自动提供容错灾备等高可靠功能。| +|标准化规范| | | √ | TDengine 使用标准的 SQL 语言提供主要功能,遵守标准化规范。| ### 系统功能需求 |系统功能需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| -|要求完整的内置数据处理算法| | √ | |TDengine的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。| -|需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑TDengine和关系型数据系统配合实现系统功能。| 
+|要求完整的内置数据处理算法| | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。| +|需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。| ### 系统性能需求 |系统性能需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| -|要求较大的总体处理能力| | | √ |TDengine的集群功能可以轻松地让多服务器配合达成处理能力的提升。| -|要求高速处理数据 | | | √ |TDengine的专门为IOT优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。| -|要求快速处理小粒度数据| | | √ |这方面TDengine性能可以完全对标关系型和NoSQL型数据处理系统。| +|要求较大的总体处理能力| | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。| +|要求高速处理数据 | | | √ | TDengine 的专门为 IOT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。| +|要求快速处理小粒度数据| | | √ |这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。| ### 系统维护需求 |系统维护需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| -|要求系统可靠运行| | | √ |TDengine的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。| +|要求系统可靠运行| | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。| |要求运维学习成本可控| | | √ |同上。| -|要求市场有大量人才储备| √ | | |TDengine作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。| +|要求市场有大量人才储备| √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。| From 306137cac54783dc5b1063a844ae989474929864 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Fri, 20 Aug 2021 16:28:09 +0800 Subject: [PATCH 190/239] =?UTF-8?q?[TD-6241]:=20fix=20unexpected=20na?= =?UTF-8?q?n=20while=20inserting=20None=20as=20float/doub=E2=80=A6=20(#748?= =?UTF-8?q?6)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [TD-6241]: fix unexpected nan while inserting None as float/double type * [TD-6241]: fix binary bind error in python connector --- src/connector/python/taos/bind.py | 43 +++++++++++++------------- src/connector/python/taos/constants.py | 7 +++-- 2 files changed, 26 insertions(+), 24 deletions(-) diff --git a/src/connector/python/taos/bind.py b/src/connector/python/taos/bind.py index 2628a641f1..083ddc99ae 100644 --- a/src/connector/python/taos/bind.py +++ b/src/connector/python/taos/bind.py @@ -300,29 +300,8 @@ class TaosMultiBind(ctypes.Structure): self.buffer 
= cast(buffer, c_void_p) self.num = len(values) - def binary(self, values): + def _str_to_buffer(self, values): self.num = len(values) - self.buffer = cast(c_char_p("".join(filter(_is_not_none, values)).encode("utf-8")), c_void_p) - self.length = (c_int * len(values))(*[len(value) if value is not None else 0 for value in values]) - self.buffer_type = FieldType.C_BINARY - self.is_null = cast((c_byte * self.num)(*[1 if v == None else 0 for v in values]), c_char_p) - - def timestamp(self, values, precision=PrecisionEnum.Milliseconds): - try: - buffer = cast(values, c_void_p) - except: - buffer_type = c_int64 * len(values) - buffer = buffer_type(*[_datetime_to_timestamp(value, precision) for value in values]) - - self.buffer_type = FieldType.C_TIMESTAMP - self.buffer = cast(buffer, c_void_p) - self.buffer_length = sizeof(c_int64) - self.num = len(values) - - def nchar(self, values): - # type: (list[str]) -> None - self.num = len(values) - self.buffer_type = FieldType.C_NCHAR is_null = [1 if v == None else 0 for v in values] self.is_null = cast((c_byte * self.num)(*is_null), c_char_p) @@ -356,6 +335,26 @@ class TaosMultiBind(ctypes.Structure): ) self.length = (c_int32 * len(values))(*[len(b) if b is not None else 0 for b in _bytes]) self.buffer_length = buffer_length + def binary(self, values): + self.buffer_type = FieldType.C_BINARY + self._str_to_buffer(values) + + def timestamp(self, values, precision=PrecisionEnum.Milliseconds): + try: + buffer = cast(values, c_void_p) + except: + buffer_type = c_int64 * len(values) + buffer = buffer_type(*[_datetime_to_timestamp(value, precision) for value in values]) + + self.buffer_type = FieldType.C_TIMESTAMP + self.buffer = cast(buffer, c_void_p) + self.buffer_length = sizeof(c_int64) + self.num = len(values) + + def nchar(self, values): + # type: (list[str]) -> None + self.buffer_type = FieldType.C_NCHAR + self._str_to_buffer(values) def tinyint_unsigned(self, values): self.buffer_type = FieldType.C_TINYINT_UNSIGNED diff --git 
a/src/connector/python/taos/constants.py b/src/connector/python/taos/constants.py index b500df627c..8ad5b69fc0 100644 --- a/src/connector/python/taos/constants.py +++ b/src/connector/python/taos/constants.py @@ -3,6 +3,9 @@ """Constants in TDengine python """ +import ctypes, struct + + class FieldType(object): """TDengine Field Types""" @@ -33,8 +36,8 @@ class FieldType(object): C_INT_UNSIGNED_NULL = 4294967295 C_BIGINT_NULL = -9223372036854775808 C_BIGINT_UNSIGNED_NULL = 18446744073709551615 - C_FLOAT_NULL = float("nan") - C_DOUBLE_NULL = float("nan") + C_FLOAT_NULL = ctypes.c_float(struct.unpack(" Date: Fri, 20 Aug 2021 16:34:41 +0800 Subject: [PATCH 191/239] [TD-6223]concurrent query support nested query --- tests/pytest/concurrent_inquiry.py | 76 ++++++++++++++++++++++++++---- 1 file changed, 66 insertions(+), 10 deletions(-) diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index 333c2a0a57..7af38c3b56 100644 --- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -175,12 +175,62 @@ class ConcurrentInquiry: def con_group(self,tlist,col_list,tag_list): rand_tag = random.randint(0,5) rand_col = random.randint(0,1) - return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag)) - + if len(tag_list): + return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag)) + else: + return 'group by '+','.join(random.sample(col_list,rand_col)) + def con_order(self,tlist,col_list,tag_list): return 'order by '+random.choice(tlist) - def gen_query_sql(self): #生成查询语句 + def gen_subquery_sql(self): + subsql ,col_num = self.gen_query_sql(1) + if col_num == 0: + return 0 + col_list=[] + tag_list=[] + for i in range(col_num): + col_list.append("taosd%d"%i) + + tlist=col_list+['abc'] #增加不存在的域'abc',是否会引起新bug + con_rand=random.randint(0,len(condition_list)) + func_rand=random.randint(0,len(func_list)) + col_rand=random.randint(0,len(col_list)) + 
t_rand=random.randint(0,len(tlist)) + sql='select ' #select + random.shuffle(col_list) + random.shuffle(func_list) + sel_col_list=[] + col_rand=random.randint(0,len(col_list)) + loop = 0 + for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数 + alias = ' as '+ 'sub%d ' % loop + loop += 1 + pick_func = '' + if j == 'leastsquares': + pick_func=j+'('+i+',1,1)' + elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile': + pick_func=j+'('+i+',1)' + else: + pick_func=j+'('+i+')' + if bool(random.getrandbits(1)) : + pick_func+=alias + sel_col_list.append(pick_func) + if col_rand == 0: + sql = sql + '*' + else: + sql=sql+','.join(sel_col_list) #select col & func + sql = sql + ' from ('+ subsql +') ' + con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] + sel_con=random.sample(con_func,random.randint(0,len(con_func))) + sel_con_list=[] + for i in sel_con: + sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数 + sql+=' '.join(sel_con_list) # condition + #print(sql) + return sql + + def gen_query_sql(self,subquery=0): #生成查询语句 tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表 tbname='' col_list=[] @@ -218,10 +268,10 @@ class ConcurrentInquiry: pick_func=j+'('+i+',1)' else: pick_func=j+'('+i+')' - if bool(random.getrandbits(1)): + if bool(random.getrandbits(1)) | subquery : pick_func+=alias sel_col_list.append(pick_func) - if col_rand == 0: + if col_rand == 0 & subquery : sql = sql + '*' else: sql=sql+','.join(sel_col_list) #select col & func @@ -238,7 +288,7 @@ class ConcurrentInquiry: sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数 sql+=' '.join(sel_con_list) # condition #print(sql) - return sql + return (sql,loop) def gen_query_join(self): #生成join查询语句 tbname = [] @@ -429,9 +479,12 @@ class ConcurrentInquiry: try: if self.random_pick(): - sql=self.gen_query_sql() + if self.random_pick(): + sql,temp=self.gen_query_sql() + else: + sql = self.gen_subquery_sql() 
else: - sql=self.gen_query_join() + sql = self.gen_query_join() print("sql is ",sql) fo.write(sql+'\n') start = time.time() @@ -496,9 +549,12 @@ class ConcurrentInquiry: while loop: try: if self.random_pick(): - sql=self.gen_query_sql() + if self.random_pick(): + sql,temp=self.gen_query_sql() + else: + sql = self.gen_subquery_sql() else: - sql=self.gen_query_join() + sql = self.gen_query_join() print("sql is ",sql) fo.write(sql+'\n') start = time.time() From 7ad6085339a2f74b1f12023d3341e159ef5c2982 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Fri, 20 Aug 2021 17:32:32 +0800 Subject: [PATCH 192/239] [TD-5331] : describe stmt function for inserting data into multi sub-tables belong to one same super table. --- documentation20/cn/08.connector/docs.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 364961ca63..5b695b845a 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -315,6 +315,10 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线 1. 调用 `taos_stmt_init` 创建参数绑定对象; 2. 调用 `taos_stmt_prepare` 解析 INSERT 语句; 3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname` 来设置表名; + * 从 2.1.6.0 版本开始,对于向一个超级表下的多个子表同时写入数据(每个子表写入的数据较少,可能只有一行)的情形,提供了一个专用的优化接口 `taos_stmt_set_sub_tbname`,可以通过提前载入 meta 数据以及避免对 SQL 语法的重复解析来节省总体的处理时间(但这个优化方法并不支持自动建表语法)。具体使用方法如下: + 1. 必须先提前调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta; + 2. 然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname` 来设置表名; + 3. 后续子表用 `taos_stmt_set_sub_tbname` 来设置表名。 4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags` 来设置表名和 TAGS 的值; 5. 调用 `taos_stmt_bind_param_batch` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param` 以单行的方式设置 VALUES 的值; 6. 
调用 `taos_stmt_add_batch` 把当前绑定的参数加入批处理; @@ -358,6 +362,12 @@ typedef struct TAOS_BIND { (2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) 当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。 +- `int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name)` + + (2.1.6.0 版本新增,仅支持用于替换 INSERT 语句中、属于同一个超级表下的多个子表中、作为写入目标的第 2 个到第 n 个子表的表名) + 当 SQL 语句中的表名使用了 `?` 占位时,如果想要一批写入的表是多个属于同一个超级表的子表,那么可以使用此函数绑定除第一个子表之外的其他子表的表名。 + *注意:*在使用时,客户端必须先调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta,然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname`,后续子表用 `taos_stmt_set_sub_tbname`。 + - `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)` (2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) From d4acd45407e948b325d4321605c4cc487379eaae Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Fri, 20 Aug 2021 18:57:10 +0800 Subject: [PATCH 193/239] [TD-4555] : add example code about SQL function DERIVATIVE(). --- documentation20/cn/12.taos-sql/docs.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 372070d081..46f8c54a22 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -1285,6 +1285,19 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 说明:(从 2.1.3.0 版本开始新增此函数)输出结果行数是范围内总行数减一,第一行没有结果输出。DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 + 示例: + ```mysql + taos> select derivative(current, 1d, 0) from t1; + ts | derivative(current, 1d, 0) | + ======================================================= + 2021-08-20 09:18:44.032 | 119.999966667 | + 2021-08-20 10:18:44.032 | -48.000000000 | + 2021-08-20 11:18:44.032 | -48.000000000 | + 2021-08-20 12:18:44.033 | 215.999940000 | + 2021-08-20 13:18:44.034 | -167.999953333 | + Query OK, 5 row(s) in set (0.004772s) + ``` + - **SPREAD** ```mysql SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; From d0c6f75f15e8c80760f8ade301917323606cb07b Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Fri, 20 Aug 
2021 19:16:47 +0800 Subject: [PATCH 194/239] [TD-4555] : update example code about SQL function DERIVATIVE(). --- documentation20/cn/12.taos-sql/docs.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 46f8c54a22..16b52f5773 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -1287,15 +1287,15 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 示例: ```mysql - taos> select derivative(current, 1d, 0) from t1; - ts | derivative(current, 1d, 0) | - ======================================================= - 2021-08-20 09:18:44.032 | 119.999966667 | - 2021-08-20 10:18:44.032 | -48.000000000 | - 2021-08-20 11:18:44.032 | -48.000000000 | - 2021-08-20 12:18:44.033 | 215.999940000 | - 2021-08-20 13:18:44.034 | -167.999953333 | - Query OK, 5 row(s) in set (0.004772s) + taos> select derivative(current, 10m, 0) from t1; + ts | derivative(current, 10m, 0) | + ======================================================== + 2021-08-20 10:11:22.790 | 0.500000000 | + 2021-08-20 11:11:22.791 | 0.166666620 | + 2021-08-20 12:11:22.791 | 0.000000000 | + 2021-08-20 13:11:22.792 | 0.166666620 | + 2021-08-20 14:11:22.792 | -0.666666667 | + Query OK, 5 row(s) in set (0.004883s) ``` - **SPREAD** From d02d71c16df649b001752c77fde7cf2866ab321d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 21 Aug 2021 09:06:19 +0800 Subject: [PATCH 195/239] [TD-6251]: taosdemo error msg with datetime info. (#7500) --- src/kit/taosdemo/taosdemo.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 75da9d9f4b..6099ac9803 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -659,7 +659,21 @@ static FILE * g_fpOfInsertResult = NULL; fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0) #define errorPrint(fmt, ...) 
\ - do { fprintf(stderr, " \033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, " \033[0m"); } while(0) + do {\ + struct tm Tm, *ptm;\ + struct timeval timeSecs; \ + time_t curTime;\ + gettimeofday(&timeSecs, NULL); \ + curTime = timeSecs.tv_sec;\ + ptm = localtime_r(&curTime, &Tm);\ + fprintf(stderr, " \033[31m");\ + fprintf(stderr, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " ",\ + ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,\ + ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec,\ + taosGetSelfPthreadId());\ + fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\ + fprintf(stderr, " \033[0m");\ + } while(0) // for strncpy buffer overflow #define min(a, b) (((a) < (b)) ? (a) : (b)) From aa3102cd72270145c3a2becbd2b4cdabf38690b4 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Sat, 21 Aug 2021 11:10:59 +0800 Subject: [PATCH 196/239] [TD-6046] fix ts top/bottom error --- src/client/src/tscUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index c5aedf1b71..c2df10b223 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2429,7 +2429,7 @@ size_t tscNumOfExprs(SQueryInfo* pQueryInfo) { int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo){ size_t numOfExprs = tscNumOfExprs(pQueryInfo); - for(size_t i = 0; i < numOfExprs; ++i) { + for(int32_t i = 0; i < numOfExprs; ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); if (pExpr == NULL) continue; From 87bf0e2b36d12045ddd92c721b9c5c01d4452e2e Mon Sep 17 00:00:00 2001 From: Zhiqiang Wang <1296468573@qq.com> Date: Sat, 21 Aug 2021 17:23:39 +0800 Subject: [PATCH 197/239] [TD-5772]: arm64 compilation problem. (#7357) * [TD-5772]: arm64 compilation problem. * [TD-5772]: arm64 compilation problem. * [TD-5772]: arm64 compilation problem. * [TD-5772]: arm64 compilation problem. * [TD-5772]: arm64 compilation problem. * [TD-5772]: arm64 compilation problem. * [TD-5772]: arm64 compilation problem. 
--- src/common/src/tarithoperator.c | 2878 ++++--------------------------- 1 file changed, 323 insertions(+), 2555 deletions(-) diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c index 3779303e1a..000ef79fcf 100644 --- a/src/common/src/tarithoperator.c +++ b/src/common/src/tarithoperator.c @@ -21,187 +21,6 @@ #include "tcompare.h" //GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); -#define ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \ - { \ - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \ - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; \ - \ - if ((len1) == (len2)) { \ - for (; i < (len2) && i >= 0; i += step, (out) += 1) { \ - if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - double v, z = 0.0; \ - GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - *(out) = (double)(left)[i] op(right)[i]; \ - } \ - } else if ((len1) == 1) { \ - for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ - if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - double v, z = 0.0; \ - GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - *(out) = (double)(left)[0] op(right)[i]; \ - } \ - } else if ((len2) == 1) { \ - for (; i >= 0 && i < (len1); i += step, (out) += 1) { \ - if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - double v, z = 0.0; \ - GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \ - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { 
\ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - *(out) = (double)(left)[i] op(right)[0]; \ - } \ - } \ - } -#define ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \ - { \ - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \ - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; \ - \ - if ((len1) == (len2)) { \ - for (; i < (len2) && i >= 0; i += step, (out) += 1) { \ - if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - *(out) = (double)(left)[i] op(right)[i]; \ - } \ - } else if ((len1) == 1) { \ - for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ - if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - *(out) = (double)(left)[0] op(right)[i]; \ - } \ - } else if ((len2) == 1) { \ - for (; i >= 0 && i < (len1); i += step, (out) += 1) { \ - if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - *(out) = (double)(left)[i] op(right)[0]; \ - } \ - } \ - } - -#define ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \ - { \ - int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \ - int32_t step = (_ord == TSDB_ORDER_ASC) ? 
1 : -1; \ - \ - if (len1 == (len2)) { \ - for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ - if (isNull((char *)&(left[i]), _left_type) || isNull((char *)&(right[i]), _right_type)) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - double v, z = 0.0; \ - GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[i])) * (right)[i]; \ - } \ - } else if (len1 == 1) { \ - for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ - if (isNull((char *)(left), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - double v, z = 0.0; \ - GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - *(out) = (double)(left)[0] - ((int64_t)(((double)(left)[0]) / (right)[i])) * (right)[i]; \ - } \ - } else if ((len2) == 1) { \ - for (; i >= 0 && i < len1; i += step, (out) += 1) { \ - if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)(right), _right_type)) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - double v, z = 0.0; \ - GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \ - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ - SET_DOUBLE_NULL(out); \ - continue; \ - } \ - *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[0])) * (right)[0]; \ - } \ - } \ - } - -#define ARRAY_LIST_ADD(left, right, _left_type, _right_type, len1, len2, out, _ord) \ - ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, +, TSDB_DATA_TYPE_DOUBLE, _ord) -#define ARRAY_LIST_SUB(left, right, _left_type, _right_type, len1, len2, out, _ord) \ - ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, -, TSDB_DATA_TYPE_DOUBLE, _ord) -#define 
ARRAY_LIST_MULTI(left, right, _left_type, _right_type, len1, len2, out, _ord) \ - ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, *, TSDB_DATA_TYPE_DOUBLE, _ord) -#define ARRAY_LIST_DIV(left, right, _left_type, _right_type, len1, len2, out, _ord) \ - ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord) -#define ARRAY_LIST_REM(left, right, _left_type, _right_type, len1, len2, out, _ord) \ - ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, %, TSDB_DATA_TYPE_DOUBLE, _ord) - -#define TYPE_CONVERT_DOUBLE_RES(left, right, out, _type_left, _type_right, _type_res) \ - _type_left * pLeft = (_type_left *)(left); \ - _type_right *pRight = (_type_right *)(right); \ - _type_res * pOutput = (_type_res *)(out); - -#define DO_VECTOR_ADD(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, _output, \ - _order) \ - do { \ - TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \ - ARRAY_LIST_ADD(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \ - } while (0) - -#define DO_VECTOR_SUB(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, _output, \ - _order) \ - do { \ - TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \ - ARRAY_LIST_SUB(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \ - } while (0) - -#define DO_VECTOR_MULTIPLY(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, \ - _output, _order) \ - do { \ - TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \ - ARRAY_LIST_MULTI(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \ - } while (0) - -#define DO_VECTOR_DIVIDE(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, \ - _output, _order) \ - do { \ - 
TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \ - ARRAY_LIST_DIV(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \ - } while (0) - -#define DO_VECTOR_REMAINDER(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, \ - _output, _order) \ - do { \ - TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \ - ARRAY_LIST_REM(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \ - } while (0) void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { int32_t *pLeft = (int32_t *)left; @@ -240,2389 +59,338 @@ void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight } } -void vectorAdd(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType, - void *output, int32_t order) { - switch(leftType) { - case TSDB_DATA_TYPE_TINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, 
numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; +typedef double (*_arithmetic_getVectorDoubleValue_fn_t)(void *src, int32_t index); + +double getVectorDoubleValue_TINYINT(void *src, int32_t index) { + return (double)*((int8_t *)src + index); +} +double getVectorDoubleValue_UTINYINT(void *src, int32_t index) { + return (double)*((uint8_t *)src + index); +} +double getVectorDoubleValue_SMALLINT(void *src, int32_t index) { + return (double)*((int16_t *)src + index); +} +double getVectorDoubleValue_USMALLINT(void *src, int32_t index) { + return (double)*((uint16_t *)src + index); +} +double getVectorDoubleValue_INT(void *src, int32_t index) { + return (double)*((int32_t *)src + index); +} +double getVectorDoubleValue_UINT(void *src, int32_t index) { + return (double)*((uint32_t *)src + index); +} +double getVectorDoubleValue_BIGINT(void *src, int32_t index) { + return (double)*((int64_t *)src + index); +} +double getVectorDoubleValue_UBIGINT(void *src, int32_t index) { + return (double)*((uint64_t *)src + index); +} +double getVectorDoubleValue_FLOAT(void *src, int32_t index) { + return (double)*((float *)src + index); +} +double getVectorDoubleValue_DOUBLE(void *src, int32_t index) { + return (double)*((double *)src + index); +} +_arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) { + _arithmetic_getVectorDoubleValue_fn_t p = NULL; + if(srcType==TSDB_DATA_TYPE_TINYINT) { + p = getVectorDoubleValue_TINYINT; + }else if(srcType==TSDB_DATA_TYPE_UTINYINT) { + p = 
getVectorDoubleValue_UTINYINT; + }else if(srcType==TSDB_DATA_TYPE_SMALLINT) { + p = getVectorDoubleValue_SMALLINT; + }else if(srcType==TSDB_DATA_TYPE_USMALLINT) { + p = getVectorDoubleValue_USMALLINT; + }else if(srcType==TSDB_DATA_TYPE_INT) { + p = getVectorDoubleValue_INT; + }else if(srcType==TSDB_DATA_TYPE_UINT) { + p = getVectorDoubleValue_UINT; + }else if(srcType==TSDB_DATA_TYPE_BIGINT) { + p = getVectorDoubleValue_BIGINT; + }else if(srcType==TSDB_DATA_TYPE_UBIGINT) { + p = getVectorDoubleValue_UBIGINT; + }else if(srcType==TSDB_DATA_TYPE_FLOAT) { + p = getVectorDoubleValue_FLOAT; + }else if(srcType==TSDB_DATA_TYPE_DOUBLE) { + p = getVectorDoubleValue_DOUBLE; + }else { + assert(0); } - case TSDB_DATA_TYPE_UTINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, 
numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, 
uint16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_INT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - 
DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case 
TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_BIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, 
order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order); - break; - } - 
case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order); - 
break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - default:; - } + return p; } -void vectorSub(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType, - void *output, int32_t order) { - switch(leftType) { - case TSDB_DATA_TYPE_TINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, 
right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, 
numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, 
uint16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_INT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - 
DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case 
TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_BIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, 
order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order); - break; - } - 
case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order); - 
break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - default:; - } + +typedef void* (*_arithmetic_getVectorValueAddr_fn_t)(void *src, int32_t index); + +void* getVectorValueAddr_TINYINT(void *src, int32_t index) { + return (void*)((int8_t *)src + index); +} +void* getVectorValueAddr_UTINYINT(void *src, int32_t index) { + return (void*)((uint8_t *)src + index); +} +void* getVectorValueAddr_SMALLINT(void *src, int32_t index) { + return (void*)((int16_t *)src + index); +} +void* getVectorValueAddr_USMALLINT(void *src, int32_t index) { + return (void*)((uint16_t *)src + index); +} +void* getVectorValueAddr_INT(void *src, int32_t index) { + return (void*)((int32_t *)src + index); +} +void* getVectorValueAddr_UINT(void *src, int32_t index) { + return (void*)((uint32_t *)src + index); +} +void* getVectorValueAddr_BIGINT(void *src, int32_t index) { + return (void*)((int64_t *)src + index); +} +void* getVectorValueAddr_UBIGINT(void *src, int32_t index) { + return (void*)((uint64_t *)src + index); +} +void* getVectorValueAddr_FLOAT(void *src, int32_t index) { + return (void*)((float *)src + index); +} +void* 
getVectorValueAddr_DOUBLE(void *src, int32_t index) { + return (void*)((double *)src + index); } -void vectorMultiply(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType, - void *output, int32_t order) { - switch(leftType) { - case TSDB_DATA_TYPE_TINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; +_arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) { + _arithmetic_getVectorValueAddr_fn_t 
p = NULL; + if(srcType==TSDB_DATA_TYPE_TINYINT) { + p = getVectorValueAddr_TINYINT; + }else if(srcType==TSDB_DATA_TYPE_UTINYINT) { + p = getVectorValueAddr_UTINYINT; + }else if(srcType==TSDB_DATA_TYPE_SMALLINT) { + p = getVectorValueAddr_SMALLINT; + }else if(srcType==TSDB_DATA_TYPE_USMALLINT) { + p = getVectorValueAddr_USMALLINT; + }else if(srcType==TSDB_DATA_TYPE_INT) { + p = getVectorValueAddr_INT; + }else if(srcType==TSDB_DATA_TYPE_UINT) { + p = getVectorValueAddr_UINT; + }else if(srcType==TSDB_DATA_TYPE_BIGINT) { + p = getVectorValueAddr_BIGINT; + }else if(srcType==TSDB_DATA_TYPE_UBIGINT) { + p = getVectorValueAddr_UBIGINT; + }else if(srcType==TSDB_DATA_TYPE_FLOAT) { + p = getVectorValueAddr_FLOAT; + }else if(srcType==TSDB_DATA_TYPE_DOUBLE) { + p = getVectorValueAddr_DOUBLE; + }else { + assert(0); } - case TSDB_DATA_TYPE_UTINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - 
DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, 
numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_INT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - 
DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, 
right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_BIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order); - break; 
- } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - 
DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - switch (rightType) { - case 
TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - default:; - } + return p; } -void vectorDivide(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType, - void *output, int32_t order) { - switch(leftType) { - case TSDB_DATA_TYPE_TINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - 
DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, 
output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, 
numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, 
order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_INT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, 
output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_BIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - 
DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, 
uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, 
numLeft, leftType, float, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - default:; - } +void vectorAdd(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t 
_right_type, void *out, int32_t _ord) { + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + double *output=(double*)out; + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) + getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,0)); + } + } } - -void vectorRemainder(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType, - void *output, int32_t order) { - switch(leftType) { - case TSDB_DATA_TYPE_TINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, 
numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order); - break; - 
} - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - 
DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, 
leftType, uint16_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_INT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, double, 
output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_BIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, 
leftType, int64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, 
rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order); - break; - } - case 
TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - switch (rightType) { - case TSDB_DATA_TYPE_TINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order); - break; - } - case TSDB_DATA_TYPE_INT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order); - break; - } - case TSDB_DATA_TYPE_UINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - DO_VECTOR_REMAINDER(left, numLeft, 
leftType, double, right, numRight, rightType, float, output, order); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, double, output, order); - break; - } - default: - assert(0); - } - break; - } - default:; - } +void vectorSub(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + double *output=(double*)out; + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + 
SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,0)); + } + } +} +void vectorMultiply(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + double *output=(double*)out; + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) * getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,0)); + } + } +} +void vectorDivide(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void 
*out, int32_t _ord) { + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + double *output=(double*)out; + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) /getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, 
_right_type, getVectorValueAddrFnRight(right,0)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,0)); + } + } +} +void vectorRemainder(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { + int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1; + double *output=(double*)out; + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); + _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); + _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); + + if (len1 == (len2)) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); + } + } else if (len1 == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if 
(getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - ((int64_t)(getVectorDoubleValueFnLeft(left,0) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < len1; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,0))) * getVectorDoubleValueFnRight(right,0)); + } + } } _arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr) { From c61c3de6381236ea40511c075035bd1352d98f01 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Sat, 21 Aug 2021 20:00:10 +0800 Subject: [PATCH 198/239] [TD-2639] : describe version dependency of cacheLast parameter. 
--- documentation20/cn/11.administrator/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index c4bdecf294..f9061200f9 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -217,7 +217,7 @@ taosd -C | 99 | queryBufferSize | | **S** | MB | 为所有并发查询占用保留的内存大小。 | | | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。(2.0.15 以前的版本中,此参数的单位是字节) | | 100 | ratioOfQueryCores | | **S** | | 设置查询线程的最大数量。 | | | 最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 | | 101 | update | | **S** | | 允许更新已存在的数据行 | 0 \| 1 | 0 | 从 2.0.8.0 版本开始 | -| 102 | cacheLast | | **S** | | 是否在内存中缓存子表的最近数据 | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。 | 0 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 | +| 102 | cacheLast | | **S** | | 是否在内存中缓存子表的最近数据 | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。(2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1]) | 0 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 | | 103 | numOfCommitThreads | YES | **S** | | 设置写入线程的最大数量 | | | | | 104 | maxWildCardsLength | | **C** | bytes | 设定 LIKE 算子的通配符字符串允许的最大长度 | 0-16384 | 100 | 2.1.6.1 版本新增。 | From 5080c2a4bef7ef26271177387bd0a6d1dd475815 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 21 Aug 2021 21:58:08 +0800 Subject: [PATCH 199/239] [TD-6253]: taosdump cmdline param verification. (#7505) --- src/kit/taosdump/taosdump.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 6b553b3824..30b5d91b10 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -62,6 +62,20 @@ typedef struct { #define errorPrint(fmt, ...) 
\ do { fprintf(stderr, "\033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, "\033[0m"); } while(0) +static bool isStringNumber(char *input) +{ + int len = strlen(input); + if (0 == len) { + return false; + } + + for (int i = 0; i < len; i++) { + if (!isdigit(input[i])) + return false; + } + + return true; +} // -------------------------- SHOW DATABASE INTERFACE----------------------- enum _show_db_index { @@ -472,6 +486,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { g_args.table_batch = atoi(arg); break; case 'T': + if (!isStringNumber(arg)) { + errorPrint("%s", "\n\t-T need a number following!\n"); + exit(EXIT_FAILURE); + } g_args.thread_num = atoi(arg); break; case OPT_ABORT: From 4d01963ec2241d01d4b3b9caeaff7b5462f8e276 Mon Sep 17 00:00:00 2001 From: zyyang Date: Sun, 22 Aug 2021 01:18:57 +0800 Subject: [PATCH 200/239] [TD-6255]: java connector document --- .../en/08.connector/01.java/docs.md | 546 ++++++++++++++++++ 1 file changed, 546 insertions(+) create mode 100644 documentation20/en/08.connector/01.java/docs.md diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md new file mode 100644 index 0000000000..0ee0446db4 --- /dev/null +++ b/documentation20/en/08.connector/01.java/docs.md @@ -0,0 +1,546 @@ +# Java connection + +## Introduction + +The taos-jdbcdriver is implemented in two forms: JDBC-JNI and JDBC-RESTful (which supported since taos-jdbcdriver-2.0.18). JDBC-JNI is implemented by calling the local methods of libtaos.so (or taos.dll) on the client, while JDBC-RESTful encapsulates the RESTful interface implementation internally. 
+ +![tdengine-connector](https://www.taosdata.com/cn/documentation20/user/pages/images/tdengine-jdbc-connector.png) + +The figure above shows the three ways Java applications can access the TDengine: + +* JDBC-JNI: The Java application uses JDBC-JNI's API on physical node1 (pnode1) and directly calls the client API (libtaos.so or taos.dll) to send write and query requests to the taosd instance on physical node2 (Pnode2). +* RESTful: The Java application sends the SQL to the RESTful connector on physical node2 (pnode2), which then calls the client API (libtaos.so). +* JDBC-RESTful: The Java application uses the JDBC-restful API to encapsulate SQL into a RESTful request and send it to the RESTful connector of physical node 2. + +In terms of implementation, the JDBC driver of TDengine is as consistent as possible with the behavior of the relational database driver. However, due to the differences between TDengine and relational database in the object and technical characteristics of services, There are some differences between taos-jdbcdriver and traditional relational database JDBC driver. The following points should be paid attention to when using: + +* Currently, you cannot delete a single data record in TDengine. +* Transaction are not currently supported. + +### Difference between JDBC-JNI and JDBC-restful + + + + + + + + + + + + + + + + + + + + + + + + + + + + +**Note**: RESTful interfaces are stateless. 
Therefore, when using JDBC-restful, you should specify the database name in SQL before all table names and super table names, for example: + +```sql +INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6); +``` + +## JDBC driver version and supported TDengine and JDK versions + +| taos-jdbcdriver | TDengine | JDK | +| -------------------- | ----------------- | -------- | +| 2.0.33 - 2.0.34 | 2.0.3.0 and above | 1.8.x | +| 2.0.31 - 2.0.32 | 2.1.3.0 and above | 1.8.x | +| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | +| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | +| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | +| 1.0.3 | 1.6.1.x and above | 1.8.x | +| 1.0.2 | 1.6.1.x and above | 1.8.x | +| 1.0.1 | 1.6.1.x and above | 1.8.x | + + + +## DataType in TDengine and Java connector + +The TDengine supports the following data types and Java data types: + +| TDengine DataType | Java DataType | +| ----------------- | ------------------ | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT | java.lang.Short | +| TINYINT | java.lang.Byte | +| BOOL | java.lang.Boolean | +| BINARY | byte[] | +| NCHAR | java.lang.String | + + + +## Install Java connector + +### Runtime Requirements + +To actually run TDengine's Java connector, you'll need to meet the following: + +1. A Linux or Windows System + +2. Java Runtime Environment 1.8 or later + +3. TDengine client (required for JDBC-JNI, not required for JDBC-restful) + +**Note**: + +* After the TDengine client is successfully installed on Linux, the libtaos.so file is automatically copied to /usr/lib/libtaos.so, which is included in the Linux automatic scan path and does not need to be specified separately. +* After the TDengine client is installed on Windows, the taos.dll file that the driver package depends on is automatically copied to the default search path C:/Windows/System32. 
You do not need to specify it separately. + +### Obtain JDBC driver by maven + +To Java delevopers, TDengine provides `taos-jdbcdriver` according to the JDBC(3.0) API. Users can find and download it through [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver). Add the following dependencies in pom.xml for your maven projects. + +```xml + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.34 + + +``` + +### Obtain JDBC driver by compile source codes + +You can download the TDengine source code and compile the latest version of the JDBC Connector. + + ```shell + git clone https://github.com/taosdata/TDengine.git + cd TDengine/src/connector/jdbc + mvn clean package -Dmaven.test.skip=true + ``` + +a taos-jdbcdriver-2.0.xx-dist.jar will be released in the target directory + + + +## Usage of java connector + +### Establishing a Connection + +#### Establishing a connection with URL + +Establish the connection by specifying the URL, as shown below: + +```java +String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` + +In the example above, the JDBC-RESTful driver is used to establish a connection to the hostname of 'taosdemo.com', port of 6041, and database name of 'test'. This URL specifies the user name as 'root' and the password as 'taosdata'. + +The JDBC-RESTful does not depend on the local function library. 
Compared with JDBC-JNI, only the following is required: + +* DriverClass designated as "com.taosdata.jdbc.rs.RestfulDriver" +* JdbcUrl starts with "JDBC:TAOS-RS://" +* Use port 6041 as the connection port + +For better write and query performance, Java applications can use the JDBC-JNI driver, as shown below: + +```java +String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` + +In the example above, The JDBC-JNI driver is used to establish a connection to the hostname of 'taosdemo.com', port 6030 (TDengine's default port), and database name of 'test'. This URL specifies the user name as 'root' and the password as 'taosdata'. + +You can also see the JDBC-JNI video tutorial: [JDBC connector of TDengine](https://www.taosdata.com/blog/2020/11/11/1955.html) + +The format of JDBC URL is: + +```url +jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}] +``` + +The configuration parameters in the URL are as follows: + +* user: user name for logging in to the TDengine. The default value is root. +* password: the user login password. The default value is taosdata. +* cfgdir: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is /etc/taos on Linux and C:/TDengine/cfg on Windows. +* charset: character set used by the client. The default value is the system character set. +* locale: client locale. The default value is the current system locale. +* timezone: timezone used by the client. The default value is the current timezone of the system. +* batchfetch: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is flase. +* timestampFormat: only valid for JDBC-RESTful. 
'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'. +* batchErrorIgnore: true if you want to continue executing the rest of the SQL when error happens during execute the executeBatch method in Statement; false, false if the remaining SQL statements are not executed. Default value is false. + + + +#### Establishing a connection with URL and Properties + +In addition to establish the connection with the specified URL, you can also use Properties to specify the parameters to set up the connection, as shown below: + +```java +public Connection getConn() throws Exception{ + String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; + // String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} +``` + +In the example above, JDBC-JNI is used to establish a connection to hostname of 'taosdemo.com', port at 6030, and database name of 'test'. The annotation is the method when using JDBC-RESTful. The connection specifies the user name as 'root' and the password as 'taosdata' in the URL, and the character set to use, locale, time zone, and so on in connProps. + +The configuration parameters in properties are as follows: + +* TSDBDriver.PROPERTY_KEY_USER: user name for logging in to the TDengine. The default value is root. +* TSDBDriver.PROPERTY_KEY_PASSWORD: the user login password. The default value is taosdata. +* TSDBDriver.PROPERTY_KEY_CONFIG_DIR: directory of the client configuration file. 
It is valid only for JDBC-JNI. The default value is /etc/taos on Linux and C:/TDengine/cfg on Windows. +* TSDBDriver.PROPERTY_KEY_CHARSET: character set used by the client. The default value is the system character set. +* TSDBDriver.PROPERTY_KEY_LOCALE: client locale. The default value is the current system locale. +* TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client. The default value is the current timezone of the system. +* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is flase. +* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'. +* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true if you want to continue executing the rest of the SQL when error happens during execute the executeBatch method in Statement; false, false if the remaining SQL statements are not executed. Default value is false. + + + +#### Establishing a connection with configuration file + +When JDBC-JNI is used to connect to the TDengine cluster, you can specify firstEp and secondEp parameters of the cluster in the client configuration file. As follows: + +1. The hostname and port are not specified in Java applications + +```java +public Connection getConn() throws Exception{ + String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} +``` + +2. 
Specify firstEp and secondEp in the configuration file + +```txt +# first fully qualified domain name (FQDN) for TDengine system +firstEp cluster_node1:6030 +# second fully qualified domain name (FQDN) for TDengine system, for cluster only +secondEp cluster_node2:6030 +``` + +In the above example, JDBC driver uses the client configuration file to establish a connection to the hostname of 'cluster_node1', port 6030, and database name of 'test'. When the firstEp node in the cluster fails, JDBC will try to connect to the cluster using secondEp. In the TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established. + +**Note**: In this case, the configuration file is belongs to TDengine client which running a Java application. default file path of Linux OS is '/etc/taos/taos.cfg', and default file path of Windows OS is 'C://TDengine/cfg/taos.cfg'. + +#### Priority of the parameters + +If the parameters in the URL, Properties, and client configuration file are repeated set, the priorities of the parameters in descending order are as follows: + +1. URL parameters +2. Properties +3. Client configuration file in taos.cfg + +For example, if you specify password as 'taosdata' in the URL and password as 'taosdemo' in the Properties, JDBC will establish a connection using the password in the URL. 
+ +For details, see Client Configuration:[client configuration](https://www.taosdata.com/en/documentation/administrator#client) + + + +### Create database and table + +```java +Statement stmt = conn.createStatement(); +// create database +stmt.executeUpdate("create database if not exists db"); +// use database +stmt.executeUpdate("use db"); +// create table +stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); +``` + + + +### Insert + +```java +// insert data +int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); +System.out.println("insert " + affectedRows + " rows."); +``` + +**Note**: 'now' is an internal system function. The default value is the current time of the computer where the client resides. 'now + 1s' indicates that the current time on the client is added by one second. The following time units are a(millisecond), s (second), m(minute), h(hour), d(day), w(week), n(month), and y(year). + + + +### Query + +```java +// query data +ResultSet resultSet = stmt.executeQuery("select * from tb"); +Timestamp ts = null; +int temperature = 0; +float humidity = 0; +while(resultSet.next()){ + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); +} +``` + +**Note**: The query is consistent with the operation of the relational database, and the index in ResultSet starts from 1. 
+ + + +### Handle exceptions + +```java +try (Statement statement = connection.createStatement()) { + // executeQuery + ResultSet resultSet = statement.executeQuery(sql); + // print result + printResult(resultSet); +} catch (SQLException e) { + System.out.println("ERROR Message: " + e.getMessage()); + System.out.println("ERROR Code: " + e.getErrorCode()); + e.printStackTrace(); +} +``` + +The Java connector may report three types of error codes: JDBC Driver (error codes ranging from 0x2301 to 0x2350), JNI method (error codes ranging from 0x2351 to 0x2400), and TDengine Error. For details about the error code, see: + +- https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +- https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h + + + +### Write data through parameter binding + +Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly improved parameter binding support for data write (INSERT) scenarios. Data can be written in the following way, avoiding SQL parsing and significantly improving the write performance.(**Note**: parameter binding is not supported in JDBC-restful) + +```java +Statement stmt = conn.createStatement(); +Random r = new Random(); + +TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) 
(ts, c1, c2) values(?, ?, ?)"); + +s.setTableName("w1"); + +s.setTagInt(0, r.nextInt(10)); +s.setTagString(1, "Beijing"); +int numOfRows = 10; + +ArrayList ts = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + ts.add(System.currentTimeMillis() + i); +} +s.setTimestamp(0, ts); +ArrayList s1 = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + s1.add(r.nextInt(100)); +} +s.setInt(1, s1); +ArrayList s2 = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + s2.add("test" + r.nextInt(100)); +} +s.setString(2, s2, 10); + +s.columnDataAddBatch(); +s.columnDataExecuteBatch(); + +s.columnDataClearBatch(); +s.columnDataCloseBatch(); +``` + +The methods used to set tags are: + +```java +public void setTagNull(int index, int type) +public void setTagBoolean(int index, boolean value) +public void setTagInt(int index, int value) +public void setTagByte(int index, byte value) +public void setTagShort(int index, short value) +public void setTagLong(int index, long value) +public void setTagTimestamp(int index, long value) +public void setTagFloat(int index, float value) +public void setTagDouble(int index, double value) +public void setTagString(int index, String value) +public void setTagNString(int index, String value) +``` + +The methods used to set columns are: + +```java +public void setInt(int columnIndex, ArrayList list) throws SQLException +public void setFloat(int columnIndex, ArrayList list) throws SQLException +public void setTimestamp(int columnIndex, ArrayList list) throws SQLException +public void setLong(int columnIndex, ArrayList list) throws SQLException +public void setDouble(int columnIndex, ArrayList list) throws SQLException +public void setBoolean(int columnIndex, ArrayList list) throws SQLException +public void setByte(int columnIndex, ArrayList list) throws SQLException +public void setShort(int columnIndex, ArrayList list) throws SQLException +public void setString(int columnIndex, ArrayList list, int size) throws SQLException 
+public void setNString(int columnIndex, ArrayList list, int size) throws SQLException +``` + +**Note**: Both setString and setNString require the user to declare the column width of the corresponding column in the table definition in the size parameter. + + + +### Subscribe + +#### Create + +```java +TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); +``` + +parameters: + +* topic: the unique topic name of the subscription. +* sql: a select statement . +* restart: true if restart the subscription already exists; false if continue the previous subscription + +In the example above, a subscription named 'topic' is created which use the SQL statement 'select * from meters'. If the subscription already exists, it will continue with the previous query progress, rather than consuming all the data from scratch. + +#### Consume + +```java +int total = 0; +while(true) { + TSDBResultSet rs = sub.consume(); + int count = 0; + while(rs.next()) { + count++; + } + total += count; + System.out.printf("%d rows consumed, total %d\n", count, total); + Thread.sleep(1000); +} +``` + +The consume method returns a result set containing all the new data so far since the last consume. Make sure to call consume as often as you need (like Thread.sleep(1000) in the example), otherwise you will put unnecessary stress on the server. + +#### Close + +```java +sub.close(true); +// release resources +resultSet.close(); +stmt.close(); +conn.close(); +``` + +The close method closes a subscription. If the parameter is true, the subscription progress information is reserved, and a subscription with the same name can be created later to continue consuming data. If false, the subscription progress is not retained. + +**Note**: the connection must be closed; otherwise, a connection leak may occur. 
+
+
+
+## Connection Pool
+
+### HikariCP example
+
+```java
+public static void main(String[] args) throws SQLException {
+    HikariConfig config = new HikariConfig();
+    // jdbc properties
+    config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
+    config.setUsername("root");
+    config.setPassword("taosdata");
+    // connection pool configurations
+    config.setMinimumIdle(10);           //minimum number of idle connection
+    config.setMaximumPoolSize(10);      //maximum number of connection in the pool
+    config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
+    config.setMaxLifetime(0);       // maximum life time for each connection
+    config.setIdleTimeout(0);       // max idle time for recycle idle connection
+    config.setConnectionTestQuery("select server_status()"); //validation query
+    HikariDataSource ds = new HikariDataSource(config); //create datasource
+    Connection  connection = ds.getConnection(); // get connection
+    Statement statement = connection.createStatement(); // get statement
+    //query or insert
+    // ...
+    connection.close(); // put back to connection pool
+}
+```
+
+### Druid example
+
+```java
+public static void main(String[] args) throws Exception {
+    DruidDataSource dataSource = new DruidDataSource();
+    // jdbc properties
+    dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
+    dataSource.setUrl(url);
+    dataSource.setUsername("root");
+    dataSource.setPassword("taosdata");
+    // pool configurations
+    dataSource.setInitialSize(10);
+    dataSource.setMinIdle(10);
+    dataSource.setMaxActive(10);
+    dataSource.setMaxWait(30000);
+    dataSource.setValidationQuery("select server_status()");
+    Connection  connection = dataSource.getConnection(); // get connection
+    Statement statement = connection.createStatement(); // get statement
+    //query or insert
+    // ...
+    connection.close(); // put back to connection pool
+}
+```
+
+**Note**:
+
+As of TDengine V1.6.4.1, the function select server_status() is supported specifically for heartbeat detection, so it is recommended to use select server_status() for Validation queries when using connection pools.
+
+Select server_status() returns 1 on success, as shown below.
+
+```txt
+taos> select server_status();
+server_status()|
+================
+1              |
+Query OK, 1 row(s) in set (0.000141s)
+```
+
+
+
+## Integrated with framework
+
+- Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate
+- Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring JdbcTemplate
+
+
+
+## FAQ
+
+- java.lang.UnsatisfiedLinkError: no taos in java.library.path
+
+  **Cause**: The application program cannot find Library function *taos*
+
+  **Answer**: Copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\` on Windows and make a soft link through `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` on Linux.
+
+- java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
+
+  **Cause**: Currently TDengine only supports 64bit JDK
+
+  **Answer**: re-install 64bit JDK.
+ +- For other questions, please refer to [Issues](https://github.com/taosdata/TDengine/issues) + + From 3c2732e1d2bbfad5a829a8be9c7c4760fd592728 Mon Sep 17 00:00:00 2001 From: zyyang Date: Sun, 22 Aug 2021 01:24:59 +0800 Subject: [PATCH 201/239] fix the title --- documentation20/en/08.connector/01.java/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index 0ee0446db4..fa133882e4 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -1,4 +1,4 @@ -# Java connection +# Java connector ## Introduction From 96a4a48e43cee783208e504a3ae9b9d3b844ffc7 Mon Sep 17 00:00:00 2001 From: xywang Date: Sun, 22 Aug 2021 17:01:46 +0800 Subject: [PATCH 202/239] [TD-6001]: use db_name in url if exists --- src/client/CMakeLists.txt | 1 + src/client/inc/tsclient.h | 6 ++++++ src/client/src/tscSQLParser.c | 24 ++++++++++++++++++++++-- src/plugins/http/inc/httpInt.h | 1 + src/plugins/http/inc/httpRestHandle.h | 10 +++++----- src/plugins/http/src/httpRestHandle.c | 14 ++++++++++++-- src/plugins/http/src/httpSql.c | 5 +++++ 7 files changed, 52 insertions(+), 9 deletions(-) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 2f83557d63..77417a24a4 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -4,6 +4,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(jni) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc) AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 4ed7894931..6cec0bd798 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -38,6 +38,11 @@ extern "C" { #include "qUtil.h" #include "tcmdtype.h" +typedef enum { + TAOS_REQ_FROM_SHELL, + TAOS_REQ_FROM_HTTP +} SReqOrigin; + // forward 
declaration struct SSqlInfo; @@ -340,6 +345,7 @@ typedef struct STscObj { SRpcCorEpSet *tscCorMgmtEpSet; pthread_mutex_t mutex; int32_t numOfObj; // number of sqlObj from this tscObj + SReqOrigin from; } STscObj; typedef struct SSubqueryState { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 612a3d4798..d1b2e6fd27 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -40,6 +40,7 @@ #include "qScript.h" #include "ttype.h" #include "qFilter.h" +#include "httpInt.h" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -1687,8 +1688,28 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) { static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; } static char* cloneCurrentDBName(SSqlObj* pSql) { + char *p = NULL; + HttpContext *pCtx = NULL; + pthread_mutex_lock(&pSql->pTscObj->mutex); - char *p = strdup(pSql->pTscObj->db); + STscObj *pTscObj = pSql->pTscObj; + switch (pTscObj->from) { + case TAOS_REQ_FROM_HTTP: + pCtx = pSql->param; + if (pCtx && pCtx->db[0] != '\0') { + char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN] = {0}; + int32_t len = sprintf(db, "%s%s%s", pTscObj->acctId, TS_PATH_DELIMITER, pCtx->db); + assert(len <= sizeof(db)); + + p = strdup(db); + } + break; + default: + break; + } + if (p == NULL) { + p = strdup(pSql->pTscObj->db); + } pthread_mutex_unlock(&pSql->pTscObj->mutex); return p; @@ -8684,7 +8705,6 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) { n += 1; } info->numOfColumns = n; - return meta; } diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h index 0a5822b908..99a5b770aa 100644 --- a/src/plugins/http/inc/httpInt.h +++ b/src/plugins/http/inc/httpInt.h @@ -150,6 +150,7 @@ typedef struct HttpContext { char ipstr[22]; char user[TSDB_USER_LEN]; // parsed from auth token or login message char pass[HTTP_PASSWORD_LEN]; + char db[/*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN]; TAOS * taos; void * ppContext; 
HttpSession *session; diff --git a/src/plugins/http/inc/httpRestHandle.h b/src/plugins/http/inc/httpRestHandle.h index 632a1dc647..df405685e9 100644 --- a/src/plugins/http/inc/httpRestHandle.h +++ b/src/plugins/http/inc/httpRestHandle.h @@ -22,12 +22,12 @@ #include "httpResp.h" #include "httpSql.h" -#define REST_ROOT_URL_POS 0 -#define REST_ACTION_URL_POS 1 -#define REST_USER_URL_POS 2 -#define REST_PASS_URL_POS 3 +#define REST_ROOT_URL_POS 0 +#define REST_ACTION_URL_POS 1 +#define REST_USER_USEDB_URL_POS 2 +#define REST_PASS_URL_POS 3 void restInitHandle(HttpServer* pServer); bool restProcessRequest(struct HttpContext* pContext); -#endif \ No newline at end of file +#endif diff --git a/src/plugins/http/src/httpRestHandle.c b/src/plugins/http/src/httpRestHandle.c index a285670d20..a029adec0c 100644 --- a/src/plugins/http/src/httpRestHandle.c +++ b/src/plugins/http/src/httpRestHandle.c @@ -62,11 +62,11 @@ void restInitHandle(HttpServer* pServer) { bool restGetUserFromUrl(HttpContext* pContext) { HttpParser* pParser = pContext->parser; - if (pParser->path[REST_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].pos <= 0) { + if (pParser->path[REST_USER_USEDB_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_USEDB_URL_POS].pos <= 0) { return false; } - tstrncpy(pContext->user, pParser->path[REST_USER_URL_POS].str, TSDB_USER_LEN); + tstrncpy(pContext->user, pParser->path[REST_USER_USEDB_URL_POS].str, TSDB_USER_LEN); return true; } @@ -107,6 +107,16 @@ bool restProcessSqlRequest(HttpContext* pContext, int32_t timestampFmt) { HttpSqlCmd* cmd = &(pContext->singleCmd); cmd->nativSql = sql; + /* find if there is db_name in url */ + pContext->db[0] = '\0'; + + HttpString *path = &pContext->parser->path[REST_USER_USEDB_URL_POS]; + if (path->pos > 0 && !(strlen(sql) > 4 && (sql[0] == 'u' || sql[0] == 'U') && + (sql[1] == 's' || sql[1] == 'S') && (sql[2] == 'e' || sql[2] == 'E') && sql[3] == ' ')) + { + snprintf(pContext->db, /*TSDB_ACCT_ID_LEN + 
*/TSDB_DB_NAME_LEN, "%s", path->str); + } + pContext->reqType = HTTP_REQTYPE_SINGLE_SQL; if (timestampFmt == REST_TIMESTAMP_FMT_LOCAL_STRING) { pContext->encodeMethod = &restEncodeSqlLocalTimeStringMethod; diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index c2e723732a..0dd451f72d 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -419,6 +419,11 @@ void httpProcessRequest(HttpContext *pContext) { &(pContext->taos)); httpDebug("context:%p, fd:%d, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, pContext->user, pContext->taos); + + if (pContext->taos != NULL) { + STscObj *pObj = pContext->taos; + pObj->from = TAOS_REQ_FROM_HTTP; + } } else { httpExecCmd(pContext); } From 03c9253d9a3712169f476066a4255a350ffce038 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 22 Aug 2021 21:10:56 +0800 Subject: [PATCH 203/239] Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#7509) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. 
* [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. * [TD-3197]: taosdemo coverity scan issue. configDir buffer overwrite. * [TD-3197]: coverity scan issue. taosdump argument validation. * [TD-3197]: taosdemo and taosdump coverity scan issues. * [TD-3197]: taosdemo coverity scan. append result buf to file. for develop branch. * exit if read sample file failed. * fix converity scan issue. * fix coverity scan issue. * fix coverity scan memory leak. * fix resource leak reported by coverity scan. * fix taosdemo coverity scan issue. * fix tcsetattr and getchar return value determination bug. Co-authored-by: Shuduo Sang --- src/kit/shell/src/shellLinux.c | 4 +++- src/os/src/linux/osSystem.c | 8 ++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 27d678c6d8..f1c578015d 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -190,7 +190,9 @@ static void parse_args( fprintf(stderr, "password reading error\n"); } taosSetConsoleEcho(true); - getchar(); + if (EOF == getchar()) { + fprintf(stderr, "getchar() return EOF\n"); + } } else { tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN); strcpy(argv[i], "-p"); diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c index 0cdb20dbdb..a82149dccb 100644 --- a/src/os/src/linux/osSystem.c +++ b/src/os/src/linux/osSystem.c @@ -63,12 +63,12 @@ int taosSetConsoleEcho(bool on) } if (on) - term.c_lflag|=ECHOFLAGS; + term.c_lflag |= ECHOFLAGS; else - term.c_lflag &=~ECHOFLAGS; + term.c_lflag &= ~ECHOFLAGS; - err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); - if (err == -1 && err == EINTR) { + err = tcsetattr(STDIN_FILENO, TCSAFLUSH, &term); + if (err == -1 || err == EINTR) { perror("Cannot set the attribution of the terminal"); return -1; } From bb9322681ef7013425260fa26987b9874ba874bc Mon Sep 17 00:00:00 2001 From: zyyang Date: Mon, 23 Aug 2021 09:58:36 +0800 Subject: [PATCH 
204/239] fix some error --- .../en/08.connector/01.java/docs.md | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index fa133882e4..5cc9257925 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -1,21 +1,25 @@ # Java connector + + ## Introduction -The taos-jdbcdriver is implemented in two forms: JDBC-JNI and JDBC-RESTful (which supported since taos-jdbcdriver-2.0.18). JDBC-JNI is implemented by calling the local methods of libtaos.so (or taos.dll) on the client, while JDBC-RESTful encapsulates the RESTful interface implementation internally. +The taos-jdbcdriver is implemented in two forms: JDBC-JNI and JDBC-RESTful (supported from taos-jdbcdriver-2.0.18). JDBC-JNI is implemented by calling the local methods of libtaos.so (or taos.dll) on the client, while JDBC-RESTful encapsulates the RESTful interface implementation internally. + + ![tdengine-connector](https://www.taosdata.com/cn/documentation20/user/pages/images/tdengine-jdbc-connector.png) The figure above shows the three ways Java applications can access the TDengine: -* JDBC-JNI: The Java application uses JDBC-JNI's API on physical node1 (pnode1) and directly calls the client API (libtaos.so or taos.dll) to send write and query requests to the taosd instance on physical node2 (Pnode2). +* JDBC-JNI: The Java application uses JDBC-JNI's API on physical node1 (pnode1) and directly calls the client API (libtaos.so or taos.dll) to send write or query requests to the taosd instance on physical node2 (pnode2). * RESTful: The Java application sends the SQL to the RESTful connector on physical node2 (pnode2), which then calls the client API (libtaos.so). * JDBC-RESTful: The Java application uses the JDBC-restful API to encapsulate SQL into a RESTful request and send it to the RESTful connector of physical node 2. 
-In terms of implementation, the JDBC driver of TDengine is as consistent as possible with the behavior of the relational database driver. However, due to the differences between TDengine and relational database in the object and technical characteristics of services, There are some differences between taos-jdbcdriver and traditional relational database JDBC driver. The following points should be paid attention to when using: +In terms of implementation, the JDBC driver of TDengine is as consistent as possible with the behavior of the relational database driver. However, due to the differences between TDengine and relational database in the object and technical characteristics of services, there are some differences between taos-jdbcdriver and traditional relational database JDBC driver. The following points should be watched: -* Currently, you cannot delete a single data record in TDengine. -* Transaction are not currently supported. +* deleting a record is not supported in TDengine. +* transaction is not supported in TDengine. ### Difference between JDBC-JNI and JDBC-restful @@ -90,7 +94,7 @@ The TDengine supports the following data types and Java data types: ### Runtime Requirements -To actually run TDengine's Java connector, you'll need to meet the following: +To run TDengine's Java connector, the following requirements shall be met: 1. A Linux or Windows System @@ -117,7 +121,7 @@ To Java delevopers, TDengine provides `taos-jdbcdriver` according to the JDBC(3. ``` -### Obtain JDBC driver by compile source codes +### Obtain JDBC driver by compiling source code You can download the TDengine source code and compile the latest version of the JDBC Connector. @@ -181,8 +185,6 @@ The configuration parameters in the URL are as follows: * timestampFormat: only valid for JDBC-RESTful. 
'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'. * batchErrorIgnore: true if you want to continue executing the rest of the SQL when error happens during execute the executeBatch method in Statement; false, false if the remaining SQL statements are not executed. Default value is false. - - #### Establishing a connection with URL and Properties In addition to establish the connection with the specified URL, you can also use Properties to specify the parameters to set up the connection, as shown below: @@ -214,8 +216,6 @@ The configuration parameters in properties are as follows: * TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'. * TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true if you want to continue executing the rest of the SQL when error happens during execute the executeBatch method in Statement; false, false if the remaining SQL statements are not executed. Default value is false. - - #### Establishing a connection with configuration file When JDBC-JNI is used to connect to the TDengine cluster, you can specify firstEp and secondEp parameters of the cluster in the client configuration file. As follows: @@ -245,7 +245,7 @@ secondEp cluster_node2:6030 In the above example, JDBC driver uses the client configuration file to establish a connection to the hostname of 'cluster_node1', port 6030, and database name of 'test'. When the firstEp node in the cluster fails, JDBC will try to connect to the cluster using secondEp. 
In the TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established. -**Note**: In this case, the configuration file is belongs to TDengine client which running a Java application. default file path of Linux OS is '/etc/taos/taos.cfg', and default file path of Windows OS is 'C://TDengine/cfg/taos.cfg'. +**Note**: In this case, the configuration file belongs to TDengine client which is running inside a Java application. default file path of Linux OS is '/etc/taos/taos.cfg', and default file path of Windows OS is 'C://TDengine/cfg/taos.cfg'. #### Priority of the parameters @@ -403,9 +403,9 @@ public void setNString(int columnIndex, ArrayList list, int size) throws -### Subscribe +### Data Subscription -#### Create +#### Subscribe ```java TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); @@ -543,4 +543,3 @@ Query OK, 1 row(s) in set (0.000141s) - For other questions, please refer to [Issues](https://github.com/taosdata/TDengine/issues) - From c1fe68407dcde4fef46a48da8e2f2a522bd8abad Mon Sep 17 00:00:00 2001 From: zyyang Date: Mon, 23 Aug 2021 10:07:56 +0800 Subject: [PATCH 205/239] fix a spelling error --- documentation20/en/08.connector/01.java/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index 5cc9257925..fb2fb726a1 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -331,7 +331,7 @@ The Java connector may report three types of error codes: JDBC Driver (error cod ### Write data through parameter binding -Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly improved parameter binding support for data write (INSERT) scenarios. 
Data can be written in the following way, avoiding SQL parsing and significantly improving the write performance.(**Note**: parameter binding is not supported in JDBC-restful) +Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly improved parameter binding support for data write (INSERT) scenarios. Data can be written in the following way, avoiding SQL parsing and significantly improving the write performance.(**Note**: parameter binding is not supported in JDBC-RESTful) ```java Statement stmt = conn.createStatement(); From 913bb6d2cf07969fcb7538be30d8638c9b87867e Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 23 Aug 2021 11:08:40 +0800 Subject: [PATCH 206/239] [TD-6005]: test cases --- tests/http/restful/http_create_db.c | 429 ++++++++++++++++++++++++++ tests/http/restful/http_create_tb.c | 433 ++++++++++++++++++++++++++ tests/http/restful/http_drop_db.c | 433 ++++++++++++++++++++++++++ tests/http/restful/http_insert_tb.c | 455 ++++++++++++++++++++++++++++ tests/http/restful/http_query_tb.c | 432 ++++++++++++++++++++++++++ tests/http/restful/http_use_db.c | 430 ++++++++++++++++++++++++++ 6 files changed, 2612 insertions(+) create mode 100644 tests/http/restful/http_create_db.c create mode 100644 tests/http/restful/http_create_tb.c create mode 100644 tests/http/restful/http_drop_db.c create mode 100644 tests/http/restful/http_insert_tb.c create mode 100644 tests/http/restful/http_query_tb.c create mode 100644 tests/http/restful/http_use_db.c diff --git a/tests/http/restful/http_create_db.c b/tests/http/restful/http_create_db.c new file mode 100644 index 0000000000..0bc52fa6cc --- /dev/null +++ b/tests/http/restful/http_create_db.c @@ -0,0 +1,429 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define RECV_MAX_LINE 2048 +#define ITEM_MAX_LINE 128 +#define REQ_MAX_LINE 2048 +#define REQ_CLI_COUNT 100 + + +typedef enum +{ + uninited, + connecting, + connected, + datasent +} 
conn_stat; + + +typedef enum +{ + false, + true +} bool; + + +typedef unsigned short u16_t; +typedef unsigned int u32_t; + + +typedef struct +{ + int sockfd; + int index; + conn_stat state; + size_t nsent; + size_t nrecv; + size_t nlen; + bool error; + bool success; + struct sockaddr_in serv_addr; +} socket_ctx; + + +int set_nonblocking(int sockfd) +{ + int ret; + + ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK); + if (ret == -1) { + printf("failed to fcntl for %d\r\n", sockfd); + return ret; + } + + return ret; +} + + +int create_socket(const char *ip, const u16_t port, socket_ctx *pctx) +{ + int ret; + + if (ip == NULL || port == 0 || pctx == NULL) { + printf("invalid parameter\r\n"); + return -1; + } + + pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (pctx->sockfd == -1) { + printf("failed to create socket\r\n"); + return -1; + } + + bzero(&pctx->serv_addr, sizeof(struct sockaddr_in)); + + pctx->serv_addr.sin_family = AF_INET; + pctx->serv_addr.sin_port = htons(port); + + ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr); + if (ret <= 0) { + printf("inet_pton error, ip: %s\r\n", ip); + return -1; + } + + ret = set_nonblocking(pctx->sockfd); + if (ret == -1) { + printf("failed to set %d as nonblocking\r\n", pctx->sockfd); + return -1; + } + + return pctx->sockfd; +} + + +void close_sockets(socket_ctx *pctx, int cnt) +{ + int i; + + if (pctx == NULL) { + return; + } + + for (i = 0; i < cnt; i++) { + if (pctx[i].sockfd > 0) { + close(pctx[i].sockfd); + pctx[i].sockfd = -1; + } + } +} + + +int proc_pending_error(socket_ctx *ctx) +{ + int ret; + int err; + socklen_t len; + + if (ctx == NULL) { + return 0; + } + + err = 0; + len = sizeof(int); + + ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (ret == -1) { + err = errno; + } + + if (err) { + printf("failed to connect at index: %d\r\n", ctx->index); + + close(ctx->sockfd); + ctx->sockfd = -1; + + return -1; + } + + return 0; +} + + +void 
build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len) +{ + char req_line[ITEM_MAX_LINE]; + char req_host[ITEM_MAX_LINE]; + char req_cont_type[ITEM_MAX_LINE]; + char req_cont_len[ITEM_MAX_LINE]; + const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + + if (ip == NULL || port == 0 || + url == NULL || url[0] == '\0' || + sql == NULL || sql[0] == '\0' || + req_buf == NULL || len <= 0) + { + return; + } + + snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url); + snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port); + snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain"); + snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql)); + + snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql); +} + + +int add_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op); +} + + +int mod_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op); +} + + +int del_event(int epfd, int sockfd) +{ + struct epoll_event evs_op; + + evs_op.events = 0; + evs_op.data.ptr = NULL; + + return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op); +} + + +int main() +{ + int i; + int ret, n, nsent, nrecv; + int epfd; + u32_t events; + char *str; + socket_ctx *pctx, ctx[REQ_CLI_COUNT]; + char *ip = "127.0.0.1"; + char *url = "/rest/sql"; + u16_t port = 6041; + struct epoll_event evs[REQ_CLI_COUNT]; + char sql[REQ_MAX_LINE]; + char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE]; + char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE]; + int count; + + signal(SIGPIPE, SIG_IGN); + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ctx[i].sockfd = -1; + ctx[i].index = i; 
+ ctx[i].state = uninited; + ctx[i].nsent = 0; + ctx[i].nrecv = 0; + ctx[i].error = false; + ctx[i].success = false; + + memset(sql, 0, REQ_MAX_LINE); + memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + memset(recv_buf[i], 0, RECV_MAX_LINE); + + snprintf(sql, REQ_MAX_LINE, "create database if not exists db%d precision 'us'", i); + build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + + ctx[i].nlen = strlen(send_buf[i]); + } + + epfd = epoll_create(REQ_CLI_COUNT); + if (epfd <= 0) { + printf("failed to create epoll\r\n"); + goto failed; + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = create_socket(ip, port, &ctx[i]); + if (ret == -1) { + printf("failed to create socket ar %d\r\n", i); + goto failed; + } + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + events = EPOLLET | EPOLLIN | EPOLLOUT; + ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]); + if (ret == -1) { + printf("failed to add sockfd at %d to epoll\r\n", i); + goto failed; + } + } + + count = 0; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr)); + if (ret == -1) { + if (errno != EINPROGRESS) { + printf("connect error, index: %d\r\n", ctx[i].index); + (void) del_event(epfd, ctx[i].sockfd); + close(ctx[i].sockfd); + ctx[i].sockfd = -1; + } else { + ctx[i].state = connecting; + count++; + } + + continue; + } + + ctx[i].state = connected; + count++; + } + + printf("clients: %d\r\n", count); + + while (count > 0) { + n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0); + if (n == -1) { + if (errno != EINTR) { + printf("epoll_wait error, reason: %s\r\n", strerror(errno)); + break; + } + } else { + for (i = 0; i < n; i++) { + if (evs[i].events & EPOLLERR) { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("event error, index: %d\r\n", pctx->index); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } else if (evs[i].events & EPOLLIN) { + pctx = (socket_ctx *) 
evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0); + if (nrecv == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno)); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } else if (nrecv == 0) { + printf("peer closed connection, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + break; + } + + pctx->nrecv += nrecv; + if (pctx->nrecv > 12) { + if (pctx->error == false && pctx->success == false) { + str = recv_buf[pctx->index] + 9; + if (str[0] != '2' || str[1] != '0' || str[2] != '0') { + printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]); + pctx->error = true; + } else { + printf("response ok, index: %d\r\n", pctx->index); + pctx->success = true; + } + } + } + } + } else if (evs[i].events & EPOLLOUT) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0); + if (nsent == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to 
/*
 * http_create_tb.c
 *
 * Stress client for the TDengine RESTful endpoint: opens REQ_CLI_COUNT
 * non-blocking TCP connections to a local adapter, POSTs one
 * "create table" statement per connection, and drives send/receive with
 * an edge-triggered epoll loop until every client has finished.
 *
 * NOTE(review): the original paste lost the header names; the set below
 * is reconstructed from what the code actually calls (sockets, epoll,
 * fcntl, signal, errno) — confirm against the repository copy.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <netinet/in.h>
#include <arpa/inet.h>


#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE  2048
#define REQ_CLI_COUNT 100


/* Life-cycle of one client connection. */
typedef enum
{
    uninited,
    connecting,   /* connect() returned EINPROGRESS, completion pending */
    connected,    /* TCP established, request not yet fully written */
    datasent      /* request fully written, waiting for the response */
} conn_stat;


/* Hand-rolled boolean, kept so the sibling test programs stay identical.
 * false == 0 and true == 1, as with <stdbool.h>. */
typedef enum
{
    false,
    true
} bool;


typedef unsigned short u16_t;
typedef unsigned int   u32_t;


/* Per-connection state; one entry per simulated client. */
typedef struct
{
    int                sockfd;
    int                index;      /* slot in ctx[]; also selects db/tb name */
    conn_stat          state;
    size_t             nsent;      /* bytes of the request already sent */
    size_t             nrecv;      /* bytes of the response received so far */
    size_t             nlen;       /* total request length */
    bool               error;      /* response status line was not 200 */
    bool               success;    /* response status line was 200 */
    struct sockaddr_in serv_addr;
} socket_ctx;


/* Put sockfd into O_NONBLOCK mode; returns -1 on fcntl failure. */
int set_nonblocking(int sockfd)
{
    int ret;

    ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }

    return ret;
}


/* Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the fd on success, -1 on failure (a partially created fd is
 * left in pctx->sockfd and is reclaimed later by close_sockets()). */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));

    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port   = htons(port);

    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }

    return pctx->sockfd;
}


/* Close every still-open socket in a ctx array of cnt entries. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    int i;

    if (pctx == NULL) {
        return;
    }

    for (i = 0; i < cnt; i++) {
        if (pctx[i].sockfd > 0) {
            close(pctx[i].sockfd);
            pctx[i].sockfd = -1;
        }
    }
}


/* After a non-blocking connect completes, check SO_ERROR.
 * Returns 0 when the connection succeeded; on failure closes the fd,
 * sets ctx->sockfd = -1 and returns -1. */
int proc_pending_error(socket_ctx *ctx)
{
    int       ret;
    int       err;
    socklen_t len;

    if (ctx == NULL) {
        return 0;
    }

    err = 0;
    len = sizeof(int);

    ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
    if (ret == -1) {
        err = errno;
    }

    if (err) {
        printf("failed to connect at index: %d\r\n", ctx->index);

        close(ctx->sockfd);
        ctx->sockfd = -1;

        return -1;
    }

    return 0;
}


/* Assemble a complete HTTP/1.1 POST request for the REST SQL endpoint
 * into req_buf (at most len bytes).  The hard-coded Authorization header
 * is base64("root:taosdata"), the default TDengine credentials. */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* FIX: strlen() returns size_t; %ld is a format/type mismatch (UB on
     * 32-bit targets) — %zu is the correct length modifier. */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));

    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}


/* Register sockfd with the epoll instance; data is echoed back in events. */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events   = events;

    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
}


/* Change the event mask of an already-registered sockfd. */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events   = events;

    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
}


/* Remove sockfd from the epoll instance. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event evs_op;

    evs_op.events   = 0;
    evs_op.data.ptr = NULL;

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
}


int main()
{
    int                i;
    int                ret, n, nsent, nrecv;
    int                epfd = -1;   /* FIX: initialized so the cleanup path never tests garbage */
    u32_t              events;
    char              *str;
    socket_ctx        *pctx, ctx[REQ_CLI_COUNT];
    char              *ip = "127.0.0.1";
    char              *url_prefix = "/rest/sql";
    char               url[ITEM_MAX_LINE];
    u16_t              port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char               sql[REQ_MAX_LINE];
    char               send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char               recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int                count;

    /* A peer closing mid-send must not kill the process. */
    signal(SIGPIPE, SIG_IGN);

    /* Build one "create table" request per client, targeting db<i>. */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd  = -1;
        ctx[i].index   = i;
        ctx[i].state   = uninited;
        ctx[i].nsent   = 0;
        ctx[i].nrecv   = 0;
        ctx[i].error   = false;
        ctx[i].success = false;

        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);

        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        snprintf(sql, REQ_MAX_LINE, "create table if not exists tb%d (ts timestamp, index int, val binary(40))", i);

        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);

        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    count = 0;

    /* Fire all the non-blocking connects; count tracks live clients. */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }

            continue;
        }

        ctx[i].state = connected;
        count++;
    }

    printf("clients: %d\r\n", count);

    while (count > 0) {
        /* FIX: the original polled with timeout 0, spinning a CPU at 100%%;
         * every live fd is registered with epoll, so block until events. */
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, -1);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);   /* close() also drops it from epoll */
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            /* proc_pending_error already closed the fd; the
                             * calls below act on -1 and fail harmlessly. */
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    /* Edge-triggered: drain the socket until EAGAIN. */
                    for ( ;; ) {
                        /* FIX: bound the read by the space left in recv_buf
                         * (the original always passed RECV_MAX_LINE, which
                         * overflows the buffer once nrecv > 0); keep one
                         * byte for a NUL so the buffer stays printable. */
                        if (pctx->nrecv >= RECV_MAX_LINE - 1) {
                            break;
                        }
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv,
                                     RECV_MAX_LINE - 1 - pctx->nrecv, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }

                        pctx->nrecv += nrecv;
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                /* offset 9 skips "HTTP/1.1 " to the status code */
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    /* Write the request; once complete, switch to read-only. */
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        }

                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);

                            pctx->state = datasent;

                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *) pctx);

                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:

    if (epfd > 0) {
        close(epfd);
    }

    close_sockets(ctx, REQ_CLI_COUNT);

    return 0;
}
/*
 * http_drop_db.c
 *
 * Stress client for the TDengine RESTful endpoint: opens REQ_CLI_COUNT
 * non-blocking connections and POSTs one "drop database" statement per
 * connection, driven by an edge-triggered epoll loop.
 *
 * NOTE(review): header names were lost in the original paste; the set
 * below is reconstructed from the calls made — confirm against the repo.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <netinet/in.h>
#include <arpa/inet.h>


#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE  2048
#define REQ_CLI_COUNT 100


/* Life-cycle of one client connection. */
typedef enum
{
    uninited,
    connecting,   /* connect() returned EINPROGRESS */
    connected,    /* TCP established, request not yet fully written */
    datasent      /* request fully written, awaiting response */
} conn_stat;


/* Hand-rolled boolean (false == 0, true == 1), kept for parity with the
 * sibling test programs. */
typedef enum
{
    false,
    true
} bool;


typedef unsigned short u16_t;
typedef unsigned int   u32_t;


/* Per-connection state. */
typedef struct
{
    int                sockfd;
    int                index;      /* slot in ctx[]; also selects the db name */
    conn_stat          state;
    size_t             nsent;      /* request bytes already sent */
    size_t             nrecv;      /* response bytes received so far */
    size_t             nlen;       /* total request length */
    bool               error;      /* non-200 status seen */
    bool               success;    /* 200 status seen */
    struct sockaddr_in serv_addr;
} socket_ctx;


/* Put sockfd into O_NONBLOCK mode; -1 on fcntl failure. */
int set_nonblocking(int sockfd)
{
    int ret;

    ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }

    return ret;
}


/* Create a non-blocking TCP socket and fill pctx->serv_addr.
 * Returns the fd, or -1 (leftover fd is reclaimed by close_sockets()). */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));

    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port   = htons(port);

    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }

    return pctx->sockfd;
}


/* Close every still-open socket in the ctx array. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    int i;

    if (pctx == NULL) {
        return;
    }

    for (i = 0; i < cnt; i++) {
        if (pctx[i].sockfd > 0) {
            close(pctx[i].sockfd);
            pctx[i].sockfd = -1;
        }
    }
}


/* Check SO_ERROR after a non-blocking connect; 0 on success, else closes
 * the fd, sets ctx->sockfd = -1 and returns -1. */
int proc_pending_error(socket_ctx *ctx)
{
    int       ret;
    int       err;
    socklen_t len;

    if (ctx == NULL) {
        return 0;
    }

    err = 0;
    len = sizeof(int);

    ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
    if (ret == -1) {
        err = errno;
    }

    if (err) {
        printf("failed to connect at index: %d\r\n", ctx->index);

        close(ctx->sockfd);
        ctx->sockfd = -1;

        return -1;
    }

    return 0;
}


/* Assemble a complete HTTP/1.1 POST for the REST SQL endpoint into
 * req_buf (at most len bytes).  Auth header is base64("root:taosdata"). */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* FIX: %zu is the correct modifier for the size_t from strlen();
     * the original %ld is a format/type mismatch. */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));

    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}


/* Register sockfd with epoll. */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events   = events;

    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
}


/* Change the event mask of a registered sockfd. */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events   = events;

    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
}


/* Remove sockfd from epoll. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event evs_op;

    evs_op.events   = 0;
    evs_op.data.ptr = NULL;

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
}


int main()
{
    int                i;
    int                ret, n, nsent, nrecv;
    int                epfd = -1;   /* FIX: initialized for the cleanup path */
    u32_t              events;
    char              *str;
    socket_ctx        *pctx, ctx[REQ_CLI_COUNT];
    char              *ip = "127.0.0.1";
    char              *url_prefix = "/rest/sql";
    char               url[ITEM_MAX_LINE];
    u16_t              port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char               sql[REQ_MAX_LINE];
    char               send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char               recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int                count;

    signal(SIGPIPE, SIG_IGN);

    /* Build one "drop database" request per client, targeting db<i>. */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd  = -1;
        ctx[i].index   = i;
        ctx[i].state   = uninited;
        ctx[i].nsent   = 0;
        ctx[i].nrecv   = 0;
        ctx[i].error   = false;
        ctx[i].success = false;

        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);

        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        snprintf(sql, REQ_MAX_LINE, "drop database if exists db%d", i);

        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);

        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    count = 0;

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }

            continue;
        }

        ctx[i].state = connected;
        count++;
    }

    printf("clients: %d\r\n", count);

    while (count > 0) {
        /* FIX: block instead of busy-polling with timeout 0. */
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, -1);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        /* FIX: bound the read by the remaining buffer space
                         * (original overflowed recv_buf once nrecv > 0);
                         * reserve one byte for the NUL terminator. */
                        if (pctx->nrecv >= RECV_MAX_LINE - 1) {
                            break;
                        }
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv,
                                     RECV_MAX_LINE - 1 - pctx->nrecv, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }

                        pctx->nrecv += nrecv;
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                /* offset 9 skips "HTTP/1.1 " */
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        }

                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);

                            pctx->state = datasent;

                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *) pctx);

                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:

    if (epfd > 0) {
        close(epfd);
    }

    close_sockets(ctx, REQ_CLI_COUNT);

    return 0;
}
/*
 * http_insert_tb.c
 *
 * Stress client for the TDengine RESTful endpoint: opens REQ_CLI_COUNT
 * non-blocking connections and POSTs one batched "insert into tb<i>"
 * statement per connection, driven by an edge-triggered epoll loop.
 *
 * NOTE(review): header names were lost in the original paste; the set
 * below is reconstructed from the calls made — confirm against the repo.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <netinet/in.h>
#include <arpa/inet.h>


#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE  4096
#define REQ_CLI_COUNT 100


/* Life-cycle of one client connection. */
typedef enum
{
    uninited,
    connecting,   /* connect() returned EINPROGRESS */
    connected,    /* TCP established, request not yet fully written */
    datasent      /* request fully written, awaiting response */
} conn_stat;


/* Hand-rolled boolean (false == 0, true == 1), kept for parity with the
 * sibling test programs. */
typedef enum
{
    false,
    true
} bool;


/* Per-connection state. */
typedef struct
{
    int                sockfd;
    int                index;      /* slot in ctx[]; also selects the table name */
    conn_stat          state;
    size_t             nsent;      /* request bytes already sent */
    size_t             nrecv;      /* response bytes received so far */
    size_t             nlen;       /* total request length */
    bool               error;      /* non-200 status seen */
    bool               success;    /* 200 status seen */
    struct sockaddr_in serv_addr;
} socket_ctx;


/* Put sockfd into O_NONBLOCK mode; -1 on fcntl failure. */
int set_nonblocking(int sockfd)
{
    int ret;

    ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }

    return ret;
}


/* Create a non-blocking TCP socket and fill pctx->serv_addr.
 * Returns the fd, or -1 (leftover fd is reclaimed by close_sockets()). */
int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));

    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port   = htons(port);

    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }

    return pctx->sockfd;
}


/* Close every still-open socket in the ctx array. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    int i;

    if (pctx == NULL) {
        return;
    }

    for (i = 0; i < cnt; i++) {
        if (pctx[i].sockfd > 0) {
            close(pctx[i].sockfd);
            pctx[i].sockfd = -1;
        }
    }
}


/* Check SO_ERROR after a non-blocking connect; 0 on success, else closes
 * the fd, sets ctx->sockfd = -1 and returns -1. */
int proc_pending_error(socket_ctx *ctx)
{
    int       ret;
    int       err;
    socklen_t len;

    if (ctx == NULL) {
        return 0;
    }

    err = 0;
    len = sizeof(int);

    ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
    if (ret == -1) {
        err = errno;
    }

    if (err) {
        printf("failed to connect at index: %d\r\n", ctx->index);

        close(ctx->sockfd);
        ctx->sockfd = -1;

        return -1;
    }

    return 0;
}


/* Assemble a complete HTTP/1.1 POST for the REST SQL endpoint into
 * req_buf (at most len bytes).  Auth header is base64("root:taosdata"). */
void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* FIX: %zu for the size_t result of strlen(); %ld was a mismatch. */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));

    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}


/* Register sockfd with epoll. */
int add_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events   = events;

    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
}


/* Change the event mask of a registered sockfd. */
int mod_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events   = events;

    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
}


/* Remove sockfd from epoll. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event evs_op;

    evs_op.events   = 0;
    evs_op.data.ptr = NULL;

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
}


int main()
{
    int                i;
    int                ret, n, nsent, nrecv, offset;
    int                epfd = -1;   /* FIX: the sql-building loop can goto failed
                                     * before epoll_create(); without this init
                                     * the cleanup path tested an uninitialized
                                     * value (undefined behavior). */
    uint32_t           events;
    char              *str;
    socket_ctx        *pctx, ctx[REQ_CLI_COUNT];
    char              *ip = "127.0.0.1";
    char              *url_prefix = "/rest/sql";
    char               url[ITEM_MAX_LINE];
    uint16_t           port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    struct timeval     now;
    int64_t            start_time;
    char               sql[REQ_MAX_LINE];
    char               send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char               recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int                count;

    signal(SIGPIPE, SIG_IGN);

    /* Microsecond timestamp base; the db was created with precision 'us'. */
    gettimeofday(&now, NULL);
    start_time = now.tv_sec * 1000000 + now.tv_usec;

    /* Build one batched insert per client.  Values are appended until the
     * statement approaches REQ_MAX_LINE.
     * NOTE(review): every row of a table uses the same timestamp
     * (start_time + i), so the rows collapse into one — presumably a
     * per-row increment was intended; verify before relying on row counts. */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd  = -1;
        ctx[i].index   = i;
        ctx[i].state   = uninited;
        ctx[i].nsent   = 0;
        ctx[i].nrecv   = 0;
        ctx[i].error   = false;
        ctx[i].success = false;

        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);

        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);

        offset = 0;

        ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "insert into tb%d values ", i);
        if (ret <= 0) {
            printf("failed to snprintf for sql(prefix), index: %d\r\n ", i);
            goto failed;
        }

        offset += ret;

        while (offset < REQ_MAX_LINE - 128) {
            ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "(%"PRId64", %d, 'test_string_%d') ", start_time + i, i, i);
            if (ret <= 0) {
                printf("failed to snprintf for sql(values), index: %d\r\n ", i);
                goto failed;
            }

            offset += ret;
        }

        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);

        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    count = 0;

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }

            continue;
        }

        ctx[i].state = connected;
        count++;
    }

    printf("clients: %d\r\n", count);

    while (count > 0) {
        /* FIX: block instead of busy-polling with timeout 0. */
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, -1);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        /* FIX: bound the read by the remaining buffer space
                         * (original overflowed recv_buf once nrecv > 0);
                         * reserve one byte for the NUL terminator. */
                        if (pctx->nrecv >= RECV_MAX_LINE - 1) {
                            break;
                        }
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv,
                                     RECV_MAX_LINE - 1 - pctx->nrecv, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }

                        pctx->nrecv += nrecv;
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                /* offset 9 skips "HTTP/1.1 " */
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        }

                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);

                            pctx->state = datasent;

                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *) pctx);

                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:

    if (epfd > 0) {
        close(epfd);
    }

    close_sockets(ctx, REQ_CLI_COUNT);

    return 0;
}
/*
 * http_query_tb.c — preamble and helper routines.
 *
 * Shared scaffolding for the REST query stress client: connection state,
 * non-blocking socket setup, HTTP request assembly, and thin epoll_ctl
 * wrappers.  (The driving main() continues past this section.)
 *
 * NOTE(review): header names were lost in the original paste; the set
 * below is reconstructed from the calls made — confirm against the repo.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <netinet/in.h>
#include <arpa/inet.h>


#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE  4096
#define REQ_CLI_COUNT 100


/* Life-cycle of one client connection. */
typedef enum
{
    uninited,
    connecting,   /* connect() returned EINPROGRESS */
    connected,    /* TCP established, request not yet fully written */
    datasent      /* request fully written, awaiting response */
} conn_stat;


/* Hand-rolled boolean (false == 0, true == 1), kept for parity with the
 * sibling test programs. */
typedef enum
{
    false,
    true
} bool;


/* Per-connection state. */
typedef struct
{
    int                sockfd;
    int                index;      /* slot in the ctx array */
    conn_stat          state;
    size_t             nsent;      /* request bytes already sent */
    size_t             nrecv;      /* response bytes received so far */
    size_t             nlen;       /* total request length */
    bool               error;      /* non-200 status seen */
    bool               success;    /* 200 status seen */
    struct sockaddr_in serv_addr;
} socket_ctx;


/* Put sockfd into O_NONBLOCK mode; -1 on fcntl failure. */
int set_nonblocking(int sockfd)
{
    int ret;

    ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }

    return ret;
}


/* Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the fd, or -1 (any leftover fd is reclaimed by close_sockets()). */
int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));

    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port   = htons(port);

    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }

    return pctx->sockfd;
}


/* Close every still-open socket in the ctx array. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    int i;

    if (pctx == NULL) {
        return;
    }

    for (i = 0; i < cnt; i++) {
        if (pctx[i].sockfd > 0) {
            close(pctx[i].sockfd);
            pctx[i].sockfd = -1;
        }
    }
}


/* Check SO_ERROR after a non-blocking connect completes; 0 on success,
 * otherwise closes the fd, sets ctx->sockfd = -1 and returns -1. */
int proc_pending_error(socket_ctx *ctx)
{
    int       ret;
    int       err;
    socklen_t len;

    if (ctx == NULL) {
        return 0;
    }

    err = 0;
    len = sizeof(int);

    ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
    if (ret == -1) {
        err = errno;
    }

    if (err) {
        printf("failed to connect at index: %d\r\n", ctx->index);

        close(ctx->sockfd);
        ctx->sockfd = -1;

        return -1;
    }

    return 0;
}


/* Assemble a complete HTTP/1.1 POST for the REST SQL endpoint into
 * req_buf (at most len bytes).  The hard-coded Authorization header is
 * base64("root:taosdata"), the default TDengine credentials. */
void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* FIX: strlen() returns size_t; %zu is the matching length modifier
     * (the original %ld is a format/type mismatch, UB on ILP32). */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));

    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}


/* Register sockfd with the epoll instance. */
int add_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events   = events;

    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
}


/* Change the event mask of an already-registered sockfd. */
int mod_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events   = events;

    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
}


/* Remove sockfd from the epoll instance. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event evs_op;

    evs_op.events   = 0;
    evs_op.data.ptr = NULL;

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
}
ITEM_MAX_LINE]; + char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE]; + int count; + + signal(SIGPIPE, SIG_IGN); + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ctx[i].sockfd = -1; + ctx[i].index = i; + ctx[i].state = uninited; + ctx[i].nsent = 0; + ctx[i].nrecv = 0; + ctx[i].error = false; + ctx[i].success = false; + + memset(url, 0, ITEM_MAX_LINE); + memset(sql, 0, REQ_MAX_LINE); + memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + memset(recv_buf[i], 0, RECV_MAX_LINE); + + snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i); + + snprintf(sql, REQ_MAX_LINE, "select count(*) from tb%d", i); + + build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + + ctx[i].nlen = strlen(send_buf[i]); + } + + epfd = epoll_create(REQ_CLI_COUNT); + if (epfd <= 0) { + printf("failed to create epoll\r\n"); + goto failed; + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = create_socket(ip, port, &ctx[i]); + if (ret == -1) { + printf("failed to create socket, index: %d\r\n", i); + goto failed; + } + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + events = EPOLLET | EPOLLIN | EPOLLOUT; + ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]); + if (ret == -1) { + printf("failed to add sockfd to epoll, index: %d\r\n", i); + goto failed; + } + } + + count = 0; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr)); + if (ret == -1) { + if (errno != EINPROGRESS) { + printf("connect error, index: %d\r\n", ctx[i].index); + (void) del_event(epfd, ctx[i].sockfd); + close(ctx[i].sockfd); + ctx[i].sockfd = -1; + } else { + ctx[i].state = connecting; + count++; + } + + continue; + } + + ctx[i].state = connected; + count++; + } + + printf("clients: %d\r\n", count); + + while (count > 0) { + n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2); + if (n == -1) { + if (errno != EINTR) { + printf("epoll_wait error, reason: %s\r\n", strerror(errno)); + break; + } + } else { + for (i = 0; 
i < n; i++) { + if (evs[i].events & EPOLLERR) { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("event error, index: %d\r\n", pctx->index); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } else if (evs[i].events & EPOLLIN) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0); + if (nrecv == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno)); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } else if (nrecv == 0) { + printf("peer closed connection, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + break; + } + + pctx->nrecv += nrecv; + if (pctx->nrecv > 12) { + if (pctx->error == false && pctx->success == false) { + str = recv_buf[pctx->index] + 9; + if (str[0] != '2' || str[1] != '0' || str[2] != '0') { + printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]); + pctx->error = true; + } else { + printf("response ok, index: %d\r\n", pctx->index); + pctx->success = true; + } + } + } + } + } else if (evs[i].events & EPOLLOUT) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + 
close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0); + if (nsent == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to send, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } + + if (nsent == (int) (pctx->nlen - pctx->nsent)) { + printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index); + + pctx->state = datasent; + + events = EPOLLET | EPOLLIN; + (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx); + + break; + } else { + pctx->nsent += nsent; + } + } + } else { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + } + } + } + +failed: + + if (epfd > 0) { + close(epfd); + } + + close_sockets(ctx, REQ_CLI_COUNT); + + return 0; +} diff --git a/tests/http/restful/http_use_db.c b/tests/http/restful/http_use_db.c new file mode 100644 index 0000000000..3b27022470 --- /dev/null +++ b/tests/http/restful/http_use_db.c @@ -0,0 +1,430 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define RECV_MAX_LINE 2048 +#define ITEM_MAX_LINE 128 +#define REQ_MAX_LINE 2048 +#define REQ_CLI_COUNT 100 + + +typedef enum +{ + uninited, + connecting, + connected, + datasent +} conn_stat; + + +typedef enum +{ + false, + true +} bool; + + +typedef unsigned short u16_t; +typedef unsigned int u32_t; + + +typedef struct +{ + int sockfd; + int index; + conn_stat state; + size_t nsent; + size_t nrecv; + size_t nlen; + bool error; + bool success; + struct sockaddr_in serv_addr; +} socket_ctx; + + +int set_nonblocking(int sockfd) +{ + int ret; + + ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, 
F_GETFL) | O_NONBLOCK); + if (ret == -1) { + printf("failed to fcntl for %d\r\n", sockfd); + return ret; + } + + return ret; +} + + +int create_socket(const char *ip, const u16_t port, socket_ctx *pctx) +{ + int ret; + + if (ip == NULL || port == 0 || pctx == NULL) { + printf("invalid parameter\r\n"); + return -1; + } + + pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (pctx->sockfd == -1) { + printf("failed to create socket\r\n"); + return -1; + } + + bzero(&pctx->serv_addr, sizeof(struct sockaddr_in)); + + pctx->serv_addr.sin_family = AF_INET; + pctx->serv_addr.sin_port = htons(port); + + ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr); + if (ret <= 0) { + printf("inet_pton error, ip: %s\r\n", ip); + return -1; + } + + ret = set_nonblocking(pctx->sockfd); + if (ret == -1) { + printf("failed to set %d as nonblocking\r\n", pctx->sockfd); + return -1; + } + + return pctx->sockfd; +} + + +void close_sockets(socket_ctx *pctx, int cnt) +{ + int i; + + if (pctx == NULL) { + return; + } + + for (i = 0; i < cnt; i++) { + if (pctx[i].sockfd > 0) { + close(pctx[i].sockfd); + pctx[i].sockfd = -1; + } + } +} + + +int proc_pending_error(socket_ctx *ctx) +{ + int ret; + int err; + socklen_t len; + + if (ctx == NULL) { + return 0; + } + + err = 0; + len = sizeof(int); + + ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (ret == -1) { + err = errno; + } + + if (err) { + printf("failed to connect at index: %d\r\n", ctx->index); + + close(ctx->sockfd); + ctx->sockfd = -1; + + return -1; + } + + return 0; +} + + +void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len) +{ + char req_line[ITEM_MAX_LINE]; + char req_host[ITEM_MAX_LINE]; + char req_cont_type[ITEM_MAX_LINE]; + char req_cont_len[ITEM_MAX_LINE]; + const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + + if (ip == NULL || port == 0 || + url == NULL || url[0] == '\0' || + sql == NULL || sql[0] == '\0' || + req_buf == NULL || len 
<= 0) + { + return; + } + + snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url); + snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port); + snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain"); + snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql)); + + snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql); +} + + +int add_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op); +} + + +int mod_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op); +} + + +int del_event(int epfd, int sockfd) +{ + struct epoll_event evs_op; + + evs_op.events = 0; + evs_op.data.ptr = NULL; + + return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op); +} + + +int main() +{ + int i; + int ret, n, nsent, nrecv; + int epfd; + u32_t events; + char *str; + socket_ctx *pctx, ctx[REQ_CLI_COUNT]; + char *ip = "127.0.0.1"; + char *url = "/rest/sql"; + u16_t port = 6041; + struct epoll_event evs[REQ_CLI_COUNT]; + char sql[REQ_MAX_LINE]; + char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE]; + char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE]; + int count; + + signal(SIGPIPE, SIG_IGN); + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ctx[i].sockfd = -1; + ctx[i].index = i; + ctx[i].state = uninited; + ctx[i].nsent = 0; + ctx[i].nrecv = 0; + ctx[i].error = false; + ctx[i].success = false; + + memset(sql, 0, REQ_MAX_LINE); + memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + memset(recv_buf[i], 0, RECV_MAX_LINE); + + snprintf(sql, REQ_MAX_LINE, "use db%d", i); + + build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + + ctx[i].nlen = strlen(send_buf[i]); 
+ } + + epfd = epoll_create(REQ_CLI_COUNT); + if (epfd <= 0) { + printf("failed to create epoll\r\n"); + goto failed; + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = create_socket(ip, port, &ctx[i]); + if (ret == -1) { + printf("failed to create socket, index: %d\r\n", i); + goto failed; + } + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + events = EPOLLET | EPOLLIN | EPOLLOUT; + ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]); + if (ret == -1) { + printf("failed to add sockfd to epoll, index: %d\r\n", i); + goto failed; + } + } + + count = 0; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr)); + if (ret == -1) { + if (errno != EINPROGRESS) { + printf("connect error, index: %d\r\n", ctx[i].index); + (void) del_event(epfd, ctx[i].sockfd); + close(ctx[i].sockfd); + ctx[i].sockfd = -1; + } else { + ctx[i].state = connecting; + count++; + } + + continue; + } + + ctx[i].state = connected; + count++; + } + + printf("clients: %d\r\n", count); + + while (count > 0) { + n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2); + if (n == -1) { + if (errno != EINTR) { + printf("epoll_wait error, reason: %s\r\n", strerror(errno)); + break; + } + } else { + for (i = 0; i < n; i++) { + if (evs[i].events & EPOLLERR) { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("event error, index: %d\r\n", pctx->index); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } else if (evs[i].events & EPOLLIN) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, 
RECV_MAX_LINE, 0); + if (nrecv == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno)); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } else if (nrecv == 0) { + printf("peer closed connection, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + break; + } + + pctx->nrecv += nrecv; + if (pctx->nrecv > 12) { + if (pctx->error == false && pctx->success == false) { + str = recv_buf[pctx->index] + 9; + if (str[0] != '2' || str[1] != '0' || str[2] != '0') { + printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]); + pctx->error = true; + } else { + printf("response ok, index: %d\r\n", pctx->index); + pctx->success = true; + } + } + } + } + } else if (evs[i].events & EPOLLOUT) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0); + if (nsent == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to send, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } + + if (nsent == (int) (pctx->nlen - pctx->nsent)) { + printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index); + + pctx->state = datasent; + + events = EPOLLET | EPOLLIN; + (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx); + + break; + } else { + pctx->nsent += nsent; + } 
+ } + } else { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + } + } + } + +failed: + + if (epfd > 0) { + close(epfd); + } + + close_sockets(ctx, REQ_CLI_COUNT); + + return 0; +} From db2360e5feeb03314955a67847761cc30287f266 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=8D=A1=E6=AF=94=E5=8D=A1=E6=AF=94?= <30525741+jackwener@users.noreply.github.com> Date: Mon, 23 Aug 2021 11:25:31 +0800 Subject: [PATCH 207/239] Update docs.md --- documentation20/cn/01.evaluation/docs.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/cn/01.evaluation/docs.md b/documentation20/cn/01.evaluation/docs.md index 88b591faf9..2cc6033ccc 100644 --- a/documentation20/cn/01.evaluation/docs.md +++ b/documentation20/cn/01.evaluation/docs.md @@ -6,8 +6,8 @@ TDengine 是涛思数据面对高速增长的物联网大数据市场和技术 TDengine 的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine 还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与 Hadoop 等典型的大数据平台相比,它具有如下鲜明的特点: -* __10倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少 2 万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。 -* __硬件或云服务成本降至1/5__:由于超强性能,计算资源不到通用大数据方案的 1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的 1/10。 +* __10 倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少 2 万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。 +* __硬件或云服务成本降至 1/5__:由于超强性能,计算资源不到通用大数据方案的 1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的 1/10。 * __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成 Kafka/Redis/HBase/Spark/HDFS 等软件,大幅降低应用开发和维护的复杂度成本。 * __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过 Shell, Python, R, MATLAB 随时进行。 * __与第三方工具无缝连接__:不用一行代码,即可与 Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R 等集成。后续将支持 OPC, Hadoop, Spark 等,BI 工具也将无缝连接。 From 0c073c84dac6ef19af2ae73265ffcf6b0ad53e33 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 23 Aug 2021 11:27:47 +0800 Subject: [PATCH 208/239] [TD-6255] : fix minor format things in English Java doc. 
--- .../cn/08.connector/01.java/docs.md | 22 +++----- documentation20/en/00.index/docs.md | 2 +- .../en/08.connector/01.java/docs.md | 56 +++++-------------- 3 files changed, 23 insertions(+), 57 deletions(-) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index ca045eff87..641ef05a2e 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -178,15 +178,9 @@ Connection conn = DriverManager.getConnection(jdbcUrl); 以上示例,使用了 JDBC-JNI 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 -**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 +**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库(Linux 下是 libtaos.so;Windows 下是 taos.dll)。 -* libtaos.so - 在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 - -* taos.dll - 在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 - -> 在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。 +> 在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF) 连接远程 TDengine Server。 JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。 @@ -194,9 +188,9 @@ TDengine 的 JDBC URL 规范格式为: `jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` url中的配置参数如下: -* user:登录 TDengine 用户名,默认值 root。 -* password:用户登录密码,默认值 taosdata。 -* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 +* user:登录 TDengine 用户名,默认值 'root'。 +* 
password:用户登录密码,默认值 'taosdata'。 +* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。 * charset:客户端使用的字符集,默认值为系统字符集。 * locale:客户端语言环境,默认值系统当前 locale。 * timezone:客户端使用的时区,默认值为系统当前时区。 @@ -222,9 +216,9 @@ public Connection getConn() throws Exception{ 以上示例,建立一个到 hostname 为 taosdemo.com,端口为 6030,数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root,密码(password)为 taosdata,并在 connProps 中指定了使用的字符集、语言环境、时区等信息。 properties 中的配置参数如下: -* TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 root。 -* TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 taosdata。 -* TSDBDriver.PROPERTY_KEY_CONFIG_DIR:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 +* TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 'root'。 +* TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 'taosdata'。 +* TSDBDriver.PROPERTY_KEY_CONFIG_DIR:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。 * TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。 * TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。 * TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。 diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md index a10c22ee62..1672c70b3c 100644 --- a/documentation20/en/00.index/docs.md +++ b/documentation20/en/00.index/docs.md @@ -71,7 +71,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series ## [Connector](/connector) - [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library -- [Java Connector(JDBC)]: driver for connecting to the server from Java applications using the JDBC API +- [Java Connector(JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API - [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications - [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP - [Go Connector](/connector#go): 
driver for connecting to TDengine server from Go applications diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index fb2fb726a1..b5bbdc9949 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -1,14 +1,10 @@ # Java connector - - ## Introduction The taos-jdbcdriver is implemented in two forms: JDBC-JNI and JDBC-RESTful (supported from taos-jdbcdriver-2.0.18). JDBC-JNI is implemented by calling the local methods of libtaos.so (or taos.dll) on the client, while JDBC-RESTful encapsulates the RESTful interface implementation internally. - - -![tdengine-connector](https://www.taosdata.com/cn/documentation20/user/pages/images/tdengine-jdbc-connector.png) +![tdengine-connector](page://images/tdengine-jdbc-connector.png) The figure above shows the three ways Java applications can access the TDengine: @@ -69,8 +65,6 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES( | 1.0.2 | 1.6.1.x and above | 1.8.x | | 1.0.1 | 1.6.1.x and above | 1.8.x | - - ## DataType in TDengine and Java connector The TDengine supports the following data types and Java data types: @@ -88,8 +82,6 @@ The TDengine supports the following data types and Java data types: | BINARY | byte[] | | NCHAR | java.lang.String | - - ## Install Java connector ### Runtime Requirements @@ -131,9 +123,7 @@ You can download the TDengine source code and compile the latest version of the mvn clean package -Dmaven.test.skip=true ``` -a taos-jdbcdriver-2.0.xx-dist.jar will be released in the target directory - - +a taos-jdbcdriver-2.0.xx-dist.jar will be released in the target directory. ## Usage of java connector @@ -165,7 +155,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); In the example above, The JDBC-JNI driver is used to establish a connection to the hostname of 'taosdemo.com', port 6030 (TDengine's default port), and database name of 'test'. 
This URL specifies the user name as 'root' and the password as 'taosdata'. -You can also see the JDBC-JNI video tutorial: [JDBC connector of TDengine](https://www.taosdata.com/blog/2020/11/11/1955.html) + The format of JDBC URL is: @@ -175,9 +165,9 @@ jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password= The configuration parameters in the URL are as follows: -* user: user name for logging in to the TDengine. The default value is root. -* password: the user login password. The default value is taosdata. -* cfgdir: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is /etc/taos on Linux and C:/TDengine/cfg on Windows. +* user: user name for logging in to the TDengine. The default value is 'root'. +* password: the user login password. The default value is 'taosdata'. +* cfgdir: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows. * charset: character set used by the client. The default value is the system character set. * locale: client locale. The default value is the current system locale. * timezone: timezone used by the client. The default value is the current timezone of the system. @@ -206,9 +196,9 @@ In the example above, JDBC-JNI is used to establish a connection to hostname of The configuration parameters in properties are as follows: -* TSDBDriver.PROPERTY_KEY_USER: user name for logging in to the TDengine. The default value is root. -* TSDBDriver.PROPERTY_KEY_PASSWORD: the user login password. The default value is taosdata. -* TSDBDriver.PROPERTY_KEY_CONFIG_DIR: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is /etc/taos on Linux and C:/TDengine/cfg on Windows. +* TSDBDriver.PROPERTY_KEY_USER: user name for logging in to the TDengine. The default value is 'root'. +* TSDBDriver.PROPERTY_KEY_PASSWORD: the user login password. 
The default value is 'taosdata'. +* TSDBDriver.PROPERTY_KEY_CONFIG_DIR: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is `/etc/taos` on Linux and `C:/TDengine/cfg on Windows`. * TSDBDriver.PROPERTY_KEY_CHARSET: character set used by the client. The default value is the system character set. * TSDBDriver.PROPERTY_KEY_LOCALE: client locale. The default value is the current system locale. * TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client. The default value is the current timezone of the system. @@ -259,8 +249,6 @@ For example, if you specify password as 'taosdata' in the URL and password as 't For details, see Client Configuration:[client configuration](https://www.taosdata.com/en/documentation/administrator#client) - - ### Create database and table ```java @@ -273,8 +261,6 @@ stmt.executeUpdate("use db"); stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); ``` - - ### Insert ```java @@ -285,8 +271,6 @@ System.out.println("insert " + affectedRows + " rows."); **Note**: 'now' is an internal system function. The default value is the current time of the computer where the client resides. 'now + 1s' indicates that the current time on the client is added by one second. The following time units are a(millisecond), s (second), m(minute), h(hour), d(day), w(week), n(month), and y(year). - - ### Query ```java @@ -305,8 +289,6 @@ while(resultSet.next()){ **Note**: The query is consistent with the operation of the relational database, and the index in ResultSet starts from 1. 
- - ### Handle exceptions ```java @@ -327,8 +309,6 @@ The Java connector may report three types of error codes: JDBC Driver (error cod - https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java - https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h - - ### Write data through parameter binding Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly improved parameter binding support for data write (INSERT) scenarios. Data can be written in the following way, avoiding SQL parsing and significantly improving the write performance.(**Note**: parameter binding is not supported in JDBC-RESTful) @@ -401,8 +381,6 @@ public void setNString(int columnIndex, ArrayList list, int size) throws **Note**: Both setString and setNString require the user to declare the column width of the corresponding column in the table definition in the size parameter. - - ### Data Subscription #### Subscribe @@ -414,8 +392,8 @@ TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from met parameters: * topic: the unique topic name of the subscription. -* sql: a select statement . -* restart: true if restart the subscription already exists; false if continue the previous subscription +* sql: a select statement. +* restart: true if restart the subscription already exists; false if continue the previous subscription. In the example above, a subscription named 'topic' is created which use the SQL statement 'select * from meters'. If the subscription already exists, it will continue with the previous query progress, rather than consuming all the data from scratch. @@ -451,8 +429,6 @@ The close method closes a subscription. If the parameter is true, the subscripti **Note**: the connection must be closed; otherwise, a connection leak may occur. 
- - ## Connection Pool ### HikariCP example @@ -510,7 +486,7 @@ As of TDengine V1.6.4.1, the function select server_status() is supported specif Select server_status() returns 1 on success, as shown below. -```txt +```sql taos> select server_status(); server_status()| ================ @@ -518,14 +494,10 @@ server_status()| Query OK, 1 row(s) in set (0.000141s) ``` - - ## Integrated with framework -- Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate -- Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring JdbcTemplate - - +- Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate. +- Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring JdbcTemplate. 
## FAQ From a1659f09862746aa477a8df4680f91f9ff256d9c Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 23 Aug 2021 12:02:29 +0800 Subject: [PATCH 209/239] [TD-6194]: taosdemo wrong data (#7516) --- src/kit/taosdemo/taosdemo.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 6099ac9803..f222266ee8 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5216,7 +5216,8 @@ static int64_t generateStbRowData( tmpLen = strlen(tmp); tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); } else { - errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); + errorPrint( "Not support data type: %s\n", + stbInfo->columns[i].dataType); return -1; } @@ -5229,8 +5230,7 @@ static int64_t generateStbRowData( return 0; } - dataLen -= 1; - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")"); + tstrncpy(pstr + dataLen - 1, ")", 2); verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen); verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); From 864318bca50970e8f26e8fc868620497c0f9b652 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 23 Aug 2021 13:24:02 +0800 Subject: [PATCH 210/239] [TD-6255] : fix html code typo in English Java doc. --- documentation20/en/08.connector/01.java/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index b5bbdc9949..de555b0a9c 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -44,7 +44,7 @@ In terms of implementation, the JDBC driver of TDengine is as consistent as poss - +
DifferenceJDBC-JNIJDBC-RESTful
Supported OSlinux、windowsall platform
Whether to install the Clientneeddo not need
Whether to upgrade the client after the server is upgradedneeddo not need
Write performanceJDBC-RESTful is 50% to 90% of JDBC-JNI
Read performanceJDBC-RESTful is no different from JDBC-JNI
Read performance JDBC-RESTful is no different from JDBC-JNI
**Note**: RESTful interfaces are stateless. Therefore, when using JDBC-restful, you should specify the database name in SQL before all table names and super table names, for example: From 95b1135eaa55b2a5d4cbad1de6f78fb4be5c27e5 Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 23 Aug 2021 13:31:01 +0800 Subject: [PATCH 211/239] [TD-6001]: fixed missing zlib.h error --- src/client/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 77417a24a4..0d06e5d39c 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -4,6 +4,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(jni) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc) AUX_SOURCE_DIRECTORY(src SRC) From 6bc27b402d7124901adb0553fdfdb9aca5712ff6 Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 23 Aug 2021 13:35:11 +0800 Subject: [PATCH 212/239] [TD-6005]: fixed missing zlib.h error --- src/client/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 77417a24a4..0d06e5d39c 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -4,6 +4,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(jni) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc) AUX_SOURCE_DIRECTORY(src SRC) From 607fe1b7d31464d8b7301ca7ccd8b04130812af3 Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 23 Aug 2021 16:25:26 +0800 Subject: [PATCH 213/239] Update docs.md --- .../cn/08.connector/01.java/docs.md | 137 +++++++----------- 1 file changed, 49 insertions(+), 88 deletions(-) diff --git 
a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index 641ef05a2e..4f0ee3702c 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -2,8 +2,6 @@ ## 总体介绍 -TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索下载。 - `taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 ![tdengine-connector](page://images/tdengine-jdbc-connector.png) @@ -14,12 +12,10 @@ TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实 * RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。 * JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。 -TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: +TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但TDengine与关系对象型数据库的使用场景和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: * TDengine 目前不支持针对单条数据记录的删除操作。 * 目前不支持事务操作。 -* 目前不支持嵌套查询(nested query)。 -* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。 ### JDBC-JNI和JDBC-RESTful的对比 @@ -50,9 +46,12 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 -注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。 +注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。例如: +```sql +INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6); +``` -### TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 +## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | -------------------- | ----------------- | -------- | @@ -65,7 +64,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 | 1.0.2 | 1.6.1.x 
及以上 | 1.8.x | | 1.0.1 | 1.6.1.x 及以上 | 1.8.x | -### TDengine DataType 和 Java DataType +## TDengine DataType 和 Java DataType TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: @@ -82,36 +81,27 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 | BINARY | byte array | | NCHAR | java.lang.String | -## 安装 +## 安装Java Connector -Java连接器支持的系统有: Linux 64/Windows x64/Windows x86。 - -**安装前准备:** - -- 已安装TDengine服务器端 -- 已安装好TDengine应用驱动,具体请参照 [安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) 章节 - -TDengine 为了方便 Java 应用使用,遵循 JDBC 标准(3.0)API 规范提供了 `taos-jdbcdriver` 实现。可以通过 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索并下载。 - -由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 +### 安装前准备 +使用Java Connector连接数据库前,需要具备以下条件: +1. Linux或Windows操作系统 +2. Java 1.8以上运行时环境 +3. TDengine-client(使用JDBC-JNI时必须,使用JDBC-RESTful时非必须) +**注意**:由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 - libtaos.so 在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 - - taos.dll 在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 -注意:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。 - -### 如何获取 TAOS-JDBCDriver - -**maven仓库** +**注意**:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。 +### 通过maven获取JDBC driver 目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。 - - [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) - 
[mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver) - [maven.aliyun](https://maven.aliyun.com/mvn/search) -maven 项目中使用如下 pom.xml 配置即可: +maven 项目中,在pom.xml 中添加以下依赖: ```xml-dtd com.taosdata.jdbc @@ -119,39 +109,22 @@ maven 项目中使用如下 pom.xml 配置即可: 2.0.18 ``` -**源码编译打包** -下载 TDengine 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。 +### 通过源码编译获取JDBC driver -### 示例程序 - -示例程序源码位于install_directory/examples/JDBC,有如下目录: - -JDBCDemo JDBC示例源程序 - -JDBCConnectorChecker JDBC安装校验源程序及jar包 - -Springbootdemo springboot示例源程序 - -SpringJdbcTemplate SpringJDBC模板 - -### 安装验证 - -运行如下指令: - -```Bash -cd {install_directory}/examples/JDBC/JDBCConnectorChecker -java -jar JDBCConnectorChecker.jar -host +可以通过下载TDengine的源码,自己编译最新版本的java connector +```shell +git clone https://github.com/taosdata/TDengine.git +cd TDengine/src/connector/jdbc +mvn clean package -Dmaven.test.skip=true ``` - -验证通过将打印出成功信息。 +编译后,在target目录下会产生taos-jdbcdriver-2.0.XX-dist.jar的jar包。 ## Java连接器的使用 ### 获取连接 #### 指定URL获取连接 - 通过指定URL获取连接,如下所示: ```java @@ -159,23 +132,19 @@ Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(jdbcUrl); ``` - 以上示例,使用 **JDBC-RESTful** 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要: - 1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”; 2. jdbcUrl 以“jdbc:TAOS-RS://”开头; 3. 
使用 6041 作为连接端口。 如果希望获得更好的写入和查询性能,Java 应用可以使用 **JDBC-JNI** 的driver,如下所示: - ```java Class.forName("com.taosdata.jdbc.TSDBDriver"); String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(jdbcUrl); ``` - 以上示例,使用了 JDBC-JNI 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 **注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库(Linux 下是 libtaos.so;Windows 下是 taos.dll)。 @@ -194,6 +163,9 @@ url中的配置参数如下: * charset:客户端使用的字符集,默认值为系统字符集。 * locale:客户端语言环境,默认值系统当前 locale。 * timezone:客户端使用的时区,默认值为系统当前时区。 +* batchfetch: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。 +* timestampFormat: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP':结果集中timestamp类型的字段为一个long值; 'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。 +* batchErrorIgnore:true:在执行Statement的executeBatch时,如果中间有一条sql执行失败,继续执行下面的sq了。false:不再执行失败sql后的任何语句。默认值为:false。 #### 指定URL和Properties获取连接 @@ -222,11 +194,13 @@ properties 中的配置参数如下: * TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。 * TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。 * TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。 +* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。 +* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP':结果集中timestamp类型的字段为一个long值; 'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。 +* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行Statement的executeBatch时,如果中间有一条sql执行失败,继续执行下面的sq了。false:不再执行失败sql后的任何语句。默认值为:false。 #### 使用客户端配置文件建立连接 当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。如下所示: - 1. 在 Java 应用中不指定 hostname 和 port ```java @@ -243,7 +217,6 @@ public Connection getConn() throws Exception{ ``` 2. 
在配置文件中指定 firstEp 和 secondEp - ``` # first fully qualified domain name (FQDN) for TDengine system firstEp cluster_node1:6030 @@ -424,9 +397,9 @@ public void setNString(int columnIndex, ArrayList list, int size) throws ``` 其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。 -### 订阅 +## 订阅 -#### 创建 +### 创建 ```java TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); @@ -440,7 +413,7 @@ TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from met 如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。 -#### 消费数据 +### 消费数据 ```java int total = 0; @@ -458,7 +431,7 @@ while(true) { `consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。 -#### 关闭订阅 +### 关闭订阅 ```java sub.close(true); @@ -466,7 +439,7 @@ sub.close(true); `close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。 -### 关闭资源 +## 关闭资源 ```java resultSet.close(); @@ -478,19 +451,8 @@ conn.close(); ## 与连接池使用 -**HikariCP** - -* 引入相应 HikariCP maven 依赖: - -```xml - - com.zaxxer - HikariCP - 3.4.1 - -``` - -* 使用示例如下: +### HikariCP +使用示例如下: ```java public static void main(String[] args) throws SQLException { @@ -522,19 +484,8 @@ conn.close(); > 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 > 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。 -**Druid** - -* 引入相应 Druid maven 依赖: - -```xml - - com.alibaba - druid - 1.1.20 - -``` - -* 使用示例如下: +### Druid +使用示例如下: ```java public static void main(String[] args) throws Exception { @@ -580,6 +531,16 @@ Query OK, 1 row(s) in set (0.000141s) * Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) * Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) +### 
示例程序 + +示例程序源码位于TDengine/test/examples/JDBC下: +* JDBCDemo:JDBC示例源程序 +* JDBCConnectorChecker:JDBC安装校验源程序及jar包 +* Springbootdemo:springboot示例源程序 +* SpringJdbcTemplate:SpringJDBC模板 + +请参考:![JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC) + ## 常见问题 * java.lang.UnsatisfiedLinkError: no taos in java.library.path From 915e1c0e53f80c2d49e1fe147c54085942f72fbd Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 23 Aug 2021 16:27:27 +0800 Subject: [PATCH 214/239] Update docs.md --- documentation20/cn/08.connector/01.java/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index 4f0ee3702c..fdb07ae179 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -531,7 +531,7 @@ Query OK, 1 row(s) in set (0.000141s) * Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) * Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) -### 示例程序 +## 示例程序 示例程序源码位于TDengine/test/examples/JDBC下: * JDBCDemo:JDBC示例源程序 From 78928a135e995d198f4970f673f64beb89fc0711 Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 23 Aug 2021 16:28:43 +0800 Subject: [PATCH 215/239] Update docs.md fix a spelling error --- documentation20/en/08.connector/01.java/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index de555b0a9c..fa8b4e1362 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -203,7 +203,7 @@ The configuration parameters 
in properties are as follows: * TSDBDriver.PROPERTY_KEY_LOCALE: client locale. The default value is the current system locale. * TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client. The default value is the current timezone of the system. * TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is flase. -* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'. +* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'. * TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true if you want to continue executing the rest of the SQL when error happens during execute the executeBatch method in Statement; false, false if the remaining SQL statements are not executed. Default value is false. 
#### Establishing a connection with configuration file From d872147e992a7339c85a31dcec7b01978f406c1a Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 23 Aug 2021 16:38:11 +0800 Subject: [PATCH 216/239] Update docs.md --- documentation20/en/08.connector/01.java/docs.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index fa8b4e1362..e9eb88242f 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -317,14 +317,17 @@ Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly impr Statement stmt = conn.createStatement(); Random r = new Random(); +// In the INSERT statement, the VALUES clause allows you to specify a specific column; If automatic table creation is adopted, the TAGS clause needs to set the parameter values of all TAGS columns TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)"); s.setTableName("w1"); +// set tags s.setTagInt(0, r.nextInt(10)); s.setTagString(1, "Beijing"); int numOfRows = 10; +// set values ArrayList ts = new ArrayList<>(); for (int i = 0; i < numOfRows; i++){ ts.add(System.currentTimeMillis() + i); @@ -341,9 +344,10 @@ for (int i = 0; i < numOfRows; i++){ } s.setString(2, s2, 10); +// The cache is not cleared after AddBatch. 
Do not bind new data again before ExecuteBatch s.columnDataAddBatch(); s.columnDataExecuteBatch(); - +// Clear the cache, after which you can bind new data(including table names, tags, values): s.columnDataClearBatch(); s.columnDataCloseBatch(); ``` @@ -499,6 +503,10 @@ Query OK, 1 row(s) in set (0.000141s) - Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate. - Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring JdbcTemplate. +## Example Codes +you see sample code here: ![JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC) + + ## FAQ - java.lang.UnsatisfiedLinkError: no taos in java.library.path From 298f97fe0b37a444c8a70f916a66dee06cd3169f Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Mon, 23 Aug 2021 16:38:29 +0800 Subject: [PATCH 217/239] remove dulplicate init --- tests/pytest/util/dnodes.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 0f4919ba96..5572e81f37 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -20,9 +20,9 @@ from util.log import * class TDSimClient: - def __init__(self): + def __init__(self, path): self.testCluster = False - + self.path = path self.cfgDict = { "numOfLogLines": "100000000", "numOfThreadsPerCore": "2.0", @@ -41,10 +41,7 @@ class TDSimClient: "jnidebugFlag": "135", "qdebugFlag": "135", "telemetryReporting": "0", - } - def init(self, path): - self.__init__() - self.path = path + } def getLogDir(self): self.logDir = "%s/sim/psim/log" % (self.path) @@ -480,8 +477,8 @@ class TDDnodes: for i in range(len(self.dnodes)): self.dnodes[i].init(self.path) - self.sim = TDSimClient() - self.sim.init(self.path) + self.sim = TDSimClient(self.path) + # 
self.sim.init(self.path) def setTestCluster(self, value): self.testCluster = value From 89b64600640d81ac31d742ce68dbf3e85e910896 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Mon, 23 Aug 2021 16:42:27 +0800 Subject: [PATCH 218/239] remove dulplicate init --- tests/pytest/util/dnodes.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 5572e81f37..b176035b57 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -478,7 +478,6 @@ class TDDnodes: self.dnodes[i].init(self.path) self.sim = TDSimClient(self.path) - # self.sim.init(self.path) def setTestCluster(self, value): self.testCluster = value From 6a0735a62a06e377f607d33d6a260046bd33bb10 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 23 Aug 2021 17:20:31 +0800 Subject: [PATCH 219/239] [TD-5331] : move to some other doc space. --- documentation20/cn/08.connector/docs.md | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 5b695b845a..364961ca63 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -315,10 +315,6 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线 1. 调用 `taos_stmt_init` 创建参数绑定对象; 2. 调用 `taos_stmt_prepare` 解析 INSERT 语句; 3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname` 来设置表名; - * 从 2.1.6.0 版本开始,对于向一个超级表下的多个子表同时写入数据(每个子表写入的数据较少,可能只有一行)的情形,提供了一个专用的优化接口 `taos_stmt_set_sub_tbname`,可以通过提前载入 meta 数据以及避免对 SQL 语法的重复解析来节省总体的处理时间(但这个优化方法并不支持自动建表语法)。具体使用方法如下: - 1. 必须先提前调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta; - 2. 然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname` 来设置表名; - 3. 后续子表用 `taos_stmt_set_sub_tbname` 来设置表名。 4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags` 来设置表名和 TAGS 的值; 5. 调用 `taos_stmt_bind_param_batch` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param` 以单行的方式设置 VALUES 的值; 6. 
调用 `taos_stmt_add_batch` 把当前绑定的参数加入批处理; @@ -362,12 +358,6 @@ typedef struct TAOS_BIND { (2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) 当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。 -- `int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name)` - - (2.1.6.0 版本新增,仅支持用于替换 INSERT 语句中、属于同一个超级表下的多个子表中、作为写入目标的第 2 个到第 n 个子表的表名) - 当 SQL 语句中的表名使用了 `?` 占位时,如果想要一批写入的表是多个属于同一个超级表的子表,那么可以使用此函数绑定除第一个子表之外的其他子表的表名。 - *注意:*在使用时,客户端必须先调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta,然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname`,后续子表用 `taos_stmt_set_sub_tbname`。 - - `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)` (2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) From ce587d9981eb54169201c952895c153fafb140f9 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 23 Aug 2021 17:39:32 +0800 Subject: [PATCH 220/239] [TD-6277] : remove outdated description for func "last_row()". --- documentation20/cn/12.taos-sql/docs.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 16b52f5773..48409537bb 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -1197,8 +1197,6 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 适用于:**表、超级表**。 - 说明:与LAST函数不同,LAST_ROW不支持时间范围限制,强制返回最后一条记录。 - 限制:LAST_ROW()不能与INTERVAL一起使用。 示例: From 068318ded98e906c77a38b8f283368f35debb12f Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Mon, 23 Aug 2021 17:57:34 +0800 Subject: [PATCH 221/239] [TD-6188] add new benchmark tool --- packaging/tools/make_install.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 7851587c82..55ca1174c9 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -142,6 +142,7 @@ function install_bin() { if [ "$osType" != "Darwin" ]; then ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : + ${csudo} rm -f ${bin_link_dir}/perfMonitor || : 
${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/set_core || : fi @@ -167,6 +168,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : [ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : fi From 708981bb2b095a8b6cbb4e8caf25af1118db6aff Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 23 Aug 2021 18:35:31 +0800 Subject: [PATCH 222/239] Hotfix/sangshuduo/td 6194 taosdemo wrong data (#7526) * [TD-6194]: taosdemo wrong data * fix few format mistakes. 
--- src/kit/taosdemo/taosdemo.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f222266ee8..977b51ee46 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2141,7 +2141,7 @@ static void xDumpFieldToFile(FILE* fp, const char* val, fprintf(fp, "%d", *((int32_t *)val)); break; case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%" PRId64, *((int64_t *)val)); + fprintf(fp, "%"PRId64"", *((int64_t *)val)); break; case TSDB_DATA_TYPE_FLOAT: fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); @@ -5242,7 +5242,7 @@ static int64_t generateData(char *recBuf, char **data_type, int64_t timestamp, int lenOfBinary) { memset(recBuf, 0, MAX_DATA_SIZE); char *pstr = recBuf; - pstr += sprintf(pstr, "(%" PRId64, timestamp); + pstr += sprintf(pstr, "(%"PRId64"", timestamp); int columnCount = g_args.num_of_CPR; @@ -5254,9 +5254,9 @@ static int64_t generateData(char *recBuf, char **data_type, } else if (strcasecmp(data_type[i % columnCount], "INT") == 0) { pstr += sprintf(pstr, ",%d", rand_int()); } else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) { - pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); } else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) { - pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); } else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) { pstr += sprintf(pstr, ",%10.4f", rand_float()); } else if (strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) { @@ -8069,7 +8069,7 @@ static void *specifiedTableQuery(void *sarg) { uint64_t currentPrintTime = taosGetTimestampMs(); uint64_t endTs = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n", + debugPrint("%s() LN%d, endTs=%"PRIu64" ms, startTs=%"PRIu64" ms\n", __func__, __LINE__, endTs, 
startTs); printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n", pThreadInfo->threadID, From ac3766acf383a7c7d2f0e8d608051f568db6c4b0 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 23 Aug 2021 19:23:57 +0800 Subject: [PATCH 223/239] [TD-5940] : describe FQDN resolving test mode of taos. --- documentation20/cn/11.administrator/docs.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index f9061200f9..ff44dd1225 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -800,7 +800,7 @@ taos -n sync -P 6042 -h `taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP` -从 2.1.7.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下: +从 2.1.8.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下: -n:设为“speed”时,表示对网络速度进行诊断。 -h:所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。 @@ -809,6 +809,15 @@ taos -n sync -P 6042 -h -l:单个网络包的大小(单位:字节)。最小值是 1024、最大值是 1024*1024*1024,默认值为 1000。 -S:网络封包的类型。可以是 TCP 或 UDP,默认值为 TCP。 +#### FQDN 解析速度诊断 + +`taos -n fqdn -h ` + +从 2.1.8.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下: + +-n:设为“fqdn”时,表示对 FQDN 解析进行诊断。 +-h:所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。 + #### 服务端日志 taosd 服务端日志文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143 。 From 8ca78abf3a271ef19492aca3ce87834e66e9c96e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 24 Aug 2021 06:51:54 +0800 Subject: [PATCH 224/239] [TD-5852]: taosdemo data generation race. 
(#7532) --- src/kit/taosdemo/taosdemo.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 977b51ee46..50f35faa63 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1336,7 +1336,7 @@ static char *rand_bool_str(){ static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randbool_buff + (cursor * BOOL_BUFF_LEN); + return g_randbool_buff + ((cursor % MAX_PREPARED_RAND) * BOOL_BUFF_LEN); } static int32_t rand_bool(){ @@ -1351,7 +1351,8 @@ static char *rand_tinyint_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randtinyint_buff + (cursor * TINYINT_BUFF_LEN); + return g_randtinyint_buff + + ((cursor % MAX_PREPARED_RAND) * TINYINT_BUFF_LEN); } static int32_t rand_tinyint() @@ -1367,7 +1368,8 @@ static char *rand_smallint_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randsmallint_buff + (cursor * SMALLINT_BUFF_LEN); + return g_randsmallint_buff + + ((cursor % MAX_PREPARED_RAND) * SMALLINT_BUFF_LEN); } static int32_t rand_smallint() @@ -1383,7 +1385,7 @@ static char *rand_int_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randint_buff + (cursor * INT_BUFF_LEN); + return g_randint_buff + ((cursor % MAX_PREPARED_RAND) * INT_BUFF_LEN); } static int32_t rand_int() @@ -1399,7 +1401,8 @@ static char *rand_bigint_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randbigint_buff + (cursor * BIGINT_BUFF_LEN); + return g_randbigint_buff + + ((cursor % MAX_PREPARED_RAND) * BIGINT_BUFF_LEN); } static int64_t rand_bigint() @@ -1415,7 +1418,7 @@ static char *rand_float_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randfloat_buff + (cursor * FLOAT_BUFF_LEN); + return 
g_randfloat_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN); } @@ -1432,7 +1435,8 @@ static char *demo_current_float_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_rand_current_buff + (cursor * FLOAT_BUFF_LEN); + return g_rand_current_buff + + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN); } static float UNUSED_FUNC demo_current_float() @@ -1449,7 +1453,8 @@ static char *demo_voltage_int_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_rand_voltage_buff + (cursor * INT_BUFF_LEN); + return g_rand_voltage_buff + + ((cursor % MAX_PREPARED_RAND) * INT_BUFF_LEN); } static int32_t UNUSED_FUNC demo_voltage_int() @@ -1464,7 +1469,7 @@ static char *demo_phase_float_str() { static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_rand_phase_buff + (cursor * FLOAT_BUFF_LEN); + return g_rand_phase_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN); } static float UNUSED_FUNC demo_phase_float(){ @@ -5199,7 +5204,8 @@ static int64_t generateStbRowData( "SMALLINT", 8)) { tmp = rand_smallint_str(); tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); + tstrncpy(pstr + dataLen, tmp, + min(tmpLen + 1, SMALLINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "TINYINT", 7)) { tmp = rand_tinyint_str(); @@ -5212,9 +5218,9 @@ static int64_t generateStbRowData( tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "TIMESTAMP", 9)) { - tmp = rand_int_str(); + tmp = rand_bigint_str(); tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN)); } else { errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); From 6456cdba3f68cc478f80c0e4bf851b1355cf5a7a Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Tue, 24 Aug 2021 
10:43:07 +0800 Subject: [PATCH 225/239] [TD-6009]restful support url set database test[ci skip] --- tests/script/http/httpTest | Bin 0 -> 23008 bytes tests/script/http/httpTest.c | 128 +++++++++++++++++++++++++++++++++++ tests/script/http/makefile | 2 + 3 files changed, 130 insertions(+) create mode 100755 tests/script/http/httpTest create mode 100644 tests/script/http/httpTest.c create mode 100644 tests/script/http/makefile diff --git a/tests/script/http/httpTest b/tests/script/http/httpTest new file mode 100755 index 0000000000000000000000000000000000000000..68aab9aa63bbf5c64593e591aaa6eb4c6621a7fb GIT binary patch literal 23008 zcmeHvdvsLSdGDSX9qH&jLc&OZVQ|0|!Fpl9JdCZ^z{nOLgJgqWhtbSPnjpq?dxrviY?uxHfjBTiGMAvF^ac-AGnwuMvh88)ti{(d1(pdd{ z`!PCmG;(t9>K}LA#Rko{zvuq;xA)m+pMCZ|gWmQnPKSf3zF&kkFqyn<8)rCpcI;Qvi|9a8Vm4Z@Bc1_hZZ zsLHzwdK8!6HK}IaDa)Iy9WKjwDoE{3sjffVy>3lif3T)M97!IiIk0|R&AK(UiD>O= z&L_X94C+%`w|7xi>sNeX-N5*Sm zDBiiPr9T=8b^5#eLyYsXavq9@%@B)+{6Q8o`v@8V{DP*?PesEK7Klb7p@7K}(ZIft z$wCLhChIe!kpxS`;^Bzd!we%~`UCrnK;J&2#~P)NcIMw2G%i6zYh$_O0r8$IEOzd!siYGq(d2vE6k(-`o> z&7OED#6Xn3)96r7cz|;2*w*&8<`!dh?dsaK>HHd)=^cnhq#1*P>XcW#3%MNp(Lc)5 zKjJD(j17Y7z=%+QzDQ$`2Y<Tt(FU3w$MvCMf_Z`(66)5uUhC; z7P{)UG#=AMYIz?Hf4hx;eXO(ta=zJa_8G9{s>;0+ELZ`A-idpC? zro=m7p`#(w>5zpkr#77TgoR$1mS*gTg^mVKr=u2nNt%jzo`tUNnZ!PBp_f_YPgv;I zab(;=mtBNwowm^FeyGwp3w^#K0-m?f7g*@mvezPTEdtjfa4iD=xe<7;bjh!Lhu_kD z!`ct(81oICHl5SweTQGwU*QE!uPp$1YI;Q>KUEQ zrzY=8<8-5(nruqrbc39lY)IoY0hyXymd5EuIW<|9#_0w*HR(v>G%=Z)eD@<&U%FAE z{%M?Ukf?teryC>cuW&?fq?V%B%6R;MzgClpzi7jsv*F*c;eTbr|J;T@Wy42o_>c|% zj15oP@Btehvf&Td@Gcv^&4znz_+}fv)`~a0-IjX&L0{^pzQaGe($U#IqTP)a_l*?) 
z7#+bkviWNO)AQ#+<^3Z-Hz4C1(N+`KaMhfTuKmtU$fIlLPhTuudI+t@FMxRK$f$f9p#C?@2}ta3ic`NmtK5i=AqYkVg=55__pJ!Z(48oDSvxJ+RD zfLRyo$8!+qS3u~Q7`h5jTyS>iSAfhn-jp=^qVe#XJl;$uK0Ad4y-6tuYFVV2~GL3bYw}74+ItONZ3`W}f(t&xr#)Rnha>Z~YlKct3E4^kmt zphC|6n#4!Q+%R|JBn?r%)OTU=OI(&a^!p_HB`(XWOtJeqp%l9j3WCrUijQUrb_4zS z#^IrVAlHtbeO$YOIH48h|NTl7H8Mn~FZBl5`~pM|U!6{pfAgu#8@Z_w5|RGVVx=hN zVenGi>fsm3HXHo1%y6H7f=m7Kzx)bAuW#6UHRxL<`n#!Jc0lp(UrAp#hu`Y3Uk&38 zt8b~T$3I`nd$wo7``rdM;T``Xo-2Ig@le97OMJQ?tQXIEUxPE=^Ap|+b;0h2v)=zj z=#M76mw>K5>zyR@%?a;R-GKu^*!i#<74CX*I63Yc@t(y%w3WsqRQwj+*j7%vHa=tuE=LyLX4c+zTjJ3~6PA_z3 z#Xa>)Tio~hQlB~gaj}1gVn(rlbyn;zo%O!WUZURcGF?uWpM-G)`<#VzP>{}}{f~on z0r0|bqn<+GH|N1oF}RhJ6mdE*m+_v%mHQFh9n{!oX0XqL{kE0e#o60H@47Ucd=<_5 z4z*`5`rV9C()vfM*3#@+4bYnKo}z2~=j(>MPQe1Hp0TR_>?&DCfx}(rECqfDi-*0Z z9K6s*_Xh8&M_!unp5~^ua$=0CN3|T@hT-0Oz6tlMk+maM+fQV-{UvVuw6tBc!F&3V zZ@|Jis%YpJyrSnRzX$nQjsAHMGcLB6t-&X}Uk$>|3GZ}*}-S{}U(A3$K`u8s1NX`8q z?`mK98=Avku6rMKJM$ZtxwPSja(>gEnrctI)0+C%rs;|w`wqY2@NM`}@=coG-}gY% z{Y?)vJ=kQt($iCGEstjYG_3OHg7lLBcjwyAwFq2`!2d@Pp!WonI(F>p^wh0Lczm6m z9d!-04K%|BZKG#JV&jS+F!emp9*XpueW3S)3i^t~%s=_U#uJLiqw$$nG7^_>btJ~$ zvB*5V2$I=gJR0)^{ieU$p9p!vJs!OLNtg)_=4Irkcw3Yq5Q&;rp}7sL{vGs5hLS7? zZuEydWA z0p`CuJ$(hR3AZ`7gR#H;V0xO~XMGbl;SRtl%zI*h9!zwO03HFn07yx_dvH9wlQ|B! 
z9gB+cbdn)D?J+ruhaaqPG1i(#k51Uz!q^(hz^6vCQR3cGQh7&d;Y0c%cKgDOpS*eb z4a6q>`|(!^U%4U?k%(^M&kcIL^nvJ9)|2=vL!I~@4R4A2v(A>{Tvwe7E^+>|;IDjd zdintArTI~Z;{OB4k0J&iWbofg^WOshZp!AT#LZ92waQ6wFvyb9s#xgN$q!{sT3vJb45wv zw8x54kpSua#l;e@qzNCA+N-4YSuGOe^!}-Z5?A}Jq*|K)!AH|k5*ed+sYq&0wO(#I zQhSfiNj_HHd1{mX(fpcHr2x#j=Y~j9jaAG2H{z*~v2#+N=ChR4UM96S=OPv!t#n=D z5-^&-Q^GnePpbSS5?69q2IKWZah!z3FpiJQ0$;l>*b`sQNj)h_evO(@Q{Q@Bpj3QgoLLhJTKuz39m?q&6zy8CG<#GFJYsE9TM8y z{H>_oC`7ilv~2WLcXcNtX42D8UwdqYZzsy1urdex)R{o1h_@Lo{Ma?)xfL zZE7oYC+InxraO87i$00QE`Wt>icWeEalXD_1At4Ja02+9+5tIT_ZQ8JwsBTCB>svkAvilQ~ryKD4(nU z7ZP&h*1%$(13wpX8uVKL^x+h8!~7LK^}91>g4kxD&9 zVjDQ((cgz~&PI+@>+L8rXA?*2^&b;*D@WGrXGv!>M;i4evew8EpH7!{P7_Bu^xq?E z%^caSuO$y!uj_#7Ui}#o+fs2ikUsrWWWdM0j_Gd^r;Q_~P6J5J9fiXH2lNRtux%a% z?U25Nbhek?1LQHikKEaz-w$wD{~C$iRoV~a37xL!oIS-~063zLlg@n$9s=j6{%tDf z{;J1-jOnzoKIZ}M&T;)gGVoyjR_L72$BAPU;I%s&*FPeiz1+9c`cdL^>rVi@%A6X2 zmREHGa{2e+Cudbj0Tq<1-^qW8QAq7Zeic!NAzllRg6H5?t{$iCw@^T?{uyMsEEU0J z%R!|EEx>IbXGznZ!z!fd3!)}>;FyvLkQ{b)6cgxvd{DyaP;R2 zh~X4IegRqTV=;3sgX#RL3VPai=2L!Q#R1BfLZ)EJysrS{{Sx$?f~EQ!Ky+%@f@P)u z6?uq#ld#{PM;7z`gj^_?(w9?#50T67OBarVUGPCE={xnS#QCt|1t2;4Cer_9IagtY zZ7{h8+T6K3Rv{u){0YYn5X&w?k(J#GJqMd#=_tw{0rk3AILI8u#ovMhmPO&rQ0n(XqjWrZoFSDh+8L z7>mNp(O)7=J~_DfkQDihkYH7}BI6jS5H^kvR(+4kj8sy`bB&%D#41y}o~4TI zkQMv(VAT()VjTrB&hDzZKRY(G3UWwy z3czlM74rFB>GQLLZ1Gp5%>7b^_zy}xRhnOXR6OrH`2s(raGh1&M`7BSdq(aj=gVub z&b-TC-RAa?~zfNUuRy1Yz;?s(S{9_jK*(I39rwS>D; z{t3Z9+O>2b(F^%nfgkCrx|0g8kgkt+Es6kh3++?jQaSSrs36zq#e1q=CeN>{{3(EI z^mJF%t%NTqs1$so>VJ~8%7T>~AG^EiFR0}fO8ny8Rn$2hiwibz{`sz|=Slwhf;Ns{ z>8jdC_!6amkB9|X_t3aH?GgksF9@ZAK=ydF8U#-EIKGfAqrsl%SHX0dV?{}Mp{rz3 zNtt6_o*NTW=XK0gvaxVe;fBIC%t>6!Sz+OAkicjIZWP#0isns~f%pCA< zDkS0za6->mw?-b<>y0Gq8tUu9 zkwAYk7-C#dBC`d%!#M2o+ePY3?1Qrj;?NNtJj&2!II3jEI`QtMHUR(WOi?h@Q`?7o z7UL$6#YluRNXdX{Y-wxvf`CfS<}@mtHRako z=ZZ%(xQQB6CgRq1<1wFOg}o&@m?E&onUKZV@0l+#W*SC(r9m$dnHTG4x2q5haw zyhn3Cua$sb(4*!385wb$(&p=ZCrK_}->elR^<|eXX_{W4k7oYU9QvrXSnr#f5!EX6 
zWm=hDr@8T`9fth(@vm3^K8onoG*ioG&Ky@A{pe3?t2hj59ze$(nnu9XO771Tblz01 zt;{BQ{|hf@j%6tF-jiDXrDRfDm_#vqNc%>T-=kGDBmHoVyr`#5>y<7D0rBjMj4GD49cj@5+%$(f^fEP+#QhKaX^ zf&Do2iX(acxPJi0;Gly;bNj%;DOFEP!!7a&&{H#DCL^KEy`e}bj(wap1O9lRZ?iZm z>#6DW)b#ih2P1*Lcr+4CCTfzAhr*Fy4S%Pa02A*i1N&-X@u-Qje$jXhu83%av;-s2 z?@#n8Hnxv~5$Lht+%bNj5a<5ljGxFe47w0*UJYL($$=d`;-x4Z(`qvcMcqp9UiZznV8q#xd<8r{jBbU^6@VImyC@xy^||3Mas zM)*0!1h%~L7rn9x5)20;dp-OIJ}@3jCW63dhpZ^6b?eTiZOjNH<3=FbpB&(aAZY`x z5%c$kSVyOCr?;uqXu)r%*SKRx+xDjBb}x5;1_mAs8bMfHJQ_9YI$LYj)z@!e2^?VL z{-Nf{NFv-D2?afYK7U-4y0di~H}P;N9<}(HmL!K!$KL*sh$J(3^(YMS=tClKB8V(u zr2P<$LND-{k&Gl0q2Oxn9}h9L6`~eHl;D$r>IK&VQ0L=9u+4j?*+&OBg`2y)ciCH_ zAG-o%8`C@bgb-|L-{IZuZJ`3l8x%m%Yv8}|!LSU3G5}5Ys0IZa2R(ZumVQOu!4~;e zZ)a1B&)W)VTxsjppa|a6cJgo#4HVG_GNOo+s&r(OIt*1^6cP@G40@%{@bZ6uI(*%@ zERr#U$1fQPe>yo67!lK+et&Ob^-S9`V<3vV99^h9#9V41*>8qbtL^9gO^prc6uea4 z#dme0OQGkfk%21 zG<$O4);X4yp3Brat5wH^)lsz`n@QK1TBppU=QFjwnMp6evXYe|@|Y?!wccvwx@h&; zZKWvrV&=(8FJXJjvdNdSSXR0_{XGz?mJ5rLYJEGCjsq|rD@Ej8temOuJy^Le?3TEE zgO#G>=QFilZsoezb?I}-ne+uL+d8catKMo|J5wIB-$pA%;5-qe59Zi;zB9gA&izw#f$T$$U%0@vU|0o^7pCe0qJr6qBKV-B23YVYV?mq=R zoBwmw>uqkQg7q&)#+I_#`8AiBKdWESUKt0Q-LK~J3eNVkn$u_Z^G)2&?0)!3j4#eC z*Y91BpNIIkW!J(ao+m%2OT+>abI|`7bdN<&Wxo!3w(mWB4NoQ>dRx_~Bn7-4bXynU z4BEAyjs88*-LT`f*}0G8owLV{AIxFr?K$ZAbNDlWcxLlw@f`AXobG1!B`{hN&Sr;R zL}a7WI(atwA4>c7MW@ftA%7fniW{@V?K#jrJmQ%@uYz9YtYYf@jpDvebn-|lP)jqBahCM2v*pVxv;cHA~Q zO;X;z9=nCpGs`4wuavj1()Q0G|L7d_Kb?b4>(>-#yPx!~!!z3*_Fd3#$U=W}4*OyS z;p5gaYHRDn|5*ziy8>~up*BkM*Y2=k`g^gP(@dC2Z0irOnNPGB=715PkF?O^P%vur z_D8$@{YDUvUYNTl4`3cV5bF<_p&-6ul2wAfg%URW@wopWrs!t;AijX&AHWngIWTY# zDi#d0zOb1gYJKi9w%}pMYk0S}8V2+-WP{9T-Lt)ETU$#ehwo|y3D2O0*C#FbT6Z#I zYx|Dorgme;mMy!yokk};7m-!^5Q`B=B)Ql8TP=-^)=#pi8LQa(n#udPuiJ=k#AM3z zC!S1_5fU?d{;3%o%l7e_8SWbKnVT5`eLIHll+GlnM|Ya;XL4qJr6!ZDD$PHRLvhb! 
zXFThPkL6?vpkLs5MSQ&mG0ymu4S0#D(dUl@`8R;D(ts7Hh(XVPcp#IX=gBme;ZEka zdNKtq%}HJ}vEE^HClb;Z{`jxV(MN z`kAB5F9&5>5RaglG*+89IDkhqKr=2NK3F7jcmxl{W2_e2y+gHF*Q~{YNep|*4_Zjw z$uPcDhSy+1s;RjRt1P{Yi}zs_h1CWRMo@%+c&g@{{aE_Ix=IFOfEN$-`$<9OV*Mto zfYq9z13+n9fOM4i_}WmP90~fcE|JEB4KXAL4V6bxSfYS2p^i3X zwHS*AFvNfs>Q44T(jV!?z$MXeq$jHK{_gI0Xurbv!j+Ul1hb65|7Rq+?cr93Ihsi7 zc~e3A%%5bf=@UTk-Y2cE?t2P)tis6KAN$sUX4T(~`v)Zj?bBOzpBGY$(fd@ZzIrZD z(4zz;wCmF@0Bj|Xs``(~@)fL>dU!j*lihzmsCfI3uD^P2Rxl#(5 zW+TW@OqITRu2=Aa6jbFaeN}(jUP1cJQcgYpE2#Eokv{3z^`8NaVxsJ0vL%v)a#|;e zqO3H6%rX2~^*vHg!A4uaZRXkE*!0!9gMvz4m8ozAPuTR;`hx9Z z3I>&+g!b}(Xwz5gEDAnhYd?GWm!OXqr~DeEBGh_}`ktm;Uf~M93!PGnzS`gDsUd4K z{Z(KyxYGX(7}R&=u#;ZjQQt>ZfuZq(lG0P{_;t{zifR4ZNebW1ln*BA`V-U{ePt&X z%2s{WC?)F!ROQ(96;A61WJ{H=o*&%5qkfqzUy3WUOzE#mQ$&V*rKBM3PgQxr5x<+H zLAzbM?8-Ux_esUORiLDCyFP7Av(rfQ%zB~l6C2ZpvX(jY|8k?yzflENptH)_A@%L` zJhxdWykWawRQXn@u02@<2V>0wyDaN}1JJejQ{hnKHw`Z~7rEjSa&epXy;+kgSJ7xW S)+WMPpWPuO?zJh{Q1&0v)9gzC literal 0 HcmV?d00001 diff --git a/tests/script/http/httpTest.c b/tests/script/http/httpTest.c new file mode 100644 index 0000000000..36ce6b95ba --- /dev/null +++ b/tests/script/http/httpTest.c @@ -0,0 +1,128 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAXLINE 1024 + +typedef struct { + pthread_t pid; + int threadId; + int rows; + int tables; +} ThreadObj; + +void post(char *ip,int port,char *page,char *msg) { + int sockfd,n; + char recvline[MAXLINE]; + struct sockaddr_in servaddr; + char content[4096]; + char content_page[50]; + sprintf(content_page,"POST /%s HTTP/1.1\r\n",page); + char content_host[50]; + sprintf(content_host,"HOST: %s:%d\r\n",ip,port); + char content_type[] = "Content-Type: text/plain\r\n"; + char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + char content_len[50]; + sprintf(content_len,"Content-Length: %ld\r\n\r\n",strlen(msg)); + sprintf(content,"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg); + if((sockfd = 
socket(AF_INET,SOCK_STREAM,0)) < 0) { + printf("socket error\n"); + } + bzero(&servaddr,sizeof(servaddr)); + servaddr.sin_family = AF_INET; + servaddr.sin_port = htons(port); + if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) { + printf("inet_pton error\n"); + } + if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) { + printf("connect error\n"); + } + write(sockfd,content,strlen(content)); + printf("%s\n", content); + while((n = read(sockfd,recvline,MAXLINE)) > 0) { + recvline[n] = 0; + if(fputs(recvline,stdout) == EOF) { + printf("fputs error\n"); + } + } + if(n < 0) { + printf("read error\n"); + } +} + +void singleThread() { + char ip[] = "127.0.0.1"; + int port = 6041; + char page[] = "rest/sql"; + char page1[] = "rest/sql/db1"; + char page2[] = "rest/sql/db2"; + char nonexit[] = "rest/sql/xxdb"; + + post(ip,port,page,"drop database if exists db1"); + post(ip,port,page,"create database if not exists db1"); + post(ip,port,page,"drop database if exists db2"); + post(ip,port,page,"create database if not exists db2"); + post(ip,port,page1,"create table t11 (ts timestamp, c1 int)"); + post(ip,port,page2,"create table t21 (ts timestamp, c1 int)"); + post(ip,port,page1,"insert into t11 values (now, 1)"); + post(ip,port,page2,"insert into t21 values (now, 2)"); + post(ip,port,nonexit,"create database if not exists db3"); +} + +void execute(void *params) { + char ip[] = "127.0.0.1"; + int port = 6041; + char page[] = "rest/sql"; + char *unique = calloc(1, 1024); + char *sql = calloc(1, 1024); + ThreadObj *pThread = (ThreadObj *)params; + printf("Thread %d started\n", pThread->threadId); + sprintf(unique, "rest/sql/db%d",pThread->threadId); + sprintf(sql, "drop database if exists db%d", pThread->threadId); + post(ip,port,page, sql); + sprintf(sql, "create database if not exists db%d", pThread->threadId); + post(ip,port,page, sql); + for (int i = 0; i < pThread->tables; i++) { + sprintf(sql, "create table t%d (ts timestamp, c1 int)", i); + 
post(ip,port,unique, sql); + } + for (int i = 0; i < pThread->rows; i++) { + sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId); + post(ip,port,unique, sql); + } + free(unique); + free(sql); + return; +} + +void multiThread() { + int numOfThreads = 100; + int numOfTables = 100; + int numOfRows = 1; + ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj)); + for (int i = 0; i < numOfThreads; i++) { + ThreadObj *pthread = threads + i; + pthread_attr_t thattr; + pthread->threadId = i + 1; + pthread->rows = numOfRows; + pthread->tables = numOfTables; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + pthread_create(&pthread->pid, &thattr, (void *(*)(void *))execute, pthread); + } + for (int i = 0; i < numOfThreads; i++) { + pthread_join(threads[i].pid, NULL); + } + free(threads); +} + +int main() { + singleThread(); + multiThread(); + exit(0); +} \ No newline at end of file diff --git a/tests/script/http/makefile b/tests/script/http/makefile new file mode 100644 index 0000000000..d1be683eda --- /dev/null +++ b/tests/script/http/makefile @@ -0,0 +1,2 @@ +all: + gcc -g httpTest.c -o httpTest -lpthread \ No newline at end of file From f0b8a9eb6d1c2d24cfca2abc403f4437e63e7497 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Tue, 24 Aug 2021 10:44:42 +0800 Subject: [PATCH 226/239] remove bin file[ci skip] --- tests/script/http/httpTest | Bin 23008 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100755 tests/script/http/httpTest diff --git a/tests/script/http/httpTest b/tests/script/http/httpTest deleted file mode 100755 index 68aab9aa63bbf5c64593e591aaa6eb4c6621a7fb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23008 zcmeHvdvsLSdGDSX9qH&jLc&OZVQ|0|!Fpl9JdCZ^z{nOLgJgqWhtbSPnjpq?dxrviY?uxHfjBTiGMAvF^ac-AGnwuMvh88)ti{(d1(pdd{ z`!PCmG;(t9>K}LA#Rko{zvuq;xA)m+pMCZ|gWmQnPKSf3zF&kkFqyn<8)rCpcI;Qvi|9a8Vm4Z@Bc1_hZZ 
zsLHzwdK8!6HK}IaDa)Iy9WKjwDoE{3sjffVy>3lif3T)M97!IiIk0|R&AK(UiD>O= z&L_X94C+%`w|7xi>sNeX-N5*Sm zDBiiPr9T=8b^5#eLyYsXavq9@%@B)+{6Q8o`v@8V{DP*?PesEK7Klb7p@7K}(ZIft z$wCLhChIe!kpxS`;^Bzd!we%~`UCrnK;J&2#~P)NcIMw2G%i6zYh$_O0r8$IEOzd!siYGq(d2vE6k(-`o> z&7OED#6Xn3)96r7cz|;2*w*&8<`!dh?dsaK>HHd)=^cnhq#1*P>XcW#3%MNp(Lc)5 zKjJD(j17Y7z=%+QzDQ$`2Y<Tt(FU3w$MvCMf_Z`(66)5uUhC; z7P{)UG#=AMYIz?Hf4hx;eXO(ta=zJa_8G9{s>;0+ELZ`A-idpC? zro=m7p`#(w>5zpkr#77TgoR$1mS*gTg^mVKr=u2nNt%jzo`tUNnZ!PBp_f_YPgv;I zab(;=mtBNwowm^FeyGwp3w^#K0-m?f7g*@mvezPTEdtjfa4iD=xe<7;bjh!Lhu_kD z!`ct(81oICHl5SweTQGwU*QE!uPp$1YI;Q>KUEQ zrzY=8<8-5(nruqrbc39lY)IoY0hyXymd5EuIW<|9#_0w*HR(v>G%=Z)eD@<&U%FAE z{%M?Ukf?teryC>cuW&?fq?V%B%6R;MzgClpzi7jsv*F*c;eTbr|J;T@Wy42o_>c|% zj15oP@Btehvf&Td@Gcv^&4znz_+}fv)`~a0-IjX&L0{^pzQaGe($U#IqTP)a_l*?) z7#+bkviWNO)AQ#+<^3Z-Hz4C1(N+`KaMhfTuKmtU$fIlLPhTuudI+t@FMxRK$f$f9p#C?@2}ta3ic`NmtK5i=AqYkVg=55__pJ!Z(48oDSvxJ+RD zfLRyo$8!+qS3u~Q7`h5jTyS>iSAfhn-jp=^qVe#XJl;$uK0Ad4y-6tuYFVV2~GL3bYw}74+ItONZ3`W}f(t&xr#)Rnha>Z~YlKct3E4^kmt zphC|6n#4!Q+%R|JBn?r%)OTU=OI(&a^!p_HB`(XWOtJeqp%l9j3WCrUijQUrb_4zS z#^IrVAlHtbeO$YOIH48h|NTl7H8Mn~FZBl5`~pM|U!6{pfAgu#8@Z_w5|RGVVx=hN zVenGi>fsm3HXHo1%y6H7f=m7Kzx)bAuW#6UHRxL<`n#!Jc0lp(UrAp#hu`Y3Uk&38 zt8b~T$3I`nd$wo7``rdM;T``Xo-2Ig@le97OMJQ?tQXIEUxPE=^Ap|+b;0h2v)=zj z=#M76mw>K5>zyR@%?a;R-GKu^*!i#<74CX*I63Yc@t(y%w3WsqRQwj+*j7%vHa=tuE=LyLX4c+zTjJ3~6PA_z3 z#Xa>)Tio~hQlB~gaj}1gVn(rlbyn;zo%O!WUZURcGF?uWpM-G)`<#VzP>{}}{f~on z0r0|bqn<+GH|N1oF}RhJ6mdE*m+_v%mHQFh9n{!oX0XqL{kE0e#o60H@47Ucd=<_5 z4z*`5`rV9C()vfM*3#@+4bYnKo}z2~=j(>MPQe1Hp0TR_>?&DCfx}(rECqfDi-*0Z z9K6s*_Xh8&M_!unp5~^ua$=0CN3|T@hT-0Oz6tlMk+maM+fQV-{UvVuw6tBc!F&3V zZ@|Jis%YpJyrSnRzX$nQjsAHMGcLB6t-&X}Uk$>|3GZ}*}-S{}U(A3$K`u8s1NX`8q z?`mK98=Avku6rMKJM$ZtxwPSja(>gEnrctI)0+C%rs;|w`wqY2@NM`}@=coG-}gY% z{Y?)vJ=kQt($iCGEstjYG_3OHg7lLBcjwyAwFq2`!2d@Pp!WonI(F>p^wh0Lczm6m z9d!-04K%|BZKG#JV&jS+F!emp9*XpueW3S)3i^t~%s=_U#uJLiqw$$nG7^_>btJ~$ zvB*5V2$I=gJR0)^{ieU$p9p!vJs!OLNtg)_=4Irkcw3Yq5Q&;rp}7sL{vGs5hLS7? 
zZuEydWA z0p`CuJ$(hR3AZ`7gR#H;V0xO~XMGbl;SRtl%zI*h9!zwO03HFn07yx_dvH9wlQ|B! z9gB+cbdn)D?J+ruhaaqPG1i(#k51Uz!q^(hz^6vCQR3cGQh7&d;Y0c%cKgDOpS*eb z4a6q>`|(!^U%4U?k%(^M&kcIL^nvJ9)|2=vL!I~@4R4A2v(A>{Tvwe7E^+>|;IDjd zdintArTI~Z;{OB4k0J&iWbofg^WOshZp!AT#LZ92waQ6wFvyb9s#xgN$q!{sT3vJb45wv zw8x54kpSua#l;e@qzNCA+N-4YSuGOe^!}-Z5?A}Jq*|K)!AH|k5*ed+sYq&0wO(#I zQhSfiNj_HHd1{mX(fpcHr2x#j=Y~j9jaAG2H{z*~v2#+N=ChR4UM96S=OPv!t#n=D z5-^&-Q^GnePpbSS5?69q2IKWZah!z3FpiJQ0$;l>*b`sQNj)h_evO(@Q{Q@Bpj3QgoLLhJTKuz39m?q&6zy8CG<#GFJYsE9TM8y z{H>_oC`7ilv~2WLcXcNtX42D8UwdqYZzsy1urdex)R{o1h_@Lo{Ma?)xfL zZE7oYC+InxraO87i$00QE`Wt>icWeEalXD_1At4Ja02+9+5tIT_ZQ8JwsBTCB>svkAvilQ~ryKD4(nU z7ZP&h*1%$(13wpX8uVKL^x+h8!~7LK^}91>g4kxD&9 zVjDQ((cgz~&PI+@>+L8rXA?*2^&b;*D@WGrXGv!>M;i4evew8EpH7!{P7_Bu^xq?E z%^caSuO$y!uj_#7Ui}#o+fs2ikUsrWWWdM0j_Gd^r;Q_~P6J5J9fiXH2lNRtux%a% z?U25Nbhek?1LQHikKEaz-w$wD{~C$iRoV~a37xL!oIS-~063zLlg@n$9s=j6{%tDf z{;J1-jOnzoKIZ}M&T;)gGVoyjR_L72$BAPU;I%s&*FPeiz1+9c`cdL^>rVi@%A6X2 zmREHGa{2e+Cudbj0Tq<1-^qW8QAq7Zeic!NAzllRg6H5?t{$iCw@^T?{uyMsEEU0J z%R!|EEx>IbXGznZ!z!fd3!)}>;FyvLkQ{b)6cgxvd{DyaP;R2 zh~X4IegRqTV=;3sgX#RL3VPai=2L!Q#R1BfLZ)EJysrS{{Sx$?f~EQ!Ky+%@f@P)u z6?uq#ld#{PM;7z`gj^_?(w9?#50T67OBarVUGPCE={xnS#QCt|1t2;4Cer_9IagtY zZ7{h8+T6K3Rv{u){0YYn5X&w?k(J#GJqMd#=_tw{0rk3AILI8u#ovMhmPO&rQ0n(XqjWrZoFSDh+8L z7>mNp(O)7=J~_DfkQDihkYH7}BI6jS5H^kvR(+4kj8sy`bB&%D#41y}o~4TI zkQMv(VAT()VjTrB&hDzZKRY(G3UWwy z3czlM74rFB>GQLLZ1Gp5%>7b^_zy}xRhnOXR6OrH`2s(raGh1&M`7BSdq(aj=gVub z&b-TC-RAa?~zfNUuRy1Yz;?s(S{9_jK*(I39rwS>D; z{t3Z9+O>2b(F^%nfgkCrx|0g8kgkt+Es6kh3++?jQaSSrs36zq#e1q=CeN>{{3(EI z^mJF%t%NTqs1$so>VJ~8%7T>~AG^EiFR0}fO8ny8Rn$2hiwibz{`sz|=Slwhf;Ns{ z>8jdC_!6amkB9|X_t3aH?GgksF9@ZAK=ydF8U#-EIKGfAqrsl%SHX0dV?{}Mp{rz3 zNtt6_o*NTW=XK0gvaxVe;fBIC%t>6!Sz+OAkicjIZWP#0isns~f%pCA< zDkS0za6->mw?-b<>y0Gq8tUu9 zkwAYk7-C#dBC`d%!#M2o+ePY3?1Qrj;?NNtJj&2!II3jEI`QtMHUR(WOi?h@Q`?7o z7UL$6#YluRNXdX{Y-wxvf`CfS<}@mtHRako z=ZZ%(xQQB6CgRq1<1wFOg}o&@m?E&onUKZV@0l+#W*SC(r9m$dnHTG4x2q5haw 
zyhn3Cua$sb(4*!385wb$(&p=ZCrK_}->elR^<|eXX_{W4k7oYU9QvrXSnr#f5!EX6 zWm=hDr@8T`9fth(@vm3^K8onoG*ioG&Ky@A{pe3?t2hj59ze$(nnu9XO771Tblz01 zt;{BQ{|hf@j%6tF-jiDXrDRfDm_#vqNc%>T-=kGDBmHoVyr`#5>y<7D0rBjMj4GD49cj@5+%$(f^fEP+#QhKaX^ zf&Do2iX(acxPJi0;Gly;bNj%;DOFEP!!7a&&{H#DCL^KEy`e}bj(wap1O9lRZ?iZm z>#6DW)b#ih2P1*Lcr+4CCTfzAhr*Fy4S%Pa02A*i1N&-X@u-Qje$jXhu83%av;-s2 z?@#n8Hnxv~5$Lht+%bNj5a<5ljGxFe47w0*UJYL($$=d`;-x4Z(`qvcMcqp9UiZznV8q#xd<8r{jBbU^6@VImyC@xy^||3Mas zM)*0!1h%~L7rn9x5)20;dp-OIJ}@3jCW63dhpZ^6b?eTiZOjNH<3=FbpB&(aAZY`x z5%c$kSVyOCr?;uqXu)r%*SKRx+xDjBb}x5;1_mAs8bMfHJQ_9YI$LYj)z@!e2^?VL z{-Nf{NFv-D2?afYK7U-4y0di~H}P;N9<}(HmL!K!$KL*sh$J(3^(YMS=tClKB8V(u zr2P<$LND-{k&Gl0q2Oxn9}h9L6`~eHl;D$r>IK&VQ0L=9u+4j?*+&OBg`2y)ciCH_ zAG-o%8`C@bgb-|L-{IZuZJ`3l8x%m%Yv8}|!LSU3G5}5Ys0IZa2R(ZumVQOu!4~;e zZ)a1B&)W)VTxsjppa|a6cJgo#4HVG_GNOo+s&r(OIt*1^6cP@G40@%{@bZ6uI(*%@ zERr#U$1fQPe>yo67!lK+et&Ob^-S9`V<3vV99^h9#9V41*>8qbtL^9gO^prc6uea4 z#dme0OQGkfk%21 zG<$O4);X4yp3Brat5wH^)lsz`n@QK1TBppU=QFjwnMp6evXYe|@|Y?!wccvwx@h&; zZKWvrV&=(8FJXJjvdNdSSXR0_{XGz?mJ5rLYJEGCjsq|rD@Ej8temOuJy^Le?3TEE zgO#G>=QFilZsoezb?I}-ne+uL+d8catKMo|J5wIB-$pA%;5-qe59Zi;zB9gA&izw#f$T$$U%0@vU|0o^7pCe0qJr6qBKV-B23YVYV?mq=R zoBwmw>uqkQg7q&)#+I_#`8AiBKdWESUKt0Q-LK~J3eNVkn$u_Z^G)2&?0)!3j4#eC z*Y91BpNIIkW!J(ao+m%2OT+>abI|`7bdN<&Wxo!3w(mWB4NoQ>dRx_~Bn7-4bXynU z4BEAyjs88*-LT`f*}0G8owLV{AIxFr?K$ZAbNDlWcxLlw@f`AXobG1!B`{hN&Sr;R zL}a7WI(atwA4>c7MW@ftA%7fniW{@V?K#jrJmQ%@uYz9YtYYf@jpDvebn-|lP)jqBahCM2v*pVxv;cHA~Q zO;X;z9=nCpGs`4wuavj1()Q0G|L7d_Kb?b4>(>-#yPx!~!!z3*_Fd3#$U=W}4*OyS z;p5gaYHRDn|5*ziy8>~up*BkM*Y2=k`g^gP(@dC2Z0irOnNPGB=715PkF?O^P%vur z_D8$@{YDUvUYNTl4`3cV5bF<_p&-6ul2wAfg%URW@wopWrs!t;AijX&AHWngIWTY# zDi#d0zOb1gYJKi9w%}pMYk0S}8V2+-WP{9T-Lt)ETU$#ehwo|y3D2O0*C#FbT6Z#I zYx|Dorgme;mMy!yokk};7m-!^5Q`B=B)Ql8TP=-^)=#pi8LQa(n#udPuiJ=k#AM3z zC!S1_5fU?d{;3%o%l7e_8SWbKnVT5`eLIHll+GlnM|Ya;XL4qJr6!ZDD$PHRLvhb! 
zXFThPkL6?vpkLs5MSQ&mG0ymu4S0#D(dUl@`8R;D(ts7Hh(XVPcp#IX=gBme;ZEka zdNKtq%}HJ}vEE^HClb;Z{`jxV(MN z`kAB5F9&5>5RaglG*+89IDkhqKr=2NK3F7jcmxl{W2_e2y+gHF*Q~{YNep|*4_Zjw z$uPcDhSy+1s;RjRt1P{Yi}zs_h1CWRMo@%+c&g@{{aE_Ix=IFOfEN$-`$<9OV*Mto zfYq9z13+n9fOM4i_}WmP90~fcE|JEB4KXAL4V6bxSfYS2p^i3X zwHS*AFvNfs>Q44T(jV!?z$MXeq$jHK{_gI0Xurbv!j+Ul1hb65|7Rq+?cr93Ihsi7 zc~e3A%%5bf=@UTk-Y2cE?t2P)tis6KAN$sUX4T(~`v)Zj?bBOzpBGY$(fd@ZzIrZD z(4zz;wCmF@0Bj|Xs``(~@)fL>dU!j*lihzmsCfI3uD^P2Rxl#(5 zW+TW@OqITRu2=Aa6jbFaeN}(jUP1cJQcgYpE2#Eokv{3z^`8NaVxsJ0vL%v)a#|;e zqO3H6%rX2~^*vHg!A4uaZRXkE*!0!9gMvz4m8ozAPuTR;`hx9Z z3I>&+g!b}(Xwz5gEDAnhYd?GWm!OXqr~DeEBGh_}`ktm;Uf~M93!PGnzS`gDsUd4K z{Z(KyxYGX(7}R&=u#;ZjQQt>ZfuZq(lG0P{_;t{zifR4ZNebW1ln*BA`V-U{ePt&X z%2s{WC?)F!ROQ(96;A61WJ{H=o*&%5qkfqzUy3WUOzE#mQ$&V*rKBM3PgQxr5x<+H zLAzbM?8-Ux_esUORiLDCyFP7Av(rfQ%zB~l6C2ZpvX(jY|8k?yzflENptH)_A@%L` zJhxdWykWawRQXn@u02@<2V>0wyDaN}1JJejQ{hnKHw`Z~7rEjSa&epXy;+kgSJ7xW S)+WMPpWPuO?zJh{Q1&0v)9gzC From 98a5ad09f0603b8dde1b15ec77ba89c239f0c7aa Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 24 Aug 2021 12:12:14 +0800 Subject: [PATCH 227/239] [TD-5466] handle invalid Escapes --- src/kit/shell/src/shellEngine.c | 8 ++++++-- src/util/src/tcompare.c | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index bf19394d05..efc37403b4 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -254,8 +254,12 @@ int32_t shellRunCommand(TAOS* con, char* command) { } if (c == '\\') { - esc = true; - continue; + if (quote != 0 && (*command == '_' || *command == '\\')) { + //DO nothing + } else { + esc = true; + continue; + } } if (quote == c) { diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 36480418c9..cbb6747052 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -262,6 +262,7 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat c1 = str[j++]; if (j <= size) { 
+ if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; } if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) { continue; } From 8931394a8d6e9d19cb45e32f4ebdac0319533677 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 24 Aug 2021 13:38:44 +0800 Subject: [PATCH 228/239] [TD-6303]: taosdemo -f miss filename. (#7539) --- src/kit/taosdemo/taosdemo.c | 499 +++++++++++++++++------------------- 1 file changed, 239 insertions(+), 260 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 50f35faa63..5d851eafd0 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -659,6 +659,13 @@ static FILE * g_fpOfInsertResult = NULL; fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0) #define errorPrint(fmt, ...) \ + do {\ + fprintf(stderr, " \033[31m");\ + fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\ + fprintf(stderr, " \033[0m");\ + } while(0) + +#define errorPrint2(fmt, ...) \ do {\ struct tm Tm, *ptm;\ struct timeval timeSecs; \ @@ -671,8 +678,8 @@ static FILE * g_fpOfInsertResult = NULL; ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,\ ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec,\ taosGetSelfPthreadId());\ - fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\ fprintf(stderr, " \033[0m");\ + errorPrint(fmt, __VA_ARGS__);\ } while(0) // for strncpy buffer overflow @@ -815,6 +822,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-f") == 0) { arguments->demo_mode = false; + + if (NULL == argv[i+1]) { + printHelp(); + errorPrint("%s", "\n\t-f need a valid json file following!\n"); + exit(EXIT_FAILURE); + } arguments->metaFile = argv[++i]; } else if (strcmp(argv[i], "-c") == 0) { if (argc == i+1) { @@ -1227,7 +1240,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); if (code != 0) { if (!quiet) { - 
errorPrint("Failed to execute %s, reason: %s\n", + errorPrint2("Failed to execute %s, reason: %s\n", command, taos_errstr(res)); } taos_free_result(res); @@ -1249,7 +1262,7 @@ static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo) { pThreadInfo->fp = fopen(pThreadInfo->filePath, "at"); if (pThreadInfo->fp == NULL) { - errorPrint( + errorPrint2( "%s() LN%d, failed to open result file: %s, result will not save to file\n", __func__, __LINE__, pThreadInfo->filePath); return; @@ -1268,7 +1281,7 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { char* databuf = (char*) calloc(1, 100*1024*1024); if (databuf == NULL) { - errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", + errorPrint2("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", __func__, __LINE__); return ; } @@ -1308,7 +1321,7 @@ static void selectAndGetResult( if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { TAOS_RES *res = taos_query(pThreadInfo->taos, command); if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", + errorPrint2("%s() LN%d, failed to execute sql:%s, reason:%s\n", __func__, __LINE__, command, taos_errstr(res)); taos_free_result(res); return; @@ -1327,7 +1340,7 @@ static void selectAndGetResult( } } else { - errorPrint("%s() LN%d, unknown query mode: %s\n", + errorPrint2("%s() LN%d, unknown query mode: %s\n", __func__, __LINE__, g_queryInfo.queryMode); } } @@ -2177,7 +2190,7 @@ static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { FILE* fp = fopen(fname, "at"); if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file: %s\n", + errorPrint2("%s() LN%d, failed to open file: %s\n", __func__, __LINE__, fname); return -1; } @@ -2224,7 +2237,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { int32_t code = taos_errno(res); if (code != 0) { - errorPrint( "failed to run , reason: %s\n", + errorPrint2("failed 
to run , reason: %s\n", taos_errstr(res)); return -1; } @@ -2240,7 +2253,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); if (dbInfos[count] == NULL) { - errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count); + errorPrint2("failed to allocate memory for some dbInfo[%d]\n", count); return -1; } @@ -2393,7 +2406,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port request_buf = malloc(req_buf_len); if (NULL == request_buf) { - errorPrint("%s", "ERROR, cannot allocate memory.\n"); + errorPrint("%s", "cannot allocate memory.\n"); exit(EXIT_FAILURE); } @@ -2532,7 +2545,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); if (NULL == dataBuf) { - errorPrint("%s() LN%d, calloc failed! size:%d\n", + errorPrint2("%s() LN%d, calloc failed! 
size:%d\n", __func__, __LINE__, TSDB_MAX_SQL_LEN+1); return NULL; } @@ -2632,7 +2645,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64",", rand_bigint()); } else { - errorPrint("No support data type: %s\n", stbInfo->tags[i].dataType); + errorPrint2("No support data type: %s\n", stbInfo->tags[i].dataType); tmfree(dataBuf); return NULL; } @@ -2671,7 +2684,7 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { lenOfOneRow += TIMESTAMP_BUFF_LEN; } else { - errorPrint("get error data type : %s\n", dataType); + errorPrint2("get error data type : %s\n", dataType); exit(EXIT_FAILURE); } } @@ -2702,7 +2715,7 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { - errorPrint("get error tag type : %s\n", dataType); + errorPrint2("get error tag type : %s\n", dataType); exit(EXIT_FAILURE); } } @@ -2739,7 +2752,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, if (code != 0) { taos_free_result(res); taos_close(taos); - errorPrint("%s() LN%d, failed to run command %s\n", + errorPrint2("%s() LN%d, failed to run command %s\n", __func__, __LINE__, command); exit(EXIT_FAILURE); } @@ -2751,7 +2764,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, if (NULL == childTblName) { taos_free_result(res); taos_close(taos); - errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); + errorPrint2("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); exit(EXIT_FAILURE); } } @@ -2761,7 +2774,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, int32_t* len = taos_fetch_lengths(res); if (0 == strlen((char *)row[0])) { - errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n", + errorPrint2("%s() LN%d, No.%"PRId64" 
table return empty name\n", __func__, __LINE__, count); exit(EXIT_FAILURE); } @@ -2782,7 +2795,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, tmfree(childTblName); taos_free_result(res); taos_close(taos); - errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", + errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n", __func__, __LINE__, dbName, sTblName); exit(EXIT_FAILURE); } @@ -2879,7 +2892,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, int childTblCount = 10000; superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); if (superTbls->childTblName == NULL) { - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); return -1; } getAllChildNameOfSuperTable(taos, dbName, @@ -2905,7 +2918,7 @@ static int createSuperTable( int lenOfOneRow = 0; if (superTbl->columnCount == 0) { - errorPrint("%s() LN%d, super table column count is %d\n", + errorPrint2("%s() LN%d, super table column count is %d\n", __func__, __LINE__, superTbl->columnCount); free(command); return -1; @@ -2969,7 +2982,7 @@ static int createSuperTable( } else { taos_close(taos); free(command); - errorPrint("%s() LN%d, config error data type : %s\n", + errorPrint2("%s() LN%d, config error data type : %s\n", __func__, __LINE__, dataType); exit(EXIT_FAILURE); } @@ -2982,7 +2995,7 @@ static int createSuperTable( if (NULL == superTbl->colsOfCreateChildTable) { taos_close(taos); free(command); - errorPrint("%s() LN%d, Failed when calloc, size:%d", + errorPrint2("%s() LN%d, Failed when calloc, size:%d", __func__, __LINE__, len+1); exit(EXIT_FAILURE); } @@ -2992,7 +3005,7 @@ static int createSuperTable( __func__, __LINE__, superTbl->colsOfCreateChildTable); if (superTbl->tagCount == 0) { - errorPrint("%s() LN%d, super table tag count is %d\n", + errorPrint2("%s() LN%d, super table tag count is %d\n", __func__, __LINE__, 
superTbl->tagCount); free(command); return -1; @@ -3059,7 +3072,7 @@ static int createSuperTable( } else { taos_close(taos); free(command); - errorPrint("%s() LN%d, config error tag type : %s\n", + errorPrint2("%s() LN%d, config error tag type : %s\n", __func__, __LINE__, dataType); exit(EXIT_FAILURE); } @@ -3074,7 +3087,7 @@ static int createSuperTable( "create table if not exists %s.%s (ts timestamp%s) tags %s", dbName, superTbl->sTblName, cols, tags); if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - errorPrint( "create supertable %s failed!\n\n", + errorPrint2("create supertable %s failed!\n\n", superTbl->sTblName); free(command); return -1; @@ -3090,7 +3103,7 @@ int createDatabasesAndStables(char *command) { int ret = 0; taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } @@ -3186,7 +3199,7 @@ int createDatabasesAndStables(char *command) { if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { taos_close(taos); - errorPrint( "\ncreate database %s failed!\n\n", + errorPrint("\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); return -1; } @@ -3216,7 +3229,7 @@ int createDatabasesAndStables(char *command) { ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j]); if (0 != ret) { - errorPrint("\nget super table %s.%s info failed!\n\n", + errorPrint2("\nget super table %s.%s info failed!\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); continue; } @@ -3244,7 +3257,7 @@ static void* createTable(void *sarg) pThreadInfo->buffer = calloc(buff_len, 1); if (pThreadInfo->buffer == NULL) { - errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); + errorPrint2("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); exit(EXIT_FAILURE); } @@ -3266,7 +3279,7 @@ static 
void* createTable(void *sarg) } else { if (stbInfo == NULL) { free(pThreadInfo->buffer); - errorPrint("%s() LN%d, use metric, but super table info is NULL\n", + errorPrint2("%s() LN%d, use metric, but super table info is NULL\n", __func__, __LINE__); exit(EXIT_FAILURE); } else { @@ -3313,7 +3326,7 @@ static void* createTable(void *sarg) len = 0; if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, NO_INSERT_TYPE, false)){ - errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); free(pThreadInfo->buffer); return NULL; } @@ -3329,7 +3342,7 @@ static void* createTable(void *sarg) if (0 != len) { if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, NO_INSERT_TYPE, false)) { - errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); } } @@ -3374,7 +3387,7 @@ static int startMultiThreadCreateChildTable( db_name, g_Dbs.port); if (pThreadInfo->taos == NULL) { - errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", + errorPrint2("%s() LN%d, Failed to connect to TDengine, reason:%s\n", __func__, __LINE__, taos_errstr(NULL)); free(pids); free(infos); @@ -3549,7 +3562,7 @@ static int readSampleFromCsvFileToMem( FILE* fp = fopen(stbInfo->sampleFile, "r"); if (fp == NULL) { - errorPrint( "Failed to open sample file: %s, reason:%s\n", + errorPrint("Failed to open sample file: %s, reason:%s\n", stbInfo->sampleFile, strerror(errno)); return -1; } @@ -3561,7 +3574,7 @@ static int readSampleFromCsvFileToMem( readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { - errorPrint( "Failed to fseek file: %s, reason:%s\n", + errorPrint("Failed to fseek file: %s, reason:%s\n", stbInfo->sampleFile, strerror(errno)); fclose(fp); return -1; @@ -3604,7 +3617,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // columns cJSON *columns = 
cJSON_GetObjectItem(stbInfo, "columns"); if (columns && columns->type != cJSON_Array) { - printf("ERROR: failed to read json, columns not found\n"); + errorPrint("%s", "failed to read json, columns not found\n"); goto PARSE_OVER; } else if (NULL == columns) { superTbls->columnCount = 0; @@ -3614,8 +3627,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( int columnSize = cJSON_GetArraySize(columns); if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) { - errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n", - __func__, __LINE__, TSDB_MAX_COLUMNS); + errorPrint("failed to read json, column size overflow, max column size is %d\n", + TSDB_MAX_COLUMNS); goto PARSE_OVER; } @@ -3633,8 +3646,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (countObj && countObj->type == cJSON_Number) { count = countObj->valueint; } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s() LN%d, failed to read json, column count not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, column count not found\n"); goto PARSE_OVER; } else { count = 1; @@ -3645,8 +3657,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( cJSON *dataType = cJSON_GetObjectItem(column, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - errorPrint("%s() LN%d: failed to read json, column type not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, column type not found\n"); goto PARSE_OVER; } //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN); @@ -3674,8 +3685,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( } if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) { - errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n", - __func__, __LINE__, MAX_NUM_COLUMNS); + errorPrint("failed to read json, column size overflow, allowed max column size is %d\n", + MAX_NUM_COLUMNS); goto PARSE_OVER; } 
@@ -3686,15 +3697,14 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // tags cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, tags not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, tags not found\n"); goto PARSE_OVER; } int tagSize = cJSON_GetArraySize(tags); if (tagSize > TSDB_MAX_TAGS) { - errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", - __func__, __LINE__, TSDB_MAX_TAGS); + errorPrint("failed to read json, tags size overflow, max tag size is %d\n", + TSDB_MAX_TAGS); goto PARSE_OVER; } @@ -3708,7 +3718,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (countObj && countObj->type == cJSON_Number) { count = countObj->valueint; } else if (countObj && countObj->type != cJSON_Number) { - printf("ERROR: failed to read json, column count not found\n"); + errorPrint("%s", "failed to read json, column count not found\n"); goto PARSE_OVER; } else { count = 1; @@ -3719,8 +3729,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( cJSON *dataType = cJSON_GetObjectItem(tag, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, tag type not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, tag type not found\n"); goto PARSE_OVER; } tstrncpy(columnCase.dataType, dataType->valuestring, @@ -3730,8 +3739,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (dataLen && dataLen->type == cJSON_Number) { columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { - errorPrint("%s() LN%d, failed to read json, column len not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, column len not found\n"); goto PARSE_OVER; } else { columnCase.dataLen = 0; @@ -3746,16 +3754,16 @@ static bool getColumnAndTagTypeFromInsertJsonFile( } if (index > 
TSDB_MAX_TAGS) { - errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n", - __func__, __LINE__, TSDB_MAX_TAGS); + errorPrint("failed to read json, tags size overflow, allowed max tag count is %d\n", + TSDB_MAX_TAGS); goto PARSE_OVER; } superTbls->tagCount = index; if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) { - errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n", - __func__, __LINE__, TSDB_MAX_COLUMNS); + errorPrint("columns + tags is more than allowed max columns count: %d\n", + TSDB_MAX_COLUMNS); goto PARSE_OVER; } ret = true; @@ -3778,7 +3786,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!host) { tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } else { - printf("ERROR: failed to read json, host not found\n"); + errorPrint("%s", "failed to read json, host not found\n"); goto PARSE_OVER; } @@ -3816,7 +3824,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!threads) { g_Dbs.threadCount = 1; } else { - printf("ERROR: failed to read json, threads not found\n"); + errorPrint("%s", "failed to read json, threads not found\n"); goto PARSE_OVER; } @@ -3826,32 +3834,28 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!threads2) { g_Dbs.threadCountByCreateTbl = 1; } else { - errorPrint("%s() LN%d, failed to read json, threads2 not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, threads2 not found\n"); goto PARSE_OVER; } cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); if (gInsertInterval && gInsertInterval->type == cJSON_Number) { if (gInsertInterval->valueint <0) { - errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert interval input mistake\n"); goto PARSE_OVER; } g_args.insert_interval = gInsertInterval->valueint; } else if (!gInsertInterval) { 
g_args.insert_interval = 0; } else { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert_interval input mistake\n"); goto PARSE_OVER; } cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); if (interlaceRows && interlaceRows->type == cJSON_Number) { if (interlaceRows->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); goto PARSE_OVER; } @@ -3859,8 +3863,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!interlaceRows) { g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req } else { - errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); goto PARSE_OVER; } @@ -3933,14 +3936,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* dbs = cJSON_GetObjectItem(root, "databases"); if (!dbs || dbs->type != cJSON_Array) { - printf("ERROR: failed to read json, databases not found\n"); + errorPrint("%s", "failed to read json, databases not found\n"); goto PARSE_OVER; } int dbSize = cJSON_GetArraySize(dbs); if (dbSize > MAX_DB_COUNT) { errorPrint( - "ERROR: failed to read json, databases size overflow, max database is %d\n", + "failed to read json, databases size overflow, max database is %d\n", MAX_DB_COUNT); goto PARSE_OVER; } @@ -3953,13 +3956,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // dbinfo cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); if (!dbinfo || dbinfo->type != cJSON_Object) { - printf("ERROR: failed to read json, dbinfo not found\n"); + errorPrint("%s", "failed to read json, dbinfo not found\n"); goto PARSE_OVER; } cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); 
if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { - printf("ERROR: failed to read json, db name not found\n"); + errorPrint("%s", "failed to read json, db name not found\n"); goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN); @@ -3974,8 +3977,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!drop) { g_Dbs.db[i].drop = g_args.drop_database; } else { - errorPrint("%s() LN%d, failed to read json, drop input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, drop input mistake\n"); goto PARSE_OVER; } @@ -3987,7 +3989,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!precision) { memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN); } else { - printf("ERROR: failed to read json, precision not found\n"); + errorPrint("%s", "failed to read json, precision not found\n"); goto PARSE_OVER; } @@ -3997,7 +3999,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!update) { g_Dbs.db[i].dbCfg.update = -1; } else { - printf("ERROR: failed to read json, update not found\n"); + errorPrint("%s", "failed to read json, update not found\n"); goto PARSE_OVER; } @@ -4007,7 +4009,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!replica) { g_Dbs.db[i].dbCfg.replica = -1; } else { - printf("ERROR: failed to read json, replica not found\n"); + errorPrint("%s", "failed to read json, replica not found\n"); goto PARSE_OVER; } @@ -4017,7 +4019,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!keep) { g_Dbs.db[i].dbCfg.keep = -1; } else { - printf("ERROR: failed to read json, keep not found\n"); + errorPrint("%s", "failed to read json, keep not found\n"); goto PARSE_OVER; } @@ -4027,7 +4029,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!days) { g_Dbs.db[i].dbCfg.days = -1; } else { - printf("ERROR: failed to read json, days not found\n"); + errorPrint("%s", "failed to read json, days not 
found\n"); goto PARSE_OVER; } @@ -4037,7 +4039,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!cache) { g_Dbs.db[i].dbCfg.cache = -1; } else { - printf("ERROR: failed to read json, cache not found\n"); + errorPrint("%s", "failed to read json, cache not found\n"); goto PARSE_OVER; } @@ -4047,7 +4049,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!blocks) { g_Dbs.db[i].dbCfg.blocks = -1; } else { - printf("ERROR: failed to read json, block not found\n"); + errorPrint("%s", "failed to read json, block not found\n"); goto PARSE_OVER; } @@ -4067,7 +4069,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!minRows) { g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default } else { - printf("ERROR: failed to read json, minRows not found\n"); + errorPrint("%s", "failed to read json, minRows not found\n"); goto PARSE_OVER; } @@ -4077,7 +4079,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!maxRows) { g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default } else { - printf("ERROR: failed to read json, maxRows not found\n"); + errorPrint("%s", "failed to read json, maxRows not found\n"); goto PARSE_OVER; } @@ -4087,7 +4089,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!comp) { g_Dbs.db[i].dbCfg.comp = -1; } else { - printf("ERROR: failed to read json, comp not found\n"); + errorPrint("%s", "failed to read json, comp not found\n"); goto PARSE_OVER; } @@ -4097,7 +4099,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!walLevel) { g_Dbs.db[i].dbCfg.walLevel = -1; } else { - printf("ERROR: failed to read json, walLevel not found\n"); + errorPrint("%s", "failed to read json, walLevel not found\n"); goto PARSE_OVER; } @@ -4107,7 +4109,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!cacheLast) { g_Dbs.db[i].dbCfg.cacheLast = -1; } else { - printf("ERROR: failed to read json, cacheLast not found\n"); + errorPrint("%s", "failed to read json, cacheLast 
not found\n"); goto PARSE_OVER; } @@ -4127,24 +4129,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!fsync) { g_Dbs.db[i].dbCfg.fsync = -1; } else { - errorPrint("%s() LN%d, failed to read json, fsync input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, fsync input mistake\n"); goto PARSE_OVER; } // super_talbes cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); if (!stables || stables->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, super_tables not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, super_tables not found\n"); goto PARSE_OVER; } int stbSize = cJSON_GetArraySize(stables); if (stbSize > MAX_SUPER_TABLE_COUNT) { errorPrint( - "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n", - __func__, __LINE__, MAX_SUPER_TABLE_COUNT); + "failed to read json, supertable size overflow, max supertable is %d\n", + MAX_SUPER_TABLE_COUNT); goto PARSE_OVER; } @@ -4157,8 +4157,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, stb name not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, stb name not found\n"); goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, @@ -4166,7 +4165,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { - printf("ERROR: failed to read json, childtable_prefix not found\n"); + errorPrint("%s", "failed to read json, childtable_prefix not found\n"); goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, @@ -4187,7 +4186,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if 
(!autoCreateTbl) { g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; } else { - printf("ERROR: failed to read json, auto_create_table not found\n"); + errorPrint("%s", "failed to read json, auto_create_table not found\n"); goto PARSE_OVER; } @@ -4197,7 +4196,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!batchCreateTbl) { g_Dbs.db[i].superTbls[j].batchCreateTableNum = 1000; } else { - printf("ERROR: failed to read json, batch_create_tbl_num not found\n"); + errorPrint("%s", "failed to read json, batch_create_tbl_num not found\n"); goto PARSE_OVER; } @@ -4217,8 +4216,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!childTblExists) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; } else { - errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n", - __func__, __LINE__); + errorPrint("%s", + "failed to read json, child_table_exists not found\n"); goto PARSE_OVER; } @@ -4228,8 +4227,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); if (!count || count->type != cJSON_Number || 0 >= count->valueint) { - errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n", - __func__, __LINE__); + errorPrint("%s", + "failed to read json, childtable_count input mistake\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; @@ -4244,8 +4243,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", min(SMALL_BUFF_LEN, strlen("rand") + 1)); } else { - errorPrint("%s() LN%d, failed to read json, data_source not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, data_source not found\n"); goto PARSE_OVER; } @@ -4259,8 +4257,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { g_Dbs.db[i].superTbls[j].iface= STMT_IFACE; } else { - errorPrint("%s() 
LN%d, failed to read json, insert_mode %s not recognized\n", - __func__, __LINE__, stbIface->valuestring); + errorPrint("failed to read json, insert_mode %s not recognized\n", + stbIface->valuestring); goto PARSE_OVER; } } else if (!stbIface) { @@ -4274,7 +4272,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if ((childTbl_limit) && (g_Dbs.db[i].drop != true) && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_limit->type != cJSON_Number) { - printf("ERROR: failed to read json, childtable_limit\n"); + errorPrint("%s", "failed to read json, childtable_limit\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; @@ -4287,7 +4285,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if ((childTbl_offset->type != cJSON_Number) || (0 > childTbl_offset->valueint)) { - printf("ERROR: failed to read json, childtable_offset\n"); + errorPrint("%s", "failed to read json, childtable_offset\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint; @@ -4303,7 +4301,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", TSDB_DB_NAME_LEN); } else { - printf("ERROR: failed to read json, start_timestamp not found\n"); + errorPrint("%s", "failed to read json, start_timestamp not found\n"); goto PARSE_OVER; } @@ -4313,7 +4311,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!timestampStep) { g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step; } else { - printf("ERROR: failed to read json, timestamp_step not found\n"); + errorPrint("%s", "failed to read json, timestamp_step not found\n"); goto PARSE_OVER; } @@ -4328,7 +4326,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", SMALL_BUFF_LEN); } else { - printf("ERROR: failed to read json, sample_format 
not found\n"); + errorPrint("%s", "failed to read json, sample_format not found\n"); goto PARSE_OVER; } @@ -4343,7 +4341,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN); } else { - printf("ERROR: failed to read json, sample_file not found\n"); + errorPrint("%s", "failed to read json, sample_file not found\n"); goto PARSE_OVER; } @@ -4361,7 +4359,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); g_Dbs.db[i].superTbls[j].tagSource = 0; } else { - printf("ERROR: failed to read json, tags_file not found\n"); + errorPrint("%s", "failed to read json, tags_file not found\n"); goto PARSE_OVER; } @@ -4377,8 +4375,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!maxSqlLen) { g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; } else { - errorPrint("%s() LN%d, failed to read json, stbMaxSqlLen input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, stbMaxSqlLen input mistake\n"); goto PARSE_OVER; } /* @@ -4395,31 +4392,28 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!multiThreadWriteOneTbl) { g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; } else { - printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n"); + errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not found\n"); goto PARSE_OVER; } */ cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); if (insertRows && insertRows->type == cJSON_Number) { if (insertRows->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert_rows input mistake\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; } else if (!insertRows) { g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; } else { - errorPrint("%s() LN%d, failed to read json, 
insert_rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert_rows input mistake\n"); goto PARSE_OVER; } cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) { if (stbInterlaceRows->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, interlace rows input mistake\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint; @@ -4437,8 +4431,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req } else { errorPrint( - "%s() LN%d, failed to read json, interlace rows input mistake\n", - __func__, __LINE__); + "%s", "failed to read json, interlace rows input mistake\n"); goto PARSE_OVER; } @@ -4454,7 +4447,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!disorderRatio) { g_Dbs.db[i].superTbls[j].disorderRatio = 0; } else { - printf("ERROR: failed to read json, disorderRatio not found\n"); + errorPrint("%s", "failed to read json, disorderRatio not found\n"); goto PARSE_OVER; } @@ -4464,7 +4457,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!disorderRange) { g_Dbs.db[i].superTbls[j].disorderRange = 1000; } else { - printf("ERROR: failed to read json, disorderRange not found\n"); + errorPrint("%s", "failed to read json, disorderRange not found\n"); goto PARSE_OVER; } @@ -4472,8 +4465,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (insertInterval && insertInterval->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; if (insertInterval->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", 
"failed to read json, insert_interval input mistake\n"); goto PARSE_OVER; } } else if (!insertInterval) { @@ -4481,8 +4473,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert_interval input mistake\n"); goto PARSE_OVER; } @@ -4514,7 +4505,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!host) { tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } else { - printf("ERROR: failed to read json, host not found\n"); + errorPrint("%s", "failed to read json, host not found\n"); goto PARSE_OVER; } @@ -4552,23 +4543,21 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!answerPrompt) { g_args.answer_yes = false; } else { - printf("ERROR: failed to read json, confirm_parameter_prompt not found\n"); + errorPrint("%s", "failed to read json, confirm_parameter_prompt not found\n"); goto PARSE_OVER; } cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); if (gQueryTimes && gQueryTimes->type == cJSON_Number) { if (gQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, query_times input mistake\n"); goto PARSE_OVER; } g_args.query_times = gQueryTimes->valueint; } else if (!gQueryTimes) { g_args.query_times = 1; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, query_times input mistake\n"); goto PARSE_OVER; } @@ -4576,7 +4565,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); } else if (!dbs) { - 
printf("ERROR: failed to read json, databases not found\n"); + errorPrint("%s", "failed to read json, databases not found\n"); goto PARSE_OVER; } @@ -4590,7 +4579,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { tstrncpy(g_queryInfo.queryMode, "taosc", min(SMALL_BUFF_LEN, strlen("taosc") + 1)); } else { - printf("ERROR: failed to read json, query_mode not found\n"); + errorPrint("%s", "failed to read json, query_mode not found\n"); goto PARSE_OVER; } @@ -4600,7 +4589,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.concurrent = 1; g_queryInfo.specifiedQueryInfo.sqlCount = 0; } else if (specifiedQuery->type != cJSON_Object) { - printf("ERROR: failed to read json, super_table_query not found\n"); + errorPrint("%s", "failed to read json, super_table_query not found\n"); goto PARSE_OVER; } else { cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval"); @@ -4615,8 +4604,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { if (specifiedQueryTimes->valueint <= 0) { errorPrint( - "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, specifiedQueryTimes->valueint); + "failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + specifiedQueryTimes->valueint); goto PARSE_OVER; } @@ -4633,8 +4622,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (concurrent && concurrent->type == cJSON_Number) { if (concurrent->valueint <= 0) { errorPrint( - "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", - __func__, __LINE__, + "query sqlCount %d or concurrent %d is not correct.\n", g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); goto PARSE_OVER; @@ -4652,8 +4640,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { 
g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; } else { - errorPrint("%s() LN%d, failed to read json, async mode input error\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, async mode input error\n"); goto PARSE_OVER; } } else { @@ -4676,7 +4663,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("no", restart->valuestring)) { g_queryInfo.specifiedQueryInfo.subscribeRestart = false; } else { - printf("ERROR: failed to read json, subscribe restart error\n"); + errorPrint("%s", "failed to read json, subscribe restart error\n"); goto PARSE_OVER; } } else { @@ -4692,7 +4679,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("no", keepProgress->valuestring)) { g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; } else { - printf("ERROR: failed to read json, subscribe keepProgress error\n"); + errorPrint("%s", "failed to read json, subscribe keepProgress error\n"); goto PARSE_OVER; } } else { @@ -4704,15 +4691,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!specifiedSqls) { g_queryInfo.specifiedQueryInfo.sqlCount = 0; } else if (specifiedSqls->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, super sqls not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, super sqls not found\n"); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(specifiedSqls); if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent > MAX_QUERY_SQL_COUNT) { - errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", - __func__, __LINE__, + errorPrint("failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", superSqlSize, g_queryInfo.specifiedQueryInfo.concurrent, MAX_QUERY_SQL_COUNT); @@ -4726,7 +4711,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == 
NULL) { - printf("ERROR: failed to read json, sql not found\n"); + errorPrint("%s", "failed to read json, sql not found\n"); goto PARSE_OVER; } tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], @@ -4766,7 +4751,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { memset(g_queryInfo.specifiedQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); } else { - printf("ERROR: failed to read json, super query result file not found\n"); + errorPrint("%s", + "failed to read json, super query result file not found\n"); goto PARSE_OVER; } } @@ -4779,7 +4765,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.threadCnt = 1; g_queryInfo.superQueryInfo.sqlCount = 0; } else if (superQuery->type != cJSON_Object) { - printf("ERROR: failed to read json, sub_table_query not found\n"); + errorPrint("%s", "failed to read json, sub_table_query not found\n"); ret = true; goto PARSE_OVER; } else { @@ -4793,24 +4779,22 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); if (superQueryTimes && superQueryTimes->type == cJSON_Number) { if (superQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, superQueryTimes->valueint); + errorPrint("failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + superQueryTimes->valueint); goto PARSE_OVER; } g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; } else if (!superQueryTimes) { g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, query_times input mistake\n"); goto PARSE_OVER; } cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); if (threads && threads->type == cJSON_Number) { if (threads->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, 
threads input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, threads input mistake\n"); goto PARSE_OVER; } @@ -4832,8 +4816,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, TSDB_TABLE_NAME_LEN); } else { - errorPrint("%s() LN%d, failed to read json, super table name input error\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, super table name input error\n"); goto PARSE_OVER; } @@ -4845,8 +4828,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("async", superAsyncMode->valuestring)) { g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; } else { - errorPrint("%s() LN%d, failed to read json, async mode input error\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, async mode input error\n"); goto PARSE_OVER; } } else { @@ -4856,8 +4838,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); if (superInterval && superInterval->type == cJSON_Number) { if (superInterval->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, interval input mistake\n"); goto PARSE_OVER; } g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint; @@ -4875,7 +4856,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("no", subrestart->valuestring)) { g_queryInfo.superQueryInfo.subscribeRestart = false; } else { - printf("ERROR: failed to read json, subscribe restart error\n"); + errorPrint("%s", "failed to read json, subscribe restart error\n"); goto PARSE_OVER; } } else { @@ -4891,7 +4872,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("no", superkeepProgress->valuestring)) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; } else { - printf("ERROR: failed to read json, subscribe 
super table keepProgress error\n"); + errorPrint("%s", + "failed to read json, subscribe super table keepProgress error\n"); goto PARSE_OVER; } } else { @@ -4928,14 +4910,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!superSqls) { g_queryInfo.superQueryInfo.sqlCount = 0; } else if (superSqls->type != cJSON_Array) { - errorPrint("%s() LN%d: failed to read json, super sqls not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, super sqls not found\n"); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(superSqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { - errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", - __func__, __LINE__, MAX_QUERY_SQL_COUNT); + errorPrint("failed to read json, query sql size overflow, max is %d\n", + MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -4947,8 +4928,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, sql not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, sql not found\n"); goto PARSE_OVER; } tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, @@ -4962,8 +4942,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (NULL == result) { memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); } else { - errorPrint("%s() LN%d, failed to read json, sub query result file not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, sub query result file not found\n"); goto PARSE_OVER; } } @@ -4981,7 +4960,7 @@ static bool getInfoFromJsonFile(char* file) { FILE *fp = fopen(file, "r"); if (!fp) { - printf("failed to read %s, reason:%s\n", file, strerror(errno)); + errorPrint("failed to read %s, reason:%s\n", file, strerror(errno)); return false; } @@ -4992,14 +4971,14 @@ static bool getInfoFromJsonFile(char* 
file) { if (len <= 0) { free(content); fclose(fp); - printf("failed to read %s, content is null", file); + errorPrint("failed to read %s, content is null", file); return false; } content[len] = 0; cJSON* root = cJSON_Parse(content); if (root == NULL) { - printf("ERROR: failed to cjson parse %s, invalid json format\n", file); + errorPrint("failed to cjson parse %s, invalid json format\n", file); goto PARSE_OVER; } @@ -5012,13 +4991,13 @@ static bool getInfoFromJsonFile(char* file) { } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { g_args.test_mode = SUBSCRIBE_TEST; } else { - printf("ERROR: failed to read json, filetype not support\n"); + errorPrint("%s", "failed to read json, filetype not support\n"); goto PARSE_OVER; } } else if (!filetype) { g_args.test_mode = INSERT_TEST; } else { - printf("ERROR: failed to read json, filetype not found\n"); + errorPrint("%s", "failed to read json, filetype not found\n"); goto PARSE_OVER; } @@ -5028,8 +5007,8 @@ static bool getInfoFromJsonFile(char* file) { || (SUBSCRIBE_TEST == g_args.test_mode)) { ret = getMetaFromQueryJsonFile(root); } else { - errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n", - __func__, __LINE__); + errorPrint("%s", + "input json file type error! please input correct file type: insert or query or subscribe\n"); goto PARSE_OVER; } @@ -5147,7 +5126,7 @@ static int64_t generateStbRowData( || (0 == strncasecmp(stbInfo->columns[i].dataType, "NCHAR", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "binary or nchar length overflow, max size:%u\n", + errorPrint2("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -5159,7 +5138,7 @@ static int64_t generateStbRowData( } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { - errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen); + errorPrint2("calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); return -1; } rand_string(buf, stbInfo->columns[i].dataLen); @@ -5222,7 +5201,7 @@ static int64_t generateStbRowData( tmpLen = strlen(tmp); tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN)); } else { - errorPrint( "Not support data type: %s\n", + errorPrint2("Not support data type: %s\n", stbInfo->columns[i].dataType); return -1; } @@ -5274,7 +5253,7 @@ static int64_t generateData(char *recBuf, char **data_type, } else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) { char *s = malloc(lenOfBinary + 1); if (s == NULL) { - errorPrint("%s() LN%d, memory allocation %d bytes failed\n", + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); exit(EXIT_FAILURE); } @@ -5284,7 +5263,7 @@ static int64_t generateData(char *recBuf, char **data_type, } else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) { char *s = malloc(lenOfBinary + 1); if (s == NULL) { - errorPrint("%s() LN%d, memory allocation %d bytes failed\n", + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); exit(EXIT_FAILURE); } @@ -5311,7 +5290,7 @@ static int prepareSampleDataForSTable(SSuperTable *stbInfo) { sampleDataBuf = calloc( stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", + errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); @@ -5322,7 +5301,7 @@ static int prepareSampleDataForSTable(SSuperTable *stbInfo) { int ret = readSampleFromCsvFileToMem(stbInfo); if (0 != ret) { - errorPrint("%s() LN%d, read sample from csv file failed.\n", + errorPrint2("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__); tmfree(sampleDataBuf); stbInfo->sampleDataBuf = NULL; @@ -5377,7 +5356,7 @@ static int32_t 
execInsert(threadInfo *pThreadInfo, uint32_t k) debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, pThreadInfo->stmt); if (0 != taos_stmt_execute(pThreadInfo->stmt)) { - errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n", + errorPrint2("%s() LN%d, failied to execute insert statement. reason: %s\n", __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n"); @@ -5387,7 +5366,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) break; default: - errorPrint("%s() LN%d: unknown insert mode: %d\n", + errorPrint2("%s() LN%d: unknown insert mode: %d\n", __func__, __LINE__, stbInfo->iface); affectedRows = 0; } @@ -5615,7 +5594,7 @@ static int generateStbSQLHead( tableSeq % stbInfo->tagSampleCount); } if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -5766,7 +5745,7 @@ static int32_t prepareStmtBindArrayByType( if (0 == strncasecmp(dataType, "BINARY", strlen("BINARY"))) { if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "binary length overflow, max size:%u\n", + errorPrint2("binary length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -5789,7 +5768,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "NCHAR", strlen("NCHAR"))) { if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "nchar length overflow, max size:%u\n", + errorPrint2("nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -5937,7 +5916,7 @@ static int32_t prepareStmtBindArrayByType( value, &tmpEpoch, strlen(value), timePrec, 0)) { free(bind_ts2); - errorPrint("Input %s, time format error!\n", value); + errorPrint2("Input %s, time format error!\n", value); return -1; } *bind_ts2 = tmpEpoch; @@ -5953,7 +5932,7 @@ static int32_t 
prepareStmtBindArrayByType( bind->length = &bind->buffer_length; bind->is_null = NULL; } else { - errorPrint( "No support data type: %s\n", dataType); + errorPrint2("Not support data type: %s\n", dataType); return -1; } @@ -5970,7 +5949,7 @@ static int32_t prepareStmtBindArrayByTypeForRand( if (0 == strncasecmp(dataType, "BINARY", strlen("BINARY"))) { if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "binary length overflow, max size:%u\n", + errorPrint2("binary length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -5993,7 +5972,7 @@ static int32_t prepareStmtBindArrayByTypeForRand( } else if (0 == strncasecmp(dataType, "NCHAR", strlen("NCHAR"))) { if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "nchar length overflow, max size:%u\n", + errorPrint2("nchar length overflow, max size: %u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -6145,7 +6124,7 @@ static int32_t prepareStmtBindArrayByTypeForRand( if (TSDB_CODE_SUCCESS != taosParseTime( value, &tmpEpoch, strlen(value), timePrec, 0)) { - errorPrint("Input %s, time format error!\n", value); + errorPrint2("Input %s, time format error!\n", value); return -1; } *bind_ts2 = tmpEpoch; @@ -6163,7 +6142,7 @@ static int32_t prepareStmtBindArrayByTypeForRand( *ptr += bind->buffer_length; } else { - errorPrint( "No support data type: %s\n", dataType); + errorPrint2("No support data type: %s\n", dataType); return -1; } @@ -6181,7 +6160,7 @@ static int32_t prepareStmtWithoutStb( TAOS_STMT *stmt = pThreadInfo->stmt; int ret = taos_stmt_set_tbname(stmt, tableName); if (ret != 0) { - errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n", + errorPrint2("failed to execute taos_stmt_set_tbname(%s). return 0x%x. 
reason: %s\n", tableName, ret, taos_stmt_errstr(stmt)); return ret; } @@ -6190,7 +6169,7 @@ static int32_t prepareStmtWithoutStb( char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.num_of_CPR + 1)); if (bindArray == NULL) { - errorPrint("Failed to allocate %d bind params\n", + errorPrint2("Failed to allocate %d bind params\n", (g_args.num_of_CPR + 1)); return -1; } @@ -6231,13 +6210,13 @@ static int32_t prepareStmtWithoutStb( } } if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) { - errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); break; } // if msg > 3MB, break if (0 != taos_stmt_add_batch(stmt)) { - errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); break; } @@ -6260,7 +6239,7 @@ static int32_t prepareStbStmtBindTag( { char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -6292,7 +6271,7 @@ static int32_t prepareStbStmtBindRand( { char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -6395,7 +6374,7 @@ static int32_t prepareStbStmtRand( } if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -6403,7 +6382,7 @@ static int32_t prepareStbStmtRand( char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); if (NULL == 
tagsArray) { tmfree(tagsValBuf); - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -6422,14 +6401,14 @@ static int32_t prepareStbStmtRand( tmfree(tagsArray); if (0 != ret) { - errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } } else { ret = taos_stmt_set_tbname(stmt, tableName); if (0 != ret) { - errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } @@ -6437,7 +6416,7 @@ static int32_t prepareStbStmtRand( char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); if (bindArray == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", __func__, __LINE__, (stbInfo->columnCount + 1)); return -1; } @@ -6456,7 +6435,7 @@ static int32_t prepareStbStmtRand( } ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); if (0 != ret) { - errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); free(bindArray); return -1; @@ -6464,7 +6443,7 @@ static int32_t prepareStbStmtRand( // if msg > 3MB, break ret = taos_stmt_add_batch(stmt); if (0 != ret) { - errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); free(bindArray); return -1; @@ -6508,7 +6487,7 @@ static int32_t prepareStbStmtWithSample( } if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -6516,7 +6495,7 @@ static int32_t prepareStbStmtWithSample( char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); if (NULL == tagsArray) { tmfree(tagsValBuf); - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -6535,14 +6514,14 @@ static int32_t prepareStbStmtWithSample( tmfree(tagsArray); if (0 != ret) { - errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } } else { ret = taos_stmt_set_tbname(stmt, tableName); if (0 != ret) { - errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } @@ -6564,14 +6543,14 @@ static int32_t prepareStbStmtWithSample( } ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); if (0 != ret) { - errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } // if msg > 3MB, break ret = taos_stmt_add_batch(stmt); if (0 != ret) { - errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } @@ -6732,7 +6711,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->buffer = calloc(maxSqlLen, 1); if (NULL == pThreadInfo->buffer) { - errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", + errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, maxSqlLen, strerror(errno)); return NULL; } @@ -6780,7 +6759,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { getTableName(tableName, pThreadInfo, tableSeq); if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", + errorPrint2("[%d] %s() LN%d, getTableName return null\n", pThreadInfo->threadID, __func__, __LINE__); free(pThreadInfo->buffer); return NULL; @@ -6847,7 +6826,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { debugPrint("[%d] %s() LN%d, generated records is %d\n", pThreadInfo->threadID, __func__, __LINE__, generated); if (generated < 0) { - errorPrint("[%d] %s() LN%d, generated records is %d\n", + errorPrint2("[%d] %s() LN%d, generated records is %d\n", pThreadInfo->threadID, __func__, __LINE__, generated); goto free_of_interlace; } else if (generated == 0) { @@ -6901,7 +6880,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { startTs = taosGetTimestampUs(); if (recOfBatch == 0) { - errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n", + errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", pThreadInfo->threadID, __func__, __LINE__, batchPerTbl); if (batchPerTbl > 0) { @@ -6928,7 +6907,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->totalDelay += delay; if (recOfBatch != affectedRows) { - errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", + errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch, affectedRows, 
pThreadInfo->buffer); goto free_of_interlace; @@ -6986,7 +6965,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->buffer = calloc(maxSqlLen, 1); if (NULL == pThreadInfo->buffer) { - errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n", + errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n", maxSqlLen, strerror(errno)); return NULL; @@ -7027,7 +7006,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { __func__, __LINE__, pThreadInfo->threadID, tableSeq, tableName); if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", + errorPrint2("[%d] %s() LN%d, getTableName return null\n", pThreadInfo->threadID, __func__, __LINE__); free(pThreadInfo->buffer); return NULL; @@ -7116,7 +7095,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalDelay += delay; if (affectedRows < 0) { - errorPrint("%s() LN%d, affected rows: %d\n", + errorPrint2("%s() LN%d, affected rows: %d\n", __func__, __LINE__, affectedRows); goto free_of_progressive; } @@ -7278,7 +7257,7 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * uint16_t rest_port = port + TSDB_PORT_HTTP; struct hostent *server = gethostbyname(host); if ((server == NULL) || (server->h_addr == NULL)) { - errorPrint("%s", "ERROR, no such host"); + errorPrint2("%s", "no such host"); return -1; } @@ -7303,7 +7282,7 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) { stbInfo->sampleBindArray = calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); if (stbInfo->sampleBindArray == NULL) { - errorPrint("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", + errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", __func__, __LINE__, (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); return -1; } @@ -7312,7 +7291,7 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; 
i++) { char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); if (bindArray == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", __func__, __LINE__, (stbInfo->columnCount + 1)); return -1; } @@ -7344,7 +7323,7 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) char *bindBuffer = calloc(1, index + 1); if (bindBuffer == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -7382,7 +7361,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } else if (0 == strncasecmp(precision, "ns", 2)) { timePrec = TSDB_TIME_PRECISION_NANO; } else { - errorPrint("Not support precision: %s\n", precision); + errorPrint2("Not support precision: %s\n", precision); exit(EXIT_FAILURE); } } @@ -7412,7 +7391,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { if (0 != prepareSampleDataForSTable(stbInfo)) { - errorPrint("%s() LN%d, prepare sample data for stable failed!\n", + errorPrint2("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); exit(EXIT_FAILURE); } @@ -7422,7 +7401,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); if (NULL == taos0) { - errorPrint("%s() LN%d, connect to server fail , reason: %s\n", + errorPrint2("%s() LN%d, connect to server fail , reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); exit(EXIT_FAILURE); } @@ -7477,7 +7456,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, limit * TSDB_TABLE_NAME_LEN); if (stbInfo->childTblName == NULL) { taos_close(taos0); - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + errorPrint2("%s() 
LN%d, alloc memory failed!\n", __func__, __LINE__); exit(EXIT_FAILURE); } @@ -7583,7 +7562,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, g_Dbs.password, db_name, g_Dbs.port); if (NULL == pThreadInfo->taos) { free(infos); - errorPrint( + errorPrint2( "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); @@ -7599,7 +7578,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (NULL == pThreadInfo->stmt) { free(pids); free(infos); - errorPrint( + errorPrint2( "%s() LN%d, failed init stmt, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); @@ -7611,7 +7590,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, free(pids); free(infos); free(stmtBuffer); - errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n", + errorPrint2("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n", ret, taos_stmt_errstr(pThreadInfo->stmt)); exit(EXIT_FAILURE); } @@ -7755,7 +7734,7 @@ static void *readTable(void *sarg) { char *tb_prefix = pThreadInfo->tb_prefix; FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { - errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + errorPrint2("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); free(command); return NULL; } @@ -7791,7 +7770,7 @@ static void *readTable(void *sarg) { int32_t code = taos_errno(pSql); if (code != 0) { - errorPrint( "Failed to query:%s\n", taos_errstr(pSql)); + errorPrint2("Failed to query:%s\n", taos_errstr(pSql)); taos_free_result(pSql); taos_close(taos); fclose(fp); @@ -7873,7 +7852,7 @@ static void *readMetric(void *sarg) { int32_t code = taos_errno(pSql); if (code != 0) { - errorPrint( "Failed to query:%s\n", taos_errstr(pSql)); + errorPrint2("Failed to query:%s\n", taos_errstr(pSql)); taos_free_result(pSql); taos_close(taos); fclose(fp); @@ -7920,7 +7899,7 @@ static int insertTestProcess() { 
debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile); g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); if (NULL == g_fpOfInsertResult) { - errorPrint( "Failed to open %s for save result\n", g_Dbs.resultFile); + errorPrint("Failed to open %s for save result\n", g_Dbs.resultFile); return -1; } @@ -8022,7 +8001,7 @@ static void *specifiedTableQuery(void *sarg) { NULL, g_queryInfo.port); if (taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { @@ -8034,7 +8013,7 @@ static void *specifiedTableQuery(void *sarg) { sprintf(sqlStr, "use %s", g_queryInfo.dbName); if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(pThreadInfo->taos); - errorPrint( "use database %s failed!\n\n", + errorPrint("use database %s failed!\n\n", g_queryInfo.dbName); return NULL; } @@ -8200,7 +8179,7 @@ static int queryTestProcess() { NULL, g_queryInfo.port); if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", + errorPrint("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); exit(EXIT_FAILURE); } @@ -8258,7 +8237,7 @@ static int queryTestProcess() { taos_close(taos); free(infos); free(pids); - errorPrint( "use database %s failed!\n\n", + errorPrint2("use database %s failed!\n\n", g_queryInfo.dbName); return -1; } @@ -8356,7 +8335,7 @@ static int queryTestProcess() { static void stable_sub_callback( TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", + errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", __func__, __LINE__, code, taos_errstr(res)); return; } @@ -8369,7 +8348,7 @@ static void stable_sub_callback( static void specified_sub_callback( TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { if (res == NULL || 
taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", + errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", __func__, __LINE__, code, taos_errstr(res)); return; } @@ -8408,7 +8387,7 @@ static TAOS_SUB* subscribeImpl( } if (tsub == NULL) { - errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, sql); + errorPrint2("failed to create subscription. topic:%s, sql:%s\n", topic, sql); return NULL; } @@ -8439,7 +8418,7 @@ static void *superSubscribe(void *sarg) { g_queryInfo.dbName, g_queryInfo.port); if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); free(subSqlStr); return NULL; @@ -8450,7 +8429,7 @@ static void *superSubscribe(void *sarg) { sprintf(sqlStr, "USE %s", g_queryInfo.dbName); if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(pThreadInfo->taos); - errorPrint( "use database %s failed!\n\n", + errorPrint2("use database %s failed!\n\n", g_queryInfo.dbName); free(subSqlStr); return NULL; @@ -8586,7 +8565,7 @@ static void *specifiedSubscribe(void *sarg) { g_queryInfo.dbName, g_queryInfo.port); if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } @@ -8693,7 +8672,7 @@ static int subscribeTestProcess() { g_queryInfo.dbName, g_queryInfo.port); if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", + errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); exit(EXIT_FAILURE); } @@ -8721,7 +8700,7 @@ static int subscribeTestProcess() { g_queryInfo.specifiedQueryInfo.sqlCount); } else { if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("%s() LN%d, sepcified query sqlCount 
%d.\n", + errorPrint2("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); exit(EXIT_FAILURE); @@ -8738,7 +8717,7 @@ static int subscribeTestProcess() { g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { - errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); + errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); exit(EXIT_FAILURE); } @@ -8773,7 +8752,7 @@ static int subscribeTestProcess() { g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); if ((NULL == pidsOfStable) || (NULL == infosOfStable)) { - errorPrint("%s() LN%d, malloc failed for create threads\n", + errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); // taos_close(taos); exit(EXIT_FAILURE); @@ -9039,7 +9018,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) memcpy(cmd + cmd_len, line, read_len); if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { - errorPrint("%s() LN%d, queryDbExec %s failed!\n", + errorPrint2("%s() LN%d, queryDbExec %s failed!\n", __func__, __LINE__, cmd); tmfree(cmd); tmfree(line); @@ -9113,7 +9092,7 @@ static void queryResult() { g_Dbs.port); if (pThreadInfo->taos == NULL) { free(pThreadInfo); - errorPrint( "Failed to connect to TDengine, reason:%s\n", + errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); exit(EXIT_FAILURE); } @@ -9135,7 +9114,7 @@ static void testCmdLine() { if (strlen(configDir)) { wordexp_t full_path; if (wordexp(configDir, &full_path, 0) != 0) { - errorPrint( "Invalid path %s\n", configDir); + errorPrint("Invalid path %s\n", configDir); return; } taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); From 3b384d8203926216378298a31803b33dc2a640b7 Mon Sep 17 00:00:00 2001 From: cpvmrd Date: Tue, 24 Aug 2021 14:21:05 +0800 Subject: [PATCH 229/239] Update docs.md Change "IOT" to "IoT". 
--- documentation20/cn/01.evaluation/docs.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/cn/01.evaluation/docs.md b/documentation20/cn/01.evaluation/docs.md index 2cc6033ccc..f5af3a4b8d 100644 --- a/documentation20/cn/01.evaluation/docs.md +++ b/documentation20/cn/01.evaluation/docs.md @@ -21,7 +21,7 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发 ## TDengine 总体适用场景 -作为一个 IOT 大数据平台,TDengine 的典型适用场景是在 IOT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。 +作为一个 IoT 大数据平台,TDengine 的典型适用场景是在 IoT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。 ### 数据源特点和需求 @@ -54,7 +54,7 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发 |系统性能需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| |要求较大的总体处理能力| | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。| -|要求高速处理数据 | | | √ | TDengine 的专门为 IOT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。| +|要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。| |要求快速处理小粒度数据| | | √ |这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。| ### 系统维护需求 From ac4d59a051413b59556e54b02ffbc8d178a69e3d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 24 Aug 2021 14:27:56 +0800 Subject: [PATCH 230/239] [TD-6296]:When the timestamp column is filtered using the or field, the error content is abnormal --- src/query/inc/queryLog.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/query/inc/queryLog.h b/src/query/inc/queryLog.h index 5c48c43c45..87a221943a 100644 --- a/src/query/inc/queryLog.h +++ b/src/query/inc/queryLog.h @@ -24,10 +24,10 @@ extern "C" { extern uint32_t qDebugFlag; -#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} while(0) -#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} while(0) -#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", 255, __VA_ARGS__); }} while(0) -#define qInfo(...) 
do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} while(0) +#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", qDebugFlag, __VA_ARGS__); }} while(0) +#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", qDebugFlag, __VA_ARGS__); }} while(0) +#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", qDebugFlag, __VA_ARGS__); }} while(0) +#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #define qDump(a, l) do { if (qDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)a, l); }} while(0) From d17c106e0f7a054e1a87ce6968d52bdb3fc7a410 Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Tue, 24 Aug 2021 14:29:35 +0800 Subject: [PATCH 231/239] Update docs.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change “需要考虑库的设计,超级表和普通表的设计” to “需要考虑库、超级表和普通表的设计” --- documentation20/cn/04.model/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/04.model/docs.md b/documentation20/cn/04.model/docs.md index ed1d2f7168..45a4537d9b 100644 --- a/documentation20/cn/04.model/docs.md +++ b/documentation20/cn/04.model/docs.md @@ -2,7 +2,7 @@ # TDengine数据建模 -TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库的设计,超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。 +TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。 关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。 From eb4466adaa9797908992cbf53cec3643f5c73d2d Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 24 Aug 2021 15:13:40 +0800 Subject: [PATCH 232/239] fix: fix coredump for last_row query 
when last row is cached --- src/tsdb/src/tsdbRead.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 9cc9b7224c..d72d38bf2b 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1572,7 +1572,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfColsOfRow1 = 0; if (pSchema1 == NULL) { - pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1)); + pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); } if(isRow1DataRow) { numOfColsOfRow1 = schemaNCols(pSchema1); @@ -1584,7 +1584,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, if(row2) { isRow2DataRow = isDataRow(row2); if (pSchema2 == NULL) { - pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2)); + pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); } if(isRow2DataRow) { numOfColsOfRow2 = schemaNCols(pSchema2); From 6e53e0a6badfc8870f3c469ff7ac3c73a19f68f4 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Tue, 24 Aug 2021 18:43:02 +0800 Subject: [PATCH 233/239] [TD-6313]: improve error handling if loading taos failed in python (#7550) --- src/connector/python/taos/cinterface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 51e9a8667d..42dac3c2e8 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -49,7 +49,7 @@ def _load_taos(): try: return load_func[platform.system()]() except: - sys.exit("unsupported platform to TDengine connector") + raise InterfaceError('unsupported platform or failed to load taos client library') _libtaos = _load_taos() From 1351b5703629cc5233edf3e2c520887e31f5471a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 24 Aug 2021 20:27:49 +0800 Subject: [PATCH 234/239] [TD-6317] forbidden distinct with order by 
--- src/client/src/tscSQLParser.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 48c1a39a47..e157f26c9d 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5787,11 +5787,6 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; } - - if (pQueryInfo->distinct) { - pQueryInfo->order.order = TSDB_ORDER_ASC; - pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; - } } int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { @@ -5805,14 +5800,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column"; const char* msg8 = "only column in groupby clause allowed as order column"; const char* msg9 = "orderby column must projected in subquery"; + const char* msg10 = "not support distinct mixed with order by"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) { - return TSDB_CODE_SUCCESS; - } - + if (pSqlNode->pSortOrder == NULL) { + return TSDB_CODE_SUCCESS; + } char* pMsgBuf = tscGetErrorMsgPayload(pCmd); SArray* pSortOrder = pSqlNode->pSortOrder; @@ -5832,6 +5826,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return invalidOperationMsg(pMsgBuf, msg2); } } + if (size > 0 && pQueryInfo->distinct) { + return invalidOperationMsg(pMsgBuf, msg10); + } // handle the first part of order by tVariant* pVar = taosArrayGet(pSortOrder, 0); From 9dc8e658796d82b9be884c82dd9af582adc12cab Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Tue, 24 Aug 2021 21:30:51 +0800 Subject: [PATCH 235/239] [TD-6337]: fix taosdemo.go compile and 
build guidelines [ci skip] --- documentation20/cn/08.connector/docs.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 364961ca63..0ac5a91b50 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -966,13 +966,17 @@ Go连接器支持的系统有: **提示:建议Go版本是1.13及以上,并开启模块支持:** ```sh - go env -w GO111MODULE=on - go env -w GOPROXY=https://goproxy.io,direct +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.io,direct ``` 在taosdemo.go所在目录下进行编译和执行: ```sh - go mod init *demo* - go build ./demo -h fqdn -p serverPort +go mod init taosdemo +go get github.com/taosdata/driver-go/taosSql +# use win branch in Windows platform. +#go get github.com/taosdata/driver-go/taosSql@win +go build +./taosdemo -h fqdn -p serverPort ``` ### Go连接器的使用 From 36c2980092751f13dacf5fccb1b1c094ef62728b Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 24 Aug 2021 22:42:40 +0800 Subject: [PATCH 236/239] [TS-106] : describe the difference of timezone setting between Win & Linux. 
--- documentation20/cn/11.administrator/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index ff44dd1225..29e49aa902 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -375,7 +375,7 @@ taos -C 或 taos --dump-config timezone GMT-8 timezone Asia/Shanghai ``` - 均是合法的设置东八区时区的格式。 + 均是合法的设置东八区时区的格式。但需注意,Windows 下并不支持 `timezone Asia/Shanghai` 这样的写法,而必须写成 `timezone UTC-8`。 时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如: ```sql From 87d54c1765dfa2f054173f5fae1ea2818a05c676 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 25 Aug 2021 08:05:58 +0800 Subject: [PATCH 237/239] [TD-6317] forbidden distinct with order by --- src/client/src/tscSQLParser.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e157f26c9d..e068eaae72 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5787,6 +5787,11 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; } + + if (pQueryInfo->distinct) { + pQueryInfo->order.order = TSDB_ORDER_ASC; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } } int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { From 3d06d17b5343413c1bea7ccb3126b4223b7d3f6b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 25 Aug 2021 08:15:05 +0800 Subject: [PATCH 238/239] Feature/sangshuduo/td 5875 taosdemo ue improve (#7560) * [TD-5875]: taosdemo show progress * empty commit for CI * better msg for create child table. 
--- src/kit/taosdemo/taosdemo.c | 58 +++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 5d851eafd0..e0cc76d5a8 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -443,6 +443,7 @@ typedef struct SThreadInfo_S { uint64_t start_table_from; uint64_t end_table_to; int64_t ntables; + int64_t tables_created; uint64_t data_of_rate; int64_t start_time; char* cols; @@ -639,6 +640,7 @@ SArguments g_args = { static SDbs g_Dbs; static int64_t g_totalChildTables = 0; +static int64_t g_actualChildTables = 0; static SQueryMetaInfo g_queryInfo; static FILE * g_fpOfInsertResult = NULL; @@ -964,6 +966,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->num_of_tables = atoi(argv[++i]); + g_totalChildTables = arguments->num_of_tables; } else if (strcmp(argv[i], "-n") == 0) { if ((argc == i+1) || (!isStringNumber(argv[i+1]))) { @@ -1134,7 +1137,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } } else if ((strcmp(argv[i], "--version") == 0) || - (strcmp(argv[i], "-V") == 0)){ + (strcmp(argv[i], "-V") == 0)) { printVersion(); exit(0); } else if (strcmp(argv[i], "--help") == 0) { @@ -1345,14 +1348,14 @@ static void selectAndGetResult( } } -static char *rand_bool_str(){ +static char *rand_bool_str() { static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; return g_randbool_buff + ((cursor % MAX_PREPARED_RAND) * BOOL_BUFF_LEN); } -static int32_t rand_bool(){ +static int32_t rand_bool() { static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; @@ -1485,7 +1488,7 @@ static char *demo_phase_float_str() { return g_rand_phase_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN); } -static float UNUSED_FUNC demo_phase_float(){ +static float UNUSED_FUNC demo_phase_float() { static int cursor; cursor++; if 
(cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; @@ -1564,7 +1567,7 @@ static void init_rand_data() { g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * MAX_PREPARED_RAND); assert(g_randdouble_buff); - for (int i = 0; i < MAX_PREPARED_RAND; i++){ + for (int i = 0; i < MAX_PREPARED_RAND; i++) { g_randint[i] = (int)(taosRandom() % 65535); sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d", g_randint[i]); @@ -3276,6 +3279,7 @@ static void* createTable(void *sarg) pThreadInfo->db_name, g_args.tb_prefix, i, pThreadInfo->cols); + batchNum ++; } else { if (stbInfo == NULL) { free(pThreadInfo->buffer); @@ -3325,13 +3329,14 @@ static void* createTable(void *sarg) len = 0; if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)){ + NO_INSERT_TYPE, false)) { errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); free(pThreadInfo->buffer); return NULL; } + pThreadInfo->tables_created += batchNum; - uint64_t currentPrintTime = taosGetTimestampMs(); + uint64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", pThreadInfo->threadID, pThreadInfo->start_table_from, i); @@ -3401,6 +3406,7 @@ static int startMultiThreadCreateChildTable( pThreadInfo->use_metric = true; pThreadInfo->cols = cols; pThreadInfo->minDelay = UINT64_MAX; + pThreadInfo->tables_created = 0; pthread_create(pids + i, NULL, createTable, pThreadInfo); } @@ -3411,6 +3417,8 @@ static int startMultiThreadCreateChildTable( for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; taos_close(pThreadInfo->taos); + + g_actualChildTables += pThreadInfo->tables_created; } free(pids); @@ -3437,7 +3445,6 @@ static void createChildTables() { verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); uint64_t startFrom = 0; - g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; verbosePrint("%s() LN%d: create 
%"PRId64" child tables from %"PRIu64"\n", __func__, __LINE__, g_totalChildTables, startFrom); @@ -4232,6 +4239,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; + g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); if (dataSource && dataSource->type == cJSON_String @@ -4936,7 +4944,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON *result = cJSON_GetObjectItem(sql, "result"); if (result != NULL && result->type == cJSON_String - && result->valuestring != NULL){ + && result->valuestring != NULL) { tstrncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); } else if (NULL == result) { @@ -7586,7 +7594,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0); - if (ret != 0){ + if (ret != 0) { free(pids); free(infos); free(stmtBuffer); @@ -7932,18 +7940,30 @@ static int insertTestProcess() { double start; double end; - // create child tables - start = taosGetTimestampMs(); - createChildTables(); - end = taosGetTimestampMs(); - if (g_totalChildTables > 0) { - fprintf(stderr, "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n", - (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + fprintf(stderr, + "creating %"PRId64" table(s) with %d thread(s)\n\n", + g_totalChildTables, g_Dbs.threadCountByCreateTbl); if (g_fpOfInsertResult) { fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n", - (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + "creating %"PRId64" table(s) with %d thread(s)\n\n", + g_totalChildTables, g_Dbs.threadCountByCreateTbl); + } + + // create child tables + start = taosGetTimestampMs(); + createChildTables(); + end = taosGetTimestampMs(); + + fprintf(stderr, + "Spent %.4f 
seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n", + (end - start)/1000.0, g_totalChildTables, + g_Dbs.threadCountByCreateTbl, g_actualChildTables); + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "Spent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n", + (end - start)/1000.0, g_totalChildTables, + g_Dbs.threadCountByCreateTbl, g_actualChildTables); } } From 5584038709d5ff3c392ca222e681de1ae52e5b32 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Wed, 25 Aug 2021 11:37:09 +0800 Subject: [PATCH 239/239] [TD-2639] : update description about operator "like". --- documentation20/cn/12.taos-sql/docs.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 48409537bb..b183b6e419 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -206,10 +206,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 显示当前数据库下的所有数据表信息。 - 说明:可在 like 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过 20 字节。( 从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) - - 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。 - - **显示一个数据表的创建语句** ```mysql @@ -718,15 +714,19 @@ Query OK, 1 row(s) in set (0.001091s) | = | equal to | all types | | <> | not equal to | all types | | between and | within a certain range | **`timestamp`** and all numeric types | -| in | matches any value in a set | all types except first column `timestamp` | +| in | match any value in a set | all types except first column `timestamp` | +| like | match a wildcard string | **`binary`** **`nchar`** | | % | match with any char sequences | **`binary`** **`nchar`** | | _ | match with a single char | **`binary`** **`nchar`** | 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 -2. 
同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 -3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 -4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 -5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 +2. like 算子使用通配符字符串进行匹配检查。 + * 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。 + * 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) +3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 +4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 +5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 +6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 ### UNION ALL 操作符