From b980bb35bc3dc9f3a726e1919a6f668d7a95b63d Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 20 Feb 2025 18:08:20 +0800 Subject: [PATCH 01/16] more code --- source/dnode/vnode/CMakeLists.txt | 1 + source/dnode/vnode/src/inc/vnodeInt.h | 6 +- source/dnode/vnode/src/meta/metaOpen.c | 2 +- tests/test_new/storage/test_compact_meta.py | 85 +++++++++++++++++++++ 4 files changed, 92 insertions(+), 2 deletions(-) create mode 100644 tests/test_new/storage/test_compact_meta.py diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index b90e1844ae..1df28336b5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -96,6 +96,7 @@ if(TD_VNODE_PLUGINS) vnode PRIVATE ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompact.c + ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/metaCompact.c ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompactMonitor.c ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/vnodeCompact.c ) diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 5a61c1c124..27ee54ec70 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -486,7 +486,11 @@ struct SVnode { // commit variables SVATaskID commitTask; - SMeta* pMeta; + struct { + SMeta* pMeta; + SMeta* pNewMeta; + }; + SSma* pSma; STsdb* pTsdb; SWal* pWal; diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 35e761e51e..5e185e3bb2 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -133,7 +133,7 @@ static void doScan(SMeta *pMeta) { } } -static int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) { +int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) { SMeta *pMeta = NULL; int32_t code = 0; int32_t lino; diff --git a/tests/test_new/storage/test_compact_meta.py b/tests/test_new/storage/test_compact_meta.py new file mode 100644 index 0000000000..e1ee134c6a --- /dev/null +++ b/tests/test_new/storage/test_compact_meta.py @@ -0,0 +1,85 @@ +# tests/test_new/xxx/xxx/test_xxx.py +# import ... 
+ +import random +import taos + + +class TestXxxx: + def init(self): + tdLog.debug("start to execute %s" % __file__) + + def test_template(self): + """用例目标,必填,用一行简短总结 + <空行> + 用例详细描述,必填,允许多行 + <空行> + Since: 用例开始支持的TDengine版本,新增用例必填 + <空行> + Labels: 筛选标签,选填,多个标签用英文逗号分隔 + <空行> + Jira: 相关jira任务id,选填 + <空行> + History: 用例变更历史,选填,每行一次变更信息 + - 日期1 变更人1 变更原因1 + - 日期2 变更人2 变更原因2 + """ + + def test_demo(self): + """测试超级表插入各种数据类型 + + 使用多种数据类型创建超级表,向超级表插入数据, + 包括:常规数据,空数据,边界值等,插入均执行成功 + + Since: v3.3.0.0 + + Labels: stable, data_type + + Jira: TD-12345, TS-1234 + + History: + - 2024-2-6 Feng Chao Created + - 2024-2-7 Huo Hong updated for feature TD-23456 + """ + + def test_case1(self): + dbname = 'db' + super_table = "stb" + child_table_prefix = "ctb" + num_of_child_tables = 10000 + max_alter_times = 100 + + # Create database + tdSql.query(f'create database {dbname} vgroups 1') + + # Create super table + sql = f'create {dbname}.{super_table} (ts timestamp, c1 int) tags (tag1 int)' + tdSql.query(sql) + + # Create child tables + for i in range(num_of_child_tables): + sql = f'create {dbname}.{child_table_prefix}{i} using {dbname}.{super_table} tags ({i})' + tdSql.query(sql) + + # Alter child tables + for i in range(num_of_child_tables): + for j in range(random.randint(1, max_alter_times)): + sql = f'alter table {dbname}.{child_table_prefix}{i} set tag1 = {i + j}' + tdSql.query(sql) + + # Alter super tables + for i in range(random.randint(1, max_alter_times)): + sql = f'alter table {dbname}.{super_table} add column c{i+1} int' + tdSql.query(sql) + + # Compact meta + sql = f'compact database {dbname}' + tdSql.query(sql) + + def run(self): + self.test_template() + self.test_demo() + self.test_case1() + + def stop(self): + tdLog.success("%s successfully executed" % __file__) From b048c597dcca55da4e0344b9f8b66b55c64e1a41 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 24 Feb 2025 14:41:56 +0800 Subject: [PATCH 02/16] more code --- source/dnode/vnode/src/inc/meta.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h index c05953c67d..53e1e0b6c7 100644 --- a/source/dnode/vnode/src/inc/meta.h +++ b/source/dnode/vnode/src/inc/meta.h @@ -24,7 +24,6 @@ extern "C" { #endif -typedef struct SMetaIdx SMetaIdx; typedef struct SMetaDB SMetaDB; typedef struct SMetaCache SMetaCache; @@ -103,8 +102,6 @@ struct SMeta { // stream TTB* pStreamDb; - SMetaIdx* pIdx; - SMetaCache* pCache; }; From b01ed3bb80f17273bbed01a0f20b8ac354ce2991 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 24 Feb 2025 15:32:46 +0800 Subject: [PATCH 03/16] more code --- source/dnode/mgmt/exe/dmMain.c | 4 - source/dnode/vnode/src/meta/metaOpen.c | 191 +++---------------------- 2 files changed, 22 insertions(+), 173 deletions(-) diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index bd7da3f4d6..51476757e5 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -170,8 +170,6 @@ static void dmSetSignalHandle() { #endif } -extern bool generateNewMeta; - static int32_t dmParseArgs(int32_t argc, char const *argv[]) { global.startTime = taosGetTimestampMs(); @@ -210,8 +208,6 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) { global.dumpSdb = true; } else if (strcmp(argv[i], "-dTxn") == 0) { global.deleteTrans = true; - } else if (strcmp(argv[i], "-r") == 0) { - generateNewMeta = true; } else if (strcmp(argv[i], "-E") == 0) { if (i < argc - 1) { if (strlen(argv[++i]) >= PATH_MAX) { diff --git 
a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 5e185e3bb2..b80351d144 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -251,187 +251,40 @@ _exit: return code; } -bool generateNewMeta = false; - -static int32_t metaGenerateNewMeta(SMeta **ppMeta) { - SMeta *pNewMeta = NULL; - SMeta *pMeta = *ppMeta; - SVnode *pVnode = pMeta->pVnode; - - metaInfo("vgId:%d start to generate new meta", TD_VID(pMeta->pVnode)); - - // Open a new meta for orgainzation - int32_t code = metaOpenImpl(pMeta->pVnode, &pNewMeta, VNODE_META_TMP_DIR, false); - if (code) { - return code; - } - - code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL); - if (code) { - return code; - } - - // i == 0, scan super table - // i == 1, scan normal table and child table - for (int i = 0; i < 2; i++) { - TBC *uidCursor = NULL; - int32_t counter = 0; - - code = tdbTbcOpen(pMeta->pUidIdx, &uidCursor, NULL); - if (code) { - metaError("vgId:%d failed to open uid index cursor, reason:%s", TD_VID(pVnode), tstrerror(code)); - return code; - } - - code = tdbTbcMoveToFirst(uidCursor); - if (code) { - metaError("vgId:%d failed to move to first, reason:%s", TD_VID(pVnode), tstrerror(code)); - tdbTbcClose(uidCursor); - return code; - } - - for (;;) { - const void *pKey; - int kLen; - const void *pVal; - int vLen; - - if (tdbTbcGet(uidCursor, &pKey, &kLen, &pVal, &vLen) < 0) { - break; - } - - tb_uid_t uid = *(tb_uid_t *)pKey; - SUidIdxVal *pUidIdxVal = (SUidIdxVal *)pVal; - if ((i == 0 && (pUidIdxVal->suid && pUidIdxVal->suid == uid)) // super table - || (i == 1 && (pUidIdxVal->suid == 0 || pUidIdxVal->suid != uid)) // normal table and child table - ) { - counter++; - if (i == 0) { - metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, "super", uid); - } else { - metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, - pUidIdxVal->suid == 0 ? 
"normal" : "child", uid); - } - - // fetch table entry - void *value = NULL; - int valueSize = 0; - if (tdbTbGet(pMeta->pTbDb, - &(STbDbKey){ - .version = pUidIdxVal->version, - .uid = uid, - }, - sizeof(uid), &value, &valueSize) == 0) { - SDecoder dc = {0}; - SMetaEntry me = {0}; - tDecoderInit(&dc, value, valueSize); - if (metaDecodeEntry(&dc, &me) == 0) { - if (me.type == TSDB_CHILD_TABLE && - tdbTbGet(pMeta->pUidIdx, &me.ctbEntry.suid, sizeof(me.ctbEntry.suid), NULL, NULL) != 0) { - metaError("vgId:%d failed to get super table uid:%" PRId64 " for child table uid:%" PRId64, - TD_VID(pVnode), me.ctbEntry.suid, uid); - } else if (metaHandleEntry2(pNewMeta, &me) != 0) { - metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid); - } - } - tDecoderClear(&dc); - } - tdbFree(value); - } - - code = tdbTbcMoveToNext(uidCursor); - if (code) { - metaError("vgId:%d failed to move to next, reason:%s", TD_VID(pVnode), tstrerror(code)); - return code; - } - } - - tdbTbcClose(uidCursor); - } - - code = metaCommit(pNewMeta, pNewMeta->txn); - if (code) { - metaError("vgId:%d failed to commit, reason:%s", TD_VID(pVnode), tstrerror(code)); - return code; - } - - code = metaFinishCommit(pNewMeta, pNewMeta->txn); - if (code) { - metaError("vgId:%d failed to finish commit, reason:%s", TD_VID(pVnode), tstrerror(code)); - return code; - } - - if ((code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL)) != 0) { - metaError("vgId:%d failed to begin new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); - } - metaClose(&pNewMeta); - metaInfo("vgId:%d finish to generate new meta", TD_VID(pVnode)); - return 0; +void vnodeGetMetaPath(SVnode *pVnode, const char *metaDir, char *fname) { + vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, fname, TSDB_FILENAME_LEN); + int32_t offset = strlen(fname); + snprintf(fname + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir); } int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { - if (generateNewMeta) { - char path[TSDB_FILENAME_LEN] = {0}; - char oldMetaPath[TSDB_FILENAME_LEN] = {0}; - char newMetaPath[TSDB_FILENAME_LEN] = {0}; - char backupMetaPath[TSDB_FILENAME_LEN] = {0}; + int32_t code = TSDB_CODE_SUCCESS; + char metaDir[TSDB_FILENAME_LEN] = {0}; + char metaTempDir[TSDB_FILENAME_LEN] = {0}; - vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN); - snprintf(oldMetaPath, sizeof(oldMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_DIR); - snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR); - snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR); + vnodeGetMetaPath(pVnode, VNODE_META_DIR, metaDir); + vnodeGetMetaPath(pVnode, VNODE_META_TMP_DIR, metaTempDir); - bool oldMetaExist = taosCheckExistFile(oldMetaPath); - bool newMetaExist = taosCheckExistFile(newMetaPath); - bool backupMetaExist = taosCheckExistFile(backupMetaPath); - - if ((!backupMetaExist && !oldMetaExist && newMetaExist) // case 2 - || (backupMetaExist && !oldMetaExist && !newMetaExist) // case 4 - || (backupMetaExist && oldMetaExist && newMetaExist) // case 8 - ) { - metaError("vgId:%d invalid meta state, please check", TD_VID(pVnode)); + // Check file states + if (!taosCheckExistFile(metaDir)) { + if (!taosCheckExistFile(metaTempDir)) { + metaError("vgId:%d, cannot find meta dir:%s and meta temp dir:%s", TD_VID(pVnode), metaDir, metaTempDir); return TSDB_CODE_FAILED; - } else if ((backupMetaExist && oldMetaExist 
&& !newMetaExist) // case 7 - || (!backupMetaExist && !oldMetaExist && !newMetaExist) // case 1 - ) { - return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); - } else if (backupMetaExist && !oldMetaExist && newMetaExist) { - if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { - metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); - return terrno; - } - return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); } else { - int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + code = taosRenameFile(metaTempDir, metaDir); if (code) { - return code; - } - - code = metaGenerateNewMeta(ppMeta); - if (code) { - metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); - } - - metaClose(ppMeta); - if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { - metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); - return terrno; - } - - // rename the new meta to old meta - if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { - metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); - return terrno; - } - code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); - if (code) { - metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + metaError("vgId:%d, %s failed at %s:%d since %s: rename %s to %s failed", TD_VID(pVnode), __func__, __FILE__, + __LINE__, tstrerror(code), metaTempDir, metaDir); return code; } } + } - } else { - return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + // Do open meta + code = metaOpenImpl(pVnode, ppMeta, metaDir, rollback); + if (code) { + metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pVnode), __func__, __FILE__, __LINE__, tstrerror(code)); + return code; } return TSDB_CODE_SUCCESS; From 9a95abd26d2ec104febad4b771e0e909de6ab134 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 24 Feb 2025 16:32:09 +0800 Subject: [PATCH 04/16] more code --- source/dnode/vnode/src/meta/metaEntry.c | 9 +++- source/dnode/vnode/src/meta/metaOpen.c | 19 +++---- source/dnode/vnode/src/meta/metaSnapshot.c | 59 +++++++++++++++++++--- 3 files changed, 67 insertions(+), 20 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaEntry.c b/source/dnode/vnode/src/meta/metaEntry.c index 302a1eb04a..a234ea257c 100644 --- a/source/dnode/vnode/src/meta/metaEntry.c +++ b/source/dnode/vnode/src/meta/metaEntry.c @@ -135,12 +135,17 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) { return 0; } -int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { +int metaDecodeEntryImpl(SDecoder *pCoder, SMetaEntry *pME, bool headerOnly) { TAOS_CHECK_RETURN(tStartDecode(pCoder)); TAOS_CHECK_RETURN(tDecodeI64(pCoder, &pME->version)); TAOS_CHECK_RETURN(tDecodeI8(pCoder, &pME->type)); TAOS_CHECK_RETURN(tDecodeI64(pCoder, &pME->uid)); + if (headerOnly) { + tEndDecode(pCoder); + return 0; + } + if (pME->type > 0) { TAOS_CHECK_RETURN(tDecodeCStr(pCoder, &pME->name)); @@ -209,6 +214,8 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { return 0; } +int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { return metaDecodeEntryImpl(pCoder, pME, false); } + static int32_t metaCloneSchema(const SSchemaWrapper *pSrc, SSchemaWrapper *pDst) { if (pSrc == NULL || pDst == NULL) { return TSDB_CODE_INVALID_PARA; diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index b80351d144..15debf07f8 100644 
--- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -266,22 +266,17 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { vnodeGetMetaPath(pVnode, VNODE_META_TMP_DIR, metaTempDir); // Check file states - if (!taosCheckExistFile(metaDir)) { - if (!taosCheckExistFile(metaTempDir)) { - metaError("vgId:%d, cannot find meta dir:%s and meta temp dir:%s", TD_VID(pVnode), metaDir, metaTempDir); - return TSDB_CODE_FAILED; - } else { - code = taosRenameFile(metaTempDir, metaDir); - if (code) { - metaError("vgId:%d, %s failed at %s:%d since %s: rename %s to %s failed", TD_VID(pVnode), __func__, __FILE__, - __LINE__, tstrerror(code), metaTempDir, metaDir); - return code; - } + if (!taosCheckExistFile(metaDir) && taosCheckExistFile(metaTempDir)) { + code = taosRenameFile(metaTempDir, metaDir); + if (code) { + metaError("vgId:%d, %s failed at %s:%d since %s: rename %s to %s failed", TD_VID(pVnode), __func__, __FILE__, + __LINE__, tstrerror(code), metaTempDir, metaDir); + return code; } } // Do open meta - code = metaOpenImpl(pVnode, ppMeta, metaDir, rollback); + code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); if (code) { metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pVnode), __func__, __FILE__, __LINE__, tstrerror(code)); return code; diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index b227653e5e..64693274f4 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -21,6 +21,7 @@ struct SMetaSnapReader { int64_t sver; int64_t ever; TBC* pTbc; + int32_t iLoop; }; int32_t metaSnapReaderOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapReader** ppReader) { @@ -65,6 +66,22 @@ void metaSnapReaderClose(SMetaSnapReader** ppReader) { } } +extern int metaDecodeEntryImpl(SDecoder* pCoder, SMetaEntry* pME, bool headerOnly); + +static int32_t metaDecodeEntryHeader(void* data, int32_t size, SMetaEntry* entry) { + SDecoder decoder = {0}; + tDecoderInit(&decoder, (uint8_t*)data, size); + + int32_t code = metaDecodeEntryImpl(&decoder, entry, true); + if (code) { + tDecoderClear(&decoder); + return code; + } + + tDecoderClear(&decoder); + return 0; +} + int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) { int32_t code = 0; const void* pKey = NULL; @@ -72,19 +89,47 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) { int32_t nKey = 0; int32_t nData = 0; STbDbKey key; + int32_t c; *ppData = NULL; - for (;;) { - if (tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData)) { + while (pReader->iLoop < 2) { + if (tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData) != 0 || ((STbDbKey*)pKey)->version > pReader->ever) { + pReader->iLoop++; + + // Reopen the cursor to read from the beginning + tdbTbcClose(pReader->pTbc); + pReader->pTbc = NULL; + code = tdbTbcOpen(pReader->pMeta->pTbDb, &pReader->pTbc, NULL); + if (code) { + metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pReader->pMeta->pVnode), __func__, __FILE__, __LINE__, + tstrerror(code)); + goto _exit; + } + + code = tdbTbcMoveTo(pReader->pTbc, &(STbDbKey){.version = pReader->sver, .uid = INT64_MIN}, sizeof(STbDbKey), &c); + if (code) { + metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pReader->pMeta->pVnode), __func__, __FILE__, __LINE__, + tstrerror(code)); + goto _exit; + } + + continue; + } + + // Decode meta entry + SMetaEntry entry = {0}; + code = metaDecodeEntryHeader((void*)pData, nData, &entry); + if (code) { + 
metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pReader->pMeta->pVnode), __func__, __FILE__, __LINE__, + tstrerror(code)); goto _exit; } key = ((STbDbKey*)pKey)[0]; - if (key.version > pReader->ever) { - goto _exit; - } - - if (key.version < pReader->sver) { + if (key.version < pReader->sver // + || (pReader->iLoop == 0 && TABS(entry.type) != TSDB_SUPER_TABLE) // First loop send super table entry + || (pReader->iLoop == 1 && TABS(entry.type) == TSDB_SUPER_TABLE) // Second loop send non-super table entry + ) { if (tdbTbcMoveToNext(pReader->pTbc) != 0) { metaTrace("vgId:%d, vnode snapshot meta read data done", TD_VID(pReader->pMeta->pVnode)); } From d48547ed6443e700d03740f96cde0487d5cdde07 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 25 Feb 2025 16:48:43 +0800 Subject: [PATCH 05/16] more code --- include/common/tmsg.h | 2 ++ include/libs/nodes/cmdnodes.h | 2 ++ source/common/src/msg/tmsg.c | 15 +++++++++++++++ source/dnode/mnode/impl/src/mndCompact.c | 5 +++-- source/libs/parser/inc/parAst.h | 4 ++-- source/libs/parser/inc/sql.y | 9 +++++++-- source/libs/parser/src/parAstCreater.c | 6 ++++-- source/libs/parser/src/parTokenizer.c | 3 ++- source/libs/parser/src/parTranslater.c | 12 ++++++++---- 9 files changed, 45 insertions(+), 13 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 6d58748a3b..1ce8da415c 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1639,6 +1639,7 @@ typedef struct { int32_t sqlLen; char* sql; SArray* vgroupIds; + int8_t metaOnly; } SCompactDbReq; int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq); @@ -2083,6 +2084,7 @@ typedef struct { int64_t compactStartTime; STimeWindow tw; int32_t compactId; + int8_t metaOnly; } SCompactVnodeReq; int32_t tSerializeSCompactVnodeReq(void* buf, int32_t bufLen, SCompactVnodeReq* pReq); diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 2cd743b37e..92fa9bf0ee 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -169,6 +169,7 @@ typedef struct SCompactDatabaseStmt { char dbName[TSDB_DB_NAME_LEN]; SNode* pStart; SNode* pEnd; + bool metaOnly; } SCompactDatabaseStmt; typedef struct SCompactVgroupsStmt { @@ -177,6 +178,7 @@ typedef struct SCompactVgroupsStmt { SNodeList* vgidList; SNode* pStart; SNode* pEnd; + bool metaOnly; } SCompactVgroupsStmt; typedef struct STableOptions { diff --git a/source/common/src/msg/tmsg.c b/source/common/src/msg/tmsg.c index 6a3e1948c8..ed0a50e880 100644 --- a/source/common/src/msg/tmsg.c +++ b/source/common/src/msg/tmsg.c @@ -4686,6 +4686,8 @@ int32_t tSerializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq) } } + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->metaOnly)); + tEndEncode(&encoder); _exit: @@ -4729,6 +4731,12 @@ int32_t tDeserializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq } } } + + if (!tDecodeIsEnd(&decoder)) { + TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->metaOnly)); + } else { + pReq->metaOnly = false; + } tEndDecode(&decoder); _exit: @@ -7156,6 +7164,7 @@ int32_t tSerializeSCompactVnodeReq(void *buf, int32_t bufLen, SCompactVnodeReq * TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->tw.ekey)); TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->compactId)); + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->metaOnly)); tEndEncode(&encoder); @@ -7193,6 +7202,12 @@ int32_t tDeserializeSCompactVnodeReq(void *buf, int32_t bufLen, SCompactVnodeReq TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->compactId)); } + if (!tDecodeIsEnd(&decoder)) { + 
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->metaOnly)); + } else { + pReq->metaOnly = false; + } + tEndDecode(&decoder); _exit: tDecoderClear(&decoder); diff --git a/source/dnode/mnode/impl/src/mndCompact.c b/source/dnode/mnode/impl/src/mndCompact.c index 33a6ddcc5d..f9f50f235f 100644 --- a/source/dnode/mnode/impl/src/mndCompact.c +++ b/source/dnode/mnode/impl/src/mndCompact.c @@ -927,7 +927,8 @@ static int32_t mndCompactDispatchAudit(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pD return 0; } -extern int32_t mndCompactDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, STimeWindow tw, SArray *vgroupIds); +extern int32_t mndCompactDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, STimeWindow tw, SArray *vgroupIds, + bool metaOnly); static int32_t mndCompactDispatch(SRpcMsg *pReq) { int32_t code = 0; SMnode *pMnode = pReq->info.node; @@ -982,7 +983,7 @@ static int32_t mndCompactDispatch(SRpcMsg *pReq) { .skey = convertTimePrecision(curMs + compactStartTime * 60000LL, TSDB_TIME_PRECISION_MILLI, pDb->cfg.precision), .ekey = convertTimePrecision(curMs + compactEndTime * 60000LL, TSDB_TIME_PRECISION_MILLI, pDb->cfg.precision)}; - if ((code = mndCompactDb(pMnode, NULL, pDb, tw, NULL)) == 0) { + if ((code = mndCompactDb(pMnode, NULL, pDb, tw, NULL, false)) == 0) { mInfo("db:%p,%s, succeed to dispatch compact with range:[%" PRIi64 ",%" PRIi64 "], interval:%dm, start:%" PRIi64 "m, end:%" PRIi64 "m, offset:%" PRIi8 "h", pDb, pDb->name, tw.skey, tw.ekey, pDb->cfg.compactInterval, compactStartTime, compactEndTime, diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index 65274f85e1..41162c46f6 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -202,9 +202,9 @@ SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* SNode* createFlushDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName); SNode* createTrimDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, int32_t maxSpeed); SNode* createS3MigrateDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName); -SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart, SNode* pEnd); +SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart, SNode* pEnd, bool metaOnly); SNode* createCompactVgroupsStmt(SAstCreateContext* pCxt, SNode* pDbName, SNodeList* vgidList, SNode* pStart, - SNode* pEnd); + SNode* pEnd, bool metaOnly); SNode* createDefaultTableOptions(SAstCreateContext* pCxt); SNode* createAlterTableOptions(SAstCreateContext* pCxt); SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType type, void* pVal); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 42d2e95d24..0b1cafac32 100755 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -238,8 +238,13 @@ cmd ::= ALTER DATABASE db_name(A) alter_db_options(B). cmd ::= FLUSH DATABASE db_name(A). { pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &A); } cmd ::= TRIM DATABASE db_name(A) speed_opt(B). { pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &A, B); } cmd ::= S3MIGRATE DATABASE db_name(A). { pCxt->pRootNode = createS3MigrateDatabaseStmt(pCxt, &A); } -cmd ::= COMPACT DATABASE db_name(A) start_opt(B) end_opt(C). { pCxt->pRootNode = createCompactStmt(pCxt, &A, B, C); } -cmd ::= COMPACT db_name_cond_opt(A) VGROUPS IN NK_LP integer_list(B) NK_RP start_opt(C) end_opt(D). { pCxt->pRootNode = createCompactVgroupsStmt(pCxt, A, B, C, D); } +cmd ::= COMPACT DATABASE db_name(A) start_opt(B) end_opt(C) meta_only(D). 
{ pCxt->pRootNode = createCompactStmt(pCxt, &A, B, C, D); } +cmd ::= COMPACT db_name_cond_opt(A) VGROUPS IN NK_LP integer_list(B) NK_RP start_opt(C) end_opt(D) meta_only(E). { pCxt->pRootNode = createCompactVgroupsStmt(pCxt, A, B, C, D, E); } + +%type meta_only { bool } +%destructor meta_only { } +meta_only(A) ::= . { A = false; } +exists_opt(A) ::= META_ONLY. { A = true; } %type not_exists_opt { bool } %destructor not_exists_opt { } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 341fc7e603..55e9ca2431 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -2212,7 +2212,7 @@ _err: return NULL; } -SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart, SNode* pEnd) { +SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart, SNode* pEnd, bool metaOnly) { CHECK_PARSER_STATUS(pCxt); CHECK_NAME(checkDbName(pCxt, pDbName, false)); SCompactDatabaseStmt* pStmt = NULL; @@ -2221,6 +2221,7 @@ SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart COPY_STRING_FORM_ID_TOKEN(pStmt->dbName, pDbName); pStmt->pStart = pStart; pStmt->pEnd = pEnd; + pStmt->metaOnly = metaOnly; return (SNode*)pStmt; _err: nodesDestroyNode(pStart); @@ -2229,7 +2230,7 @@ _err: } SNode* createCompactVgroupsStmt(SAstCreateContext* pCxt, SNode* pDbName, SNodeList* vgidList, SNode* pStart, - SNode* pEnd) { + SNode* pEnd, bool metaOnly) { CHECK_PARSER_STATUS(pCxt); if (NULL == pDbName) { snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "database not specified"); @@ -2243,6 +2244,7 @@ SNode* createCompactVgroupsStmt(SAstCreateContext* pCxt, SNode* pDbName, SNodeLi pStmt->vgidList = vgidList; pStmt->pStart = pStart; pStmt->pEnd = pEnd; + pStmt->metaOnly = metaOnly; return (SNode*)pStmt; _err: nodesDestroyNode(pDbName); diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c index eb949d5206..641d3877d8 100644 --- a/source/libs/parser/src/parTokenizer.c +++ b/source/libs/parser/src/parTokenizer.c @@ -360,7 +360,8 @@ static SKeyword keywordTable[] = { {"ON_FAILURE", TK_ON_FAILURE}, {"NOTIFY_HISTORY", TK_NOTIFY_HISTORY}, {"REGEXP", TK_REGEXP}, - {"TRUE_FOR", TK_TRUE_FOR} + {"TRUE_FOR", TK_TRUE_FOR}, + {"META_ONLY", TK_META_ONLY} }; // clang-format on diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 2fdba9bad9..568e9d70b7 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -11315,9 +11315,11 @@ static int32_t translateCompactRange(STranslateContext* pCxt, const char* dbName } static int32_t translateCompactDb(STranslateContext* pCxt, SCompactDatabaseStmt* pStmt) { - SCompactDbReq compactReq = {0}; - SName name; - int32_t code = TSDB_CODE_SUCCESS; + SCompactDbReq compactReq = { + .metaOnly = pStmt->metaOnly, + }; + SName name; + int32_t code = TSDB_CODE_SUCCESS; code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS != code) return code; @@ -11384,7 +11386,9 @@ static int32_t translateVgroupList(STranslateContext* pCxt, SNodeList* vgroupLis static int32_t translateCompactVgroups(STranslateContext* pCxt, SCompactVgroupsStmt* pStmt) { int32_t code = TSDB_CODE_SUCCESS; SName name; - SCompactDbReq req = {0}; + SCompactDbReq req = { + .metaOnly = pStmt->metaOnly, + }; code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, 
strlen(((SValueNode*)pStmt->pDbName)->literal));

From 9290c09566d0bca025e85ab73ebb59d4fb91abce Mon Sep 17 00:00:00 2001
From: Hongze Cheng
Date: Tue, 25 Feb 2025 17:19:46 +0800
Subject: [PATCH 06/16] more code

---
 contrib/test/CMakeLists.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt
index 318d00b92c..d88af23a6d 100644
--- a/contrib/test/CMakeLists.txt
+++ b/contrib/test/CMakeLists.txt
@@ -20,9 +20,9 @@ if(${BUILD_WITH_SQLITE})
 add_subdirectory(sqlite)
 endif(${BUILD_WITH_SQLITE})
 
-if(${BUILD_S3})
- add_subdirectory(azure)
-endif()
+# if(${BUILD_S3})
+# add_subdirectory(azure)
+# endif()
 
 add_subdirectory(tdev)
 add_subdirectory(lz4)

From 275c7f55ce1fc5bafd5203c965f4279830a8049f Mon Sep 17 00:00:00 2001
From: Hongze Cheng
Date: Tue, 25 Feb 2025 17:24:30 +0800
Subject: [PATCH 07/16] update document

---
 docs/en/08-operation/04-maintenance.md | 5 +++--
 docs/zh/08-operation/04-maintenance.md | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/docs/en/08-operation/04-maintenance.md b/docs/en/08-operation/04-maintenance.md
index 5712a710a1..a0aa473918 100644
--- a/docs/en/08-operation/04-maintenance.md
+++ b/docs/en/08-operation/04-maintenance.md
@@ -16,8 +16,8 @@ TDengine is designed for various writing scenarios, and many of these scenarios
 ### Syntax
 
 ```sql
-COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
-COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
+COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
+COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
 SHOW COMPACTS;
 SHOW COMPACT compact_id;
 KILL COMPACT compact_id;
@@ -30,6 +30,7 @@ KILL COMPACT compact_id;
 - COMPACT will merge multiple STT files
 - You can specify the start time of the COMPACT data with the start with keyword
 - You can specify the end time of the COMPACT data with the end with keyword
+- You can specify the META_ONLY keyword to compact only the metadata, which is not compacted by default
 - The COMPACT command will return the ID of the COMPACT task
 - COMPACT tasks are executed asynchronously in the background, and you can view the progress of COMPACT tasks using the SHOW COMPACTS command
 - The SHOW command will return the ID of the COMPACT task, and you can terminate the COMPACT task using the KILL COMPACT command
diff --git a/docs/zh/08-operation/04-maintenance.md b/docs/zh/08-operation/04-maintenance.md
index bb9ea20fbf..c0ca5ee00f 100644
--- a/docs/zh/08-operation/04-maintenance.md
+++ b/docs/zh/08-operation/04-maintenance.md
@@ -17,8 +17,8 @@ TDengine 面向多种写入场景,而很多写入场景下,TDengine 的存
 ### 语法
 
 ```SQL
-COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
-COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
+COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
+COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) 
[start with 'XXXX'] [end with 'YYYY'] [META_ONLY]; SHOW COMPACTS; SHOW COMPACT compact_id; KILL COMPACT compact_id; @@ -32,6 +32,7 @@ KILL COMPACT compact_id; - COMPACT 会合并多个 STT 文件 - 可通过 start with 关键字指定 COMPACT 数据的起始时间 - 可通过 end with 关键字指定 COMPACT 数据的终止时间 +- 可通过 `META_ONLY` 关键字指定只 compact 元数据。元数据默认情况下不会 compact。 - COMPACT 命令会返回 COMPACT 任务的 ID - COMPACT 任务会在后台异步执行,可以通过 SHOW COMPACTS 命令查看 COMPACT 任务的进度 - SHOW 命令会返回 COMPACT 任务的 ID,可以通过 KILL COMPACT 命令终止 COMPACT 任务 From b26166b48d1cea29ff16ab8955cbe28802279a5c Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 25 Feb 2025 17:57:25 +0800 Subject: [PATCH 08/16] support meta_only option --- source/dnode/mnode/impl/inc/mndVgroup.h | 2 +- source/dnode/mnode/impl/src/mndVgroup.c | 11 ++++++----- source/libs/parser/inc/sql.y | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h index a8a806e497..cff3064ed9 100644 --- a/source/dnode/mnode/impl/inc/mndVgroup.h +++ b/source/dnode/mnode/impl/inc/mndVgroup.h @@ -47,7 +47,7 @@ int32_t mndSetMoveVgroupsInfoToTrans(SMnode *, STrans *pTrans, int32_t dropDnode int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pVgroup, SArray *pArray, SVgObj* pNewVgroup); int32_t mndBuildCompactVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs, - STimeWindow tw); + STimeWindow tw, bool metaOnly); int32_t mndBuildRaftAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pVgroup, SArray *pArray); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 311beb0daa..c1d75a1a4d 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -3632,11 +3632,12 @@ bool mndVgroupInDnode(SVgObj *pVgroup, int32_t dnodeId) { } static void *mndBuildCompactVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen, int64_t compactTs, - STimeWindow tw) { + STimeWindow tw, bool metaOnly) { SCompactVnodeReq compactReq = {0}; compactReq.dbUid = pDb->uid; compactReq.compactStartTime = compactTs; compactReq.tw = tw; + compactReq.metaOnly = metaOnly; tstrncpy(compactReq.db, pDb->name, TSDB_DB_FNAME_LEN); mInfo("vgId:%d, build compact vnode config req", pVgroup->vgId); @@ -3667,13 +3668,13 @@ static void *mndBuildCompactVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgrou } static int32_t mndAddCompactVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs, - STimeWindow tw) { + STimeWindow tw, bool metaOnly) { int32_t code = 0; STransAction action = {0}; action.epSet = mndGetVgroupEpset(pMnode, pVgroup); int32_t contLen = 0; - void *pReq = mndBuildCompactVnodeReq(pMnode, pDb, pVgroup, &contLen, compactTs, tw); + void *pReq = mndBuildCompactVnodeReq(pMnode, pDb, pVgroup, &contLen, compactTs, tw, metaOnly); if (pReq == NULL) { code = TSDB_CODE_MND_RETURN_VALUE_NULL; if (terrno != 0) code = terrno; @@ -3693,7 +3694,7 @@ static int32_t mndAddCompactVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj * } int32_t mndBuildCompactVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs, - STimeWindow tw) { - TAOS_CHECK_RETURN(mndAddCompactVnodeAction(pMnode, pTrans, pDb, pVgroup, compactTs, tw)); + STimeWindow tw, bool metaOnly) { + TAOS_CHECK_RETURN(mndAddCompactVnodeAction(pMnode, pTrans, pDb, pVgroup, compactTs, tw, metaOnly)); return 0; } diff --git 
a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 0b1cafac32..d30b247af9 100755 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -244,7 +244,7 @@ cmd ::= COMPACT db_name_cond_opt(A) VGROUPS IN NK_LP integer_list(B) NK_RP start %type meta_only { bool } %destructor meta_only { } meta_only(A) ::= . { A = false; } -exists_opt(A) ::= META_ONLY. { A = true; } +meta_only(A) ::= META_ONLY. { A = true; } %type not_exists_opt { bool } %destructor not_exists_opt { } From 0cba2fe2e420248458b48f4bec1475fa052a9454 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 26 Feb 2025 16:09:07 +0800 Subject: [PATCH 09/16] more --- .../{ => compact}/test_compact_meta.py | 65 +++++++++++++------ 1 file changed, 44 insertions(+), 21 deletions(-) rename tests/test_new/storage/{ => compact}/test_compact_meta.py (52%) diff --git a/tests/test_new/storage/test_compact_meta.py b/tests/test_new/storage/compact/test_compact_meta.py similarity index 52% rename from tests/test_new/storage/test_compact_meta.py rename to tests/test_new/storage/compact/test_compact_meta.py index e1ee134c6a..0088ba3252 100644 --- a/tests/test_new/storage/test_compact_meta.py +++ b/tests/test_new/storage/compact/test_compact_meta.py @@ -1,11 +1,23 @@ # tests/test_new/xxx/xxx/test_xxx.py # import ... +# ./pytest.sh python3 ./test.py -f storage/compact/test_compact_meta.py import random import taos +import taos +import frame +import frame.etool -class TestXxxx: +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * +from frame.autogen import * + + +class TestCompactMeta: def init(self): tdLog.debug("start to execute %s" % __file__) @@ -49,32 +61,39 @@ class TestXxxx: num_of_child_tables = 10000 max_alter_times = 100 + # Drop database + sql = f'drop database if exists {dbname}' + print(sql) + tdSql.execute(sql) + # Create database - tdSql.query(f'create database {dbname} vgroups 1') - - # Create super table - sql = f'create {dbname}.{super_table} (ts timestamp, c1 int) tags (tag1 int)' + sql = f'create database {dbname}' + print(sql) tdSql.query(sql) - # Create child tables - for i in range(num_of_child_tables): - sql = f'create {dbname}.{child_table_prefix}{i} using {dbname}.{super_table} tags ({i})' - tdSql.query(sql) + # # Create super table + # sql = f'create {dbname}.{super_table} (ts timestamp, c1 int) tags (tag1 int)' + # tdSql.query(sql) - # Alter child tables - for i in range(num_of_child_tables): - for j in range(random.randint(1, max_alter_times)): - sql = f'alter table {dbname}.{child_table_prefix}{i} set tag1 = {i + j}' - tdSql.query(sql) + # # Create child tables + # for i in range(num_of_child_tables): + # sql = f'create {dbname}.{child_table_prefix}{i} using {dbname}.{super_table} tags ({i})' + # tdSql.query(sql) - # Alter super tables - for i in range(random.randint(1, max_alter_times)): - sql = f'alter table {dbname}.{super_table} add column c{i+1} int' - tdSql.query(sql) + # # Alter child tables + # for i in range(num_of_child_tables): + # for j in range(random.randint(1, max_alter_times)): + # sql = f'alter table {dbname}.{child_table_prefix}{i} set tag1 = {i + j}' + # tdSql.query(sql) - # Compact meta - sql = f'compact database {dbname}' - tdSql.query(sql) + # # Alter super tables + # for i in range(random.randint(1, max_alter_times)): + # sql = f'alter table {dbname}.{super_table} add column c{i+1} int' + # tdSql.query(sql) + + # # Compact meta + # sql = f'compact database {dbname}' + # tdSql.query(sql) def 
run(self): self.test_template() @@ -83,3 +102,7 @@ class TestXxxx: def stop(self): tdLog.success("%s successfully executed" % __file__) + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 791c9de7297ea33893c05483a104f7ab2dcb3f91 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 26 Feb 2025 16:21:21 +0800 Subject: [PATCH 10/16] refactor: simplify module loading by introducing dynamicLoadModule function --- tests/pytest/util/cases.py | 32 ++++++++++++++++++++++++++------ tests/test_new/test.py | 30 +++++++++++++++++++++--------- 2 files changed, 47 insertions(+), 15 deletions(-) diff --git a/tests/pytest/util/cases.py b/tests/pytest/util/cases.py index eee8809ad0..9774e91a9c 100644 --- a/tests/pytest/util/cases.py +++ b/tests/pytest/util/cases.py @@ -51,12 +51,21 @@ class TDCases: def addCluster(self, name, case): self.clusterCases.append(TDCase(name, case)) + def get_local_classes(self, module): + classes = [] + for name, obj in inspect.getmembers(module, inspect.isclass): + if inspect.getmodule(obj) == module: + classes.append(name) + return classes + def runAllLinux(self, conn): # TODO: load all Linux cases here runNum = 0 for tmp in self.linuxCases: if tmp.name.find(fileName) != -1: - case = testModule.TDTestCase() + # get the last class name as the test case class name + case_class = getattr(testModule, self.get_local_classes(testModule)[0]) + case = case_class() case.init(conn) case.run() case.stop() @@ -71,7 +80,9 @@ class TDCases: runNum = 0 for tmp in self.linuxCases: if tmp.name.find(fileName) != -1: - case = testModule.TDTestCase() + # get the last class name as the test case class name + case_class = getattr(testModule, self.get_local_classes(testModule)[-1]) + case = case_class() case.init(conn, self._logSql, replicaVar) try: case.run() @@ -88,7 +99,9 @@ class TDCases: runNum = 0 for tmp in self.windowsCases: if tmp.name.find(fileName) != -1: - case = testModule.TDTestCase() + # get the last class name as the test case class name + case_class = getattr(testModule, self.get_local_classes(testModule)[-1]) + case = case_class() case.init(conn) case.run() case.stop() @@ -102,7 +115,9 @@ class TDCases: runNum = 0 for tmp in self.windowsCases: - if tmp.name.find(fileName) != -1: + # get the last class name as the test case class name + case_class = getattr(testModule, self.get_local_classes(testModule)[-1]) + case = case_class() case = testModule.TDTestCase() case.init(conn, self._logSql,replicaVar) try: @@ -117,12 +132,15 @@ class TDCases: def runAllCluster(self): # TODO: load all cluster case module here + testModule = self.__dynamicLoadModule(fileName) runNum = 0 for tmp in self.clusterCases: if tmp.name.find(fileName) != -1: tdLog.notice("run cases like %s" % (fileName)) - case = testModule.TDTestCase() + # get the last class name as the test case class name + case_class = getattr(testModule, self.get_local_classes(testModule)[-1]) + case = case_class() case.init() case.run() case.stop() @@ -137,7 +155,9 @@ class TDCases: runNum = 0 for tmp in self.clusterCases: if tmp.name.find(fileName) != -1: - tdLog.notice("run cases like %s" % (fileName)) + # get the last class name as the test case class name + case_class = getattr(testModule, self.get_local_classes(testModule)[-1]) + case = case_class() case = testModule.TDTestCase() case.init() case.run() diff --git a/tests/test_new/test.py b/tests/test_new/test.py index ab1bdc21d3..e9e6f12236 100644 --- a/tests/test_new/test.py +++ b/tests/test_new/test.py @@ -24,6 +24,7 @@ import platform 
import socket import threading import importlib +import inspect print(f"Python version: {sys.version}") print(f"Version info: {sys.version_info}") @@ -58,6 +59,17 @@ def checkRunTimeError(): if hwnd: os.system("TASKKILL /F /IM taosd.exe") +def get_local_classes(module): + classes = [] + for name, obj in inspect.getmembers(module, inspect.isclass): + if inspect.getmodule(obj) == module: + classes.append(name) + return classes + +def dynamicLoadModule(fileName): + moduleName = fileName.replace(".py", "").replace(os.sep, ".") + return importlib.import_module(moduleName, package='..') + # # run case on previous cluster # @@ -68,9 +80,9 @@ def runOnPreviousCluster(host, config, fileName): sep = "/" if platform.system().lower() == 'windows': sep = os.sep - moduleName = fileName.replace(".py", "").replace(sep, ".") - uModule = importlib.import_module(moduleName) - case = uModule.TDTestCase() + uModule = dynamicLoadModule(fileName) + case_class = getattr(uModule, get_local_classes(uModule)[-1]) + case = case_class() # create conn conn = taos.connect(host, config) @@ -350,10 +362,10 @@ if __name__ == "__main__": updateCfgDictStr = '' # adapter_cfg_dict_str = '' if is_test_framework: - moduleName = fileName.replace(".py", "").replace(os.sep, ".") - uModule = importlib.import_module(moduleName) + uModule = dynamicLoadModule(fileName) try: - ucase = uModule.TDTestCase() + case_class = getattr(uModule, get_local_classes(uModule)[-1]) + ucase = case_class() if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')): updateCfgDict = ucase.updatecfgDict updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode() @@ -522,10 +534,10 @@ if __name__ == "__main__": except: pass if is_test_framework: - moduleName = fileName.replace(".py", "").replace("/", ".") - uModule = importlib.import_module(moduleName) + uModule = dynamicLoadModule(fileName) try: - ucase = uModule.TDTestCase() + case_class = getattr(uModule, get_local_classes(uModule)[-1]) + ucase = case_class() if (json.dumps(updateCfgDict) == '{}'): updateCfgDict = ucase.updatecfgDict if (json.dumps(adapter_cfg_dict) == '{}'): From f1470cd79c3fa8cbb4091201d8b606324c187b3c Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 26 Feb 2025 18:19:45 +0800 Subject: [PATCH 11/16] more code --- .../storage/compact/test_compact_meta.py | 156 ++++++++---------- 1 file changed, 66 insertions(+), 90 deletions(-) diff --git a/tests/test_new/storage/compact/test_compact_meta.py b/tests/test_new/storage/compact/test_compact_meta.py index 0088ba3252..c806567aa2 100644 --- a/tests/test_new/storage/compact/test_compact_meta.py +++ b/tests/test_new/storage/compact/test_compact_meta.py @@ -1,108 +1,84 @@ # tests/test_new/xxx/xxx/test_xxx.py # import ... 
-# ./pytest.sh python3 ./test.py -f storage/compact/test_compact_meta.py +''' +./pytest.sh python3 ./test.py -f storage/compact/test_compact_meta.py +''' -import random import taos -import taos -import frame -import frame.etool +import sys +from math import inf +from util.dnodes import tdDnodes +from util.sql import * +from util.cases import * +from util.log import * -from frame.log import * -from frame.cases import * -from frame.sql import * -from frame.caseBase import * -from frame import * -from frame.autogen import * +sys.path.append("../tests/pytest") class TestCompactMeta: - def init(self): + def caseDescription(self): + ''' + case1: [TS-5445] Compact Meta Data + ''' + return + + def init(self, conn, logSql, replicaVer=1): tdLog.debug("start to execute %s" % __file__) - - def test_template(self): - """用例目标,必填,用一行简短总结 - <空行> - 用例详细描述,必填,允许多行 - <空行> - Since: 用例开始支持的TDengine版本,新增用例必填 - <空行> - Labels: 筛选标签,选填,多个标签用英文逗号分隔 - <空行> - Jira: 相关jira任务id,选填 - <空行> - History: 用例变更历史,选填,每行一次变更信息 - - 日期1 变更人1 变更原因1 - - 日期2 变更人2 变更原因2 - """ - - def test_demo(self): - """测试超级表插入各种数据类型 - - 使用多种数据类型创建超级表,向超级表插入数据, - 包括:常规数据,空数据,边界值等,插入均执行成功 - - Since: v3.3.0.0 - - Labels: stable, data_type - - Jira: TD-12345, TS-1234 - - History: - - 2024-2-6 Feng Chao Created - - 2024-2-7 Huo Hong updated for feature TD-23456 - """ - - def test_case1(self): - dbname = 'db' - super_table = "stb" - child_table_prefix = "ctb" - num_of_child_tables = 10000 - max_alter_times = 100 - - # Drop database - sql = f'drop database if exists {dbname}' - print(sql) - tdSql.execute(sql) - - # Create database - sql = f'create database {dbname}' - print(sql) - tdSql.query(sql) - - # # Create super table - # sql = f'create {dbname}.{super_table} (ts timestamp, c1 int) tags (tag1 int)' - # tdSql.query(sql) - - # # Create child tables - # for i in range(num_of_child_tables): - # sql = f'create {dbname}.{child_table_prefix}{i} using {dbname}.{super_table} tags ({i})' - # tdSql.query(sql) - - # # Alter child tables - # for i in range(num_of_child_tables): - # for j in range(random.randint(1, max_alter_times)): - # sql = f'alter table {dbname}.{child_table_prefix}{i} set tag1 = {i + j}' - # tdSql.query(sql) - - # # Alter super tables - # for i in range(random.randint(1, max_alter_times)): - # sql = f'alter table {dbname}.{super_table} add column c{i+1} int' - # tdSql.query(sql) - - # # Compact meta - # sql = f'compact database {dbname}' - # tdSql.query(sql) + tdSql.init(conn.cursor(), True) + self.conn = conn def run(self): - self.test_template() - self.test_demo() self.test_case1() def stop(self): + tdSql.close() tdLog.success("%s successfully executed" % __file__) + def test_case1(self): + """ + Description: + 1. Alter child table tags + 2. 
Make sure compact meta works + """ + db_name = 'db1' + stb_name = 'stb1' + ctb_name_prefix = 'ctb' + num_child_tables = 10000 -tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) + # Drop database + sql = f'drop database if exists {db_name}' + tdSql.execute(sql) + + # Create database + sql = f'create database {db_name} vgroups 1' + tdSql.execute(sql) + + # Create super table + sql = f'create table {db_name}.{stb_name} (ts timestamp, c1 int, c2 int) tags(t1 int)' + tdSql.execute(sql) + + # Create child tables + for i in range(1, num_child_tables+1): + sql = f'create table {db_name}.{ctb_name_prefix}{i} using {db_name}.{stb_name} tags({i})' + tdSql.execute(sql) + + # Alter child table tags + for i in range(1, num_child_tables+1): + sql = f'alter table {db_name}.{ctb_name_prefix}{i} set tag t1 = {i+1}' + tdSql.execute(sql) + + # Compact meta + sql = f'compact database {db_name} meta_only' + tdSql.execute(sql) + + def test_case2(self): + """ + Description: + After compact, the snapshot still works + """ + pass + + +tdCases.addWindows(__file__, TestCompactMeta()) +tdCases.addLinux(__file__, TestCompactMeta()) From b3ae5df8ac003676fda4d5d46841ead42d1de527 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 27 Feb 2025 14:06:30 +0800 Subject: [PATCH 12/16] enh: more code --- source/dnode/vnode/src/inc/vnodeInt.h | 7 ++++--- .../test_new/storage/compact/test_compact_meta.py | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 0feb2e8e4f..68eb4c3ce7 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -168,7 +168,7 @@ int metaTtlFindExpired(SMeta* pMeta, int64_t timePointMs, SArray* tb int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp); int metaUpdateChangeTimeWithLock(SMeta* pMeta, tb_uid_t uid, int64_t changeTimeMs); SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); -int64_t metaGetTableCreateTime(SMeta *pMeta, tb_uid_t uid, int lock); +int64_t metaGetTableCreateTime(SMeta* pMeta, tb_uid_t uid, int lock); int32_t metaGetTbTSchemaNotNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema); int32_t metaGetTbTSchemaMaybeNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema); STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); @@ -488,8 +488,9 @@ struct SVnode { SVATaskID commitTask; struct { - SMeta* pMeta; - SMeta* pNewMeta; + SMeta* pMeta; + SMeta* pNewMeta; + SVATaskID metaCompactTask; }; SSma* pSma; diff --git a/tests/test_new/storage/compact/test_compact_meta.py b/tests/test_new/storage/compact/test_compact_meta.py index c806567aa2..19aca5ff54 100644 --- a/tests/test_new/storage/compact/test_compact_meta.py +++ b/tests/test_new/storage/compact/test_compact_meta.py @@ -12,6 +12,7 @@ from util.dnodes import tdDnodes from util.sql import * from util.cases import * from util.log import * +import inspect sys.path.append("../tests/pytest") @@ -41,6 +42,8 @@ class TestCompactMeta: 1. Alter child table tags 2. 
Make sure compact meta works """ + tdLog.info(f'case {inspect.currentframe().f_code.co_name} start') + db_name = 'db1' stb_name = 'stb1' ctb_name_prefix = 'ctb' @@ -59,16 +62,28 @@ class TestCompactMeta: tdSql.execute(sql) # Create child tables + tdLog.info( + f'case {inspect.currentframe().f_code.co_name}: create {num_child_tables} child tables') for i in range(1, num_child_tables+1): + if i % 100 == 0: + tdLog.info( + f'case {inspect.currentframe().f_code.co_name}: create {i} child tables') sql = f'create table {db_name}.{ctb_name_prefix}{i} using {db_name}.{stb_name} tags({i})' tdSql.execute(sql) # Alter child table tags + tdLog.info( + f'case {inspect.currentframe().f_code.co_name}: alter child table tags') for i in range(1, num_child_tables+1): + if i % 100 == 0: + tdLog.info( + f'case {inspect.currentframe().f_code.co_name}: altered {i} child tables') sql = f'alter table {db_name}.{ctb_name_prefix}{i} set tag t1 = {i+1}' tdSql.execute(sql) # Compact meta + tdLog.info( + f'case {inspect.currentframe().f_code.co_name}: start to compact meta') sql = f'compact database {db_name} meta_only' tdSql.execute(sql) From e2b48e43979a4513f9d5c7957ad6abb1f990bc1c Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 27 Feb 2025 15:06:49 +0800 Subject: [PATCH 13/16] enh: add more code --- source/dnode/vnode/src/inc/vnodeInt.h | 7 ++++--- source/dnode/vnode/src/vnd/vnodeOpen.c | 2 ++ source/dnode/vnode/src/vnd/vnodeQuery.c | 20 +++++++++++++------- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 68eb4c3ce7..b13a66da99 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -488,9 +488,10 @@ struct SVnode { SVATaskID commitTask; struct { - SMeta* pMeta; - SMeta* pNewMeta; - SVATaskID metaCompactTask; + TdThreadRwlock metaRWLock; + SMeta* pMeta; + SMeta* pNewMeta; + SVATaskID metaCompactTask; }; SSma* pSma; diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 280ee527f7..26b8d310e8 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -449,6 +449,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC } // open meta + (void)taosThreadRwlockInit(&pVnode->metaRWLock, NULL); vInfo("vgId:%d, start to open vnode meta", TD_VID(pVnode)); if (metaOpen(pVnode, &pVnode->pMeta, rollback) < 0) { vError("vgId:%d, failed to open vnode meta since %s", TD_VID(pVnode), tstrerror(terrno)); @@ -548,6 +549,7 @@ _err: if (pVnode->pMeta) metaClose(&pVnode->pMeta); if (pVnode->freeList) vnodeCloseBufPool(pVnode); + (void)taosThreadRwlockDestroy(&pVnode->metaRWLock); taosMemoryFree(pVnode); return NULL; } diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index d765861381..49dfb99499 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -49,7 +49,7 @@ int32_t fillTableColCmpr(SMetaReader *reader, SSchemaExt *pExt, int32_t numOfCol return 0; } -void vnodePrintTableMeta(STableMetaRsp* pMeta) { +void vnodePrintTableMeta(STableMetaRsp *pMeta) { if (!(qDebugFlag & DEBUG_DEBUG)) { return; } @@ -70,14 +70,13 @@ void vnodePrintTableMeta(STableMetaRsp* pMeta) { qDebug("sysInfo:%d", pMeta->sysInfo); if (pMeta->pSchemas) { for (int32_t i = 0; i < (pMeta->numOfColumns + pMeta->numOfTags); ++i) { - SSchema* pSchema = pMeta->pSchemas + i; - qDebug("%d col/tag: type:%d, flags:%d, colId:%d, 
bytes:%d, name:%s", i, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes, pSchema->name); + SSchema *pSchema = pMeta->pSchemas + i; + qDebug("%d col/tag: type:%d, flags:%d, colId:%d, bytes:%d, name:%s", i, pSchema->type, pSchema->flags, + pSchema->colId, pSchema->bytes, pSchema->name); } } - } - int32_t vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg, bool direct) { STableInfoReq infoReq = {0}; STableMetaRsp metaRsp = {0}; @@ -528,6 +527,13 @@ _exit: return code; } +#define VNODE_DO_META_QUERY(pVnode, cmd) \ + do { \ + (void)taosThreadRwlockRdlock(&(pVnode)->metaRWLock); \ + cmd; \ + (void)taosThreadRwlockUnlock(&(pVnode)->metaRWLock); \ + } while (0) + int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) { SSyncState state = syncGetState(pVnode->sync); pLoad->syncAppliedIndex = pVnode->state.applied; @@ -543,8 +549,8 @@ int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) { pLoad->learnerProgress = state.progress; pLoad->cacheUsage = tsdbCacheGetUsage(pVnode); pLoad->numOfCachedTables = tsdbCacheGetElems(pVnode); - pLoad->numOfTables = metaGetTbNum(pVnode->pMeta); - pLoad->numOfTimeSeries = metaGetTimeSeriesNum(pVnode->pMeta, 1); + VNODE_DO_META_QUERY(pVnode, pLoad->numOfTables = metaGetTbNum(pVnode->pMeta)); + VNODE_DO_META_QUERY(pVnode, pLoad->numOfTimeSeries = metaGetTimeSeriesNum(pVnode->pMeta, 1)); pLoad->totalStorage = (int64_t)3 * 1073741824; pLoad->compStorage = (int64_t)2 * 1073741824; pLoad->pointsWritten = 100; From cb517d75871776c0160758a8497afd2b83b25f6b Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 27 Feb 2025 16:01:22 +0800 Subject: [PATCH 14/16] more test --- .../storage/compact/test_compact_meta.py | 153 +++++++++++++++++- 1 file changed, 151 insertions(+), 2 deletions(-) diff --git a/tests/test_new/storage/compact/test_compact_meta.py b/tests/test_new/storage/compact/test_compact_meta.py index 19aca5ff54..b209cbf4e9 100644 --- a/tests/test_new/storage/compact/test_compact_meta.py +++ b/tests/test_new/storage/compact/test_compact_meta.py @@ -13,6 +13,7 @@ from util.sql import * from util.cases import * from util.log import * import inspect +import random sys.path.append("../tests/pytest") @@ -31,6 +32,7 @@ class TestCompactMeta: def run(self): self.test_case1() + self.test_case2() def stop(self): tdSql.close() @@ -71,6 +73,16 @@ class TestCompactMeta: sql = f'create table {db_name}.{ctb_name_prefix}{i} using {db_name}.{stb_name} tags({i})' tdSql.execute(sql) + # Insert some data + tdLog.info( + f'case {inspect.currentframe().f_code.co_name}: insert data to child tables') + for i in range(1, num_child_tables+1): + if i % 100 == 0: + tdLog.info( + f'case {inspect.currentframe().f_code.co_name}: insert data to {i} child tables') + sql = f'insert into {db_name}.{ctb_name_prefix}{i} values(now, 1, 2)' + tdSql.execute(sql) + # Alter child table tags tdLog.info( f'case {inspect.currentframe().f_code.co_name}: alter child table tags') @@ -81,18 +93,155 @@ class TestCompactMeta: sql = f'alter table {db_name}.{ctb_name_prefix}{i} set tag t1 = {i+1}' tdSql.execute(sql) + # Randomly select 100 child tables to do query + tdLog.info( + f'case {inspect.currentframe().f_code.co_name}: randomly select 100 child tables to query') + selected_tables = random.sample(range(1, num_child_tables + 1), 100) + for i, table_idx in enumerate(selected_tables): + # Query data from the child table + sql = f'select count(*) from {db_name}.{stb_name} where t1 = {table_idx + 1}' + tdSql.query(sql) + tdSql.checkData(0, 0, 1) # Check c2 column value + # Compact meta tdLog.info( 

From cb517d75871776c0160758a8497afd2b83b25f6b Mon Sep 17 00:00:00 2001
From: Hongze Cheng
Date: Thu, 27 Feb 2025 16:01:22 +0800
Subject: [PATCH 14/16] more test

---
 .../storage/compact/test_compact_meta.py | 153 +++++++++++++++++-
 1 file changed, 151 insertions(+), 2 deletions(-)

diff --git a/tests/test_new/storage/compact/test_compact_meta.py b/tests/test_new/storage/compact/test_compact_meta.py
index 19aca5ff54..b209cbf4e9 100644
--- a/tests/test_new/storage/compact/test_compact_meta.py
+++ b/tests/test_new/storage/compact/test_compact_meta.py
@@ -13,6 +13,7 @@ from util.sql import *
 from util.cases import *
 from util.log import *
 import inspect
+import random
 
 sys.path.append("../tests/pytest")
 
@@ -31,6 +32,7 @@ class TestCompactMeta:
 
     def run(self):
         self.test_case1()
+        self.test_case2()
 
     def stop(self):
         tdSql.close()
@@ -71,6 +73,16 @@ class TestCompactMeta:
             sql = f'create table {db_name}.{ctb_name_prefix}{i} using {db_name}.{stb_name} tags({i})'
             tdSql.execute(sql)
 
+        # Insert some data
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: insert data to child tables')
+        for i in range(1, num_child_tables+1):
+            if i % 100 == 0:
+                tdLog.info(
+                    f'case {inspect.currentframe().f_code.co_name}: insert data to {i} child tables')
+            sql = f'insert into {db_name}.{ctb_name_prefix}{i} values(now, 1, 2)'
+            tdSql.execute(sql)
+
         # Alter child table tags
         tdLog.info(
             f'case {inspect.currentframe().f_code.co_name}: alter child table tags')
         for i in range(1, num_child_tables+1):
+            if i % 100 == 0:
+                tdLog.info(
+                    f'case {inspect.currentframe().f_code.co_name}: altered {i} child tables')
             sql = f'alter table {db_name}.{ctb_name_prefix}{i} set tag t1 = {i+1}'
             tdSql.execute(sql)
 
+        # Randomly select 100 child tables to query
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: randomly select 100 child tables to query')
+        selected_tables = random.sample(range(1, num_child_tables + 1), 100)
+        for i, table_idx in enumerate(selected_tables):
+            # Query data from the child table
+            sql = f'select count(*) from {db_name}.{stb_name} where t1 = {table_idx + 1}'
+            tdSql.query(sql)
+            tdSql.checkData(0, 0, 1)  # Expect one row per selected child table
+
         # Compact meta
         tdLog.info(
             f'case {inspect.currentframe().f_code.co_name}: start to compact meta')
         sql = f'compact database {db_name} meta_only'
         tdSql.execute(sql)
 
+        # Wait for the compact to finish
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: wait compact is done')
+        while True:
+            sql = 'show compacts'
+            rows = tdSql.query(sql)
+            if rows == 0:
+                break
+            time.sleep(1)
+
+        # Write more data
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: insert more data to child tables')
+        for i in range(1, num_child_tables+1):
+            if i % 100 == 0:
+                tdLog.info(
+                    f'case {inspect.currentframe().f_code.co_name}: insert data to {i} child tables')
+            sql = f'insert into {db_name}.{ctb_name_prefix}{i} values(now, 1, 2)'
+            tdSql.execute(sql)
+
+        # Randomly select 100 child tables to query
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: query data again to verify')
+        for i, table_idx in enumerate(selected_tables):
+            # Query data from the child table
+            sql = f'select count(*) from {db_name}.{stb_name} where t1 = {table_idx + 1}'
+            tdSql.query(sql)
+            tdSql.checkData(0, 0, 2)  # Expect two rows per selected child table
+
     def test_case2(self):
         """
         Description:
-            After compact, the snapshot still works
+            1. Alter super table schema
+            2. Make sure compact meta works
         """
-        pass
+        tdLog.info(f'case {inspect.currentframe().f_code.co_name} start')
+
+        db_name = 'db2'
+        stb_name = 'stb2'
+        ctb_name_prefix = 'ctb'
+        num_child_tables = 1000
+
+        # Drop database
+        sql = f'drop database if exists {db_name}'
+        tdSql.execute(sql)
+
+        # Create database
+        sql = f'create database {db_name} vgroups 1'
+        tdSql.execute(sql)
+
+        # Create super table
+        sql = f'create table {db_name}.{stb_name} (ts timestamp, c1 int, c2 int) tags(t1 int)'
+        tdSql.execute(sql)
+
+        # Create child tables
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: create {num_child_tables} child tables')
+        for i in range(1, num_child_tables+1):
+            if i % 100 == 0:
+                tdLog.info(
+                    f'case {inspect.currentframe().f_code.co_name}: create {i} child tables')
+            sql = f'create table {db_name}.{ctb_name_prefix}{i} using {db_name}.{stb_name} tags({i})'
+            tdSql.execute(sql)
+
+        # Insert some data
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: insert data to child tables')
+        for i in range(1, num_child_tables+1):
+            if i % 100 == 0:
+                tdLog.info(
+                    f'case {inspect.currentframe().f_code.co_name}: insert data to {i} child tables')
+            sql = f'insert into {db_name}.{ctb_name_prefix}{i} (ts, c1) values (now, 1)'
+            tdSql.execute(sql)
+
+        # Alter super table schema
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: alter super table schema')
+        for i in range(3, 2000):
+            if i % 100 == 0:
+                tdLog.info(
+                    f'case {inspect.currentframe().f_code.co_name}: altered {i} times of super table schema')
+            # Add a column
+            sql = f'alter table {db_name}.{stb_name} add column c{i} int'
+            tdSql.execute(sql)
+
+            # Drop a column
+            sql = f'alter table {db_name}.{stb_name} drop column c{i}'
+            tdSql.execute(sql)
+
+        # Randomly select 100 child tables to query
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: randomly select 100 child tables to query')
+        selected_tables = random.sample(range(1, num_child_tables + 1), 100)
+        for i, table_idx in enumerate(selected_tables):
+            # Query data from the child table
+            sql = f'select count(*) from {db_name}.{stb_name} where t1 = {table_idx + 1}'
+            tdSql.query(sql)
+            tdSql.checkData(0, 0, 1)  # Expect one row per selected child table
+
+        # Compact meta
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: start to compact meta')
+        sql = f'compact database {db_name} meta_only'
+        tdSql.execute(sql)
+
+        # Wait for the compact to finish
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: wait compact is done')
+        while True:
+            sql = 'show compacts'
+            rows = tdSql.query(sql)
+            if rows == 0:
+                break
+            time.sleep(1)
+
+        # Write more data
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: insert more data to child tables')
+        for i in range(1, num_child_tables+1):
+            if i % 100 == 0:
+                tdLog.info(
+                    f'case {inspect.currentframe().f_code.co_name}: insert data to {i} child tables')
+            sql = f'insert into {db_name}.{ctb_name_prefix}{i} values(now, 1, 2)'
+            tdSql.execute(sql)
+
+        # Randomly select 100 child tables to query
+        tdLog.info(
+            f'case {inspect.currentframe().f_code.co_name}: query data again to verify')
+        for i, table_idx in enumerate(selected_tables):
+            # Query data from the child table
+            sql = f'select count(*) from {db_name}.{stb_name} where t1 = {table_idx + 1}'
+            tdSql.query(sql)
+            tdSql.checkData(0, 0, 2)  # Expect two rows per selected child table
 
 tdCases.addWindows(__file__, TestCompactMeta())

From de730ad9c68a4d3f05615f6786b82e173dab2201 Mon Sep 17 00:00:00 2001
From: Hongze Cheng
Date: Thu, 27 Feb 2025 17:20:02 +0800
Subject: [PATCH 15/16] more

---
 tests/parallel_test/cases.task | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index a12ebafc60..8f986ad445 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -1813,3 +1813,6 @@
 ,,n,develop-test,python3 ./test.py -f 2-query/ts-range.py
 ,,n,develop-test,python3 ./test.py -f 2-query/tag_scan.py
 ,,n,develop-test,python3 ./test.py -f 2-query/show_create_db.py
+
+# new test
+,,y,test_new,./pytest.sh python3 ./test.py -f storage/compact/test_compact_meta.py

From 57b9ca3b6ac2ab3860b26236ec18a374356e8142 Mon Sep 17 00:00:00 2001
From: chenhaoran
Date: Fri, 28 Feb 2025 15:57:29 +0800
Subject: [PATCH 16/16] refactor: reorganize import statements in test.py for clarity

---
 tests/test_new/test.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/tests/test_new/test.py b/tests/test_new/test.py
index 5a77551045..52b466c130 100644
--- a/tests/test_new/test.py
+++ b/tests/test_new/test.py
@@ -13,15 +13,6 @@
 # pip install src/connector/python/
 # -*- coding: utf-8 -*-
-import taosws
-import taosrest
-import taos
-from util.taosadapter import *
-from util.cluster import *
-from util.cases import *
-from util.dnodes import *
-from util.log import *
-import toml
 import os
 import sys
 import getopt
@@ -33,11 +24,20 @@ import platform
 import socket
 import threading
 import importlib
-import ast
 
 print(f"Python version: {sys.version}")
 print(f"Version info: {sys.version_info}")
 
+import toml
 sys.path.append("../pytest")
+from util.log import *
+from util.dnodes import *
+from util.cases import *
+from util.cluster import *
+from util.taosadapter import *
+
+import taos
+import taosrest
+import taosws
 
 def checkRunTimeError():