From 0b5f2ec57b8d8bcf8c8632527eccabc185f76285 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 30 Oct 2024 11:11:29 +0800 Subject: [PATCH 01/10] test(blob): testing & fixes for blob --- tests/army/storage/blob/s3Basic.json | 2 +- tests/army/storage/blob/s3Basic1.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/army/storage/blob/s3Basic.json b/tests/army/storage/blob/s3Basic.json index ee341b2096..2b911a989f 100644 --- a/tests/army/storage/blob/s3Basic.json +++ b/tests/army/storage/blob/s3Basic.json @@ -20,7 +20,7 @@ "replica": 1, "duration":"10d", "s3_keeplocal":"30d", - "s3_chunksize":"131072", + "s3_chunkpages":"131072", "tsdb_pagesize":"1", "s3_compact":"1", "wal_retention_size":"1", diff --git a/tests/army/storage/blob/s3Basic1.json b/tests/army/storage/blob/s3Basic1.json index 02be308443..087f89edec 100644 --- a/tests/army/storage/blob/s3Basic1.json +++ b/tests/army/storage/blob/s3Basic1.json @@ -20,7 +20,7 @@ "replica": 1, "duration":"10d", "s3_keeplocal":"30d", - "s3_chunksize":"131072", + "s3_chunkpages":"131072", "tsdb_pagesize":"1", "s3_compact":"1", "wal_retention_size":"1", From e9ef7a85744d7a84c6008fe4ea7cb192a03297ac Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 30 Oct 2024 14:29:26 +0800 Subject: [PATCH 02/10] rename s3_chunksize to s3_chunkpages --- tests/army/storage/blob/ablob.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/army/storage/blob/ablob.py b/tests/army/storage/blob/ablob.py index fae492a3df..f6e783d6f3 100644 --- a/tests/army/storage/blob/ablob.py +++ b/tests/army/storage/blob/ablob.py @@ -152,13 +152,13 @@ class TDTestCase(TBase): if keepLocal is not None: kw1 = f"s3_keeplocal {keepLocal}" if chunkSize is not None: - kw2 = f"s3_chunksize {chunkSize}" + kw2 = f"s3_chunkpages {chunkSize}" if compact is not None: kw3 = f"s3_compact {compact}" sql = f" create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}" tdSql.execute(sql, show=True) - #sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';" + #sql = f"select name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" sql = f"select * from information_schema.ins_databases where name='db1';" tdSql.query(sql) # 29 30 31 -> chunksize keeplocal compact @@ -178,9 +178,9 @@ class TDTestCase(TBase): f"create database db2 s3_keeplocal -1", f"create database db2 s3_keeplocal 0", f"create database db2 s3_keeplocal 365001", - f"create database db2 s3_chunksize -1", - f"create database db2 s3_chunksize 0", - f"create database db2 s3_chunksize 900000000", + f"create database db2 s3_chunkpages -1", + f"create database db2 s3_chunkpages 0", + f"create database db2 s3_chunkpages 900000000", f"create database db2 s3_compact -1", f"create database db2 s3_compact 100", f"create database db2 duration 1d s3_keeplocal 1d" From 6010e52a2389a77897a0bf474066973baf0f40ac Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 31 Oct 2024 17:42:06 +0800 Subject: [PATCH 03/10] blob/param: new cases for default value checking --- tests/army/storage/blob/ablob.py | 39 ++++++++++++++++---------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/tests/army/storage/blob/ablob.py b/tests/army/storage/blob/ablob.py index f6e783d6f3..d3e00f3424 100644 --- a/tests/army/storage/blob/ablob.py +++ b/tests/army/storage/blob/ablob.py @@ -31,8 +31,6 @@ from frame.eos import * class TDTestCase(TBase): - index = eutil.cpuRand(20) + 1 - bucketName = 
f"ci-bucket{index}" updatecfgDict = { "supportVnodes":"1000", 's3EndPoint': 'https://.blob.core.windows.net', @@ -44,7 +42,6 @@ class TDTestCase(TBase): 's3MigrateEnabled': '1' } - tdLog.info(f"assign bucketName is {bucketName}\n") maxFileSize = (128 + 10) * 1014 * 1024 # add 10M buffer def insertData(self): @@ -172,6 +169,23 @@ class TDTestCase(TBase): sql = "drop database db1" tdSql.execute(sql) + def checkDefault(self, keepLocal, chunkSize, compact): + sql = f" create database db1 vgroups 1" + tdSql.execute(sql, show=True) + #sql = f"select name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" + sql = f"select * from information_schema.ins_databases where name='db1';" + tdSql.query(sql) + # 29 30 31 -> chunksize keeplocal compact + if chunkSize is not None: + tdSql.checkData(0, 29, chunkSize) + if keepLocal is not None: + keepLocalm = keepLocal * 24 * 60 + tdSql.checkData(0, 30, f"{keepLocalm}m") + if compact is not None: + tdSql.checkData(0, 31, compact) + sql = "drop database db1" + tdSql.execute(sql) + def checkExcept(self): # errors sqls = [ @@ -226,16 +240,7 @@ class TDTestCase(TBase): # except self.checkExcept() - - # - def preDb(self, vgroups): - cnt = int(time.time())%2 + 1 - for i in range(cnt): - vg = eutil.cpuRand(9) + 1 - sql = f"create database predb vgroups {vg}" - tdSql.execute(sql, show=True) - sql = "drop database predb" - tdSql.execute(sql, show=True) + self.checkDefault(365, 131072, 1) # history def insertHistory(self): @@ -287,9 +292,6 @@ class TDTestCase(TBase): if eos.isArm64Cpu(): tdLog.success(f"{__file__} arm64 ignore executed") else: - - self.preDb(10) - # insert data self.insertData() @@ -311,7 +313,6 @@ class TDTestCase(TBase): # check insert correct again self.checkInsertCorrect() - # check stream correct and drop stream #self.checkStreamCorrect() @@ -321,7 +322,7 @@ class TDTestCase(TBase): # insert history disorder data self.insertHistory() - # checkBasic + # check db params self.checkBasic() #self.checkInsertCorrect() @@ -335,10 +336,8 @@ class TDTestCase(TBase): # drop database and free s3 file self.dropDb() - tdLog.success(f"{__file__} successfully executed") - tdCases.addLinux(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase()) From 71e27240cb3be095f5e6abf9f7e2d00722cfa7f3 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 4 Nov 2024 14:00:27 +0800 Subject: [PATCH 04/10] stream/rocks: fix meta file dir --- source/libs/stream/src/streamBackendRocksdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index b82d06b6c7..5923efbac9 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -5065,7 +5065,7 @@ int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) { goto _ERROR; } memset(dstBuf, 0, cap); - nBytes = snprintf(dstDir, cap, "%s%s%s", dstDir, TD_DIRSEP, chkpMeta); + nBytes = snprintf(dstBuf, cap, "%s%s%s", dstDir, TD_DIRSEP, chkpMeta); if (nBytes <= 0 || nBytes >= cap) { code = TSDB_CODE_OUT_OF_RANGE; goto _ERROR; From 175a3f7d57b887ab414f4108577fec6e619ad017 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 4 Nov 2024 14:22:28 +0800 Subject: [PATCH 05/10] strea/checkpoint: fix meta file path --- source/libs/stream/src/streamBackendRocksdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 
5923efbac9..b69e191059 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -5071,7 +5071,7 @@ int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) { goto _ERROR; } - TdFilePtr pFile = taosOpenFile(dstDir, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + TdFilePtr pFile = taosOpenFile(dstBuf, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (pFile == NULL) { code = terrno; stError("chkp failed to create meta file: %s, reason:%s", dstDir, tstrerror(code)); From 109c1a799f5a58f16582f783fca7ee4573c70a1f Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 4 Nov 2024 18:49:36 +0800 Subject: [PATCH 06/10] test/blob: perf json file for taos benchmark --- tests/army/storage/blob/perf.json | 67 +++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 tests/army/storage/blob/perf.json diff --git a/tests/army/storage/blob/perf.json b/tests/army/storage/blob/perf.json new file mode 100644 index 0000000000..002515873e --- /dev/null +++ b/tests/army/storage/blob/perf.json @@ -0,0 +1,67 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 4000, + "prepared_rand": 500, + "thread_count": 4, + "create_table_thread_count": 1, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 3, + "replica": 3, + "duration":"10d", + "s3_keeplocal":"30d", + "s3_chunkpages":"131072", + "tsdb_pagesize":"1", + "s3_compact":"1", + "wal_retention_size":"1", + "wal_retention_period":"1", + "flush_each_batch":"no", + "keep": "3650d" + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 500, + "insert_rows": 200000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "timestamp_step": 100, + "start_timestamp": 1600000000000, + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc" }, + { "type": "double", "name": "dc"}, + { "type": "tinyint", "name": "ti"}, + { "type": "smallint", "name": "si" }, + { "type": "int", "name": "ic" ,"max": 1,"min": 1}, + { "type": "bigint", "name": "bi" }, + { "type": "utinyint", "name": "uti"}, + { "type": "usmallint", "name": "usi"}, + { "type": "uint", "name": "ui" }, + { "type": "ubigint", "name": "ubi"}, + { "type": "binary", "name": "bin", "len": 50}, + { "type": "nchar", "name": "nch", "len": 100} + ], + "tags": [ + {"type": "tinyint", "name": "groupid","max": 10,"min": 1}, + {"name": "location","type": "binary", "len": 16, "values": + ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} + From d572f9e38779047463ebb35bdf1d4f45b9541af8 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 4 Nov 2024 19:21:27 +0800 Subject: [PATCH 07/10] blob/doc: connect blob directly without flexify --- docs/zh/08-operation/12-multi.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/zh/08-operation/12-multi.md b/docs/zh/08-operation/12-multi.md index 5489226ce1..1e81a7ff1e 100644 --- a/docs/zh/08-operation/12-multi.md +++ b/docs/zh/08-operation/12-multi.md @@ -163,3 +163,15 @@ s3BucketName td-test - 认为全部 S3 服务均指向同一数据源,对各个 S3 服务操作完全等价 - 在某一 S3 服务上操作失败后会切换至其他服务,全部服务都失败后将返回最后产生的错误码 - 最大支持的 S3 服务配置数为 10 + +### 不依赖 Flexify 服务 + +用户界面同 S3,不同的地方在于下面三个参数的配置: + +| # | 参数 | 示例值 | 描述 | +| :--- | :----------- | 
:--------------------------------------- | :----------------------------------------------------------- |
+| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
+| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | 冒号分隔的用户 accountId:accountKey |
+| 3 | s3BucketName | test-container | Container name |
+
+其中 fd2d01c73 是账户 ID;微软 Blob 存储服务只支持 Https 协议,不支持 Http。

From 90d1e014db7dbcbddfa0d6d303dbbede7f266638 Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Tue, 5 Nov 2024 14:25:24 +0800
Subject: [PATCH 08/10] az/begin: remove duplicate begin & end of empty impl.

---
 source/libs/azure/src/az.cpp | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/source/libs/azure/src/az.cpp b/source/libs/azure/src/az.cpp
index 5f95624c94..9a1f95b142 100644
--- a/source/libs/azure/src/az.cpp
+++ b/source/libs/azure/src/az.cpp
@@ -22,6 +22,10 @@
 #include "taoserror.h"
 #include "tglobal.h"
 
+int32_t azBegin() { return TSDB_CODE_SUCCESS; }
+
+void azEnd() {}
+
 #if defined(USE_S3)
 
 #include
@@ -40,10 +44,6 @@ extern char tsS3BucketName[TSDB_FQDN_LEN];
 extern int8_t tsS3Enabled;
 extern int8_t tsS3EpNum;
 
-int32_t azBegin() { return TSDB_CODE_SUCCESS; }
-
-void azEnd() {}
-
 static void checkPrint(const char *fmt, ...) {
   va_list arg_ptr;
   va_start(arg_ptr, fmt);
@@ -524,10 +524,6 @@ int32_t azDeleteObjects(const char *object_name[], int nobject) {
 
 #else
 
-int32_t azBegin() { return TSDB_CODE_SUCCESS; }
-
-void azEnd() {}
-
 int32_t azCheckCfg() { return TSDB_CODE_SUCCESS; }
 
 int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) {

From 7316b339b6fa3b17a45120402ad12cb551b03f8d Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Tue, 5 Nov 2024 14:34:20 +0800
Subject: [PATCH 09/10] az/get object by prefix: catch all cpp exceptions

---
 source/libs/azure/src/az.cpp | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/source/libs/azure/src/az.cpp b/source/libs/azure/src/az.cpp
index 9a1f95b142..1380b58bbd 100644
--- a/source/libs/azure/src/az.cpp
+++ b/source/libs/azure/src/az.cpp
@@ -32,7 +32,6 @@
 #include
 #include "td_block_blob_client.hpp"
 
-// Add appropriate using namespace directives
 using namespace Azure::Storage;
 using namespace Azure::Storage::Blobs;
 
@@ -223,7 +222,6 @@ static int32_t azPutObjectFromFileOffsetImpl(const char *file, const char *objec
   uint8_t blobContent[] = "Hello Azure!";
   // Create the block blob client
   // BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName);
-  // TDBlockBlobClient blobClient(containerClient.GetBlobClient(blobName));
   TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));
 
   blobClient.UploadFrom(file, offset, size);
@@ -467,7 +465,7 @@ int32_t azGetObjectToFile(const char *object_name, const char *fileName) {
   TAOS_RETURN(code);
 }
 
-int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
+static int32_t azGetObjectsByPrefixImpl(const char *prefix, const char *path) {
   const std::string delimiter = "/";
   std::string accountName = tsS3AccessKeyId[0];
   std::string accountKey = tsS3AccessKeySecret[0];
@@ -514,6 +512,23 @@ int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
   return 0;
 }
 
+int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
+  int32_t code = 0;
+
+  try {
+    code = azGetObjectsByPrefixImpl(prefix, path);
+  } catch (const std::exception &e) {
+    azError("%s: Reason Phrase: %s", __func__, e.what());
+
+    code = TAOS_SYSTEM_ERROR(EIO);
+    azError("%s failed at 
line %d since %s", __func__, __LINE__, tstrerror(code)); + + TAOS_RETURN(code); + } + + TAOS_RETURN(code); +} + int32_t azDeleteObjects(const char *object_name[], int nobject) { for (int i = 0; i < nobject; ++i) { azDeleteObjectsByPrefix(object_name[i]); From 16570822e8bce1de77b59c0d56318fd309faffac Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 5 Nov 2024 17:16:20 +0800 Subject: [PATCH 10/10] blob/config: check dnode s3 params --- tests/system-test/2-query/db.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py index d4e5f89aa3..ee6b517061 100644 --- a/tests/system-test/2-query/db.py +++ b/tests/system-test/2-query/db.py @@ -62,12 +62,30 @@ class TDTestCase: tdSql.query("show dnode 1 variables like '____debugFlag'") tdSql.checkRows(2) - tdSql.query("show dnode 1 variables like 's3MigrateEna%'") + tdSql.query("show dnode 1 variables like 's3MigrateEnab%'") tdSql.checkRows(1) tdSql.checkData(0, 0, 1) tdSql.checkData(0, 1, 's3MigrateEnabled') tdSql.checkData(0, 2, 0) + tdSql.query("show dnode 1 variables like 's3MigrateIntervalSec%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3MigrateIntervalSec') + tdSql.checkData(0, 2, 3600) + + tdSql.query("show dnode 1 variables like 's3PageCacheSize%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3PageCacheSize') + tdSql.checkData(0, 2, 4096) + + tdSql.query("show dnode 1 variables like 's3UploadDelaySec%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3UploadDelaySec') + tdSql.checkData(0, 2, 60) + def threadTest(self, threadID): print(f"Thread {threadID} starting...") tdsqln = tdCom.newTdSql()