Merge pull request #28268 from taosdata/feat/TS-5215-2
feat(az/blob): new s3 connection for ablob
This commit is contained in:
commit 46c1c09b3b
@@ -121,6 +121,7 @@ TAGS
contrib/*
!contrib/CMakeLists.txt
!contrib/test
!contrib/azure-cmake
sql
debug*/
.env
@@ -0,0 +1,15 @@
# azure
ExternalProject_Add(azure
URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz
URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1"
#BUILD_IN_SOURCE TRUE
#BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
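Note that this ExternalProject only downloads and unpacks the SDK (the configure, build, install, and test commands are deliberately empty); the sources are compiled directly by the contrib/azure-cmake target further below.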
@@ -256,7 +256,8 @@ ELSE ()
IF (${BUILD_SANITIZER})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
#SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF (${BUILD_RELEASE})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
@@ -152,6 +152,7 @@ if(${BUILD_WITH_S3})
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_S3)

# cos

@@ -614,9 +615,17 @@ if (${BUILD_PCRE2})
add_subdirectory(pcre2 EXCLUDE_FROM_ALL)
endif(${BUILD_PCRE2})

if(${TD_LINUX})
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
endif(${TD_LINUX})

# ================================================================================================
# Build test
# ================================================================================================

MESSAGE("build with dependency tests: ${BUILD_DEPENDENCY_TESTS}")

if(${BUILD_DEPENDENCY_TESTS})
add_subdirectory(test EXCLUDE_FROM_ALL)
endif(${BUILD_DEPENDENCY_TESTS})
@@ -0,0 +1,70 @@
# lib_azure_sdk
set(AZURE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1")
set(AZURE_SDK_LIBRARY_DIR "${AZURE_DIR}/sdk")

file(GLOB AZURE_SDK_SRC
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/credentials/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/cryptography/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/io/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/tracing/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp"
)

file(GLOB AZURE_SDK_UNIFIED_SRC
${AZURE_SDK_SRC}
)

set(AZURE_SDK_INCLUDES
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/inc/"
"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/inc/"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/inc/"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/"
)

add_library(_azure_sdk STATIC ${AZURE_SDK_UNIFIED_SRC})
target_compile_definitions(_azure_sdk PRIVATE BUILD_CURL_HTTP_TRANSPORT_ADAPTER)

target_include_directories(
_azure_sdk
PUBLIC "$ENV{HOME}/.cos-local.2/include"
)

find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
#find_library(CURL_LIBRARY curl)
#find_library(XML2_LIBRARY xml2)
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
#find_library(CoreFoundation_Library CoreFoundation)
#find_library(SystemConfiguration_Library SystemConfiguration)

target_link_libraries(
_azure_sdk
PRIVATE ${CURL_LIBRARY}
PRIVATE ${SSL_LIBRARY}
PRIVATE ${CRYPTO_LIBRARY}
PRIVATE ${XML2_LIBRARY}
#PRIVATE xml2
PRIVATE zlib
# PRIVATE ${CoreFoundation_Library}
# PRIVATE ${SystemConfiguration_Library}
)

# Originally, on Windows azure-core is built with bcrypt and crypt32 by default
if (TARGET OpenSSL::SSL)
target_link_libraries(_azure_sdk PRIVATE OpenSSL::Crypto OpenSSL::SSL)
endif()

# Originally, on Windows azure-core is built with winhttp by default
if (TARGET td_contrib::curl)
target_link_libraries(_azure_sdk PRIVATE td_contrib::curl)
endif()

target_include_directories(_azure_sdk SYSTEM BEFORE PUBLIC ${AZURE_SDK_INCLUDES})
add_library(td_contrib::azure_sdk ALIAS _azure_sdk)
@@ -28,5 +28,6 @@ if(${BUILD_WITH_TRAFT})
# add_subdirectory(traft)
endif(${BUILD_WITH_TRAFT})

add_subdirectory(azure)
add_subdirectory(tdev)
add_subdirectory(lz4)
@@ -0,0 +1,26 @@
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED True)

add_executable(
azure-test
main.cpp
)

find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
#find_library(XML2_LIBRARY xml2)
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
#find_library(CoreFoundation_Library CoreFoundation)
#find_library(SystemConfiguration_Library SystemConfiguration)

target_link_libraries(
azure-test
PRIVATE _azure_sdk
PRIVATE ${CURL_LIBRARY}
PRIVATE ${XML2_LIBRARY}
PRIVATE ${SSL_LIBRARY}
PRIVATE ${CRYPTO_LIBRARY}
PRIVATE dl
PRIVATE pthread
)
@@ -0,0 +1,99 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include <cstdlib>  // std::getenv
#include <iostream>

// Include the necessary SDK headers
#include <azure/core.hpp>
#include <azure/storage/blobs.hpp>

// Add appropriate using namespace directives
using namespace Azure::Storage;
using namespace Azure::Storage::Blobs;

// Secrets should be stored & retrieved from secure locations such as Azure::KeyVault. For
// convenience and brevity of samples, the secrets are retrieved from environment variables.

std::string GetEndpointUrl() {
// return std::getenv("AZURE_STORAGE_ACCOUNT_URL");
const char *accountId = std::getenv("ablob_account_id");
if (accountId == nullptr) {
return "";  // getenv returns NULL when the variable is unset
}

return std::string(accountId) + ".blob.core.windows.net";
}

std::string GetAccountName() {
// return std::getenv("AZURE_STORAGE_ACCOUNT_NAME");
const char *accountId = std::getenv("ablob_account_id");
return accountId ? accountId : "";
}

std::string GetAccountKey() {
// return std::getenv("AZURE_STORAGE_ACCOUNT_KEY");

const char *accountSecret = std::getenv("ablob_account_secret");
return accountSecret ? accountSecret : "";
}

int main() {
std::string endpointUrl = GetEndpointUrl();
std::string accountName = GetAccountName();
std::string accountKey = GetAccountKey();

try {
auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

std::string accountURL = "https://fd2d01cd892f844eeaa2273.blob.core.windows.net";
BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

std::string containerName = "myblobcontainer";
// auto containerClient = blobServiceClient.GetBlobContainerClient("myblobcontainer");
auto containerClient = blobServiceClient.GetBlobContainerClient("td-test");

// Create the container if it does not exist
std::cout << "Creating container: " << containerName << std::endl;
// containerClient.CreateIfNotExists();

std::string blobName = "blob.txt";
uint8_t blobContent[] = "Hello Azure!";
// Create the block blob client
BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName);

// Upload the blob
std::cout << "Uploading blob: " << blobName << std::endl;
blobClient.UploadFrom(blobContent, sizeof(blobContent));
/*
auto blockBlobClient = BlockBlobClient(endpointUrl, sharedKeyCredential);

// Create some data to upload into the blob.
std::vector<uint8_t> data = {1, 2, 3, 4};
Azure::Core::IO::MemoryBodyStream stream(data);

Azure::Response<Models::UploadBlockBlobResult> response = blockBlobClient.Upload(stream);

Models::UploadBlockBlobResult model = response.Value;
std::cout << "Last modified date of uploaded blob: " << model.LastModified.ToString()
<< std::endl;
*/
} catch (const Azure::Core::RequestFailedException& e) {
std::cout << "Status Code: " << static_cast<int>(e.StatusCode) << ", Reason Phrase: " << e.ReasonPhrase
<< std::endl;
std::cout << e.what() << std::endl;

return 1;
}

return 0;
}
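The sample reads its credentials from the ablob_account_id and ablob_account_secret environment variables; both must be exported before running azure-test.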
@@ -32,6 +32,8 @@ extern int32_t tsS3PageCacheSize;
extern int32_t tsS3UploadDelaySec;

int32_t s3Init();
int32_t s3Begin();
void s3End();
int32_t s3CheckCfg();
int32_t s3PutObjectFromFile(const char *file, const char *object);
int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp);
@@ -0,0 +1,45 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef _TD_AZURE_H_
#define _TD_AZURE_H_

#include "os.h"
#include "tarray.h"
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"

#ifdef __cplusplus
extern "C" {
#endif

int32_t azBegin();
void azEnd();
int32_t azCheckCfg();
int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size);
int32_t azGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock);
void azDeleteObjectsByPrefix(const char *prefix);

int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp);
int32_t azGetObjectsByPrefix(const char *prefix, const char *path);
int32_t azGetObjectToFile(const char *object_name, const char *fileName);
int32_t azDeleteObjects(const char *object_name[], int nobject);

#ifdef __cplusplus
}
#endif

#endif  // _TD_AZURE_H_
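The az* entry points deliberately mirror the existing s3* interface (compare s3PutObjectFromFile2 and s3GetObjectBlock above), which is what lets the tcs facade below route calls to either backend.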
@@ -0,0 +1,58 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef _TD_TCS_H_
#define _TD_TCS_H_

#include "os.h"
#include "tarray.h"
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"

#ifdef __cplusplus
extern "C" {
#endif

extern int8_t tsS3Enabled;
extern int8_t tsS3EnabledCfg;

extern int32_t tsS3UploadDelaySec;
extern int32_t tsS3BlockSize;
extern int32_t tsS3BlockCacheSize;
extern int32_t tsS3PageCacheSize;

extern int8_t tsS3StreamEnabled;

int32_t tcsInit();
void tcsUninit();

int32_t tcsCheckCfg();

int32_t tcsPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size);
int32_t tcsGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock);

void tcsDeleteObjectsByPrefix(const char *prefix);

int32_t tcsPutObjectFromFile2(const char *file, const char *object, int8_t withcp);
int32_t tcsGetObjectsByPrefix(const char *prefix, const char *path);
int32_t tcsDeleteObjects(const char *object_name[], int nobject);
int32_t tcsGetObjectToFile(const char *object_name, const char *fileName);

#ifdef __cplusplus
}
#endif

#endif  // _TD_TCS_H_
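The diff shows only the facade's declarations, not its implementation. As a rough sketch (hypothetical, not the committed code), such a facade can dispatch on the tsS3Ablob flag that taosSetS3Cfg sets further below:

// Hypothetical dispatch sketch for the tcs facade. Assumes the az.h and cos.h
// declarations shown above; tsS3Ablob is set at config time when the endpoint
// is an Azure Blob URL (see taosSetS3Cfg in this diff).
extern int8_t tsS3Ablob;

int32_t tcsPutObjectFromFile2(const char *file, const char *object, int8_t withcp) {
  if (tsS3Ablob) {
    return azPutObjectFromFile2(file, object, withcp);  // Azure Blob backend
  }
  return s3PutObjectFromFile2(file, object, withcp);  // existing S3 backend
}

The other tcs* entry points would follow the same pattern.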
@@ -57,6 +57,7 @@ extern int32_t rpcDebugFlag;
extern int32_t qDebugFlag;
extern int32_t stDebugFlag;
extern int32_t wDebugFlag;
extern int32_t azDebugFlag;
extern int32_t sDebugFlag;
extern int32_t tsdbDebugFlag;
extern int32_t tqDebugFlag;
@@ -89,20 +89,8 @@ static void s3DumpCfgByEp(int8_t epIndex) {

int32_t s3CheckCfg() {
int32_t code = 0, lino = 0;
int8_t i = 0;

if (!tsS3Enabled) {
(void)fprintf(stderr, "s3 not configured.\n");
TAOS_RETURN(code);
}

code = s3Begin();
if (code != 0) {
(void)fprintf(stderr, "failed to initialize s3.\n");
TAOS_RETURN(code);
}

for (; i < tsS3EpNum; i++) {
for (int8_t i = 0; i < tsS3EpNum; i++) {
(void)fprintf(stdout, "test s3 ep (%d/%d):\n", i + 1, tsS3EpNum);
s3DumpCfgByEp(i);

@@ -192,7 +180,7 @@ int32_t s3CheckCfg() {
(void)fprintf(stdout, "=================================================================\n");
}

s3End();
// s3End();

TAOS_RETURN(code);
}
@@ -1529,6 +1517,8 @@ void s3EvictCache(const char *path, long object_size) {}
#include "cos_http_io.h"
#include "cos_log.h"

int32_t s3Begin() { TAOS_RETURN(TSDB_CODE_SUCCESS); }

int32_t s3Init() {
if (cos_http_io_initialize(NULL, 0) != COSE_OK) {
return -1;

@@ -1967,6 +1957,10 @@ long s3Size(const char *object_name) {
#else

int32_t s3Init() { return 0; }
int32_t s3Begin() { TAOS_RETURN(TSDB_CODE_SUCCESS); }

void s3End() {}
int32_t s3CheckCfg() { return 0; }
int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; }
int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp) { return 0; }
int32_t s3PutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) { return 0; }
@@ -160,7 +160,11 @@ int32_t startRsync() {
code = system(cmd);
if (code != 0) {
uError("[rsync] cmd:%s start server failed, code:%d," ERRNO_ERR_FORMAT, cmd, code, ERRNO_ERR_DATA);
code = TAOS_SYSTEM_ERROR(errno);
if (errno == 0) {
return 0;
} else {
code = TAOS_SYSTEM_ERROR(errno);
}
} else {
uInfo("[rsync] cmd:%s start server successful", cmd);
}

@@ -358,4 +362,4 @@ int32_t deleteRsync(const char* id) {

uDebug("[rsync] delete data:%s successful", id);
return 0;
}
}
@@ -308,6 +308,7 @@ char tsS3AppId[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {"<appid>"};
int8_t tsS3Enabled = false;
int8_t tsS3EnabledCfg = false;
int8_t tsS3Oss[TSDB_MAX_EP_NUM] = {false};
int8_t tsS3Ablob = false;
int8_t tsS3StreamEnabled = false;

int8_t tsS3Https[TSDB_MAX_EP_NUM] = {true};

@@ -436,6 +437,7 @@ int32_t taosSetS3Cfg(SConfig *pCfg) {
}
tsS3Https[i] = (strstr(tsS3Endpoint[i], "https://") != NULL);
tsS3Oss[i] = (strstr(tsS3Endpoint[i], "aliyuncs.") != NULL);
tsS3Ablob = (strstr(tsS3Endpoint[i], ".blob.core.windows.net") != NULL);
}

if (tsS3BucketName[0] != '<') {
@@ -542,6 +544,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "vDebugFlag", vDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "mDebugFlag", mDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "wDebugFlag", wDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "azDebugFlag", azDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));

@@ -1063,6 +1066,9 @@ static int32_t taosSetServerLogCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "wDebugFlag");
wDebugFlag = pItem->i32;

TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "azDebugFlag");
azDebugFlag = pItem->i32;

TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "sDebugFlag");
sDebugFlag = pItem->i32;
@@ -1987,13 +1993,14 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {

{ // 'bool/int32_t/int64_t/float/double' variables with general modification function
static OptionNameAndVar debugOptions[] = {
{"dDebugFlag", &dDebugFlag}, {"vDebugFlag", &vDebugFlag}, {"mDebugFlag", &mDebugFlag},
{"wDebugFlag", &wDebugFlag}, {"sDebugFlag", &sDebugFlag}, {"tsdbDebugFlag", &tsdbDebugFlag},
{"tqDebugFlag", &tqDebugFlag}, {"fsDebugFlag", &fsDebugFlag}, {"udfDebugFlag", &udfDebugFlag},
{"smaDebugFlag", &smaDebugFlag}, {"idxDebugFlag", &idxDebugFlag}, {"tdbDebugFlag", &tdbDebugFlag},
{"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"smaDebugFlag", &smaDebugFlag},
{"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"metaDebugFlag", &metaDebugFlag},
{"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, {"tqClientDebug", &tqClientDebug},
{"dDebugFlag", &dDebugFlag}, {"vDebugFlag", &vDebugFlag}, {"mDebugFlag", &mDebugFlag},
{"wDebugFlag", &wDebugFlag}, {"azDebugFlag", &azDebugFlag}, {"sDebugFlag", &sDebugFlag},
{"tsdbDebugFlag", &tsdbDebugFlag}, {"tqDebugFlag", &tqDebugFlag}, {"fsDebugFlag", &fsDebugFlag},
{"udfDebugFlag", &udfDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"idxDebugFlag", &idxDebugFlag},
{"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag},
{"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag},
{"metaDebugFlag", &metaDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag},
{"tqClientDebug", &tqClientDebug},
};

static OptionNameAndVar options[] = {{"audit", &tsEnableAudit},
@@ -2371,6 +2378,7 @@ static int32_t taosSetAllDebugFlag(SConfig *pCfg, int32_t flag) {
taosCheckAndSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&azDebugFlag, "azDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, noNeedToSetVars);
@@ -24,6 +24,7 @@
#include "jemalloc/jemalloc.h"
#endif
#include "dmUtil.h"
#include "tcs.h"

#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"

@@ -330,10 +331,9 @@ static int32_t dmCheckS3() {
int32_t code = 0;
SConfig *pCfg = taosGetCfg();
cfgDumpCfgS3(pCfg, 0, true);
#if defined(USE_S3)
extern int32_t s3CheckCfg();

code = s3CheckCfg();
#if defined(USE_S3)
code = tcsCheckCfg();
#endif
return code;
}
@@ -1,7 +1,7 @@
aux_source_directory(src IMPLEMENT_SRC)
add_library(dnode STATIC ${IMPLEMENT_SRC})
target_link_libraries(
dnode mgmt_mnode mgmt_qnode mgmt_snode mgmt_vnode mgmt_dnode monitorfw
dnode mgmt_mnode mgmt_qnode mgmt_snode mgmt_vnode mgmt_dnode monitorfw tcs
)

IF (TD_ENTERPRISE)
@@ -20,6 +20,7 @@
#include "libs/function/tudf.h"
#include "tgrant.h"
#include "tcompare.h"
#include "tcs.h"
#include "tanal.h"
// clang-format on
@@ -98,9 +99,9 @@ static bool dmDataSpaceAvailable() {
static int32_t dmCheckDiskSpace() {
// availability
int32_t code = 0;
code = osUpdate();
if(code != 0) {
code = 0; // ignore the error, just log it
code = osUpdate();
if (code != 0) {
code = 0;  // ignore the error, just log it
dError("failed to update os info since %s", tstrerror(code));
}
if (!dmDataSpaceAvailable()) {
@@ -163,13 +164,6 @@ static int32_t dmCheckDataDirVersionWrapper() {
}
return 0;
}
#if defined(USE_S3)

extern int32_t s3Begin();
extern void s3End();
extern int8_t tsS3Enabled;

#endif

int32_t dmInit() {
dInfo("start to init dnode env");

@@ -187,7 +181,7 @@ int32_t dmInit() {
if ((code = dmInitDnode(dmInstance())) != 0) return code;
if ((code = InitRegexCache() != 0)) return code;
#if defined(USE_S3)
if ((code = s3Begin()) != 0) return code;
if ((code = tcsInit()) != 0) return code;
#endif

dInfo("dnode env is initialized");

@@ -221,7 +215,7 @@ void dmCleanup() {
DestroyRegexCache();

#if defined(USE_S3)
s3End();
tcsUninit();
#endif

dInfo("dnode env is cleaned up");
@@ -4,7 +4,7 @@ aux_source_directory(. MNODE_ARBGROUP_TEST_SRC)
add_executable(arbgroupTest ${MNODE_ARBGROUP_TEST_SRC})
target_link_libraries(
arbgroupTest
PRIVATE dnode nodes planner gtest qcom
PRIVATE dnode nodes planner gtest qcom tcs
)

add_test(
@@ -119,6 +119,7 @@ if (${BUILD_CONTRIB})
vnode
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/include/libs/crypt"
PUBLIC "${TD_SOURCE_DIR}/include/dnode/vnode"

@@ -129,6 +130,7 @@ else()
vnode
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/include/libs/crypt"
PUBLIC "${TD_SOURCE_DIR}/include/dnode/vnode"

@@ -164,6 +166,7 @@ target_link_libraries(
PUBLIC tdb
PUBLIC audit
PUBLIC crypt
PUBLIC tcs

# PUBLIC bdb
# PUBLIC scalar
@@ -12,8 +12,8 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cos.h"
#include "functionMgt.h"
#include "tcs.h"
#include "tsdb.h"
#include "tsdbDataFileRW.h"
#include "tsdbIter.h"
@@ -1258,7 +1258,8 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray
}

if (NULL == pLastCol || cmp_res < 0 || (cmp_res == 0 && !COL_VAL_IS_NONE(pColVal))) {
SLastCol lastColTmp = {.rowKey = *pRowKey, .colVal = *pColVal, .dirty = 0, .cacheStatus = TSDB_LAST_CACHE_VALID};
SLastCol lastColTmp = {
.rowKey = *pRowKey, .colVal = *pColVal, .dirty = 0, .cacheStatus = TSDB_LAST_CACHE_VALID};
if ((code = tsdbCachePutToRocksdb(pTsdb, &idxKey->key, &lastColTmp)) != TSDB_CODE_SUCCESS) {
tsdbError("tsdb/cache: vgId:%d, put rocks failed at line %d since %s.", TD_VID(pTsdb->pVnode), lino,
tstrerror(code));

@@ -1705,8 +1706,7 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA
if (pLastCol && pLastCol->cacheStatus != TSDB_LAST_CACHE_NO_CACHE) {
code = tsdbCachePutToLRU(pTsdb, &idxKey->key, pLastCol, 0);
if (code) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
tstrerror(code));
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
taosMemoryFreeClear(pToFree);
TAOS_CHECK_EXIT(code);
}
@@ -3520,7 +3520,7 @@ static int32_t tsdbCacheLoadBlockS3(STsdbFD *pFD, uint8_t **ppBlock) {

int64_t block_offset = (pFD->blkno - 1) * tsS3BlockSize * pFD->szPage;

TAOS_CHECK_RETURN(s3GetObjectBlock(pFD->objName, block_offset, tsS3BlockSize * pFD->szPage, 0, ppBlock));
TAOS_CHECK_RETURN(tcsGetObjectBlock(pFD->objName, block_offset, tsS3BlockSize * pFD->szPage, 0, ppBlock));

tsdbTrace("block:%p load from s3", *ppBlock);
@@ -14,7 +14,7 @@
*/

#include "tsdbFile2.h"
#include "cos.h"
#include "tcs.h"
#include "vnd.h"

// to_json

@@ -318,7 +318,7 @@ static void tsdbTFileObjRemoveLC(STFileObj *fobj, bool remove_all) {
}
*(dot + 1) = 0;

s3DeleteObjectsByPrefix(object_name_prefix);
tcsDeleteObjectsByPrefix(object_name_prefix);

// remove local last chunk file
dot = strrchr(lc_path, '.');
@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "cos.h"
#include "crypt.h"
#include "tcs.h"
#include "tsdb.h"
#include "tsdbDef.h"
#include "vnd.h"

@@ -391,7 +391,7 @@ static int32_t tsdbReadFileBlock(STsdbFD *pFD, int64_t offset, int64_t size, boo

snprintf(dot + 1, TSDB_FQDN_LEN - (dot + 1 - object_name_prefix), "%d.data", chunkno);

code = s3GetObjectBlock(object_name_prefix, cOffset, nRead, check, &pBlock);
code = tcsGetObjectBlock(object_name_prefix, cOffset, nRead, check, &pBlock);
TSDB_CHECK_CODE(code, lino, _exit);

memcpy(buf + n, pBlock, nRead);
@@ -13,7 +13,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "cos.h"
#include "tcs.h"
#include "tsdb.h"
#include "tsdbFS2.h"
#include "vnd.h"

@@ -426,35 +426,6 @@ static int32_t tsdbS3FidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int32_t s3Kee
}
}

static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile *to) {
int32_t code = 0;
int32_t lino = 0;

char fname[TSDB_FILENAME_LEN];
TdFilePtr fdFrom = NULL;
// TdFilePtr fdTo = NULL;

tsdbTFileName(rtner->tsdb, to, fname);

fdFrom = taosOpenFile(from->fname, TD_FILE_READ);
if (fdFrom == NULL) {
TAOS_CHECK_GOTO(terrno, &lino, _exit);
}

char *object_name = taosDirEntryBaseName(fname);
TAOS_CHECK_GOTO(s3PutObjectFromFile2(from->fname, object_name, 1), &lino, _exit);

_exit:
if (code) {
tsdbError("vgId:%d %s failed at line %s:%d since %s", TD_VID(rtner->tsdb->pVnode), __func__, __FILE__, lino,
tstrerror(code));
}
if (taosCloseFile(&fdFrom) != 0) {
tsdbTrace("vgId:%d, failed to close file", TD_VID(rtner->tsdb->pVnode));
}
return code;
}

static int32_t tsdbMigrateDataFileLCS3(SRTNer *rtner, const STFileObj *fobj, int64_t size, int64_t chunksize) {
int32_t code = 0;
int32_t lino = 0;

@@ -519,7 +490,7 @@ static int32_t tsdbMigrateDataFileLCS3(SRTNer *rtner, const STFileObj *fobj, int
snprintf(dot + 1, TSDB_FQDN_LEN - (dot + 1 - object_name_prefix), "%d.data", cn);
int64_t c_offset = chunksize * (cn - fobj->f->lcn);

TAOS_CHECK_GOTO(s3PutObjectFromFileOffset(fname, object_name_prefix, c_offset, chunksize), &lino, _exit);
TAOS_CHECK_GOTO(tcsPutObjectFromFileOffset(fname, object_name_prefix, c_offset, chunksize), &lino, _exit);
}

// copy last chunk

@@ -618,7 +589,7 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, int64
snprintf(dot + 1, TSDB_FQDN_LEN - (dot + 1 - object_name_prefix), "%d.data", cn);
int64_t c_offset = chunksize * (cn - 1);

TAOS_CHECK_GOTO(s3PutObjectFromFileOffset(fobj->fname, object_name_prefix, c_offset, chunksize), &lino, _exit);
TAOS_CHECK_GOTO(tcsPutObjectFromFileOffset(fobj->fname, object_name_prefix, c_offset, chunksize), &lino, _exit);
}

// copy last chunk

@@ -741,8 +712,6 @@ _exit:
int32_t tsdbAsyncS3Migrate(STsdb *tsdb, int64_t now) {
int32_t code = 0;

extern int8_t tsS3EnabledCfg;

int32_t expired = grantCheck(TSDB_GRANT_OBJECT_STORAGE);
if (expired && tsS3Enabled) {
tsdbWarn("s3 grant expired: %d", expired);
@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "cos.h"
#include "sync.h"
#include "tcs.h"
#include "tsdb.h"
#include "vnd.h"

@@ -327,7 +327,7 @@ void vnodeDestroy(int32_t vgId, const char *path, STfs *pTfs, int32_t nodeId) {
if (nodeId > 0 && vgId > 0 /*&& nlevel > 1*/ && tsS3Enabled) {
char vnode_prefix[TSDB_FILENAME_LEN];
snprintf(vnode_prefix, TSDB_FILENAME_LEN, "%d/v%df", nodeId, vgId);
s3DeleteObjectsByPrefix(vnode_prefix);
tcsDeleteObjectsByPrefix(vnode_prefix);
}
}
@@ -22,4 +22,6 @@ add_subdirectory(stream)
add_subdirectory(planner)
add_subdirectory(qworker)
add_subdirectory(geometry)
add_subdirectory(command)
add_subdirectory(azure)
add_subdirectory(tcs)
@@ -0,0 +1,32 @@
#if(${TD_LINUX})
aux_source_directory(src AZ_SRC)

add_library(az STATIC ${AZ_SRC})

if(${BUILD_S3})
add_definitions(-DUSE_S3)
target_link_libraries(
az
PUBLIC _azure_sdk
PUBLIC crypt
)
endif()

target_include_directories(
az
PUBLIC "${TD_SOURCE_DIR}/include/libs/azure"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

target_link_libraries(
az
PUBLIC cjson
PUBLIC os
PUBLIC util
PUBLIC common
)

if(${BUILD_TEST})
add_subdirectory(test)
endif(${BUILD_TEST})
#endif(${TD_LINUX})
@@ -0,0 +1,42 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef _TD_AZ_INT_H_
#define _TD_AZ_INT_H_

#include "os.h"
#include "tarray.h"
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"

#ifdef __cplusplus
extern "C" {
#endif

// clang-format off
#define azFatal(...) { if (azDebugFlag & DEBUG_FATAL) { taosPrintLog("AZR FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
#define azError(...) { if (azDebugFlag & DEBUG_ERROR) { taosPrintLog("AZR ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
#define azWarn(...)  { if (azDebugFlag & DEBUG_WARN)  { taosPrintLog("AZR WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
#define azInfo(...)  { if (azDebugFlag & DEBUG_INFO)  { taosPrintLog("AZR ", DEBUG_INFO, 255, __VA_ARGS__); }}
#define azDebug(...) { if (azDebugFlag & DEBUG_DEBUG) { taosPrintLog("AZR ", DEBUG_DEBUG, azDebugFlag, __VA_ARGS__); }}
#define azTrace(...) { if (azDebugFlag & DEBUG_TRACE) { taosPrintLog("AZR ", DEBUG_TRACE, azDebugFlag, __VA_ARGS__); }}
// clang-format on

#ifdef __cplusplus
}
#endif

#endif  // _TD_AZ_INT_H_
@@ -0,0 +1,191 @@
#pragma once

#include "azure/storage/blobs/blob_options.hpp"

#include <azure/core/io/body_stream.hpp>

#include <map>
#include <memory>
#include <type_traits>

namespace Azure {
namespace Storage {
namespace Blobs {
namespace _detail {
enum class AvroDatumType {
String,
Bytes,
Int,
Long,
Float,
Double,
Bool,
Null,
Record,
Enum,
Array,
Map,
Union,
Fixed,
};

class AvroStreamReader final {
public:
// position of a vector that lives through vector resizing
struct ReaderPos final {
const std::vector<uint8_t>* BufferPtr = nullptr;
size_t Offset = 0;
};
explicit AvroStreamReader(Core::IO::BodyStream& stream) : m_stream(&stream), m_pos{&m_streambuffer, 0} {}
AvroStreamReader(const AvroStreamReader&) = delete;
AvroStreamReader& operator=(const AvroStreamReader&) = delete;

int64_t ParseInt(const Core::Context& context);
void Advance(size_t n, const Core::Context& context);
// Read at least n bytes from m_stream and append data to m_streambuffer. Return number of bytes
// available in m_streambuffer.
size_t Preload(size_t n, const Core::Context& context);
size_t TryPreload(size_t n, const Core::Context& context);
// discards data that's before m_pos
void Discard();

private:
size_t AvailableBytes() const { return m_streambuffer.size() - m_pos.Offset; }

private:
Core::IO::BodyStream* m_stream;
std::vector<uint8_t> m_streambuffer;
ReaderPos m_pos;

friend class AvroDatum;
};

class AvroSchema final {
public:
static const AvroSchema StringSchema;
static const AvroSchema BytesSchema;
static const AvroSchema IntSchema;
static const AvroSchema LongSchema;
static const AvroSchema FloatSchema;
static const AvroSchema DoubleSchema;
static const AvroSchema BoolSchema;
static const AvroSchema NullSchema;
static AvroSchema RecordSchema(std::string name, const std::vector<std::pair<std::string, AvroSchema>>& fieldsSchema);
static AvroSchema ArraySchema(AvroSchema elementSchema);
static AvroSchema MapSchema(AvroSchema elementSchema);
static AvroSchema UnionSchema(std::vector<AvroSchema> schemas);
static AvroSchema FixedSchema(std::string name, int64_t size);

const std::string& Name() const { return m_name; }
AvroDatumType Type() const { return m_type; }
const std::vector<std::string>& FieldNames() const { return m_status->m_keys; }
AvroSchema ItemSchema() const { return m_status->m_schemas[0]; }
const std::vector<AvroSchema>& FieldSchemas() const { return m_status->m_schemas; }
size_t Size() const { return static_cast<size_t>(m_status->m_size); }

private:
explicit AvroSchema(AvroDatumType type) : m_type(type) {}

private:
AvroDatumType m_type;
std::string m_name;

struct SharedStatus {
std::vector<std::string> m_keys;
std::vector<AvroSchema> m_schemas;
int64_t m_size = 0;
};
std::shared_ptr<SharedStatus> m_status;
};

class AvroDatum final {
public:
AvroDatum() : m_schema(AvroSchema::NullSchema) {}
explicit AvroDatum(AvroSchema schema) : m_schema(std::move(schema)) {}

void Fill(AvroStreamReader& reader, const Core::Context& context);
void Fill(AvroStreamReader::ReaderPos& data);

const AvroSchema& Schema() const { return m_schema; }

template <class T>
T Value() const;
struct StringView {
const uint8_t* Data = nullptr;
size_t Length = 0;
};

private:
AvroSchema m_schema;
AvroStreamReader::ReaderPos m_data;
};

using AvroMap = std::map<std::string, AvroDatum>;

class AvroRecord final {
public:
bool HasField(const std::string& key) const { return FindField(key) != m_keys->size(); }
const AvroDatum& Field(const std::string& key) const { return m_values.at(FindField(key)); }
AvroDatum& Field(const std::string& key) { return m_values.at(FindField(key)); }
const AvroDatum& FieldAt(size_t i) const { return m_values.at(i); }
AvroDatum& FieldAt(size_t i) { return m_values.at(i); }

private:
size_t FindField(const std::string& key) const {
auto i = find(m_keys->begin(), m_keys->end(), key);
return i - m_keys->begin();
}
const std::vector<std::string>* m_keys = nullptr;
std::vector<AvroDatum> m_values;

friend class AvroDatum;
};

class AvroObjectContainerReader final {
public:
explicit AvroObjectContainerReader(Core::IO::BodyStream& stream);

bool End() const { return m_eof; }
// Calling Next() invalidates the previous AvroDatum returned by this function and all
// AvroDatums propagated from there.
AvroDatum Next(const Core::Context& context) { return NextImpl(m_objectSchema.get(), context); }

private:
AvroDatum NextImpl(const AvroSchema* schema, const Core::Context& context);

private:
std::unique_ptr<AvroStreamReader> m_reader;
std::unique_ptr<AvroSchema> m_objectSchema;
std::string m_syncMarker;
int64_t m_remainingObjectInCurrentBlock = 0;
bool m_eof = false;
};

class AvroStreamParser final : public Core::IO::BodyStream {
public:
explicit AvroStreamParser(std::unique_ptr<Azure::Core::IO::BodyStream> inner,
std::function<void(int64_t, int64_t)> progressCallback,
std::function<void(BlobQueryError)> errorCallback)
: m_inner(std::move(inner)),
m_parser(*m_inner),
m_progressCallback(std::move(progressCallback)),
m_errorCallback(std::move(errorCallback)) {}

int64_t Length() const override { return -1; }
void Rewind() override { this->m_inner->Rewind(); }

private:
size_t OnRead(uint8_t* buffer, size_t count, const Azure::Core::Context& context) override;

private:
std::unique_ptr<Azure::Core::IO::BodyStream> m_inner;
AvroObjectContainerReader m_parser;
std::function<void(int64_t, int64_t)> m_progressCallback;
std::function<void(BlobQueryError)> m_errorCallback;
AvroDatum::StringView m_parserBuffer;
};

} // namespace _detail
} // namespace Blobs
} // namespace Storage
} // namespace Azure
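For orientation, a minimal sketch (names are illustrative, not part of the commit) of how the container reader above is meant to be driven, per its own comments:

// Sketch: drain an Avro object container stream datum by datum.
// Assumes only the declarations in this header plus Azure Core.
void DrainAvroStream(Azure::Core::IO::BodyStream& stream) {
  using namespace Azure::Storage::Blobs::_detail;
  Azure::Core::Context ctx;
  AvroObjectContainerReader reader(stream);
  while (!reader.End()) {
    AvroDatum datum = reader.Next(ctx);  // invalidates the previously returned datum
    (void)datum;  // inspect datum.Schema() / datum.Value<T>() here
  }
}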
@ -0,0 +1,260 @@
|
|||
#pragma once
|
||||
|
||||
#include "azure/storage/blobs/blob_client.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace Azure {
|
||||
namespace Storage {
|
||||
namespace Files {
|
||||
namespace DataLake {
|
||||
class FileClient;
|
||||
}
|
||||
} // namespace Files
|
||||
} // namespace Storage
|
||||
} // namespace Azure
|
||||
|
||||
namespace Azure {
|
||||
namespace Storage {
|
||||
namespace Blobs {
|
||||
|
||||
/**
|
||||
* @brief The TDBlockBlobClient allows you to manipulate Azure Storage block blobs.
|
||||
*
|
||||
* Block blobs let you upload large blobs efficiently. Block blobs are comprised of blocks, each
|
||||
* of which is identified by a block ID. You create or modify a block blob by writing a set of
|
||||
* blocks and committing them by their block IDs. Each block can be a different size.
|
||||
*
|
||||
* When you upload a block to a blob in your storage account, it is associated with the specified
|
||||
* block blob, but it does not become part of the blob until you commit a list of blocks that
|
||||
* includes the new block's ID. New blocks remain in an uncommitted state until they are
|
||||
* specifically committed or discarded. Writing a block does not update the last modified time of
|
||||
* an existing blob.
|
||||
*/
|
||||
class TDBlockBlobClient final : public BlobClient {
|
||||
public:
|
||||
/**
|
||||
* @brief Initialize a new instance of TDBlockBlobClient.
|
||||
*
|
||||
* @param connectionString A connection string includes the authentication information required
|
||||
* for your application to access data in an Azure Storage account at runtime.
|
||||
* @param blobContainerName The name of the container containing this blob.
|
||||
* @param blobName The name of this blob.
|
||||
* @param options Optional client options that define the transport pipeline policies for
|
||||
* authentication, retries, etc., that are applied to every request.
|
||||
* @return A new TDBlockBlobClient instance.
|
||||
*/
|
||||
static TDBlockBlobClient CreateFromConnectionString(const std::string& connectionString,
|
||||
const std::string& blobContainerName, const std::string& blobName,
|
||||
const BlobClientOptions& options = BlobClientOptions());
|
||||
|
||||
/**
|
||||
* @brief Initialize a new instance of TDBlockBlobClient.
|
||||
*
|
||||
* @param blobUrl A URL
|
||||
* referencing the blob that includes the name of the account, the name of the container, and
|
||||
* the name of the blob.
|
||||
* @param credential The shared key credential used to sign
|
||||
* requests.
|
||||
* @param options Optional client options that define the transport pipeline
|
||||
* policies for authentication, retries, etc., that are applied to every request.
|
||||
*/
|
||||
explicit TDBlockBlobClient(const std::string& blobUrl, std::shared_ptr<StorageSharedKeyCredential> credential,
|
||||
const BlobClientOptions& options = BlobClientOptions());
|
||||
|
||||
/**
|
||||
* @brief Initialize a new instance of TDBlockBlobClient.
|
||||
*
|
||||
* @param blobUrl A URL
|
||||
* referencing the blob that includes the name of the account, the name of the container, and
|
||||
* the name of the blob.
|
||||
* @param credential The token credential used to sign requests.
|
||||
* @param options Optional client options that define the transport pipeline policies for
|
||||
* authentication, retries, etc., that are applied to every request.
|
||||
*/
|
||||
explicit TDBlockBlobClient(const std::string& blobUrl, std::shared_ptr<Core::Credentials::TokenCredential> credential,
|
||||
const BlobClientOptions& options = BlobClientOptions());
|
||||
|
||||
/**
|
||||
* @brief Initialize a new instance of TDBlockBlobClient.
|
||||
*
|
||||
* @param blobUrl A URL
|
||||
* referencing the blob that includes the name of the account, the name of the container, and
|
||||
* the name of the blob, and possibly also a SAS token.
|
||||
* @param options Optional client
|
||||
* options that define the transport pipeline policies for authentication, retries, etc., that
|
||||
* are applied to every request.
|
||||
*/
|
||||
explicit TDBlockBlobClient(const std::string& blobUrl, const BlobClientOptions& options = BlobClientOptions());
|
||||
|
||||
/**
|
||||
* @brief Initializes a new instance of the TDBlockBlobClient class with an identical URL
|
||||
* source but the specified snapshot timestamp.
|
||||
*
|
||||
* @param snapshot The snapshot
|
||||
* identifier.
|
||||
* @return A new TDBlockBlobClient instance.
|
||||
* @remarks Pass empty string to remove the snapshot returning the base blob.
|
||||
*/
|
||||
TDBlockBlobClient WithSnapshot(const std::string& snapshot) const;
|
||||
|
||||
/**
|
||||
* @brief Creates a clone of this instance that references a version ID rather than the base
|
||||
* blob.
|
||||
*
|
||||
* @param versionId The version ID returning a URL to the base blob.
|
||||
* @return A new TDBlockBlobClient instance.
|
||||
* @remarks Pass empty string to remove the version ID returning the base blob.
|
||||
*/
|
||||
TDBlockBlobClient WithVersionId(const std::string& versionId) const;
|
||||
|
||||
/**
|
||||
* @brief Creates a new block blob, or updates the content of an existing block blob. Updating
|
||||
* an existing block blob overwrites any existing metadata on the blob.
|
||||
*
|
||||
* @param content A BodyStream containing the content to upload.
|
||||
* @param options Optional parameters to execute this function.
|
||||
* @param context Context for cancelling long running operations.
|
||||
* @return A UploadBlockBlobResult describing the state of the updated block blob.
|
||||
*/
|
||||
Azure::Response<Models::UploadBlockBlobResult> Upload(
|
||||
Azure::Core::IO::BodyStream& content, const UploadBlockBlobOptions& options = UploadBlockBlobOptions(),
|
||||
const Azure::Core::Context& context = Azure::Core::Context()) const;
|
||||
|
||||
/**
|
||||
* @brief Creates a new block blob, or updates the content of an existing block blob. Updating
|
||||
* an existing block blob overwrites any existing metadata on the blob.
|
||||
*
|
||||
* @param buffer A memory buffer containing the content to upload.
|
||||
* @param bufferSize Size of the memory buffer.
|
||||
* @param options Optional parameters to execute this function.
|
||||
* @param context Context for cancelling long running operations.
|
||||
* @return A UploadBlockBlobFromResult describing the state of the updated block blob.
|
||||
*/
|
||||
Azure::Response<Models::UploadBlockBlobFromResult> UploadFrom(
|
||||
const uint8_t* buffer, size_t bufferSize,
|
||||
const UploadBlockBlobFromOptions& options = UploadBlockBlobFromOptions(),
|
||||
const Azure::Core::Context& context = Azure::Core::Context()) const;
|
||||
|
||||
/**
|
||||
* @brief Creates a new block blob, or updates the content of an existing block blob. Updating
|
||||
* an existing block blob overwrites any existing metadata on the blob.
|
||||
*
|
||||
* @param fileName A file containing the content to upload.
|
||||
* @param options Optional parameters to execute this function.
|
||||
* @param context Context for cancelling long running operations.
|
||||
* @return A UploadBlockBlobFromResult describing the state of the updated block blob.
|
||||
*/
|
||||
Azure::Response<Models::UploadBlockBlobFromResult> UploadFrom(
|
||||
const std::string& fileName, const UploadBlockBlobFromOptions& options = UploadBlockBlobFromOptions(),
|
||||
const Azure::Core::Context& context = Azure::Core::Context()) const;
|
||||
|
||||
Azure::Response<Models::UploadBlockBlobFromResult> UploadFrom(
|
||||
const std::string& fileName, int64_t offset, int64_t size,
|
||||
const UploadBlockBlobFromOptions& options = UploadBlockBlobFromOptions(),
|
||||
const Azure::Core::Context& context = Azure::Core::Context()) const;
|
||||
|
||||
/**
|
||||
* @brief Creates a new Block Blob where the contents of the blob are read from a given URL.
|
||||
*
|
||||
* @param sourceUri Specifies the URL of the source blob.
|
||||
* @param options Optional parameters to execute this function.
|
||||
* @param context Context for cancelling long running operations.
|
||||
* @return A UploadBlockBlobFromUriResult describing the state of the updated block blob.
|
||||
*/
|
||||
Azure::Response<Models::UploadBlockBlobFromUriResult> UploadFromUri(
|
||||
const std::string& sourceUri, const UploadBlockBlobFromUriOptions& options = UploadBlockBlobFromUriOptions(),
|
||||
const Azure::Core::Context& context = Azure::Core::Context()) const;
|
||||
|
||||
/**
|
||||
* @brief Creates a new block as part of a block blob's staging area to be eventually
|
||||
* committed via the CommitBlockList operation.
|
||||
*
|
||||
* @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the
|
||||
* string must be less than or equal to 64 bytes in size.
|
||||
* @param content A BodyStream containing the content to upload.
|
||||
* @param options Optional parameters to execute this function.
|
||||
* @param context Context for cancelling long running operations.
|
||||
* @return A StageBlockResult describing the state of the updated block.
|
||||
*/
|
||||
Azure::Response<Models::StageBlockResult> StageBlock(
|
||||
const std::string& blockId, Azure::Core::IO::BodyStream& content,
|
||||
const StageBlockOptions& options = StageBlockOptions(),
|
||||
const Azure::Core::Context& context = Azure::Core::Context()) const;
|
||||
|
||||
/**
|
||||
* @brief Creates a new block to be committed as part of a blob where the contents are read from
|
||||
* the sourceUri.
|
||||
*
|
||||
* @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the
|
||||
 * string must be less than or equal to 64 bytes in size.
 * @param sourceUri Specifies the URI of the source
 * blob. The value may be a URI of up to 2 KB in length that specifies a blob. The source blob
 * must either be public or must be authenticated via a shared access signature. If the source
 * blob is public, no authentication is required to perform the operation.
 * @param options Optional parameters to execute this function.
 * @param context Context for cancelling long running operations.
 * @return A StageBlockFromUriResult describing the state of the updated block blob.
 */
Azure::Response<Models::StageBlockFromUriResult> StageBlockFromUri(
    const std::string& blockId, const std::string& sourceUri,
    const StageBlockFromUriOptions& options = StageBlockFromUriOptions(),
    const Azure::Core::Context& context = Azure::Core::Context()) const;

/**
 * @brief Writes a blob by specifying the list of block IDs that make up the blob. In order to
 * be written as part of a blob, a block must have been successfully written to the server in a
 * prior StageBlock operation. You can call CommitBlockList to update a blob by uploading only
 * those blocks that have changed, then committing the new and existing blocks together. You can
 * do this by specifying whether to commit a block from the committed block list or from the
 * uncommitted block list, or to commit the most recently uploaded version of the block,
 * whichever list it may belong to.
 *
 * @param blockIds Base64-encoded block IDs that make up the blob.
 * @param options Optional parameters to execute this function.
 * @param context Context for cancelling long running operations.
 * @return A CommitBlockListResult describing the state of the updated block blob.
 */
Azure::Response<Models::CommitBlockListResult> CommitBlockList(
    const std::vector<std::string>& blockIds, const CommitBlockListOptions& options = CommitBlockListOptions(),
    const Azure::Core::Context& context = Azure::Core::Context()) const;

/**
 * @brief Retrieves the list of blocks that have been uploaded as part of a block blob. There
 * are two block lists maintained for a blob. The Committed Block list has blocks that have been
 * successfully committed to a given blob with CommitBlockList. The Uncommitted Block list has
 * blocks that have been uploaded for a blob using StageBlock, but that have not yet been
 * committed.
 *
 * @param options Optional parameters to execute this function.
 * @param context Context for cancelling long running operations.
 * @return A GetBlockListResult describing the requested block list.
 */
Azure::Response<Models::GetBlockListResult> GetBlockList(
    const GetBlockListOptions& options = GetBlockListOptions(),
    const Azure::Core::Context& context = Azure::Core::Context()) const;

/**
 * @brief Returns the result of a query against the blob.
 *
 * @param querySqlExpression The query expression in SQL.
 * @param options Optional parameters to execute this function.
 * @param context Context for cancelling long running operations.
 * @return A QueryBlobResult describing the query result.
 */
Azure::Response<Models::QueryBlobResult> Query(const std::string& querySqlExpression,
    const QueryBlobOptions& options = QueryBlobOptions(),
    const Azure::Core::Context& context = Azure::Core::Context()) const;

explicit TDBlockBlobClient(BlobClient blobClient);

private:
friend class BlobClient;
friend class Files::DataLake::DataLakeFileClient;
};

} // namespace Blobs
} // namespace Storage
} // namespace Azure
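
Taken together, these declarations support the classic staged-upload flow: stage blocks, commit the block list, then inspect it. The following is a minimal sketch of that flow against this client, not part of the commit; the connection string, container, and blob names are placeholders, StageBlock is assumed to be declared earlier in this header (it is used by the .cpp below), and error handling is omitted.

#include <string>
#include <vector>

#include <azure/core.hpp>
#include <azure/storage/blobs.hpp>

#include "td_block_blob_client.hpp"

using namespace Azure::Storage::Blobs;

int main() {
  // Placeholder credentials/names, for illustration only.
  auto client = TDBlockBlobClient::CreateFromConnectionString(
      "<connection-string>", "mycontainer", "myblob");

  // Stage one block; the pre-encoding block ID must be <= 64 bytes.
  std::vector<uint8_t> data = {'h', 'e', 'l', 'l', 'o'};
  std::string blockId = Azure::Core::Convert::Base64Encode(
      std::vector<uint8_t>{'0', '0', '0', '0', '0', '1'});
  Azure::Core::IO::MemoryBodyStream stream(data.data(), data.size());
  client.StageBlock(blockId, stream);

  // Commit the staged blocks, then verify via the block list.
  client.CommitBlockList({blockId});
  auto blockList = client.GetBlockList();
  return 0;
}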
@ -0,0 +1,534 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define ALLOW_FORBID_FUNC

#include "az.h"
#include "azInt.h"

#include "os.h"
#include "taoserror.h"
#include "tglobal.h"

#if defined(USE_S3)

#include <azure/core.hpp>
#include <azure/storage/blobs.hpp>
#include "td_block_blob_client.hpp"

using namespace Azure::Storage;
using namespace Azure::Storage::Blobs;

extern char tsS3Hostname[][TSDB_FQDN_LEN];
extern char tsS3AccessKeyId[][TSDB_FQDN_LEN];
extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN];
extern char tsS3BucketName[TSDB_FQDN_LEN];

extern int8_t tsS3Enabled;
extern int8_t tsS3EpNum;

int32_t azBegin() { return TSDB_CODE_SUCCESS; }

void azEnd() {}

static void checkPrint(const char *fmt, ...) {
  va_list arg_ptr;
  va_start(arg_ptr, fmt);
  (void)vfprintf(stderr, fmt, arg_ptr);
  va_end(arg_ptr);
}

static void azDumpCfgByEp(int8_t epIndex) {
  // clang-format off
  checkPrint(
      "%-24s %s\n"
      "%-24s %s\n"
      "%-24s %s\n"
      "%-24s %s\n"
      "%-24s %s\n"
      "%-24s %s\n",
      "hostName", tsS3Hostname[epIndex],
      "bucketName", tsS3BucketName,
      "protocol", "https only",
      "uristyle", "path only",
      "accessKey", tsS3AccessKeyId[epIndex],
      "accessKeySecret", tsS3AccessKeySecret[epIndex]);
  // clang-format on
}

static int32_t azListBucket(char const *bucketname) {
  int32_t code = 0;
  const std::string delimiter = "/";
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  accountURL = "https://" + accountURL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = bucketname;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    Azure::Storage::Blobs::ListBlobsOptions options;
    options.Prefix = "s3";

    checkPrint("objects:\n");
    for (auto pageResult = containerClient.ListBlobs(options); pageResult.HasPage(); pageResult.MoveToNextPage()) {
      for (const auto &blob : pageResult.Blobs) {
        checkPrint("%s\n", blob.Name.c_str());
      }
    }
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());

    code = TAOS_SYSTEM_ERROR(EIO);
    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

int32_t azCheckCfg() {
  int32_t code = 0, lino = 0;

  azDumpCfgByEp(0);

  // test put
  char testdata[17] = "0123456789abcdef";
  const char *objectname[] = {"s3test.txt"};
  char path[PATH_MAX] = {0};
  int ds_len = strlen(TD_DIRSEP);
  int tmp_len = strlen(tsTempDir);

  (void)snprintf(path, PATH_MAX, "%s", tsTempDir);
  if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP);
    (void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", objectname[0]);
  } else {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", objectname[0]);
  }

  uint8_t *pBlock = NULL;
  int c_offset = 10;
  int c_len = 6;
  char buf[7] = {0};

  TdFilePtr fp = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC);
  if (!fp) {
    checkPrint("failed to open test file: %s.\n", path);
    TAOS_CHECK_GOTO(terrno, &lino, _next);
  }
  if (taosWriteFile(fp, testdata, strlen(testdata)) < 0) {
    checkPrint("failed to write test file: %s.\n", path);
    TAOS_CHECK_GOTO(terrno, &lino, _next);
  }
  if (taosFsyncFile(fp) < 0) {
    checkPrint("failed to fsync test file: %s.\n", path);
    TAOS_CHECK_GOTO(terrno, &lino, _next);
  }
  (void)taosCloseFile(&fp);

  checkPrint("\nstart to put object: %s, file: %s content: %s\n", objectname[0], path, testdata);
  code = azPutObjectFromFileOffset(path, objectname[0], 0, 16);
  if (code != 0) {
    checkPrint("put object %s : failed.\n", objectname[0]);
    TAOS_CHECK_GOTO(code, &lino, _next);
  }
  checkPrint("put object %s: success.\n\n", objectname[0]);

  // list buckets
  checkPrint("start to list bucket %s by prefix s3.\n", tsS3BucketName);
  code = azListBucket(tsS3BucketName);
  if (code != 0) {
    checkPrint("listing bucket %s : failed.\n", tsS3BucketName);
    TAOS_CHECK_GOTO(code, &lino, _next);
  }
  checkPrint("listing bucket %s: success.\n\n", tsS3BucketName);

  // test range get
  checkPrint("start to range get object %s offset: %d len: %d.\n", objectname[0], c_offset, c_len);
  code = azGetObjectBlock(objectname[0], c_offset, c_len, true, &pBlock);
  if (code != 0) {
    checkPrint("get object %s : failed.\n", objectname[0]);
    TAOS_CHECK_GOTO(code, &lino, _next);
  }

  (void)memcpy(buf, pBlock, c_len);
  taosMemoryFree(pBlock);
  checkPrint("object content: %s\n", buf);
  checkPrint("get object %s: success.\n\n", objectname[0]);

  // delete test object; azDeleteObjectsByPrefix returns void and logs failures itself
  checkPrint("start to delete object: %s.\n", objectname[0]);
  azDeleteObjectsByPrefix(objectname[0]);
  checkPrint("delete object %s: success.\n\n", objectname[0]);

_next:
  if (fp) {
    (void)taosCloseFile(&fp);
  }

  if (TSDB_CODE_SUCCESS != code) {
    checkPrint("s3 check failed, code: %d, line: %d.\n", code, lino);
  }

  checkPrint("=================================================================\n");

  TAOS_RETURN(code);
}

static int32_t azPutObjectFromFileOffsetImpl(const char *file, const char *object_name, int64_t offset, int64_t size) {
  int32_t code = 0;

  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    std::string accountURL = tsS3Hostname[0];
    accountURL = "https://" + accountURL;
    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    // Create the block blob client from the generic blob client.
    TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));

    blobClient.UploadFrom(file, offset, size);
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s: Status Code: %d, Reason Phrase: %s", __func__, static_cast<int>(e.StatusCode), e.ReasonPhrase.c_str());

    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) {
  int32_t code = 0;

  try {
    code = azPutObjectFromFileOffsetImpl(file, object_name, offset, size);
  } catch (const std::exception &e) {
    azError("%s: Reason Phrase: %s", __func__, e.what());

    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

static int32_t azGetObjectBlockImpl(const char *object_name, int64_t offset, int64_t size, bool check,
                                    uint8_t **ppBlock) {
  int32_t code = TSDB_CODE_SUCCESS;
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  uint8_t *buf = NULL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    accountURL = "https://" + accountURL;
    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));

    Blobs::DownloadBlobToOptions options;
    options.Range = Azure::Core::Http::HttpRange();
    options.Range.Value().Offset = offset;
    options.Range.Value().Length = size;

    buf = (uint8_t *)taosMemoryCalloc(1, size);
    if (!buf) {
      return terrno;
    }

    auto res = blobClient.DownloadTo(buf, size, options);
    if (check && res.Value.ContentRange.Length.Value() != size) {
      code = TAOS_SYSTEM_ERROR(EIO);
      azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
      taosMemoryFree(buf);
      TAOS_RETURN(code);
    }

    *ppBlock = buf;
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());
    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    if (buf) {
      taosMemoryFree(buf);
    }
    *ppBlock = NULL;

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

static int32_t azGetObjectBlockRetry(const char *object_name, int64_t offset, int64_t size, bool check,
                                     uint8_t **ppBlock) {
  int32_t code = TSDB_CODE_SUCCESS;

  // May use an exponential backoff policy for retries with 503
  int retryCount = 0;
  static int maxRetryCount = 5;
  static int minRetryInterval = 1000;  // ms
  static int maxRetryInterval = 3000;  // ms

_retry:
  code = azGetObjectBlockImpl(object_name, offset, size, check, ppBlock);
  if (TSDB_CODE_SUCCESS != code && retryCount++ < maxRetryCount) {
    taosMsleep(taosRand() % (maxRetryInterval - minRetryInterval + 1) + minRetryInterval);
    uInfo("%s: 0x%x(%s) and retry get object", __func__, code, tstrerror(code));
    goto _retry;
  }

  TAOS_RETURN(code);
}
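
// The uniform random delay above works, but the exponential backoff the
// comment mentions is not implemented. A minimal sketch of that variant
// (hypothetical, not part of this commit's behavior):
//
//   int64_t delayMs = minRetryInterval;
//   for (int attempt = 0; attempt < maxRetryCount; ++attempt) {
//     code = azGetObjectBlockImpl(object_name, offset, size, check, ppBlock);
//     if (code == TSDB_CODE_SUCCESS) break;
//     taosMsleep(delayMs + taosRand() % 100);  // backoff plus small jitter
//     delayMs = delayMs * 2 > maxRetryInterval ? maxRetryInterval : delayMs * 2;
//   }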

int32_t azGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock) {
  int32_t code = TSDB_CODE_SUCCESS;

  try {
    code = azGetObjectBlockRetry(object_name, offset, size, check, ppBlock);
  } catch (const std::exception &e) {
    azError("%s: Reason Phrase: %s", __func__, e.what());

    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

static void azDeleteObjectsByPrefixImpl(const char *prefix) {
  const std::string delimiter = "/";
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  accountURL = "https://" + accountURL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    Azure::Storage::Blobs::ListBlobsOptions options;
    options.Prefix = prefix;

    std::set<std::string> listBlobs;
    for (auto pageResult = containerClient.ListBlobs(options); pageResult.HasPage(); pageResult.MoveToNextPage()) {
      for (const auto &blob : pageResult.Blobs) {
        listBlobs.insert(blob.Name);
      }
    }

    for (const auto &blobName : listBlobs) {
      auto blobClient = containerClient.GetAppendBlobClient(blobName);
      blobClient.Delete();
    }
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());
  }
}

void azDeleteObjectsByPrefix(const char *prefix) {
  try {
    azDeleteObjectsByPrefixImpl(prefix);
  } catch (const std::exception &e) {
    azError("%s: Reason Phrase: %s", __func__, e.what());
  }
}

int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp) {
  int32_t code = 0, lino = 0;
  uint64_t contentLength = 0;

  if (taosStatFile(file, (int64_t *)&contentLength, NULL, NULL) < 0) {
    azError("ERROR: %s Failed to stat file %s: ", __func__, file);
    TAOS_RETURN(terrno);
  }

  code = azPutObjectFromFileOffset(file, object, 0, contentLength);
  if (code != 0) {
    azError("ERROR: %s Failed to put file %s: ", __func__, file);
    TAOS_CHECK_GOTO(code, &lino, _exit);
  }

_exit:
  if (code) {
    azError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }

  TAOS_RETURN(code);
}

int32_t azGetObjectToFile(const char *object_name, const char *fileName) {
  int32_t code = TSDB_CODE_SUCCESS;
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  accountURL = "https://" + accountURL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));

    auto res = blobClient.DownloadTo(fileName);
    if (res.Value.ContentRange.Length.Value() <= 0) {
      code = TAOS_SYSTEM_ERROR(EIO);
      azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
      TAOS_RETURN(code);
    }
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());
    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
  const std::string delimiter = "/";
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  accountURL = "https://" + accountURL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    Azure::Storage::Blobs::ListBlobsOptions options;
    options.Prefix = prefix;

    std::set<std::string> listBlobs;
    for (auto pageResult = containerClient.ListBlobs(options); pageResult.HasPage(); pageResult.MoveToNextPage()) {
      for (const auto &blob : pageResult.Blobs) {
        listBlobs.insert(blob.Name);
      }
    }

    for (const auto &blobName : listBlobs) {
      const char *tmp = strchr(blobName.c_str(), '/');
      tmp = (tmp == NULL) ? blobName.c_str() : tmp + 1;
      char fileName[PATH_MAX] = {0};
      if (path[strlen(path) - 1] != TD_DIRSEP_CHAR) {
        (void)snprintf(fileName, PATH_MAX, "%s%s%s", path, TD_DIRSEP, tmp);
      } else {
        (void)snprintf(fileName, PATH_MAX, "%s%s", path, tmp);
      }
      if (azGetObjectToFile(blobName.c_str(), fileName)) {
        TAOS_RETURN(TSDB_CODE_FAILED);
      }
    }
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());
    TAOS_RETURN(TSDB_CODE_FAILED);
  }

  return 0;
}

int32_t azDeleteObjects(const char *object_name[], int nobject) {
  for (int i = 0; i < nobject; ++i) {
    azDeleteObjectsByPrefix(object_name[i]);
  }

  return 0;
}

#else

int32_t azBegin() { return TSDB_CODE_SUCCESS; }

void azEnd() {}

int32_t azCheckCfg() { return TSDB_CODE_SUCCESS; }

int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) {
  return TSDB_CODE_SUCCESS;
}

int32_t azGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock) {
  return TSDB_CODE_SUCCESS;
}

void azDeleteObjectsByPrefix(const char *prefix) {}

int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp) { return 0; }

int32_t azGetObjectsByPrefix(const char *prefix, const char *path) { return 0; }

int32_t azGetObjectToFile(const char *object_name, const char *fileName) { return 0; }

int32_t azDeleteObjects(const char *object_name[], int nobject) { return 0; }

#endif
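
The #else branch above stubs out the same API when USE_S3 is off, so callers compile either way. For reference, a minimal sketch of how the rest of the engine might drive these entry points (object and path names here are placeholders; the tsS3* globals are assumed to be populated from the configuration already):

#include "az.h"
#include "os.h"

// Sketch only: put a file, range-read part of it back, then clean up by prefix.
static int32_t azDemoRoundTrip() {
  int32_t code = azPutObjectFromFile2("/tmp/s3demo.data", "s3demo.data", 0);
  if (code != 0) return code;

  uint8_t *pBlock = NULL;
  code = azGetObjectBlock("s3demo.data", 0, 16, true, &pBlock);  // first 16 bytes
  if (code == 0) taosMemoryFree(pBlock);

  azDeleteObjectsByPrefix("s3demo");  // removes every object under the prefix
  return code;
}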
@ -0,0 +1,531 @@
#if defined(USE_S3)
#include <td_avro_parser.h>

#include <azure/core/azure_assert.hpp>
#include <azure/core/internal/json/json.hpp>

#include <algorithm>
#include <cstring>

namespace Azure {
namespace Storage {
namespace Blobs {
namespace _detail {

namespace {
int64_t parseInt(AvroStreamReader::ReaderPos& data) {
  uint64_t r = 0;
  int nb = 0;
  while (true) {
    uint8_t c = (*data.BufferPtr)[data.Offset++];
    r = r | ((static_cast<uint64_t>(c) & 0x7f) << (nb * 7));
    if (c & 0x80) {
      ++nb;
      continue;
    }
    break;
  }
  return static_cast<int64_t>(r >> 1) ^ -static_cast<int64_t>(r & 0x01);
}

AvroSchema ParseSchemaFromJsonString(const std::string& jsonSchema) {
  const static std::map<std::string, AvroSchema> BuiltinNameSchemaMap = {
      {"string", AvroSchema::StringSchema}, {"bytes", AvroSchema::BytesSchema}, {"int", AvroSchema::IntSchema},
      {"long", AvroSchema::LongSchema},     {"float", AvroSchema::FloatSchema}, {"double", AvroSchema::DoubleSchema},
      {"boolean", AvroSchema::BoolSchema},  {"null", AvroSchema::NullSchema},
  };
  std::map<std::string, AvroSchema> nameSchemaMap = BuiltinNameSchemaMap;

  std::function<AvroSchema(const Core::Json::_internal::json& obj)> parseSchemaFromJsonObject;
  parseSchemaFromJsonObject = [&](const Core::Json::_internal::json& obj) -> AvroSchema {
    if (obj.is_string()) {
      auto typeName = obj.get<std::string>();
      return nameSchemaMap.find(typeName)->second;
    } else if (obj.is_array()) {
      std::vector<AvroSchema> unionSchemas;
      for (const auto& s : obj) {
        unionSchemas.push_back(parseSchemaFromJsonObject(s));
      }
      return AvroSchema::UnionSchema(std::move(unionSchemas));
    } else if (obj.is_object()) {
      if (obj.count("namespace") != 0) {
        throw std::runtime_error("Namespace isn't supported yet in Avro schema.");
      }
      if (obj.count("aliases") != 0) {
        throw std::runtime_error("Alias isn't supported yet in Avro schema.");
      }
      auto typeName = obj["type"].get<std::string>();
      auto i = nameSchemaMap.find(typeName);
      if (i != nameSchemaMap.end()) {
        return i->second;
      }
      if (typeName == "record") {
        std::vector<std::pair<std::string, AvroSchema>> fieldsSchema;
        for (const auto& field : obj["fields"]) {
          fieldsSchema.push_back(
              std::make_pair(field["name"].get<std::string>(), parseSchemaFromJsonObject(field["type"])));
        }

        const std::string recordName = obj["name"].get<std::string>();
        auto recordSchema = AvroSchema::RecordSchema(recordName, std::move(fieldsSchema));
        nameSchemaMap.insert(std::make_pair(recordName, recordSchema));
        return recordSchema;
      } else if (typeName == "enum") {
        throw std::runtime_error("Enum type isn't supported yet in Avro schema.");
      } else if (typeName == "array") {
        return AvroSchema::ArraySchema(parseSchemaFromJsonObject(obj["items"]));
      } else if (typeName == "map") {
        // Avro map schemas declare their value type under "values" (not "items").
        return AvroSchema::MapSchema(parseSchemaFromJsonObject(obj["values"]));
      } else if (typeName == "fixed") {
        const std::string fixedName = obj["name"].get<std::string>();
        auto fixedSchema = AvroSchema::FixedSchema(fixedName, obj["size"].get<int64_t>());
        nameSchemaMap.insert(std::make_pair(fixedName, fixedSchema));
        return fixedSchema;
      } else {
        throw std::runtime_error("Unrecognized type " + typeName + " in Avro schema.");
      }
    }
    AZURE_UNREACHABLE_CODE();
  };

  auto jsonRoot = Core::Json::_internal::json::parse(jsonSchema.begin(), jsonSchema.end());
  return parseSchemaFromJsonObject(jsonRoot);
}
}  // namespace

int64_t AvroStreamReader::ParseInt(const Core::Context& context) {
  uint64_t r = 0;
  int nb = 0;
  while (true) {
    Preload(1, context);
    uint8_t c = m_streambuffer[m_pos.Offset++];

    r = r | ((static_cast<uint64_t>(c) & 0x7f) << (nb * 7));
    if (c & 0x80) {
      ++nb;
      continue;
    }
    break;
  }
  return static_cast<int64_t>(r >> 1) ^ -static_cast<int64_t>(r & 0x01);
}

void AvroStreamReader::Advance(size_t n, const Core::Context& context) {
  Preload(n, context);
  m_pos.Offset += n;
}

size_t AvroStreamReader::Preload(size_t n, const Core::Context& context) {
  size_t oldAvailable = AvailableBytes();
  while (true) {
    size_t newAvailable = TryPreload(n, context);
    if (newAvailable >= n) {
      return newAvailable;
    }
    if (oldAvailable == newAvailable) {
      throw std::runtime_error("Unexpected EOF of Avro stream.");
    }
    oldAvailable = newAvailable;
  }
  AZURE_UNREACHABLE_CODE();
}

size_t AvroStreamReader::TryPreload(size_t n, const Core::Context& context) {
  size_t availableBytes = AvailableBytes();
  if (availableBytes >= n) {
    return availableBytes;
  }
  const size_t MinRead = 4096;
  size_t tryReadSize = (std::max)(n, MinRead);
  size_t currSize = m_streambuffer.size();
  m_streambuffer.resize(m_streambuffer.size() + tryReadSize);
  size_t actualReadSize = m_stream->Read(m_streambuffer.data() + currSize, tryReadSize, context);
  m_streambuffer.resize(currSize + actualReadSize);
  return AvailableBytes();
}

void AvroStreamReader::Discard() {
  constexpr size_t MinimumReleaseMemory = 128 * 1024;
  if (m_pos.Offset < MinimumReleaseMemory) {
    return;
  }
  const size_t availableBytes = AvailableBytes();
  std::memmove(&m_streambuffer[0], &m_streambuffer[m_pos.Offset], availableBytes);
  m_streambuffer.resize(availableBytes);
  m_pos.Offset = 0;
}

const AvroSchema AvroSchema::StringSchema(AvroDatumType::String);
const AvroSchema AvroSchema::BytesSchema(AvroDatumType::Bytes);
const AvroSchema AvroSchema::IntSchema(AvroDatumType::Int);
const AvroSchema AvroSchema::LongSchema(AvroDatumType::Long);
const AvroSchema AvroSchema::FloatSchema(AvroDatumType::Float);
const AvroSchema AvroSchema::DoubleSchema(AvroDatumType::Double);
const AvroSchema AvroSchema::BoolSchema(AvroDatumType::Bool);
const AvroSchema AvroSchema::NullSchema(AvroDatumType::Null);

AvroSchema AvroSchema::RecordSchema(std::string name,
                                    const std::vector<std::pair<std::string, AvroSchema>>& fieldsSchema) {
  AvroSchema recordSchema(AvroDatumType::Record);
  recordSchema.m_name = std::move(name);
  recordSchema.m_status = std::make_shared<SharedStatus>();
  for (auto& i : fieldsSchema) {
    recordSchema.m_status->m_keys.push_back(i.first);
    recordSchema.m_status->m_schemas.push_back(i.second);
  }
  return recordSchema;
}

AvroSchema AvroSchema::ArraySchema(AvroSchema elementSchema) {
  AvroSchema arraySchema(AvroDatumType::Array);
  arraySchema.m_status = std::make_shared<SharedStatus>();
  arraySchema.m_status->m_schemas.push_back(std::move(elementSchema));
  return arraySchema;
}

AvroSchema AvroSchema::MapSchema(AvroSchema elementSchema) {
  AvroSchema mapSchema(AvroDatumType::Map);
  mapSchema.m_status = std::make_shared<SharedStatus>();
  mapSchema.m_status->m_schemas.push_back(std::move(elementSchema));
  return mapSchema;
}

AvroSchema AvroSchema::UnionSchema(std::vector<AvroSchema> schemas) {
  AvroSchema unionSchema(AvroDatumType::Union);
  unionSchema.m_status = std::make_shared<SharedStatus>();
  unionSchema.m_status->m_schemas = std::move(schemas);
  return unionSchema;
}

AvroSchema AvroSchema::FixedSchema(std::string name, int64_t size) {
  AvroSchema fixedSchema(AvroDatumType::Fixed);
  fixedSchema.m_name = std::move(name);
  fixedSchema.m_status = std::make_shared<SharedStatus>();
  fixedSchema.m_status->m_size = size;
  return fixedSchema;
}

void AvroDatum::Fill(AvroStreamReader& reader, const Core::Context& context) {
  m_data = reader.m_pos;
  if (m_schema.Type() == AvroDatumType::String || m_schema.Type() == AvroDatumType::Bytes) {
    int64_t stringSize = reader.ParseInt(context);
    reader.Advance(static_cast<size_t>(stringSize), context);
  } else if (m_schema.Type() == AvroDatumType::Int || m_schema.Type() == AvroDatumType::Long ||
             m_schema.Type() == AvroDatumType::Enum) {
    reader.ParseInt(context);
  } else if (m_schema.Type() == AvroDatumType::Float) {
    reader.Advance(4, context);
  } else if (m_schema.Type() == AvroDatumType::Double) {
    reader.Advance(8, context);
  } else if (m_schema.Type() == AvroDatumType::Bool) {
    reader.Advance(1, context);
  } else if (m_schema.Type() == AvroDatumType::Null) {
    reader.Advance(0, context);
  } else if (m_schema.Type() == AvroDatumType::Record) {
    for (const auto& s : m_schema.FieldSchemas()) {
      AvroDatum(s).Fill(reader, context);
    }
  } else if (m_schema.Type() == AvroDatumType::Array) {
    while (true) {
      int64_t numElementsInBlock = reader.ParseInt(context);
      if (numElementsInBlock == 0) {
        break;
      } else if (numElementsInBlock < 0) {
        int64_t blockSize = reader.ParseInt(context);
        reader.Advance(static_cast<size_t>(blockSize), context);
      } else {
        for (auto i = 0; i < numElementsInBlock; ++i) {
          AvroDatum(m_schema.ItemSchema()).Fill(reader, context);
        }
      }
    }
  } else if (m_schema.Type() == AvroDatumType::Map) {
    while (true) {
      int64_t numElementsInBlock = reader.ParseInt(context);
      if (numElementsInBlock == 0) {
        break;
      } else if (numElementsInBlock < 0) {
        int64_t blockSize = reader.ParseInt(context);
        reader.Advance(static_cast<size_t>(blockSize), context);
      } else {
        for (int64_t i = 0; i < numElementsInBlock; ++i) {
          AvroDatum(AvroSchema::StringSchema).Fill(reader, context);
          AvroDatum(m_schema.ItemSchema()).Fill(reader, context);
        }
      }
    }
  } else if (m_schema.Type() == AvroDatumType::Union) {
    int64_t i = reader.ParseInt(context);
    AvroDatum(m_schema.FieldSchemas()[static_cast<size_t>(i)]).Fill(reader, context);
  } else if (m_schema.Type() == AvroDatumType::Fixed) {
    reader.Advance(m_schema.Size(), context);
  } else {
    AZURE_UNREACHABLE_CODE();
  }
}

void AvroDatum::Fill(AvroStreamReader::ReaderPos& data) {
  m_data = data;
  if (m_schema.Type() == AvroDatumType::String || m_schema.Type() == AvroDatumType::Bytes) {
    int64_t stringSize = parseInt(data);
    data.Offset += static_cast<size_t>(stringSize);
  } else if (m_schema.Type() == AvroDatumType::Int || m_schema.Type() == AvroDatumType::Long ||
             m_schema.Type() == AvroDatumType::Enum) {
    parseInt(data);
  } else if (m_schema.Type() == AvroDatumType::Float) {
    data.Offset += 4;
  } else if (m_schema.Type() == AvroDatumType::Double) {
    data.Offset += 8;
  } else if (m_schema.Type() == AvroDatumType::Bool) {
    data.Offset += 1;
  } else if (m_schema.Type() == AvroDatumType::Null) {
    data.Offset += 0;
  } else if (m_schema.Type() == AvroDatumType::Record) {
    for (const auto& s : m_schema.FieldSchemas()) {
      AvroDatum(s).Fill(data);
    }
  } else if (m_schema.Type() == AvroDatumType::Array) {
    while (true) {
      int64_t numElementsInBlock = parseInt(data);
      if (numElementsInBlock == 0) {
        break;
      } else if (numElementsInBlock < 0) {
        int64_t blockSize = parseInt(data);
        data.Offset += static_cast<size_t>(blockSize);
      } else {
        for (auto i = 0; i < numElementsInBlock; ++i) {
          AvroDatum(m_schema.ItemSchema()).Fill(data);
        }
      }
    }
  } else if (m_schema.Type() == AvroDatumType::Map) {
    while (true) {
      int64_t numElementsInBlock = parseInt(data);
      if (numElementsInBlock == 0) {
        break;
      } else if (numElementsInBlock < 0) {
        int64_t blockSize = parseInt(data);
        data.Offset += static_cast<size_t>(blockSize);
      } else {
        for (int64_t i = 0; i < numElementsInBlock; ++i) {
          AvroDatum(AvroSchema::StringSchema).Fill(data);
          AvroDatum(m_schema.ItemSchema()).Fill(data);
        }
      }
    }
  } else if (m_schema.Type() == AvroDatumType::Union) {
    int64_t i = parseInt(data);
    AvroDatum(m_schema.FieldSchemas()[static_cast<size_t>(i)]).Fill(data);
  } else if (m_schema.Type() == AvroDatumType::Fixed) {
    data.Offset += m_schema.Size();
  } else {
    AZURE_UNREACHABLE_CODE();
  }
}

template <>
AvroDatum::StringView AvroDatum::Value() const {
  auto data = m_data;
  if (m_schema.Type() == AvroDatumType::String || m_schema.Type() == AvroDatumType::Bytes) {
    const int64_t length = parseInt(data);
    const uint8_t* start = &(*data.BufferPtr)[data.Offset];
    StringView ret{start, static_cast<size_t>(length)};
    data.Offset += static_cast<size_t>(length);
    return ret;
  }
  if (m_schema.Type() == AvroDatumType::Fixed) {
    const size_t fixedSize = m_schema.Size();
    const uint8_t* start = &(*data.BufferPtr)[data.Offset];
    StringView ret{start, fixedSize};
    data.Offset += fixedSize;
    return ret;
  }
  AZURE_UNREACHABLE_CODE();
}

template <>
std::string AvroDatum::Value() const {
  auto stringView = Value<StringView>();
  return std::string(stringView.Data, stringView.Data + stringView.Length);
}

template <>
std::vector<uint8_t> AvroDatum::Value() const {
  auto stringView = Value<StringView>();
  return std::vector<uint8_t>(stringView.Data, stringView.Data + stringView.Length);
}

template <>
int64_t AvroDatum::Value() const {
  auto data = m_data;
  return parseInt(data);
}

template <>
int32_t AvroDatum::Value() const {
  return static_cast<int32_t>(Value<int64_t>());
}

template <>
bool AvroDatum::Value() const {
  return Value<int64_t>();
}

template <>
std::nullptr_t AvroDatum::Value() const {
  return nullptr;
}

template <>
AvroRecord AvroDatum::Value() const {
  auto data = m_data;

  AvroRecord r;
  r.m_keys = &m_schema.FieldNames();
  for (const auto& schema : m_schema.FieldSchemas()) {
    auto datum = AvroDatum(schema);
    datum.Fill(data);
    r.m_values.push_back(std::move(datum));
  }

  return r;
}

template <>
AvroMap AvroDatum::Value() const {
  auto data = m_data;

  AvroMap m;
  while (true) {
    int64_t numElementsInBlock = parseInt(data);
    if (numElementsInBlock == 0) {
      break;
    }
    if (numElementsInBlock < 0) {
      numElementsInBlock = -numElementsInBlock;
      parseInt(data);
    }
    for (int64_t i = 0; i < numElementsInBlock; ++i) {
      auto keyDatum = AvroDatum(AvroSchema::StringSchema);
      keyDatum.Fill(data);
      auto valueDatum = AvroDatum(m_schema.ItemSchema());
      valueDatum.Fill(data);
      m[keyDatum.Value<std::string>()] = valueDatum;
    }
  }
  return m;
}

template <>
AvroDatum AvroDatum::Value() const {
  auto data = m_data;
  if (m_schema.Type() == AvroDatumType::Union) {
    int64_t i = parseInt(data);
    auto datum = AvroDatum(m_schema.FieldSchemas()[static_cast<size_t>(i)]);
    datum.Fill(data);
    return datum;
  }
  AZURE_UNREACHABLE_CODE();
}

AvroObjectContainerReader::AvroObjectContainerReader(Core::IO::BodyStream& stream)
    : m_reader(std::make_unique<AvroStreamReader>(stream)) {}

AvroDatum AvroObjectContainerReader::NextImpl(const AvroSchema* schema, const Core::Context& context) {
  AZURE_ASSERT_FALSE(m_eof);
  static const auto SyncMarkerSchema = AvroSchema::FixedSchema("Sync", 16);
  if (!schema) {
    static AvroSchema FileHeaderSchema = []() {
      std::vector<std::pair<std::string, AvroSchema>> fieldsSchema;
      fieldsSchema.push_back(std::make_pair("magic", AvroSchema::FixedSchema("Magic", 4)));
      fieldsSchema.push_back(std::make_pair("meta", AvroSchema::MapSchema(AvroSchema::BytesSchema)));
      fieldsSchema.push_back(std::make_pair("sync", SyncMarkerSchema));
      return AvroSchema::RecordSchema("org.apache.avro.file.Header", std::move(fieldsSchema));
    }();
    auto fileHeaderDatum = AvroDatum(FileHeaderSchema);
    fileHeaderDatum.Fill(*m_reader, context);
    auto fileHeader = fileHeaderDatum.Value<AvroRecord>();
    if (fileHeader.Field("magic").Value<std::string>() != "Obj\01") {
      throw std::runtime_error("Invalid Avro object container magic.");
    }
    AvroMap meta = fileHeader.Field("meta").Value<AvroMap>();
    std::string objectSchemaJson = meta["avro.schema"].Value<std::string>();
    std::string codec = "null";
    if (meta.count("avro.codec") != 0) {
      codec = meta["avro.codec"].Value<std::string>();
    }
    if (codec != "null") {
      throw std::runtime_error("Unsupported Avro codec: " + codec);
    }
    m_syncMarker = fileHeader.Field("sync").Value<std::string>();
    m_objectSchema = std::make_unique<AvroSchema>(ParseSchemaFromJsonString(objectSchemaJson));
    schema = m_objectSchema.get();
  }

  if (m_remainingObjectInCurrentBlock == 0) {
    m_reader->Discard();
    m_remainingObjectInCurrentBlock = m_reader->ParseInt(context);
    int64_t ObjectsSize = m_reader->ParseInt(context);
    m_reader->Preload(static_cast<size_t>(ObjectsSize), context);
  }

  auto objectDatum = AvroDatum(*m_objectSchema);
  objectDatum.Fill(*m_reader, context);
  if (--m_remainingObjectInCurrentBlock == 0) {
    auto markerDatum = AvroDatum(SyncMarkerSchema);
    markerDatum.Fill(*m_reader, context);
    auto marker = markerDatum.Value<std::string>();
    if (marker != m_syncMarker) {
      throw std::runtime_error("Sync marker doesn't match.");
    }
    m_eof = m_reader->TryPreload(1, context) == 0;
  }
  return objectDatum;
}

size_t AvroStreamParser::OnRead(uint8_t* buffer, size_t count, Azure::Core::Context const& context) {
  if (m_parserBuffer.Length != 0) {
    size_t bytesToCopy = (std::min)(m_parserBuffer.Length, count);
    std::memcpy(buffer, m_parserBuffer.Data, bytesToCopy);
    m_parserBuffer.Data += bytesToCopy;
    m_parserBuffer.Length -= bytesToCopy;
    return bytesToCopy;
  }
  while (!m_parser.End()) {
    auto datum = m_parser.Next(context);
    if (datum.Schema().Type() == AvroDatumType::Union) {
      datum = datum.Value<AvroDatum>();
    }
    if (datum.Schema().Type() != AvroDatumType::Record) {
      continue;
    }
    if (datum.Schema().Name() == "com.microsoft.azure.storage.queryBlobContents.resultData") {
      auto record = datum.Value<AvroRecord>();
      auto dataDatum = record.Field("data");
      m_parserBuffer = dataDatum.Value<AvroDatum::StringView>();
      return OnRead(buffer, count, context);
    }
    if (datum.Schema().Name() == "com.microsoft.azure.storage.queryBlobContents.progress" && m_progressCallback) {
      auto record = datum.Value<AvroRecord>();
      auto bytesScanned = record.Field("bytesScanned").Value<int64_t>();
      auto totalBytes = record.Field("totalBytes").Value<int64_t>();
      m_progressCallback(bytesScanned, totalBytes);
    }
    if (datum.Schema().Name() == "com.microsoft.azure.storage.queryBlobContents.error" && m_errorCallback) {
      auto record = datum.Value<AvroRecord>();
      BlobQueryError e;
      e.Name = record.Field("name").Value<std::string>();
      e.Description = record.Field("description").Value<std::string>();
      e.IsFatal = record.Field("fatal").Value<bool>();
      e.Position = record.Field("position").Value<int64_t>();
      m_errorCallback(std::move(e));
    }
  }
  return 0;
}
}  // namespace _detail
}  // namespace Blobs
}  // namespace Storage
}  // namespace Azure

#endif
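
The two parseInt/ParseInt routines above implement Avro's zigzag-encoded variable-length integers: each byte carries 7 payload bits with the high bit as a continuation flag, and the final `(r >> 1) ^ -(r & 1)` step undoes the zigzag mapping (0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, ...). A standalone sketch of the same decoder, with a couple of worked byte sequences:

#include <cassert>
#include <cstdint>
#include <vector>

// Decode one Avro zigzag varint from `bytes` starting at `pos` (same logic as
// parseInt above, just outside the reader classes).
static int64_t decodeZigZag(const std::vector<uint8_t>& bytes, size_t& pos) {
  uint64_t r = 0;
  int nb = 0;
  while (true) {
    uint8_t c = bytes[pos++];
    r |= (static_cast<uint64_t>(c) & 0x7f) << (nb * 7);
    if (!(c & 0x80)) break;  // high bit clear: this was the last byte
    ++nb;
  }
  return static_cast<int64_t>(r >> 1) ^ -static_cast<int64_t>(r & 0x01);
}

int main() {
  size_t pos = 0;
  std::vector<uint8_t> one = {0x03};        // raw 3 -> zigzag-decoded -2
  assert(decodeZigZag(one, pos) == -2);
  pos = 0;
  std::vector<uint8_t> two = {0x8a, 0x01};  // 0x0a | (0x01 << 7) = 138 -> 69
  assert(decodeZigZag(two, pos) == 69);
  return 0;
}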
@ -0,0 +1,625 @@
#if defined(USE_S3)

#include "td_block_blob_client.hpp"

#include <azure/core/platform.hpp>

#if defined(AZ_PLATFORM_WINDOWS)
#if !defined(WIN32_LEAN_AND_MEAN)
#define WIN32_LEAN_AND_MEAN
#endif
#if !defined(NOMINMAX)
#define NOMINMAX
#endif
#include <windows.h>
#endif

#include <td_avro_parser.h>

#include <azure/core/io/body_stream.hpp>
#include <azure/storage/common/crypt.hpp>
#include <azure/storage/common/internal/concurrent_transfer.hpp>
#include <azure/storage/common/internal/constants.hpp>
#include <azure/storage/common/internal/file_io.hpp>
#include <azure/storage/common/internal/storage_switch_to_secondary_policy.hpp>
#include <azure/storage/common/storage_common.hpp>
#include <azure/storage/common/storage_exception.hpp>

namespace Azure {
namespace Storage {
namespace Blobs {

TDBlockBlobClient TDBlockBlobClient::CreateFromConnectionString(const std::string& connectionString,
                                                                const std::string& blobContainerName,
                                                                const std::string& blobName,
                                                                const BlobClientOptions& options) {
  TDBlockBlobClient newClient(
      BlobClient::CreateFromConnectionString(connectionString, blobContainerName, blobName, options));
  return newClient;
}

TDBlockBlobClient::TDBlockBlobClient(const std::string& blobUrl, std::shared_ptr<StorageSharedKeyCredential> credential,
                                     const BlobClientOptions& options)
    : BlobClient(blobUrl, std::move(credential), options) {}

TDBlockBlobClient::TDBlockBlobClient(const std::string& blobUrl,
                                     std::shared_ptr<Core::Credentials::TokenCredential> credential,
                                     const BlobClientOptions& options)
    : BlobClient(blobUrl, std::move(credential), options) {}

TDBlockBlobClient::TDBlockBlobClient(const std::string& blobUrl, const BlobClientOptions& options)
    : BlobClient(blobUrl, options) {}

TDBlockBlobClient::TDBlockBlobClient(BlobClient blobClient) : BlobClient(std::move(blobClient)) {}

TDBlockBlobClient TDBlockBlobClient::WithSnapshot(const std::string& snapshot) const {
  TDBlockBlobClient newClient(*this);
  if (snapshot.empty()) {
    newClient.m_blobUrl.RemoveQueryParameter(_internal::HttpQuerySnapshot);
  } else {
    newClient.m_blobUrl.AppendQueryParameter(_internal::HttpQuerySnapshot,
                                             _internal::UrlEncodeQueryParameter(snapshot));
  }
  return newClient;
}

TDBlockBlobClient TDBlockBlobClient::WithVersionId(const std::string& versionId) const {
  TDBlockBlobClient newClient(*this);
  if (versionId.empty()) {
    newClient.m_blobUrl.RemoveQueryParameter(_internal::HttpQueryVersionId);
  } else {
    newClient.m_blobUrl.AppendQueryParameter(_internal::HttpQueryVersionId,
                                             _internal::UrlEncodeQueryParameter(versionId));
  }
  return newClient;
}

Azure::Response<Models::UploadBlockBlobResult> TDBlockBlobClient::Upload(Azure::Core::IO::BodyStream& content,
                                                                         const UploadBlockBlobOptions& options,
                                                                         const Azure::Core::Context& context) const {
  _detail::BlockBlobClient::UploadBlockBlobOptions protocolLayerOptions;
  if (options.TransactionalContentHash.HasValue()) {
    if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) {
      protocolLayerOptions.TransactionalContentMD5 = options.TransactionalContentHash.Value().Value;
    } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) {
      protocolLayerOptions.TransactionalContentCrc64 = options.TransactionalContentHash.Value().Value;
    }
  }
  protocolLayerOptions.BlobContentType = options.HttpHeaders.ContentType;
  protocolLayerOptions.BlobContentEncoding = options.HttpHeaders.ContentEncoding;
  protocolLayerOptions.BlobContentLanguage = options.HttpHeaders.ContentLanguage;
  protocolLayerOptions.BlobContentMD5 = options.HttpHeaders.ContentHash.Value;
  protocolLayerOptions.BlobContentDisposition = options.HttpHeaders.ContentDisposition;
  protocolLayerOptions.BlobCacheControl = options.HttpHeaders.CacheControl;
  protocolLayerOptions.Metadata = std::map<std::string, std::string>(options.Metadata.begin(), options.Metadata.end());
  protocolLayerOptions.BlobTagsString = _detail::TagsToString(options.Tags);
  protocolLayerOptions.Tier = options.AccessTier;
  protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
  protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince;
  protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince;
  protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch;
  protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch;
  protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
  if (m_customerProvidedKey.HasValue()) {
    protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
    protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
    protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
  }
  protocolLayerOptions.EncryptionScope = m_encryptionScope;
  if (options.ImmutabilityPolicy.HasValue()) {
    protocolLayerOptions.ImmutabilityPolicyExpiry = options.ImmutabilityPolicy.Value().ExpiresOn;
    protocolLayerOptions.ImmutabilityPolicyMode = options.ImmutabilityPolicy.Value().PolicyMode;
  }
  protocolLayerOptions.LegalHold = options.HasLegalHold;

  return _detail::BlockBlobClient::Upload(*m_pipeline, m_blobUrl, content, protocolLayerOptions, context);
}

Azure::Response<Models::UploadBlockBlobFromResult> TDBlockBlobClient::UploadFrom(
    const uint8_t* buffer, size_t bufferSize, const UploadBlockBlobFromOptions& options,
    const Azure::Core::Context& context) const {
  constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL;
  constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
  constexpr int64_t MaxBlockNumber = 50000;
  constexpr int64_t BlockGrainSize = 1 * 1024 * 1024;

  if (static_cast<uint64_t>(options.TransferOptions.SingleUploadThreshold) > (std::numeric_limits<size_t>::max)()) {
    throw Azure::Core::RequestFailedException("Single upload threshold is too big");
  }
  if (bufferSize <= static_cast<size_t>(options.TransferOptions.SingleUploadThreshold)) {
    Azure::Core::IO::MemoryBodyStream contentStream(buffer, bufferSize);
    UploadBlockBlobOptions uploadBlockBlobOptions;
    uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders;
    uploadBlockBlobOptions.Metadata = options.Metadata;
    uploadBlockBlobOptions.Tags = options.Tags;
    uploadBlockBlobOptions.AccessTier = options.AccessTier;
    uploadBlockBlobOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
    uploadBlockBlobOptions.HasLegalHold = options.HasLegalHold;
    return Upload(contentStream, uploadBlockBlobOptions, context);
  }

  int64_t chunkSize;
  if (options.TransferOptions.ChunkSize.HasValue()) {
    chunkSize = options.TransferOptions.ChunkSize.Value();
  } else {
    int64_t minChunkSize = (bufferSize + MaxBlockNumber - 1) / MaxBlockNumber;
    minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize;
    chunkSize = (std::max)(DefaultStageBlockSize, minChunkSize);
  }
  if (chunkSize > MaxStageBlockSize) {
    throw Azure::Core::RequestFailedException("Block size is too big.");
  }

  std::vector<std::string> blockIds;
  auto getBlockId = [](int64_t id) {
    constexpr size_t BlockIdLength = 64;
    std::string blockId = std::to_string(id);
    blockId = std::string(BlockIdLength - blockId.length(), '0') + blockId;
    return Azure::Core::Convert::Base64Encode(std::vector<uint8_t>(blockId.begin(), blockId.end()));
  };

  auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
    Azure::Core::IO::MemoryBodyStream contentStream(buffer + offset, static_cast<size_t>(length));
    StageBlockOptions chunkOptions;
    auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context);
    if (chunkId == numChunks - 1) {
      blockIds.resize(static_cast<size_t>(numChunks));
    }
  };

  _internal::ConcurrentTransfer(0, bufferSize, chunkSize, options.TransferOptions.Concurrency, uploadBlockFunc);

  for (size_t i = 0; i < blockIds.size(); ++i) {
    blockIds[i] = getBlockId(static_cast<int64_t>(i));
  }
  CommitBlockListOptions commitBlockListOptions;
  commitBlockListOptions.HttpHeaders = options.HttpHeaders;
  commitBlockListOptions.Metadata = options.Metadata;
  commitBlockListOptions.Tags = options.Tags;
  commitBlockListOptions.AccessTier = options.AccessTier;
  commitBlockListOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
  commitBlockListOptions.HasLegalHold = options.HasLegalHold;
  auto commitBlockListResponse = CommitBlockList(blockIds, commitBlockListOptions, context);

  Models::UploadBlockBlobFromResult ret;
  ret.ETag = std::move(commitBlockListResponse.Value.ETag);
  ret.LastModified = std::move(commitBlockListResponse.Value.LastModified);
  ret.VersionId = std::move(commitBlockListResponse.Value.VersionId);
  ret.IsServerEncrypted = commitBlockListResponse.Value.IsServerEncrypted;
  ret.EncryptionKeySha256 = std::move(commitBlockListResponse.Value.EncryptionKeySha256);
  ret.EncryptionScope = std::move(commitBlockListResponse.Value.EncryptionScope);
  return Azure::Response<Models::UploadBlockBlobFromResult>(std::move(ret),
                                                            std::move(commitBlockListResponse.RawResponse));
}
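
// Chunk sizing above: when TransferOptions.ChunkSize is not set, the block
// size is ceil(bufferSize / 50000) rounded up to a 1 MiB grain, with a 4 MiB
// floor. A worked example (illustrative numbers, not from this commit): a
// 100 GiB upload gives ceil(107374182400 / 50000) = 2147484 bytes, rounded up
// to 3 MiB, then raised to the 4 MiB floor -> 25600 blocks, comfortably under
// the 50000-block limit.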
|
||||
|
||||
Azure::Response<Models::UploadBlockBlobFromResult> TDBlockBlobClient::UploadFrom(
|
||||
const std::string& fileName, const UploadBlockBlobFromOptions& options, const Azure::Core::Context& context) const {
|
||||
constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL;
|
||||
constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
|
||||
constexpr int64_t MaxBlockNumber = 50000;
|
||||
constexpr int64_t BlockGrainSize = 1 * 1024 * 1024;
|
||||
|
||||
{
|
||||
Azure::Core::IO::FileBodyStream contentStream(fileName);
|
||||
|
||||
if (contentStream.Length() <= options.TransferOptions.SingleUploadThreshold) {
|
||||
UploadBlockBlobOptions uploadBlockBlobOptions;
|
||||
uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders;
|
||||
uploadBlockBlobOptions.Metadata = options.Metadata;
|
||||
uploadBlockBlobOptions.Tags = options.Tags;
|
||||
uploadBlockBlobOptions.AccessTier = options.AccessTier;
|
||||
uploadBlockBlobOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
|
||||
uploadBlockBlobOptions.HasLegalHold = options.HasLegalHold;
|
||||
return Upload(contentStream, uploadBlockBlobOptions, context);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> blockIds;
|
||||
auto getBlockId = [](int64_t id) {
|
||||
constexpr size_t BlockIdLength = 64;
|
||||
std::string blockId = std::to_string(id);
|
||||
blockId = std::string(BlockIdLength - blockId.length(), '0') + blockId;
|
||||
return Azure::Core::Convert::Base64Encode(std::vector<uint8_t>(blockId.begin(), blockId.end()));
|
||||
};
|
||||
|
||||
_internal::FileReader fileReader(fileName);
|
||||
|
||||
auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
|
||||
Azure::Core::IO::_internal::RandomAccessFileBodyStream contentStream(fileReader.GetHandle(), offset, length);
|
||||
StageBlockOptions chunkOptions;
|
||||
auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context);
|
||||
if (chunkId == numChunks - 1) {
|
||||
blockIds.resize(static_cast<size_t>(numChunks));
|
||||
}
|
||||
};
|
||||
|
||||
int64_t chunkSize;
|
||||
if (options.TransferOptions.ChunkSize.HasValue()) {
|
||||
chunkSize = options.TransferOptions.ChunkSize.Value();
|
||||
} else {
|
||||
int64_t minChunkSize = (fileReader.GetFileSize() + MaxBlockNumber - 1) / MaxBlockNumber;
|
||||
minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize;
|
||||
chunkSize = (std::max)(DefaultStageBlockSize, minChunkSize);
|
||||
}
|
||||
if (chunkSize > MaxStageBlockSize) {
|
||||
throw Azure::Core::RequestFailedException("Block size is too big.");
|
||||
}
|
||||
|
||||
_internal::ConcurrentTransfer(0, fileReader.GetFileSize(), chunkSize, options.TransferOptions.Concurrency,
|
||||
uploadBlockFunc);
|
||||
|
||||
for (size_t i = 0; i < blockIds.size(); ++i) {
|
||||
blockIds[i] = getBlockId(static_cast<int64_t>(i));
|
||||
}
|
||||
CommitBlockListOptions commitBlockListOptions;
|
||||
commitBlockListOptions.HttpHeaders = options.HttpHeaders;
|
||||
commitBlockListOptions.Metadata = options.Metadata;
|
||||
commitBlockListOptions.Tags = options.Tags;
|
||||
commitBlockListOptions.AccessTier = options.AccessTier;
|
||||
commitBlockListOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
|
||||
commitBlockListOptions.HasLegalHold = options.HasLegalHold;
|
||||
auto commitBlockListResponse = CommitBlockList(blockIds, commitBlockListOptions, context);
|
||||
|
||||
Models::UploadBlockBlobFromResult result;
|
||||
result.ETag = commitBlockListResponse.Value.ETag;
|
||||
result.LastModified = commitBlockListResponse.Value.LastModified;
|
||||
result.VersionId = commitBlockListResponse.Value.VersionId;
|
||||
result.IsServerEncrypted = commitBlockListResponse.Value.IsServerEncrypted;
|
||||
result.EncryptionKeySha256 = commitBlockListResponse.Value.EncryptionKeySha256;
|
||||
result.EncryptionScope = commitBlockListResponse.Value.EncryptionScope;
|
||||
return Azure::Response<Models::UploadBlockBlobFromResult>(std::move(result),
|
||||
std::move(commitBlockListResponse.RawResponse));
|
||||
}
|
||||
|
||||
Azure::Response<Models::UploadBlockBlobFromResult> TDBlockBlobClient::UploadFrom(
    const std::string& fileName, int64_t offset, int64_t size, const UploadBlockBlobFromOptions& options,
    const Azure::Core::Context& context) const {
  _internal::FileReader fileReader(fileName);

  {
    Azure::Core::IO::_internal::RandomAccessFileBodyStream contentStream(fileReader.GetHandle(), offset, size);

    if (size <= options.TransferOptions.SingleUploadThreshold) {
      UploadBlockBlobOptions uploadBlockBlobOptions;
      uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders;
      uploadBlockBlobOptions.Metadata = options.Metadata;
      uploadBlockBlobOptions.Tags = options.Tags;
      uploadBlockBlobOptions.AccessTier = options.AccessTier;
      uploadBlockBlobOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
      uploadBlockBlobOptions.HasLegalHold = options.HasLegalHold;
      return Upload(contentStream, uploadBlockBlobOptions, context);
    }
  }

  std::vector<std::string> blockIds;
  auto getBlockId = [](int64_t id) {
    constexpr size_t BlockIdLength = 64;
    std::string blockId = std::to_string(id);
    blockId = std::string(BlockIdLength - blockId.length(), '0') + blockId;
    return Azure::Core::Convert::Base64Encode(std::vector<uint8_t>(blockId.begin(), blockId.end()));
  };
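  // getBlockId zero-pads the numeric ID to a fixed width before Base64-encoding it;
  // the Blob service requires all block IDs within one blob to have the same length.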

  auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
    Azure::Core::IO::_internal::RandomAccessFileBodyStream contentStream(fileReader.GetHandle(), offset, length);
    StageBlockOptions chunkOptions;
    auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context);
    if (chunkId == numChunks - 1) {
      blockIds.resize(static_cast<size_t>(numChunks));
    }
  };

  constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL;
  constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
  constexpr int64_t MaxBlockNumber = 50000;
  constexpr int64_t BlockGrainSize = 1 * 1024 * 1024;

  int64_t chunkSize;
  if (options.TransferOptions.ChunkSize.HasValue()) {
    chunkSize = options.TransferOptions.ChunkSize.Value();
  } else {
    int64_t minChunkSize = (size + MaxBlockNumber - 1) / MaxBlockNumber;
    minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize;
    chunkSize = (std::max)(DefaultStageBlockSize, minChunkSize);
  }
  if (chunkSize > MaxStageBlockSize) {
    throw Azure::Core::RequestFailedException("Block size is too big.");
  }

  _internal::ConcurrentTransfer(offset, size, chunkSize, options.TransferOptions.Concurrency, uploadBlockFunc);

  for (size_t i = 0; i < blockIds.size(); ++i) {
    blockIds[i] = getBlockId(static_cast<int64_t>(i));
  }
  CommitBlockListOptions commitBlockListOptions;
  commitBlockListOptions.HttpHeaders = options.HttpHeaders;
  commitBlockListOptions.Metadata = options.Metadata;
  commitBlockListOptions.Tags = options.Tags;
  commitBlockListOptions.AccessTier = options.AccessTier;
  commitBlockListOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
  commitBlockListOptions.HasLegalHold = options.HasLegalHold;
  auto commitBlockListResponse = CommitBlockList(blockIds, commitBlockListOptions, context);

  Models::UploadBlockBlobFromResult result;
  result.ETag = commitBlockListResponse.Value.ETag;
  result.LastModified = commitBlockListResponse.Value.LastModified;
  result.VersionId = commitBlockListResponse.Value.VersionId;
  result.IsServerEncrypted = commitBlockListResponse.Value.IsServerEncrypted;
  result.EncryptionKeySha256 = commitBlockListResponse.Value.EncryptionKeySha256;
  result.EncryptionScope = commitBlockListResponse.Value.EncryptionScope;
  return Azure::Response<Models::UploadBlockBlobFromResult>(std::move(result),
                                                            std::move(commitBlockListResponse.RawResponse));
}

Azure::Response<Models::UploadBlockBlobFromUriResult> TDBlockBlobClient::UploadFromUri(
    const std::string& sourceUri, const UploadBlockBlobFromUriOptions& options,
    const Azure::Core::Context& context) const {
  _detail::BlockBlobClient::UploadBlockBlobFromUriOptions protocolLayerOptions;
  protocolLayerOptions.CopySource = sourceUri;
  protocolLayerOptions.CopySourceBlobProperties = options.CopySourceBlobProperties;
  protocolLayerOptions.BlobContentType = options.HttpHeaders.ContentType;
  protocolLayerOptions.BlobContentEncoding = options.HttpHeaders.ContentEncoding;
  protocolLayerOptions.BlobContentLanguage = options.HttpHeaders.ContentLanguage;
  protocolLayerOptions.BlobContentMD5 = options.HttpHeaders.ContentHash.Value;
  protocolLayerOptions.BlobCacheControl = options.HttpHeaders.CacheControl;
  protocolLayerOptions.BlobContentDisposition = options.HttpHeaders.ContentDisposition;
  protocolLayerOptions.Metadata = std::map<std::string, std::string>(options.Metadata.begin(), options.Metadata.end());
  protocolLayerOptions.BlobTagsString = _detail::TagsToString(options.Tags);
  protocolLayerOptions.Tier = options.AccessTier;
  protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
  protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch;
  protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch;
  protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince;
  protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince;
  protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
  protocolLayerOptions.SourceIfMatch = options.SourceAccessConditions.IfMatch;
  protocolLayerOptions.SourceIfNoneMatch = options.SourceAccessConditions.IfNoneMatch;
  protocolLayerOptions.SourceIfModifiedSince = options.SourceAccessConditions.IfModifiedSince;
  protocolLayerOptions.SourceIfUnmodifiedSince = options.SourceAccessConditions.IfUnmodifiedSince;
  protocolLayerOptions.SourceIfTags = options.SourceAccessConditions.TagConditions;
  if (options.TransactionalContentHash.HasValue()) {
    if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) {
      protocolLayerOptions.SourceContentMD5 = options.TransactionalContentHash.Value().Value;
    } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) {
      protocolLayerOptions.SourceContentcrc64 = options.TransactionalContentHash.Value().Value;
    }
  }
  if (m_customerProvidedKey.HasValue()) {
    protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
    protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
    protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
  }
  protocolLayerOptions.EncryptionScope = m_encryptionScope;
  protocolLayerOptions.CopySourceTags = options.CopySourceTagsMode;
  if (!options.SourceAuthorization.empty()) {
    protocolLayerOptions.CopySourceAuthorization = options.SourceAuthorization;
  }

  return _detail::BlockBlobClient::UploadFromUri(*m_pipeline, m_blobUrl, protocolLayerOptions, context);
}

Azure::Response<Models::StageBlockResult> TDBlockBlobClient::StageBlock(const std::string& blockId,
                                                                        Azure::Core::IO::BodyStream& content,
                                                                        const StageBlockOptions& options,
                                                                        const Azure::Core::Context& context) const {
  _detail::BlockBlobClient::StageBlockBlobBlockOptions protocolLayerOptions;
  protocolLayerOptions.BlockId = blockId;
  if (options.TransactionalContentHash.HasValue()) {
    if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) {
      protocolLayerOptions.TransactionalContentMD5 = options.TransactionalContentHash.Value().Value;
    } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) {
      protocolLayerOptions.TransactionalContentCrc64 = options.TransactionalContentHash.Value().Value;
    }
  }
  protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
  if (m_customerProvidedKey.HasValue()) {
    protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
    protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
    protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
  }
  protocolLayerOptions.EncryptionScope = m_encryptionScope;
  return _detail::BlockBlobClient::StageBlock(*m_pipeline, m_blobUrl, content, protocolLayerOptions, context);
}

Azure::Response<Models::StageBlockFromUriResult> TDBlockBlobClient::StageBlockFromUri(
    const std::string& blockId, const std::string& sourceUri, const StageBlockFromUriOptions& options,
    const Azure::Core::Context& context) const {
  _detail::BlockBlobClient::StageBlockBlobBlockFromUriOptions protocolLayerOptions;
  protocolLayerOptions.BlockId = blockId;
  protocolLayerOptions.SourceUrl = sourceUri;
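  // HTTP Range headers are inclusive at both ends, hence "offset + length - 1" below
  // when the caller supplies an explicit length.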
  if (options.SourceRange.HasValue()) {
    std::string rangeStr = "bytes=" + std::to_string(options.SourceRange.Value().Offset) + "-";
    if (options.SourceRange.Value().Length.HasValue()) {
      rangeStr += std::to_string(options.SourceRange.Value().Offset + options.SourceRange.Value().Length.Value() - 1);
    }
    protocolLayerOptions.SourceRange = rangeStr;
  }
  if (options.TransactionalContentHash.HasValue()) {
    if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) {
      protocolLayerOptions.SourceContentMD5 = options.TransactionalContentHash.Value().Value;
    } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) {
      protocolLayerOptions.SourceContentcrc64 = options.TransactionalContentHash.Value().Value;
    }
  }
  protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
  protocolLayerOptions.SourceIfModifiedSince = options.SourceAccessConditions.IfModifiedSince;
  protocolLayerOptions.SourceIfUnmodifiedSince = options.SourceAccessConditions.IfUnmodifiedSince;
  protocolLayerOptions.SourceIfMatch = options.SourceAccessConditions.IfMatch;
  protocolLayerOptions.SourceIfNoneMatch = options.SourceAccessConditions.IfNoneMatch;
  if (m_customerProvidedKey.HasValue()) {
    protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
    protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
    protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
  }
  protocolLayerOptions.EncryptionScope = m_encryptionScope;
  if (!options.SourceAuthorization.empty()) {
    protocolLayerOptions.CopySourceAuthorization = options.SourceAuthorization;
  }

  return _detail::BlockBlobClient::StageBlockFromUri(*m_pipeline, m_blobUrl, protocolLayerOptions, context);
}

Azure::Response<Models::CommitBlockListResult> TDBlockBlobClient::CommitBlockList(
    const std::vector<std::string>& blockIds, const CommitBlockListOptions& options,
    const Azure::Core::Context& context) const {
  _detail::BlockBlobClient::CommitBlockBlobBlockListOptions protocolLayerOptions;
  protocolLayerOptions.Blocks.Latest = blockIds;
  protocolLayerOptions.BlobContentType = options.HttpHeaders.ContentType;
  protocolLayerOptions.BlobContentEncoding = options.HttpHeaders.ContentEncoding;
  protocolLayerOptions.BlobContentLanguage = options.HttpHeaders.ContentLanguage;
  protocolLayerOptions.BlobContentMD5 = options.HttpHeaders.ContentHash.Value;
  protocolLayerOptions.BlobContentDisposition = options.HttpHeaders.ContentDisposition;
  protocolLayerOptions.BlobCacheControl = options.HttpHeaders.CacheControl;
  protocolLayerOptions.Metadata = std::map<std::string, std::string>(options.Metadata.begin(), options.Metadata.end());
  protocolLayerOptions.BlobTagsString = _detail::TagsToString(options.Tags);
  protocolLayerOptions.Tier = options.AccessTier;
  protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
  protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince;
  protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince;
  protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch;
  protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch;
  protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
  if (m_customerProvidedKey.HasValue()) {
    protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
    protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
    protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
  }
  protocolLayerOptions.EncryptionScope = m_encryptionScope;
  if (options.ImmutabilityPolicy.HasValue()) {
    protocolLayerOptions.ImmutabilityPolicyExpiry = options.ImmutabilityPolicy.Value().ExpiresOn;
    protocolLayerOptions.ImmutabilityPolicyMode = options.ImmutabilityPolicy.Value().PolicyMode;
  }
  protocolLayerOptions.LegalHold = options.HasLegalHold;

  return _detail::BlockBlobClient::CommitBlockList(*m_pipeline, m_blobUrl, protocolLayerOptions, context);
}

Azure::Response<Models::GetBlockListResult> TDBlockBlobClient::GetBlockList(const GetBlockListOptions& options,
                                                                            const Azure::Core::Context& context) const {
  _detail::BlockBlobClient::GetBlockBlobBlockListOptions protocolLayerOptions;
  protocolLayerOptions.ListType = options.ListType;
  protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
  protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
  return _detail::BlockBlobClient::GetBlockList(*m_pipeline, m_blobUrl, protocolLayerOptions,
                                                _internal::WithReplicaStatus(context));
}

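// The Blob Query (query acceleration) path below is retained for reference but is
// commented out in this build.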
/*
Azure::Response<Models::QueryBlobResult> TDBlockBlobClient::Query(const std::string& querySqlExpression,
                                                                  const QueryBlobOptions& options,
                                                                  const Azure::Core::Context& context) const {
  _detail::BlobClient::QueryBlobOptions protocolLayerOptions;
  protocolLayerOptions.QueryRequest.QueryType = Models::_detail::QueryRequestQueryType::SQL;
  protocolLayerOptions.QueryRequest.Expression = querySqlExpression;
  if (options.InputTextConfiguration.m_format == Models::_detail::QueryFormatType::Delimited) {
    Models::_detail::DelimitedTextConfiguration c;
    c.RecordSeparator = options.InputTextConfiguration.m_recordSeparator;
    c.ColumnSeparator = options.InputTextConfiguration.m_columnSeparator;
    c.FieldQuote = options.InputTextConfiguration.m_quotationCharacter;
    c.EscapeChar = options.InputTextConfiguration.m_escapeCharacter;
    c.HeadersPresent = options.InputTextConfiguration.m_hasHeaders;
    Models::_detail::QuerySerialization q;
    q.Format.Type = options.InputTextConfiguration.m_format;
    q.Format.DelimitedTextConfiguration = std::move(c);
    protocolLayerOptions.QueryRequest.InputSerialization = std::move(q);
  } else if (options.InputTextConfiguration.m_format == Models::_detail::QueryFormatType::Json) {
    Models::_detail::JsonTextConfiguration c;
    c.RecordSeparator = options.InputTextConfiguration.m_recordSeparator;
    Models::_detail::QuerySerialization q;
    q.Format.Type = options.InputTextConfiguration.m_format;
    q.Format.JsonTextConfiguration = std::move(c);
    protocolLayerOptions.QueryRequest.InputSerialization = std::move(q);
  } else if (options.InputTextConfiguration.m_format == Models::_detail::QueryFormatType::Parquet) {
    Models::_detail::ParquetConfiguration c;
    Models::_detail::QuerySerialization q;
    q.Format.Type = options.InputTextConfiguration.m_format;
    q.Format.ParquetTextConfiguration = std::move(c);
    protocolLayerOptions.QueryRequest.InputSerialization = std::move(q);
  } else if (options.InputTextConfiguration.m_format.ToString().empty()) {
  } else {
    AZURE_UNREACHABLE_CODE();
  }
  if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Delimited) {
    Models::_detail::DelimitedTextConfiguration c;
    c.RecordSeparator = options.OutputTextConfiguration.m_recordSeparator;
    c.ColumnSeparator = options.OutputTextConfiguration.m_columnSeparator;
    c.FieldQuote = options.OutputTextConfiguration.m_quotationCharacter;
    c.EscapeChar = options.OutputTextConfiguration.m_escapeCharacter;
    c.HeadersPresent = options.OutputTextConfiguration.m_hasHeaders;
    Models::_detail::QuerySerialization q;
    q.Format.Type = options.OutputTextConfiguration.m_format;
    q.Format.DelimitedTextConfiguration = std::move(c);
    protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q);
  } else if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Json) {
    Models::_detail::JsonTextConfiguration c;
    c.RecordSeparator = options.OutputTextConfiguration.m_recordSeparator;
    Models::_detail::QuerySerialization q;
    q.Format.Type = options.OutputTextConfiguration.m_format;
    q.Format.JsonTextConfiguration = std::move(c);
    protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q);
  } else if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Parquet) {
    Models::_detail::ParquetConfiguration c;
    Models::_detail::QuerySerialization q;
    q.Format.Type = options.OutputTextConfiguration.m_format;
    q.Format.ParquetTextConfiguration = std::move(c);
    protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q);
  } else if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Arrow) {
    Models::_detail::ArrowConfiguration c;
    c.Schema = options.OutputTextConfiguration.m_schema;
    Models::_detail::QuerySerialization q;
    q.Format.Type = options.OutputTextConfiguration.m_format;
    q.Format.ArrowConfiguration = std::move(c);
    protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q);
  } else if (options.InputTextConfiguration.m_format.ToString().empty()) {
  } else {
    AZURE_UNREACHABLE_CODE();
  }

  protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
  if (m_customerProvidedKey.HasValue()) {
    protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
    protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
    protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
  }
  protocolLayerOptions.EncryptionScope = m_encryptionScope;
  protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince;
  protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince;
  protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch;
  protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch;
  protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
  auto response =
      _detail::BlobClient::Query(*m_pipeline, m_blobUrl, protocolLayerOptions, _internal::WithReplicaStatus(context));

  const auto statusCode = response.RawResponse->GetStatusCode();
  const auto reasonPhrase = response.RawResponse->GetReasonPhrase();
  const auto requestId = response.RawResponse->GetHeaders().count(_internal::HttpHeaderRequestId) != 0
                             ? response.RawResponse->GetHeaders().at(_internal::HttpHeaderRequestId)
                             : std::string();

  const auto clientRequestId = response.RawResponse->GetHeaders().count(_internal::HttpHeaderClientRequestId) != 0
                                   ? response.RawResponse->GetHeaders().at(_internal::HttpHeaderClientRequestId)
                                   : std::string();

  auto defaultErrorHandler = [statusCode, reasonPhrase, requestId, clientRequestId](BlobQueryError e) {
    if (e.IsFatal) {
      StorageException exception("Fatal " + e.Name + " at " + std::to_string(e.Position));
      exception.StatusCode = statusCode;
      exception.ReasonPhrase = reasonPhrase;
      exception.RequestId = requestId;
      exception.ClientRequestId = clientRequestId;
      exception.ErrorCode = e.Name;
      exception.Message = e.Description;

      throw exception;
    }
  };

  response.Value.BodyStream =
      std::make_unique<_detail::AvroStreamParser>(std::move(response.Value.BodyStream), options.ProgressHandler,
                                                  options.ErrorHandler ? options.ErrorHandler : defaultErrorHandler);
  return response;
}
*/
}  // namespace Blobs
}  // namespace Storage
}  // namespace Azure

#endif
@@ -0,0 +1,22 @@
if (TD_LINUX)

  aux_source_directory(. AZ_TEST_SRC)

  add_executable(azTest ${AZ_TEST_SRC})
  target_include_directories(azTest
    PUBLIC
      "${TD_SOURCE_DIR}/include/libs/azure"
      "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
  )

  target_link_libraries(azTest
    az
    gtest_main
  )
  enable_testing()
  add_test(
    NAME az_test
    COMMAND azTest
  )

endif(TD_LINUX)
@@ -0,0 +1,201 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <gtest/gtest.h>
#include <cstring>
#include <iostream>
#include <queue>

#include "az.h"

extern int8_t tsS3Enabled;

int32_t azInitEnv() {
  int32_t code = 0;

  extern int8_t tsS3EpNum;

  extern char tsS3Hostname[][TSDB_FQDN_LEN];
  extern char tsS3AccessKeyId[][TSDB_FQDN_LEN];
  extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN];
  extern char tsS3BucketName[TSDB_FQDN_LEN];

  /* TCS parameter format
  tsS3Hostname[0] = "<endpoint>/<account-name>.blob.core.windows.net";
  tsS3AccessKeyId[0] = "<access-key-id/account-name>";
  tsS3AccessKeySecret[0] = "<access-key-secret/account-key>";
  tsS3BucketName = "<bucket/container-name>";
  */

  const char *hostname = "<endpoint>/<account-name>.blob.core.windows.net";
  const char *accessKeyId = "<access-key-id/account-name>";
  const char *accessKeySecret = "<access-key-secret/account-key>";
  const char *bucketName = "<bucket/container-name>";

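  // The placeholders above are left untouched in CI: when the hostname still starts
  // with '<', real credentials are pulled from the ablob_* environment variables instead.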
  if (hostname[0] != '<') {
    tstrncpy(&tsS3Hostname[0][0], hostname, TSDB_FQDN_LEN);
    tstrncpy(&tsS3AccessKeyId[0][0], accessKeyId, TSDB_FQDN_LEN);
    tstrncpy(&tsS3AccessKeySecret[0][0], accessKeySecret, TSDB_FQDN_LEN);
    tstrncpy(tsS3BucketName, bucketName, TSDB_FQDN_LEN);
  } else {
    const char *accountId = getenv("ablob_account_id");
    if (!accountId) {
      return -1;
    }

    const char *accountSecret = getenv("ablob_account_secret");
    if (!accountSecret) {
      return -1;
    }

    const char *containerName = getenv("ablob_container");
    if (!containerName) {
      return -1;
    }

    TAOS_STRCPY(&tsS3Hostname[0][0], accountId);
    TAOS_STRCAT(&tsS3Hostname[0][0], ".blob.core.windows.net");
    TAOS_STRCPY(&tsS3AccessKeyId[0][0], accountId);
    TAOS_STRCPY(&tsS3AccessKeySecret[0][0], accountSecret);
    TAOS_STRCPY(tsS3BucketName, containerName);
  }

  tstrncpy(tsTempDir, "/tmp/", PATH_MAX);

  tsS3Enabled = true;

  return code;
}

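// Round-trip smoke test: write a 4 KiB alternating 0/1 pattern, upload it (ranged and
// whole-file), read it back block-wise and file-wise, then delete by name and by prefix.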
// TEST(AzTest, DISABLED_InterfaceTest) {
TEST(AzTest, InterfaceTest) {
  int  code = 0;
  bool check = false;
  bool withcp = false;

  code = azInitEnv();
  if (code) {
    std::cout << "ablob env init failed with: " << code << std::endl;
    return;
  }

  GTEST_ASSERT_EQ(code, 0);
  GTEST_ASSERT_EQ(tsS3Enabled, 1);

  code = azBegin();
  GTEST_ASSERT_EQ(code, 0);

  code = azCheckCfg();
  GTEST_ASSERT_EQ(code, 0);
  const int size = 4096;
  char      data[size] = {0};
  for (int i = 0; i < size / 2; ++i) {
    data[i * 2 + 1] = 1;
  }

  const char object_name[] = "azut.bin";
  char       path[PATH_MAX] = {0};
  char       path_download[PATH_MAX] = {0};
  int        ds_len = strlen(TD_DIRSEP);
  int        tmp_len = strlen(tsTempDir);

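  // Build "<tsTempDir>/azut.bin" plus a ".download" twin, appending a separator only
  // when tsTempDir does not already end with one.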
  (void)snprintf(path, PATH_MAX, "%s", tsTempDir);
  if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP);
    (void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", object_name);
  } else {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", object_name);
  }

  tstrncpy(path_download, path, strlen(path) + 1);
  tstrncpy(path_download + strlen(path), ".download", strlen(".download") + 1);

  TdFilePtr fp = taosOpenFile(path, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_WRITE_THROUGH);
  GTEST_ASSERT_NE(fp, nullptr);

  int n = taosWriteFile(fp, data, size);
  GTEST_ASSERT_EQ(n, size);

  code = taosCloseFile(&fp);
  GTEST_ASSERT_EQ(code, 0);

  code = azPutObjectFromFileOffset(path, object_name, 0, size);
  GTEST_ASSERT_EQ(code, 0);

  uint8_t *pBlock = NULL;
  code = azGetObjectBlock(object_name, 0, size, check, &pBlock);
  GTEST_ASSERT_EQ(code, 0);

  for (int i = 0; i < size / 2; ++i) {
    GTEST_ASSERT_EQ(pBlock[i * 2], 0);
    GTEST_ASSERT_EQ(pBlock[i * 2 + 1], 1);
  }

  taosMemoryFree(pBlock);

  code = azGetObjectToFile(object_name, path_download);
  GTEST_ASSERT_EQ(code, 0);

  {
    TdFilePtr fp = taosOpenFile(path_download, TD_FILE_READ);
    GTEST_ASSERT_NE(fp, nullptr);

    (void)memset(data, 0, size);

    int64_t n = taosReadFile(fp, data, size);
    GTEST_ASSERT_EQ(n, size);

    code = taosCloseFile(&fp);
    GTEST_ASSERT_EQ(code, 0);

    for (int i = 0; i < size / 2; ++i) {
      GTEST_ASSERT_EQ(data[i * 2], 0);
      GTEST_ASSERT_EQ(data[i * 2 + 1], 1);
    }
  }

  azDeleteObjectsByPrefix(object_name);
  // list object to check

  code = azPutObjectFromFile2(path, object_name, withcp);
  GTEST_ASSERT_EQ(code, 0);

  code = azGetObjectsByPrefix(object_name, tsTempDir);
  GTEST_ASSERT_EQ(code, 0);

  {
    TdFilePtr fp = taosOpenFile(path, TD_FILE_READ);
    GTEST_ASSERT_NE(fp, nullptr);

    (void)memset(data, 0, size);

    int64_t n = taosReadFile(fp, data, size);
    GTEST_ASSERT_EQ(n, size);

    code = taosCloseFile(&fp);
    GTEST_ASSERT_EQ(code, 0);

    for (int i = 0; i < size / 2; ++i) {
      GTEST_ASSERT_EQ(data[i * 2], 0);
      GTEST_ASSERT_EQ(data[i * 2 + 1], 1);
    }
  }

  const char *object_name_arr[] = {object_name};
  code = azDeleteObjects(object_name_arr, 1);
  GTEST_ASSERT_EQ(code, 0);

  azEnd();
}
@@ -3,6 +3,7 @@ add_library(stream STATIC ${STREAM_SRC})
target_include_directories(
  stream
  PUBLIC "${TD_SOURCE_DIR}/include/libs/stream"
+ PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs"
  PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

@@ -11,7 +12,7 @@ if(${BUILD_WITH_ROCKSDB})
if (${BUILD_CONTRIB})
  target_link_libraries(
    stream
-   PUBLIC rocksdb tdb
+   PUBLIC rocksdb tdb tcs
    PRIVATE os util transport qcom executor wal index
  )
  target_include_directories(

@@ -30,13 +31,13 @@ if(${BUILD_WITH_ROCKSDB})
  )
  target_link_libraries(
    stream
-   PUBLIC rocksdb tdb
+   PUBLIC rocksdb tdb tcs
    PRIVATE os util transport qcom executor wal index
  )
else()
  target_link_libraries(
    stream
-   PUBLIC rocksdb tdb
+   PUBLIC rocksdb tdb tcs
    PRIVATE os util transport qcom executor wal index
  )
  target_include_directories(

@@ -58,4 +59,3 @@ endif(${BUILD_WITH_ROCKSDB})
if(${BUILD_TEST})
  ADD_SUBDIRECTORY(test)
endif(${BUILD_TEST})
@@ -13,10 +13,10 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

-#include "cos.h"
#include "rsync.h"
#include "streamBackendRocksdb.h"
#include "streamInt.h"
+#include "tcs.h"

static int32_t downloadCheckpointDataByName(const char* id, const char* fname, const char* dstName);
static int32_t deleteCheckpointFile(const char* id, const char* name);
@@ -343,7 +343,7 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
    // And if we don't start a new timer, the loss of the checkpoint-trigger message may cause the whole checkpoint
    // procedure to get stuck.
    SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr;
    int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1);
    if (old == 0) {
      int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
      stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref);
@@ -352,7 +352,7 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
      streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
                     "trigger-recv-monitor");
      pTmrInfo->launchChkptId = pActiveInfo->activeId;
    } else {  // already launched, do nothing
      stError("s-task:%s previous checkpoint-trigger monitor tmr is set, not start new one", pTask->id.idStr);
    }
  }
@@ -373,10 +373,10 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock

    if (type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
      stDebug("s-task:%s set childIdx:%d, and add checkpoint-trigger block into outputQ", id, pTask->info.selfChildId);
      code = continueDispatchCheckpointTriggerBlock(pBlock, pTask);  // todo handle this failure
    } else {  // only one task exists, no need to dispatch downstream info
      code =
          appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT, pActiveInfo->activeId, pActiveInfo->transId, -1);
      streamFreeQitem((SStreamQueueItem*)pBlock);
    }
  } else if (taskLevel == TASK_LEVEL__SINK || taskLevel == TASK_LEVEL__AGG) {
@@ -399,8 +399,8 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
  if (taskLevel == TASK_LEVEL__SINK) {
    stDebug("s-task:%s process checkpoint-trigger block, all %d upstreams sent, send ready msg to upstream", id, num);
    streamFreeQitem((SStreamQueueItem*)pBlock);
    code = streamTaskBuildCheckpoint(pTask);  // todo: not handle error yet
  } else {  // source & agg tasks need to forward the checkpoint msg downwards
    stDebug("s-task:%s process checkpoint-trigger block, all %d upstreams sent, forwards to downstream", id, num);
    code = flushStateDataInExecutor(pTask, (SStreamQueueItem*)pBlock);
    if (code) {
@@ -445,7 +445,7 @@ static int32_t processCheckpointReadyHelp(SActiveCheckpointInfo* pInfo, int32_t
                              .transId = pInfo->transId,
                              .streamId = streamId,
                              .downstreamNodeId = downstreamNodeId};
  void* p = taosArrayPush(pInfo->pCheckpointReadyRecvList, &info);
  if (p == NULL) {
    stError("s-task:%s failed to set checkpoint ready recv msg, code:%s", id, tstrerror(terrno));
    return terrno;
@@ -560,8 +560,8 @@ void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg) {
  }
  streamMutexUnlock(&pInfo->lock);

  stDebug("s-task:%s clear active checkpointInfo, failed checkpointId:%" PRId64 ", current checkpointId:%" PRId64,
          pTask->id.idStr, pInfo->failedId, pTask->chkInfo.checkpointId);
}

int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SVUpdateCheckpointInfoReq* pReq) {
@@ -575,8 +575,7 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV

  if (pReq->checkpointId <= pInfo->checkpointId) {
    stDebug("s-task:%s vgId:%d latest checkpointId:%" PRId64 " Ver:%" PRId64
            " no need to update checkpoint info, updated checkpointId:%" PRId64 " Ver:%" PRId64 " transId:%d ignored",
            id, vgId, pInfo->checkpointId, pInfo->checkpointVer, pReq->checkpointId, pReq->checkpointVer,
            pReq->transId);
    streamMutexUnlock(&pTask->lock);
@@ -623,7 +622,7 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV
  }

  bool valid = (pInfo->checkpointId <= pReq->checkpointId && pInfo->checkpointVer <= pReq->checkpointVer &&
                pInfo->processedVer <= pReq->checkpointVer);

  if (!valid) {
    stFatal("invalid checkpoint id check, current checkpointId:%" PRId64 " checkpointVer:%" PRId64
@@ -908,7 +907,7 @@ static int32_t doChkptStatusCheck(SStreamTask* pTask) {
  if (pTmrInfo->launchChkptId != pActiveInfo->activeId) {
    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
    stWarn("s-task:%s vgId:%d checkpoint-trigger retrieve by previous checkpoint procedure, checkpointId:%" PRId64
           ", quit, ref:%d",
           id, vgId, pTmrInfo->launchChkptId, ref);
    return -1;
  }
@@ -1005,7 +1004,7 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) {
  int32_t numOfNotSend = 0;

  SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
  SStreamTmrInfo*        pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr;

  if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
@@ -1023,7 +1022,8 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) {
  }

  if (++pTmrInfo->activeCounter < 50) {
    streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
                   "trigger-recv-monitor");
    return;
  }

@@ -1201,8 +1201,8 @@ int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) {
    STaskDispatcherFixed* pDispatch = &pTask->outputInfo.fixedDispatcher;

    STaskTriggerSendInfo p = {.sendTs = now, .recved = false, .nodeId = pDispatch->nodeId, .taskId = pDispatch->taskId};
    void*                px = taosArrayPush(pInfo->pDispatchTriggerList, &p);
    if (px == NULL) {  // pause the stream task, if memory not enough
      code = terrno;
    }
  } else {
@@ -1213,8 +1213,8 @@ int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) {
      }

      STaskTriggerSendInfo p = {.sendTs = now, .recved = false, .nodeId = pVgInfo->vgId, .taskId = pVgInfo->taskId};
      void*                px = taosArrayPush(pInfo->pDispatchTriggerList, &p);
      if (px == NULL) {  // pause the stream task, if memory not enough
        code = terrno;
        break;
      }
@@ -1288,11 +1288,11 @@ void streamTaskSetTriggerDispatchConfirmed(SStreamTask* pTask, int32_t vgId) {
static int32_t uploadCheckpointToS3(const char* id, const char* path) {
  int32_t code = 0;
  int32_t nBytes = 0;

  /*
  if (s3Init() != 0) {
    return TSDB_CODE_THIRDPARTY_ERROR;
  }

  */
  TdDirPtr pDir = taosOpenDir(path);
  if (pDir == NULL) {
    return terrno;
@@ -1325,11 +1325,11 @@ static int32_t uploadCheckpointToS3(const char* id, const char* path) {
      break;
    }

-   code = s3PutObjectFromFile2(filename, object, 0);
+   code = tcsPutObjectFromFile2(filename, object, 0);
    if (code != 0) {
-     stError("[s3] failed to upload checkpoint:%s, reason:%s", filename, tstrerror(code));
+     stError("[tcs] failed to upload checkpoint:%s, reason:%s", filename, tstrerror(code));
    } else {
-     stDebug("[s3] upload checkpoint:%s", filename);
+     stDebug("[tcs] upload checkpoint:%s", filename);
    }
  }

@@ -1355,7 +1355,7 @@ int32_t downloadCheckpointByNameS3(const char* id, const char* fname, const char
    taosMemoryFree(buf);
    return TSDB_CODE_OUT_OF_RANGE;
  }
- int32_t code = s3GetObjectToFile(buf, dstName);
+ int32_t code = tcsGetObjectToFile(buf, dstName);
  if (code != 0) {
    taosMemoryFree(buf);
    return TAOS_SYSTEM_ERROR(errno);
@@ -1418,7 +1418,7 @@ int32_t streamTaskDownloadCheckpointData(const char* id, char* path, int64_t che
  if (strlen(tsSnodeAddress) != 0) {
    return downloadByRsync(id, path, checkpointId);
  } else if (tsS3StreamEnabled) {
-   return s3GetObjectsByPrefix(id, path);
+   return tcsGetObjectsByPrefix(id, path);
  }

  return 0;
@@ -1432,7 +1432,7 @@ int32_t deleteCheckpoint(const char* id) {
  if (strlen(tsSnodeAddress) != 0) {
    return deleteRsync(id);
  } else if (tsS3StreamEnabled) {
-   s3DeleteObjectsByPrefix(id);
+   tcsDeleteObjectsByPrefix(id);
  }
  return 0;
}
@@ -1446,7 +1446,7 @@ int32_t deleteCheckpointFile(const char* id, const char* name) {
  }

  char*   tmp = object;
- int32_t code = s3DeleteObjects((const char**)&tmp, 1);
+ int32_t code = tcsDeleteObjects((const char**)&tmp, 1);
  if (code != 0) {
    return TSDB_CODE_THIRDPARTY_ERROR;
  }
@@ -1488,4 +1488,4 @@ int32_t streamTaskSendCheckpointsourceRsp(SStreamTask* pTask) {
  streamMutexUnlock(&pTask->lock);

  return code;
}
@@ -101,4 +101,4 @@ IF(NOT TD_DARWIN)
      NAME backendTest
      COMMAND backendTest
    )
  ENDIF ()
ENDIF ()
@@ -0,0 +1,22 @@
aux_source_directory(src TCS_SRC)

add_library(tcs STATIC ${TCS_SRC})
target_include_directories(
  tcs
  PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs"
  PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

target_link_libraries(
  tcs
  PUBLIC az
  PUBLIC common
  # PUBLIC cjson
  # PUBLIC os
  # PUBLIC util
  # PUBLIC crypt
)

if(${BUILD_TEST})
  add_subdirectory(test)
endif(${BUILD_TEST})
@@ -0,0 +1,59 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _TD_TCS_INT_H_
#define _TD_TCS_INT_H_

#include "os.h"
#include "tarray.h"
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"

#ifdef __cplusplus
extern "C" {
#endif

extern int8_t tsS3Ablob;

typedef enum {
  TOS_PROTO_NIL,
  TOS_PROTO_S3,
  TOS_PROTO_ABLOB,
} STosProto;

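// Backend vtable: every tcs* wrapper routes through these function pointers, which
// tcsInit() fills with either the s3* or the az* implementations.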
typedef struct {
  int32_t (*Begin)();
  void (*End)();
  int32_t (*CheckCfg)();

  int32_t (*PutObjectFromFileOffset)(const char* file, const char* object_name, int64_t offset, int64_t size);
  int32_t (*GetObjectBlock)(const char* object_name, int64_t offset, int64_t size, bool check, uint8_t** ppBlock);

  void (*DeleteObjectsByPrefix)(const char* prefix);

  int32_t (*PutObjectFromFile2)(const char* file, const char* object, int8_t withcp);
  int32_t (*GetObjectsByPrefix)(const char* prefix, const char* path);
  int32_t (*DeleteObjects)(const char* object_name[], int nobject);
  int32_t (*GetObjectToFile)(const char* object_name, const char* fileName);
} STcs;

extern STcs tcs;

#ifdef __cplusplus
}
#endif

#endif  // _TD_TCS_INT_H_
@@ -0,0 +1,105 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "tcs.h"
#include "os.h"
#include "taoserror.h"
#include "tcsInt.h"
#include "tglobal.h"

#include "az.h"
#include "cos.h"

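// Backend selection is a single flag: tsS3Ablob picks the Azure Blob (ablob)
// implementation, anything else falls back to S3-compatible object storage.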
int32_t tcsInit() {
  int32_t code = 0;

  STosProto proto = tsS3Ablob ? TOS_PROTO_ABLOB : TOS_PROTO_S3;

  if (TOS_PROTO_S3 == proto) {
    tcs.Begin = s3Begin;
    tcs.End = s3End;
    tcs.CheckCfg = s3CheckCfg;

    tcs.PutObjectFromFileOffset = s3PutObjectFromFileOffset;
    tcs.GetObjectBlock = s3GetObjectBlock;

    tcs.DeleteObjectsByPrefix = s3DeleteObjectsByPrefix;

    tcs.PutObjectFromFile2 = s3PutObjectFromFile2;
    tcs.GetObjectsByPrefix = s3GetObjectsByPrefix;
    tcs.DeleteObjects = s3DeleteObjects;
    tcs.GetObjectToFile = s3GetObjectToFile;

  } else if (TOS_PROTO_ABLOB == proto) {
    tcs.Begin = azBegin;
    tcs.End = azEnd;
    tcs.CheckCfg = azCheckCfg;

    tcs.PutObjectFromFileOffset = azPutObjectFromFileOffset;
    tcs.GetObjectBlock = azGetObjectBlock;

    tcs.DeleteObjectsByPrefix = azDeleteObjectsByPrefix;

    tcs.PutObjectFromFile2 = azPutObjectFromFile2;
    tcs.GetObjectsByPrefix = azGetObjectsByPrefix;
    tcs.DeleteObjects = azDeleteObjects;
    tcs.GetObjectToFile = azGetObjectToFile;

  } else {
    code = TSDB_CODE_INVALID_PARA;
    return code;
  }

  code = tcs.Begin();

  return code;
}

void tcsUninit() { tcs.End(); }

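// One-shot config check used at startup: bring the backend up, let it validate its
// own settings, then tear it down again.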
int32_t tcsCheckCfg() {
  int32_t code = 0;

  if (!tsS3Enabled) {
    (void)fprintf(stderr, "tcs not configured.\n");
    TAOS_RETURN(code);
  }

  code = tcsInit();
  if (code != 0) {
    (void)fprintf(stderr, "failed to initialize tcs.\n");
    TAOS_RETURN(code);
  }

  code = tcs.CheckCfg();
  if (code != 0) {
    (void)fprintf(stderr, "failed to check tcs.\n");
    TAOS_RETURN(code);
  }

  tcsUninit();

  return code;
}

int32_t tcsPutObjectFromFileOffset(const char* file, const char* object_name, int64_t offset, int64_t size) {
  return tcs.PutObjectFromFileOffset(file, object_name, offset, size);
}

int32_t tcsGetObjectBlock(const char* object_name, int64_t offset, int64_t size, bool check, uint8_t** ppBlock) {
  return tcs.GetObjectBlock(object_name, offset, size, check, ppBlock);
}

void tcsDeleteObjectsByPrefix(const char* prefix) { return tcs.DeleteObjectsByPrefix(prefix); }
@@ -0,0 +1,31 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "tcs.h"
#include "tcsInt.h"

STcs tcs;

int32_t tcsPutObjectFromFile2(const char* file, const char* object, int8_t withcp) {
  return tcs.PutObjectFromFile2(file, object, withcp);
}

int32_t tcsGetObjectsByPrefix(const char* prefix, const char* path) { return tcs.GetObjectsByPrefix(prefix, path); }

int32_t tcsDeleteObjects(const char* object_name[], int nobject) { return tcs.DeleteObjects(object_name, nobject); }

int32_t tcsGetObjectToFile(const char* object_name, const char* fileName) {
  return tcs.GetObjectToFile(object_name, fileName);
}
@@ -0,0 +1,22 @@
if (TD_LINUX)

  aux_source_directory(. TCS_TEST_SRC)

  add_executable(tcsTest ${TCS_TEST_SRC})
  target_include_directories(tcsTest
    PUBLIC
      "${TD_SOURCE_DIR}/include/libs/tcs"
      "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
  )

  target_link_libraries(tcsTest
    tcs
    gtest_main
  )
  enable_testing()
  add_test(
    NAME tcs_test
    COMMAND tcsTest
  )

endif()
@@ -0,0 +1,351 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <gtest/gtest.h>

#include <cstring>
#include <iostream>
#include <queue>

#include "tcs.h"
#include "tcsInt.h"

int32_t tcsInitEnv(int8_t isBlob) {
  int32_t code = 0;

  extern char tsS3Hostname[][TSDB_FQDN_LEN];
  extern char tsS3AccessKeyId[][TSDB_FQDN_LEN];
  extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN];
  extern char tsS3BucketName[TSDB_FQDN_LEN];

  /* TCS parameter format
  tsS3Hostname[0] = "<endpoint>/<account-name>.blob.core.windows.net";
  tsS3AccessKeyId[0] = "<access-key-id/account-name>";
  tsS3AccessKeySecret[0] = "<access-key-secret/account-key>";
  tsS3BucketName = "<bucket/container-name>";
  */

  tsS3Ablob = isBlob;
  if (isBlob) {
    const char *hostname = "<endpoint>/<account-name>.blob.core.windows.net";
    const char *accessKeyId = "<access-key-id/account-name>";
    const char *accessKeySecret = "<access-key-secret/account-key>";
    const char *bucketName = "<bucket/container-name>";

    if (hostname[0] != '<') {
      tstrncpy(&tsS3Hostname[0][0], hostname, TSDB_FQDN_LEN);
      tstrncpy(&tsS3AccessKeyId[0][0], accessKeyId, TSDB_FQDN_LEN);
      tstrncpy(&tsS3AccessKeySecret[0][0], accessKeySecret, TSDB_FQDN_LEN);
      tstrncpy(tsS3BucketName, bucketName, TSDB_FQDN_LEN);
    } else {
      const char *accountId = getenv("ablob_account_id");
      if (!accountId) {
        return -1;
      }

      const char *accountSecret = getenv("ablob_account_secret");
      if (!accountSecret) {
        return -1;
      }

      const char *containerName = getenv("ablob_container");
      if (!containerName) {
        return -1;
      }

      TAOS_STRCPY(&tsS3Hostname[0][0], accountId);
      TAOS_STRCAT(&tsS3Hostname[0][0], ".blob.core.windows.net");
      TAOS_STRCPY(&tsS3AccessKeyId[0][0], accountId);
      TAOS_STRCPY(&tsS3AccessKeySecret[0][0], accountSecret);
      TAOS_STRCPY(tsS3BucketName, containerName);
    }
  } else {
    /*
    const char *hostname = "endpoint/<account-name>.blob.core.windows.net";
    const char *accessKeyId = "<access-key-id/account-name>";
    const char *accessKeySecret = "<access-key-secret/account-key>";
    const char *bucketName = "<bucket/container-name>";
    */

    // const char *hostname = "http://192.168.1.52:9000";
    // const char *accessKeyId = "zOgllR6bSnw2Ah3mCNel";
    // const char *accessKeySecret = "cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX";
    // const char *bucketName = "test-bucket";
    const char *hostname = "192.168.1.52:9000";
    const char *accessKeyId = "fGPPyYjzytw05nw44ViA";
    const char *accessKeySecret = "vK1VcwxgSOykicx6hk8fL1x15uEtyDSFU3w4hTaZ";

    const char *bucketName = "ci-bucket19";

    tstrncpy(&tsS3Hostname[0][0], hostname, TSDB_FQDN_LEN);
    tstrncpy(&tsS3AccessKeyId[0][0], accessKeyId, TSDB_FQDN_LEN);
    tstrncpy(&tsS3AccessKeySecret[0][0], accessKeySecret, TSDB_FQDN_LEN);
    tstrncpy(tsS3BucketName, bucketName, TSDB_FQDN_LEN);

    // setup s3 env
    extern int8_t tsS3EpNum;
    extern int8_t tsS3Https[TSDB_MAX_EP_NUM];

    tsS3EpNum = 1;
    tsS3Https[0] = false;
  }

  tstrncpy(tsTempDir, "/tmp/", PATH_MAX);

  tsS3Enabled = true;

  return code;
}

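// Same round-trip as the az test, but driven through the tcs dispatch layer; the
// boolean passed to tcsInitEnv() selects the ablob (true) or S3 (false) backend.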
// TEST(TcsTest, DISABLED_InterfaceTest) {
TEST(TcsTest, InterfaceTest) {
  int  code = 0;
  bool check = false;
  bool withcp = false;

  code = tcsInitEnv(true);
  if (code) {
    std::cout << "ablob env init failed with: " << code << std::endl;
    return;
  }

  GTEST_ASSERT_EQ(code, 0);
  GTEST_ASSERT_EQ(tsS3Enabled, 1);
  GTEST_ASSERT_EQ(tsS3Ablob, 1);

  code = tcsInit();
  GTEST_ASSERT_EQ(code, 0);

  code = tcsCheckCfg();
  GTEST_ASSERT_EQ(code, 0);

  const int size = 4096;
  char      data[size] = {0};
  for (int i = 0; i < size / 2; ++i) {
    data[i * 2 + 1] = 1;
  }

  const char object_name[] = "tcsut.bin";
  char       path[PATH_MAX] = {0};
  char       path_download[PATH_MAX] = {0};
  int        ds_len = strlen(TD_DIRSEP);
  int        tmp_len = strlen(tsTempDir);

  (void)snprintf(path, PATH_MAX, "%s", tsTempDir);
  if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP);
    (void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", object_name);
  } else {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", object_name);
  }

  tstrncpy(path_download, path, strlen(path) + 1);
  tstrncpy(path_download + strlen(path), ".download", strlen(".download") + 1);

  TdFilePtr fp = taosOpenFile(path, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_WRITE_THROUGH);
  GTEST_ASSERT_NE(fp, nullptr);

  int n = taosWriteFile(fp, data, size);
  GTEST_ASSERT_EQ(n, size);

  code = taosCloseFile(&fp);
  GTEST_ASSERT_EQ(code, 0);

  code = tcsPutObjectFromFileOffset(path, object_name, 0, size);
  GTEST_ASSERT_EQ(code, 0);

  uint8_t *pBlock = NULL;
  code = tcsGetObjectBlock(object_name, 0, size, check, &pBlock);
  GTEST_ASSERT_EQ(code, 0);

  for (int i = 0; i < size / 2; ++i) {
    GTEST_ASSERT_EQ(pBlock[i * 2], 0);
    GTEST_ASSERT_EQ(pBlock[i * 2 + 1], 1);
  }

  taosMemoryFree(pBlock);

  code = tcsGetObjectToFile(object_name, path_download);
  GTEST_ASSERT_EQ(code, 0);

  {
    TdFilePtr fp = taosOpenFile(path_download, TD_FILE_READ);
    GTEST_ASSERT_NE(fp, nullptr);

    (void)memset(data, 0, size);

    int64_t n = taosReadFile(fp, data, size);
    GTEST_ASSERT_EQ(n, size);

    code = taosCloseFile(&fp);
    GTEST_ASSERT_EQ(code, 0);

    for (int i = 0; i < size / 2; ++i) {
      GTEST_ASSERT_EQ(data[i * 2], 0);
      GTEST_ASSERT_EQ(data[i * 2 + 1], 1);
    }
  }

  tcsDeleteObjectsByPrefix(object_name);
  // list object to check

  code = tcsPutObjectFromFile2(path, object_name, withcp);
  GTEST_ASSERT_EQ(code, 0);

  code = tcsGetObjectsByPrefix(object_name, tsTempDir);
  GTEST_ASSERT_EQ(code, 0);

  {
    TdFilePtr fp = taosOpenFile(path, TD_FILE_READ);
    GTEST_ASSERT_NE(fp, nullptr);

    (void)memset(data, 0, size);

    int64_t n = taosReadFile(fp, data, size);
    GTEST_ASSERT_EQ(n, size);

    code = taosCloseFile(&fp);
    GTEST_ASSERT_EQ(code, 0);

    for (int i = 0; i < size / 2; ++i) {
      GTEST_ASSERT_EQ(data[i * 2], 0);
      GTEST_ASSERT_EQ(data[i * 2 + 1], 1);
    }
  }

  const char *object_name_arr[] = {object_name};
  code = tcsDeleteObjects(object_name_arr, 1);
  GTEST_ASSERT_EQ(code, 0);

  tcsUninit();
}

// TEST(TcsTest, DISABLED_InterfaceNonBlobTest) {
|
||||
TEST(TcsTest, InterfaceNonBlobTest) {
|
||||
int code = 0;
|
||||
bool check = false;
|
||||
bool withcp = false;
|
||||
|
||||
code = tcsInitEnv(false);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
GTEST_ASSERT_EQ(tsS3Enabled, 1);
|
||||
GTEST_ASSERT_EQ(tsS3Ablob, 0);
|
||||
|
||||
code = tcsInit();
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
code = tcsCheckCfg();
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
const int size = 4096;
|
||||
char data[size] = {0};
|
||||
for (int i = 0; i < size / 2; ++i) {
|
||||
data[i * 2 + 1] = 1;
|
||||
}
|
||||
|
||||
const char object_name[] = "tcsut.bin";
|
||||
char path[PATH_MAX] = {0};
|
||||
char path_download[PATH_MAX] = {0};
|
||||
int ds_len = strlen(TD_DIRSEP);
|
||||
int tmp_len = strlen(tsTempDir);
|
||||
|
||||
(void)snprintf(path, PATH_MAX, "%s", tsTempDir);
|
||||
if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) {
|
||||
(void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP);
|
||||
(void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", object_name);
|
||||
} else {
|
||||
(void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", object_name);
|
||||
}
|
||||
|
||||
tstrncpy(path_download, path, strlen(path) + 1);
|
||||
tstrncpy(path_download + strlen(path), ".download", strlen(".download") + 1);
|
||||
|
||||
TdFilePtr fp = taosOpenFile(path, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_WRITE_THROUGH);
|
||||
GTEST_ASSERT_NE(fp, nullptr);
|
||||
|
||||
int n = taosWriteFile(fp, data, size);
|
||||
GTEST_ASSERT_EQ(n, size);
|
||||
|
||||
code = taosCloseFile(&fp);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
code = tcsPutObjectFromFileOffset(path, object_name, 0, size);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
uint8_t *pBlock = NULL;
|
||||
code = tcsGetObjectBlock(object_name, 0, size, check, &pBlock);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
for (int i = 0; i < size / 2; ++i) {
|
||||
GTEST_ASSERT_EQ(pBlock[i * 2], 0);
|
||||
GTEST_ASSERT_EQ(pBlock[i * 2 + 1], 1);
|
||||
}
|
||||
|
||||
taosMemoryFree(pBlock);
|
||||
|
||||
code = tcsGetObjectToFile(object_name, path_download);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
{
|
||||
TdFilePtr fp = taosOpenFile(path, TD_FILE_READ);
|
||||
GTEST_ASSERT_NE(fp, nullptr);
|
||||
|
||||
(void)memset(data, 0, size);
|
||||
|
||||
int64_t n = taosReadFile(fp, data, size);
|
||||
GTEST_ASSERT_EQ(n, size);
|
||||
|
||||
code = taosCloseFile(&fp);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
for (int i = 0; i < size / 2; ++i) {
|
||||
GTEST_ASSERT_EQ(data[i * 2], 0);
|
||||
GTEST_ASSERT_EQ(data[i * 2 + 1], 1);
|
||||
}
|
||||
}
|
||||
|
||||
tcsDeleteObjectsByPrefix(object_name);
|
||||
// list object to check
|
||||
|
||||
code = tcsPutObjectFromFile2(path, object_name, withcp);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
code = tcsGetObjectsByPrefix(object_name, tsTempDir);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
{
|
||||
TdFilePtr fp = taosOpenFile(path, TD_FILE_READ);
|
||||
GTEST_ASSERT_NE(fp, nullptr);
|
||||
|
||||
(void)memset(data, 0, size);
|
||||
|
||||
int64_t n = taosReadFile(fp, data, size);
|
||||
GTEST_ASSERT_EQ(n, size);
|
||||
|
||||
code = taosCloseFile(&fp);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
for (int i = 0; i < size / 2; ++i) {
|
||||
GTEST_ASSERT_EQ(data[i * 2], 0);
|
||||
GTEST_ASSERT_EQ(data[i * 2 + 1], 1);
|
||||
}
|
||||
}
|
||||
|
||||
const char *object_name_arr[] = {object_name};
|
||||
code = tcsDeleteObjects(object_name_arr, 1);
|
||||
GTEST_ASSERT_EQ(code, 0);
|
||||
|
||||
tcsUninit();
|
||||
}
|
|
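Both tests walk the same object round trip; only tcsInitEnv() selects the backend (the non-blob variant passes false and asserts tsS3Ablob == 0, so the blob variant presumably passes true). A minimal sketch of the sequence, using only the tcs* calls and signatures that appear in the tests above, with error handling elided:

  /* Sketch of the tcs round trip exercised above; not a drop-in test. */
  int32_t  code = tcsInit();                                       /* open the configured backend */
  code = tcsPutObjectFromFileOffset(path, object_name, 0, size);   /* upload a byte range         */
  uint8_t *pBlock = NULL;
  code = tcsGetObjectBlock(object_name, 0, size, false, &pBlock);  /* ranged read into memory     */
  taosMemoryFree(pBlock);
  code = tcsGetObjectToFile(object_name, path_download);           /* full-object download        */
  const char *names[] = {object_name};
  code = tcsDeleteObjects(names, 1);                               /* explicit cleanup            */
  tcsUninit();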
@ -26,7 +26,7 @@
#define LOG_MAX_LINE_DUMP_SIZE        (1024 * 1024)
#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128)

#define LOG_FILE_DAY_LEN 64

#define LOG_DEFAULT_BUF_SIZE (20 * 1024 * 1024)  // 20MB
#define LOG_SLOW_BUF_SIZE    (10 * 1024 * 1024)  // 10MB
@ -113,6 +113,7 @@ int32_t rpcDebugFlag = 131;
int32_t qDebugFlag = 131;
int32_t stDebugFlag = 131;
int32_t wDebugFlag = 131;
int32_t azDebugFlag = 131;
int32_t sDebugFlag = 131;
int32_t tsdbDebugFlag = 131;
int32_t tdbDebugFlag = 131;
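// The added azDebugFlag follows the existing per-module pattern (default 131,
// error + warn); it presumably gates logging for the new azure/ablob code path.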
@ -151,7 +152,7 @@ static int32_t taosStartLog() {
  return 0;
}

static void getDay(char* buf, int32_t bufSize){
static void getDay(char *buf, int32_t bufSize) {
  time_t    t = taosTime(NULL);
  struct tm tmInfo;
  if (taosLocalTime(&t, &tmInfo, buf, bufSize) != NULL) {
@ -172,7 +173,7 @@ static int64_t getTimestampToday() {
  return (int64_t)taosMktime(&tm);
}

static void getFullPathName(char* fullName, const char* logName){
static void getFullPathName(char *fullName, const char *logName) {
  if (strlen(tsLogDir) != 0) {
    char lastC = tsLogDir[strlen(tsLogDir) - 1];
    if (lastC == '\\' || lastC == '/') {
@ -225,7 +226,7 @@ int32_t taosInitLog(const char *logName, int32_t maxFiles, bool tsc) {
  }

  TAOS_CHECK_RETURN(taosInitNormalLog(logName, maxFiles));
  if (tsc){
  if (tsc) {
    TAOS_CHECK_RETURN(taosInitSlowLog());
  }
  TAOS_CHECK_RETURN(taosStartLog());
@ -397,7 +398,7 @@ static int32_t taosOpenNewLogFile() {

  OldFileKeeper *oldFileKeeper = taosOpenNewFile();
  if (!oldFileKeeper) {
    TAOS_UNUSED(taosThreadMutexUnlock(&tsLogObj.logMutex));
    return terrno;
  }
  if (taosThreadCreate(&thread, &attr, taosThreadToCloseOldFile, oldFileKeeper) != 0) {
@ -433,7 +434,7 @@ static void taosOpenNewSlowLogFile() {
  char day[TD_TIME_STR_LEN] = {0};
  getDay(day, sizeof(day));
  TdFilePtr pFile = NULL;
  char      name[PATH_MAX + TD_TIME_STR_LEN] = {0};
  (void)snprintf(name, PATH_MAX + TD_TIME_STR_LEN, "%s.%s", tsLogObj.slowLogName, day);
  pFile = taosOpenFile(name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
  if (pFile == NULL) {
@ -455,7 +456,7 @@ void taosResetLog() {

  if (tsLogObj.logHandle) {
    int32_t code = taosOpenNewLogFile();
    if(code != 0){
    if (code != 0) {
      uError("failed to open new log file, reason:%s", tstrerror(code));
    }
    uInfo("==================================");
@ -508,12 +509,12 @@ static void decideLogFileName(const char *fn, int32_t maxFileNum) {
  }
}

static void decideLogFileNameFlag(){
static void decideLogFileNameFlag() {
  char    name[PATH_MAX + 50] = "\0";
  int32_t logstat0_mtime = 0;
  int32_t logstat1_mtime = 0;
  bool    log0Exist = false;
  bool    log1Exist = false;

  if (strlen(tsLogObj.logName) < PATH_MAX + 50 - 2) {
    strcpy(name, tsLogObj.logName);
@ -535,7 +536,7 @@ static void decideLogFileNameFlag(){
  }
}

static void processLogFileName(const char* logName , int32_t maxFileNum){
static void processLogFileName(const char *logName, int32_t maxFileNum) {
  char fullName[PATH_MAX] = {0};
  getFullPathName(fullName, logName);
  decideLogFileName(fullName, maxFileNum);
@ -872,7 +873,7 @@ static int32_t taosGetLogRemainSize(SLogBuff *pLogBuf, int32_t start, int32_t en
  return rSize >= 0 ? rSize : LOG_BUF_SIZE(pLogBuf) + rSize;
}

static void taosWriteSlowLog(SLogBuff *pLogBuf){
static void taosWriteSlowLog(SLogBuff *pLogBuf) {
  int32_t lock = atomic_val_compare_exchange_32(&pLogBuf->lock, 0, 1);
  if (lock == 1) return;
  taosWriteLog(pLogBuf);
@ -0,0 +1,344 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import time
import random

import taos
import frame
import frame.etool
import frame.eos
import frame.eutil

from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame.srvCtl import *
from frame import *
from frame.eos import *


class TDTestCase(TBase):
    index = eutil.cpuRand(20) + 1
    bucketName = f"ci-bucket{index}"
    updatecfgDict = {
        'supportVnodes': '1000',
        's3EndPoint': 'https://<account-id>.blob.core.windows.net',
        's3AccessKey': '<account-name>:<account-key>',
        's3BucketName': '<test-bucket>',
        's3PageCacheSize': '10240',
        's3UploadDelaySec': '10',
        's3MigrateIntervalSec': '600',
        's3MigrateEnabled': '1'
    }

    tdLog.info(f"assign bucketName is {bucketName}\n")
    maxFileSize = (128 + 10) * 1024 * 1024  # 128M chunk size plus a 10M buffer
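    # Note on the connection settings above: the placeholders are filled in
    # by CI. For an ablob target, s3EndPoint is the storage account's blob
    # endpoint and s3AccessKey packs "<account-name>:<account-key>" into one
    # string, e.g. (hypothetical values) 'https://myacct.blob.core.windows.net'
    # with 'myacct:bXlrZXk='. The remaining keys tune page cache, upload delay
    # and the migrate schedule rather than the connection itself.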
    def insertData(self):
        tdLog.info(f"insert data.")
        # taosBenchmark run
        json = etool.curFile(__file__, "s3Basic.json")
        etool.benchMark(json=json)

        tdSql.execute(f"use {self.db}")
        # values come from s3Basic.json
        self.childtable_count = 6
        self.insert_rows = 2000000
        self.timestamp_step = 100

    def createStream(self, sname):
        sql = f"create stream {sname} fill_history 1 into stm1 as select count(*) from {self.db}.{self.stb} interval(10s);"
        tdSql.execute(sql)

    def migrateDbS3(self):
        sql = f"s3migrate database {self.db}"
        tdSql.execute(sql, show=True)

    def checkDataFile(self, lines, maxFileSize):
        # ls -l
        # -rwxrwxrwx 1 root root 41652224 Apr 17 14:47 vnode2/tsdb/v2f1974ver47.3.data
        overCnt = 0
        for line in lines:
            cols = line.split()
            fileSize = int(cols[4])
            fileName = cols[8]
            # print(f" filesize={fileSize} fileName={fileName} line={line}")
            if fileSize > maxFileSize:
                tdLog.info(f"error, {fileSize} over max size({maxFileSize}) {fileName}\n")
                overCnt += 1
            else:
                tdLog.info(f"{fileName}({fileSize}) check size passed.")

        return overCnt
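    # For the sample `ls -l` line above, a whitespace split puts the byte size
    # in cols[4] (41652224) and the file path in cols[8]
    # (vnode2/tsdb/v2f1974ver47.3.data), which is what checkDataFile indexes.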
    def checkUploadToS3(self):
        rootPath = sc.clusterRootPath()
        cmd = f"ls -l {rootPath}/dnode*/data/vnode/vnode*/tsdb/*.data"
        tdLog.info(cmd)
        loop = 0
        rets = []
        overCnt = 0
        while loop < 200:
            time.sleep(3)

            # check upload to s3
            rets = eos.runRetList(cmd)
            cnt = len(rets)
            if cnt == 0:
                overCnt = 0
                tdLog.info("All data files uploaded to the server.")
                break
            overCnt = self.checkDataFile(rets, self.maxFileSize)
            if overCnt == 0:
                uploadOK = True
                tdLog.info(f"All data files({len(rets)}) are below {self.maxFileSize} bytes; upload to s3 checked ok.")
                break

            tdLog.info(f"loop={loop}: {overCnt} data files not uploaded yet, wait 3s and retry ...")
            if loop == 3:
                sc.dnodeStop(1)
                time.sleep(2)
                sc.dnodeStart(1)
            loop += 1
            # migrate
            self.migrateDbS3()

        # final check must pass
        if overCnt > 0:
            tdLog.exit(f"s3 still has {overCnt} files over size.")
    def doAction(self):
        tdLog.info(f"do action.")

        self.flushDb(show=True)
        # self.compactDb(show=True)

        # trigger migrate to s3
        self.migrateDbS3()

        # check upload to s3
        self.checkUploadToS3()

    def checkStreamCorrect(self):
        sql = f"select count(*) from {self.db}.stm1"
        count = 0
        for i in range(120):
            tdSql.query(sql)
            count = tdSql.getData(0, 0)
            if count == 100000 or count == 100001:
                return True
            time.sleep(1)

        tdLog.exit(f"stream count is not as expected. expect = 100000 or 100001, real = {count}. sql={sql}")

    def checkCreateDb(self, keepLocal, chunkSize, compact):
        # keywords
        kw1 = kw2 = kw3 = ""
        if keepLocal is not None:
            kw1 = f"s3_keeplocal {keepLocal}"
        if chunkSize is not None:
            kw2 = f"s3_chunksize {chunkSize}"
        if compact is not None:
            kw3 = f"s3_compact {compact}"

        sql = f" create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}"
        tdSql.execute(sql, show=True)
        # sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';"
        sql = f"select * from information_schema.ins_databases where name='db1';"
        tdSql.query(sql)
        # columns 29 30 31 -> chunksize keeplocal compact
        if chunkSize is not None:
            tdSql.checkData(0, 29, chunkSize)
        if keepLocal is not None:
            keepLocalm = keepLocal * 24 * 60
            tdSql.checkData(0, 30, f"{keepLocalm}m")
        if compact is not None:
            tdSql.checkData(0, 31, compact)
        sql = "drop database db1"
        tdSql.execute(sql)
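    # The positional checks above (columns 29/30/31) depend on the current
    # ins_databases column order. The commented-out query shows the sturdier
    # form: select s3_keeplocal/s3_chunksize/s3_compact by name and check
    # column 0 of the result instead, e.g. (sketch)
    # tdSql.query("select s3_chunksize from information_schema.ins_databases"
    #             " where name='db1'"); tdSql.checkData(0, 0, chunkSize).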
    def checkExcept(self):
        # errors
        sqls = [
            f"create database db2 s3_keeplocal -1",
            f"create database db2 s3_keeplocal 0",
            f"create database db2 s3_keeplocal 365001",
            f"create database db2 s3_chunksize -1",
            f"create database db2 s3_chunksize 0",
            f"create database db2 s3_chunksize 900000000",
            f"create database db2 s3_compact -1",
            f"create database db2 s3_compact 100",
            f"create database db2 duration 1d s3_keeplocal 1d"
        ]
        tdSql.errors(sqls)

    def checkBasic(self):
        # create db
        keeps = [1, 256, 1024, 365000, None]
        chunks = [131072, 600000, 820000, 1048576, None]
        comps = [0, 1, None]

        for keep in keeps:
            for chunk in chunks:
                for comp in comps:
                    self.checkCreateDb(keep, chunk, comp)

        # --checks3
        idx = 1
        taosd = sc.taosdFile(idx)
        cfg = sc.dnodeCfgPath(idx)
        cmd = f"{taosd} -c {cfg} --checks3"

        eos.exe(cmd)
        # output, error = eos.run(cmd)
        # print(lines)

        '''
        tips = [
            "put object s3test.txt: success",
            "listing bucket ci-bucket: success",
            "get object s3test.txt: success",
            "delete object s3test.txt: success"
        ]
        pos = 0
        for tip in tips:
            pos = output.find(tip, pos)
            # if pos == -1:
            #     tdLog.exit(f"checks3 failed not found {tip}. cmd={cmd} output={output}")
        '''

        # except
        self.checkExcept()

    def preDb(self, vgroups):
        cnt = int(time.time()) % 2 + 1
        for i in range(cnt):
            vg = eutil.cpuRand(9) + 1
            sql = f"create database predb vgroups {vg}"
            tdSql.execute(sql, show=True)
            sql = "drop database predb"
            tdSql.execute(sql, show=True)

    # history
    def insertHistory(self):
        tdLog.info(f"insert history data.")
        # taosBenchmark run
        json = etool.curFile(__file__, "s3Basic1.json")
        etool.benchMark(json=json)

        # values come from s3Basic1.json
        self.insert_rows += self.insert_rows // 4
        self.timestamp_step = 50

    # delete
    def checkDelete(self):
        # delete rows at 100 odd timestamps
        start = 1600000000000
        drows = 200
        for i in range(1, drows, 2):
            sql = f"from {self.db}.{self.stb} where ts = {start + i*500}"
            tdSql.execute("delete " + sql, show=True)
            tdSql.query("select * " + sql)
            tdSql.checkRows(0)

        # flush and compact, then re-check the total row count
        self.flushDb()
        self.compactDb()
        self.insert_rows -= drows // 2
        sql = f"select count(*) from {self.db}.{self.stb}"
        tdSql.checkAgg(sql, self.insert_rows * self.childtable_count)

        # delete 10W rows starting at row 100000
        drows = 100000
        sdel = start + 100000 * self.timestamp_step
        edel = start + 100000 * self.timestamp_step + drows * self.timestamp_step
        sql = f"from {self.db}.{self.stb} where ts >= {sdel} and ts < {edel}"
        tdSql.execute("delete " + sql, show=True)
        tdSql.query("select * " + sql)
        tdSql.checkRows(0)

        self.insert_rows -= drows
        sql = f"select count(*) from {self.db}.{self.stb}"
        tdSql.checkAgg(sql, self.insert_rows * self.childtable_count)
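    # Worked numbers for the first pass of checkDelete: range(1, 200, 2)
    # touches 100 odd timestamps, and each delete removes that row from every
    # child table, so per-table rows drop by drows // 2 = 100 before checkAgg
    # multiplies insert_rows by childtable_count again.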
    # run
    def run(self):
        tdLog.debug(f"start to execute {__file__}")
        self.sname = "stream1"
        if eos.isArm64Cpu():
            tdLog.success(f"{__file__} ignored on arm64")
        else:

            self.preDb(10)

            # insert data
            self.insertData()

            # create stream
            self.createStream(self.sname)

            # check insert data correct
            # self.checkInsertCorrect()

            # save
            self.snapshotAgg()

            # do action
            self.doAction()

            # check saved agg result is correct
            self.checkAggCorrect()

            # check insert correct again
            self.checkInsertCorrect()

            # check stream correct and drop stream
            # self.checkStreamCorrect()

            # drop stream
            self.dropStream(self.sname)

            # insert history disorder data
            self.insertHistory()

            # checkBasic
            self.checkBasic()

            # self.checkInsertCorrect()
            self.snapshotAgg()
            self.doAction()
            self.checkAggCorrect()
            self.checkInsertCorrect(difCnt=self.childtable_count * 1499999)
            self.checkDelete()
            self.doAction()

            # drop database and free s3 files
            self.dropDb()

        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,66 @@
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "connection_pool_size": 8,
    "num_of_records_per_req": 4000,
    "prepared_rand": 500,
    "thread_count": 4,
    "create_table_thread_count": 1,
    "confirm_parameter_prompt": "no",
    "databases": [
        {
            "dbinfo": {
                "name": "db",
                "drop": "yes",
                "vgroups": 2,
                "replica": 1,
                "duration": "10d",
                "s3_keeplocal": "30d",
                "s3_chunksize": "131072",
                "tsdb_pagesize": "1",
                "s3_compact": "1",
                "wal_retention_size": "1",
                "wal_retention_period": "1",
                "flush_each_batch": "no",
                "keep": "3650d"
            },
            "super_tables": [
                {
                    "name": "stb",
                    "child_table_exists": "no",
                    "childtable_count": 6,
                    "insert_rows": 2000000,
                    "childtable_prefix": "d",
                    "insert_mode": "taosc",
                    "timestamp_step": 100,
                    "start_timestamp": 1600000000000,
                    "columns": [
                        { "type": "bool", "name": "bc" },
                        { "type": "float", "name": "fc" },
                        { "type": "double", "name": "dc" },
                        { "type": "tinyint", "name": "ti" },
                        { "type": "smallint", "name": "si" },
                        { "type": "int", "name": "ic", "max": 1, "min": 1 },
                        { "type": "bigint", "name": "bi" },
                        { "type": "utinyint", "name": "uti" },
                        { "type": "usmallint", "name": "usi" },
                        { "type": "uint", "name": "ui" },
                        { "type": "ubigint", "name": "ubi" },
                        { "type": "binary", "name": "bin", "len": 50 },
                        { "type": "nchar", "name": "nch", "len": 100 }
                    ],
                    "tags": [
                        { "type": "tinyint", "name": "groupid", "max": 10, "min": 1 },
                        { "name": "location", "type": "binary", "len": 16, "values":
                            ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View", "Sunnyvale", "Santa Clara", "Cupertino"]
                        }
                    ]
                }
            ]
        }
    ]
}
@ -0,0 +1,66 @@
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "connection_pool_size": 8,
    "num_of_records_per_req": 5000,
    "prepared_rand": 500,
    "thread_count": 4,
    "create_table_thread_count": 1,
    "confirm_parameter_prompt": "no",
    "databases": [
        {
            "dbinfo": {
                "name": "db",
                "drop": "no",
                "vgroups": 2,
                "replica": 1,
                "duration": "10d",
                "s3_keeplocal": "30d",
                "s3_chunksize": "131072",
                "tsdb_pagesize": "1",
                "s3_compact": "1",
                "wal_retention_size": "1",
                "wal_retention_period": "1",
                "flush_each_batch": "no",
                "keep": "3650d"
            },
            "super_tables": [
                {
                    "name": "stb",
                    "child_table_exists": "yes",
                    "childtable_count": 6,
                    "insert_rows": 1000000,
                    "childtable_prefix": "d",
                    "insert_mode": "taosc",
                    "timestamp_step": 50,
                    "start_timestamp": 1600000000000,
                    "columns": [
                        { "type": "bool", "name": "bc" },
                        { "type": "float", "name": "fc" },
                        { "type": "double", "name": "dc" },
                        { "type": "tinyint", "name": "ti" },
                        { "type": "smallint", "name": "si" },
                        { "type": "int", "name": "ic", "max": 1, "min": 1 },
                        { "type": "bigint", "name": "bi" },
                        { "type": "utinyint", "name": "uti" },
                        { "type": "usmallint", "name": "usi" },
                        { "type": "uint", "name": "ui" },
                        { "type": "ubigint", "name": "ubi" },
                        { "type": "binary", "name": "bin", "len": 50 },
                        { "type": "nchar", "name": "nch", "len": 100 }
                    ],
                    "tags": [
                        { "type": "tinyint", "name": "groupid", "max": 10, "min": 1 },
                        { "name": "location", "type": "binary", "len": 16, "values":
                            ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View", "Sunnyvale", "Santa Clara", "Cupertino"]
                        }
                    ]
                }
            ]
        }
    ]
}
@ -129,7 +129,7 @@ def scan_files_path(source_file_path):
def input_files(change_files):
    # scan_dir_list = ["source", "include", "docs/examples", "tests/script/api", "src/plugins"]
    scan_dir_list = ["source", "include", "docs/examples", "src/plugins"]
    scan_skip_file_list = [f"{TD_project_path}/TDinternal/community/tools/taosws-rs/target/release/build/openssl-sys-7811e597b848e397/out/openssl-build/install/include/openssl", "/test/", "contrib", "debug", "deps", f"{TD_project_path}/TDinternal/community/source/libs/parser/src/sql.c", f"{TD_project_path}/TDinternal/community/source/client/jni/windows/win32/bridge/AccessBridgeCalls.c"]
    scan_skip_file_list = ["tools/taosws-rs/target/release/build/openssl-sys-7811e597b848e397/out/openssl-build/install/include/openssl", "/test/", "contrib", "debug", "deps", "source/libs/parser/src/sql.c", "source/libs/azure", "source/client/jni/windows/win32/bridge/AccessBridgeCalls.c"]
    with open(change_files, 'r') as file:
        for line in file:
            file_name = line.strip()

@ -141,7 +141,7 @@ def input_files(change_files):
                tdc_file_path = os.path.join(TD_project_path, "community/")
                file_name = os.path.join(tdc_file_path, file_name)
                all_file_path.append(file_name)
    # print(f"all_file_path:{all_file_path}")
    print(f"all_file_path:{all_file_path}")
    logger.info("Found %s files" % len(all_file_path))
    file_res_path = ""
@ -57,7 +57,7 @@ class TDTestCase:
        tdSql.checkData(0, 2, 0)

        tdSql.query("show dnode 1 variables like '%debugFlag'")
        tdSql.checkRows(23)
        tdSql.checkRows(24)

        tdSql.query("show dnode 1 variables like '____debugFlag'")
        tdSql.checkRows(2)
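        # note: the expectation moves from 23 to 24 rows because the logging
        # change above adds azDebugFlag to the per-dnode debug-flag variables.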