merge from 3.0

Liu Jicong 2022-05-09 18:13:51 +08:00
commit 0c71b07b9a
97 changed files with 3726 additions and 1032 deletions

View File

@ -15,12 +15,12 @@ MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH
MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})
find_package(Git QUIET)
if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git")
if(GIT_FOUND AND EXISTS "${TD_SOURCE_DIR}/.git")
# Update submodules as needed
option(GIT_SUBMODULE "Check submodules during build" ON)
if(GIT_SUBMODULE)
message(STATUS "Submodule update")
execute_process(COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive
execute_process(COMMAND cd ${TD_SOURCE_DIR} && ${GIT_EXECUTABLE} submodule update --init --recursive
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
RESULT_VARIABLE GIT_SUBMOD_RESULT)
if(NOT GIT_SUBMOD_RESULT EQUAL "0")
@ -29,7 +29,7 @@ if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git")
endif()
endif()
if(NOT EXISTS "${PROJECT_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt")
if(NOT EXISTS "${TD_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt")
message(WARNING "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. Please update submodules manually if you need to build them.")
endif()
@ -66,7 +66,7 @@ ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/W3 /D_WIN32")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
# ENDIF ()

View File

@ -101,8 +101,8 @@ int32_t create_topic() {
}
taos_free_result(pRes);
/*pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");*/
pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from ct1");
pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");
/*pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from ct1");*/
if (taos_errno(pRes) != 0) {
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
return -1;

View File

@ -23,7 +23,6 @@ extern "C" {
#define TDENGINE_SYSTABLE_H
#define TSDB_INFORMATION_SCHEMA_DB "information_schema"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_INS_TABLE_DNODES "dnodes"
#define TSDB_INS_TABLE_MNODES "mnodes"
#define TSDB_INS_TABLE_MODULES "modules"

View File

@ -17,8 +17,8 @@
#define _TD_COMMON_GLOBAL_H_
#include "tarray.h"
#include "tdef.h"
#include "tconfig.h"
#include "tdef.h"
#ifdef __cplusplus
extern "C" {
@ -121,15 +121,16 @@ extern char tsCompressor[];
extern int32_t tsDiskCfgNum;
extern SDiskCfg tsDiskCfg[];
// internal
extern int32_t tsTransPullupMs;
extern int32_t tsMaRebalanceMs;
// internal
extern int32_t tsTransPullupInterval;
extern int32_t tsMqRebalanceInterval;
#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd, const char *envFile,
char *apolloUrl, SArray *pArgs, bool tsc);
int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc);
int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd,
const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc);
int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs,
bool tsc);
void taosCleanupCfg();
void taosCfgDynamicOptions(const char *option, const char *value);
void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary);

View File

@ -190,6 +190,8 @@ typedef struct SRetention {
int8_t keepUnit;
} SRetention;
#define RETENTION_VALID(r) (((r)->freq > 0) && ((r)->keep > 0))
#pragma pack(push, 1)
// use a null-terminated string instead of a char array to avoid excessive memory consumption when there are more than 1M tableMeta entries
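
The #pragma pack(push, 1) added above removes compiler padding so the in-memory layout matches the serialized byte layout exactly. A minimal standalone sketch of the effect (illustrative structs, not the ones in this header):

#include <stdint.h>
#include <stdio.h>

typedef struct { int8_t a; int32_t b; } Unpacked; // 'a' usually padded so 'b' aligns: often 8 bytes

#pragma pack(push, 1)
typedef struct { int8_t a; int32_t b; } Packed;   // no padding: exactly 5 bytes
#pragma pack(pop)

int main(void) {
  printf("unpacked=%zu packed=%zu\n", sizeof(Unpacked), sizeof(Packed));
  return 0;
}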

View File

@ -204,7 +204,6 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp)
// sync integration
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_TIMEOUT, "vnode-sync-timeout", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING, "vnode-sync-ping", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING_REPLY, "vnode-sync-ping-reply", NULL, NULL)

View File

@ -164,7 +164,6 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_TOPICS_STMT,
QUERY_NODE_SHOW_CONSUMERS_STMT,
QUERY_NODE_SHOW_SUBSCRIBES_STMT,
QUERY_NODE_SHOW_TRANS_STMT,
QUERY_NODE_SHOW_SMAS_STMT,
QUERY_NODE_SHOW_CONFIGS_STMT,
QUERY_NODE_SHOW_CONNECTIONS_STMT,

View File

@ -105,7 +105,7 @@ int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char*
void* smlInitHandle(SQuery *pQuery);
void smlDestroyHandle(void *pHandle);
int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *colsHash, SArray *cols, bool format, STableMeta *pTableMeta, char *tableName, char *msgBuf, int16_t msgBufLen);
int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SArray *colsSchema, SArray *cols, bool format, STableMeta *pTableMeta, char *tableName, char *msgBuf, int16_t msgBufLen);
int32_t smlBuildOutput(void* handle, SHashObj* pVgHash);
#ifdef __cplusplus

View File

@ -80,6 +80,7 @@ int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count);
void taosFprintfFile(TdFilePtr pFile, const char *format, ...);
int64_t taosGetLineFile(TdFilePtr pFile, char ** __restrict ptrBuf);
int64_t taosGetsFile(TdFilePtr pFile, int32_t maxSize, char *__restrict buf);
int32_t taosEOFFile(TdFilePtr pFile);

View File

@ -411,7 +411,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909)
#define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A)
// sync integration
#define TSDB_CODE_SYN_NOT_LEADER TAOS_DEF_ERROR_CODE(0, 0x0910)
#define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF)

View File

@ -37,8 +37,8 @@ typedef enum {
typedef struct {
char sTableName[TSDB_TABLE_NAME_LEN];
SHashObj *tags;
SHashObj *fields;
SArray *tags;
SArray *fields;
} SCreateSTableActionInfo;
typedef struct {
@ -78,14 +78,17 @@ typedef struct {
// colsFormat stores formatted cols for quick parsing, if info->formatData is true
SArray *colsFormat; // elements are SArray<SSmlKv*>
// cols & colsColumn store unformatted cols
// cols stores unformatted cols
SArray *cols; // elements are SHashObj<cols key string, SSmlKv*> for quick lookup by key
SHashObj *columnsHash; // elements are <cols key string, 1>, just to check quickly whether a key exists.
} SSmlTableInfo;
typedef struct {
SHashObj *tagHash;
SArray *tags; // save the origin order to create table
SHashObj *tagHash; // elements are <key, index in tags>
SArray *cols;
SHashObj *fieldHash;
STableMeta *tableMeta;
} SSmlSTableMeta;
@ -113,6 +116,8 @@ typedef struct {
int32_t affectedRows;
SSmlMsgBuf msgBuf;
SHashObj *dumplicateKey; // for duplicate key detection
SArray *colsContainer; // reused for cols parsing when dataFormat == false
} SSmlHandle;
//=================================================================================================
@ -143,8 +148,8 @@ static int32_t smlBuildInvalidDataMsg(SSmlMsgBuf* pBuf, const char *msg1, const
}
static int smlCompareKv(const void* p1, const void* p2) {
SSmlKv* kv1 = (SSmlKv*)p1;
SSmlKv* kv2 = (SSmlKv*)p2;
SSmlKv* kv1 = *(SSmlKv**)p1;
SSmlKv* kv2 = *(SSmlKv**)p2;
int32_t kvLen1 = kv1->keyLen;
int32_t kvLen2 = kv2->keyLen;
int32_t res = strncasecmp(kv1->key, kv2->key, TMIN(kvLen1, kvLen2));
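
The comparator fix above matters because the SArray holds SSmlKv* elements: a qsort-style sort passes the comparator pointers to the elements, so each argument is really an SSmlKv**, and casting it straight to SSmlKv* compared garbage. A self-contained sketch of the same pattern, assuming the sort routine behaves like C's qsort:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { const char *key; } KV;

// qsort hands the comparator pointers *to the elements*; when the elements
// are themselves KV*, each argument is a KV**, so one extra dereference is
// needed -- exactly the fix made above.
static int cmpKv(const void *p1, const void *p2) {
  const KV *kv1 = *(const KV *const *)p1;
  const KV *kv2 = *(const KV *const *)p2;
  return strcmp(kv1->key, kv2->key);
}

int main(void) {
  KV a = {"b"}, b = {"a"};
  KV *arr[] = {&a, &b};                        // array of pointers, like SArray<SSmlKv*>
  qsort(arr, 2, sizeof(KV *), cmpKv);
  printf("%s %s\n", arr[0]->key, arr[1]->key); // prints: a b
  return 0;
}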
@ -174,8 +179,9 @@ static void smlBuildChildTableName(SSmlTableInfo *tags) {
tMD5Update(&context, (uint8_t *)keyJoined, (uint32_t)len);
tMD5Final(&context);
uint64_t digest1 = *(uint64_t*)(context.digest);
uint64_t digest2 = *(uint64_t*)(context.digest + 8);
snprintf(tags->childTableName, TSDB_TABLE_NAME_LEN, "t_%016"PRIx64"%016"PRIx64, digest1, digest2);
//uint64_t digest2 = *(uint64_t*)(context.digest + 8);
//snprintf(tags->childTableName, TSDB_TABLE_NAME_LEN, "t_%016"PRIx64"%016"PRIx64, digest1, digest2);
snprintf(tags->childTableName, TSDB_TABLE_NAME_LEN, "t_%016"PRIx64, digest1);
taosStringBuilderDestroy(&sb);
tags->uid = digest1;
}
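
smlBuildChildTableName now keeps only the first 64 bits of the MD5 digest, halving the generated name to "t_" plus 16 hex digits. A hedged standalone sketch of the formatting step (NAME_LEN is a placeholder, not the real TSDB_TABLE_NAME_LEN; a fixed fake digest stands in for tMD5Final output):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NAME_LEN 64 // placeholder only; not the real TSDB_TABLE_NAME_LEN

int main(void) {
  // A fixed fake digest stands in for the 16 bytes tMD5Final() produces;
  // after this change only the first 8 bytes contribute to the name.
  uint8_t digest[16] = {0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03, 0x04,
                        0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x10, 0x20};
  uint64_t digest1;
  memcpy(&digest1, digest, sizeof(digest1)); // avoids the aliasing cast used above

  char childTableName[NAME_LEN];
  snprintf(childTableName, sizeof(childTableName), "t_%016" PRIx64, digest1);
  printf("%s\n", childTableName); // t_04030201efbeadde on a little-endian machine
  return 0;
}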
@ -350,37 +356,26 @@ static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) {
int n = sprintf(result, "create stable %s (", action->createSTable.sTableName);
char* pos = result + n; int freeBytes = capacity - n;
size_t size = taosHashGetSize(action->createSTable.fields);
SArray *cols = taosArrayInit(size, POINTER_BYTES);
SSmlKv **kv = taosHashIterate(action->createSTable.fields, NULL);
while(kv){
if(strncmp((*kv)->key, TS, strlen(TS)) == 0 && (*kv)->type == TSDB_DATA_TYPE_TIMESTAMP){
taosArrayInsert(cols, 0, kv);
}else{
taosArrayPush(cols, kv);
}
kv = taosHashIterate(action->createSTable.fields, kv);
}
SArray *cols = action->createSTable.fields;
for(int i = 0; i < taosArrayGetSize(cols); i++){
SSmlKv *kvNew = taosArrayGetP(cols, i);
smlBuildColumnDescription(kvNew, pos, freeBytes, &outBytes);
SSmlKv *kv = taosArrayGetP(cols, i);
smlBuildColumnDescription(kv, pos, freeBytes, &outBytes);
pos += outBytes; freeBytes -= outBytes;
*pos = ','; ++pos; --freeBytes;
}
taosArrayDestroy(cols);
--pos; ++freeBytes;
outBytes = snprintf(pos, freeBytes, ") tags (");
pos += outBytes; freeBytes -= outBytes;
kv = taosHashIterate(action->createSTable.tags, NULL);
while(kv){
smlBuildColumnDescription(*kv, pos, freeBytes, &outBytes);
cols = action->createSTable.tags;
for(int i = 0; i < taosArrayGetSize(cols); i++){
SSmlKv *kv = taosArrayGetP(cols, i);
smlBuildColumnDescription(kv, pos, freeBytes, &outBytes);
pos += outBytes; freeBytes -= outBytes;
*pos = ','; ++pos; --freeBytes;
kv = taosHashIterate(action->createSTable.tags, kv);
}
pos--; ++freeBytes;
outBytes = snprintf(pos, freeBytes, ")");
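
Both loops above build a comma-separated column list by appending one description plus a comma per item, then stepping back over the final comma before the closing parenthesis. The same buffer dance in miniature (assumes at least one item, as the schema always has a timestamp column):

#include <stdio.h>

// Append "item," per element, then back up over the last comma before the
// closing parenthesis. Assumes n >= 1.
static void joinNames(char *buf, int cap, const char **names, int n) {
  char *pos = buf;
  int freeBytes = cap;
  int outBytes = snprintf(pos, freeBytes, "(");
  pos += outBytes; freeBytes -= outBytes;
  for (int i = 0; i < n; i++) {
    outBytes = snprintf(pos, freeBytes, "%s", names[i]);
    pos += outBytes; freeBytes -= outBytes;
    *pos = ','; ++pos; --freeBytes;
  }
  --pos; ++freeBytes;                 // drop the trailing comma
  snprintf(pos, freeBytes, ")");
}

int main(void) {
  const char *cols[] = {"ts", "c1", "c2"};
  char buf[64];
  joinNames(buf, sizeof(buf), cols, 3);
  printf("%s\n", buf);                // prints: (ts,c1,c2)
  return 0;
}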
@ -419,7 +414,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) {
SSmlSTableMeta** tableMetaSml = taosHashIterate(info->superTables, NULL);
while (tableMetaSml) {
SSmlSTableMeta* cTablePoints = *tableMetaSml;
SSmlSTableMeta* sTableData = *tableMetaSml;
STableMeta *pTableMeta = NULL;
SEpSet ep = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
@ -436,8 +431,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) {
SSchemaAction schemaAction = {0};
schemaAction.action = SCHEMA_ACTION_CREATE_STABLE;
memcpy(schemaAction.createSTable.sTableName, superTable, superTableLen);
schemaAction.createSTable.tags = cTablePoints->tagHash;
schemaAction.createSTable.fields = cTablePoints->fieldHash;
schemaAction.createSTable.tags = sTableData->tags;
schemaAction.createSTable.fields = sTableData->cols;
code = smlApplySchemaAction(info, &schemaAction);
if (code != 0) {
uError("SML:0x%"PRIx64" smlApplySchemaAction failed. can not create %s", info->id, schemaAction.createSTable.sTableName);
@ -454,7 +449,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) {
uError("SML:0x%"PRIx64" load table meta error: %s", info->id, tstrerror(code));
return code;
}
cTablePoints->tableMeta = pTableMeta;
sTableData->tableMeta = pTableMeta;
tableMetaSml = taosHashIterate(info->superTables, tableMetaSml);
}
@ -1034,7 +1029,7 @@ static int32_t smlParseString(const char* sql, SSmlLineInfo *elements, SSmlMsgBu
return TSDB_CODE_SUCCESS;
}
static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, bool isTag, SSmlMsgBuf *msg){
static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, bool isTag, SHashObj *dumplicateKey, SSmlMsgBuf *msg){
if(isTag && len == 0){
SSmlKv *kv = taosMemoryCalloc(sizeof(SSmlKv), 1);
kv->key = TAG;
@ -1062,6 +1057,13 @@ static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, bool is
return TSDB_CODE_SML_INVALID_DATA;
}
if(taosHashGet(dumplicateKey, key, keyLen)){
smlBuildInvalidDataMsg(msg, "duplicate key", key);
return TSDB_CODE_SML_INVALID_DATA;
}else{
taosHashPut(dumplicateKey, key, keyLen, key, CHAR_BYTES);
}
// parse value
i++;
const char *value = data + i;
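
This is the new duplicate-key rejection: the first sighting of a key is recorded in dumplicateKey, and a second sighting fails the line (see the new test case "c=1,c=2" below). A toy version with a linear scan standing in for the SHashObj:

#include <stdio.h>
#include <string.h>

// Reject a line that repeats a key, as the new dumplicateKey check does;
// a linear scan over already-seen keys stands in for the SHashObj here.
static int checkKey(const char **seen, int *n, const char *key) {
  for (int i = 0; i < *n; i++) {
    if (strcmp(seen[i], key) == 0) return -1; // duplicate -> invalid data
  }
  seen[(*n)++] = key;                         // first sighting: remember it
  return 0;
}

int main(void) {
  const char *seen[8];
  int n = 0;
  printf("%d\n", checkKey(seen, &n, "c"));    // 0: accepted
  printf("%d\n", checkKey(seen, &n, "c"));    // -1: rejected, like "c=1,c=2"
  return 0;
}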
@ -1295,14 +1297,19 @@ static bool smlUpdateMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols,
SSmlKv *kv = taosArrayGetP(tags, i);
ASSERT(kv->type == TSDB_DATA_TYPE_NCHAR);
SSmlKv **value = taosHashGet(tableMeta->tagHash, kv->key, kv->keyLen);
if(value){
uint8_t *index = taosHashGet(tableMeta->tagHash, kv->key, kv->keyLen);
if(index){
SSmlKv **value = taosArrayGet(tableMeta->tags, *index);
ASSERT((*value)->type == TSDB_DATA_TYPE_NCHAR);
if(kv->valueLen > (*value)->valueLen){ // tags type is nchar
*value = kv;
}
}else{
taosHashPut(tableMeta->tagHash, kv->key, kv->keyLen, &kv, POINTER_BYTES);
size_t tmp = taosArrayGetSize(tableMeta->tags);
ASSERT(tmp <= UINT8_MAX);
uint8_t size = tmp;
taosArrayPush(tableMeta->tags, &kv);
taosHashPut(tableMeta->tagHash, kv->key, kv->keyLen, &size, CHAR_BYTES);
}
}
}
@ -1310,8 +1317,10 @@ static bool smlUpdateMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols,
if(cols){
for (int i = 1; i < taosArrayGetSize(cols); ++i) { //jump timestamp
SSmlKv *kv = taosArrayGetP(cols, i);
SSmlKv **value = taosHashGet(tableMeta->fieldHash, kv->key, kv->keyLen);
if(value){
int16_t *index = taosHashGet(tableMeta->fieldHash, kv->key, kv->keyLen);
if(index){
SSmlKv **value = taosArrayGet(tableMeta->cols, *index);
if(kv->type != (*value)->type){
smlBuildInvalidDataMsg(msg, "the type is not the same like before", kv->key);
return false;
@ -1323,7 +1332,11 @@ static bool smlUpdateMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols,
}
}
}else{
taosHashPut(tableMeta->fieldHash, kv->key, kv->keyLen, &kv, POINTER_BYTES);
size_t tmp = taosArrayGetSize(tableMeta->cols);
ASSERT(tmp <= INT16_MAX);
int16_t size = tmp;
taosArrayPush(tableMeta->cols, &kv);
taosHashPut(tableMeta->fieldHash, kv->key, kv->keyLen, &size, SHORT_BYTES);
}
}
}
@ -1332,16 +1345,18 @@ static bool smlUpdateMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols,
static void smlInsertMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols){
if(tags){
for (int i = 0; i < taosArrayGetSize(tags); ++i) {
for (uint8_t i = 0; i < taosArrayGetSize(tags); ++i) {
SSmlKv *kv = taosArrayGetP(tags, i);
taosHashPut(tableMeta->tagHash, kv->key, kv->keyLen, &kv, POINTER_BYTES);
taosArrayPush(tableMeta->tags, &kv);
taosHashPut(tableMeta->tagHash, kv->key, kv->keyLen, &i, CHAR_BYTES);
}
}
if(cols){
for (int i = 0; i < taosArrayGetSize(cols); ++i) {
for (int16_t i = 0; i < taosArrayGetSize(cols); ++i) {
SSmlKv *kv = taosArrayGetP(cols, i);
taosHashPut(tableMeta->fieldHash, kv->key, kv->keyLen, &kv, POINTER_BYTES);
taosArrayPush(tableMeta->cols, &kv);
taosHashPut(tableMeta->fieldHash, kv->key, kv->keyLen, &i, SHORT_BYTES);
}
}
}
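
Here the meta switches from hash-of-pointers to array-plus-index: the SArray preserves insertion order (so the generated CREATE STABLE lists columns in first-seen order) while the hash maps each key to its array index for fast lookup. A standalone sketch of the shape (a linear scan stands in for SHashObj, and unlike the real smlUpdateMeta, which only replaces a tag value when the new nchar value is longer, this sketch always replaces):

#include <stdio.h>
#include <string.h>

#define MAX_KV 16

typedef struct { const char *key; const char *val; } KV;

// The array keeps insertion order; the index answers "is this key known,
// and where?".
typedef struct {
  KV *items[MAX_KV];
  int n;
} Meta;

static int metaFind(Meta *m, const char *key) {
  for (int i = 0; i < m->n; i++) {
    if (strcmp(m->items[i]->key, key) == 0) return i;
  }
  return -1;
}

static void metaUpsert(Meta *m, KV *kv) {
  int idx = metaFind(m, kv->key);
  if (idx >= 0) {
    m->items[idx] = kv;       // update in place; position is preserved
  } else {
    m->items[m->n++] = kv;    // append: new index == size before the push
  }
}

int main(void) {
  KV a = {"t1", "x"}, b = {"t2", "y"}, a2 = {"t1", "longer"};
  Meta m = {0};
  metaUpsert(&m, &a);
  metaUpsert(&m, &b);
  metaUpsert(&m, &a2);        // replaces t1's value but keeps it first
  for (int i = 0; i < m.n; i++) printf("%s=%s\n", m.items[i]->key, m.items[i]->val);
  return 0;
}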
@ -1364,12 +1379,6 @@ static SSmlTableInfo* smlBuildTableInfo(bool format){
uError("SML:smlParseLine failed to allocate memory");
goto cleanup;
}
tag->columnsHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
if (tag->columnsHash == NULL) {
uError("SML:smlParseLine failed to allocate memory");
goto cleanup;
}
}
tag->tags = taosArrayInit(16, POINTER_BYTES);
@ -1399,7 +1408,6 @@ static void smlDestroyBuildTableInfo(SSmlTableInfo *tag, bool format){
}
taosHashCleanup(kvHash);
}
taosHashCleanup(tag->columnsHash);
}
taosArrayDestroy(tag->tags);
taosMemoryFreeClear(tag);
@ -1408,23 +1416,20 @@ static void smlDestroyBuildTableInfo(SSmlTableInfo *tag, bool format){
static int32_t smlDealCols(SSmlTableInfo* oneTable, bool dataFormat, SArray *cols){
if(dataFormat){
taosArrayPush(oneTable->colsFormat, &cols);
}else{
SHashObj *kvHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
if(!kvHash){
uError("SML:smlDealCols failed to allocate memory");
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for(size_t i = 0; i < taosArrayGetSize(cols); i++){
SSmlKv *kv = taosArrayGetP(cols, i);
taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); // TODO: keys need escaping (e.g. \=) because they are later looked up by schema name
if(taosHashGet(oneTable->columnsHash, kv->key, kv->keyLen) != NULL){
continue;
}
taosHashPut(oneTable->columnsHash, kv->key, kv->keyLen, &kv, POINTER_BYTES);
}
taosArrayPush(oneTable->cols, &kvHash);
return TSDB_CODE_SUCCESS;
}
SHashObj *kvHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
if(!kvHash){
uError("SML:smlDealCols failed to allocate memory");
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for(size_t i = 0; i < taosArrayGetSize(cols); i++){
SSmlKv *kv = taosArrayGetP(cols, i);
taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); // TODO: keys need escaping (e.g. \=) because they are later looked up by schema name
}
taosArrayPush(oneTable->cols, &kvHash);
return TSDB_CODE_SUCCESS;
}
@ -1444,6 +1449,18 @@ static SSmlSTableMeta* smlBuildSTableMeta(){
uError("SML:smlBuildSTableMeta failed to allocate memory");
goto cleanup;
}
meta->tags = taosArrayInit(32, POINTER_BYTES);
if (meta->tags == NULL) {
uError("SML:smlBuildSTableMeta failed to allocate memory");
goto cleanup;
}
meta->cols = taosArrayInit(32, POINTER_BYTES);
if (meta->cols == NULL) {
uError("SML:smlBuildSTableMeta failed to allocate memory");
goto cleanup;
}
return meta;
cleanup:
@ -1454,6 +1471,8 @@ cleanup:
static void smlDestroySTableMeta(SSmlSTableMeta *meta){
taosHashCleanup(meta->tagHash);
taosHashCleanup(meta->fieldHash);
taosArrayDestroy(meta->tags);
taosArrayDestroy(meta->cols);
taosMemoryFree(meta->tableMeta);
}
@ -1465,10 +1484,15 @@ static int32_t smlParseLine(SSmlHandle* info, const char* sql) {
return ret;
}
SArray *cols = taosArrayInit(16, POINTER_BYTES);
if (cols == NULL) {
uError("SML:0x%"PRIx64" smlParseLine failed to allocate memory", info->id);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
SArray *cols = NULL;
if(info->dataFormat){ // when dataFormat is true, cols needs newly allocated memory to hold the data
cols = taosArrayInit(16, POINTER_BYTES);
if (cols == NULL) {
uError("SML:0x%"PRIx64" smlParseLine failed to allocate memory", info->id);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}else{ // when dataFormat is false, reuse the shared colsContainer; the data itself is stored elsewhere
cols = info->colsContainer;
}
ret = smlParseTS(info, elements.timestamp, elements.timestampLen, cols);
@ -1476,7 +1500,7 @@ static int32_t smlParseLine(SSmlHandle* info, const char* sql) {
uError("SML:0x%"PRIx64" smlParseTS failed", info->id);
return ret;
}
ret = smlParseCols(elements.cols, elements.colsLen, cols, false, &info->msgBuf);
ret = smlParseCols(elements.cols, elements.colsLen, cols, false, info->dumplicateKey, &info->msgBuf);
if(ret != TSDB_CODE_SUCCESS){
uError("SML:0x%"PRIx64" smlParseCols parse cloums fields failed", info->id);
return ret;
@ -1500,46 +1524,51 @@ static int32_t smlParseLine(SSmlHandle* info, const char* sql) {
return ret;
}
}else{
SSmlTableInfo *tag = smlBuildTableInfo(info->dataFormat);
if(!tag){
SSmlTableInfo *tinfo = smlBuildTableInfo(info->dataFormat);
if(!tinfo){
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
ret = smlDealCols(tag, info->dataFormat, cols);
ret = smlDealCols(tinfo, info->dataFormat, cols);
if(ret != TSDB_CODE_SUCCESS){
return ret;
}
ret = smlParseCols(elements.tags, elements.tagsLen, tag->tags, true, &info->msgBuf);
ret = smlParseCols(elements.tags, elements.tagsLen, tinfo->tags, true, info->dumplicateKey, &info->msgBuf);
if(ret != TSDB_CODE_SUCCESS){
uError("SML:0x%"PRIx64" smlParseCols parse tag fields failed", info->id);
return ret;
}
if(taosArrayGetSize(tag->tags) > TSDB_MAX_TAGS){
if(taosArrayGetSize(tinfo->tags) > TSDB_MAX_TAGS){
smlBuildInvalidDataMsg(&info->msgBuf, "too many tags than 128", NULL);
return TSDB_CODE_SML_INVALID_DATA;
}
tag->sTableName = elements.measure;
tag->sTableNameLen = elements.measureLen;
smlBuildChildTableName(tag);
uDebug("SML:0x%"PRIx64" child table name: %s", info->id, tag->childTableName);
tinfo->sTableName = elements.measure;
tinfo->sTableNameLen = elements.measureLen;
smlBuildChildTableName(tinfo);
uDebug("SML:0x%"PRIx64" child table name: %s", info->id, tinfo->childTableName);
SSmlSTableMeta** tableMeta = taosHashGet(info->superTables, elements.measure, elements.measureLen);
if(tableMeta){ // update meta
ret = smlUpdateMeta(*tableMeta, tag->tags, cols, &info->msgBuf);
ret = smlUpdateMeta(*tableMeta, tinfo->tags, cols, &info->msgBuf);
if(!ret){
uError("SML:0x%"PRIx64" smlUpdateMeta failed", info->id);
return TSDB_CODE_SML_INVALID_DATA;
}
}else{
SSmlSTableMeta *meta = smlBuildSTableMeta();
smlInsertMeta(meta, tag->tags, cols);
smlInsertMeta(meta, tinfo->tags, cols);
taosHashPut(info->superTables, elements.measure, elements.measureLen, &meta, POINTER_BYTES);
}
taosHashPut(info->childTables, elements.measure, elements.measureTagsLen, &tag, POINTER_BYTES);
taosHashPut(info->childTables, elements.measure, elements.measureTagsLen, &tinfo, POINTER_BYTES);
}
if(!info->dataFormat){
taosArrayClear(info->colsContainer);
}
taosHashClear(info->dumplicateKey);
return TSDB_CODE_SUCCESS;
}
@ -1568,6 +1597,7 @@ static void smlDestroyInfo(SSmlHandle* info){
// destroy info->pVgHash
taosHashCleanup(info->pVgHash);
taosHashCleanup(info->dumplicateKey);
taosMemoryFreeClear(info);
}
@ -1614,8 +1644,17 @@ static SSmlHandle* smlBuildSmlInfo(TAOS* taos, SRequestObj* request, SMLProtocol
info->superTables = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
info->pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
info->dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
if(!dataFormat){
info->colsContainer = taosArrayInit(32, POINTER_BYTES);
if(NULL == info->colsContainer){
uError("SML:0x%"PRIx64" create info failed", info->id);
goto cleanup;
}
}
if(NULL == info->exec || NULL == info->childTables
|| NULL == info->superTables || NULL == info->pVgHash){
|| NULL == info->superTables || NULL == info->pVgHash
|| NULL == info->dumplicateKey){
uError("SML:0x%"PRIx64" create info failed", info->id);
goto cleanup;
}
@ -1651,7 +1690,7 @@ static int32_t smlInsertData(SSmlHandle* info) {
(*pMeta)->tableMeta->vgId = vg.vgId;
(*pMeta)->tableMeta->uid = tableData->uid; // data blocks of one table are merged together according to uid
code = smlBindData(info->exec, tableData->tags, tableData->colsFormat, tableData->columnsHash,
code = smlBindData(info->exec, tableData->tags, tableData->colsFormat, (*pMeta)->cols,
tableData->cols, info->dataFormat, (*pMeta)->tableMeta, tableData->childTableName, info->msgBuf.buf, info->msgBuf.len);
if(code != TSDB_CODE_SUCCESS){
return code;
@ -1730,7 +1769,7 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr
return NULL;
}
SSmlHandle* info = smlBuildSmlInfo(taos, request, protocol, precision, false);
SSmlHandle* info = smlBuildSmlInfo(taos, request, protocol, precision, true);
if(!info){
return (TAOS_RES*)request;
}

View File

@ -190,17 +190,21 @@ TEST(testCase, smlParseCols_Error_Test) {
"c=-3.402823466e+39u64",
"c=-339u64",
"c=18446744073709551616u64",
"c=1,c=2"
};
SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
for(int i = 0; i < sizeof(data)/sizeof(data[0]); i++){
char msg[256] = {0};
SSmlMsgBuf msgBuf;
msgBuf.buf = msg;
msgBuf.len = 256;
int32_t len = strlen(data[i]);
int32_t ret = smlParseCols(data[i], len, NULL, false, &msgBuf);
int32_t ret = smlParseCols(data[i], len, NULL, false, dumplicateKey, &msgBuf);
ASSERT_NE(ret, TSDB_CODE_SUCCESS);
taosHashClear(dumplicateKey);
}
taosHashCleanup(dumplicateKey);
}
TEST(testCase, smlParseCols_tag_Test) {
@ -211,11 +215,12 @@ TEST(testCase, smlParseCols_tag_Test) {
SArray *cols = taosArrayInit(16, POINTER_BYTES);
ASSERT_NE(cols, NULL);
SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
const char *data =
"cbin=\"passit hello,c=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf32_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
int32_t len = strlen(data);
int32_t ret = smlParseCols(data, len, cols, true, &msgBuf);
int32_t ret = smlParseCols(data, len, cols, true, dumplicateKey, &msgBuf);
ASSERT_EQ(ret, TSDB_CODE_SUCCESS);
int32_t size = taosArrayGetSize(cols);
ASSERT_EQ(size, 19);
@ -239,10 +244,14 @@ TEST(testCase, smlParseCols_tag_Test) {
taosMemoryFree(kv);
taosArrayClear(cols);
// test tag is null
data = "t=3e";
len = 0;
memset(msgBuf.buf, 0, msgBuf.len);
ret = smlParseCols(data, len, cols, true, &msgBuf);
taosHashClear(dumplicateKey);
ret = smlParseCols(data, len, cols, true, dumplicateKey, &msgBuf);
ASSERT_EQ(ret, TSDB_CODE_SUCCESS);
size = taosArrayGetSize(cols);
ASSERT_EQ(size, 1);
@ -255,6 +264,9 @@ TEST(testCase, smlParseCols_tag_Test) {
ASSERT_EQ(kv->valueLen, strlen(TAG));
ASSERT_EQ(strncasecmp(kv->value, TAG, strlen(TAG)), 0);
taosMemoryFree(kv);
taosArrayDestroy(cols);
taosHashCleanup(dumplicateKey);
}
TEST(testCase, smlParseCols_Test) {
@ -266,9 +278,11 @@ TEST(testCase, smlParseCols_Test) {
SArray *cols = taosArrayInit(16, POINTER_BYTES);
ASSERT_NE(cols, NULL);
SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
const char *data = "cbin=\"passit hello,c=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf32_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
int32_t len = strlen(data);
int32_t ret = smlParseCols(data, len, cols, false, &msgBuf);
int32_t ret = smlParseCols(data, len, cols, false, dumplicateKey, &msgBuf);
ASSERT_EQ(ret, TSDB_CODE_SUCCESS);
int32_t size = taosArrayGetSize(cols);
ASSERT_EQ(size, 19);
@ -450,6 +464,7 @@ TEST(testCase, smlParseCols_Test) {
taosMemoryFree(kv);
taosArrayDestroy(cols);
taosHashCleanup(dumplicateKey);
}
TEST(testCase, smlParseLine_Test) {
@ -468,17 +483,47 @@ TEST(testCase, smlParseLine_Test) {
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true);
ASSERT_NE(info, NULL);
const char *sql[3] = {
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451606400000000000",
const char *sql[9] = {
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0 1451606400000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451607400000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,heading=221,grade=0,fuel_consumption=25 1451608400000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451609400000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451619400000000000",
"readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 1451606400000000000",
"readings,name=truck_2,fleet=North,driver=Derek,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606400000000000"
"readings,name=truck_2,driver=Derek,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606400000000000",
"readings,name=truck_2,fleet=North,driver=Derek,model=F-150 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451609400000000000",
"readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451629400000000000"
};
smlInsertLines(info, sql, 3);
smlInsertLines(info, sql, 9);
// for (int i = 0; i < 3; i++) {
// smlParseLine(info, sql[i]);
// }
}
TEST(testCase, smlParseLine_error_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, NULL);
TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db");
taos_free_result(pRes);
pRes = taos_query(taos, "use sml_db");
taos_free_result(pRes);
SRequestObj *request = createRequest(taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, NULL);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true);
ASSERT_NE(info, NULL);
const char *sql[2] = {
"measure,t1=3 c1=8",
"measure,t2=3 c1=8u8"
};
int ret = smlInsertLines(info, sql, 2);
ASSERT_NE(ret, 0);
}
// TEST(testCase, smlParseTS_Test) {
// char msg[256] = {0};
// SSmlMsgBuf msgBuf;

View File

@ -211,6 +211,7 @@ static const SSysDbTableSchema transSchema[] = {
{.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "type", .bytes = TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "last_error", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};

View File

@ -170,8 +170,8 @@ uint32_t tsCurRange = 100; // range
char tsCompressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
// internal
int32_t tsTransPullupMs = 6000;
int32_t tsMaRebalanceMs = 2000;
int32_t tsTransPullupInterval = 6;
int32_t tsMqRebalanceInterval = 2;
void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) {
tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN);
@ -438,6 +438,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
return 0;
}
@ -575,6 +578,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN);
tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32;
tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32;
tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32;
if (tsQueryBufferSize >= 0) {
tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL;
}

View File

@ -51,7 +51,7 @@ int32_t dmReadEps(SDnode *pDnode) {
pDnode->data.dnodeEps = taosArrayInit(1, sizeof(SDnodeEp));
if (pDnode->data.dnodeEps == NULL) {
dError("failed to calloc dnodeEp array since %s", strerror(errno));
goto PRASE_DNODE_OVER;
goto _OVER;
}
snprintf(file, sizeof(file), "%s%sdnode.json", pDnode->wrappers[DNODE].path, TD_DIRSEP);
@ -59,53 +59,53 @@ int32_t dmReadEps(SDnode *pDnode) {
if (pFile == NULL) {
// dDebug("file %s not exist", file);
code = 0;
goto PRASE_DNODE_OVER;
goto _OVER;
}
len = (int32_t)taosReadFile(pFile, content, maxLen);
if (len <= 0) {
dError("failed to read %s since content is null", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
content[len] = 0;
root = cJSON_Parse(content);
if (root == NULL) {
dError("failed to read %s since invalid json format", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
cJSON *dnodeId = cJSON_GetObjectItem(root, "dnodeId");
if (!dnodeId || dnodeId->type != cJSON_Number) {
dError("failed to read %s since dnodeId not found", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
pDnode->data.dnodeId = dnodeId->valueint;
cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId");
if (!clusterId || clusterId->type != cJSON_String) {
dError("failed to read %s since clusterId not found", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
pDnode->data.clusterId = atoll(clusterId->valuestring);
cJSON *dropped = cJSON_GetObjectItem(root, "dropped");
if (!dropped || dropped->type != cJSON_Number) {
dError("failed to read %s since dropped not found", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
pDnode->data.dropped = dropped->valueint;
cJSON *dnodes = cJSON_GetObjectItem(root, "dnodes");
if (!dnodes || dnodes->type != cJSON_Array) {
dError("failed to read %s since dnodes not found", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
int32_t numOfDnodes = cJSON_GetArraySize(dnodes);
if (numOfDnodes <= 0) {
dError("failed to read %s since numOfDnodes:%d invalid", file, numOfDnodes);
goto PRASE_DNODE_OVER;
goto _OVER;
}
for (int32_t i = 0; i < numOfDnodes; ++i) {
@ -117,7 +117,7 @@ int32_t dmReadEps(SDnode *pDnode) {
cJSON *did = cJSON_GetObjectItem(node, "id");
if (!did || did->type != cJSON_Number) {
dError("failed to read %s since dnodeId not found", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
dnodeEp.id = did->valueint;
@ -125,14 +125,14 @@ int32_t dmReadEps(SDnode *pDnode) {
cJSON *dnodeFqdn = cJSON_GetObjectItem(node, "fqdn");
if (!dnodeFqdn || dnodeFqdn->type != cJSON_String || dnodeFqdn->valuestring == NULL) {
dError("failed to read %s since dnodeFqdn not found", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
tstrncpy(dnodeEp.ep.fqdn, dnodeFqdn->valuestring, TSDB_FQDN_LEN);
cJSON *dnodePort = cJSON_GetObjectItem(node, "port");
if (!dnodePort || dnodePort->type != cJSON_Number) {
dError("failed to read %s since dnodePort not found", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
dnodeEp.ep.port = dnodePort->valueint;
@ -140,7 +140,7 @@ int32_t dmReadEps(SDnode *pDnode) {
cJSON *isMnode = cJSON_GetObjectItem(node, "isMnode");
if (!isMnode || isMnode->type != cJSON_Number) {
dError("failed to read %s since isMnode not found", file);
goto PRASE_DNODE_OVER;
goto _OVER;
}
dnodeEp.isMnode = isMnode->valueint;
@ -151,7 +151,7 @@ int32_t dmReadEps(SDnode *pDnode) {
dDebug("succcessed to read file %s", file);
dmPrintEps(pDnode);
PRASE_DNODE_OVER:
_OVER:
if (content != NULL) taosMemoryFree(content);
if (root != NULL) cJSON_Delete(root);
if (pFile != NULL) taosCloseFile(&pFile);
@ -176,7 +176,7 @@ PRASE_DNODE_OVER:
int32_t dmWriteEps(SDnode *pDnode) {
char file[PATH_MAX] = {0};
char realfile[PATH_MAX];
char realfile[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%sdnode.json.bak", pDnode->wrappers[DNODE].path, TD_DIRSEP);
snprintf(realfile, sizeof(realfile), "%s%sdnode.json", pDnode->wrappers[DNODE].path, TD_DIRSEP);
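
The file/realfile pair follows the usual write-to-temp-then-rename pattern: dnode.json.bak is written completely, then swapped into place, so a crash mid-write never leaves a truncated dnode.json behind. The rename step itself is outside this hunk; a hedged sketch of the pattern:

#include <stdio.h>

// Write the temp file completely, then swap it into place.
static int writeFileAtomically(const char *tmp, const char *real, const char *text) {
  FILE *fp = fopen(tmp, "w");
  if (fp == NULL) return -1;
  if (fputs(text, fp) == EOF) {
    fclose(fp);
    return -1;
  }
  if (fclose(fp) != 0) return -1;
  return rename(tmp, real); // atomic replacement on POSIX filesystems
}

int main(void) {
  return writeFileAtomically("dnode.json.bak", "dnode.json", "{\"deployed\":1}\n");
}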

View File

@ -241,7 +241,11 @@ static int32_t dmSpawnUdfd(SDnode *pDnode) {
strncpy(path, tsProcPath, strlen(tsProcPath));
taosDirName(path);
}
#ifdef WINDOWS
strcat(path, "udfd.exe");
#else
strcat(path, "/udfd");
#endif
char* argsUdfd[] = {path, "-c", configDir, NULL};
options.args = argsUdfd;
options.file = path;
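
The new #ifdef picks the platform-specific udfd binary name. The project gates on its own WINDOWS macro; a standalone sketch using the standard _WIN32 macro instead, with an explicit path separator:

#include <stdio.h>

// Select the platform-specific binary name and append it to a directory.
static void buildUdfdPath(char *path, size_t cap, const char *dir) {
#ifdef _WIN32
  const char *exe = "\\udfd.exe";
#else
  const char *exe = "/udfd";
#endif
  snprintf(path, cap, "%s%s", dir, exe);
}

int main(void) {
  char path[256];
  buildUdfdPath(path, sizeof(path), "/usr/local/taos/bin");
  printf("%s\n", path); // /usr/local/taos/bin/udfd on non-Windows
  return 0;
}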

View File

@ -105,12 +105,13 @@ void dmStopMonitorThread(SDnode *pDnode) {
}
static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
SDnode * pDnode = pInfo->ahandle;
SRpcMsg *pRpc = &pMsg->rpcMsg;
int32_t code = -1;
SDnode *pDnode = pInfo->ahandle;
int32_t code = -1;
tmsg_t msgType = pMsg->rpcMsg.msgType;
dTrace("msg:%p, will be processed in dnode-mgmt queue", pMsg);
switch (pRpc->msgType) {
switch (msgType) {
case TDMT_DND_CONFIG_DNODE:
code = dmProcessConfigReq(pDnode, pMsg);
break;
@ -148,9 +149,14 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
break;
}
if (pRpc->msgType & 1u) {
if (code != 0) code = terrno;
SRpcMsg rsp = {.handle = pRpc->handle, .ahandle = pRpc->ahandle, .code = code, .refId = pRpc->refId};
if (msgType & 1u) {
if (code != 0 && terrno != 0) code = terrno;
SRpcMsg rsp = {
.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.code = code,
.refId = pMsg->rpcMsg.refId,
};
rpcSendResponse(&rsp);
}
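
The msgType & 1u test works because message types are defined in request/response pairs and, per this check, requests take the odd values; only messages with the low bit set get a reply. A toy dispatch assuming that numbering convention:

#include <stdint.h>
#include <stdio.h>

// Toy type values: requests odd, responses even.
enum {
  MSG_CONFIG_REQ = 0x0101, // odd  -> expects a response
  MSG_CONFIG_RSP = 0x0102, // even -> is itself a response
};

static void finishMsg(uint32_t msgType, int code) {
  if (msgType & 1u) {
    printf("msg 0x%x: send rsp, code %d\n", msgType, code);
  } else {
    printf("msg 0x%x: no rsp needed\n", msgType);
  }
}

int main(void) {
  finishMsg(MSG_CONFIG_REQ, 0);
  finishMsg(MSG_CONFIG_RSP, 0);
  return 0;
}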
@ -160,7 +166,13 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
}
int32_t dmStartWorker(SDnode *pDnode) {
SSingleWorkerCfg cfg = {.min = 1, .max = 1, .name = "dnode-mgmt", .fp = (FItem)dmProcessMgmtQueue, .param = pDnode};
SSingleWorkerCfg cfg = {
.min = 1,
.max = 1,
.name = "dnode-mgmt",
.fp = (FItem)dmProcessMgmtQueue,
.param = pDnode,
};
if (tSingleWorkerInit(&pDnode->data.mgmtWorker, &cfg) != 0) {
dError("failed to start dnode-mgmt worker since %s", terrstr());
return -1;

View File

@ -41,6 +41,8 @@
#include "monitor.h"
#include "sync.h"
#include "libs/function/function.h"
#ifdef __cplusplus
extern "C" {
#endif

View File

@ -55,6 +55,7 @@ void dmCleanup() {
monCleanup();
syncCleanUp();
walCleanUp();
udfcClose();
taosStopCacheRefreshWorker();
dInfo("dnode env is cleaned up");
}

View File

@ -18,7 +18,11 @@
static void bmSendErrorRsp(SNodeMsg *pMsg, int32_t code) {
SRpcMsg rpcRsp = {
.handle = pMsg->rpcMsg.handle, .ahandle = pMsg->rpcMsg.ahandle, .code = code, .refId = pMsg->rpcMsg.refId};
.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.code = code,
.refId = pMsg->rpcMsg.refId,
};
tmsgSendRsp(&rpcRsp);
dTrace("msg:%p, is freed", pMsg);
@ -103,7 +107,7 @@ static void bmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
}
int32_t bmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SBnodeMgmt * pMgmt = pWrapper->pMgmt;
SBnodeMgmt *pMgmt = pWrapper->pMgmt;
SMultiWorker *pWorker = &pMgmt->writeWorker;
dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name);
@ -112,7 +116,7 @@ int32_t bmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
}
int32_t bmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SBnodeMgmt * pMgmt = pWrapper->pMgmt;
SBnodeMgmt *pMgmt = pWrapper->pMgmt;
SSingleWorker *pWorker = &pMgmt->monitorWorker;
dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name);
@ -121,7 +125,12 @@ int32_t bmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
}
int32_t bmStartWorker(SBnodeMgmt *pMgmt) {
SMultiWorkerCfg cfg = {.max = 1, .name = "bnode-write", .fp = (FItems)bmProcessWriteQueue, .param = pMgmt};
SMultiWorkerCfg cfg = {
.max = 1,
.name = "bnode-write",
.fp = (FItems)bmProcessWriteQueue,
.param = pMgmt,
};
if (tMultiWorkerInit(&pMgmt->writeWorker, &cfg) != 0) {
dError("failed to start bnode-write worker since %s", terrstr());
return -1;
@ -129,7 +138,12 @@ int32_t bmStartWorker(SBnodeMgmt *pMgmt) {
if (tsMultiProcess) {
SSingleWorkerCfg mCfg = {
.min = 1, .max = 1, .name = "bnode-monitor", .fp = (FItem)bmProcessMonitorQueue, .param = pMgmt};
.min = 1,
.max = 1,
.name = "bnode-monitor",
.fp = (FItem)bmProcessMonitorQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
dError("failed to start bnode-monitor worker since %s", terrstr());
return -1;

View File

@ -22,7 +22,7 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) {
int32_t maxLen = 4096;
char *content = taosMemoryCalloc(1, maxLen + 1);
cJSON *root = NULL;
char file[PATH_MAX];
char file[PATH_MAX] = {0};
TdFilePtr pFile = NULL;
snprintf(file, sizeof(file), "%s%smnode.json", pMgmt->path, TD_DIRSEP);
@ -30,39 +30,39 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) {
if (pFile == NULL) {
// dDebug("file %s not exist", file);
code = 0;
goto PRASE_MNODE_OVER;
goto _OVER;
}
len = (int32_t)taosReadFile(pFile, content, maxLen);
if (len <= 0) {
dError("failed to read %s since content is null", file);
goto PRASE_MNODE_OVER;
goto _OVER;
}
content[len] = 0;
root = cJSON_Parse(content);
if (root == NULL) {
dError("failed to read %s since invalid json format", file);
goto PRASE_MNODE_OVER;
goto _OVER;
}
cJSON *deployed = cJSON_GetObjectItem(root, "deployed");
if (!deployed || deployed->type != cJSON_Number) {
dError("failed to read %s since deployed not found", file);
goto PRASE_MNODE_OVER;
goto _OVER;
}
*pDeployed = deployed->valueint;
cJSON *mnodes = cJSON_GetObjectItem(root, "mnodes");
if (!mnodes || mnodes->type != cJSON_Array) {
dError("failed to read %s since nodes not found", file);
goto PRASE_MNODE_OVER;
goto _OVER;
}
pMgmt->replica = cJSON_GetArraySize(mnodes);
if (pMgmt->replica <= 0 || pMgmt->replica > TSDB_MAX_REPLICA) {
dError("failed to read %s since mnodes size %d invalid", file, pMgmt->replica);
goto PRASE_MNODE_OVER;
goto _OVER;
}
for (int32_t i = 0; i < pMgmt->replica; ++i) {
@ -74,21 +74,21 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) {
cJSON *id = cJSON_GetObjectItem(node, "id");
if (!id || id->type != cJSON_Number) {
dError("failed to read %s since id not found", file);
goto PRASE_MNODE_OVER;
goto _OVER;
}
pReplica->id = id->valueint;
cJSON *fqdn = cJSON_GetObjectItem(node, "fqdn");
if (!fqdn || fqdn->type != cJSON_String || fqdn->valuestring == NULL) {
dError("failed to read %s since fqdn not found", file);
goto PRASE_MNODE_OVER;
goto _OVER;
}
tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN);
cJSON *port = cJSON_GetObjectItem(node, "port");
if (!port || port->type != cJSON_Number) {
dError("failed to read %s since port not found", file);
goto PRASE_MNODE_OVER;
goto _OVER;
}
pReplica->port = port->valueint;
}
@ -96,7 +96,7 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) {
code = 0;
dDebug("succcessed to read file %s, deployed:%d", file, *pDeployed);
PRASE_MNODE_OVER:
_OVER:
if (content != NULL) taosMemoryFree(content);
if (root != NULL) cJSON_Delete(root);
if (pFile != NULL) taosCloseFile(&pFile);

View File

@ -161,9 +161,7 @@ static int32_t mmOpen(SMgmtWrapper *pWrapper) {
SMnodeOpt option = {0};
if (!deployed) {
dInfo("mnode start to deploy");
// if (pWrapper->procType == DND_PROC_CHILD) {
pWrapper->pDnode->data.dnodeId = 1;
// }
pWrapper->pDnode->data.dnodeId = 1;
mmBuildOptionForDeploy(pMgmt, &option);
} else {
dInfo("mnode start to open");

View File

@ -17,42 +17,48 @@
#include "mmInt.h"
static inline void mmSendRsp(SNodeMsg *pMsg, int32_t code) {
SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.refId = pMsg->rpcMsg.refId,
.code = code,
.pCont = pMsg->pRsp,
.contLen = pMsg->rspLen};
SRpcMsg rsp = {
.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.refId = pMsg->rpcMsg.refId,
.code = code,
.pCont = pMsg->pRsp,
.contLen = pMsg->rspLen,
};
tmsgSendRsp(&rsp);
}
static void mmProcessQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
SMnodeMgmt *pMgmt = pInfo->ahandle;
int32_t code = -1;
tmsg_t msgType = pMsg->rpcMsg.msgType;
dTrace("msg:%p, get from mnode queue", pMsg);
SRpcMsg *pRpc = &pMsg->rpcMsg;
int32_t code = -1;
if (pMsg->rpcMsg.msgType == TDMT_DND_ALTER_MNODE) {
code = mmProcessAlterReq(pMgmt, pMsg);
} else if (pMsg->rpcMsg.msgType == TDMT_MON_MM_INFO) {
code = mmProcessGetMonMmInfoReq(pMgmt->pWrapper, pMsg);
} else if (pMsg->rpcMsg.msgType == TDMT_MON_MM_LOAD) {
code = mmProcessGetMnodeLoadsReq(pMgmt->pWrapper, pMsg);
} else {
pMsg->pNode = pMgmt->pMnode;
code = mndProcessMsg(pMsg);
switch (msgType) {
case TDMT_DND_ALTER_MNODE:
code = mmProcessAlterReq(pMgmt, pMsg);
break;
case TDMT_MON_MM_INFO:
code = mmProcessGetMonMmInfoReq(pMgmt->pWrapper, pMsg);
break;
case TDMT_MON_MM_LOAD:
code = mmProcessGetMnodeLoadsReq(pMgmt->pWrapper, pMsg);
break;
default:
pMsg->pNode = pMgmt->pMnode;
code = mndProcessMsg(pMsg);
}
if (pRpc->msgType & 1U) {
if (pRpc->handle != NULL && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
if (msgType & 1U) {
if (pMsg->rpcMsg.handle != NULL && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
if (code != 0 && terrno != 0) code = terrno;
mmSendRsp(pMsg, code);
}
}
dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code));
rpcFreeCont(pRpc->pCont);
rpcFreeCont(pMsg->rpcMsg.pCont);
taosFreeQitem(pMsg);
}
@ -78,38 +84,38 @@ static void mmProcessQueryQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
taosFreeQitem(pMsg);
}
static void mmPutMsgToWorker(SSingleWorker *pWorker, SNodeMsg *pMsg) {
static void mmPutNodeMsgToWorker(SSingleWorker *pWorker, SNodeMsg *pMsg) {
dTrace("msg:%p, put into worker %s", pMsg, pWorker->name);
taosWriteQitem(pWorker->queue, pMsg);
}
int32_t mmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SMnodeMgmt *pMgmt = pWrapper->pMgmt;
mmPutMsgToWorker(&pMgmt->writeWorker, pMsg);
mmPutNodeMsgToWorker(&pMgmt->writeWorker, pMsg);
return 0;
}
int32_t mmProcessSyncMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SMnodeMgmt *pMgmt = pWrapper->pMgmt;
mmPutMsgToWorker(&pMgmt->syncWorker, pMsg);
mmPutNodeMsgToWorker(&pMgmt->syncWorker, pMsg);
return 0;
}
int32_t mmProcessReadMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SMnodeMgmt *pMgmt = pWrapper->pMgmt;
mmPutMsgToWorker(&pMgmt->readWorker, pMsg);
mmPutNodeMsgToWorker(&pMgmt->readWorker, pMsg);
return 0;
}
int32_t mmProcessQueryMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SMnodeMgmt *pMgmt = pWrapper->pMgmt;
mmPutMsgToWorker(&pMgmt->queryWorker, pMsg);
mmPutNodeMsgToWorker(&pMgmt->queryWorker, pMsg);
return 0;
}
int32_t mmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SMnodeMgmt *pMgmt = pWrapper->pMgmt;
mmPutMsgToWorker(&pMgmt->monitorWorker, pMsg);
mmPutNodeMsgToWorker(&pMgmt->monitorWorker, pMsg);
return 0;
}
@ -144,40 +150,62 @@ int32_t mmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
}
int32_t mmStartWorker(SMnodeMgmt *pMgmt) {
SSingleWorkerCfg qCfg = {.min = tsNumOfMnodeQueryThreads,
.max = tsNumOfMnodeQueryThreads,
.name = "mnode-query",
.fp = (FItem)mmProcessQueryQueue,
.param = pMgmt};
SSingleWorkerCfg qCfg = {
.min = tsNumOfMnodeQueryThreads,
.max = tsNumOfMnodeQueryThreads,
.name = "mnode-query",
.fp = (FItem)mmProcessQueryQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->queryWorker, &qCfg) != 0) {
dError("failed to start mnode-query worker since %s", terrstr());
return -1;
}
SSingleWorkerCfg rCfg = {.min = tsNumOfMnodeReadThreads,
.max = tsNumOfMnodeReadThreads,
.name = "mnode-read",
.fp = (FItem)mmProcessQueue,
.param = pMgmt};
SSingleWorkerCfg rCfg = {
.min = tsNumOfMnodeReadThreads,
.max = tsNumOfMnodeReadThreads,
.name = "mnode-read",
.fp = (FItem)mmProcessQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->readWorker, &rCfg) != 0) {
dError("failed to start mnode-read worker since %s", terrstr());
return -1;
}
SSingleWorkerCfg wCfg = {.min = 1, .max = 1, .name = "mnode-write", .fp = (FItem)mmProcessQueue, .param = pMgmt};
SSingleWorkerCfg wCfg = {
.min = 1,
.max = 1,
.name = "mnode-write",
.fp = (FItem)mmProcessQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->writeWorker, &wCfg) != 0) {
dError("failed to start mnode-write worker since %s", terrstr());
return -1;
}
SSingleWorkerCfg sCfg = {.min = 1, .max = 1, .name = "mnode-sync", .fp = (FItem)mmProcessQueue, .param = pMgmt};
SSingleWorkerCfg sCfg = {
.min = 1,
.max = 1,
.name = "mnode-sync",
.fp = (FItem)mmProcessQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->syncWorker, &sCfg) != 0) {
dError("failed to start mnode mnode-sync worker since %s", terrstr());
return -1;
}
if (tsMultiProcess) {
SSingleWorkerCfg mCfg = {.min = 1, .max = 1, .name = "mnode-monitor", .fp = (FItem)mmProcessQueue, .param = pMgmt};
SSingleWorkerCfg mCfg = {
.min = 1,
.max = 1,
.name = "mnode-monitor",
.fp = (FItem)mmProcessQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
dError("failed to start mnode mnode-monitor worker since %s", terrstr());
return -1;

View File

@ -17,12 +17,14 @@
#include "qmInt.h"
static inline void qmSendRsp(SNodeMsg *pMsg, int32_t code) {
SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.refId = pMsg->rpcMsg.refId,
.code = code,
.pCont = pMsg->pRsp,
.contLen = pMsg->rspLen};
SRpcMsg rsp = {
.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.refId = pMsg->rpcMsg.refId,
.code = code,
.pCont = pMsg->pRsp,
.contLen = pMsg->rspLen,
};
tmsgSendRsp(&rsp);
}
@ -145,22 +147,26 @@ int32_t qmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) {
}
int32_t qmStartWorker(SQnodeMgmt *pMgmt) {
SSingleWorkerCfg queryCfg = {.min = tsNumOfVnodeQueryThreads,
.max = tsNumOfVnodeQueryThreads,
.name = "qnode-query",
.fp = (FItem)qmProcessQueryQueue,
.param = pMgmt};
SSingleWorkerCfg queryCfg = {
.min = tsNumOfVnodeQueryThreads,
.max = tsNumOfVnodeQueryThreads,
.name = "qnode-query",
.fp = (FItem)qmProcessQueryQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->queryWorker, &queryCfg) != 0) {
dError("failed to start qnode-query worker since %s", terrstr());
return -1;
}
SSingleWorkerCfg fetchCfg = {.min = tsNumOfQnodeFetchThreads,
.max = tsNumOfQnodeFetchThreads,
.name = "qnode-fetch",
.fp = (FItem)qmProcessFetchQueue,
.param = pMgmt};
SSingleWorkerCfg fetchCfg = {
.min = tsNumOfQnodeFetchThreads,
.max = tsNumOfQnodeFetchThreads,
.name = "qnode-fetch",
.fp = (FItem)qmProcessFetchQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->fetchWorker, &fetchCfg) != 0) {
dError("failed to start qnode-fetch worker since %s", terrstr());
@ -169,7 +175,12 @@ int32_t qmStartWorker(SQnodeMgmt *pMgmt) {
if (tsMultiProcess) {
SSingleWorkerCfg mCfg = {
.min = 1, .max = 1, .name = "qnode-monitor", .fp = (FItem)qmProcessMonitorQueue, .param = pMgmt};
.min = 1,
.max = 1,
.name = "qnode-monitor",
.fp = (FItem)qmProcessMonitorQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
dError("failed to start qnode-monitor worker since %s", terrstr());
return -1;

View File

@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "smInt.h"
#include "libs/function/function.h"
static int32_t smRequire(SMgmtWrapper *pWrapper, bool *required) { return dmReadFile(pWrapper, required); }
@ -29,6 +30,9 @@ static void smClose(SMgmtWrapper *pWrapper) {
if (pMgmt == NULL) return;
dInfo("snode-mgmt start to cleanup");
udfcClose();
if (pMgmt->pSnode != NULL) {
smStopWorker(pMgmt);
sndClose(pMgmt->pSnode);
@ -68,6 +72,10 @@ int32_t smOpen(SMgmtWrapper *pWrapper) {
}
dmReportStartup(pWrapper->pDnode, "snode-worker", "initialized");
if (udfcOpen() != 0) {
dError("failed to open udfc in snode");
}
return 0;
}

View File

@ -17,12 +17,14 @@
#include "smInt.h"
static inline void smSendRsp(SNodeMsg *pMsg, int32_t code) {
SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.refId = pMsg->rpcMsg.refId,
.code = code,
.pCont = pMsg->pRsp,
.contLen = pMsg->rspLen};
SRpcMsg rsp = {
.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.refId = pMsg->rpcMsg.refId,
.code = code,
.pCont = pMsg->pRsp,
.contLen = pMsg->rspLen,
};
tmsgSendRsp(&rsp);
}
@ -90,7 +92,12 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) {
return -1;
}
SMultiWorkerCfg cfg = {.max = 1, .name = "snode-unique", .fp = smProcessUniqueQueue, .param = pMgmt};
SMultiWorkerCfg cfg = {
.max = 1,
.name = "snode-unique",
.fp = smProcessUniqueQueue,
.param = pMgmt,
};
if (tMultiWorkerInit(pUniqueWorker, &cfg) != 0) {
dError("failed to start snode-unique worker since %s", terrstr());
return -1;
@ -101,11 +108,13 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) {
}
}
SSingleWorkerCfg cfg = {.min = tsNumOfSnodeSharedThreads,
.max = tsNumOfSnodeSharedThreads,
.name = "snode-shared",
.fp = (FItem)smProcessSharedQueue,
.param = pMgmt};
SSingleWorkerCfg cfg = {
.min = tsNumOfSnodeSharedThreads,
.max = tsNumOfSnodeSharedThreads,
.name = "snode-shared",
.fp = (FItem)smProcessSharedQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->sharedWorker, &cfg)) {
dError("failed to start snode shared-worker since %s", terrstr());
@ -114,7 +123,12 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) {
if (tsMultiProcess) {
SSingleWorkerCfg mCfg = {
.min = 1, .max = 1, .name = "snode-monitor", .fp = (FItem)smProcessMonitorQueue, .param = pMgmt};
.min = 1,
.max = 1,
.name = "snode-monitor",
.fp = (FItem)smProcessMonitorQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
dError("failed to start snode-monitor worker since %s", terrstr());
return -1;
@ -150,7 +164,7 @@ static FORCE_INLINE int32_t smGetSWTypeFromMsg(SRpcMsg *pMsg) {
}
int32_t smProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SSnodeMgmt * pMgmt = pWrapper->pMgmt;
SSnodeMgmt *pMgmt = pWrapper->pMgmt;
SMultiWorker *pWorker = taosArrayGetP(pMgmt->uniqueWorkers, 0);
if (pWorker == NULL) {
terrno = TSDB_CODE_INVALID_MSG;
@ -163,7 +177,7 @@ int32_t smProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
}
int32_t smProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SSnodeMgmt * pMgmt = pWrapper->pMgmt;
SSnodeMgmt *pMgmt = pWrapper->pMgmt;
SSingleWorker *pWorker = &pMgmt->monitorWorker;
dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name);
@ -172,7 +186,7 @@ int32_t smProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
}
int32_t smProcessUniqueMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SSnodeMgmt * pMgmt = pWrapper->pMgmt;
SSnodeMgmt *pMgmt = pWrapper->pMgmt;
int32_t index = smGetSWIdFromMsg(&pMsg->rpcMsg);
SMultiWorker *pWorker = taosArrayGetP(pMgmt->uniqueWorkers, index);
if (pWorker == NULL) {
@ -186,7 +200,7 @@ int32_t smProcessUniqueMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
}
int32_t smProcessSharedMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SSnodeMgmt * pMgmt = pWrapper->pMgmt;
SSnodeMgmt *pMgmt = pWrapper->pMgmt;
SSingleWorker *pWorker = &pMgmt->sharedWorker;
dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name);

View File

@ -29,15 +29,15 @@ typedef struct SVnodesMgmt {
SHashObj *hash;
SRWLatch latch;
SVnodesStat state;
const char *path;
SDnode *pDnode;
SMgmtWrapper *pWrapper;
STfs *pTfs;
SQWorkerPool queryPool;
SQWorkerPool fetchPool;
SWWorkerPool syncPool;
SWWorkerPool writePool;
SWWorkerPool mergePool;
const char *path;
SDnode *pDnode;
SMgmtWrapper *pWrapper;
SSingleWorker mgmtWorker;
SSingleWorker monitorWorker;
} SVnodesMgmt;
@ -95,9 +95,9 @@ int32_t vmProcessGetVnodeLoadsReq(SMgmtWrapper *pWrapper, SNodeMsg *pReq);
void vmGetVnodeLoads(SMgmtWrapper *pWrapper, SMonVloadInfo *pInfo);
// vmFile.c
int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes);
int32_t vmWriteVnodesToFile(SVnodesMgmt *pMgmt);
SVnodeObj **vmGetVnodesFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes);
int32_t vmGetVnodeListFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes);
int32_t vmWriteVnodeListToFile(SVnodesMgmt *pMgmt);
SVnodeObj **vmGetVnodeListFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes);
// vmWorker.c
int32_t vmStartWorker(SVnodesMgmt *pMgmt);
@ -105,11 +105,12 @@ void vmStopWorker(SVnodesMgmt *pMgmt);
int32_t vmAllocQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode);
void vmFreeQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode);
int32_t vmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc); // sync integration
int32_t vmPutMsgToWriteQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc);
int32_t vmPutMsgToWriteQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
int32_t vmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
int32_t vmPutMsgToApplyQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
int32_t vmPutMsgToQueryQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
int32_t vmPutMsgToFetchQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
int32_t vmPutMsgToApplyQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
int32_t vmPutMsgToMergeQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
int32_t vmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype);
int32_t vmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg);

View File

@ -16,7 +16,7 @@
#define _DEFAULT_SOURCE
#include "vmInt.h"
SVnodeObj **vmGetVnodesFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes) {
SVnodeObj **vmGetVnodeListFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes) {
taosRLockLatch(&pMgmt->latch);
int32_t num = 0;
@ -44,14 +44,14 @@ SVnodeObj **vmGetVnodesFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes) {
return pVnodes;
}
int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) {
int32_t vmGetVnodeListFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) {
int32_t code = TSDB_CODE_INVALID_JSON_FORMAT;
int32_t len = 0;
int32_t maxLen = 30000;
char *content = taosMemoryCalloc(1, maxLen + 1);
cJSON *root = NULL;
FILE *fp = NULL;
char file[PATH_MAX];
char file[PATH_MAX] = {0};
SWrapperCfg *pCfgs = NULL;
TdFilePtr pFile = NULL;
@ -61,26 +61,26 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n
if (pFile == NULL) {
dDebug("file %s not exist", file);
code = 0;
goto PRASE_VNODE_OVER;
goto _OVER;
}
len = (int32_t)taosReadFile(pFile, content, maxLen);
if (len <= 0) {
dError("failed to read %s since content is null", file);
goto PRASE_VNODE_OVER;
goto _OVER;
}
content[len] = 0;
root = cJSON_Parse(content);
if (root == NULL) {
dError("failed to read %s since invalid json format", file);
goto PRASE_VNODE_OVER;
goto _OVER;
}
cJSON *vnodes = cJSON_GetObjectItem(root, "vnodes");
if (!vnodes || vnodes->type != cJSON_Array) {
dError("failed to read %s since vnodes not found", file);
goto PRASE_VNODE_OVER;
goto _OVER;
}
int32_t vnodesNum = cJSON_GetArraySize(vnodes);
@@ -88,7 +88,7 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n
pCfgs = taosMemoryCalloc(vnodesNum, sizeof(SWrapperCfg));
if (pCfgs == NULL) {
dError("failed to read %s since out of memory", file);
goto PRASE_VNODE_OVER;
goto _OVER;
}
for (int32_t i = 0; i < vnodesNum; ++i) {
@@ -98,7 +98,7 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n
cJSON *vgId = cJSON_GetObjectItem(vnode, "vgId");
if (!vgId || vgId->type != cJSON_Number) {
dError("failed to read %s since vgId not found", file);
goto PRASE_VNODE_OVER;
goto _OVER;
}
pCfg->vgId = vgId->valueint;
snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCfg->vgId);
@@ -106,28 +106,28 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n
cJSON *dropped = cJSON_GetObjectItem(vnode, "dropped");
if (!dropped || dropped->type != cJSON_Number) {
dError("failed to read %s since dropped not found", file);
goto PRASE_VNODE_OVER;
goto _OVER;
}
pCfg->dropped = dropped->valueint;
cJSON *vgVersion = cJSON_GetObjectItem(vnode, "vgVersion");
if (!vgVersion || vgVersion->type != cJSON_Number) {
dError("failed to read %s since vgVersion not found", file);
goto PRASE_VNODE_OVER;
goto _OVER;
}
pCfg->vgVersion = vgVersion->valueint;
cJSON *dbUid = cJSON_GetObjectItem(vnode, "dbUid");
if (!dbUid || dbUid->type != cJSON_String) {
dError("failed to read %s since dbUid not found", file);
goto PRASE_VNODE_OVER;
goto _OVER;
}
pCfg->dbUid = atoll(dbUid->valuestring);
cJSON *db = cJSON_GetObjectItem(vnode, "db");
if (!db || db->type != cJSON_String) {
dError("failed to read %s since db not found", file);
goto PRASE_VNODE_OVER;
goto _OVER;
}
tstrncpy(pCfg->db, db->valuestring, TSDB_DB_FNAME_LEN);
}
@@ -139,7 +139,7 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n
code = 0;
dInfo("succcessed to read file %s", file);
PRASE_VNODE_OVER:
_OVER:
if (content != NULL) taosMemoryFree(content);
if (root != NULL) cJSON_Delete(root);
if (pFile != NULL) taosCloseFile(&pFile);
@@ -148,7 +148,7 @@ PRASE_VNODE_OVER:
return code;
}
int32_t vmWriteVnodesToFile(SVnodesMgmt *pMgmt) {
int32_t vmWriteVnodeListToFile(SVnodesMgmt *pMgmt) {
char file[PATH_MAX];
char realfile[PATH_MAX];
snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP);
@@ -162,7 +162,7 @@ int32_t vmWriteVnodesToFile(SVnodesMgmt *pMgmt) {
}
int32_t numOfVnodes = 0;
SVnodeObj **pVnodes = vmGetVnodesFromHash(pMgmt, &numOfVnodes);
SVnodeObj **pVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
int32_t len = 0;
int32_t maxLen = 65536;

View File

@@ -16,12 +16,37 @@
#define _DEFAULT_SOURCE
#include "vmInt.h"
void vmGetVnodeLoads(SMgmtWrapper *pWrapper, SMonVloadInfo *pInfo) {
SVnodesMgmt *pMgmt = pWrapper->pMgmt;
pInfo->pVloads = taosArrayInit(pMgmt->state.totalVnodes, sizeof(SVnodeLoad));
if (pInfo->pVloads == NULL) return;
taosRLockLatch(&pMgmt->latch);
void *pIter = taosHashIterate(pMgmt->hash, NULL);
while (pIter) {
SVnodeObj **ppVnode = pIter;
if (ppVnode == NULL || *ppVnode == NULL) continue;
SVnodeObj *pVnode = *ppVnode;
SVnodeLoad vload = {0};
vnodeGetLoad(pVnode->pImpl, &vload);
taosArrayPush(pInfo->pVloads, &vload);
pIter = taosHashIterate(pMgmt->hash, pIter);
}
taosRUnLockLatch(&pMgmt->latch);
}
void vmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonVmInfo *pInfo) {
SVnodesMgmt *pMgmt = pWrapper->pMgmt;
SMonVloadInfo vloads = {0};
vmGetVnodeLoads(pWrapper, &vloads);
if (vloads.pVloads == NULL) return;
SArray *pVloads = vloads.pVloads;
if (pVloads == NULL) return;
int32_t totalVnodes = 0;
int32_t masterNum = 0;
@@ -31,8 +56,8 @@ void vmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonVmInfo *pInfo) {
int64_t numOfBatchInsertReqs = 0;
int64_t numOfBatchInsertSuccessReqs = 0;
for (int32_t i = 0; i < taosArrayGetSize(vloads.pVloads); ++i) {
SVnodeLoad *pLoad = taosArrayGet(vloads.pVloads, i);
for (int32_t i = 0; i < taosArrayGetSize(pVloads); ++i) {
SVnodeLoad *pLoad = taosArrayGet(pVloads, i);
numOfSelectReqs += pLoad->numOfSelectReqs;
numOfInsertReqs += pLoad->numOfInsertReqs;
numOfInsertSuccessReqs += pLoad->numOfInsertSuccessReqs;
@@ -49,10 +74,16 @@ void vmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonVmInfo *pInfo) {
pInfo->vstat.numOfInsertSuccessReqs = numOfInsertSuccessReqs - pMgmt->state.numOfInsertSuccessReqs;
pInfo->vstat.numOfBatchInsertReqs = numOfBatchInsertReqs - pMgmt->state.numOfBatchInsertReqs;
pInfo->vstat.numOfBatchInsertSuccessReqs = numOfBatchInsertSuccessReqs - pMgmt->state.numOfBatchInsertSuccessReqs;
pMgmt->state = pInfo->vstat;
pMgmt->state.totalVnodes = totalVnodes;
pMgmt->state.masterNum = masterNum;
pMgmt->state.numOfSelectReqs = numOfSelectReqs;
pMgmt->state.numOfInsertReqs = numOfInsertReqs;
pMgmt->state.numOfInsertSuccessReqs = numOfInsertSuccessReqs;
pMgmt->state.numOfBatchInsertReqs = numOfBatchInsertReqs;
pMgmt->state.numOfBatchInsertSuccessReqs = numOfBatchInsertSuccessReqs;
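// cache the absolute counters, not the computed deltas (a plain struct copy here would feed
// the deltas back into the next report's subtraction)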
tfsGetMonitorInfo(pMgmt->pTfs, &pInfo->tfs);
taosArrayDestroy(vloads.pVloads);
taosArrayDestroy(pVloads);
}
int32_t vmProcessGetMonVmInfoReq(SMgmtWrapper *pWrapper, SNodeMsg *pReq) {
@@ -107,12 +138,13 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
memcpy(pCfg, &vnodeCfgDefault, sizeof(SVnodeCfg));
pCfg->vgId = pCreate->vgId;
strcpy(pCfg->dbname, pCreate->db);
tstrncpy(pCfg->dbname, pCreate->db, sizeof(pCfg->dbname));
pCfg->dbId = pCreate->dbUid;
pCfg->isWeak = true;
pCfg->tsdbCfg.days = 10;
pCfg->tsdbCfg.keep2 = 3650;
pCfg->tsdbCfg.keep0 = 3650;
pCfg->tsdbCfg.keep1 = 3650;
pCfg->tsdbCfg.keep2 = 3650;
for (size_t i = 0; i < taosArrayGetSize(pCreate->pRetensions); ++i) {
memcpy(&pCfg->tsdbCfg.retentions[i], taosArrayGet(pCreate->pRetensions, i), sizeof(SRetention));
}
@@ -121,30 +153,30 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
pCfg->hashEnd = pCreate->hashEnd;
pCfg->hashMethod = pCreate->hashMethod;
// sync integration
pCfg->syncCfg.myIndex = pCreate->selfIndex;
pCfg->syncCfg.replicaNum = pCreate->replica;
memset(&(pCfg->syncCfg.nodeInfo), 0, sizeof(pCfg->syncCfg.nodeInfo));
memset(&pCfg->syncCfg.nodeInfo, 0, sizeof(pCfg->syncCfg.nodeInfo));
for (int i = 0; i < pCreate->replica; ++i) {
(pCfg->syncCfg.nodeInfo)[i].nodePort = (pCreate->replicas)[i].port;
snprintf((pCfg->syncCfg.nodeInfo)[i].nodeFqdn, sizeof((pCfg->syncCfg.nodeInfo)[i].nodeFqdn), "%s",
(pCreate->replicas)[i].fqdn);
pCfg->syncCfg.nodeInfo[i].nodePort = pCreate->replicas[i].port;
snprintf(pCfg->syncCfg.nodeInfo[i].nodeFqdn, sizeof(pCfg->syncCfg.nodeInfo[i].nodeFqdn), "%s",
pCreate->replicas[i].fqdn);
}
}
static void vmGenerateWrapperCfg(SVnodesMgmt *pMgmt, SCreateVnodeReq *pCreate, SWrapperCfg *pCfg) {
memcpy(pCfg->db, pCreate->db, TSDB_DB_FNAME_LEN);
pCfg->dbUid = pCreate->dbUid;
pCfg->dropped = 0;
snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCreate->vgId);
pCfg->vgId = pCreate->vgId;
pCfg->vgVersion = pCreate->vgVersion;
pCfg->dropped = 0;
pCfg->dbUid = pCreate->dbUid;
tstrncpy(pCfg->db, pCreate->db, TSDB_DB_FNAME_LEN);
snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCreate->vgId);
}
int32_t vmProcessCreateVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) {
SRpcMsg *pReq = &pMsg->rpcMsg;
SCreateVnodeReq createReq = {0};
char path[TSDB_FILENAME_LEN];
int32_t code = -1;
char path[TSDB_FILENAME_LEN] = {0};
if (tDeserializeSCreateVnodeReq(pReq->pCont, pReq->contLen, &createReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
@@ -161,14 +193,13 @@ int32_t vmProcessCreateVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) {
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, createReq.vgId);
if (pVnode != NULL) {
tFreeSCreateVnodeReq(&createReq);
dDebug("vgId:%d, already exist", createReq.vgId);
tFreeSCreateVnodeReq(&createReq);
vmReleaseVnode(pMgmt, pVnode);
terrno = TSDB_CODE_NODE_ALREADY_DEPLOYED;
return -1;
}
// create vnode
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
tFreeSCreateVnodeReq(&createReq);
@@ -179,49 +210,43 @@ int32_t vmProcessCreateVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) {
SMsgCb msgCb = pMgmt->pDnode->data.msgCb;
msgCb.pWrapper = pMgmt->pWrapper;
msgCb.queueFps[WRITE_QUEUE] = vmPutMsgToWriteQueue;
msgCb.queueFps[SYNC_QUEUE] = vmPutMsgToSyncQueue;
msgCb.queueFps[APPLY_QUEUE] = vmPutMsgToApplyQueue;
msgCb.queueFps[QUERY_QUEUE] = vmPutMsgToQueryQueue;
msgCb.queueFps[FETCH_QUEUE] = vmPutMsgToFetchQueue;
msgCb.queueFps[APPLY_QUEUE] = vmPutMsgToApplyQueue;
msgCb.queueFps[SYNC_QUEUE] = vmPutMsgToSyncQueue; // sync integration
msgCb.queueFps[MERGE_QUEUE] = vmPutMsgToMergeQueue;
msgCb.qsizeFp = vmGetQueueSize;
SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, msgCb);
if (pImpl == NULL) {
dError("vgId:%d, failed to create vnode since %s", createReq.vgId, terrstr());
tFreeSCreateVnodeReq(&createReq);
return -1;
goto _OVER;
}
int32_t code = vmOpenVnode(pMgmt, &wrapperCfg, pImpl);
code = vmOpenVnode(pMgmt, &wrapperCfg, pImpl);
if (code != 0) {
tFreeSCreateVnodeReq(&createReq);
dError("vgId:%d, failed to open vnode since %s", createReq.vgId, terrstr());
vnodeClose(pImpl);
vnodeDestroy(path, pMgmt->pTfs);
terrno = code;
return code;
goto _OVER;
}
code = vnodeStart(pImpl);
if (code != 0) {
tFreeSCreateVnodeReq(&createReq);
dError("vgId:%d, failed to start sync since %s", createReq.vgId, terrstr());
vnodeClose(pImpl);
vnodeDestroy(path, pMgmt->pTfs);
terrno = code;
return code;
goto _OVER;
}
code = vmWriteVnodesToFile(pMgmt);
code = vmWriteVnodeListToFile(pMgmt);
if (code != 0) goto _OVER;
_OVER:
if (code != 0) {
tFreeSCreateVnodeReq(&createReq);
vnodeClose(pImpl);
vnodeDestroy(path, pMgmt->pTfs);
terrno = code;
return code;
}
return 0;
tFreeSCreateVnodeReq(&createReq);
terrno = code;
return code;
}
int32_t vmProcessDropVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) {
@@ -243,14 +268,14 @@ int32_t vmProcessDropVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) {
}
pVnode->dropped = 1;
if (vmWriteVnodesToFile(pMgmt) != 0) {
if (vmWriteVnodeListToFile(pMgmt) != 0) {
pVnode->dropped = 0;
vmReleaseVnode(pMgmt, pVnode);
return -1;
}
vmCloseVnode(pMgmt, pVnode);
vmWriteVnodesToFile(pMgmt);
vmWriteVnodeListToFile(pMgmt);
return 0;
}
@@ -287,7 +312,7 @@ void vmInitMsgHandle(SMgmtWrapper *pWrapper) {
dmSetMsgHandle(pWrapper, TDMT_VND_CANCEL_SMA, vmProcessWriteMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_DROP_SMA, vmProcessWriteMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SUBMIT_RSMA, vmProcessWriteMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_MQ_VG_CHANGE, (NodeMsgFp)vmProcessWriteMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_MQ_VG_CHANGE, vmProcessWriteMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_CONSUME, vmProcessFetchMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_TASK_DEPLOY, vmProcessWriteMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_QUERY_HEARTBEAT, vmProcessFetchMsg, DEFAULT_HANDLE);
@@ -300,14 +325,13 @@ void vmInitMsgHandle(SMgmtWrapper *pWrapper) {
dmSetMsgHandle(pWrapper, TDMT_DND_CREATE_VNODE, vmProcessMgmtMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_DND_DROP_VNODE, vmProcessMgmtMsg, DEFAULT_HANDLE);
// sync integration
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_TIMEOUT, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_PING, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_PING_REPLY, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_CLIENT_REQUEST, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_CLIENT_REQUEST_REPLY, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_REQUEST_VOTE, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_REQUEST_VOTE_REPLY, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_APPEND_ENTRIES, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_APPEND_ENTRIES_REPLY, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_TIMEOUT, vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_PING, vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_PING_REPLY, vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_CLIENT_REQUEST, vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_CLIENT_REQUEST_REPLY, vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_REQUEST_VOTE, vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_REQUEST_VOTE_REPLY, vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_APPEND_ENTRIES, vmProcessSyncMsg, DEFAULT_HANDLE);
dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_APPEND_ENTRIES_REPLY, vmProcessSyncMsg, DEFAULT_HANDLE);
}

View File

@@ -15,7 +15,6 @@
#define _DEFAULT_SOURCE
#include "vmInt.h"
#include "libs/function/function.h"
SVnodeObj *vmAcquireVnode(SVnodesMgmt *pMgmt, int32_t vgId) {
SVnodeObj *pVnode = NULL;
@@ -55,14 +54,14 @@ int32_t vmOpenVnode(SVnodesMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
pVnode->vgId = pCfg->vgId;
pVnode->refCount = 0;
pVnode->vgVersion = pCfg->vgVersion;
pVnode->dropped = 0;
pVnode->accessState = TSDB_VN_ALL_ACCCESS;
pVnode->pWrapper = pMgmt->pWrapper;
pVnode->pImpl = pImpl;
pVnode->vgVersion = pCfg->vgVersion;
pVnode->dbUid = pCfg->dbUid;
pVnode->db = tstrdup(pCfg->db);
pVnode->path = tstrdup(pCfg->path);
pVnode->pImpl = pImpl;
pVnode->pWrapper = pMgmt->pWrapper;
if (pVnode->path == NULL || pVnode->db == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -78,14 +77,11 @@ int32_t vmOpenVnode(SVnodesMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
int32_t code = taosHashPut(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnodeObj *));
taosWUnLockLatch(&pMgmt->latch);
if (code != 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
}
return code;
}
void vmCloseVnode(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) {
char path[TSDB_FILENAME_LEN];
char path[TSDB_FILENAME_LEN] = {0};
taosWLockLatch(&pMgmt->latch);
taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));
@@ -98,6 +94,7 @@ void vmCloseVnode(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) {
while (!taosQueueEmpty(pVnode->pApplyQ)) taosMsleep(10);
while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10);
while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10);
while (!taosQueueEmpty(pVnode->pMergeQ)) taosMsleep(10);
vmFreeQueue(pMgmt, pVnode);
vnodeClose(pVnode->pImpl);
@@ -116,7 +113,7 @@ void vmCloseVnode(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) {
taosMemoryFree(pVnode);
}
static void *vmOpenVnodeFunc(void *param) {
static void *vmOpenVnodeInThread(void *param) {
SVnodeThread *pThread = param;
SVnodesMgmt *pMgmt = pThread->pMgmt;
SDnode *pDnode = pMgmt->pDnode;
@@ -136,10 +133,11 @@ static void *vmOpenVnodeFunc(void *param) {
SMsgCb msgCb = pMgmt->pDnode->data.msgCb;
msgCb.pWrapper = pMgmt->pWrapper;
msgCb.queueFps[WRITE_QUEUE] = vmPutMsgToWriteQueue;
msgCb.queueFps[SYNC_QUEUE] = vmPutMsgToSyncQueue;
msgCb.queueFps[APPLY_QUEUE] = vmPutMsgToApplyQueue;
msgCb.queueFps[QUERY_QUEUE] = vmPutMsgToQueryQueue;
msgCb.queueFps[FETCH_QUEUE] = vmPutMsgToFetchQueue;
msgCb.queueFps[APPLY_QUEUE] = vmPutMsgToApplyQueue;
msgCb.queueFps[SYNC_QUEUE] = vmPutMsgToSyncQueue; // sync integration
msgCb.queueFps[MERGE_QUEUE] = vmPutMsgToMergeQueue;
msgCb.qsizeFp = vmGetQueueSize;
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pCfg->vgId);
SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, msgCb);
@@ -148,12 +146,10 @@ static void *vmOpenVnodeFunc(void *param) {
pThread->failed++;
} else {
vmOpenVnode(pMgmt, pCfg, pImpl);
// vnodeStart(pImpl);
dDebug("vgId:%d, is opened by thread:%d", pCfg->vgId, pThread->threadIndex);
pThread->opened++;
atomic_add_fetch_32(&pMgmt->state.openVnodes, 1);
}
atomic_add_fetch_32(&pMgmt->state.openVnodes, 1);
}
dDebug("thread:%d, total vnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened,
@@ -163,29 +159,24 @@ static void *vmOpenVnodeFunc(void *param) {
static int32_t vmOpenVnodes(SVnodesMgmt *pMgmt) {
SDnode *pDnode = pMgmt->pDnode;
taosInitRWLatch(&pMgmt->latch);
pMgmt->hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
if (pMgmt->hash == NULL) {
dError("failed to init vnode hash");
terrno = TSDB_CODE_OUT_OF_MEMORY;
dError("failed to init vnode hash since %s", terrstr());
return -1;
}
SWrapperCfg *pCfgs = NULL;
int32_t numOfVnodes = 0;
if (vmGetVnodesFromFile(pMgmt, &pCfgs, &numOfVnodes) != 0) {
if (vmGetVnodeListFromFile(pMgmt, &pCfgs, &numOfVnodes) != 0) {
dInfo("failed to get vnode list from disk since %s", terrstr());
return -1;
}
pMgmt->state.totalVnodes = numOfVnodes;
#if 0
int32_t threadNum = tsNumOfCores;
#else
int32_t threadNum = 1;
#endif
int32_t threadNum = 1; // tsNumOfCores;
int32_t vnodesPerThread = numOfVnodes / threadNum + 1;
SVnodeThread *threads = taosMemoryCalloc(threadNum, sizeof(SVnodeThread));
@@ -210,7 +201,7 @@ static int32_t vmOpenVnodes(SVnodesMgmt *pMgmt) {
TdThreadAttr thAttr;
taosThreadAttrInit(&thAttr);
taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
if (taosThreadCreate(&pThread->thread, &thAttr, vmOpenVnodeFunc, pThread) != 0) {
if (taosThreadCreate(&pThread->thread, &thAttr, vmOpenVnodeInThread, pThread) != 0) {
dError("thread:%d, failed to create thread to open vnode, reason:%s", pThread->threadIndex, strerror(errno));
}
@@ -240,7 +231,7 @@ static void vmCloseVnodes(SVnodesMgmt *pMgmt) {
dInfo("start to close all vnodes");
int32_t numOfVnodes = 0;
SVnodeObj **pVnodes = vmGetVnodesFromHash(pMgmt, &numOfVnodes);
SVnodeObj **pVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
for (int32_t i = 0; i < numOfVnodes; ++i) {
vmCloseVnode(pMgmt, pVnodes[i]);
@@ -267,12 +258,9 @@ static void vmCleanup(SMgmtWrapper *pWrapper) {
vmStopWorker(pMgmt);
vnodeCleanup();
tfsClose(pMgmt->pTfs);
// walCleanUp();
taosMemoryFree(pMgmt);
pWrapper->pMgmt = NULL;
// syncCleanUp();
udfcClose();
dInfo("vnode-mgmt is cleaned up");
}
@@ -313,7 +301,6 @@ static int32_t vmInit(SMgmtWrapper *pWrapper) {
}
dmReportStartup(pDnode, "vnode-wal", "initialized");
// sync integration
if (syncInit() != 0) {
dError("failed to open sync since %s", terrstr());
return -1;
@@ -337,7 +324,7 @@ static int32_t vmInit(SMgmtWrapper *pWrapper) {
dmReportStartup(pDnode, "vnode-vnodes", "initialized");
if (udfcOpen() != 0) {
dError("failed to open udfc in dnode");
dError("failed to open udfc in vnode");
}
code = 0;
@@ -381,23 +368,7 @@ static int32_t vmStart(SMgmtWrapper *pWrapper) {
}
static void vmStop(SMgmtWrapper *pWrapper) {
#if 0
dDebug("vnode-mgmt start to stop");
SVnodesMgmt *pMgmt = pWrapper->pMgmt;
taosRLockLatch(&pMgmt->latch);
void *pIter = taosHashIterate(pMgmt->hash, NULL);
while (pIter) {
SVnodeObj **ppVnode = pIter;
if (ppVnode == NULL || *ppVnode == NULL) continue;
SVnodeObj *pVnode = *ppVnode;
vnodeStop(pVnode->pImpl);
pIter = taosHashIterate(pMgmt->hash, pIter);
}
taosRUnLockLatch(&pMgmt->latch);
#endif
// process inside the vnode
}
void vmSetMgmtFp(SMgmtWrapper *pWrapper) {
@@ -413,25 +384,3 @@ void vmSetMgmtFp(SMgmtWrapper *pWrapper) {
pWrapper->fp = mgmtFp;
}
void vmGetVnodeLoads(SMgmtWrapper *pWrapper, SMonVloadInfo *pInfo) {
SVnodesMgmt *pMgmt = pWrapper->pMgmt;
pInfo->pVloads = taosArrayInit(pMgmt->state.totalVnodes, sizeof(SVnodeLoad));
if (pInfo->pVloads == NULL) return;
taosRLockLatch(&pMgmt->latch);
void *pIter = taosHashIterate(pMgmt->hash, NULL);
while (pIter) {
SVnodeObj **ppVnode = pIter;
if (ppVnode == NULL || *ppVnode == NULL) continue;
SVnodeObj *pVnode = *ppVnode;
SVnodeLoad vload = {0};
vnodeGetLoad(pVnode->pImpl, &vload);
taosArrayPush(pInfo->pVloads, &vload);
pIter = taosHashIterate(pMgmt->hash, pIter);
}
taosRUnLockLatch(&pMgmt->latch);
}

View File

@@ -14,28 +14,29 @@
*/
#define _DEFAULT_SOURCE
#include "vmInt.h"
#include "sync.h"
#include "syncTools.h"
static inline void vmSendRsp(SMgmtWrapper *pWrapper, SNodeMsg *pMsg, int32_t code) {
SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.refId = pMsg->rpcMsg.refId,
.code = code,
.pCont = pMsg->pRsp,
.contLen = pMsg->rspLen};
static inline void vmSendRsp(SNodeMsg *pMsg, int32_t code) {
SRpcMsg rsp = {
.handle = pMsg->rpcMsg.handle,
.ahandle = pMsg->rpcMsg.ahandle,
.refId = pMsg->rpcMsg.refId,
.code = code,
.pCont = pMsg->pRsp,
.contLen = pMsg->rspLen,
};
tmsgSendRsp(&rsp);
}
static void vmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
static void vmProcessMgmtMonitorQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
SVnodesMgmt *pMgmt = pInfo->ahandle;
int32_t code = -1;
tmsg_t msgType = pMsg->rpcMsg.msgType;
dTrace("msg:%p, will be processed in vnode-m queue", pMsg);
dTrace("msg:%p, will be processed in vnode-mgmt/monitor queue", pMsg);
switch (msgType) {
case TDMT_MON_VM_INFO:
@@ -52,12 +53,12 @@ static void vmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
break;
default:
terrno = TSDB_CODE_MSG_NOT_PROCESSED;
dError("msg:%p, not processed in vnode-mgmt queue", pMsg);
dError("msg:%p, not processed in vnode-mgmt/monitor queue", pMsg);
}
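// by convention the response type is the request type + 1, so an odd type here means the sender expects a reply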
if (msgType & 1u) {
if (code != 0 && terrno != 0) code = terrno;
vmSendRsp(pMgmt->pWrapper, pMsg, code);
vmSendRsp(pMsg, code);
}
dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code));
@@ -71,7 +72,9 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
dTrace("msg:%p, will be processed in vnode-query queue", pMsg);
int32_t code = vnodeProcessQueryMsg(pVnode->pImpl, &pMsg->rpcMsg);
if (code != 0) {
vmSendRsp(pVnode->pWrapper, pMsg, code);
if (terrno != 0) code = terrno;
vmSendRsp(pMsg, code);
dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code));
rpcFreeCont(pMsg->rpcMsg.pCont);
taosFreeQitem(pMsg);
@@ -84,7 +87,9 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
dTrace("msg:%p, will be processed in vnode-fetch queue", pMsg);
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, &pMsg->rpcMsg, pInfo);
if (code != 0) {
vmSendRsp(pVnode->pWrapper, pMsg, code);
if (terrno != 0) code = terrno;
vmSendRsp(pMsg, code);
dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code));
rpcFreeCont(pMsg->rpcMsg.pCont);
taosFreeQitem(pMsg);
@@ -108,32 +113,10 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
dTrace("msg:%p, will be processed in vnode-write queue", pMsg);
if (taosArrayPush(pArray, &pMsg) == NULL) {
dTrace("msg:%p, failed to process since %s", pMsg, terrstr());
vmSendRsp(pVnode->pWrapper, pMsg, TSDB_CODE_OUT_OF_MEMORY);
vmSendRsp(pMsg, TSDB_CODE_OUT_OF_MEMORY);
}
}
#if 0
int64_t version;
vnodePreprocessWriteReqs(pVnode->pImpl, pArray, &version);
numOfMsgs = taosArrayGetSize(pArray);
for (int32_t i = 0; i < numOfMsgs; i++) {
SNodeMsg *pMsg = *(SNodeMsg **)taosArrayGet(pArray, i);
SRpcMsg *pRpc = &pMsg->rpcMsg;
rsp.pCont = NULL;
rsp.contLen = 0;
rsp.code = 0;
rsp.handle = pRpc->handle;
rsp.ahandle = pRpc->ahandle;
rsp.refId = pRpc->refId;
int32_t code = vnodeProcessWriteReq(pVnode->pImpl, pRpc, version++, &rsp);
tmsgSendRsp(&rsp);
}
#else
// sync integration response
for (int i = 0; i < taosArrayGetSize(pArray); i++) {
SNodeMsg *pMsg;
SRpcMsg *pRpc;
@@ -168,7 +151,6 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
assert(0);
}
}
#endif
for (int32_t i = 0; i < numOfMsgs; i++) {
SNodeMsg *pMsg = *(SNodeMsg **)taosArrayGet(pArray, i);
@@ -186,9 +168,6 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
SRpcMsg rsp;
for (int32_t i = 0; i < numOfMsgs; ++i) {
#if 1
// sync integration
taosGetQitem(qall, (void **)&pMsg);
// init response rpc msg
@@ -219,7 +198,9 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
rsp.refId = pMsg->rpcMsg.refId;
tmsgSendRsp(&rsp);
}
#endif
rpcFreeCont(pMsg->rpcMsg.pCont);
taosFreeQitem(pMsg);
}
}
@@ -233,6 +214,9 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf
// todo
SRpcMsg *pRsp = NULL;
(void)vnodeProcessSyncReq(pVnode->pImpl, &pMsg->rpcMsg, &pRsp);
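// the sync queue consumes the message once dequeued, so release the RPC payload and the queue item here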
rpcFreeCont(pMsg->rpcMsg.pCont);
taosFreeQitem(pMsg);
}
}
@@ -246,7 +230,9 @@ static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
dTrace("msg:%p, will be processed in vnode-merge queue", pMsg);
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, &pMsg->rpcMsg, pInfo);
if (code != 0) {
vmSendRsp(pVnode->pWrapper, pMsg, code);
if (terrno != 0) code = terrno;
vmSendRsp(pMsg, code);
dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code));
rpcFreeCont(pMsg->rpcMsg.pCont);
taosFreeQitem(pMsg);
@ -257,16 +243,17 @@ static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
static int32_t vmPutNodeMsgToQueue(SVnodesMgmt *pMgmt, SNodeMsg *pMsg, EQueueType qtype) {
SRpcMsg *pRpc = &pMsg->rpcMsg;
SMsgHead *pHead = pRpc->pCont;
int32_t code = 0;
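// the message head arrives in network byte order; convert it before routing by vgId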
pHead->contLen = ntohl(pHead->contLen);
pHead->vgId = ntohl(pHead->vgId);
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
if (pVnode == NULL) {
dError("vgId:%d, failed to write msg:%p to vnode-queue since %s", pHead->vgId, pMsg, terrstr());
return terrno;
return terrno != 0 ? terrno : -1;
}
int32_t code = 0;
switch (qtype) {
case QUERY_QUEUE:
dTrace("msg:%p, type:%s will be written into vnode-query queue", pMsg, TMSG_INFO(pRpc->msgType));
@@ -326,7 +313,7 @@ int32_t vmProcessMergeMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
int32_t vmProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SVnodesMgmt *pMgmt = pWrapper->pMgmt;
SSingleWorker *pWorker = &pMgmt->mgmtWorker;
dTrace("msg:%p, will be written to vnode-mgmt queue, worker:%s", pMsg, pWorker->name);
dTrace("msg:%p, will be put into vnode-mgmt queue, worker:%s", pMsg, pWorker->name);
taosWriteQitem(pWorker->queue, pMsg);
return 0;
}
@@ -335,7 +322,7 @@ int32_t vmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
SVnodesMgmt *pMgmt = pWrapper->pMgmt;
SSingleWorker *pWorker = &pMgmt->monitorWorker;
dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name);
dTrace("msg:%p, will be put into vnode-monitor queue, worker:%s", pMsg, pWorker->name);
taosWriteQitem(pWorker->queue, pMsg);
return 0;
}
@@ -350,9 +337,7 @@ static int32_t vmPutRpcMsgToQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc, EQueueT
SNodeMsg *pMsg = taosAllocateQitem(sizeof(SNodeMsg));
int32_t code = 0;
if (pMsg == NULL) {
code = -1;
} else {
if (pMsg != NULL) {
dTrace("msg:%p, is created, type:%s", pMsg, TMSG_INFO(pRpc->msgType));
pMsg->rpcMsg = *pRpc;
// if (pMsg->rpcMsg.handle != NULL) assert(pMsg->rpcMsg.refId != 0);
@@ -377,7 +362,7 @@ static int32_t vmPutRpcMsgToQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc, EQueueT
dTrace("msg:%p, will be put into vnode-merge queue", pMsg);
taosWriteQitem(pVnode->pMergeQ, pMsg);
break;
case SYNC_QUEUE: // sync integration
case SYNC_QUEUE:
dTrace("msg:%p, will be put into vnode-sync queue", pMsg);
taosWriteQitem(pVnode->pSyncQ, pMsg);
break;
@@ -387,6 +372,7 @@ static int32_t vmPutRpcMsgToQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc, EQueueT
break;
}
}
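// balance the reference taken by vmAcquireVnode above, whether or not the message was queued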
vmReleaseVnode(pMgmt, pVnode);
return code;
}
@@ -395,6 +381,14 @@ int32_t vmPutMsgToWriteQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
return vmPutRpcMsgToQueue(pWrapper, pRpc, WRITE_QUEUE);
}
int32_t vmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
return vmPutRpcMsgToQueue(pWrapper, pRpc, SYNC_QUEUE);
}
int32_t vmPutMsgToApplyQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
return vmPutRpcMsgToQueue(pWrapper, pRpc, APPLY_QUEUE);
}
int32_t vmPutMsgToQueryQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
return vmPutRpcMsgToQueue(pWrapper, pRpc, QUERY_QUEUE);
}
@@ -403,30 +397,15 @@ int32_t vmPutMsgToFetchQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
return vmPutRpcMsgToQueue(pWrapper, pRpc, FETCH_QUEUE);
}
int32_t vmPutMsgToApplyQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
return vmPutRpcMsgToQueue(pWrapper, pRpc, APPLY_QUEUE);
}
int32_t vmPutMsgToMergeQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
return vmPutRpcMsgToQueue(pWrapper, pRpc, MERGE_QUEUE);
}
// sync integration
int32_t vmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
return vmPutRpcMsgToQueue(pWrapper, pRpc, SYNC_QUEUE);
}
int32_t vmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) {
int32_t size = -1;
SVnodeObj *pVnode = vmAcquireVnode(pWrapper->pMgmt, vgId);
if (pVnode != NULL) {
switch (qtype) {
case QUERY_QUEUE:
size = taosQueueSize(pVnode->pQueryQ);
break;
case FETCH_QUEUE:
size = taosQueueSize(pVnode->pFetchQ);
break;
case WRITE_QUEUE:
size = taosQueueSize(pVnode->pWriteQ);
break;
@@ -436,6 +415,12 @@ int32_t vmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) {
case APPLY_QUEUE:
size = taosQueueSize(pVnode->pApplyQ);
break;
case QUERY_QUEUE:
size = taosQueueSize(pVnode->pQueryQ);
break;
case FETCH_QUEUE:
size = taosQueueSize(pVnode->pFetchQ);
break;
case MERGE_QUEUE:
size = taosQueueSize(pVnode->pMergeQ);
break;
@@ -449,14 +434,14 @@ int32_t vmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) {
int32_t vmAllocQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) {
pVnode->pWriteQ = tWWorkerAllocQueue(&pMgmt->writePool, pVnode, (FItems)vmProcessWriteQueue);
pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->writePool, pVnode, (FItems)vmProcessApplyQueue);
pVnode->pMergeQ = tWWorkerAllocQueue(&pMgmt->mergePool, pVnode, (FItems)vmProcessMergeQueue);
pVnode->pSyncQ = tWWorkerAllocQueue(&pMgmt->syncPool, pVnode, (FItems)vmProcessSyncQueue);
pVnode->pFetchQ = tQWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItem)vmProcessFetchQueue);
pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->writePool, pVnode, (FItems)vmProcessApplyQueue);
pVnode->pQueryQ = tQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
pVnode->pFetchQ = tQWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItem)vmProcessFetchQueue);
pVnode->pMergeQ = tWWorkerAllocQueue(&pMgmt->mergePool, pVnode, (FItems)vmProcessMergeQueue);
if (pVnode->pApplyQ == NULL || pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pFetchQ == NULL ||
pVnode->pQueryQ == NULL || pVnode->pMergeQ == NULL) {
if (pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pApplyQ == NULL || pVnode->pQueryQ == NULL ||
pVnode->pFetchQ == NULL || pVnode->pMergeQ == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
@@ -466,17 +451,17 @@ int32_t vmAllocQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) {
}
void vmFreeQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) {
tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pWriteQ);
tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ);
tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pApplyQ);
tQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
tQWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pWriteQ);
tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pApplyQ);
tWWorkerFreeQueue(&pMgmt->mergePool, pVnode->pMergeQ);
tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ);
pVnode->pWriteQ = NULL;
pVnode->pApplyQ = NULL;
pVnode->pSyncQ = NULL;
pVnode->pFetchQ = NULL;
pVnode->pApplyQ = NULL;
pVnode->pQueryQ = NULL;
pVnode->pFetchQ = NULL;
pVnode->pMergeQ = NULL;
dDebug("vgId:%d, vnode queue is freed", pVnode->vgId);
}
@@ -499,17 +484,23 @@ int32_t vmStartWorker(SVnodesMgmt *pMgmt) {
pWPool->max = tsNumOfVnodeWriteThreads;
if (tWWorkerInit(pWPool) != 0) return -1;
pWPool = &pMgmt->syncPool;
pWPool->name = "vnode-sync";
pWPool->max = tsNumOfVnodeSyncThreads;
if (tWWorkerInit(pWPool) != 0) return -1;
SWWorkerPool *pSPool = &pMgmt->syncPool;
pSPool->name = "vnode-sync";
pSPool->max = tsNumOfVnodeSyncThreads;
if (tWWorkerInit(pSPool) != 0) return -1;
pWPool = &pMgmt->mergePool;
pWPool->name = "vnode-merge";
pWPool->max = tsNumOfVnodeMergeThreads;
if (tWWorkerInit(pWPool) != 0) return -1;
SWWorkerPool *pMPool = &pMgmt->mergePool;
pMPool->name = "vnode-merge";
pMPool->max = tsNumOfVnodeMergeThreads;
if (tWWorkerInit(pMPool) != 0) return -1;
SSingleWorkerCfg cfg = {.min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};
SSingleWorkerCfg cfg = {
.min = 1,
.max = 1,
.name = "vnode-mgmt",
.fp = (FItem)vmProcessMgmtMonitorQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->mgmtWorker, &cfg) != 0) {
dError("failed to start vnode-mgmt worker since %s", terrstr());
return -1;
@@ -517,7 +508,12 @@ int32_t vmStartWorker(SVnodesMgmt *pMgmt) {
if (tsMultiProcess) {
SSingleWorkerCfg mCfg = {
.min = 1, .max = 1, .name = "vnode-monitor", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};
.min = 1,
.max = 1,
.name = "vnode-monitor",
.fp = (FItem)vmProcessMgmtMonitorQueue,
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
dError("failed to start mnode vnode-monitor worker since %s", terrstr());
return -1;
@@ -531,10 +527,10 @@ int32_t vmStartWorker(SVnodesMgmt *pMgmt) {
void vmStopWorker(SVnodesMgmt *pMgmt) {
tSingleWorkerCleanup(&pMgmt->monitorWorker);
tSingleWorkerCleanup(&pMgmt->mgmtWorker);
tQWorkerCleanup(&pMgmt->fetchPool);
tQWorkerCleanup(&pMgmt->queryPool);
tWWorkerCleanup(&pMgmt->writePool);
tWWorkerCleanup(&pMgmt->syncPool);
tQWorkerCleanup(&pMgmt->queryPool);
tQWorkerCleanup(&pMgmt->fetchPool);
tWWorkerCleanup(&pMgmt->mergePool);
dDebug("vnode workers are closed");
}

View File

@@ -335,7 +335,6 @@ static int32_t mndProcessStatusReq(SNodeMsg *pReq) {
}
bool roleChanged = false;
for (int32_t vg = 0; vg < pVgroup->replica; ++vg) {
// sync integration
if (pVgroup->vnodeGid[vg].dnodeId == statusReq.dnodeId) {
if (pVgroup->vnodeGid[vg].role != pVload->syncState) {
roleChanged = true;

View File

@@ -1405,15 +1405,18 @@ static int32_t mndRetrieveTrans(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pB
colDataAppend(pColInfo, numOfRows, (const char *)dbname, false);
char type[TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes);
STR_WITH_MAXSIZE_TO_VARSTR(type, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)type, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->lastExecTime, false);
char lastError[TSDB_TRANS_ERROR_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(dbname, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes);
STR_WITH_MAXSIZE_TO_VARSTR(lastError, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)lastError, false);

View File

@@ -65,7 +65,7 @@ static void mndPullupTrans(void *param, void *tmrId) {
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
}
taosTmrReset(mndPullupTrans, tsTransPullupMs, pMnode, pMnode->timer, &pMnode->transTimer);
taosTmrReset(mndPullupTrans, tsTransPullupInterval * 1000, pMnode, pMnode->timer, &pMnode->transTimer);
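// note: tsTransPullupInterval is configured in seconds, while the timer API expects milliseconds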
}
static void mndCalMqRebalance(void *param, void *tmrId) {
@@ -81,7 +81,7 @@ static void mndCalMqRebalance(void *param, void *tmrId) {
tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg);
}
taosTmrReset(mndCalMqRebalance, tsMaRebalanceMs, pMnode, pMnode->timer, &pMnode->mqTimer);
taosTmrReset(mndCalMqRebalance, tsMqRebalanceInterval * 1000, pMnode, pMnode->timer, &pMnode->mqTimer);
}
static void mndPullupTelem(void *param, void *tmrId) {
@@ -103,12 +103,12 @@ static int32_t mndInitTimer(SMnode *pMnode) {
return -1;
}
if (taosTmrReset(mndPullupTrans, tsTransPullupMs, pMnode, pMnode->timer, &pMnode->transTimer)) {
if (taosTmrReset(mndPullupTrans, tsTransPullupInterval * 1000, pMnode, pMnode->timer, &pMnode->transTimer)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
if (taosTmrReset(mndCalMqRebalance, tsMaRebalanceMs, pMnode, pMnode->timer, &pMnode->mqTimer)) {
if (taosTmrReset(mndCalMqRebalance, tsMqRebalanceInterval * 1000, pMnode, pMnode->timer, &pMnode->mqTimer)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}

View File

@@ -58,7 +58,7 @@ class MndTestTrans2 : public ::testing::Test {
strcpy(opt.replicas[0].fqdn, "localhost");
opt.msgCb = msgCb;
tsTransPullupMs = 1000;
tsTransPullupInterval = 1;
const char *mnodepath = "/tmp/mnode_test_trans";
taosRemoveDir(mnodepath);

View File

@@ -310,7 +310,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
code = taosFsyncFile(pFile);
if (code != 0) {
code = TAOS_SYSTEM_ERROR(errno);
mError("failed to write file:%s since %s", tmpfile, tstrerror(code));
mError("failed to sync file:%s since %s", tmpfile, tstrerror(code));
}
}

View File

@@ -137,28 +137,18 @@ struct STsdbCfg {
int8_t update;
int8_t compression;
int8_t slLevel;
int32_t days;
int32_t minRows;
int32_t maxRows;
int32_t keep0;
int32_t keep1;
int32_t keep2;
// TODO: save to tsdb cfg file
int8_t type; // ETsdbType
int32_t days; // kept only to persist the config; do not use in tsdbRead/tsdbCommit/..., use STsdbKeepCfg in STsdb instead
int32_t keep0; // kept only to persist the config; do not use in tsdbRead/tsdbCommit/..., use STsdbKeepCfg in STsdb instead
int32_t keep1; // kept only to persist the config; do not use in tsdbRead/tsdbCommit/..., use STsdbKeepCfg in STsdb instead
int32_t keep2; // kept only to persist the config; do not use in tsdbRead/tsdbCommit/..., use STsdbKeepCfg in STsdb instead
SRetention retentions[TSDB_RETENTION_MAX];
};
typedef enum {
TSDB_TYPE_TSDB = 0, // TSDB
TSDB_TYPE_TSMA = 1, // TSMA
TSDB_TYPE_RSMA_L0 = 2, // RSMA Level 0
TSDB_TYPE_RSMA_L1 = 3, // RSMA Level 1
TSDB_TYPE_RSMA_L2 = 4, // RSMA Level 2
} ETsdbType;
struct SVnodeCfg {
int32_t vgId;
char dbname[TSDB_DB_NAME_LEN];
char dbname[TSDB_DB_FNAME_LEN];
uint64_t dbId;
int32_t szPage;
int32_t szCache;
@@ -167,7 +157,7 @@ struct SVnodeCfg {
bool isWeak;
STsdbCfg tsdbCfg;
SWalCfg walCfg;
SSyncCfg syncCfg; // sync integration
SSyncCfg syncCfg;
uint32_t hashBegin;
uint32_t hashEnd;
int8_t hashMethod;

View File

@@ -70,9 +70,10 @@ struct SSmaEnvs {
struct STsdb {
char *path;
SVnode *pVnode;
TdThreadMutex mutex;
bool repoLocked;
int8_t level; // retention level
TdThreadMutex mutex;
STsdbKeepCfg keepCfg;
STsdbMemTable *mem;
STsdbMemTable *imem;
SRtn rtn;
@@ -185,6 +186,7 @@ struct STsdbFS {
#define REPO_ID(r) TD_VID((r)->pVnode)
#define REPO_CFG(r) (&(r)->pVnode->config.tsdbCfg)
#define REPO_KEEP_CFG(r) (&(r)->keepCfg)
#define REPO_LEVEL(r) ((r)->level)
#define REPO_FS(r) ((r)->fs)
#define REPO_META(r) ((r)->pVnode->pMeta)
@@ -830,7 +832,7 @@ typedef struct {
#define TSDB_FS_ITER_FORWARD TSDB_ORDER_ASC
#define TSDB_FS_ITER_BACKWARD TSDB_ORDER_DESC
STsdbFS *tsdbNewFS(const STsdbCfg *pCfg);
STsdbFS *tsdbNewFS(const STsdbKeepCfg *pCfg);
void *tsdbFreeFS(STsdbFS *pfs);
int tsdbOpenFS(STsdb *pRepo);
void tsdbCloseFS(STsdb *pRepo);

View File

@@ -59,6 +59,7 @@ typedef struct SQWorker SQHandle;
#define VNODE_TQ_DIR "tq"
#define VNODE_WAL_DIR "wal"
#define VNODE_TSMA_DIR "tsma"
#define VNODE_RSMA0_DIR "tsdb"
#define VNODE_RSMA1_DIR "rsma1"
#define VNODE_RSMA2_DIR "rsma2"
@@ -154,6 +155,22 @@ struct SVnodeInfo {
SVState state;
};
typedef enum {
TSDB_TYPE_TSDB = 0, // TSDB
TSDB_TYPE_TSMA = 1, // TSMA
TSDB_TYPE_RSMA_L0 = 2, // RSMA Level 0
TSDB_TYPE_RSMA_L1 = 3, // RSMA Level 1
TSDB_TYPE_RSMA_L2 = 4, // RSMA Level 2
} ETsdbType;
typedef struct {
int8_t precision; // precision is always used together with the keep cfgs below
int32_t days;
int32_t keep0;
int32_t keep1;
int32_t keep2;
} STsdbKeepCfg;
struct SVnode {
char* path;
SVnodeCfg config;
@@ -176,10 +193,11 @@ struct SVnode {
SQHandle* pQuery;
};
#define VND_TSDB(vnd) ((vnd)->pTsdb)
#define VND_RSMA0(vnd) ((vnd)->pTsdb)
#define VND_RSMA1(vnd) ((vnd)->pRSma1)
#define VND_RSMA2(vnd) ((vnd)->pRSma2)
#define VND_TSDB(vnd) ((vnd)->pTsdb)
#define VND_RSMA0(vnd) ((vnd)->pTsdb)
#define VND_RSMA1(vnd) ((vnd)->pRSma1)
#define VND_RSMA2(vnd) ((vnd)->pRSma2)
#define VND_RETENTIONS(vnd) (&(vnd)->config.tsdbCfg.retentions)
struct STbUidStore {
tb_uid_t suid;

View File

@@ -210,7 +210,7 @@ int tsdbCommit(STsdb *pRepo) {
}
void tsdbGetRtnSnap(STsdb *pRepo, SRtn *pRtn) {
STsdbCfg *pCfg = REPO_CFG(pRepo);
STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pRepo);
TSKEY minKey, midKey, maxKey, now;
now = taosGetTimestamp(pCfg->precision);
@@ -304,9 +304,9 @@ static void tsdbSeekCommitIter(SCommitH *pCommith, TSKEY key) {
}
static int tsdbNextCommitFid(SCommitH *pCommith) {
STsdb *pRepo = TSDB_COMMIT_REPO(pCommith);
STsdbCfg *pCfg = REPO_CFG(pRepo);
int fid = TSDB_IVLD_FID;
STsdb *pRepo = TSDB_COMMIT_REPO(pCommith);
STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pRepo);
int fid = TSDB_IVLD_FID;
for (int i = 0; i < pCommith->niters; i++) {
SCommitIter *pIter = pCommith->iters + i;
@@ -337,8 +337,8 @@ static void tsdbDestroyCommitH(SCommitH *pCommith) {
}
static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
STsdb *pRepo = TSDB_COMMIT_REPO(pCommith);
STsdbCfg *pCfg = REPO_CFG(pRepo);
STsdb *pRepo = TSDB_COMMIT_REPO(pCommith);
STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pRepo);
ASSERT(pSet == NULL || pSet->fid == fid);

View File

@@ -191,7 +191,7 @@ static int tsdbAddDFileSetToStatus(SFSStatus *pStatus, const SDFileSet *pSet) {
}
// ================== STsdbFS
STsdbFS *tsdbNewFS(const STsdbCfg *pCfg) {
STsdbFS *tsdbNewFS(const STsdbKeepCfg *pCfg) {
int keep = pCfg->keep2;
int days = pCfg->days;
int maxFSet = TSDB_MAX_FSETS(keep, days);

View File

@@ -15,7 +15,30 @@
#include "tsdb.h"
static int tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level);
#define TSDB_OPEN_RSMA_IMPL(v, l) \
do { \
SRetention *r = VND_RETENTIONS(v)[0]; \
if (RETENTION_VALID(r)) { \
return tsdbOpenImpl((v), type, &VND_RSMA##l(v), VNODE_RSMA##l##_DIR, TSDB_RETENTION_L##l); \
} \
} while (0)
#define TSDB_SET_KEEP_CFG(l) \
do { \
SRetention *r = &pCfg->retentions[l]; \
pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \
pKeepCfg->keep0 = pKeepCfg->keep2; \
pKeepCfg->keep1 = pKeepCfg->keep2; \
pKeepCfg->days = tsdbEvalDays(r, pCfg->precision); \
} while (0)
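For reference (not part of the patch), hand-expanding TSDB_OPEN_RSMA_IMPL(pVnode, 1) with the VND_RSMA1, VNODE_RSMA1_DIR, and TSDB_RETENTION_L1 definitions from the vnode headers in this commit gives roughly:
/* approximate expansion of TSDB_OPEN_RSMA_IMPL(pVnode, 1): */
do {
  SRetention *r = VND_RETENTIONS(pVnode)[0]; /* the level-0 retention gates every rollup level */
  if (RETENTION_VALID(r)) {
    return tsdbOpenImpl(pVnode, type, &VND_RSMA1(pVnode), VNODE_RSMA1_DIR, TSDB_RETENTION_L1);
  }
} while (0);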
#define RETENTION_DAYS_SPLIT_RATIO 10
#define RETENTION_DAYS_SPLIT_MIN 1
#define RETENTION_DAYS_SPLIT_MAX 30
static int32_t tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int8_t type);
static int32_t tsdbEvalDays(SRetention *r, int8_t precision);
static int32_t tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level);
int tsdbOpen(SVnode *pVnode, int8_t type) {
switch (type) {
@@ -25,11 +48,63 @@ int tsdbOpen(SVnode *pVnode, int8_t type) {
ASSERT(0);
break;
case TSDB_TYPE_RSMA_L0:
return tsdbOpenImpl(pVnode, type, &VND_RSMA0(pVnode), VNODE_TSDB_DIR, TSDB_RETENTION_L0);
TSDB_OPEN_RSMA_IMPL(pVnode, 0);
break;
case TSDB_TYPE_RSMA_L1:
return tsdbOpenImpl(pVnode, type, &VND_RSMA1(pVnode), VNODE_RSMA1_DIR, TSDB_RETENTION_L1);
TSDB_OPEN_RSMA_IMPL(pVnode, 1);
break;
case TSDB_TYPE_RSMA_L2:
return tsdbOpenImpl(pVnode, type, &VND_RSMA2(pVnode), VNODE_RSMA2_DIR, TSDB_RETENTION_L2);
TSDB_OPEN_RSMA_IMPL(pVnode, 2);
break;
default:
ASSERT(0);
break;
}
return 0;
}
static int32_t tsdbEvalDays(SRetention *r, int8_t precision) {
int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY);
int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY);
int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO;
if (days <= RETENTION_DAYS_SPLIT_MIN) {
days = RETENTION_DAYS_SPLIT_MIN;
if (days < freqDays) {
days = freqDays + 1;
}
} else {
if (days > RETENTION_DAYS_SPLIT_MAX) {
days = RETENTION_DAYS_SPLIT_MAX;
}
if (days < freqDays) {
days = freqDays + 1;
}
}
return days * 1440;
}
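As a quick sanity check on the day-split heuristic above, the following standalone sketch (not part of the patch) replays the same arithmetic; it assumes keep and freq are already expressed in days, whereas the real function converts them from the configured precision:
#include <stdio.h>
/* replay of tsdbEvalDays: split the retention window into roughly ten files,
 * clamp the per-file span to [1, 30] days, and keep it above the rollup freq */
static int evalDaysInMinutes(int keepDays, int freqDays) {
  int days = keepDays / 10; /* RETENTION_DAYS_SPLIT_RATIO */
  if (days < 1) days = 1;   /* RETENTION_DAYS_SPLIT_MIN */
  if (days > 30) days = 30; /* RETENTION_DAYS_SPLIT_MAX */
  if (days < freqDays) days = freqDays + 1;
  return days * 1440; /* the keep cfg stores minutes */
}
int main(void) {
  printf("%d\n", evalDaysInMinutes(300, 1)); /* 43200 minutes, i.e. 30-day files */
  printf("%d\n", evalDaysInMinutes(5, 1));   /* 1440 minutes, i.e. 1-day files */
  return 0;
}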
static int32_t tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int8_t type) {
pKeepCfg->precision = pCfg->precision;
switch (type) {
case TSDB_TYPE_TSDB:
pKeepCfg->days = pCfg->days;
pKeepCfg->keep0 = pCfg->keep0;
pKeepCfg->keep1 = pCfg->keep1;
pKeepCfg->keep2 = pCfg->keep2;
break;
case TSDB_TYPE_TSMA:
ASSERT(0);
break;
case TSDB_TYPE_RSMA_L0:
TSDB_SET_KEEP_CFG(0);
break;
case TSDB_TYPE_RSMA_L1:
TSDB_SET_KEEP_CFG(1);
break;
case TSDB_TYPE_RSMA_L2:
TSDB_SET_KEEP_CFG(2);
break;
default:
ASSERT(0);
break;
@@ -38,16 +113,16 @@ int tsdbOpen(SVnode *pVnode, int8_t type) {
}
/**
* @brief
*
* @param pVnode
* @param type
* @param ppTsdb
* @param dir
* @brief
*
* @param pVnode
* @param type
* @param ppTsdb
* @param dir
* @param level retention level
* @return int
* @return int
*/
int tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level) {
int32_t tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level) {
STsdb *pTsdb = NULL;
int slen = 0;
@@ -62,13 +137,13 @@ int tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, i
}
pTsdb->path = (char *)&pTsdb[1];
sprintf(pTsdb->path, "%s%s%s%s%s", tfsGetPrimaryPath(pVnode->pTfs), TD_DIRSEP, pVnode->path, TD_DIRSEP,
dir);
sprintf(pTsdb->path, "%s%s%s%s%s", tfsGetPrimaryPath(pVnode->pTfs), TD_DIRSEP, pVnode->path, TD_DIRSEP, dir);
pTsdb->pVnode = pVnode;
pTsdb->level = level;
pTsdb->repoLocked = false;
taosThreadMutexInit(&pTsdb->mutex, NULL);
pTsdb->fs = tsdbNewFS(REPO_CFG(pTsdb));
tsdbSetKeepCfg(REPO_KEEP_CFG(pTsdb), REPO_CFG(pTsdb), type);
pTsdb->fs = tsdbNewFS(REPO_KEEP_CFG(pTsdb));
// create dir (TODO: use tfsMkdir)
taosMkDir(pTsdb->path);

View File

@@ -320,7 +320,7 @@ static bool emptyQueryTimewindow(STsdbReadHandle* pTsdbReadHandle) {
// Update the query time window according to the data time-to-live (TTL) information, in order to avoid returning
// expired data to the client, even if it has already been queried.
static int64_t getEarliestValidTimestamp(STsdb* pTsdb) {
STsdbCfg* pCfg = REPO_CFG(pTsdb);
STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdb);
int64_t now = taosGetTimestamp(pCfg->precision);
return now - (tsTickPerDay[pCfg->precision] * pCfg->keep2) + 1; // needs to add one tick
@@ -879,14 +879,14 @@ static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t order,
}
}
static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, STSRow** extraRow, TDRowVerT maxVer) {
static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, STSRow** extraRow,
TDRowVerT maxVer) {
STSRow *rmem = NULL, *rimem = NULL;
if (pCheckInfo->iter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
if (node != NULL) {
rmem = (STSRow*)SL_GET_NODE_DATA(node);
#if 0 // TODO: skiplist refactor
#if 0 // TODO: skiplist refactor
if (TD_ROW_VER(rmem) > maxVer) {
rmem = NULL;
}
@@ -898,7 +898,7 @@ static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
if (node != NULL) {
rimem = (STSRow*)SL_GET_NODE_DATA(node);
#if 0 // TODO: skiplist refactor
#if 0 // TODO: skiplist refactor
if (TD_ROW_VER(rimem) > maxVer) {
rimem = NULL;
}
@@ -1677,7 +1677,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa
colIdOfRow2 = tdKvRowColIdAt(row2, k);
}
if (colIdOfRow1 < colIdOfRow2) { // the most probability
if (colIdOfRow1 < colIdOfRow2) { // the most probability
if (colIdOfRow1 < pColInfo->info.colId) {
++j;
continue;
@@ -1720,7 +1720,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa
++(*curRow);
}
++nResult;
} else if (update){
} else if (update) {
mergeOption = 2;
} else {
mergeOption = 0;
@@ -1741,7 +1741,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa
++(*curRow);
}
++nResult;
} else if(update) {
} else if (update) {
mergeOption = 2;
} else {
mergeOption = 0;
@@ -1985,7 +1985,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
return;
} else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) {
SSkipListNode* node = NULL;
TSKEY lastRowKey = TSKEY_INITIAL_VAL;
TSKEY lastKeyAppend = TSKEY_INITIAL_VAL;
do {
STSRow* row2 = NULL;
@@ -2018,9 +2018,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
rv2 = TD_ROW_SVER(row2);
}
numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastRowKey);
// numOfRows += 1;
numOfRows +=
mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
}
@@ -2028,7 +2028,6 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
cur->win.ekey = key;
cur->lastKey = key + step;
cur->mixBlock = true;
moveToNextRowInMem(pCheckInfo);
} else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it
#if 0
@@ -2064,7 +2063,11 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
#endif
if (TD_SUPPORT_UPDATE(pCfg->update)) {
doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
if (lastKeyAppend != key) {
lastKeyAppend = key;
++curRow;
}
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
if (rv1 != TD_ROW_SVER(row1)) {
// pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
@@ -2074,10 +2077,10 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
// pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
rv2 = TD_ROW_SVER(row2);
}
numOfRows +=
mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastRowKey);
// ++numOfRows;
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
}
@@ -2118,11 +2121,19 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
int32_t qstart = 0, qend = 0;
getQualifiedRowsPos(pTsdbReadHandle, pos, end, numOfRows, &qstart, &qend);
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, numOfRows, qstart, qend);
if ((lastKeyAppend != TSKEY_INITIAL_VAL) &&
(lastKeyAppend != (ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? tsArray[qstart] : tsArray[qend]))) {
++curRow;
}
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, qstart, qend);
pos += (qend - qstart + 1) * step;
if (numOfRows > 0) {
curRow = numOfRows - 1;
}
cur->win.ekey = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? tsArray[qend] : tsArray[qstart];
cur->lastKey = cur->win.ekey + step;
lastKeyAppend = cur->win.ekey;
}
} while (numOfRows < pTsdbReadHandle->outputCapacity);
@@ -2425,8 +2436,8 @@ static int32_t getFirstFileDataBlock(STsdbReadHandle* pTsdbReadHandle, bool* exi
int32_t numOfBlocks = 0;
int32_t numOfTables = (int32_t)taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo);
STsdbCfg* pCfg = REPO_CFG(pTsdbReadHandle->pTsdb);
STimeWindow win = TSWINDOW_INITIALIZER;
STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdbReadHandle->pTsdb);
STimeWindow win = TSWINDOW_INITIALIZER;
while (true) {
tsdbRLockFS(REPO_FS(pTsdbReadHandle->pTsdb));
@@ -2531,8 +2542,8 @@ int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT* queryHandle, STableBlockDistInfo*
// find the start data block in file
pTsdbReadHandle->locateStart = true;
STsdbCfg* pCfg = REPO_CFG(pTsdbReadHandle->pTsdb);
int32_t fid = getFileIdFromKey(pTsdbReadHandle->window.skey, pCfg->days, pCfg->precision);
STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdbReadHandle->pTsdb);
int32_t fid = getFileIdFromKey(pTsdbReadHandle->window.skey, pCfg->days, pCfg->precision);
tsdbRLockFS(pFileHandle);
tsdbFSIterInit(&pTsdbReadHandle->fileIter, pFileHandle, pTsdbReadHandle->order);
@@ -2632,8 +2643,8 @@ static int32_t getDataBlocksInFiles(STsdbReadHandle* pTsdbReadHandle, bool* exis
// find the start data block in file
if (!pTsdbReadHandle->locateStart) {
pTsdbReadHandle->locateStart = true;
STsdbCfg* pCfg = REPO_CFG(pTsdbReadHandle->pTsdb);
int32_t fid = getFileIdFromKey(pTsdbReadHandle->window.skey, pCfg->days, pCfg->precision);
STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdbReadHandle->pTsdb);
int32_t fid = getFileIdFromKey(pTsdbReadHandle->window.skey, pCfg->days, pCfg->precision);
tsdbRLockFS(pFileHandle);
tsdbFSIterInit(&pTsdbReadHandle->fileIter, pFileHandle, pTsdbReadHandle->order);
@@ -2732,7 +2743,6 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
STSchema* pSchema = NULL;
TSKEY lastRowKey = TSKEY_INITIAL_VAL;
do {
STSRow* row = getSRowInTableMem(pCheckInfo, pTsdbReadHandle->order, pCfg->update, NULL, TD_VER_MAX);
if (row == NULL) {
@@ -2757,8 +2767,8 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
pSchema = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), pCheckInfo->tableId, 0);
rv = TD_ROW_SVER(row);
}
numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, maxRowsToRead, &curRows, row, NULL, numOfCols, pCheckInfo->tableId, pSchema,
NULL, pCfg->update, &lastRowKey);
numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, maxRowsToRead, &curRows, row, NULL, numOfCols, pCheckInfo->tableId,
pSchema, NULL, pCfg->update, &lastRowKey);
if (numOfRows >= maxRowsToRead) {
moveToNextRowInMem(pCheckInfo);
@@ -2767,7 +2777,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
} while (moveToNextRowInMem(pCheckInfo));
taosMemoryFreeClear(pSchema); // free the STSchema
taosMemoryFreeClear(pSchema);  // free the STSchema
assert(numOfRows <= maxRowsToRead);
@@ -2895,8 +2905,8 @@ static bool loadCachedLastRow(STsdbReadHandle* pTsdbReadHandle) {
// if (ret != TSDB_CODE_SUCCESS) {
// return false;
// }
mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, pRow, NULL, numOfCols, pCheckInfo->tableId,
NULL, NULL, true, &lastRowKey);
mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, pRow, NULL, numOfCols,
pCheckInfo->tableId, NULL, NULL, true, &lastRowKey);
taosMemoryFreeClear(pRow);
// update the last key value
@@ -3465,7 +3475,7 @@ void tsdbRetrieveDataBlockInfo(tsdbReaderT* pTsdbReadHandle, SDataBlockInfo* pDa
pDataBlockInfo->rows = cur->rows;
pDataBlockInfo->window = cur->win;
// ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle));
// ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle));
}
/*

View File

@@ -1013,8 +1013,8 @@ static int32_t tsdbSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t
* @return int32_t
*/
static int32_t tsdbGetTSmaDays(STsdb *pTsdb, int64_t interval, int32_t storageLevel) {
STsdbCfg *pCfg = REPO_CFG(pTsdb);
int32_t daysPerFile = pCfg->days;
STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pTsdb);
int32_t daysPerFile = pCfg->days;
if (storageLevel == SMA_STORAGE_LEVEL_TSDB) {
int32_t days = SMA_STORAGE_TSDB_TIMES * (interval / tsTickPerDay[pCfg->precision]);

View File

@@ -60,7 +60,7 @@ static int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq *pMsg) {
SSubmitBlk *pBlock = NULL;
SSubmitBlkIter blkIter = {0};
STSRow *row = NULL;
STsdbCfg *pCfg = REPO_CFG(pTsdb);
STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pTsdb);
TSKEY now = taosGetTimestamp(pCfg->precision);
TSKEY minKey = now - tsTickPerDay[pCfg->precision] * pCfg->keep2;
TSKEY maxKey = now + tsTickPerDay[pCfg->precision] * pCfg->days;

View File

@@ -97,7 +97,6 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddIntegerToObject(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1;
// sync integration
if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1;
SJson *pNodeInfoArr = tjsonCreateArray();
@@ -157,7 +156,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
if (tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1;
if (tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1;
// sync integration
if (tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1;
if (tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1;

View File

@ -124,8 +124,7 @@ _exit:
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
pLoad->vgId = TD_VID(pVnode);
// pLoad->syncState = TAOS_SYNC_STATE_LEADER;
pLoad->syncState = syncGetMyRole(pVnode->sync); // sync integration
pLoad->syncState = syncGetMyRole(pVnode->sync);
pLoad->numOfTables = metaGetTbNum(pVnode->pMeta);
pLoad->numOfTimeSeries = 400;
pLoad->totalStorage = 300;

View File

@ -198,7 +198,6 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
tsdbInsertTSmaData(((SVnode *)pVnode)->pTsdb, smaId, (const char *)data);
}
// sync integration
int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
if (syncEnvIsStart()) {
SSyncNode *pSyncNode = syncNodeAcquire(pVnode->sync);

View File

@ -14,12 +14,6 @@
*/
#include "vnd.h"
// #include "sync.h"
// #include "syncTools.h"
// #include "tmsgcb.h"
// #include "vnodeInt.h"
// sync integration
int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
SSyncInfo syncInfo;

View File

@ -15,8 +15,9 @@
#ifndef TDENGINE_QUERYUTIL_H
#define TDENGINE_QUERYUTIL_H
#include "tcommon.h"
#include <libs/function/function.h>
#include "tbuffer.h"
#include "tcommon.h"
#include "tpagedbuf.h"
#define SET_RES_WINDOW_KEY(_k, _ori, _len, _uid) \
@ -56,9 +57,9 @@ typedef struct SResultRow {
bool endInterp; // the time window end timestamp has done the interpolation already.
bool closed; // this result status: closed or opened
uint32_t numOfRows; // number of rows of current time window
struct SResultRowEntryInfo* pEntryInfo; // For each result column, there is a resultInfo
STimeWindow win;
char *key; // start key of current result row
struct SResultRowEntryInfo pEntryInfo[]; // For each result column, there is a resultInfo
// char *key; // start key of current result row
} SResultRow;
typedef struct SResultRowPosition {

View File

@ -711,7 +711,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo*
SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
#if 0

View File

@ -157,8 +157,6 @@ void clearResultRow(STaskRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow) {
pResultRow->pageId = -1;
pResultRow->offset = -1;
pResultRow->closed = false;
taosMemoryFreeClear(pResultRow->key);
pResultRow->win = TSWINDOW_INITIALIZER;
}

View File

@ -388,6 +388,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// allocate a new buffer page
prepareResultListBuffer(pResultRowInfo, pTaskInfo->env);
if (pResult == NULL) {
ASSERT(pSup->resultRowSize > 0);
pResult = getNewResultRow_rv(pResultBuf, groupId, pSup->resultRowSize);
initResultRow(pResult);
@ -1152,7 +1153,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->resDataInfo.interBufSize = env.calcMemSize;
} else if (pExpr->pExpr->nodeType == QUERY_NODE_COLUMN || pExpr->pExpr->nodeType == QUERY_NODE_OPERATOR ||
pExpr->pExpr->nodeType == QUERY_NODE_VALUE) {
// for simple column, the intermediate buffer needs to hold one element.
// for simple column, the result buffer needs to hold at least one element.
pCtx->resDataInfo.interBufSize = pFunct->resSchema.bytes;
}
@ -1872,7 +1873,7 @@ static void updateTableQueryInfoForReverseScan(STableQueryInfo* pTableQueryInfo)
}
void initResultRow(SResultRow* pResultRow) {
pResultRow->pEntryInfo = (struct SResultRowEntryInfo*)((char*)pResultRow + sizeof(SResultRow));
// pResultRow->pEntryInfo = (struct SResultRowEntryInfo*)((char*)pResultRow + sizeof(SResultRow));
}
/*
@ -1884,7 +1885,7 @@ void initResultRow(SResultRow* pResultRow) {
* offset[0] offset[1] offset[2]
*/
// TODO refactor: some function move away
void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t stage, SExecTaskInfo* pTaskInfo) {
void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t stage, int32_t numOfExprs, SExecTaskInfo* pTaskInfo) {
SqlFunctionCtx* pCtx = pInfo->pCtx;
SSDataBlock* pDataBlock = pInfo->pRes;
int32_t* rowCellInfoOffset = pInfo->rowCellInfoOffset;
@ -1897,6 +1898,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t
SResultRow* pRow = doSetResultOutBufByKey(pSup->pResultBuf, pResultRowInfo, (char*)&tid, sizeof(tid), true, groupId,
pTaskInfo, false, pSup);
ASSERT(pDataBlock->info.numOfCols == numOfExprs);
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
struct SResultRowEntryInfo* pEntry = getResultCell(pRow, i, rowCellInfoOffset);
cleanupResultRowEntry(pEntry);
@ -3624,7 +3626,7 @@ SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t
goto _error;
}
setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, pTaskInfo);
setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, num, pTaskInfo);
code = initGroupCol(pExprInfo, num, pGroupInfo, pInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@ -4217,12 +4219,22 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
pAggSup->pResultRowHashTable = taosHashInit(10, hashFn, true, HASH_NO_LOCK);
if (pAggSup->keyBuf == NULL /*|| pAggSup->pResultRowArrayList == NULL || pAggSup->pResultRowListSet == NULL*/ ||
pAggSup->pResultRowHashTable == NULL) {
if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, 4096, 4096 * 256, pKey, "/tmp/");
uint32_t defaultPgsz = 4096;
while (defaultPgsz < pAggSup->resultRowSize * 4) {
defaultPgsz <<= 1u;
}
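// e.g. with resultRowSize = 3000 the page size doubles 4096 -> 8192 -> 16384,
// the first size large enough to hold four result rows (12000 bytes)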
// at least four pages need to be in buffer
int32_t defaultBufsz = 4096 * 256;
if (defaultBufsz <= defaultPgsz) {
defaultBufsz = defaultPgsz * 4;
}
int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, "/tmp/");
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -4362,6 +4374,10 @@ void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) {
doDestroyBasicInfo(pInfo, numOfOutput);
}
void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*) param;
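// NOTE: no resources owned by the join operator are released here yet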
}
void destroyAggOperatorInfo(void* param, int32_t numOfOutput) {
SAggOperatorInfo* pInfo = (SAggOperatorInfo*)param;
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
@ -4425,7 +4441,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p
initResultSizeInfo(pOperator, numOfRows);
initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str);
setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, pTaskInfo);
setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfCols, pTaskInfo);
pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pInfo->binfo.pCtx, numOfCols);
pOperator->name = "ProjectOperator";
@ -4938,7 +4954,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
SExprInfo* pExprInfo = createExprInfo(pJoinNode->pTargets, NULL, &num);
pOptr = createJoinOperatorInfo(ops, size, pExprInfo, num, pResBlock, pJoinNode->pOnConditions, pTaskInfo);
pOptr = createMergeJoinOperatorInfo(ops, size, pExprInfo, num, pResBlock, pJoinNode->pOnConditions, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_FILL == type) {
SFillPhysiNode* pFillNode = (SFillPhysiNode*)pPhyNode;
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
@ -5510,7 +5526,7 @@ static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
return (pRes->info.rows > 0) ? pRes : NULL;
}
SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo,
SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo,
int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition,
SExecTaskInfo* pTaskInfo) {
SJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SJoinOperatorInfo));
@ -5536,7 +5552,7 @@ SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOf
setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doMergeJoin, NULL, NULL, destroyBasicOperatorInfo, NULL, NULL, NULL);
createOperatorFpSet(operatorDummyOpenFn, doMergeJoin, NULL, NULL, destroyMergeJoinOperator, NULL, NULL, NULL);
int32_t code = appendDownstream(pOperator, pDownstream, numOfDownstream);
if (code != TSDB_CODE_SUCCESS) {
goto _error;

View File

@ -656,7 +656,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, SSDataBlock*
SArray* pColIds = taosArrayInit(4, sizeof(int16_t));
for (int32_t i = 0; i < numOfOutput; ++i) {
SColMatchInfo* id = taosArrayGet(pColList, i);
int16_t colId = id->colId;
int16_t colId = id->colId;
taosArrayPush(pColIds, &colId);
}

View File

@ -36,7 +36,7 @@ target_link_libraries(
PRIVATE os util common nodes function
)
add_library(udf1 MODULE test/udf1.c)
add_library(udf1 STATIC MODULE test/udf1.c)
target_include_directories(
udf1
PUBLIC
@ -50,7 +50,7 @@ target_include_directories(
target_link_libraries(
udf1 PUBLIC os)
add_library(udf2 MODULE test/udf2.c)
add_library(udf2 STATIC MODULE test/udf2.c)
target_include_directories(
udf2
PUBLIC

View File

@ -266,6 +266,20 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l
return TSDB_CODE_SUCCESS;
}
static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
int32_t paraLen = LIST_LENGTH(pFunc->pParameterList);
if (paraLen == 0 || paraLen > 2) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
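// diff takes one or two arguments; the optional second argument drives
// pDiffInfo->ignoreNegative in diffFunctionSetup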
SExprNode* p1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
if (!IS_NUMERIC_TYPE(p1->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = p1->resType;
return TSDB_CODE_SUCCESS;
}
static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
@ -617,7 +631,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "diff",
.type = FUNCTION_TYPE_DIFF,
.classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateInOutNum,
.translateFunc = translateDiff,
.getEnvFunc = getDiffFuncEnv,
.initFunc = diffFunctionSetup,
.processFunc = diffFunction,

View File

@ -75,7 +75,7 @@ typedef struct SPercentileInfo {
typedef struct SDiffInfo {
bool hasPrev;
bool includeNull;
bool ignoreNegative;
bool ignoreNegative; // TODO: replace the ignore-negative handling with a CASE WHEN expression
bool firstOutput;
union {
int64_t i64;
@ -1419,248 +1419,192 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
SDiffInfo* pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo);
pDiffInfo->hasPrev = false;
pDiffInfo->prev.i64 = 0;
pDiffInfo->ignoreNegative = false; // TODO set correct param
pDiffInfo->ignoreNegative = pCtx->param[1].param.i; // set from the optional second function parameter
pDiffInfo->includeNull = false;
pDiffInfo->firstOutput = false;
return true;
}
static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
switch(type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
pDiffInfo->prev.i64 = *(int8_t*) pv; break;
case TSDB_DATA_TYPE_INT:
pDiffInfo->prev.i64 = *(int32_t*) pv; break;
case TSDB_DATA_TYPE_SMALLINT:
pDiffInfo->prev.i64 = *(int16_t*) pv; break;
case TSDB_DATA_TYPE_BIGINT:
pDiffInfo->prev.i64 = *(int64_t*) pv; break;
case TSDB_DATA_TYPE_FLOAT:
pDiffInfo->prev.d64 = *(float *) pv; break;
case TSDB_DATA_TYPE_DOUBLE:
pDiffInfo->prev.d64 = *(double*) pv; break;
default:
ASSERT(0);
}
}
static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos, int32_t order) {
int32_t factor = (order == TSDB_ORDER_ASC) ? 1 : -1;
switch (type) {
case TSDB_DATA_TYPE_INT: {
int32_t v = *(int32_t*)pv;
int32_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
colDataAppendInt32(pOutput, pos, &delta);
}
pDiffInfo->prev.i64 = v;
break;
}
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
int8_t v = *(int8_t*)pv;
int8_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
colDataAppendInt8(pOutput, pos, &delta);
}
pDiffInfo->prev.i64 = v;
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
int16_t v = *(int16_t*)pv;
int16_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
colDataAppendInt16(pOutput, pos, &delta);
}
pDiffInfo->prev.i64 = v;
break;
}
case TSDB_DATA_TYPE_BIGINT: {
int64_t v = *(int64_t*)pv;
int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
colDataAppendInt64(pOutput, pos, &delta);
}
pDiffInfo->prev.i64 = v;
break;
}
case TSDB_DATA_TYPE_FLOAT: {
float v = *(float*)pv;
float delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
colDataAppendFloat(pOutput, pos, &delta);
}
pDiffInfo->prev.d64 = v;
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
double v = *(double*)pv;
double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
colDataAppendDouble(pOutput, pos, &delta);
}
pDiffInfo->prev.d64 = v;
break;
}
default:
ASSERT(0);
}
}
int32_t diffFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SDiffInfo* pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
bool isFirstBlock = (pDiffInfo->hasPrev == false);
int32_t numOfElems = 0;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
int32_t numOfElems = 0;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
int32_t startOffset = pCtx->offset;
switch (pInputCol->info.type) {
case TSDB_DATA_TYPE_INT: {
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
if (pCtx->order == TSDB_ORDER_ASC) {
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
int32_t pos = startOffset + (isFirstBlock ? (numOfElems - 1) : numOfElems);
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
if (pDiffInfo->includeNull) {
colDataSetNull_f(pOutput->nullbitmap, pos);
if (tsList != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
numOfElems += 1;
}
continue;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
if (pCtx->order == TSDB_ORDER_ASC) {
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
int32_t pos = startOffset + numOfElems;
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
if (pDiffInfo->includeNull) {
colDataSetNull_f(pOutput->nullbitmap, pos);
if (tsList != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
int32_t v = *(int32_t*)colDataGetData(pInputCol, i);
if (pDiffInfo->hasPrev) {
int32_t delta = (int32_t)(v - pDiffInfo->prev.i64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
colDataAppendInt32(pOutput, pos, &delta);
}
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
}
pDiffInfo->prev.i64 = v;
pDiffInfo->hasPrev = true;
numOfElems++;
numOfElems += 1;
}
continue;
}
char* pv = colDataGetData(pInputCol, i);
if (pDiffInfo->hasPrev) {
doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order);
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
numOfElems++;
} else {
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
int32_t v = *(int32_t*)colDataGetData(pInputCol, i);
int32_t pos = startOffset + numOfElems;
doSetPrevVal(pDiffInfo, pInputCol->info.type, pv);
}
// there is a row of previous data block to be handled in the first place.
if (pDiffInfo->hasPrev) {
int32_t delta = (int32_t)(pDiffInfo->prev.i64 - v); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
colDataAppendInt32(pOutput, pos, &delta);
}
pDiffInfo->hasPrev = true;
}
} else {
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
int32_t pos = startOffset + numOfElems;
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &pDiffInfo->prevTs);
}
pDiffInfo->hasPrev = false;
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
if (pDiffInfo->includeNull) {
colDataSetNull_f(pOutput->nullbitmap, pos);
if (tsList != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
// it is not the last row of current block
if (i < pInput->numOfRows + pInput->startRowIndex - 1) {
int32_t next = *(int32_t*)colDataGetData(pInputCol, i + 1);
int32_t delta = v - next; // direct previous may be null
colDataAppendInt32(pOutput, pos, &delta);
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
} else {
pDiffInfo->prev.i64 = v;
if (pTsOutput != NULL) {
pDiffInfo->prevTs = tsList[i];
}
pDiffInfo->hasPrev = true;
}
numOfElems++;
numOfElems += 1;
}
continue;
}
break;
}
case TSDB_DATA_TYPE_BIGINT: {
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
char* pv = colDataGetData(pInputCol, i);
// there is a row of previous data block to be handled in the first place.
if (pDiffInfo->hasPrev) {
doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order);
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &pDiffInfo->prevTs);
}
int32_t v = 0;
if (pDiffInfo->hasPrev) {
v = *(int64_t*)colDataGetData(pInputCol, i);
int64_t delta = (int64_t)(v - pDiffInfo->prev.i64); // direct previous may be null
if (pDiffInfo->ignoreNegative) {
continue;
}
// *(pOutput++) = delta;
// *pTimestamp = (tsList != NULL)? tsList[i]:0;
//
// pOutput += 1;
// pTimestamp += 1;
}
pDiffInfo->prev.i64 = v;
pDiffInfo->hasPrev = true;
numOfElems++;
} else {
doSetPrevVal(pDiffInfo, pInputCol->info.type, pv);
}
break;
}
#if 0
case TSDB_DATA_TYPE_DOUBLE: {
double *pData = (double *)data;
double *pOutput = (double *)pCtx->pOutput;
for (; i < pCtx->size && i >= 0; i += step) {
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) {
continue;
}
if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) {
continue;
}
if (pDiffInfo->hasPrev) { // initial value is not set yet
SET_DOUBLE_VAL(pOutput, pData[i] - pDiffInfo->d64Prev); // direct previous may be null
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
}
pDiffInfo->d64Prev = pData[i];
pDiffInfo->hasPrev = true;
numOfElems++;
pDiffInfo->hasPrev = true;
if (pTsOutput != NULL) {
pDiffInfo->prevTs = tsList[i];
}
break;
}
case TSDB_DATA_TYPE_FLOAT: {
float *pData = (float *)data;
float *pOutput = (float *)pCtx->pOutput;
for (; i < pCtx->size && i >= 0; i += step) {
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) {
continue;
}
if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) {
continue;
}
if (pDiffInfo->hasPrev) { // initial value is not set yet
*pOutput = (float)(pData[i] - pDiffInfo->d64Prev); // direct previous may be null
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
}
pDiffInfo->d64Prev = pData[i];
pDiffInfo->hasPrev = true;
numOfElems++;
}
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
int16_t *pData = (int16_t *)data;
int16_t *pOutput = (int16_t *)pCtx->pOutput;
for (; i < pCtx->size && i >= 0; i += step) {
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) {
continue;
}
if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) {
continue;
}
if (pDiffInfo->hasPrev) { // initial value is not set yet
*pOutput = (int16_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
}
pDiffInfo->i64Prev = pData[i];
pDiffInfo->hasPrev = true;
numOfElems++;
}
break;
}
case TSDB_DATA_TYPE_TINYINT: {
int8_t *pData = (int8_t *)data;
int8_t *pOutput = (int8_t *)pCtx->pOutput;
for (; i < pCtx->size && i >= 0; i += step) {
if (pCtx->hasNull && isNull((char *)&pData[i], pCtx->inputType)) {
continue;
}
if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) {
continue;
}
if (pDiffInfo->hasPrev) { // initial value is not set yet
*pOutput = (int8_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
}
pDiffInfo->i64Prev = pData[i];
pDiffInfo->hasPrev = true;
numOfElems++;
}
break;
}
#endif
default:
break;
// qError("error input type");
}
// initial value is not set yet
if (numOfElems <= 0) {
return 0;
} else {
return (isFirstBlock) ? numOfElems - 1 : numOfElems;
}
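// numOfElems now counts only rows actually emitted, so the old first-block
// (numOfElems - 1) adjustment is no longer needed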
return numOfElems;
}
bool getTopBotFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {

View File

@ -19,7 +19,6 @@
#include "thash.h"
#include "ttypes.h"
//#include "tfill.h"
#include "function.h"
#include "taggfunction.h"
#include "tbuffer.h"
@ -27,7 +26,6 @@
#include "thistogram.h"
#include "tpercentile.h"
#include "ttszip.h"
//#include "queryLog.h"
#include "tdatablock.h"
#include "tudf.h"

View File

@ -127,7 +127,7 @@ enum {
int64_t gUdfTaskSeqNum = 0;
typedef struct SUdfdProxy {
char udfdPipeName[UDF_LISTEN_PIPE_NAME_LEN];
char udfdPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2];
uv_barrier_t gUdfInitBarrier;
uv_loop_t gUdfdLoop;
@ -224,9 +224,15 @@ int32_t getUdfdPipeName(char* pipeName, int32_t size) {
size_t dnodeIdSize = sizeof(dnodeId);
int32_t err = uv_os_getenv(UDF_DNODE_ID_ENV_NAME, dnodeId, &dnodeIdSize);
if (err != 0) {
fnError("get dnode id from env. error: %s.", uv_err_name(err));
dnodeId[0] = '1';
dnodeId[1] = 0; // keep the fallback id NUL-terminated
}
#ifdef _WIN32
snprintf(pipeName, size, "%s%s", UDF_LISTEN_PIPE_NAME_PREFIX, dnodeId);
#else
snprintf(pipeName, size, "%s/%s%s", tsDataDir, UDF_LISTEN_PIPE_NAME_PREFIX, dnodeId);
#endif
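// the resulting pipe path is <UDF_LISTEN_PIPE_NAME_PREFIX><dnodeId>, rooted
// under tsDataDir on non-Windows platforms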
fnInfo("get dnode id from env. dnode id: %s. pipe path: %s", dnodeId, pipeName);
return 0;
}
@ -998,7 +1004,7 @@ int32_t udfcOpen() {
return 0;
}
SUdfdProxy *proxy = &gUdfdProxy;
getUdfdPipeName(proxy->udfdPipeName, UDF_LISTEN_PIPE_NAME_LEN);
getUdfdPipeName(proxy->udfdPipeName, sizeof(proxy->udfdPipeName));
proxy->gUdfcState = UDFC_STATE_STARTNG;
uv_barrier_init(&proxy->gUdfInitBarrier, 2);
uv_thread_create(&proxy->gUdfLoopThread, constructUdfService, proxy);
@ -1253,7 +1259,9 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult
return false;
}
UdfcFuncHandle handle;
if (setupUdf((char*)pCtx->udfName, &handle) != 0) {
int32_t udfCode = 0;
if ((udfCode = setupUdf((char*)pCtx->udfName, &handle)) != 0) {
fnError("udfAggInit error. step setupUdf. udf code: %d", udfCode);
return false;
}
SClientUdfUvSession *session = (SClientUdfUvSession *)handle;
@ -1266,7 +1274,8 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult
udfRes->session = (SClientUdfUvSession *)handle;
SUdfInterBuf buf = {0};
if (callUdfAggInit(handle, &buf) != 0) {
if ((udfCode = callUdfAggInit(handle, &buf)) != 0) {
fnError("udfAggInit error. step callUdfAggInit. udf code: %d", udfCode);
return false;
}
udfRes->interResNum = buf.numOfResult;
@ -1310,21 +1319,23 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
.numOfResult = udfRes->interResNum};
SUdfInterBuf newState = {0};
callUdfAggProcess(session, inputBlock, &state, &newState);
udfRes->interResNum = newState.numOfResult;
memcpy(udfRes->interResBuf, newState.buf, newState.bufLen);
int32_t udfCode = callUdfAggProcess(session, inputBlock, &state, &newState);
if (udfCode != 0) {
fnError("udfAggProcess error. code: %d", udfCode);
newState.numOfResult = 0;
} else {
udfRes->interResNum = newState.numOfResult;
memcpy(udfRes->interResBuf, newState.buf, newState.bufLen);
}
if (newState.numOfResult == 1 || state.numOfResult == 1) {
GET_RES_INFO(pCtx)->numOfRes = 1;
}
blockDataDestroy(inputBlock);
taosArrayDestroy(tempBlock.pDataBlock);
taosMemoryFree(newState.buf);
return 0;
return TSDB_CODE_SUCCESS;
}
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) {
@ -1338,15 +1349,22 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) {
SUdfInterBuf state = {.buf = udfRes->interResBuf,
.bufLen = session->bufSize,
.numOfResult = udfRes->interResNum};
callUdfAggFinalize(session, &state, &resultBuf);
udfRes->finalResBuf = resultBuf.buf;
udfRes->finalResNum = resultBuf.numOfResult;
teardownUdf(session);
if (resultBuf.numOfResult == 1) {
GET_RES_INFO(pCtx)->numOfRes = 1;
int32_t udfCallCode = 0;
udfCallCode = callUdfAggFinalize(session, &state, &resultBuf);
if (udfCallCode != 0) {
fnError("udfAggFinalize error. callUdfAggFinalize step. udf code:%d", udfCallCode);
GET_RES_INFO(pCtx)->numOfRes = 0;
} else {
memcpy(udfRes->finalResBuf, resultBuf.buf, session->outputLen);
udfRes->finalResNum = resultBuf.numOfResult;
GET_RES_INFO(pCtx)->numOfRes = udfRes->finalResNum;
}
int32_t code = teardownUdf(session);
if (code != 0) {
fnError("udfAggFinalize error. teardownUdf step. udf code: %d", code);
}
return functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf);
}

View File

@ -30,7 +30,7 @@ typedef struct SUdfdContext {
uv_loop_t *loop;
uv_pipe_t ctrlPipe;
uv_signal_t intrSignal;
char listenPipeName[UDF_LISTEN_PIPE_NAME_LEN];
char listenPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2];
uv_pipe_t listeningPipe;
void *clientRpc;
@ -652,7 +652,7 @@ static int32_t udfdUvInit() {
uv_pipe_open(&global.ctrlPipe, 0);
uv_read_start((uv_stream_t *)&global.ctrlPipe, udfdCtrlAllocBufCb, udfdCtrlReadCb);
getUdfdPipeName(global.listenPipeName, UDF_LISTEN_PIPE_NAME_LEN);
getUdfdPipeName(global.listenPipeName, sizeof(global.listenPipeName));
removeListeningPipe();
@ -696,6 +696,7 @@ static int32_t udfdRun() {
fnInfo("udfd stopped. result: %s, code: %d", uv_err_name(code), code);
int codeClose = uv_loop_close(global.loop);
fnDebug("uv loop close. result: %s", uv_err_name(codeClose));
removeListeningPipe();
udfdCloseClientRpc();
uv_mutex_destroy(&global.udfsMutex);
taosHashCleanup(global.udfsHash);

View File

@ -30,6 +30,7 @@ static void indexMemUnRef(MemTable* tbl);
static void indexCacheTermDestroy(CacheTerm* ct);
static int32_t indexCacheTermCompare(const void* l, const void* r);
static int32_t indexCacheJsonTermCompare(const void* l, const void* r);
static char* indexCacheTermGet(const void* pData);
static MemTable* indexInternalCacheCreate(int8_t type);
@ -63,6 +64,7 @@ typedef enum { MATCH, CONTINUE, BREAK } TExeCond;
typedef TExeCond (*_cache_range_compare)(void* a, void* b, int8_t type);
static TExeCond tDoCommpare(__compar_fn_t func, int8_t comType, void* a, void* b) {
// optime later
int32_t ret = func(a, b);
switch (comType) {
case QUERY_LESS_THAN: {
@ -242,6 +244,7 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul
break;
}
CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node);
if (0 == strcmp(c->colVal, pCt->colVal)) {
if (c->operaType == ADD_VALUE) {
INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
@ -311,6 +314,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe
}
char* key = indexCacheTermGet(pCt);
// SSkipListIterator* iter = tSkipListCreateIter(mem->mem);
SSkipListIterator* iter = tSkipListCreateIterFromVal(mem->mem, key, TSDB_DATA_TYPE_BINARY, TSDB_ORDER_ASC);
while (tSkipListIterNext(iter)) {
SSkipListNode* node = tSkipListIterGet(iter);
@ -318,6 +322,10 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe
break;
}
CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node);
printf("json val: %s\n", c->colVal);
if (0 != strncmp(c->colVal, term->colName, term->nColName)) {
continue;
}
TExeCond cond = cmpFn(c->colVal + skip, term->colVal, dType);
if (cond == MATCH) {
@ -598,24 +606,11 @@ int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result
indexMemRef(imm);
taosThreadMutexUnlock(&pCache->mtx);
// SIndexTerm* term = query->term;
// EIndexQueryType qtype = query->qType;
// bool isJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(term->colType, TSDB_DATA_TYPE_JSON);
// char* p = term->colVal;
// if (isJson) {
// p = indexPackJsonData(term);
//}
// CacheTerm ct = {.colVal = p, .version = atomic_load_32(&pCache->version)};
int ret = indexQueryMem(mem, query, result, s);
if (ret == 0 && *s != kTypeDeletion) {
// continue search in imm
ret = indexQueryMem(imm, query, result, s);
}
// if (isJson) {
// taosMemoryFreeClear(p);
//}
indexMemUnRef(mem);
indexMemUnRef(imm);
@ -682,14 +677,52 @@ static int32_t indexCacheTermCompare(const void* l, const void* r) {
return cmp;
}
static int indexFindCh(char* a, char c) {
char* p = a;
while (*p != 0 && *p++ != c) {
}
return p - a;
}
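// indexFindCh returns the offset just past the first occurrence of c (or
// strlen(a) when absent), so alen/blen below include the '&' delimiter.
// JSON cache keys are assumed to be encoded as <colName>'&'<marker><value>:
// compare the names first, then the byte right after '&', then the values
// after skipping two more bytes.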
static int indexCacheJsonTermCompareImpl(char* a, char* b) {
int alen = indexFindCh(a, '&');
int blen = indexFindCh(b, '&');
int cmp = strncmp(a, b, MIN(alen, blen));
if (cmp == 0) {
cmp = alen - blen;
if (cmp != 0) {
return cmp;
}
cmp = *(a + alen) - *(b + blen);
if (cmp != 0) {
return cmp;
}
alen += 2;
blen += 2;
cmp = strcmp(a + alen, b + blen);
}
return cmp;
}
static int32_t indexCacheJsonTermCompare(const void* l, const void* r) {
CacheTerm* lt = (CacheTerm*)l;
CacheTerm* rt = (CacheTerm*)r;
// compare colVal
int cmp = indexCacheJsonTermCompareImpl(lt->colVal, rt->colVal);
if (cmp == 0) {
return rt->version - lt->version;
}
return cmp;
}
static MemTable* indexInternalCacheCreate(int8_t type) {
type = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type;
int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type;
int32_t (*cmpFn)(const void* l, const void* r) =
INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? indexCacheJsonTermCompare : indexCacheTermCompare;
MemTable* tbl = taosMemoryCalloc(1, sizeof(MemTable));
indexMemRef(tbl);
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
tbl->mem = tSkipListCreate(MAX_SKIP_LIST_LEVEL, type, MAX_INDEX_KEY_LEN, indexCacheTermCompare, SL_ALLOW_DUP_KEY,
indexCacheTermGet);
if (ttype == TSDB_DATA_TYPE_BINARY || ttype == TSDB_DATA_TYPE_NCHAR) {
tbl->mem =
tSkipListCreate(MAX_SKIP_LIST_LEVEL, ttype, MAX_INDEX_KEY_LEN, cmpFn, SL_ALLOW_DUP_KEY, indexCacheTermGet);
}
return tbl;
}

View File

@ -129,7 +129,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
SIndexMultiTerm* terms = indexMultiTermCreate();
indexMultiTermAdd(terms, term);
for (size_t i = 0; i < 1000000; i++) {
for (size_t i = 0; i < 1000; i++) {
tIndexJsonPut(index, terms, i);
}
indexMultiTermDestroy(terms);
@ -148,4 +148,36 @@ TEST_F(JsonEnv, testWriteMillonData) {
assert(100 == taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
}
{
{
std::string colName("test");
std::string colVal("ab");
SIndexMultiTermQuery* mq = indexMultiTermQueryCreate(MUST);
SIndexTerm* q = indexTermCreate(1, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(),
colVal.c_str(), colVal.size());
SArray* result = taosArrayInit(1, sizeof(uint64_t));
indexMultiTermQueryAdd(mq, q, QUERY_GREATER_THAN);
tIndexJsonSearch(index, mq, result);
assert(0 == taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
}
{
{
std::string colName("test");
std::string colVal("ab");
SIndexMultiTermQuery* mq = indexMultiTermQueryCreate(MUST);
SIndexTerm* q = indexTermCreate(1, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(),
colVal.c_str(), colVal.size());
SArray* result = taosArrayInit(1, sizeof(uint64_t));
indexMultiTermQueryAdd(mq, q, QUERY_GREATER_EQUAL);
tIndexJsonSearch(index, mq, result);
assert(100 == taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
}
}
}
}

View File

@ -168,8 +168,6 @@ const char* nodesNodeName(ENodeType type) {
return "ShowConsumersStmt";
case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
return "ShowSubscribesStmt";
case QUERY_NODE_SHOW_TRANS_STMT:
return "ShowTransStmt";
case QUERY_NODE_SHOW_SMAS_STMT:
return "ShowSmasStmt";
case QUERY_NODE_SHOW_CONFIGS_STMT:
@ -1972,10 +1970,10 @@ static int32_t datumToJson(const void* pObj, SJson* pJson) {
code = tjsonAddDoubleToObject(pJson, jkValueDatum, pNode->datum.d);
break;
case TSDB_DATA_TYPE_NCHAR: {
//cJSON only support utf-8 encoding. Convert memory content to hex string.
char *buf = taosMemoryCalloc(varDataLen(pNode->datum.p) * 2 + 1, sizeof(char));
// cJSON only support utf-8 encoding. Convert memory content to hex string.
char* buf = taosMemoryCalloc(varDataLen(pNode->datum.p) * 2 + 1, sizeof(char));
code = taosHexEncode(varDataVal(pNode->datum.p), buf, varDataLen(pNode->datum.p));
if(code != TSDB_CODE_SUCCESS) {
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(buf);
return TSDB_CODE_TSC_INVALID_VALUE;
}
@ -2086,7 +2084,7 @@ static int32_t jsonToDatum(const SJson* pJson, void* pObj) {
}
varDataSetLen(pNode->datum.p, pNode->node.resType.bytes);
if (TSDB_DATA_TYPE_NCHAR == pNode->node.resType.type) {
char *buf = taosMemoryCalloc(1, pNode->node.resType.bytes * 2 + VARSTR_HEADER_SIZE + 1);
char* buf = taosMemoryCalloc(1, pNode->node.resType.bytes * 2 + VARSTR_HEADER_SIZE + 1);
if (NULL == buf) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;

View File

@ -190,7 +190,6 @@ SNodeptr nodesMakeNode(ENodeType type) {
case QUERY_NODE_SHOW_TOPICS_STMT:
case QUERY_NODE_SHOW_CONSUMERS_STMT:
case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
case QUERY_NODE_SHOW_TRANS_STMT:
case QUERY_NODE_SHOW_SMAS_STMT:
case QUERY_NODE_SHOW_CONFIGS_STMT:
case QUERY_NODE_SHOW_QUERIES_STMT:

View File

@ -1300,9 +1300,10 @@ SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const
}
SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId) {
SNode* pStmt = nodesMakeNode(type);
SKillStmt* pStmt = nodesMakeNode(type);
CHECK_OUT_OF_MEM(pStmt);
return pStmt;
pStmt->targetId = strtol(pId->z, NULL, 10);
return (SNode*)pStmt;
}
SNode* createMergeVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId1, const SToken* pVgId2) {

View File

@ -23,12 +23,17 @@ typedef struct SAuthCxt {
static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt);
static int32_t checkAuth(SParseContext* pCxt, const char* dbName, AUTH_TYPE type) {
static int32_t checkAuth(SParseContext* pCxt, const char* pDbName, AUTH_TYPE type) {
if (pCxt->isSuperUser) {
return TSDB_CODE_SUCCESS;
}
SName name;
tNameSetDbName(&name, pCxt->acctId, pDbName, strlen(pDbName));
char dbFname[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(&name, dbFname);
bool pass = false;
int32_t code = catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbName, type, &pass);
int32_t code =
catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbFname, type, &pass);
return TSDB_CODE_SUCCESS == code ? (pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code;
}
@ -130,7 +135,6 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
case QUERY_NODE_SHOW_TOPICS_STMT:
case QUERY_NODE_SHOW_CONSUMERS_STMT:
case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
case QUERY_NODE_SHOW_TRANS_STMT:
case QUERY_NODE_SHOW_SMAS_STMT:
case QUERY_NODE_SHOW_CONFIGS_STMT:
case QUERY_NODE_SHOW_CONNECTIONS_STMT:

View File

@ -1549,7 +1549,7 @@ typedef struct SmlExecHandle {
SQuery* pQuery;
} SSmlExecHandle;
static int32_t smlBoundColumns(SArray *cols, SParsedDataColInfo* pColList, SSchema* pSchema) {
static int32_t smlBoundColumnData(SArray *cols, SParsedDataColInfo* pColList, SSchema* pSchema) {
col_id_t nCols = pColList->numOfCols;
pColList->numOfBound = 0;
@ -1620,7 +1620,7 @@ static int32_t smlBoundColumns(SArray *cols, SParsedDataColInfo* pColList, SSche
return TSDB_CODE_SUCCESS;
}
static int32_t smlBoundTags(SArray *cols, SKVRowBuilder *tagsBuilder, SParsedDataColInfo* tags, SSchema* pSchema, SKVRow *row, SMsgBuf *msg) {
static int32_t smlBuildTagRow(SArray *cols, SKVRowBuilder *tagsBuilder, SParsedDataColInfo* tags, SSchema* pSchema, SKVRow *row, SMsgBuf *msg) {
if (tdInitKVRowBuilder(tagsBuilder) < 0) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -1642,20 +1642,20 @@ static int32_t smlBoundTags(SArray *cols, SKVRowBuilder *tagsBuilder, SParsedDat
return TSDB_CODE_SUCCESS;
}
int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *colsHash, SArray *cols, bool format,
int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SArray *colsSchema, SArray *cols, bool format,
STableMeta *pTableMeta, char *tableName, char *msgBuf, int16_t msgBufLen) {
SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen};
SSmlExecHandle *smlHandle = (SSmlExecHandle *)handle;
SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
setBoundColumnInfo(&smlHandle->tags, pTagsSchema, getNumOfTags(pTableMeta));
int ret = smlBoundColumns(tags, &smlHandle->tags, pTagsSchema);
int ret = smlBoundColumnData(tags, &smlHandle->tags, pTagsSchema);
if(ret != TSDB_CODE_SUCCESS){
buildInvalidOperationMsg(&pBuf, "bound tags error");
return ret;
}
SKVRow row = NULL;
ret = smlBoundTags(tags, &smlHandle->tagsBuilder, &smlHandle->tags, pTagsSchema, &row, &pBuf);
ret = smlBuildTagRow(tags, &smlHandle->tagsBuilder, &smlHandle->tags, pTagsSchema, &row, &pBuf);
if(ret != TSDB_CODE_SUCCESS){
return ret;
}
@ -1673,21 +1673,7 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *co
SSchema* pSchema = getTableColumnSchema(pTableMeta);
if(format){
ret = smlBoundColumns(taosArrayGetP(colsFormat, 0), &pDataBlock->boundColumnInfo, pSchema);
}else{
SArray *columns = taosArrayInit(16, POINTER_BYTES);
void **p1 = taosHashIterate(colsHash, NULL);
while (p1) {
SSmlKv* kv = *p1;
taosArrayPush(columns, &kv);
p1 = taosHashIterate(colsHash, p1);
}
ret = smlBoundColumns(columns, &pDataBlock->boundColumnInfo, pSchema);
taosArrayDestroy(columns);
}
ret = smlBoundColumnData(colsSchema, &pDataBlock->boundColumnInfo, pSchema);
if(ret != TSDB_CODE_SUCCESS){
buildInvalidOperationMsg(&pBuf, "bound cols error");
return ret;
@ -1712,14 +1698,16 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *co
STSRow* row = (STSRow*)(pDataBlock->pData + pDataBlock->size); // skip the SSubmitBlk header
tdSRowResetBuf(pBuilder, row);
void *rowData = NULL;
size_t rowDataSize = 0;
if(format){
rowData = taosArrayGetP(colsFormat, r);
rowDataSize = taosArrayGetSize(rowData);
}else{
rowData = taosArrayGetP(cols, r);
}
// 1. set the parsed value from sql string
for (int c = 0; c < spd->numOfBound; ++c) {
for (int c = 0, j = 0; c < spd->numOfBound; ++c) {
SSchema* pColSchema = &pSchema[spd->boundColumns[c] - 1];
param.schema = pColSchema;
@ -1727,23 +1715,27 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *co
SSmlKv *kv = NULL;
if(format){
kv = taosArrayGetP(rowData, c);
if (!kv){
char msg[64] = {0};
sprintf(msg, "cols num not the same like before:%d", r);
return buildInvalidOperationMsg(&pBuf, msg);
if(j < rowDataSize){
kv = taosArrayGetP(rowData, j);
if (rowDataSize != spd->numOfBound && (kv->keyLen != strlen(pColSchema->name) || strncmp(kv->key, pColSchema->name, kv->keyLen) != 0)){
kv = NULL;
}else{
j++;
}
}
}else{
void **p = taosHashGet(rowData, pColSchema->name, strlen(pColSchema->name));
kv = *p;
if(p) kv = *p;
}
if (kv->length == 0) {
if (!kv || kv->length == 0) {
MemRowAppend(&pBuf, NULL, 0, &param);
} else {
int32_t colLen = pColSchema->bytes;
if (IS_VAR_DATA_TYPE(pColSchema->type)) {
colLen = kv->length;
} else if(pColSchema->type == TSDB_DATA_TYPE_TIMESTAMP){
kv->i = convertTimePrecision(kv->i, TSDB_TIME_PRECISION_NANO, pTableMeta->tableInfo.precision);
}
MemRowAppend(&pBuf, &(kv->value), colLen, &param);

View File

@ -4131,6 +4131,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
case QUERY_NODE_SHOW_QUERIES_STMT:
case QUERY_NODE_SHOW_CLUSTER_STMT:
case QUERY_NODE_SHOW_TOPICS_STMT:
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
code = rewriteShow(pCxt, pQuery);
break;
case QUERY_NODE_CREATE_TABLE_STMT:

View File

@ -100,6 +100,14 @@ void generateInformationSchema(MockCatalogService* mcs) {
}
}
void generatePerformanceSchema(MockCatalogService* mcs) {
{
ITableBuilder& builder = mcs->createTableBuilder("performance_schema", "trans", TSDB_SYSTEM_TABLE, 1)
.addColumn("id", TSDB_DATA_TYPE_INT);
builder.done();
}
}
/*
* Table:t1
* Field | Type | DataType | Bytes |
@ -244,6 +252,7 @@ void initMetaDataEnv() {
void generateMetaData() {
generateInformationSchema(mockCatalogService.get());
generatePerformanceSchema(mockCatalogService.get());
generateTestT1(mockCatalogService.get());
generateTestST1(mockCatalogService.get());
mockCatalogService->showTables();

View File

@ -347,10 +347,21 @@ int32_t sclExecFunction(SFunctionNode *node, SScalarCtx *ctx, SScalarParam *outp
if (fmIsUserDefinedFunc(node->funcId)) {
UdfcFuncHandle udfHandle = NULL;
SCL_ERR_JRET(setupUdf(node->functionName, &udfHandle));
code = setupUdf(node->functionName, &udfHandle);
if (code != 0) {
sclError("fmExecFunction error. setupUdf. function name: %s, code:%d", node->functionName, code);
goto _return;
}
code = callUdfScalarFunc(udfHandle, params, paramNum, output);
teardownUdf(udfHandle);
SCL_ERR_JRET(code);
if (code != 0) {
sclError("fmExecFunction error. callUdfScalarFunc. function name: %s, udf code:%d", node->functionName, code);
goto _return;
}
code = teardownUdf(udfHandle);
if (code != 0) {
sclError("fmExecFunction error. callUdfScalarFunc. function name: %s, udf code:%d", node->functionName, code);
goto _return;
}
} else {
SScalarFuncExecFuncs ffpSet = {0};
code = fmGetScalarFuncExecFuncs(node->funcId, &ffpSet);

View File

@ -709,10 +709,6 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp
int16_t outputType = GET_PARAM_TYPE(&pOutput[0]);
int64_t outputLen = GET_PARAM_BYTES(&pOutput[0]);
if (IS_VAR_DATA_TYPE(outputType)) {
outputLen += VARSTR_HEADER_SIZE;
}
char *outputBuf = taosMemoryCalloc(outputLen * pInput[0].numOfRows, 1);
char *output = outputBuf;
@ -826,7 +822,7 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp
varDataSetLen(output, len);
}
//for constant conversion, need to set proper length of pOutput description
if (len < outputLen - VARSTR_HEADER_SIZE) {
if (len < outputLen) {
pOutput->columnData->info.bytes = len;
}
break;

View File

@ -65,6 +65,8 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
newCommitIndex = index;
sTrace("syncMaybeAdvanceCommitIndex maybe to update, newCommitIndex:%ld commit, pSyncNode->commitIndex:%ld",
newCommitIndex, pSyncNode->commitIndex);
syncEntryDestory(pEntry);
break;
} else {
sTrace(
@ -72,6 +74,8 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
"pSyncNode->pRaftStore->currentTerm:%lu",
pEntry->term, pSyncNode->pRaftStore->currentTerm);
}
syncEntryDestory(pEntry);
}
}

View File

@ -67,6 +67,7 @@ typedef struct SSrvMsg {
typedef struct SWorkThrdObj {
TdThread thread;
uv_connect_t connect_req;
uv_pipe_t* pipe;
uv_os_fd_t fd;
uv_loop_t* loop;
@ -87,8 +88,10 @@ typedef struct SServerObj {
// work thread info
int workerIdx;
int numOfThreads;
int numOfWorkerReady;
SWorkThrdObj** pThreadObj;
uv_pipe_t pipeListen;
uv_pipe_t** pipe;
uint32_t ip;
uint32_t port;
@ -161,7 +164,7 @@ static void* transWorkerThread(void* arg);
static void* transAcceptThread(void* arg);
// add handle loop
static bool addHandleToWorkloop(void* arg);
static bool addHandleToWorkloop(SWorkThrdObj* pThrd, char* pipeName);
static bool addHandleToAcceptloop(void* arg);
#define CONN_SHOULD_RELEASE(conn, head) \
@ -577,6 +580,12 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) {
uv_tcp_init(pObj->loop, cli);
if (uv_accept(stream, (uv_stream_t*)cli) == 0) {
if (pObj->numOfWorkerReady < pObj->numOfThreads) {
tError("worker-threads are not ready for all, need %d instead of %d.", pObj->numOfThreads, pObj->numOfWorkerReady);
uv_close((uv_handle_t*)cli, NULL);
return;
}
uv_write_t* wr = (uv_write_t*)taosMemoryMalloc(sizeof(uv_write_t));
wr->data = cli;
uv_buf_t buf = uv_buf_init((char*)notify, strlen(notify));
@ -672,15 +681,21 @@ void* transAcceptThread(void* arg) {
return NULL;
}
static bool addHandleToWorkloop(void* arg) {
SWorkThrdObj* pThrd = arg;
void uvOnPipeConnectionCb(uv_connect_t *connect, int status) {
if (status != 0) {
return;
}
SWorkThrdObj* pThrd = container_of(connect, SWorkThrdObj, connect_req);
uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
}
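// each worker now dials the server's named pipe instead of inheriting one end
// of a socketpair; reading starts only once this connect callback fires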
static bool addHandleToWorkloop(SWorkThrdObj* pThrd, char* pipeName) {
pThrd->loop = (uv_loop_t*)taosMemoryMalloc(sizeof(uv_loop_t));
if (0 != uv_loop_init(pThrd->loop)) {
return false;
}
uv_pipe_init(pThrd->loop, pThrd->pipe, 1);
uv_pipe_open(pThrd->pipe, pThrd->fd);
// int r = uv_pipe_open(pThrd->pipe, pThrd->fd);
pThrd->pipe->data = pThrd;
@ -691,7 +706,8 @@ static bool addHandleToWorkloop(void* arg) {
QUEUE_INIT(&pThrd->conn);
pThrd->asyncPool = transCreateAsyncPool(pThrd->loop, 5, pThrd, uvWorkerAsyncCb);
uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb);
// uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
return true;
}
@ -802,12 +818,32 @@ static void uvDestroyConn(uv_handle_t* handle) {
uv_walk(thrd->loop, uvWalkCb, NULL);
}
}
static void uvPipeListenCb(uv_stream_t* handle, int status) {
ASSERT(status == 0);
SServerObj* srv = container_of(handle, SServerObj, pipeListen);
uv_pipe_t* pipe = &(srv->pipe[srv->numOfWorkerReady][0]);
ASSERT(0 == uv_pipe_init(srv->loop, pipe, 1));
ASSERT(0 == uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe));
ASSERT(1 == uv_is_readable((uv_stream_t*)pipe));
ASSERT(1 == uv_is_writable((uv_stream_t*)pipe));
ASSERT(0 == uv_is_closing((uv_handle_t*)pipe));
srv->numOfWorkerReady++;
// ASSERT(0 == uv_listen((uv_stream_t*)&ctx.send.tcp, 512, uvOnAcceptCb));
// r = uv_read_start((uv_stream_t*)&ctx.channel, alloc_cb, read_cb);
// ASSERT(r == 0);
}
void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
SServerObj* srv = taosMemoryCalloc(1, sizeof(SServerObj));
srv->loop = (uv_loop_t*)taosMemoryMalloc(sizeof(uv_loop_t));
srv->numOfThreads = numOfThreads;
srv->workerIdx = 0;
srv->numOfWorkerReady = 0;
srv->pThreadObj = (SWorkThrdObj**)taosMemoryCalloc(srv->numOfThreads, sizeof(SWorkThrdObj*));
srv->pipe = (uv_pipe_t**)taosMemoryCalloc(srv->numOfThreads, sizeof(uv_pipe_t*));
srv->ip = ip;
@ -817,6 +853,16 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
taosThreadOnce(&transModuleInit, uvInitEnv);
transSrvInst++;
char pipeName[64];
assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
#ifdef WINDOWS
snprintf(pipeName, sizeof(pipeName), "\\\\?\\pipe\\trans.rpc\\%08X-%lu", taosSafeRand(), GetCurrentProcessId());
#else
snprintf(pipeName, sizeof(pipeName), ".trans.rpc\\%08X-%lu", taosSafeRand(), taosGetSelfPthreadId());
#endif
assert(0 == uv_pipe_bind(&srv->pipeListen, pipeName));
assert(0 == uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb));
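// uvPipeListenCb accepts one pipe connection per worker and bumps
// numOfWorkerReady; uvOnAcceptCb rejects client connections until every
// worker has connected back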
for (int i = 0; i < srv->numOfThreads; i++) {
SWorkThrdObj* thrd = (SWorkThrdObj*)taosMemoryCalloc(1, sizeof(SWorkThrdObj));
thrd->pTransInst = shandle;
@ -826,17 +872,22 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
srv->pipe[i] = (uv_pipe_t*)taosMemoryCalloc(2, sizeof(uv_pipe_t));
uv_os_sock_t fds[2];
if (uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
goto End;
}
uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1);
uv_pipe_open(&(srv->pipe[i][0]), fds[1]); // init write
// #ifdef WINDOWS
// uv_file fds[2];
// if (uv_pipe(fds, UV_READABLE_PIPE|UV_WRITABLE_PIPE|UV_NONBLOCK_PIPE, UV_READABLE_PIPE|UV_WRITABLE_PIPE|UV_NONBLOCK_PIPE) != 0) {
// #else
// uv_os_sock_t fds[2];
// if (uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
// #endif
// goto End;
// }
// uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1);
// uv_pipe_open(&(srv->pipe[i][0]), fds[1]); // init write
thrd->fd = fds[0];
// thrd->fd = fds[0];
thrd->pipe = &(srv->pipe[i][1]); // init read
if (false == addHandleToWorkloop(thrd)) {
if (false == addHandleToWorkloop(thrd, pipeName)) {
goto End;
}
int err = taosThreadCreate(&(thrd->thread), NULL, transWorkerThread, (void*)(thrd));

View File

@ -26,7 +26,6 @@ typedef struct TdDirEntry {
WIN32_FIND_DATA findFileData;
} TdDirEntry;
typedef struct TdDir {
TdDirEntry dirEntry;
HANDLE hFind;
@ -59,7 +58,7 @@ void wordfree(wordexp_t *pwordexp) {}
#include <wordexp.h>
typedef struct dirent dirent;
typedef struct DIR TdDir;
typedef struct DIR TdDir;
typedef struct dirent TdDirEntry;
#endif
@ -78,14 +77,14 @@ void taosRemoveDir(const char *dirname) {
taosRemoveDir(filename);
} else {
(void)taosRemoveFile(filename);
//printf("file:%s is removed\n", filename);
// printf("file:%s is removed\n", filename);
}
}
taosCloseDir(&pDir);
rmdir(dirname);
//printf("dir:%s is removed\n", dirname);
// printf("dir:%s is removed\n", dirname);
return;
}
@ -102,8 +101,8 @@ int32_t taosMkDir(const char *dirname) {
int32_t taosMulMkDir(const char *dirname) {
if (dirname == NULL) return -1;
char *temp = strdup(dirname);
char *pos = temp;
char * temp = strdup(dirname);
char * pos = temp;
int32_t code = 0;
if (strncmp(temp, "/", 1) == 0) {
@ -111,8 +110,8 @@ int32_t taosMulMkDir(const char *dirname) {
} else if (strncmp(temp, "./", 2) == 0) {
pos += 2;
}
for ( ; *pos != '\0'; pos++) {
for (; *pos != '\0'; pos++) {
if (*pos == '/') {
*pos = '\0';
code = mkdir(temp, 0755);
@ -123,7 +122,7 @@ int32_t taosMulMkDir(const char *dirname) {
*pos = '/';
}
}
if (*(pos - 1) != '/') {
code = mkdir(temp, 0755);
if (code < 0 && errno != EEXIST) {
@ -145,7 +144,7 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) {
TdDirPtr pDir = taosOpenDir(dirname);
if (pDir == NULL) return;
int64_t sec = taosGetTimestampSec();
int64_t sec = taosGetTimestampSec();
TdDirEntryPtr de = NULL;
while ((de = taosReadDir(pDir)) != NULL) {
@ -173,9 +172,9 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) {
int32_t days = (int32_t)(TABS(sec - fileSec) / 86400 + 1);
if (days > keepDays) {
(void)taosRemoveFile(filename);
//printf("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays);
// printf("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays);
} else {
//printf("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays);
// printf("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays);
}
}
}
@ -187,7 +186,7 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) {
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) {
wordexp_t full_path;
if (0 != wordexp(dirname, &full_path, 0)) {
//printf("failed to expand path:%s since %s", dirname, strerror(errno));
// printf("failed to expand path:%s since %s", dirname, strerror(errno));
wordfree(&full_path);
return -1;
}
@ -204,7 +203,7 @@ int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) {
int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen) {
char tmp[PATH_MAX] = {0};
#ifdef WINDOWS
if (_fullpath(dirname, tmp, maxlen) != NULL) {
if (_fullpath(tmp, dirname, maxlen) != NULL) {
#else
if (realpath(dirname, tmp) != NULL) {
#endif
@ -228,9 +227,9 @@ bool taosIsDir(const char *dirname) {
return false;
}
char* taosDirName(char *name) {
char *taosDirName(char *name) {
#ifdef WINDOWS
char Drive1[MAX_PATH], Dir1[MAX_PATH];
char Drive1[MAX_PATH], Dir1[MAX_PATH];
_splitpath(name, Drive1, Dir1, NULL, NULL);
size_t dirNameLen = strlen(Drive1) + strlen(Dir1);
if (dirNameLen > 0) {
@ -242,13 +241,13 @@ char* taosDirName(char *name) {
#endif
}
char* taosDirEntryBaseName(char *name) {
char *taosDirEntryBaseName(char *name) {
#ifdef WINDOWS
char Filename1[MAX_PATH], Ext1[MAX_PATH];
_splitpath(name, NULL, NULL, Filename1, Ext1);
return name + (strlen(name) - strlen(Filename1) - strlen(Ext1));
#else
return (char*)basename(name);
return (char *)basename(name);
#endif
}
@ -258,8 +257,8 @@ TdDirPtr taosOpenDir(const char *dirname) {
}
#ifdef WINDOWS
char szFind[MAX_PATH]; // the file pattern to search for
HANDLE hFind;
char szFind[MAX_PATH]; // the file pattern to search for
HANDLE hFind;
TdDirPtr pDir = taosMemoryMalloc(sizeof(TdDir));
@ -275,7 +274,6 @@ TdDirPtr taosOpenDir(const char *dirname) {
#else
return (TdDirPtr)opendir(dirname);
#endif
}
TdDirEntryPtr taosReadDir(TdDirPtr pDir) {
@ -286,9 +284,9 @@ TdDirEntryPtr taosReadDir(TdDirPtr pDir) {
if (!FindNextFile(pDir->hFind, &(pDir->dirEntry.findFileData))) {
return NULL;
}
return (TdDirEntryPtr)&(pDir->dirEntry.findFileData);
return (TdDirEntryPtr) & (pDir->dirEntry.findFileData);
#else
return (TdDirEntryPtr)readdir((DIR*)pDir);
return (TdDirEntryPtr)readdir((DIR *)pDir);
#endif
}
@ -299,18 +297,18 @@ bool taosDirEntryIsDir(TdDirEntryPtr pDirEntry) {
#ifdef WINDOWS
return (pDirEntry->findFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
#else
return (((dirent*)pDirEntry)->d_type & DT_DIR) != 0;
return (((dirent *)pDirEntry)->d_type & DT_DIR) != 0;
#endif
}
char* taosGetDirEntryName(TdDirEntryPtr pDirEntry) {
char *taosGetDirEntryName(TdDirEntryPtr pDirEntry) {
if (pDirEntry == NULL) {
return NULL;
}
#ifdef WINDOWS
return pDirEntry->findFileData.cFileName;
#else
return ((dirent*)pDirEntry)->d_name;
return ((dirent *)pDirEntry)->d_name;
#endif
}
@ -324,7 +322,7 @@ int32_t taosCloseDir(TdDirPtr *ppDir) {
*ppDir = NULL;
return 0;
#else
closedir((DIR*)*ppDir);
closedir((DIR *)*ppDir);
*ppDir = NULL;
return 0;
#endif

View File

@ -543,7 +543,7 @@ int32_t taosFsyncFile(TdFilePtr pFile) {
HANDLE h = (HANDLE)_get_osfhandle(pFile->fd);
return FlushFileBuffers(h);
return !FlushFileBuffers(h);
#else
if (pFile == NULL) {
return 0;
@ -688,6 +688,16 @@ int64_t taosGetLineFile(TdFilePtr pFile, char **__restrict ptrBuf) {
return getline(ptrBuf, &len, pFile->fp);
#endif
}
int64_t taosGetsFile(TdFilePtr pFile, int32_t maxSize, char *__restrict buf) {
if (pFile == NULL || buf == NULL) {
return -1;
}
assert(pFile->fp != NULL);
if (fgets(buf, maxSize, pFile->fp) == NULL) {
return -1;
}
return strlen(buf);
}
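// usage sketch (buffer size is illustrative): char line[1024];
// int64_t n = taosGetsFile(pFile, sizeof(line), line); // n < 0 on EOF or error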
int32_t taosEOFFile(TdFilePtr pFile) {
if (pFile == NULL) {
return 0;

View File

@ -28,6 +28,7 @@
#else
#include <arpa/inet.h>
#include <fcntl.h>
#include <net/if.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/ip.h>
@ -638,6 +639,73 @@ int32_t taosKeepTcpAlive(TdSocketPtr pSocket) {
return 0;
}
int taosGetLocalIp(const char *eth, char *ip) {
#if defined(WINDOWS)
// DO NOTHING
return 0;
#else
int fd;
struct ifreq ifr;
struct sockaddr_in sin;
fd = socket(AF_INET, SOCK_DGRAM, 0);
if (-1 == fd) {
return -1;
}
strncpy(ifr.ifr_name, eth, IFNAMSIZ);
ifr.ifr_name[IFNAMSIZ - 1] = 0;
if (ioctl(fd, SIOCGIFADDR, &ifr) < 0) {
taosCloseSocketNoCheck1(fd);
return -1;
}
memcpy(&sin, &ifr.ifr_addr, sizeof(sin));
snprintf(ip, 64, "%s", inet_ntoa(sin.sin_addr));
taosCloseSocketNoCheck1(fd);
#endif
return 0;
}
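// usage sketch, assuming a typical Linux interface name:
// char ip[64] = {0};
// if (taosGetLocalIp("eth0", ip) == 0) { /* ip holds the IPv4 address */ }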
int taosValidIp(uint32_t ip) {
#if defined(WINDOWS)
// DO NOTHING
return 0;
#else
int ret = -1;
int fd;
struct ifconf ifconf;
char buf[512] = {0};
ifconf.ifc_len = 512;
ifconf.ifc_buf = buf;
if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) {
return -1;
}
ioctl(fd, SIOCGIFCONF, &ifconf);
struct ifreq *ifreq = (struct ifreq *)ifconf.ifc_buf;
for (int i = (ifconf.ifc_len / sizeof(struct ifreq)); i > 0; i--) {
char ip_str[64] = {0};
if (ifreq->ifr_flags == AF_INET) {
ret = taosGetLocalIp(ifreq->ifr_name, ip_str);
if (ret != 0) {
break;
}
ret = -1;
if (ip == (uint32_t)taosInetAddr(ip_str)) {
ret = 0;
break;
}
ifreq++;
}
}
taosCloseSocketNoCheck1(fd);
return ret;
#endif
return 0;
}
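/* taosValidIp asks whether the given IPv4 address belongs to any local
 * interface. A hedged standalone Linux sketch of the same SIOCGIFCONF walk,
 * with illustrative names only, not the TDengine API: */
#include <arpa/inet.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
  char buf[512] = {0};
  struct ifconf ifc;
  memset(&ifc, 0, sizeof(ifc));
  ifc.ifc_len = sizeof(buf);
  ifc.ifc_buf = buf;
  int fd = socket(AF_INET, SOCK_DGRAM, 0);
  if (fd < 0) return 1;
  if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {  // fetch the interface table
    close(fd);
    return 1;
  }
  struct ifreq *ifr = (struct ifreq *)ifc.ifc_buf;
  for (int n = ifc.ifc_len / sizeof(struct ifreq); n > 0; n--, ifr++) {
    if (ifr->ifr_addr.sa_family == AF_INET) {  // IPv4 entries only
      struct sockaddr_in sin;
      memcpy(&sin, &ifr->ifr_addr, sizeof(sin));
      printf("%s -> %s\n", ifr->ifr_name, inet_ntoa(sin.sin_addr));
    }
  }
  close(fd);
  return 0;
}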
bool taosValidIpAndPort(uint32_t ip, uint16_t port) {
struct sockaddr_in serverAdd;
SocketFd fd;
@ -677,13 +745,9 @@ bool taosValidIpAndPort(uint32_t ip, uint16_t port) {
taosCloseSocket(&pSocket);
return false;
}
if (listen(pSocket->fd, 1024) < 0) {
// printf("listen tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno));
taosCloseSocket(&pSocket);
return false;
}
taosCloseSocket(&pSocket);
return true;
// return 0 == taosValidIp(ip) ? true : false;
}
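/* After this change taosValidIpAndPort treats a successful bind() alone as
 * proof that the address/port pair is usable; the listen() step is gone. A
 * minimal standalone sketch of that bind-only probe (illustrative names): */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Return 1 when ip:port can be bound locally, 0 otherwise. */
static int canBindSketch(const char *ip, unsigned short port) {
  struct sockaddr_in addr;
  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_port = htons(port);
  addr.sin_addr.s_addr = inet_addr(ip);
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) return 0;
  int reuse = 1;
  setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
  int ok = (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0);
  close(fd);
  return ok;
}

int main(void) {
  printf("0.0.0.0:6030 bindable: %d\n", canBindSketch("0.0.0.0", 6030));
  return 0;
}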
TdSocketServerPtr taosOpenTcpServerSocket(uint32_t ip, uint16_t port) {
struct sockaddr_in serverAdd;

View File

@ -869,11 +869,15 @@ SysNameInfo taosGetSysNameInfo() {
SysNameInfo info = {0};
DWORD dwVersion = GetVersion();
tstrncpy(info.sysname, getenv("OS"), sizeof(info.sysname));
tstrncpy(info.nodename, getenv("COMPUTERNAME"), sizeof(info.nodename));
char *tmp = NULL;
tmp = getenv("OS");
if (tmp != NULL) tstrncpy(info.sysname, tmp, sizeof(info.sysname));
tmp = getenv("COMPUTERNAME");
if (tmp != NULL) tstrncpy(info.nodename, tmp, sizeof(info.nodename));
sprintf_s(info.release, sizeof(info.release), "%d", dwVersion & 0x0F);
sprintf_s(info.version, sizeof(info.version), "%d", (dwVersion >> 8) & 0x0F);
tstrncpy(info.machine, getenv("PROCESSOR_ARCHITECTURE"), sizeof(info.machine));
tmp = getenv("PROCESSOR_ARCHITECTURE");
if (tmp != NULL) tstrncpy(info.machine, tmp, sizeof(info.machine));
return info;
#elif defined(_TD_DARWIN_64)

View File

@ -62,6 +62,8 @@
# ---- tstream
./test.sh -f tsim/tstream/basic0.sim
# ---- transaction
./test.sh -f tsim/trans/create_db.sim
# ---- tmq
./test.sh -f tsim/tmq/basic1.sim

View File

@ -5,7 +5,7 @@ set +e
echo "Executing copy_udf.sh"
SCRIPT_DIR=`dirname $0`
SCRIPT_DIR=`pwd`
cd $SCRIPT_DIR/../
IN_TDINTERNAL="community"

View File

@ -62,11 +62,7 @@ fi
TOP_DIR=`pwd`
if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then
BIN_DIR=`find . -name "tmq_sim"|grep bin|head -n1|cut -d '/' -f 2,3`
else
BIN_DIR=`find . -name "tmq_sim"|grep bin|head -n1|cut -d '/' -f 2`
fi
BIN_DIR=`find . -name "tmq_sim"|grep bin|head -n1|cut -d '/' -f 2`
declare -x BUILD_DIR=$TOP_DIR/$BIN_DIR

View File

@ -0,0 +1,166 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1
system sh/cfg.sh -n dnode2 -c transPullupInterval -v 1
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sql connect
print =============== show dnodes
sql show dnodes;
if $rows != 1 then
return -1
endi
if $data00 != 1 then
return -1
endi
sql show mnodes;
if $rows != 1 then
return -1
endi
if $data00 != 1 then
return -1
endi
if $data02 != LEADER then
return -1
endi
print =============== create dnodes
sql create dnode $hostname port 7200
sleep 2000
sql show dnodes;
if $rows != 2 then
return -1
endi
if $data00 != 1 then
return -1
endi
if $data10 != 2 then
return -1
endi
print =============== kill dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print =============== create database
sql show transactions
if $rows != 0 then
return -1
endi
sql_error create database d1 vgroups 2;
print =============== show transactions
sql show transactions
if $rows != 1 then
return -1
endi
if $data[0][0] != 2 then
return -1
endi
if $data[0][2] != undoAction then
return -1
endi
if $data[0][3] != d1 then
return -1
endi
if $data[0][4] != create-db then
return -1
endi
if $data[0][7] != @Unable to establish connection@ then
return -1
endi
sql_error create database d1 vgroups 2;
print =============== start dnode2
system sh/exec.sh -n dnode2 -s start
sleep 3000
sql show transactions
if $rows != 0 then
return -1
endi
sql create database d1 vgroups 2;
print =============== kill dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print =============== create database
sql show transactions
if $rows != 0 then
return -1
endi
sql_error create database d2 vgroups 2;
print =============== show transactions
sql show transactions
if $rows != 1 then
return -1
endi
if $data[0][0] != 4 then
return -1
endi
if $data[0][2] != undoAction then
return -1
endi
if $data[0][3] != d2 then
return -1
endi
if $data[0][4] != create-db then
return -1
endi
if $data[0][7] != @Unable to establish connection@ then
return -1
endi
sql_error create database d2 vgroups 2;
print =============== kill transaction
sql kill transaction 4;
sleep 2000
sql show transactions
if $rows != 0 then
return -1
endi
print =============== start dnode2
system sh/exec.sh -n dnode2 -s start
sleep 3000
sql show transactions
if $rows != 0 then
return -1
endi
sql create database d2 vgroups 2;
sql_error kill transaction 1;
sql_error kill transaction 2;
sql_error kill transaction 3;
sql_error kill transaction 4;
sql_error kill transaction 5;
return
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT

View File

@ -0,0 +1,311 @@
import taos
import sys
import time
import socket
import pexpect
import os
import http.server
import gzip
import threading
import json
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
telemetryPort = '6043'
def telemetryInfoCheck(infoDict=''):
hostname = socket.gethostname()
serverPort = 7080
if "ts" not in infoDict or len(infoDict["ts"]) == 0:
tdLog.exit("ts is null!")
if "dnode_id" not in infoDict or infoDict["dnode_id"] != 1:
tdLog.exit("dnode_id is null!")
if "dnode_ep" not in infoDict:
tdLog.exit("dnode_ep is null!")
if "cluster_id" not in infoDict:
tdLog.exit("cluster_id is null!")
if "protocol" not in infoDict or infoDict["protocol"] != 1:
tdLog.exit("protocol is null!")
if "cluster_info" not in infoDict :
tdLog.exit("cluster_info is null!")
# cluster_info ====================================
if "first_ep" not in infoDict["cluster_info"] or infoDict["cluster_info"]["first_ep"] == None:
tdLog.exit("first_ep is null!")
if "first_ep_dnode_id" not in infoDict["cluster_info"] or infoDict["cluster_info"]["first_ep_dnode_id"] != 1:
tdLog.exit("first_ep_dnode_id is null!")
if "version" not in infoDict["cluster_info"] or infoDict["cluster_info"]["version"] == None:
tdLog.exit("first_ep_dnode_id is null!")
if "master_uptime" not in infoDict["cluster_info"] or infoDict["cluster_info"]["master_uptime"] == None:
tdLog.exit("master_uptime is null!")
if "monitor_interval" not in infoDict["cluster_info"] or infoDict["cluster_info"]["monitor_interval"] !=5:
tdLog.exit("monitor_interval is null!")
if "vgroups_total" not in infoDict["cluster_info"] or infoDict["cluster_info"]["vgroups_total"] < 0:
tdLog.exit("vgroups_total is null!")
if "vgroups_alive" not in infoDict["cluster_info"] or infoDict["cluster_info"]["vgroups_alive"] < 0:
tdLog.exit("vgroups_alive is null!")
if "connections_total" not in infoDict["cluster_info"] or infoDict["cluster_info"]["connections_total"] < 0 :
tdLog.exit("connections_total is null!")
if "dnodes" not in infoDict["cluster_info"] or infoDict["cluster_info"]["dnodes"] == None :
tdLog.exit("dnodes is null!")
dnodes_info = { "dnode_id": 1,"dnode_ep": f"{hostname}:{serverPort}","status":"ready"}
for k ,v in dnodes_info.items():
if k not in infoDict["cluster_info"]["dnodes"][0] or v != infoDict["cluster_info"]["dnodes"][0][k] :
tdLog.exit("dnodes info is null!")
mnodes_info = { "mnode_id":1, "mnode_ep":f"{hostname}:{serverPort}","role": "LEADER" }
for k ,v in mnodes_info.items():
if k not in infoDict["cluster_info"]["mnodes"][0] or v != infoDict["cluster_info"]["mnodes"][0][k] :
tdLog.exit("mnodes info is null!")
# vgroup_infos ====================================
if "vgroup_infos" not in infoDict or infoDict["vgroup_infos"]== None:
tdLog.exit("vgroup_infos is null!")
vgroup_infos_nums = len(infoDict["vgroup_infos"])
for index in range(vgroup_infos_nums):
if "vgroup_id" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["vgroup_id"]<0:
tdLog.exit("vgroup_id is null!")
if "database_name" not in infoDict["vgroup_infos"][index] or len(infoDict["vgroup_infos"][index]["database_name"]) < 0:
tdLog.exit("database_name is null!")
if "tables_num" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["tables_num"]!= 0:
tdLog.exit("tables_num is null!")
if "status" not in infoDict["vgroup_infos"][index] or len(infoDict["vgroup_infos"][index]["status"]) < 0 :
tdLog.exit("status is null!")
if "vnodes" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["vnodes"] ==None :
tdLog.exit("vnodes is null!")
if "dnode_id" not in infoDict["vgroup_infos"][index]["vnodes"][0] or infoDict["vgroup_infos"][index]["vnodes"][0]["dnode_id"] < 0 :
tdLog.exit("vnodes is null!")
# grant_info ====================================
if "grant_info" not in infoDict or infoDict["grant_info"]== None:
tdLog.exit("grant_info is null!")
if "expire_time" not in infoDict["grant_info"] or not infoDict["grant_info"]["expire_time"] > 0:
tdLog.exit("expire_time is null!")
if "timeseries_used" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_used"] > 0:
tdLog.exit("timeseries_used is null!")
if "timeseries_total" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_total"] > 0:
tdLog.exit("timeseries_total is null!")
# dnode_info ====================================
if "dnode_info" not in infoDict or infoDict["dnode_info"]== None:
tdLog.exit("dnode_info is null!")
dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine',
'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select',
'req_select_rate', 'req_insert', 'req_insert_success', 'req_insert_rate', 'req_insert_batch', 'req_insert_batch_success',
'req_insert_batch_rate', 'errors', 'vnodes_num', 'masters', 'has_mnode', 'has_qnode', 'has_snode', 'has_bnode']
for elem in dnode_infos:
if elem not in infoDict["dnode_info"] or infoDict["dnode_info"][elem] < 0:
tdLog.exit(f"{elem} is null!")
# dnode_info ====================================
if "disk_infos" not in infoDict or infoDict["disk_infos"]== None:
tdLog.exit("disk_infos is null!")
# bug for data_dir
if "datadir" not in infoDict["disk_infos"] or len(infoDict["disk_infos"]["datadir"]) <=0 :
tdLog.exit("datadir is null!")
if "name" not in infoDict["disk_infos"]["datadir"][0] or len(infoDict["disk_infos"]["datadir"][0]["name"]) <= 0:
tdLog.exit("name is null!")
if "level" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["level"] < 0:
tdLog.exit("level is null!")
if "avail" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["avail"] <= 0:
tdLog.exit("avail is null!")
if "used" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["used"] <= 0:
tdLog.exit("used is null!")
if "total" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["total"] <= 0:
tdLog.exit("total is null!")
if "logdir" not in infoDict["disk_infos"] or infoDict["disk_infos"]["logdir"]== None:
tdLog.exit("logdir is null!")
if "name" not in infoDict["disk_infos"]["logdir"] or len(infoDict["disk_infos"]["logdir"]["name"]) <= 0:
tdLog.exit("name is null!")
if "avail" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["avail"] <= 0:
tdLog.exit("avail is null!")
if "used" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["used"] <= 0:
tdLog.exit("used is null!")
if "total" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["total"] <= 0:
tdLog.exit("total is null!")
if "tempdir" not in infoDict["disk_infos"] or infoDict["disk_infos"]["tempdir"]== None:
tdLog.exit("tempdir is null!")
if "name" not in infoDict["disk_infos"]["tempdir"] or len(infoDict["disk_infos"]["tempdir"]["name"]) <= 0:
tdLog.exit("name is null!")
if "avail" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["avail"] <= 0:
tdLog.exit("avail is null!")
if "used" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["used"] <= 0:
tdLog.exit("used is null!")
if "total" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["total"] <= 0:
tdLog.exit("total is null!")
# log_infos ====================================
if "log_infos" not in infoDict or infoDict["log_infos"]== None:
tdLog.exit("log_infos is null!")
if "logs" not in infoDict["log_infos"] or len(infoDict["log_infos"]["logs"])!= 10:
tdLog.exit("logs is null!")
if "ts" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 10:
tdLog.exit("ts is null!")
if "level" not in infoDict["log_infos"]["logs"][0] or infoDict["log_infos"]["logs"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]:
tdLog.exit("level is null!")
if "content" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 1:
tdLog.exit("content is null!")
if "summary" not in infoDict["log_infos"] or len(infoDict["log_infos"]["summary"])!= 4:
tdLog.exit("summary is null!")
if "total" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["total"] < 0 :
tdLog.exit("total is null!")
if "level" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]:
tdLog.exit("level is null!")
class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""
process GET request
"""
def do_POST(self):
"""
process POST request
"""
contentEncoding = self.headers["Content-Encoding"]
if contentEncoding == 'gzip':
req_body = self.rfile.read(int(self.headers["Content-Length"]))
plainText = gzip.decompress(req_body).decode()
else:
plainText = self.rfile.read(int(self.headers["Content-Length"])).decode()
print(plainText)
# 1. send response code and header
self.send_response(200)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.end_headers()
# 2. send response content
#self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8"))
# 3. check request body info
infoDict = json.loads(plainText)
#print("================")
# print(infoDict)
telemetryInfoCheck(infoDict)
# 4. shutdown the server and exit case
assassin = threading.Thread(target=httpServer.shutdown)
assassin.daemon = True
assassin.start()
print ("==== shutdown http server ====")
class TDTestCase:
hostname = socket.gethostname()
serverPort = '7080'
rpcDebugFlagVal = '143'
clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
clientCfgDict["serverPort"] = serverPort
clientCfgDict["firstEp"] = hostname + ':' + serverPort
clientCfgDict["secondEp"] = hostname + ':' + serverPort
clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
clientCfgDict["fqdn"] = hostname
updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
updatecfgDict["clientCfg"] = clientCfgDict
updatecfgDict["serverPort"] = serverPort
updatecfgDict["firstEp"] = hostname + ':' + serverPort
updatecfgDict["secondEp"] = hostname + ':' + serverPort
updatecfgDict["fqdn"] = hostname
updatecfgDict["monitorFqdn"] = hostname
updatecfgDict["monitorPort"] = '6043'
updatecfgDict["monitor"] = '1'
updatecfgDict["monitorInterval"] = "5"
updatecfgDict["monitorMaxLogs"] = "10"
updatecfgDict["monitorComp"] = "1"
print ("===================: ", updatecfgDict)
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
# time.sleep(2)
vgroups = "30"
sql = "create database db3 vgroups " + vgroups
tdSql.query(sql)
# loop to wait request
httpServer.serve_forever()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
# create http server: bind ip/port, and request processor
serverAddress = ("", int(telemetryPort))
httpServer = http.server.HTTPServer(serverAddress, RequestHandlerImpl)
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -12,6 +12,7 @@
# -*- coding: utf-8 -*-
import sys
import os
import threading
import multiprocessing as mp
from numpy.lib.function_base import insert
@ -66,14 +67,19 @@ class TDTestCase:
# run case
def run(self):
# test base case
self.test_case1()
tdLog.debug(" LIMIT test_case1 ............ [OK]")
# # test base case
# self.test_case1()
# tdLog.debug(" LIMIT test_case1 ............ [OK]")
# test advance case
# test case
# self.test_case2()
# tdLog.debug(" LIMIT test_case2 ............ [OK]")
# test case
self.test_case3()
tdLog.debug(" LIMIT test_case3 ............ [OK]")
# stop
def stop(self):
tdSql.close()
@ -115,11 +121,12 @@ class TDTestCase:
return cur
def new_create_tables(self,dbname,vgroups,stbname,tcountStart,tcountStop):
host = "chenhaoran02"
host = "localhost"
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
tsql=self.newcur(host,config)
tsql.execute("drop database if exists %s"%dbname)
tsql.execute("create database %s vgroups %d"%(dbname,vgroups))
tsql.execute("use %s" %dbname)
tsql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%stbname)
@ -182,7 +189,52 @@ class TDTestCase:
tdLog.debug("INSERT TABLE DATA ............ [OK]")
return
def taosBench(self,jsonFile):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
taosBenchbin = buildPath+ "/build/bin/taosBenchmark"
os.system("%s -f %s -y " %(taosBenchbin,jsonFile))
return
def taosBenchCreate(self,dbname,stbname,vgroups,threadNumbers,count):
# count=50000
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
taosBenchbin = buildPath+ "/build/bin/taosBenchmark"
# insert: create one or multiple tables per sql and insert multiple rows per sql
tdSql.execute("drop database if exists %s"%dbname)
tdSql.execute("create database %s vgroups %d"%(dbname,vgroups))
tdSql.execute("use %s" %dbname)
threads = []
# threadNumbers=2
for i in range(threadNumbers):
jsonfile="1-insert/Vgroups%d%d.json"%(vgroups,i)
os.system("cp -f 1-insert/manyVgroups.json %s"%(jsonfile))
os.system("sed -i 's/\"name\": \"db\",/\"name\": \"%s%d\",/g' %s"%(dbname,i,jsonfile))
os.system("sed -i 's/\"childtable_count\": 300000,/\"childtable_count\": %d,/g' %s "%(count,jsonfile))
os.system("sed -i 's/\"name\": \"stb1\",/\"name\": \"%s%d\",/g' %s "%(stbname,i,jsonfile))
os.system("sed -i 's/\"childtable_prefix\": \"stb1_\",/\"childtable_prefix\": \"%s%d_\",/g' %s "%(stbname,i,jsonfile))
threads.append(mp.Process(target=self.taosBench, args=("%s"%jsonfile,)))
start_time = time.time()
for tr in threads:
tr.start()
for tr in threads:
tr.join()
end_time = time.time()
spendTime=end_time-start_time
speedCreate=count/spendTime
tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate))
return
# test case1 base
def test_case1(self):
tdLog.debug("-----create database and tables test------- ")
@ -284,6 +336,12 @@ class TDTestCase:
return
def test_case3(self):
self.taosBenchCreate("db1", "stb1", 1, 2, 1*50000)
return
#
# add case with filename
#

View File

@ -0,0 +1,76 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos/",
"host": "test216",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 8,
"thread_count_create_tbl": 8,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100000,
"num_of_records_per_req": 100000,
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "yes",
"vgroups": 1
},
"super_tables": [
{
"name": "stb1",
"child_table_exists": "no",
"childtable_count": 300000,
"childtable_prefix": "stb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 50000,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 0,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 10000000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"sample_format": "csv",
"use_sample_ts": "no",
"tags_file": "",
"columns": [
{
"type": "INT"
},
{
"type": "DOUBLE",
"count": 100
},
{
"type": "BINARY",
"len": 400,
"count": 10
},
{
"type": "nchar",
"len": 200,
"count": 20
}
],
"tags": [
{
"type": "TINYINT",
"count": 2
},
{
"type": "BINARY",
"len": 16,
"count": 2
}
]
}
]
}
]
}

View File

@ -0,0 +1,299 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
selfPath = os.path.dirname(os.path.realpath(__file__))
utilPath="%s/../../pytest/"%selfPath
import threading
import multiprocessing as mp
from numpy.lib.function_base import insert
import taos
sys.path.append(utilPath)
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import datetime as dt
import time
# constant define
WAITS = 5 # wait seconds
class TDTestCase:
#
# --------------- main frame -------------------
#
def caseDescription(self):
'''
limit and offset keyword function test cases;
case1: limit offset base function test
case2: offset return valid
'''
return
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
# init
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
# tdSql.init(conn.cursor())
# tdSql.prepare()
# self.create_tables();
self.ts = 1500000000000
# run case
def run(self):
# test base case
self.test_case1()
tdLog.debug(" LIMIT test_case1 ............ [OK]")
# test advance case
# self.test_case2()
# tdLog.debug(" LIMIT test_case2 ............ [OK]")
# stop
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
# --------------- case -------------------
# create tables
def create_tables(self,dbname,stbname,count):
tdSql.execute("use %s" %dbname)
tdSql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%stbname)
pre_create = "create table"
sql = pre_create
tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
# print(time.time())
exeStartTime=time.time()
for i in range(count):
sql += " %s_%d using %s tags(%d)"%(stbname,i,stbname,i+1)
if i >0 and i%3000 == 0:
tdSql.execute(sql)
sql = pre_create
# print(time.time())
# end sql
if sql != pre_create:
tdSql.execute(sql)
exeEndTime=time.time()
spendTime=exeEndTime-exeStartTime
speedCreate=count/spendTime
tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate))
return
def newcur(self,host,cfg):
user = "root"
password = "taosdata"
port =6030
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor()
print(cur)
return cur
def new_create_tables(self,dbname,vgroups,stbname,tcountStart,tcountStop):
host = "127.0.0.1"
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
tsql=self.newcur(host,config)
tsql.execute("drop database if exists %s" %(dbname))
tsql.execute("create database if not exists %s vgroups %d"%(dbname,vgroups))
tsql.execute("use %s" %dbname)
tsql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%stbname)
pre_create = "create table"
sql = pre_create
tcountStop=int(tcountStop)
tcountStart=int(tcountStart)
count=tcountStop-tcountStart
tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
# print(time.time())
exeStartTime=time.time()
# print(type(tcountStop),type(tcountStart))
for i in range(tcountStart,tcountStop):
sql += " %s_%d using %s tags(%d)"%(stbname,i,stbname,i+1)
if i >0 and i%20000 == 0:
# print(sql)
tsql.execute(sql)
sql = pre_create
# print(time.time())
# end sql
if sql != pre_create:
# print(sql)
tsql.execute(sql)
exeEndTime=time.time()
spendTime=exeEndTime-exeStartTime
speedCreate=count/spendTime
# tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate))
return
# insert data
def insert_data(self, dbname, stbname, ts_start, tcountStart,tcountStop,rowCount):
tdSql.execute("use %s" %dbname)
pre_insert = "insert into "
sql = pre_insert
tcount=tcountStop-tcountStart
allRows=tcount*rowCount
tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbname, allRows))
exeStartTime=time.time()
for i in range(tcountStart,tcountStop):
sql += " %s_%d values "%(stbname,i)
for j in range(rowCount):
sql += "(%d, %d, 'taos_%d') "%(ts_start + j*1000, j, j)
if j >0 and j%5000 == 0:
# print(sql)
tdSql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
# end sql
if sql != pre_insert:
# print(sql)
tdSql.execute(sql)
exeEndTime=time.time()
spendTime=exeEndTime-exeStartTime
speedInsert=allRows/spendTime
# tdLog.debug("spent %.2fs to INSERT %d rows , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,speedInsert))
tdLog.debug("INSERT TABLE DATA ............ [OK]")
return
# test case1 base
def test_case1(self):
tdLog.debug("-----create database and tables test------- ")
# tdSql.execute("drop database if exists db1")
# tdSql.execute("drop database if exists db4")
# tdSql.execute("drop database if exists db6")
# tdSql.execute("drop database if exists db8")
# tdSql.execute("drop database if exists db12")
# tdSql.execute("drop database if exists db16")
#create database and tables;
# tdSql.execute("create database db11 vgroups 1")
# # self.create_tables("db1", "stb1", 30*10000)
# tdSql.execute("use db1")
# tdSql.execute("create stable stb1(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)")
# tdSql.execute("create database db12 vgroups 1")
# # self.create_tables("db1", "stb1", 30*10000)
# tdSql.execute("use db1")
# t1 = threading.Thread(target=self.new_create_tables("db1", "stb1", 15*10000), args=(1,))
# t2 = threading.Thread(target=self.new_create_tables("db1", "stb1", 15*10000), args=(2,))
# t1 = mp.Process(target=self.new_create_tables, args=("db1", "stb1", 0,count/2,))
# t2 = mp.Process(target=self.new_create_tables, args=("db1", "stb1", count/2,count,))
count=500000
vgroups=1
threads = []
threadNumbers=2
for i in range(threadNumbers):
threads.append(mp.Process(target=self.new_create_tables, args=("db1%d"%i, vgroups, "stb1", 0,count,)))
start_time = time.time()
for tr in threads:
tr.start()
for tr in threads:
tr.join()
end_time = time.time()
spendTime=end_time-start_time
speedCreate=count/spendTime
tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate))
# self.new_create_tables("db1", "stb1", 15*10000)
# self.new_create_tables("db1", "stb1", 15*10000)
# tdSql.execute("create database db4 vgroups 4")
# self.create_tables("db4", "stb4", 30*10000)
# tdSql.execute("create database db6 vgroups 6")
# self.create_tables("db6", "stb6", 30*10000)
# tdSql.execute("create database db8 vgroups 8")
# self.create_tables("db8", "stb8", 30*10000)
# tdSql.execute("create database db12 vgroups 12")
# self.create_tables("db12", "stb12", 30*10000)
# tdSql.execute("create database db16 vgroups 16")
# self.create_tables("db16", "stb16", 30*10000)
return
# test case2 base:insert data
def test_case2(self):
tdLog.debug("-----insert data test------- ")
# drop database
tdSql.execute("drop database if exists db1")
tdSql.execute("drop database if exists db4")
tdSql.execute("drop database if exists db6")
tdSql.execute("drop database if exists db8")
tdSql.execute("drop database if exists db12")
tdSql.execute("drop database if exists db16")
#create database and tables;
tdSql.execute("create database db1 vgroups 1")
self.create_tables("db1", "stb1", 1*100)
self.insert_data("db1", "stb1", self.ts, 1*50,1*10000)
tdSql.execute("create database db4 vgroups 4")
self.create_tables("db4", "stb4", 1*100)
self.insert_data("db4", "stb4", self.ts, 1*100,1*10000)
tdSql.execute("create database db6 vgroups 6")
self.create_tables("db6", "stb6", 1*100)
self.insert_data("db6", "stb6", self.ts, 1*100,1*10000)
tdSql.execute("create database db8 vgroups 8")
self.create_tables("db8", "stb8", 1*100)
self.insert_data("db8", "stb8", self.ts, 1*100,1*10000)
tdSql.execute("create database db12 vgroups 12")
self.create_tables("db12", "stb12", 1*100)
self.insert_data("db12", "stb12", self.ts, 1*100,1*10000)
tdSql.execute("create database db16 vgroups 16")
self.create_tables("db16", "stb16", 1*100)
self.insert_data("db16", "stb16", self.ts, 1*100,1*10000)
return
#
# add case with filename
#
# tdCases.addWindows(__file__, TDTestCase())
# tdCases.addLinux(__file__, TDTestCase())
case=TDTestCase()
case.test_case1()

View File

@ -20,7 +20,7 @@ class TDTestCase:
__sql = f"select cast({col_name} as bigint), {col_name} from {tbname}"
tdSql.query(sql=__sql)
data_tb_col = [result[1] for result in tdSql.queryResult]
for i in range(len(tdSql.queryRows)):
for i in range(tdSql.queryRows):
tdSql.checkData( i, 0, None ) if data_tb_col[i] is None else tdSql.checkData( i, 0, int(data_tb_col[i]) )
def __range_to_bigint(self,cols,tables):
@ -32,7 +32,7 @@ class TDTestCase:
__sql = f"select cast({col_name} as timestamp), {col_name} from {tbname}"
tdSql.query(sql=__sql)
data_tb_col = [result[1] for result in tdSql.queryResult]
for i in range(len(tdSql.queryRows)):
for i in range(tdSql.queryRows):
if data_tb_col[i] is None:
tdSql.checkData( i, 0 , None )
if col_name not in ["c2", "double"] or tbname != "t1" or i != 10:
@ -597,37 +597,37 @@ class TDTestCase:
tdLog.printNoPrefix("==========step39: cast constant operation to bigint, expect change to int ")
tdSql.query("select cast(12121.23323131 as bigint) as b from ct4")
( tdSql.checkData(i, 0, 12121) for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, 12121) for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 as binary(16)) as b from ct4")
( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows)) )
( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 as binary(2)) as b from ct4")
( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 as nchar(16)) as b from ct4")
( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 as nchar(2)) as b from ct4")
( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 321.876897998 as bigint) as b from ct4")
( tdSql.checkData(i, 0, 12443) for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, 12443) for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 321.876897998 as binary(16)) as b from ct4")
( tdSql.checkData(i, 0, '12443.110129') for i in range(len(tdSql.queryRows)) )
( tdSql.checkData(i, 0, '12443.110129') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 321.876897998 as binary(3)) as b from ct4")
( tdSql.checkData(i, 0, '124') for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, '124') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 321.876897998 as nchar(16)) as b from ct4")
( tdSql.checkData(i, 0, '12443.110129') for i in range(len(tdSql.queryRows)) )
( tdSql.checkData(i, 0, '12443.110129') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 321.876897998 as nchar(3)) as b from ct4")
( tdSql.checkData(i, 0, '124') for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, '124') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as bigint) as b from ct4")
( tdSql.checkData(i, 0, 12121) for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, 12121) for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as binary(16)) as b from ct4")
( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows)) )
( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as binary(2)) as b from ct4")
( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as nchar(16)) as b from ct4")
( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) )
tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as nchar(2)) as b from ct4")
( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) )
( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) )
tdLog.printNoPrefix("==========step40: error cast condition, should return error ")
tdSql.error("select cast(c1 as int) as b from ct4")

View File

@ -232,13 +232,13 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.stop(1)
tdDnodes.start(1)
# tdSql.execute("use db")
tdSql.execute("use db")
# tdLog.printNoPrefix("==========step4:after wal, all check again ")
# self.all_test()
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()

View File

@ -0,0 +1,287 @@
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __concat_condition(self): # sourcery skip: extract-method
concat_condition = []
for char_col in CHAR_COL:
concat_condition.extend(
(
char_col,
f"upper( {char_col} )",
)
)
concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
concat_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
return concat_condition
def __where_condition(self, col):
# return f" where count({col}) > 0 "
return ""
def __concat_num(self, concat_lists, num):
return [ concat_lists[i] for i in range(num) ]
def __group_condition(self, col, having = ""):
return f" group by {col} having {having}" if having else f" group by {col} "
def __concat_check(self, tbname, num):
concat_condition = self.__concat_condition()
for i in range(len(concat_condition) - num + 1 ):
condition = self.__concat_num(concat_condition[i:], num)
concat_filter = f"concat( {','.join( condition ) }) "
where_condition = self.__where_condition(condition[0])
# group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
concat_group_having = self.__group_condition(concat_filter, having=f"{concat_filter} is not null " )
# group_no_having= self.__group_condition(condition[0] )
concat_group_no_having= self.__group_condition(concat_filter)
groups = ["", concat_group_having, concat_group_no_having]
if num > 8 or num < 2 :
[tdSql.error(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
break
tdSql.query(f"select {','.join(condition)} from {tbname} ")
rows = tdSql.queryRows
concat_data = []
for m in range(rows):
concat_data.append("".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_data.append(None)
tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} ")
tdSql.checkRows(rows)
for j in range(tdSql.queryRows):
assert tdSql.getData(j, 0) in concat_data
[ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
def __concat_err_check(self,tbname):
sqls = []
for char_col in CHAR_COL:
sqls.extend(
(
f"select concat( {char_col} ) from {tbname} ",
f"select concat(ceil( {char_col} )) from {tbname} ",
f"select {char_col} from {tbname} group by concat( {char_col} ) ",
)
)
sqls.extend( f"select concat( {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select concat( {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat( {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat( {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat( {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
sqls.extend( f"select concat( {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
sqls.extend( f"select concat( {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
sqls.extend( f"select concat( {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat( {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
sqls.extend( f"select concat({char_col}, 11) from {tbname} " for char_col in CHAR_COL )
sqls.extend( f"select concat({num_col}, '1') from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select concat({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
sqls.extend(
(
f"select concat() from {tbname} ",
f"select concat(*) from {tbname} ",
f"select concat(ccccccc) from {tbname} ",
f"select concat(111) from {tbname} ",
)
)
return sqls
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
for i in range(2,8):
self.__concat_check(tb,i)
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
for errsql in self.__concat_err_check(tb):
tdSql.error(sql=errsql)
self.__concat_check(tb,1)
self.__concat_check(tb,9)
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
def all_test(self):
self.__test_current()
self.__test_error()
def __create_tb(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,287 @@
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __concat_ws_condition(self): # sourcery skip: extract-method
concat_ws_condition = []
for char_col in CHAR_COL:
concat_ws_condition.extend(
(
char_col,
f"upper( {char_col} )",
)
)
concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
concat_ws_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
return concat_ws_condition
def __where_condition(self, col):
# return f" where count({col}) > 0 "
return ""
def __concat_ws_num(self, concat_ws_lists, num):
return [ concat_ws_lists[i] for i in range(num) ]
def __group_condition(self, col, having = ""):
return f" group by {col} having {having}" if having else f" group by {col} "
def __concat_ws_check(self, tbname, num):
concat_ws_condition = self.__concat_ws_condition()
for i in range(len(concat_ws_condition) - num + 1 ):
condition = self.__concat_ws_num(concat_ws_condition[i:], num)
concat_ws_filter = f"concat_ws('_', {','.join( condition ) }) "
where_condition = self.__where_condition(condition[0])
# group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
concat_ws_group_having = self.__group_condition(concat_ws_filter, having=f"{concat_ws_filter} is not null " )
# group_no_having= self.__group_condition(condition[0] )
concat_ws_group_no_having= self.__group_condition(concat_ws_filter)
groups = ["", concat_ws_group_having, concat_ws_group_no_having]
if num > 8 or num < 2 :
[tdSql.error(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
break
tdSql.query(f"select {','.join(condition)} from {tbname} ")
rows = tdSql.queryRows
concat_ws_data = []
for m in range(rows):
concat_ws_data.append("_".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_ws_data.append(None)
tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} ")
tdSql.checkRows(rows)
for j in range(tdSql.queryRows):
assert tdSql.getData(j, 0) in concat_ws_data
[ tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
def __concat_ws_err_check(self,tbname):
sqls = []
for char_col in CHAR_COL:
sqls.extend(
(
f"select concat_ws('_', {char_col} ) from {tbname} ",
f"select concat_ws('_', ceil( {char_col} )) from {tbname} ",
f"select {char_col} from {tbname} group by concat_ws('_', {char_col} ) ",
)
)
sqls.extend( f"select concat_ws('_', {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select concat_ws('_', {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat_ws('_', {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat_ws('_', {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat_ws('_', {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
sqls.extend( f"select concat_ws('_', {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
sqls.extend( f"select concat_ws('_', {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
sqls.extend( f"select concat_ws('_', {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat_ws('_', {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat_ws('_', {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
sqls.extend( f"select concat_ws('_', {char_col}, 11) from {tbname} " for char_col in CHAR_COL )
sqls.extend( f"select concat_ws('_', {num_col}, '1') from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select concat_ws('_', {ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat_ws('_', {bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat_ws('_', {char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
sqls.extend(
(
f"select concat_ws('_', ) from {tbname} ",
f"select concat_ws('_', *) from {tbname} ",
f"select concat_ws('_', ccccccc) from {tbname} ",
f"select concat_ws('_', 111) from {tbname} ",
)
)
return sqls
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
for i in range(2,8):
self.__concat_ws_check(tb,i)
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
for errsql in self.__concat_ws_err_check(tb):
tdSql.error(sql=errsql)
self.__concat_ws_check(tb,1)
self.__concat_ws_check(tb,9)
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
def all_test(self):
self.__test_current()
self.__test_error()
def __create_tb(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
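Note: the run() method above follows the suite's standard durability pattern: create tables, insert, run every check once, then stop and restart dnode 1 so a second pass of the same checks reads data replayed from the WAL. A minimal sketch of that pattern as a reusable helper, assuming the tdDnodes/tdSql fixtures behave as used above (the helper name is hypothetical):

def check_before_and_after_restart(all_test, db="db", dnode_index=1):
    # first pass: checks run against freshly written data
    all_test()
    # bounce the dnode; on startup it must replay the WAL
    tdDnodes.stop(dnode_index)
    tdDnodes.start(dnode_index)
    # a restart drops the session's current database, so select it again
    tdSql.execute(f"use {db}")
    # second pass: identical checks, now against recovered data
    all_test()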
View File
@ -99,6 +99,7 @@ class TDTestCase:
if not join_flag :
tdSql.error(sql=sql)
break
if len(tblist) == 2:
if "ct1" in tblist or "t1" in tblist:
self.__join_current(sql, checkrows)
@ -111,42 +112,9 @@ class TDTestCase:
if len(tblist) > 2 or len(tblist) < 1:
tdSql.error(sql=sql)
# def __join_err_check(self,tbname):
# sqls = []
# for un_char_col in NUM_COL:
# sqls.extend(
# (
# f"select length( {un_char_col} ) from {tbname} ",
# f"select length(ceil( {un_char_col} )) from {tbname} ",
# f"select {un_char_col} from {tbname} group by length( {un_char_col} ) ",
# )
# )
# sqls.extend( f"select length( {un_char_col} + {un_char_col_2} ) from {tbname} " for un_char_col_2 in NUM_COL )
# sqls.extend( f"select length( {un_char_col} + {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
# sqls.extend( f"select {char_col} from {tbname} group by length( {char_col} ) " for char_col in CHAR_COL)
# sqls.extend( f"select length( {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
# sqls.extend( f"select length( {char_col} + {ts_col} ) from {tbname} " for char_col in NUM_COL for ts_col in TS_TYPE_COL)
# sqls.extend( f"select length( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
# sqls.extend( f"select upper({char_col}, 11) from {tbname} " for char_col in CHAR_COL )
# sqls.extend( f"select upper({char_col}) from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
# sqls.extend(
# (
# f"select length() from {tbname} ",
# f"select length(*) from {tbname} ",
# f"select length(ccccccc) from {tbname} ",
# f"select length(111) from {tbname} ",
# f"select length(c8, 11) from {tbname} ",
# )
# )
# return sqls
def __join_current(self, sql, checkrows):
tdSql.query(sql=sql)
tdSql.checkRows(checkrows)
# tdSql.checkRows(checkrows)
def __test_current(self):
@ -197,10 +165,10 @@ class TDTestCase:
tbname = ["ct1", "ct2", "ct4", "t1"]
for tb in tbname:
for errsql in self.__length_err_check(tb):
tdSql.error(sql=errsql)
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
# for tb in tbname:
# for errsql in self.__join_err_check(tb):
# tdSql.error(sql=errsql)
# tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
def all_test(self):
@ -319,13 +287,13 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.stop(1)
tdDnodes.start(1)
# tdSql.execute("use db")
tdSql.execute("use db")
# tdLog.printNoPrefix("==========step4:after wal, all check again ")
# self.all_test()
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
View File
@ -233,13 +233,13 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.stop(1)
tdDnodes.start(1)
# tdSql.execute("use db")
tdSql.execute("use db")
# tdLog.printNoPrefix("==========step4:after wal, all check again ")
# self.all_test()
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
View File
@ -59,12 +59,9 @@ class TDTestCase:
groups = ["", group_having, group_no_having]
for group_condition in groups:
tdSql.query(f"select {condition} from {tbname} {where_condition} {group_condition} ")
datas = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
lower_data = [ str(data).lower() if data else None for data in datas ]
tdSql.query(f"select lower( {condition} ) from {tbname} {where_condition} {group_condition}")
for i in range(len(lower_data)):
tdSql.checkData(i, 0, lower_data[i] ) if lower_data[i] else tdSql.checkData(i, 0, None)
tdSql.query(f"select lower( {condition} ), {condition} from {tbname} {where_condition} {group_condition}")
for i in range(tdSql.queryRows):
tdSql.checkData(i, 0, str(tdSql.getData(i, 1)).lower() ) if tdSql.getData(i, 1) else tdSql.checkData(i, 0, None)
def __lower_err_check(self,tbname):
sqls = []
@ -230,13 +227,13 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.stop(1)
tdDnodes.start(1)
# tdSql.execute("use db")
tdSql.execute("use db")
# tdLog.printNoPrefix("==========step4:after wal, all check again ")
# self.all_test()
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
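The upper/lower hunks above also rework the correctness oracle: instead of one query to collect raw values and a second to collect function results, the new code selects both the wrapped expression and the raw expression in a single query and compares them row by row, so both columns come from the same result set. A sketch distilling that row-wise check into a hypothetical helper, assuming the tdSql fixture's queryRows/getData/checkData as used above (fn_name would be "lower" or "upper"):

def check_scalar_fn(fn_name, condition, tbname):
    # one query returns both fn(expr) and expr, so the comparison is row-aligned
    tdSql.query(f"select {fn_name}( {condition} ), {condition} from {tbname}")
    for i in range(tdSql.queryRows):
        raw = tdSql.getData(i, 1)
        # NULL input must yield NULL output
        expected = getattr(str(raw), fn_name)() if raw else None
        tdSql.checkData(i, 0, expected)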
View File
@ -0,0 +1,267 @@
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
import datetime
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __ltrim_condition(self): # sourcery skip: extract-method
ltrim_condition = []
for char_col in CHAR_COL:
ltrim_condition.extend(
(
char_col,
f"upper( {char_col} )",
)
)
ltrim_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
ltrim_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
ltrim_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL )
ltrim_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
ltrim_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
ltrim_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# ltrim_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
ltrim_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
ltrim_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
ltrim_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
ltrim_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
ltrim_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
ltrim_condition.append(''' " test1234!@#$%^&*() :'><?/.,][}{ " ''')
return ltrim_condition
def __where_condition(self, col):
# return f" where count({col}) > 0 "
return ""
def __group_condition(self, col, having = ""):
return f" group by {col} having {having}" if having else f" group by {col} "
def __ltrim_check(self, tbname):
ltrim_condition = self.__ltrim_condition()
for condition in ltrim_condition:
where_condition = self.__where_condition(condition)
ltrim_group_having = self.__group_condition(condition, having=f"{condition} is not null " )
ltrim_group_no_having = self.__group_condition(condition)
groups = ["", ltrim_group_having, ltrim_group_no_having]
tdSql.query(f"select ltrim( {condition}) , {condition} from {tbname} ")
for j in range(tdSql.queryRows):
tdSql.checkData(j,0, tdSql.getData(j,1).lstrip()) if tdSql.getData(j,1) else tdSql.checkData(j, 0, None)
[ tdSql.query(f"select ltrim({condition}) from {tbname} {where_condition} {group} ") for group in groups ]
def __ltrim_err_check(self,tbname):
sqls = []
for num_col in NUM_COL:
sqls.extend(
(
f"select ltrim( {num_col} ) from {tbname} ",
f"select ltrim(ceil( {num_col} )) from {tbname} ",
f"select {num_col} from {tbname} group by ltrim( {num_col} ) ",
)
)
sqls.extend( f"select ltrim( {char_col} , {num_col} ) from {tbname} " for char_col in CHAR_COL )
sqls.extend( f"select ltrim( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select ltrim( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select ltrim( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select ltrim( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
sqls.extend( f"select ltrim( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
sqls.extend( f"select ltrim( {num_col}+ {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
sqls.extend( f"select ltrim( {ts_col}+{ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
sqls.extend( f"select ltrim( {bool_col}+ {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select ltrim( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
sqls.extend( f"select ltrim({num_col}, '1') from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select ltrim({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select ltrim({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select ltrim({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
sqls.extend(
(
f"select ltrim() from {tbname} ",
f"select ltrim(*) from {tbname} ",
f"select ltrim(ccccccc) from {tbname} ",
f"select ltrim(111) from {tbname} ",
)
)
return sqls
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
self.__ltrim_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
for errsql in self.__ltrim_err_check(tb):
tdSql.error(sql=errsql)
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
def all_test(self):
self.__test_current()
self.__test_error()
def __create_tb(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
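The expected values in __ltrim_check come from Python's str.lstrip(): for every row the test asserts that ltrim(expr) equals the raw value with leading whitespace removed, and that SQL NULL maps to None. A pure-Python sanity check of that oracle on the literal condition used above (no database needed):

sample = " test1234!@#$%^&*() :'><?/.,][}{ "
stripped = sample.lstrip()
# lstrip() with no argument removes all leading whitespace, which is what the
# test expects LTRIM to do; trailing whitespace must survive untouched.
assert stripped == "test1234!@#$%^&*() :'><?/.,][}{ "
assert stripped.endswith(" ")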
View File
@ -0,0 +1,267 @@
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
import datetime
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __rtrim_condition(self): # sourcery skip: extract-method
rtrim_condition = []
for char_col in CHAR_COL:
rtrim_condition.extend(
(
char_col,
f"upper( {char_col} )",
)
)
rtrim_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
rtrim_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
rtrim_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL )
rtrim_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
rtrim_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
rtrim_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# rtrim_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
rtrim_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
rtrim_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
rtrim_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
rtrim_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL )
rtrim_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
rtrim_condition.append(''' " test1234!@#$%^&*() :'><?/.,][}{ " ''')
return rtrim_condition
def __where_condition(self, col):
# return f" where count({col}) > 0 "
return ""
def __group_condition(self, col, having = ""):
return f" group by {col} having {having}" if having else f" group by {col} "
def __rtrim_check(self, tbname):
rtrim_condition = self.__rtrim_condition()
for condition in rtrim_condition:
where_condition = self.__where_condition(condition)
rtrim_group_having = self.__group_condition(condition, having=f"{condition} is not null " )
rtrim_group_no_having = self.__group_condition(condition)
groups = ["", rtrim_group_having, rtrim_group_no_having]
tdSql.query(f"select rtrim( {condition}) , {condition} from {tbname} ")
for j in range(tdSql.queryRows):
tdSql.checkData(j,0, tdSql.getData(j,1).rstrip()) if tdSql.getData(j,1) else tdSql.checkData(j, 0, None)
[ tdSql.query(f"select rtrim({condition}) from {tbname} {where_condition} {group} ") for group in groups ]
def __rtrim_err_check(self,tbname):
sqls = []
for num_col in NUM_COL:
sqls.extend(
(
f"select rtrim( {num_col} ) from {tbname} ",
f"select rtrim(ceil( {num_col} )) from {tbname} ",
f"select {num_col} from {tbname} group by rtrim( {num_col} ) ",
)
)
sqls.extend( f"select rtrim( {char_col} , {num_col} ) from {tbname} " for char_col in CHAR_COL )
sqls.extend( f"select rtrim( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select rtrim( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select rtrim( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select rtrim( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
sqls.extend( f"select rtrim( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
sqls.extend( f"select rtrim( {num_col}+ {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
sqls.extend( f"select rtrim( {ts_col}+{ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
sqls.extend( f"select rtrim( {bool_col}+ {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select rtrim( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
sqls.extend( f"select rtrim({num_col}, '1') from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select rtrim({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select rtrim({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select rtrim({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
sqls.extend(
(
f"select rtrim() from {tbname} ",
f"select rtrim(*) from {tbname} ",
f"select rtrim(ccccccc) from {tbname} ",
f"select rtrim(111) from {tbname} ",
)
)
return sqls
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
self.__rtrim_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
for errsql in self.__rtrim_err_check(tb):
tdSql.error(sql=errsql)
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
def all_test(self):
self.__test_current()
self.__test_error()
def __create_tb(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
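The "limit" rows inserted by these tests probe each column type's edges: pow(2,31)-pow(2,15) sits just inside the INT range, pow(2,63)-pow(2,30) just inside BIGINT, 32767 and 127 are the SMALLINT/TINYINT maxima, and 3.3*10**38 / 1.3*10**308 approach the FLOAT/DOUBLE ranges. A quick pure-Python check that the signed-integer arithmetic really stays in bounds:

INT_MAX, BIGINT_MAX = 2**31 - 1, 2**63 - 1
assert 0 < pow(2, 31) - pow(2, 15) <= INT_MAX      # positive INT boundary row
assert 0 < pow(2, 63) - pow(2, 30) <= BIGINT_MAX   # positive BIGINT boundary row
# the ct2 rows mirror these values and must stay above the signed minima
assert -(2**31) <= -pow(2, 31) + pow(2, 15)
assert -(2**63) <= -pow(2, 63) + pow(2, 30)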
View File
@ -0,0 +1,271 @@
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
import datetime
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __substr_condition(self): # sourcery skip: extract-method
substr_condition = []
for char_col in CHAR_COL:
substr_condition.extend(
(
char_col,
f"upper( {char_col} )",
)
)
substr_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
substr_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
substr_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL )
substr_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
substr_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
substr_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# substr_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
substr_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
substr_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
substr_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
substr_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
substr_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
substr_condition.append(''' " test1234!@#$%^&*() :'><?/.,][}{ " ''')
return substr_condition
def __where_condition(self, col):
# return f" where count({col}) > 0 "
return ""
def __group_condition(self, col, having = ""):
return f" group by {col} having {having}" if having else f" group by {col} "
def __substr_check(self, tbname, pos, lens=None):
substr_condition = self.__substr_condition()
for condition in substr_condition:
where_condition = self.__where_condition(condition)
substr_group_having = self.__group_condition(condition, having=f"{condition} is not null " )
substr_group_no_having = self.__group_condition(condition)
groups = ["", substr_group_having, substr_group_no_having]
if pos < 1:
tdSql.error(f"select substr( {condition}, {pos}, {lens}) , {condition} from {tbname} ")
tdSql.query(f"select substr( {condition}, {pos}, {lens}) , {condition} from {tbname} ")
for j in range(tdSql.queryRows):
tdSql.checkData(j,0, tdSql.getData(j,1)[pos-1:pos-1+lens]) if tdSql.getData(j,1) else tdSql.checkData(j, 0, None)
[ tdSql.query(f"select substr({condition}, {pos}, {lens}) from {tbname} {where_condition} {group} ") for group in groups ]
def __substr_err_check(self,tbname):
sqls = []
for num_col in NUM_COL:
sqls.extend(
(
f"select substr( {num_col} ) from {tbname} ",
f"select substr(ceil( {num_col} )) from {tbname} ",
f"select {num_col} from {tbname} group by substr( {num_col} ) ",
)
)
sqls.extend( f"select substr( {char_col} + {num_col} ) from {tbname} " for char_col in CHAR_COL )
sqls.extend( f"select substr( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select substr( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select substr( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select substr( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
sqls.extend( f"select substr( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
sqls.extend( f"select substr( {num_col}+ {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
sqls.extend( f"select substr( {ts_col}+{ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
sqls.extend( f"select substr( {bool_col}+ {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select substr( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
sqls.extend( f"select substr({num_col}, '1') from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select substr({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select substr({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select substr({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
sqls.extend(
(
f"select substr() from {tbname} ",
f"select substr(*) from {tbname} ",
f"select substr(ccccccc) from {tbname} ",
f"select substr(111) from {tbname} ",
)
)
return sqls
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
self.__substr_check(tb, 1, 6)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
for tb in tbname:
for errsql in self.__substr_err_check(tb):
tdSql.error(sql=errsql)
self.__substr_check(tb, 0, 6)
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
def all_test(self):
self.__test_current()
self.__test_error()
def __create_tb(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
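SUBSTR takes a 1-based start position and a length, while Python slices are 0-based and half-open, so the oracle in __substr_check translates SUBSTR(s, pos, lens) into s[pos-1 : pos-1+lens]; positions below 1 are expected to be rejected, which is what __test_error exercises with pos=0. A pure-Python sketch of that mapping (the helper name is illustrative):

def sql_substr(s, pos, lens):
    # SQL SUBSTR counts from 1; Python slices count from 0 with an explicit end
    assert pos >= 1, "SUBSTR positions are 1-based; the tests treat pos < 1 as an error"
    return s[pos - 1 : pos - 1 + lens]

assert sql_substr("binary_limit-1", 1, 6) == "binary"
assert sql_substr("binary_limit-1", 8, 5) == "limit"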
View File
@ -59,12 +59,9 @@ class TDTestCase:
groups = ["", group_having, group_no_having]
for group_condition in groups:
tdSql.query(f"select {condition} from {tbname} {where_condition} {group_condition} ")
datas = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
upper_data = [ str(data).upper() if data else None for data in datas ]
tdSql.query(f"select upper( {condition} ) from {tbname} {where_condition} {group_condition}")
for i in range(len(upper_data)):
tdSql.checkData(i, 0, upper_data[i] ) if upper_data[i] else tdSql.checkData(i, 0, None)
tdSql.query(f"select upper( {condition} ), {condition} from {tbname} {where_condition} {group_condition}")
for i in range(tdSql.queryRows):
tdSql.checkData(i, 0, str(tdSql.getData(i, 1)).upper() ) if tdSql.getData(i, 1) else tdSql.checkData(i, 0, None)
def __upper_err_check(self,tbname):
sqls = []
@ -229,13 +226,13 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.stop(1)
tdDnodes.start(1)
# tdSql.execute("use db")
tdSql.execute("use db")
# tdLog.printNoPrefix("==========step4:after wal, all check again ")
# self.all_test()
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
View File
@ -6,11 +6,20 @@ python3 ./test.py -f 0-others/taosShell.py
python3 ./test.py -f 0-others/taosShellError.py
python3 ./test.py -f 0-others/taosShellNetChk.py
python3 ./test.py -f 0-others/telemetry.py
python3 ./test.py -f 0-others/taosdMonitor.py
#python3 ./test.py -f 2-query/between.py
python3 ./test.py -f 2-query/distinct.py
python3 ./test.py -f 2-query/varchar.py
python3 ./test.py -f 2-query/ltrim.py
python3 ./test.py -f 2-query/rtrim.py
python3 ./test.py -f 2-query/length.py
python3 ./test.py -f 2-query/char_length.py
python3 ./test.py -f 2-query/upper.py
python3 ./test.py -f 2-query/lower.py
python3 ./test.py -f 2-query/join.py
# python3 ./test.py -f 2-query/concat.py # after wal ,crash occured
# python3 ./test.py -f 2-query/concat_ws.py
python3 ./test.py -f 2-query/timezone.py
python3 ./test.py -f 2-query/Now.py
@ -23,8 +32,7 @@ python3 ./test.py -f 2-query/last.py
python3 ./test.py -f 2-query/To_unixtimestamp.py
python3 ./test.py -f 2-query/timetruncate.py
python3 ./test.py -f 2-query/Timediff.py
# python3 ./test.py -f 2-query/diff.py
# python3 ./test.py -f 2-query/Timediff.py
#python3 ./test.py -f 2-query/cast.py