From 37bc1bca3697126a545f5d33939b582047e0cea3 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 3 Apr 2023 17:00:52 +0800 Subject: [PATCH 01/16] enh: column/row max length support up to 64K --- docs/examples/c/async_query_example.c | 2 +- docs/examples/c/query_example.c | 2 +- include/libs/function/function.h | 10 +++++----- include/libs/nodes/cmdnodes.h | 2 +- include/libs/parser/parser.h | 2 +- include/util/tdef.h | 11 +++++++---- source/client/src/clientSmlJson.c | 2 +- source/dnode/mnode/impl/src/mndFunc.c | 2 +- source/libs/executor/src/timewindowoperator.c | 2 +- source/libs/function/inc/tpercentile.h | 4 ++-- source/libs/function/src/builtinsimpl.c | 18 +++++++++--------- source/libs/function/src/tpercentile.c | 2 +- source/libs/parser/src/parInsertSml.c | 2 +- source/libs/scalar/src/sclfunc.c | 16 ++++++++-------- source/util/src/tcompare.c | 2 +- tools/shell/src/shellEngine.c | 2 +- 16 files changed, 42 insertions(+), 39 deletions(-) diff --git a/docs/examples/c/async_query_example.c b/docs/examples/c/async_query_example.c index b370420b12..3807c4bfd7 100644 --- a/docs/examples/c/async_query_example.c +++ b/docs/examples/c/async_query_example.c @@ -8,7 +8,7 @@ #include #include -typedef int16_t VarDataLenT; +typedef uint16_t VarDataLenT; #define TSDB_NCHAR_SIZE sizeof(int32_t) #define VARSTR_HEADER_SIZE sizeof(VarDataLenT) diff --git a/docs/examples/c/query_example.c b/docs/examples/c/query_example.c index fcae95bcd4..c7d52115b5 100644 --- a/docs/examples/c/query_example.c +++ b/docs/examples/c/query_example.c @@ -6,7 +6,7 @@ #include #include -typedef int16_t VarDataLenT; +typedef uint16_t VarDataLenT; #define TSDB_NCHAR_SIZE sizeof(int32_t) #define VARSTR_HEADER_SIZE sizeof(VarDataLenT) diff --git a/include/libs/function/function.h b/include/libs/function/function.h index fb6ef26a8a..1411ee7c4b 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -99,11 +99,11 @@ typedef struct SSubsidiaryResInfo { } SSubsidiaryResInfo; typedef struct SResultDataInfo { - int16_t precision; - int16_t scale; - int16_t type; - int16_t bytes; - int32_t interBufSize; + int16_t precision; + int16_t scale; + int16_t type; + uint16_t bytes; + int32_t interBufSize; } SResultDataInfo; #define GET_RES_INFO(ctx) ((ctx)->resultInfo) diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index c716d77b32..e276373f2d 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -30,7 +30,7 @@ extern "C" { #define SHOW_CREATE_DB_RESULT_COLS 2 #define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE) -#define SHOW_CREATE_DB_RESULT_FIELD2_LEN (TSDB_MAX_BINARY_LEN + VARSTR_HEADER_SIZE) +#define SHOW_CREATE_DB_RESULT_FIELD2_LEN (TSDB_MAX_BINARY_LEN) #define SHOW_CREATE_TB_RESULT_COLS 2 #define SHOW_CREATE_TB_RESULT_FIELD1_LEN (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE) diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index 558203052f..94fb6824d2 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -114,7 +114,7 @@ STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta); int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols, STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, - char* msgBuf, int16_t msgBufLen); + char* msgBuf, int32_t msgBufLen); int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash); int rawBlockBindData(SQuery *query, STableMeta* 
pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength); diff --git a/include/util/tdef.h b/include/util/tdef.h index e5000891c9..8c5c5502fd 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -236,8 +236,11 @@ typedef enum ELogicConditionType { * - Firstly, we use 65531(65535 - 4), as the SDataRow/SKVRow contains 4 bits header. * - Secondly, if all cols are VarDataT type except primary key, we need 4 bits to store the offset, thus * the final value is 65531-(4096-1)*4 = 49151. + * + * History value:49151/65531 + * - 65531 compatible with 2.0 */ -#define TSDB_MAX_BYTES_PER_ROW 49151 +#define TSDB_MAX_BYTES_PER_ROW 65531 #define TSDB_MAX_TAGS_LEN 16384 #define TSDB_MAX_TAGS 128 @@ -406,9 +409,9 @@ typedef enum ELogicConditionType { #define TSDB_EXPLAIN_RESULT_ROW_SIZE (16 * 1024) #define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN" -#define TSDB_MAX_FIELD_LEN 16384 -#define TSDB_MAX_BINARY_LEN (TSDB_MAX_FIELD_LEN - TSDB_KEYSIZE) // keep 16384 -#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_FIELD_LEN - TSDB_KEYSIZE) // keep 16384 +#define TSDB_MAX_FIELD_LEN 65519 // compatible with 2.0 +#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN // 16384:65519 +#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN // 16384:65519 #define PRIMARYKEY_TIMESTAMP_COL_ID 1 #define COL_REACH_END(colId, maxColId) ((colId) > (maxColId)) diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c index 9fd98e33b7..b0ae316031 100644 --- a/source/client/src/clientSmlJson.c +++ b/source/client/src/clientSmlJson.c @@ -575,7 +575,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) { uError("OTD:invalid type(%s) for JSON String", typeStr); return TSDB_CODE_TSC_INVALID_JSON_TYPE; } - pVal->length = (int16_t)strlen(value->valuestring); + pVal->length = strlen(value->valuestring); if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 7a475c61b6..f4451d1630 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -475,7 +475,7 @@ RETRIEVE_FUNC_OVER: return code; } -static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int16_t len) { +static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int32_t len) { char *msg = "unknown"; if (type >= sizeof(tDataTypes) / sizeof(tDataTypes[0])) { return msg; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index fef588a503..3272eefba4 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1111,7 +1111,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI bool masterScan = true; int32_t numOfOutput = pOperator->exprSupp.numOfExprs; - int16_t bytes = pStateColInfoData->info.bytes; + int32_t bytes = pStateColInfoData->info.bytes; SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->tsSlotId); TSKEY* tsList = (TSKEY*)pColInfoData->pData; diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h index 80159460f5..65b7b38a05 100644 --- a/source/libs/function/inc/tpercentile.h +++ b/source/libs/function/inc/tpercentile.h @@ -53,7 +53,7 @@ typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *va typedef struct 
tMemBucket { int16_t numOfSlots; int16_t type; - int16_t bytes; + int32_t bytes; int32_t total; int32_t elemPerPage; // number of elements for each object int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result @@ -67,7 +67,7 @@ typedef struct tMemBucket { SHashObj *groupPagesMap; // disk page map for different groups; } tMemBucket; -tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval); +tMemBucket *tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, double maxval); void tMemBucketDestroy(tMemBucket *pBucket); diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 4760358e0d..62d1f6244b 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -196,11 +196,11 @@ typedef struct SMavgInfo { } SMavgInfo; typedef struct SSampleInfo { - int32_t samples; - int32_t totalPoints; - int32_t numSampled; - uint8_t colType; - int16_t colBytes; + int32_t samples; + int32_t totalPoints; + int32_t numSampled; + uint8_t colType; + uint16_t colBytes; STuplePos nullTuplePos; bool nullTupleSaved; @@ -220,7 +220,7 @@ typedef struct STailInfo { int32_t numAdded; int32_t offset; uint8_t colType; - int16_t colBytes; + uint16_t colBytes; STailItem** pItems; } STailInfo; @@ -233,7 +233,7 @@ typedef struct SUniqueItem { typedef struct SUniqueInfo { int32_t numOfPoints; uint8_t colType; - int16_t colBytes; + uint16_t colBytes; bool hasNull; // null is not hashable, handle separately SHashObj* pHash; char pItems[]; @@ -247,13 +247,13 @@ typedef struct SModeItem { typedef struct SModeInfo { uint8_t colType; - int16_t colBytes; + uint16_t colBytes; SHashObj* pHash; STuplePos nullTuplePos; bool nullTupleSaved; - char* buf; // serialize data buffer + char* buf; // serialize data buffer } SModeInfo; typedef struct SDerivInfo { diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index de381fadbd..a18051e0b6 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -236,7 +236,7 @@ static void resetSlotInfo(tMemBucket *pBucket) { } } -tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval) { +tMemBucket *tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, double maxval) { tMemBucket *pBucket = (tMemBucket *)taosMemoryCalloc(1, sizeof(tMemBucket)); if (pBucket == NULL) { return NULL; diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index 106ee641af..c05ce02aa2 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -242,7 +242,7 @@ end: int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols, STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, - char* msgBuf, int16_t msgBufLen) { + char* msgBuf, int32_t msgBufLen) { SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen}; SSchema* pTagsSchema = getTableTagSchema(pTableMeta); diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 195a08525c..23f07fb332 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -12,7 +12,7 @@ typedef double (*_double_fn)(double); typedef double (*_double_fn_2)(double, double); typedef int (*_conv_fn)(int); typedef void (*_trim_fn)(char *, char *, int32_t, int32_t); -typedef int16_t 
(*_len_fn)(char *, int32_t); +typedef uint16_t (*_len_fn)(char *, int32_t); /** Math functions **/ static double tlog(double v) { return log(v); } @@ -286,9 +286,9 @@ static int32_t doScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarP } /** String functions **/ -static int16_t tlength(char *input, int32_t type) { return varDataLen(input); } +static VarDataLenT tlength(char *input, int32_t type) { return varDataLen(input); } -static int16_t tcharlength(char *input, int32_t type) { +static VarDataLenT tcharlength(char *input, int32_t type) { if (type == TSDB_DATA_TYPE_VARCHAR) { return varDataLen(input); } else { // NCHAR @@ -377,7 +377,7 @@ static int32_t doLengthFunction(SScalarParam *pInput, int32_t inputNum, SScalarP return TSDB_CODE_SUCCESS; } -static int32_t concatCopyHelper(const char *input, char *output, bool hasNchar, int32_t type, int16_t *dataLen) { +static int32_t concatCopyHelper(const char *input, char *output, bool hasNchar, int32_t type, VarDataLenT *dataLen) { if (hasNchar && type == TSDB_DATA_TYPE_VARCHAR) { TdUcs4 *newBuf = taosMemoryCalloc((varDataLen(input) + 1) * TSDB_NCHAR_SIZE, 1); int32_t len = varDataLen(input); @@ -457,7 +457,7 @@ int32_t concatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu continue; } - int16_t dataLen = 0; + VarDataLenT dataLen = 0; for (int32_t i = 0; i < inputNum; ++i) { int32_t rowIdx = (pInput[i].numOfRows == 1) ? 0 : k; input[i] = colDataGetData(pInputData[i], rowIdx); @@ -526,8 +526,8 @@ int32_t concatWsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p continue; } - int16_t dataLen = 0; - bool hasNull = false; + VarDataLenT dataLen = 0; + bool hasNull = false; for (int32_t i = 1; i < inputNum; ++i) { if (colDataIsNull_s(pInputData[i], k) || IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[i]))) { hasNull = true; @@ -695,7 +695,7 @@ int32_t substrFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu /** Conversion functions **/ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int16_t inputType = GET_PARAM_TYPE(&pInput[0]); - int16_t inputLen = GET_PARAM_BYTES(&pInput[0]); + int32_t inputLen = GET_PARAM_BYTES(&pInput[0]); int16_t outputType = GET_PARAM_TYPE(&pOutput[0]); int64_t outputLen = GET_PARAM_BYTES(&pOutput[0]); diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index f8f78ae6a5..be2ad730f7 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -1241,7 +1241,7 @@ int32_t taosArrayCompareString(const void *a, const void *b) { int32_t comparestrPatternMatch(const void *pLeft, const void *pRight) { SPatternCompareInfo pInfo = PATTERN_COMPARE_INFO_INITIALIZER; - ASSERT(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN); + ASSERT(varDataTLen(pRight) <= TSDB_MAX_FIELD_LEN); size_t pLen = varDataLen(pRight); size_t sz = varDataLen(pLeft); diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index a87ba16267..165b00dc68 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -695,7 +695,7 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) { case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_JSON: { - int16_t bytes = field->bytes * TSDB_NCHAR_SIZE; + uint16_t bytes = field->bytes * TSDB_NCHAR_SIZE; if (bytes > shell.args.displayWidth) { return TMAX(shell.args.displayWidth, width); } else { From 303bc7dc230cbed51ff941ad36e3e11930dbc41e Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 3 Apr 2023 17:12:14 +0800 Subject: [PATCH 02/16] enh: column/row max length support up 
to 64K --- include/util/tdef.h | 17 ++++------------- source/client/src/clientSmlJson.c | 2 +- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/include/util/tdef.h b/include/util/tdef.h index 8c5c5502fd..5782da35bc 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -231,16 +231,7 @@ typedef enum ELogicConditionType { #define TSDB_QUERY_ID_LEN 26 #define TSDB_TRANS_OPER_LEN 16 -/** - * In some scenarios uint16_t (0~65535) is used to store the row len. - * - Firstly, we use 65531(65535 - 4), as the SDataRow/SKVRow contains 4 bits header. - * - Secondly, if all cols are VarDataT type except primary key, we need 4 bits to store the offset, thus - * the final value is 65531-(4096-1)*4 = 49151. - * - * History value:49151/65531 - * - 65531 compatible with 2.0 - */ -#define TSDB_MAX_BYTES_PER_ROW 65531 +#define TSDB_MAX_BYTES_PER_ROW 65531 // 49151:65531 #define TSDB_MAX_TAGS_LEN 16384 #define TSDB_MAX_TAGS 128 @@ -409,9 +400,9 @@ typedef enum ELogicConditionType { #define TSDB_EXPLAIN_RESULT_ROW_SIZE (16 * 1024) #define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN" -#define TSDB_MAX_FIELD_LEN 65519 // compatible with 2.0 -#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN // 16384:65519 -#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN // 16384:65519 +#define TSDB_MAX_FIELD_LEN 65519 // 16384:65519 +#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519 +#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519 #define PRIMARYKEY_TIMESTAMP_COL_ID 1 #define COL_REACH_END(colId, maxColId) ((colId) > (maxColId)) diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c index b0ae316031..c3a6e15697 100644 --- a/source/client/src/clientSmlJson.c +++ b/source/client/src/clientSmlJson.c @@ -575,7 +575,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) { uError("OTD:invalid type(%s) for JSON String", typeStr); return TSDB_CODE_TSC_INVALID_JSON_TYPE; } - pVal->length = strlen(value->valuestring); + pVal->length = (uint16_t)strlen(value->valuestring); if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; From 2d4dd64584fb3bc6516bc1aa63570223efd35273 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 24 Apr 2023 20:21:28 +0800 Subject: [PATCH 03/16] chore: more code --- include/libs/nodes/cmdnodes.h | 2 +- include/util/tdef.h | 7 + source/client/src/clientSml.c | 7 +- source/client/src/clientSmlJson.c | 2 + source/client/src/clientSmlLine.c | 5 +- source/client/src/clientSmlTelnet.c | 3 +- source/libs/parser/src/parAstCreater.c | 2 +- source/libs/parser/src/parTranslater.c | 10 +- tests/parallel_test/cases.task | 1 + .../1-insert/influxdb_line_taosc_insert.py | 17 +- tests/system-test/1-insert/stmt_error.py | 225 ++++++++++++++++++ tests/system-test/runAllOne.sh | 1 + tests/system-test/win-test-file | 1 + 13 files changed, 267 insertions(+), 16 deletions(-) create mode 100644 tests/system-test/1-insert/stmt_error.py diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 0a9893907b..2323d044ec 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -30,7 +30,7 @@ extern "C" { #define SHOW_CREATE_DB_RESULT_COLS 2 #define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE) -#define SHOW_CREATE_DB_RESULT_FIELD2_LEN (TSDB_MAX_BINARY_LEN) +#define SHOW_CREATE_DB_RESULT_FIELD2_LEN (TSDB_MAX_BINARY_LEN + VARSTR_HEADER_SIZE) #define SHOW_CREATE_TB_RESULT_COLS 2 
#define SHOW_CREATE_TB_RESULT_FIELD1_LEN (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE) diff --git a/include/util/tdef.h b/include/util/tdef.h index ead649a51c..7a2c57b9f8 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -22,6 +22,13 @@ extern "C" { #endif + +#if 1 +#define TASSERT assert(0); +#else +#define TASSERT +#endif + #define TSDB__packed #define TSKEY int64_t diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 17150286e1..0bca52449c 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -565,8 +565,8 @@ static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) { } if (type == TSDB_DATA_TYPE_BINARY && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE; - } else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { - result = (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; + } else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { + result = (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; } if (type == TSDB_DATA_TYPE_NCHAR) { @@ -637,6 +637,9 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO field.bytes = getBytes(kv->type, kv->length); memcpy(field.name, kv->key, kv->keyLen); taosArrayPush(results, &field); + if(numOfCols == 0) { + + } } else if (action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE || action == SCHEMA_ACTION_CHANGE_TAG_SIZE) { uint16_t *index = (uint16_t *)taosHashGet(schemaHash, kv->key, kv->keyLen); if (index == NULL) { diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c index c3a6e15697..b50a29d4fb 100644 --- a/source/client/src/clientSmlJson.c +++ b/source/client/src/clientSmlJson.c @@ -578,10 +578,12 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) { pVal->length = (uint16_t)strlen(value->valuestring); if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { + TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } if (pVal->type == TSDB_DATA_TYPE_NCHAR && pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { + TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 335e3a1dc7..7dd087039b 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -81,6 +81,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { pVal->type = TSDB_DATA_TYPE_BINARY; pVal->length -= BINARY_ADD_LEN; if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { + TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } pVal->value += (BINARY_ADD_LEN - 1); @@ -94,6 +95,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { pVal->type = TSDB_DATA_TYPE_NCHAR; pVal->length -= NCHAR_ADD_LEN; if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { + TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } pVal->value += (NCHAR_ADD_LEN - 1); @@ -236,7 +238,8 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin PROCESS_SLASH(value, valueLen) } - if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) { + if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) { + TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } diff --git 
a/source/client/src/clientSmlTelnet.c b/source/client/src/clientSmlTelnet.c index 036442573d..9baa1e5758 100644 --- a/source/client/src/clientSmlTelnet.c +++ b/source/client/src/clientSmlTelnet.c @@ -158,7 +158,8 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS return TSDB_CODE_TSC_INVALID_VALUE; } - if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) { + if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) { + TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 2afe34c1f7..c53721f865 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1208,7 +1208,7 @@ SDataType createDataType(uint8_t type) { } SDataType createVarLenDataType(uint8_t type, const SToken* pLen) { - SDataType dt = {.type = type, .precision = 0, .scale = 0, .bytes = taosStr2Int16(pLen->z, NULL, 10)}; + SDataType dt = {.type = type, .precision = 0, .scale = 0, .bytes = taosStr2Int32(pLen->z, NULL, 10)}; return dt; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 25e92a55ec..20ca987250 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4498,8 +4498,9 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); } if (TSDB_CODE_SUCCESS == code) { - if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_BINARY_LEN) || - (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_NCHAR_LEN)) { + if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_TAGS_LEN) || + (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_TAGS_LEN)) { + TASSERT code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } } @@ -4551,6 +4552,7 @@ static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, in if (TSDB_CODE_SUCCESS == code) { if ((TSDB_DATA_TYPE_VARCHAR == pCol->dataType.type && calcTypeBytes(pCol->dataType) > TSDB_MAX_BINARY_LEN) || (TSDB_DATA_TYPE_NCHAR == pCol->dataType.type && calcTypeBytes(pCol->dataType) > TSDB_MAX_NCHAR_LEN)) { + TASSERT code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } } @@ -5236,6 +5238,7 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType) { if (calcTypeBytes(pStmt->dataType) > TSDB_MAX_FIELD_LEN) { + TASSERT return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } @@ -5245,7 +5248,8 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable } if (TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) { - if (calcTypeBytes(pStmt->dataType) > TSDB_MAX_FIELD_LEN) { + if (calcTypeBytes(pStmt->dataType) > TSDB_MAX_TAGS_LEN) { + TASSERT return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index dda4ec3e84..bf91f7c8c7 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -338,6 +338,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_childtable.py 
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_normaltable.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/keep_expired.py +,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/stmt_error.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/drop.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/drop.py -N 3 -M 3 -i False -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join2.py diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py index 6372502484..b53abc41aa 100644 --- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py +++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py @@ -673,11 +673,11 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # # # binary - # stb_name = tdCom.getLongName(7, "letters") - # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}" 1626006833639000000' - # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name},t0=t c0=f,c11=f,c2=f,c3=f,c4=f,c5=f,c6=f,c7=f,c8=f,c9=f,c10=f,c12=f,c1="{tdCom.getLongName(65519, "letters")}" 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16375, "letters")}" 1626006833639000000' + # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(65514, "letters")}" 1626006833639000000' # try: # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # except SchemalessError as err: @@ -884,7 +884,7 @@ class TDTestCase: tdSql.checkRows(2) tdSql.checkNotEqual(tb_name1, tb_name3) - # * tag binary max is 16384, col+ts binary max 49151 + # * tag binary max is 16384, col+ts binary max 65531 def tagColBinaryMaxLengthCheckCase(self): """ every binary and nchar must be length+2 @@ -911,7 +911,10 @@ class TDTestCase: tdSql.checkRows(2) # # * check col,col+ts max in describe ---> 16143 - input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(12, "letters")}" 1626006833639000000' + input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) tdSql.query(f"select * from {stb_name}") @@ -1280,7 +1283,7 @@ class TDTestCase: self.nowTsCheckCase() self.dateFormatTsCheckCase() self.illegalTsCheckCase() - # self.tagValueLengthCheckCase() + self.tagValueLengthCheckCase() self.colValueLengthCheckCase() self.tagColIllegalValueCheckCase() self.duplicateIdTagColInsertCheckCase() diff --git a/tests/system-test/1-insert/stmt_error.py b/tests/system-test/1-insert/stmt_error.py new file mode 100644 index 0000000000..b77cd488f7 --- /dev/null +++ b/tests/system-test/1-insert/stmt_error.py @@ -0,0 +1,225 @@ +# encoding:UTF-8 +from taos import * + +from ctypes import * +from datetime import datetime +import taos + +import taos +import time 
+ +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + +class TDTestCase: + def __init__(self): + self.err_case = 0 + self.curret_case = 0 + + def caseDescription(self): + + ''' + case1 : [TD-11899] : this is an test case for check stmt error use . + ''' + return + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def conn(self): + # type: () -> taos.TaosConnection + return connect() + + def test_stmt_insert(self,conn): + # type: (TaosConnection) -> None + + dbname = "pytest_taos_stmt" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.select_db(dbname) + + conn.execute( + "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(65059), nn nchar(100), tt timestamp)", + ) + conn.load_table_info("log") + + + stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + params = new_bind_params(16) + params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds) + params[1].bool(True) + params[2].tinyint(None) + params[3].tinyint(2) + params[4].smallint(3) + params[5].int(4) + params[6].bigint(5) + params[7].tinyint_unsigned(6) + params[8].smallint_unsigned(7) + params[9].int_unsigned(8) + params[10].bigint_unsigned(9) + params[11].float(10.1) + params[12].double(10.11) + binaryStr = '123456789' + for i in range(1301): + binaryStr += "1234567890abcdefghij1234567890abcdefghij12345hello" + params[13].binary(binaryStr) + params[14].nchar("stmt") + params[15].timestamp(1626861392589, PrecisionEnum.Milliseconds) + + stmt.bind_param(params) + stmt.execute() + + assert stmt.affected_rows == 1 + stmt.close() + + querystmt=conn.statement("select ?, bo, nil, ti, si, ii,bi, tu, su, iu, bu, ff, dd, bb, nn, tt from log") + queryparam=new_bind_params(1) + print(type(queryparam)) + queryparam[0].binary("ts") + querystmt.bind_param(queryparam) + querystmt.execute() + result=querystmt.use_result() + + row=result.fetch_all() + print(row) + + assert row[0][1] == True + assert row[0][2] == None + for i in range(3, 10): + assert row[0][i] == i - 1 + #float == may not work as expected + # assert row[0][11] == c_float(10.1) + assert row[0][12] == 10.11 + assert row[0][13][65054:] == "hello" + assert row[0][14] == "stmt" + + conn.execute("drop database if exists %s" % dbname) + conn.close() + + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def test_stmt_insert_error(self,conn): + # type: (TaosConnection) -> None + + dbname = "pytest_taos_stmt_error" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.select_db(dbname) + + conn.execute( + "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , error_data int )", + ) + conn.load_table_info("log") + + + stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,1000)") + params = new_bind_params(16) + params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds) + 
params[1].bool(True) + params[2].tinyint(None) + params[3].tinyint(2) + params[4].smallint(3) + params[5].int(4) + params[6].bigint(5) + params[7].tinyint_unsigned(6) + params[8].smallint_unsigned(7) + params[9].int_unsigned(8) + params[10].bigint_unsigned(9) + params[11].float(10.1) + params[12].double(10.11) + params[13].binary("hello") + params[14].nchar("stmt") + params[15].timestamp(1626861392589, PrecisionEnum.Milliseconds) + + stmt.bind_param(params) + stmt.execute() + + conn.close() + + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def test_stmt_insert_error_null_timestamp(self,conn): + + dbname = "pytest_taos_stmt_error_null_ts" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.execute("alter database %s keep 36500" % dbname) + conn.select_db(dbname) + + conn.execute("create stable STB(ts timestamp, n int) tags(b int)") + + stmt = conn.statement("insert into ? using STB tags(?) values(?, ?)") + params = new_bind_params(1) + params[0].int(4); + stmt.set_tbname_tags("ct", params); + + multi_params = new_multi_binds(2); + multi_params[0].timestamp([9223372036854775808]) + multi_params[1].int([123]) + stmt.bind_param_batch(multi_params) + + stmt.execute() + result = stmt.use_result() + + result.close() + stmt.close() + + stmt = conn.statement("select * from STB") + stmt.execute() + result = stmt.use_result() + print(result.affected_rows) + row = result.next() + print(row) + + result.close() + stmt.close() + conn.close() + + except Exception as err: + conn.close() + raise err + + def run(self): + + self.test_stmt_insert(self.conn()) + try: + self.test_stmt_insert_error(self.conn()) + except Exception as error : + + if str(error)=='[0x0200]: no mix usage for ? 
and values': + tdLog.info('=========stmt error occured for bind part column ==============') + else: + tdLog.exit("expect error(%s) not occured" % str(error)) + + try: + self.test_stmt_insert_error_null_timestamp(self.conn()) + tdLog.exit("expect error not occured - 1") + except Exception as error : + if str(error)=='[0x060b]: Timestamp data out of range': + tdLog.info('=========stmt error occured for bind part column(NULL Timestamp) ==============') + else: + tdLog.exit("expect error(%s) not occured - 2" % str(error)) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/runAllOne.sh b/tests/system-test/runAllOne.sh index 5a8d358d98..40addb0ca5 100644 --- a/tests/system-test/runAllOne.sh +++ b/tests/system-test/runAllOne.sh @@ -287,6 +287,7 @@ python3 ./test.py -f 1-insert/tb_100w_data_order.py -P python3 ./test.py -f 1-insert/delete_childtable.py -P python3 ./test.py -f 1-insert/delete_normaltable.py -P python3 ./test.py -f 1-insert/keep_expired.py -P +python3 ./test.py -f 1-insert/stmt_error.py -P python3 ./test.py -f 1-insert/drop.py -P python3 ./test.py -f 2-query/join2.py -P python3 ./test.py -f 2-query/union1.py -P diff --git a/tests/system-test/win-test-file b/tests/system-test/win-test-file index 7e68c40fd8..8b7b7d868d 100644 --- a/tests/system-test/win-test-file +++ b/tests/system-test/win-test-file @@ -218,6 +218,7 @@ python3 ./test.py -f 1-insert/delete_stable.py python3 ./test.py -f 1-insert/delete_childtable.py python3 ./test.py -f 1-insert/delete_normaltable.py python3 ./test.py -f 1-insert/keep_expired.py +python3 ./test.py -f 1-insert/stmt_error.py python3 ./test.py -f 1-insert/drop.py python3 ./test.py -f 1-insert/drop.py -N 3 -M 3 -i False -n 3 python3 ./test.py -f 2-query/join2.py From b059cc4ee13a9ad4c5970cf9c6dc671bc701a3e1 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 25 Apr 2023 10:59:02 +0800 Subject: [PATCH 04/16] chore: code optimization --- source/client/inc/clientInt.h | 2 +- source/client/src/clientEnv.c | 2 +- source/client/src/clientHb.c | 13 +++++++++---- source/client/src/clientMain.c | 10 +++++++--- tests/script/api/passwdTest.c | 7 +++---- 5 files changed, 21 insertions(+), 13 deletions(-) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 8e20d7d275..ab8e20b85e 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -361,7 +361,7 @@ void stopAllRequests(SHashObj* pRequests); // conn level int hbRegisterConn(SAppHbMgr* pAppHbMgr, int64_t tscRefId, int64_t clusterId, int8_t connType); -void hbDeregisterConn(SAppHbMgr* pAppHbMgr, SClientHbKey connKey, void* param); +void hbDeregisterConn(STscObj* pTscObj, SClientHbKey connKey); typedef struct SSqlCallbackWrapper { SParseContext* pParseCtx; diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 99569fdb57..ba02ae6731 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -239,7 +239,7 @@ void destroyTscObj(void *pObj) { tscTrace("begin to destroy tscObj %" PRIx64 " p:%p", tscId, pTscObj); SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType}; - hbDeregisterConn(pTscObj->pAppInfo->pAppHbMgr, connKey, pTscObj->passInfo.fp); + hbDeregisterConn(pTscObj, connKey); destroyAllRequests(pTscObj->pRequests); taosHashCleanup(pTscObj->pRequests); diff --git a/source/client/src/clientHb.c 
b/source/client/src/clientHb.c index 79435da89f..4240a8510d 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -994,6 +994,7 @@ SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) { // init stat pAppHbMgr->startTime = taosGetTimestampMs(); pAppHbMgr->connKeyCnt = 0; + pAppHbMgr->passKeyCnt = 0; pAppHbMgr->reportCnt = 0; pAppHbMgr->reportBytes = 0; pAppHbMgr->key = taosStrdup(key); @@ -1154,7 +1155,8 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in } } -void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, void *param) { +void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) { + SAppHbMgr *pAppHbMgr = pTscObj->pAppInfo->pAppHbMgr; SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey)); if (pReq) { tFreeClientHbReq(pReq); @@ -1167,7 +1169,10 @@ void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, void *param) { } atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1); - if (param) { - atomic_sub_fetch_32(&pAppHbMgr->passKeyCnt, 1); + + taosThreadMutexLock(&pTscObj->mutex); + if (pTscObj->passInfo.fp) { + int32_t cnt = atomic_sub_fetch_32(&pAppHbMgr->passKeyCnt, 1); } -} + taosThreadMutexUnlock(&pTscObj->mutex); +} \ No newline at end of file diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index aed11b4fb1..55465f227e 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -134,11 +134,15 @@ int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type) switch (type) { case TAOS_NOTIFY_PASSVER: { + taosThreadMutexLock(&pObj->mutex); + if (fp && !pObj->passInfo.fp) { + atomic_add_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1); + } else if (!fp && pObj->passInfo.fp) { + atomic_sub_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1); + } pObj->passInfo.fp = fp; pObj->passInfo.param = param; - if (fp) { - atomic_add_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1); - } + taosThreadMutexUnlock(&pObj->mutex); break; } default: { diff --git a/tests/script/api/passwdTest.c b/tests/script/api/passwdTest.c index 8a2b0a0390..1bf4987689 100644 --- a/tests/script/api/passwdTest.c +++ b/tests/script/api/passwdTest.c @@ -33,8 +33,7 @@ #define nUser 10 #define USER_LEN 24 -void Test(TAOS *taos, char *qstr); -void createUers(TAOS *taos, const char *host, char *qstr); +void createUsers(TAOS *taos, const char *host, char *qstr); void passVerTestMulti(const char *host, char *qstr); int nPassVerNotified = 0; @@ -98,14 +97,14 @@ int main(int argc, char *argv[]) { printf("failed to connect to server, reason:%s\n", "null taos" /*taos_errstr(taos)*/); exit(1); } - createUers(taos, argv[1], qstr); + createUsers(taos, argv[1], qstr); passVerTestMulti(argv[1], qstr); taos_close(taos); taos_cleanup(); } -void createUers(TAOS *taos, const char *host, char *qstr) { +void createUsers(TAOS *taos, const char *host, char *qstr) { // users for (int i = 0; i < nUser; ++i) { sprintf(users[i], "user%d", i); From a6e37622ff1a8fe181a7ecc8044aaed000af7d6d Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 25 Apr 2023 22:15:06 +0800 Subject: [PATCH 05/16] chore: more code --- include/util/tdef.h | 7 -- source/client/src/clientSml.c | 32 +++++- source/client/src/clientSmlJson.c | 2 - source/client/src/clientSmlLine.c | 3 - source/client/src/clientSmlTelnet.c | 1 - source/libs/parser/src/parTranslater.c | 4 - tests/system-test/1-insert/boundary.py | 56 ++++++++++ .../1-insert/influxdb_line_taosc_insert.py | 100 
++++++++++-------- tests/system-test/1-insert/stmt_error.py | 1 - 9 files changed, 142 insertions(+), 64 deletions(-) diff --git a/include/util/tdef.h b/include/util/tdef.h index 7a2c57b9f8..ead649a51c 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -22,13 +22,6 @@ extern "C" { #endif - -#if 1 -#define TASSERT assert(0); -#else -#define TASSERT -#endif - #define TSDB__packed #define TSKEY int64_t diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 0bca52449c..84c6fffdac 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -624,6 +624,10 @@ static int32_t getBytes(uint8_t type, int32_t length) { static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols, SArray *results, int32_t numOfCols, bool isTag) { + bool check = numOfCols == 0 ? true : false; + int32_t maxLen = isTag ? TSDB_MAX_TAGS_LEN : TSDB_MAX_BYTES_PER_ROW; + + int32_t len = 0; for (int j = 0; j < taosArrayGetSize(cols); ++j) { SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, j); ESchemaAction action = SCHEMA_ACTION_NULL; @@ -635,11 +639,17 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO SField field = {0}; field.type = kv->type; field.bytes = getBytes(kv->type, kv->length); + if (check) { + len += field.bytes; + if (len > maxLen) { + code = TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; + uError("smlBuildFieldsList add %s failed since %s", isTag ? "tag" : "col", tstrerror(code)); + return code; + } + } + memcpy(field.name, kv->key, kv->keyLen); taosArrayPush(results, &field); - if(numOfCols == 0) { - - } } else if (action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE || action == SCHEMA_ACTION_CHANGE_TAG_SIZE) { uint16_t *index = (uint16_t *)taosHashGet(schemaHash, kv->key, kv->keyLen); if (index == NULL) { @@ -650,6 +660,14 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO if (isTag) newIndex -= numOfCols; SField *field = (SField *)taosArrayGet(results, newIndex); field->bytes = getBytes(kv->type, kv->length); + if (check) { + len += (kv->length - schemaField[*index].bytes + VARSTR_HEADER_SIZE); + if (len > maxLen) { + code = TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; + uError("smlBuildFieldsList change %s failed since %s", isTag ? "tag" : "col", tstrerror(code)); + return code; + } + } } } return TSDB_CODE_SUCCESS; @@ -780,11 +798,15 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { code = smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlBuildFieldsList tag1 failed. %s", info->id, pName.tname); + taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); goto end; } code = smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlBuildFieldsList col1 failed. %s", info->id, pName.tname); + taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); goto end; } code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE); @@ -836,6 +858,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { pTableMeta->tableInfo.numOfColumns, true); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlBuildFieldsList tag2 failed. 
%s", info->id, pName.tname); + taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); goto end; } @@ -890,6 +914,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { pTableMeta->tableInfo.numOfColumns, false); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlBuildFieldsList col2 failed. %s", info->id, pName.tname); + taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); goto end; } diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c index b50a29d4fb..c3a6e15697 100644 --- a/source/client/src/clientSmlJson.c +++ b/source/client/src/clientSmlJson.c @@ -578,12 +578,10 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) { pVal->length = (uint16_t)strlen(value->valuestring); if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { - TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } if (pVal->type == TSDB_DATA_TYPE_NCHAR && pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { - TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 7dd087039b..db2f2bd9fe 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -81,7 +81,6 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { pVal->type = TSDB_DATA_TYPE_BINARY; pVal->length -= BINARY_ADD_LEN; if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { - TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } pVal->value += (BINARY_ADD_LEN - 1); @@ -95,7 +94,6 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { pVal->type = TSDB_DATA_TYPE_NCHAR; pVal->length -= NCHAR_ADD_LEN; if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { - TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } pVal->value += (NCHAR_ADD_LEN - 1); @@ -239,7 +237,6 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin } if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) { - TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } diff --git a/source/client/src/clientSmlTelnet.c b/source/client/src/clientSmlTelnet.c index 9baa1e5758..3a9aad4e81 100644 --- a/source/client/src/clientSmlTelnet.c +++ b/source/client/src/clientSmlTelnet.c @@ -159,7 +159,6 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS } if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) { - TASSERT return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 20ca987250..b4b499a789 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4500,7 +4500,6 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN if (TSDB_CODE_SUCCESS == code) { if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_TAGS_LEN) || (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_TAGS_LEN)) { - TASSERT code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } } @@ -4552,7 +4551,6 @@ static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, in if (TSDB_CODE_SUCCESS == code) { if ((TSDB_DATA_TYPE_VARCHAR == pCol->dataType.type && calcTypeBytes(pCol->dataType) > TSDB_MAX_BINARY_LEN) || (TSDB_DATA_TYPE_NCHAR == 
pCol->dataType.type && calcTypeBytes(pCol->dataType) > TSDB_MAX_NCHAR_LEN)) { - TASSERT code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } } @@ -5238,7 +5236,6 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType) { if (calcTypeBytes(pStmt->dataType) > TSDB_MAX_FIELD_LEN) { - TASSERT return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } @@ -5249,7 +5246,6 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable if (TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) { if (calcTypeBytes(pStmt->dataType) > TSDB_MAX_TAGS_LEN) { - TASSERT return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } diff --git a/tests/system-test/1-insert/boundary.py b/tests/system-test/1-insert/boundary.py index d3742ef5f9..29dcbc7c46 100644 --- a/tests/system-test/1-insert/boundary.py +++ b/tests/system-test/1-insert/boundary.py @@ -166,6 +166,61 @@ class TDTestCase: else: tdLog.exit("error info is not true") tdSql.execute('drop database db') + + def row_col_tag_maxlen_check(self): + tdSql.prepare() + tdSql.execute('use db') + tdSql.execute('create table if not exists stb1 (ts timestamp, c1 int,c2 binary(1000)) tags (city binary(16382))') + tdSql.error('create table if not exists stb1 (ts timestamp, c1 int,c2 binary(1000)) tags (city binary(16383))') + tdSql.execute('create table if not exists stb2 (ts timestamp, c0 tinyint, c1 int, c2 nchar(16379)) tags (city binary(16382))') + tdSql.error('create table if not exists stb2 (ts timestamp, c0 smallint, c1 int, c2 nchar(16379)) tags (city binary(16382))') + tdSql.execute('create table if not exists stb3 (ts timestamp, c1 int, c2 binary(65517)) tags (city binary(16382))') + tdSql.error('create table if not exists stb3 (ts timestamp, c0 bool, c1 int, c2 binary(65517)) tags (city binary(16382))') + # prepare the column and tag data + char100='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN0123456789' + tag_max_16382='' + binary_max_65517 = '' + nchar_max_16379='' + for num in range(163): + nchar_max_16379 += char100 + for num in range(4): + binary_max_65517 += char100 + + nchar_max_16379 += '0123456789012345678901234567890123456789012345678901234567890123456789012345678' + tag_max_16382 = nchar_max_16379 + tag_max_16382 += '9ab' + + for num in range(3): + binary_max_65517 += char100 + binary_max_65517 += '01234567890123456' + + # insert/query and check + tdSql.execute(f"create table ct1 using stb1 tags('{tag_max_16382}')") + tdSql.execute(f"create table ct2 using stb2 tags('{tag_max_16382}')") + tdSql.execute(f"create table ct3 using stb3 tags('{tag_max_16382}')") + tdSql.execute(f"insert into ct1 values (now,1,'nchar_max_16379')") + tdSql.execute(f"insert into ct2 values (now,1,1,'{nchar_max_16379}')") + tdSql.execute(f"insert into ct3 values (now,1,'{binary_max_65517}')") + + tdSql.query("select * from stb1") + tdSql.checkEqual(tdSql.queryResult[0][3],tag_max_16382) + + tdSql.query("select * from ct2") + tdSql.checkEqual(tdSql.queryResult[0][3],nchar_max_16379) + + tdSql.query("select * from stb2") + tdSql.checkEqual(tdSql.queryResult[0][3],nchar_max_16379) + tdSql.checkEqual(tdSql.queryResult[0][4],tag_max_16382) + + tdSql.query("select * from ct3") + tdSql.checkEqual(tdSql.queryResult[0][2],binary_max_65517) + + tdSql.query("select * from stb3") + tdSql.checkEqual(tdSql.queryResult[0][2],binary_max_65517) + 
tdSql.checkEqual(tdSql.queryResult[0][3],tag_max_16382) + + tdSql.execute('drop database db') + def run(self): self.dbname_length_check() self.tbname_length_check() @@ -174,6 +229,7 @@ class TDTestCase: self.username_length_check() self.password_length_check() self.sql_length_check() + self.row_col_tag_maxlen_check() def stop(self): tdSql.close() diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py index b53abc41aa..667ce3239c 100644 --- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py +++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py @@ -672,28 +672,28 @@ class TDTestCase: except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) - # # # binary + # binary stb_name = tdCom.getLongName(7, "letters") - input_sql = f'{stb_name},t0=t c0=f,c11=f,c2=f,c3=f,c4=f,c5=f,c6=f,c7=f,c8=f,c9=f,c10=f,c12=f,c1="{tdCom.getLongName(65519, "letters")}" 1626006833639000000' + input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(65514, "letters")}" 1626006833639000000' - # try: - # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - # except SchemalessError as err: - # tdSql.checkNotEqual(err.errno, 0) + input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(65518, "letters")}" 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) - # # nchar - # # * legal nchar could not be larger than 16374/4 - # stb_name = tdCom.getLongName(7, "letters") - # input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}" 1626006833639000000' - # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + # nchar + # * legal nchar could not be larger than 16374/4 + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(16379, "letters")}" 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - # input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4094, "letters")}" 1626006833639000000' - # try: - # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - # except SchemalessError as err: - # tdSql.checkNotEqual(err.errno, 0) + input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(16380, "letters")}" 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) def tagColIllegalValueCheckCase(self): @@ -896,26 +896,40 @@ class TDTestCase: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2 - input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}" c0=f 1626006833639000000' - self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, 
TDSmlTimestampType.NANO_SECOND.value) + # input_sql = f'{stb_name}, t1="{tdCom.getLongName(4095, "letters")}"" c0=f 1626006833639000000' + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(2) - input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000' - try: - self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - raise Exception("should not reach here") - except SchemalessError as err: - tdSql.checkNotEqual(err.errno, 0) - tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(2) + # tdSql.query(f"select * from {stb_name}") + # tdSql.checkRows(2) + # input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4084, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000' + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + # tdSql.query(f"select * from {stb_name}") + # tdSql.checkRows(2) # # * check col,col+ts max in describe ---> 16143 - input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000' + input_sql = f'{stb_name},t0=t c0=i32,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")}" 1626006833639000000' + input_sql = f'{stb_name},t0=t c0=i32,c1="{tdCom.getLongName(65517, "letters")},c2=f" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + input_sql = f'{stb_name},t0=t c0=i16,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")}" 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + input_sql = f'{stb_name},t0=t c0=i16,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")},c3=t" 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) tdSql.query(f"select * from {stb_name}") tdSql.checkRows(3) @@ -939,17 +953,17 @@ class TDTestCase: code = self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # * legal nchar could not be larger than 16374/4 - input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}" c0=f 1626006833639000000' - self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(2) - input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, 
"letters")}",t2=L"{tdCom.getLongName(2, "letters")}" c0=f 1626006833639000000' - try: - self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - except SchemalessError as err: - tdSql.checkNotEqual(err.errno, 0) - tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(2) + # input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}" c0=f 1626006833639000000' + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + # tdSql.query(f"select * from {stb_name}") + # tdSql.checkRows(2) + # input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(2, "letters")}" c0=f 1626006833639000000' + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + # tdSql.query(f"select * from {stb_name}") + # tdSql.checkRows(2) input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(4, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -1283,7 +1297,7 @@ class TDTestCase: self.nowTsCheckCase() self.dateFormatTsCheckCase() self.illegalTsCheckCase() - self.tagValueLengthCheckCase() + # self.tagValueLengthCheckCase() self.colValueLengthCheckCase() self.tagColIllegalValueCheckCase() self.duplicateIdTagColInsertCheckCase() diff --git a/tests/system-test/1-insert/stmt_error.py b/tests/system-test/1-insert/stmt_error.py index b77cd488f7..c6d747c317 100644 --- a/tests/system-test/1-insert/stmt_error.py +++ b/tests/system-test/1-insert/stmt_error.py @@ -216,7 +216,6 @@ class TDTestCase: tdLog.info('=========stmt error occured for bind part column(NULL Timestamp) ==============') else: tdLog.exit("expect error(%s) not occured - 2" % str(error)) - def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From d4929041886d2da0f8d3979d57ae581a41030303 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 28 Apr 2023 16:40:26 +0800 Subject: [PATCH 06/16] chore: error process and test cases --- source/client/src/clientSml.c | 71 +++++++++----- source/client/src/clientSmlJson.c | 2 +- .../1-insert/influxdb_line_taosc_insert.py | 98 +++++++++++-------- 3 files changed, 105 insertions(+), 66 deletions(-) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 4d0f5365f3..45f0def157 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -560,9 +560,14 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) { int32_t result = 1; - while (result <= length) { - result *= 2; + if (length < 1024) { + while (result <= length) { + result <<= 1; + } + } else { + result = length; } + if (type == TSDB_DATA_TYPE_BINARY && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE; } else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { @@ -624,10 +629,6 @@ static int32_t getBytes(uint8_t type, int32_t length) { static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, 
SArray *cols, SArray *results, int32_t numOfCols, bool isTag) { - bool check = numOfCols == 0 ? true : false; - int32_t maxLen = isTag ? TSDB_MAX_TAGS_LEN : TSDB_MAX_BYTES_PER_ROW; - - int32_t len = 0; for (int j = 0; j < taosArrayGetSize(cols); ++j) { SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, j); ESchemaAction action = SCHEMA_ACTION_NULL; @@ -639,15 +640,6 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO SField field = {0}; field.type = kv->type; field.bytes = getBytes(kv->type, kv->length); - if (check) { - len += field.bytes; - if (len > maxLen) { - code = TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; - uError("smlBuildFieldsList add %s failed since %s", isTag ? "tag" : "col", tstrerror(code)); - return code; - } - } - memcpy(field.name, kv->key, kv->keyLen); taosArrayPush(results, &field); } else if (action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE || action == SCHEMA_ACTION_CHANGE_TAG_SIZE) { @@ -660,16 +652,19 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO if (isTag) newIndex -= numOfCols; SField *field = (SField *)taosArrayGet(results, newIndex); field->bytes = getBytes(kv->type, kv->length); - if (check) { - len += (kv->length - schemaField[*index].bytes + VARSTR_HEADER_SIZE); - if (len > maxLen) { - code = TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; - uError("smlBuildFieldsList change %s failed since %s", isTag ? "tag" : "col", tstrerror(code)); - return code; - } - } } } + + int32_t maxLen = isTag ? TSDB_MAX_TAGS_LEN : TSDB_MAX_BYTES_PER_ROW; + int32_t len = 0; + for (int j = 0; j < taosArrayGetSize(results); ++j) { + SField *field = taosArrayGet(results, j); + len += field->bytes; + } + if (len > maxLen) { + return isTag ? TSDB_CODE_PAR_INVALID_TAGS_LENGTH : TSDB_CODE_PAR_INVALID_ROW_LENGTH; + } + return TSDB_CODE_SUCCESS; } @@ -867,6 +862,21 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { goto end; } + if (taosArrayGetSize(pTags) + pTableMeta->tableInfo.numOfColumns > TSDB_MAX_COLUMNS) { + uError("SML:0x%" PRIx64 " too many columns than 4096", info->id); + code = TSDB_CODE_PAR_TOO_MANY_COLUMNS; + taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); + goto end; + } + if (taosArrayGetSize(pTags) > TSDB_MAX_TAGS) { + uError("SML:0x%" PRIx64 " too many tags than 128", info->id); + code = TSDB_CODE_PAR_INVALID_TAGS_NUM; + taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); + goto end; + } + code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname); @@ -923,6 +933,14 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { goto end; } + if (taosArrayGetSize(pColumns) + pTableMeta->tableInfo.numOfTags > TSDB_MAX_COLUMNS) { + uError("SML:0x%" PRIx64 " too many columns than 4096", info->id); + code = TSDB_CODE_PAR_TOO_MANY_COLUMNS; + taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); + goto end; + } + code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. 
can not create %s", info->id, pName.tname); @@ -1527,8 +1545,11 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL do { code = smlModifyDBSchemas(info); - if (code == 0) break; - taosMsleep(500); + if (code == 0 || code == TSDB_CODE_SML_INVALID_DATA || code == TSDB_CODE_PAR_TOO_MANY_COLUMNS || + code == TSDB_CODE_PAR_INVALID_TAGS_NUM || code == TSDB_CODE_PAR_INVALID_ROW_LENGTH || + code == TSDB_CODE_PAR_INVALID_TAGS_LENGTH) + break; + taosMsleep(100); uInfo("SML:0x%" PRIx64 " smlModifyDBSchemas retry code:%s, times:%d", info->id, tstrerror(code), retryNum); } while (retryNum++ < taosHashGetSize(info->superTables) * MAX_RETRY_TIMES); diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c index c3a6e15697..b0ae316031 100644 --- a/source/client/src/clientSmlJson.c +++ b/source/client/src/clientSmlJson.c @@ -575,7 +575,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) { uError("OTD:invalid type(%s) for JSON String", typeStr); return TSDB_CODE_TSC_INVALID_JSON_TYPE; } - pVal->length = (uint16_t)strlen(value->valuestring); + pVal->length = strlen(value->valuestring); if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py index 667ce3239c..46aec2909a 100644 --- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py +++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py @@ -439,7 +439,7 @@ class TDTestCase: for input_sql in [self.genLongSql(127, 1)[0], self.genLongSql(1, 4093)[0]]: tdCom.cleanTb(dbname="test") self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]: + for input_sql in [self.genLongSql(128, 1)[0], self.genLongSql(1, 4094)[0]]: tdCom.cleanTb(dbname="test") try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -578,10 +578,16 @@ class TDTestCase: # binary stb_name = tdCom.getLongName(7, "letters") - input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}" c0=f 1626006833639000000' + input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4091, "letters")}" c0=f 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16375, "letters")}" c0=f 1626006833639000000' + input_sql = f'{stb_name},t0="a",t1="{tdCom.getLongName(4088, "letters")}" c0=f 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4092, "letters")}" c0=f 1626006833639000000' try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) except SchemalessError as err: @@ -590,10 +596,10 @@ class TDTestCase: # nchar # * legal nchar could not be larger than 16374/4 stb_name = tdCom.getLongName(7, "letters") - input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}" c0=f 1626006833639000000' + input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4090, "letters")}" c0=f 1626006833639000000' 
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4094, "letters")}" c0=f 1626006833639000000' + input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4091, "letters")}" c0=f 1626006833639000000' try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) except SchemalessError as err: @@ -674,9 +680,15 @@ class TDTestCase: # binary stb_name = tdCom.getLongName(7, "letters") - input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000' + input_sql = f'{stb_name},t0=t c0=1i32,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + input_sql = f'{stb_name},t0=t c0=1i32,c1="{tdCom.getLongName(65517, "letters")},c2=f" 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(65518, "letters")}" 1626006833639000000' try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -686,10 +698,10 @@ class TDTestCase: # nchar # * legal nchar could not be larger than 16374/4 stb_name = tdCom.getLongName(7, "letters") - input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(16379, "letters")}" 1626006833639000000' + input_sql = f'{stb_name},t0=t c0=1i32,c1=L"{tdCom.getLongName(16379, "letters")}",c2=f 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(16380, "letters")}" 1626006833639000000' + input_sql = f'{stb_name},t0=t c0=1i32,c1=L"{tdCom.getLongName(16380, "letters")}",c2=1i16 1626006833639000000' try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) except SchemalessError as err: @@ -896,52 +908,57 @@ class TDTestCase: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2 - # input_sql = f'{stb_name}, t1="{tdCom.getLongName(4095, "letters")}"" c0=f 1626006833639000000' - # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - - # tdSql.query(f"select * from {stb_name}") - # tdSql.checkRows(2) - # input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4084, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000' - # try: - # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - # raise Exception("should not reach here") - # except SchemalessError as err: - # tdSql.checkNotEqual(err.errno, 0) - # tdSql.query(f"select * from {stb_name}") - # tdSql.checkRows(2) - - # # * check col,col+ts max in describe ---> 16143 - input_sql = f'{stb_name},t0=t c0=i32,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000' + input_sql = f'{stb_name}, t0=f,t1="{tdCom.getLongName(4093, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) 
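    # --- Editorial note (not part of this patch) -----------------------------------
    # The column boundaries exercised above (65517/65518 for binary, 16379/16380
    # for nchar) follow from the limits this patch series raises: a binary or
    # nchar value carries a 2-byte VARSTR header against a 65519-byte per-column
    # cap, and each nchar character occupies 4 bytes.  The arithmetic below is
    # illustrative only, using those constants as set in this series.
    TSDB_MAX_BINARY_LEN = 65519   # per-column byte cap (binary and nchar alike)
    VARSTR_HEADER_SIZE  = 2       # 2-byte length prefix stored with each value
    TSDB_NCHAR_SIZE     = 4       # bytes per nchar character
    # longest accepted binary payload: 65519 - 2 = 65517 characters
    assert TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE == 65517
    # longest accepted nchar payload: (65519 - 2) // 4 = 16379 characters
    assert (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) // TSDB_NCHAR_SIZE == 16379
    # --------------------------------------------------------------------------------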
- input_sql = f'{stb_name},t0=t c0=i32,c1="{tdCom.getLongName(65517, "letters")},c2=f" 1626006833639000000' - self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4084, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000' try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + raise Exception("should not reach here") except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) - input_sql = f'{stb_name},t0=t c0=i16,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")}" 1626006833639000000' - self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - input_sql = f'{stb_name},t0=t c0=i16,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")},c3=t" 1626006833639000000' + stb_name = tdCom.getLongName(8, "letters") + # # * check col,col+ts max in describe ---> 16143 + input_sql = f'{stb_name},t0=t c0=1i32,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + input_sql = f'{stb_name},t0=t c0=1i32,c1="{tdCom.getLongName(65517, "letters")}",c2=f 1626006833639000000' try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(3) + tdSql.checkRows(1) + + + stb_name = tdCom.getLongName(9, "letters") + input_sql = f'{stb_name},t0=t c0=1i16,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")}" 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + input_sql = f'{stb_name},t0=t c0=1i16,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")},c3=t" 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(1) + input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(13, "letters")}" 1626006833639000000' try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) - tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(3) - # * tag nchar max is 16374/4, col+ts nchar max 49151 + + # * tag nchar max is 16384/4, col+ts nchar max 65531 def tagColNcharMaxLengthCheckCase(self): """ check nchar length limit @@ -952,7 +969,7 @@ class TDTestCase: input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000' code = self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - # * legal nchar could not be larger than 16374/4 + # * legal tag nchar could not be larger than 16384/4 # input_sql = 
f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}" c0=f 1626006833639000000' # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # tdSql.query(f"select * from {stb_name}") @@ -968,14 +985,15 @@ class TDTestCase: input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(4, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(3) + tdSql.checkRows(2) + input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(5, "letters")}" 1626006833639000000' try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(3) + tdSql.checkRows(2) def batchInsertCheckCase(self): """ @@ -1291,13 +1309,13 @@ class TDTestCase: self.idSeqCheckCase() self.idUpperCheckCase() self.noIdCheckCase() - # self.maxColTagCheckCase() + self.maxColTagCheckCase() self.idIllegalNameCheckCase() self.idStartWithNumCheckCase() self.nowTsCheckCase() self.dateFormatTsCheckCase() self.illegalTsCheckCase() - # self.tagValueLengthCheckCase() + self.tagValueLengthCheckCase() self.colValueLengthCheckCase() self.tagColIllegalValueCheckCase() self.duplicateIdTagColInsertCheckCase() @@ -1307,8 +1325,8 @@ class TDTestCase: self.tagColAddDupIDCheckCase() self.tagColAddCheckCase() self.tagMd5Check() - # self.tagColBinaryMaxLengthCheckCase() - # self.tagColNcharMaxLengthCheckCase() + self.tagColBinaryMaxLengthCheckCase() + self.tagColNcharMaxLengthCheckCase() self.batchInsertCheckCase() self.multiInsertCheckCase(10) self.batchErrorInsertCheckCase() From c949fee06b12da07f9b2dcd445e14f3093a6588f Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Fri, 5 May 2023 10:33:24 +0800 Subject: [PATCH 07/16] enh: commit vnode after consolidating alter hash range --- source/dnode/mnode/impl/src/mndVgroup.c | 2 +- source/dnode/vnode/src/vnd/vnodeOpen.c | 2 -- source/dnode/vnode/src/vnd/vnodeSvr.c | 12 ++++++++++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index add2b1568c..d4be6bed73 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -2164,7 +2164,7 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj mInfo("vgId:%d, vnode:%d dnode:%d", newVg2.vgId, i, newVg2.vnodeGid[i].dnodeId); } - // alter hash range + // alter vgId and hash range int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP); if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg1, maxVgId) != 0) goto _OVER; newVg1.vgId = maxVgId; diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 2f7520e3a7..b5e7c6875b 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -236,8 +236,6 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod return -1; } - // todo vnode compact here - vInfo("vgId:%d, vnode hashrange is 
altered", info.config.vgId); return 0; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index b5a5b1500b..a8511eedfd 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -425,7 +425,10 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp } } break; case TDMT_VND_ALTER_CONFIRM: - vnodeProcessAlterConfirmReq(pVnode, version, pReq, len, pRsp); + needCommit = pVnode->config.hashChange; + if (vnodeProcessAlterConfirmReq(pVnode, version, pReq, len, pRsp) < 0) { + goto _err; + } break; case TDMT_VND_ALTER_CONFIG: vnodeProcessAlterConfigReq(pVnode, version, pReq, len, pRsp); @@ -1472,6 +1475,11 @@ static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void } code = vnodeConsolidateAlterHashRange(pVnode, version); + if (code < 0) { + vError("vgId:%d, failed to consolidate alter hashrange since %s. version:%" PRId64, TD_VID(pVnode), terrstr(), + version); + goto _exit; + } pVnode->config.hashChange = false; _exit: @@ -1480,7 +1488,7 @@ _exit: pRsp->pCont = NULL; pRsp->contLen = 0; - return 0; + return code; } static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) { From 0e6de0935393270f4f36ec23b6d1ae07b9031cdc Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Fri, 5 May 2023 14:53:32 +0800 Subject: [PATCH 08/16] enh: unify error msg for disk space checking in doInitAggInfoSup --- source/libs/executor/src/aggregateoperator.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index ec8060348d..1f170552db 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -467,8 +467,8 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); if (!osTempSpaceAvailable()) { - code = TSDB_CODE_NO_AVAIL_DISK; - qError("Init stream agg supporter failed since %s, %s", terrstr(code), pKey); + code = TSDB_CODE_NO_DISKSPACE; + qError("Init stream agg supporter failed since %s, key:%s, tempDir:%s", terrstr(code), pKey, tsTempDir); return code; } From b0e77eeb4500fa7b1ab8afab56f1e24bfe84993f Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Fri, 5 May 2023 16:13:47 +0800 Subject: [PATCH 09/16] doc: refine the description of TABLE_PREFIX and TABLE_SUFFIX --- docs/en/12-taos-sql/02-database.md | 4 ++-- docs/zh/12-taos-sql/02-database.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md index ec007d6830..8c579622c9 100644 --- a/docs/en/12-taos-sql/02-database.md +++ b/docs/en/12-taos-sql/02-database.md @@ -72,8 +72,8 @@ database_option: { - 0: The database can contain multiple supertables. - 1: The database can contain only one supertable. - STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value. -- TABLE_PREFIX:The prefix length in the table name that is ignored when distributing table to vnode based on table name. 
-- TABLE_SUFFIX:The suffix length in the table name that is ignored when distributing table to vnode based on table name. +- TABLE_PREFIX: The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "0001" is used if TSDB_PREFIX is set to 2 but "v3" is used if TSDB_PREFIX is set to -2; It can help you to control the distribution of tables. +- TABLE_SUFFIX:The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2; It can help you to control the distribution of tables. - TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB. - WAL_RETENTION_PERIOD: specifies the maximum time of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value 0. A value of 0 indicates that WAL files are not required to keep for consumption. Alter it with a proper value at first to create topics. - WAL_RETENTION_SIZE: specifies the maximum total size of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit. diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md index dd30635660..b0e2887ba9 100644 --- a/docs/zh/12-taos-sql/02-database.md +++ b/docs/zh/12-taos-sql/02-database.md @@ -71,8 +71,8 @@ database_option: { - 0:表示可以创建多张超级表。 - 1:表示只可以创建一张超级表。 - STT_TRIGGER:表示落盘文件触发文件合并的个数。默认为 1,范围 1 到 16。对于少表高频场景,此参数建议使用默认配置,或较小的值;而对于多表低频场景,此参数建议配置较大的值。 -- TABLE_PREFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。 -- TABLE_SUFFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。 +- TABLE_PREFIX:当其为正值时,在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的前缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的前缀;例如,假定表名为 "v30001",当 TSDB_PREFIX = 2 时 使用 "0001" 来决定分配到哪个 vgroup ,当 TSDB_PREFIX = -2 时使用 "v3" 来决定分配到哪个 vgroup +- TABLE_SUFFIX:当其为正值时,在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的后缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的后缀;例如,假定表名为 "v30001",当 TSDB_SUFFIX = 2 时 使用 "v300" 来决定分配到哪个 vgroup ,当 TSDB_SUFFIX = -2 时使用 "0001" 来决定分配到哪个 vgroup。 - TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB到 16 MB。 - WAL_RETENTION_PERIOD: 为了数据订阅消费,需要WAL日志文件额外保留的最大时长策略。WAL日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 0,表示无需为订阅保留。新建订阅,应先设置恰当的时长策略。 - WAL_RETENTION_SIZE:为了数据订阅消费,需要WAL日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。 From b3bd3799afb018962cac149105d11d1e35d524b2 Mon Sep 17 00:00:00 2001 From: dapan1121 <72057773+dapan1121@users.noreply.github.com> Date: Fri, 5 May 2023 16:19:34 +0800 Subject: [PATCH 10/16] Update 02-database.md --- docs/zh/12-taos-sql/02-database.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md index b0e2887ba9..a2a0914120 100644 --- a/docs/zh/12-taos-sql/02-database.md +++ b/docs/zh/12-taos-sql/02-database.md @@ -72,7 +72,7 @@ database_option: { - 1:表示只可以创建一张超级表。 - STT_TRIGGER:表示落盘文件触发文件合并的个数。默认为 1,范围 1 到 16。对于少表高频场景,此参数建议使用默认配置,或较小的值;而对于多表低频场景,此参数建议配置较大的值。 - 
TABLE_PREFIX:当其为正值时,在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的前缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的前缀;例如,假定表名为 "v30001",当 TSDB_PREFIX = 2 时 使用 "0001" 来决定分配到哪个 vgroup ,当 TSDB_PREFIX = -2 时使用 "v3" 来决定分配到哪个 vgroup -- TABLE_SUFFIX:当其为正值时,在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的后缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的后缀;例如,假定表名为 "v30001",当 TSDB_SUFFIX = 2 时 使用 "v300" 来决定分配到哪个 vgroup ,当 TSDB_SUFFIX = -2 时使用 "0001" 来决定分配到哪个 vgroup。 +- TABLE_SUFFIX:当其为正值时,在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的后缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的后缀;例如,假定表名为 "v30001",当 TSDB_SUFFIX = 2 时 使用 "v300" 来决定分配到哪个 vgroup ,当 TSDB_SUFFIX = -2 时使用 "01" 来决定分配到哪个 vgroup。 - TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB到 16 MB。 - WAL_RETENTION_PERIOD: 为了数据订阅消费,需要WAL日志文件额外保留的最大时长策略。WAL日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 0,表示无需为订阅保留。新建订阅,应先设置恰当的时长策略。 - WAL_RETENTION_SIZE:为了数据订阅消费,需要WAL日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。 From 0c3dc0867fff2d8884842d6dd88d2e6f8efd8320 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 5 May 2023 20:33:41 +0800 Subject: [PATCH 11/16] chore: sync fix from main --- source/client/src/clientSml.c | 11 ++++++----- source/libs/tdb/src/db/tdbBtree.c | 5 +++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 45f0def157..98b64b211a 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -558,14 +558,15 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm return 0; } +#define BOUNDARY 1024 static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) { int32_t result = 1; - if (length < 1024) { - while (result <= length) { - result <<= 1; - } - } else { + if (length >= BOUNDARY){ result = length; + }else{ + while (result <= length) { + result << 1; + } } if (type == TSDB_DATA_TYPE_BINARY && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 6df2b40000..c49b5726b6 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -1814,6 +1814,11 @@ int tdbBtreeNext(SBTC *pBtc, void **ppKey, int *kLen, void **ppVal, int *vLen) { *ppVal = pVal; *vLen = cd.vLen; + } else { + if (TDB_CELLDECODER_FREE_VAL(&cd)) { + tdbTrace("tdb/btree-next2 decoder: %p pVal free: %p", &cd, cd.pVal); + tdbFree(cd.pVal); + } } ret = tdbBtcMoveToNext(pBtc); From 4ae9eab90ebf0e5ccc6dc34a772d7905d2d88414 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 5 May 2023 20:42:49 +0800 Subject: [PATCH 12/16] chore: fix --- source/client/src/clientSml.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 98b64b211a..cf1c6cc434 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -565,7 +565,7 @@ static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) { result = length; }else{ while (result <= length) { - result << 1; + result <<= 1; } } From 29da3087727535528b13a8329faf1c337312b806 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 5 May 2023 23:58:45 +0800 Subject: [PATCH 13/16] chore: test case --- .../1-insert/influxdb_line_taosc_insert.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py index 46aec2909a..819951cce5 100644 --- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py +++ 
b/tests/system-test/1-insert/influxdb_line_taosc_insert.py @@ -908,22 +908,24 @@ class TDTestCase: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2 - input_sql = f'{stb_name}, t0=f,t1="{tdCom.getLongName(4093, "letters")}" 1626006833639000000' + stb_name = tdCom.getLongName(8, "letters") + input_sql = f'{stb_name},t0=f,t1="{tdCom.getLongName(4091, "letters")}", c0=f 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(2) - input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4084, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000' + tdSql.checkRows(1) + + input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4092, "letters")}", c0=f 1626006833639000000' try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) raise Exception("should not reach here") except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(2) + tdSql.checkRows(1) - stb_name = tdCom.getLongName(8, "letters") + stb_name = tdCom.getLongName(9, "letters") # # * check col,col+ts max in describe ---> 16143 input_sql = f'{stb_name},t0=t c0=1i32,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -938,7 +940,7 @@ class TDTestCase: tdSql.checkRows(1) - stb_name = tdCom.getLongName(9, "letters") + stb_name = tdCom.getLongName(10, "letters") input_sql = f'{stb_name},t0=t c0=1i16,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")}" 1626006833639000000' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) From 75795253a9b1dc6c3f7ebebbc0e19fb9bb0518fc Mon Sep 17 00:00:00 2001 From: kailixu Date: Sat, 6 May 2023 06:06:12 +0800 Subject: [PATCH 14/16] more code --- source/client/src/clientHb.c | 2 +- tests/system-test/1-insert/influxdb_line_taosc_insert.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 4240a8510d..203aad8068 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -1172,7 +1172,7 @@ void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) { taosThreadMutexLock(&pTscObj->mutex); if (pTscObj->passInfo.fp) { - int32_t cnt = atomic_sub_fetch_32(&pAppHbMgr->passKeyCnt, 1); + atomic_sub_fetch_32(&pAppHbMgr->passKeyCnt, 1); } taosThreadMutexUnlock(&pTscObj->mutex); } \ No newline at end of file diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py index 819951cce5..3e9bad4efd 100644 --- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py +++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py @@ -896,7 +896,7 @@ class TDTestCase: tdSql.checkRows(2) tdSql.checkNotEqual(tb_name1, tb_name3) - # * tag binary max is 16384, col+ts binary max 65531 + # * tag binary max is 16384-2, col+ts binary max 65531 def tagColBinaryMaxLengthCheckCase(self): """ every binary and nchar must be length+2 @@ -960,7 +960,7 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) - # * tag 
nchar max is 16384/4, col+ts nchar max 65531 + # * tag nchar max is (16384-2)/4, col+ts nchar max 65531 def tagColNcharMaxLengthCheckCase(self): """ check nchar length limit @@ -971,7 +971,7 @@ class TDTestCase: input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000' code = self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) - # * legal tag nchar could not be larger than 16384/4 + # * legal tag nchar could not be larger than (16384-2)/4 # input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}" c0=f 1626006833639000000' # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # tdSql.query(f"select * from {stb_name}") From 72c6292ab47dc6b15799db750b742a0d5c843cd5 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Sat, 6 May 2023 09:14:03 +0800 Subject: [PATCH 15/16] enh: declare mndSplitVgroup in mndVgroup.h --- source/dnode/mnode/impl/inc/mndVgroup.h | 2 ++ source/dnode/mnode/impl/src/mndVgroup.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h index 0229735952..94c4eae83f 100644 --- a/source/dnode/mnode/impl/inc/mndVgroup.h +++ b/source/dnode/mnode/impl/inc/mndVgroup.h @@ -50,6 +50,8 @@ void *mndBuildCreateVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *p void *mndBuildDropVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen); bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid); +int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index add2b1568c..7ece3b17ff 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -2103,7 +2103,7 @@ static int32_t mndAddAdjustVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, return 0; } -static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) { +int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) { int32_t code = -1; STrans *pTrans = NULL; SSdbRaw *pRaw = NULL; From fd43736e926abe2a0c2531c370ec44b73a151a28 Mon Sep 17 00:00:00 2001 From: Alex Duan <51781608+DuanKuanJun@users.noreply.github.com> Date: Sat, 6 May 2023 10:48:31 +0800 Subject: [PATCH 16/16] Update 09-udf.md --- docs/zh/07-develop/09-udf.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md index 443dcb9806..99ecd903b4 100644 --- a/docs/zh/07-develop/09-udf.md +++ b/docs/zh/07-develop/09-udf.md @@ -217,7 +217,7 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so ### C UDF 示例代码 -#### 标量函数示例 [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c) +#### 标量函数示例 [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c) bit_add 实现多列的按位与功能。如果只有一列,返回这一列。bit_add 忽略空值。 @@ -230,7 +230,7 @@ bit_add 实现多列的按位与功能。如果只有一列,返回这一列。 -#### 聚合函数示例1 返回值为数值类型 [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c) +#### 聚合函数示例1 返回值为数值类型 [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c) l2norm 实现了输入列的所有数据的二阶范数,即对每个数据先平方,再累加求和,最后开方。 @@ -243,7 +243,7 @@ l2norm 实现了输入列的所有数据的二阶范数,即对每个数据先 -#### 聚合函数示例2 返回值为字符串类型 
[max_vol](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/max_vol.c) +#### 聚合函数示例2 返回值为字符串类型 [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c) max_vol 实现了从多个输入的电压列中找到最大电压,返回由设备ID + 最大电压所在(行,列)+ 最大电压值 组成的组合字符串值
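
Editorial note: to make the max_vol description above concrete, the sketch below is a conceptual illustration in Python of the reduction it describes -- scan several voltage columns, remember where the maximum occurs, and emit a combined "device id + (row, col) + max voltage" string. It is not the linked C aggregate UDF, and the exact output format shown here is an assumption.

```python
# Conceptual sketch only of what the linked max_vol C UDF computes, per the
# description above; names and the result layout are illustrative.
def max_vol(device_id, voltage_columns):
    """voltage_columns: a list of columns, each a list of voltages (None = NULL)."""
    best_val, best_row, best_col = None, -1, -1
    for col, column in enumerate(voltage_columns):
        for row, v in enumerate(column):
            if v is None:
                continue  # skip NULL values
            if best_val is None or v > best_val:
                best_val, best_row, best_col = v, row, col
    # combined string: device id + position of the maximum + the maximum itself
    return f"{device_id}_({best_row},{best_col})_{best_val}"

# e.g. max_vol("dev001", [[1.2, 3.4], [2.2, 5.1]]) -> "dev001_(1,1)_5.1"
```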