From 53f6016968a19e2f435d89cbab21b0bd941d0946 Mon Sep 17 00:00:00 2001 From: "cris.pei" Date: Fri, 12 Jul 2024 20:40:54 +0800 Subject: [PATCH 001/142] Modify the taosBenchmark documentation for the TS-4883 problem --- docs/en/14-reference/05-taosbenchmark.md | 3 +++ docs/zh/14-reference/05-taosbenchmark.md | 2 ++ 2 files changed, 5 insertions(+) diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md index e4884b889c..45c5cd2fb8 100644 --- a/docs/en/14-reference/05-taosbenchmark.md +++ b/docs/en/14-reference/05-taosbenchmark.md @@ -364,6 +364,9 @@ The configuration parameters for specifying super table tag columns and data col - **min**: The minimum value of the column/label of the data type. The generated value will equal or large than the minimum value. - **max**: The maximum value of the column/label of the data type. The generated value will less than the maximum value. + +- **scalingFactor**: Floating-point precision enhancement factor, which takes effect only when the data type is float/double. It has a valid range of positive integers from 1 to 1,000,000. It is used to enhance the precision of generated floating-point numbers, particularly when the min or max values are small. This property enhances the precision after the decimal point by powers of 10: scalingFactor of 10 indicates an enhancement of 1 decimal precision, 100 indicates an enhancement of 2 decimal precision, and so on. + - **fun**: This column of data is filled with functions. Currently, only the sin and cos functions are supported. The input parameter is the timestamp and converted to an angle value. The conversion formula is: angle x=input time column ts value % 360. At the same time, it supports coefficient adjustment and random fluctuation factor adjustment, presented in a fixed format expression, such as fun="10\*sin(x)+100\*random(5)", where x represents the angle, ranging from 0 to 360 degrees, and the growth step size is consistent with the time column step size. 10 represents the coefficient of multiplication, 100 represents the coefficient of addition or subtraction, and 5 represents the fluctuation range within a random range of 5%. The currently supported data types are int, bigint, float, and double. Note: The expression is fixed and cannot be reversed. - **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values. 
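For context, a column definition in a taosBenchmark JSON configuration that combines the parameters described above might look like the sketch below. The property names (`type`, `min`, `max`, `scalingFactor`, `fun`, `values`) follow the documentation in this patch; the surrounding structure, column names, and values are illustrative assumptions rather than a verbatim config:

```json
{
  "columns": [
    {"type": "FLOAT",  "name": "current",  "min": 1,   "max": 2,   "scalingFactor": 100},
    {"type": "DOUBLE", "name": "load",     "fun": "10*sin(x)+100*random(5)"},
    {"type": "INT",    "name": "voltage",  "min": 198, "max": 235},
    {"type": "BINARY", "name": "location", "len": 16,  "values": ["beijing", "shanghai"]}
  ]
}
```

In this sketch, `scalingFactor: 100` would add two decimal places of precision to the floats generated between 1 and 2, and the `fun` expression fills its column with a scaled sine wave plus a random fluctuation within 5%, exactly as the parameter descriptions above explain.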
diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md index d102497d7d..a6cc40c134 100644 --- a/docs/zh/14-reference/05-taosbenchmark.md +++ b/docs/zh/14-reference/05-taosbenchmark.md @@ -367,6 +367,8 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **max** : 数据类型的 列/标签 的最大值。生成的值将小于最小值。 +- **max** : 浮点数精度增强因子,仅当数据类型是float/double时生效,有效值范围为1至1000000的正整数。用于增强生成浮点数的精度,特别是在min或max值较小的情况下。此属性按10的幂次增强小数点后的精度:scalingFactor为10表示增强1位小数精度,100表示增强2位,依此类推。 + - **fun** : 此列数据以函数填充,目前只支持 sin 和 cos 两函数,输入参数为时间戳换算成角度值,换算公式: 角度 x = 输入的时间列ts值 % 360。同时支持系数调节,随机波动因子调节,以固定格式的表达式展现,如 fun=“10\*sin(x)+100\*random(5)” , x 表示角度,取值 0 ~ 360度,增长步长与时间列步长一致。10 表示乘的系数,100 表示加或减的系数,5 表示波动幅度在 5% 的随机范围内。目前支持的数据类型为 int, bigint, float, double 四种数据类型。注意:表达式为固定模式,不可前后颠倒。 - **values** : nchar/binary 列/标签的值域,将从值中随机选择。 From 23c8f8df3283dd71304637bf9600bac8cd550cb4 Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Mon, 29 Jul 2024 17:15:11 +0800 Subject: [PATCH 002/142] Update docs/zh/14-reference/05-taosbenchmark.md fix typo --- docs/zh/14-reference/05-taosbenchmark.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md index a6cc40c134..373f15af94 100644 --- a/docs/zh/14-reference/05-taosbenchmark.md +++ b/docs/zh/14-reference/05-taosbenchmark.md @@ -367,7 +367,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **max** : 数据类型的 列/标签 的最大值。生成的值将小于最小值。 -- **max** : 浮点数精度增强因子,仅当数据类型是float/double时生效,有效值范围为1至1000000的正整数。用于增强生成浮点数的精度,特别是在min或max值较小的情况下。此属性按10的幂次增强小数点后的精度:scalingFactor为10表示增强1位小数精度,100表示增强2位,依此类推。 +- **scalingFactor** : 浮点数精度增强因子,仅当数据类型是float/double时生效,有效值范围为1至1000000的正整数。用于增强生成浮点数的精度,特别是在min或max值较小的情况下。此属性按10的幂次增强小数点后的精度:scalingFactor为10表示增强1位小数精度,100表示增强2位,依此类推。 - **fun** : 此列数据以函数填充,目前只支持 sin 和 cos 两函数,输入参数为时间戳换算成角度值,换算公式: 角度 x = 输入的时间列ts值 % 360。同时支持系数调节,随机波动因子调节,以固定格式的表达式展现,如 fun=“10\*sin(x)+100\*random(5)” , x 表示角度,取值 0 ~ 360度,增长步长与时间列步长一致。10 表示乘的系数,100 表示加或减的系数,5 表示波动幅度在 5% 的随机范围内。目前支持的数据类型为 int, bigint, float, double 四种数据类型。注意:表达式为固定模式,不可前后颠倒。 From f84fe05a671bcde33fdcb07722752d4516a97895 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 24 Sep 2024 18:54:52 +0800 Subject: [PATCH 003/142] enh:[TS-5441] cost too long in tmq write meta data --- source/client/src/clientRawBlockWrite.c | 96 ++++++++++++++----------- 1 file changed, 55 insertions(+), 41 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 8a888a2a47..efd2eec678 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -26,7 +26,7 @@ #define RAW_NULL_CHECK(c) \ do { \ if (c == NULL) { \ - code = TSDB_CODE_OUT_OF_MEMORY; \ + code = terrno; \ goto end; \ } \ } while (0) @@ -1780,6 +1780,42 @@ end: return code; } +static int32_t buildCreateTbMap(STaosxRsp* rsp, SHashObj* pHashObj) { + // find schema data info + int32_t code = 0; + SVCreateTbReq pCreateReq = {0}; + SDecoder decoderTmp = {0}; + + for (int j = 0; j < rsp->createTableNum; j++) { + void** dataTmp = taosArrayGet(rsp->createTableReq, j); + RAW_NULL_CHECK(dataTmp); + int32_t* lenTmp = taosArrayGet(rsp->createTableLen, j); + RAW_NULL_CHECK(dataTmp); + + tDecoderInit(&decoderTmp, *dataTmp, *lenTmp); + RAW_RETURN_CHECK (tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq)); + + if (pCreateReq.type != TSDB_CHILD_TABLE) { + code = TSDB_CODE_INVALID_MSG; + goto end; + } + if (taosHashGet(pHashObj, pCreateReq.name, strlen(pCreateReq.name)) == NULL){ + 
RAW_RETURN_CHECK(taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), &pCreateReq, sizeof(SVCreateTbReq))); + } else{ + tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); + pCreateReq = (SVCreateTbReq){0}; + } + + tDecoderClear(&decoderTmp); + } + return 0; + +end: + tDecoderClear(&decoderTmp); + tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); + return code; +} + static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { if (taos == NULL || data == NULL) { SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); @@ -1787,11 +1823,11 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) } int32_t code = TSDB_CODE_SUCCESS; SHashObj* pVgHash = NULL; + SHashObj* pCreateTbHash = NULL; SQuery* pQuery = NULL; SMqTaosxRspObj rspObj = {0}; SDecoder decoder = {0}; STableMeta* pTableMeta = NULL; - SVCreateTbReq* pCreateReqDst = NULL; SRequestObj* pRequest = NULL; RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); @@ -1832,6 +1868,11 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) RAW_RETURN_CHECK(smlInitHandle(&pQuery)); pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); RAW_NULL_CHECK(pVgHash); + pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + RAW_NULL_CHECK(pCreateTbHash); + taosHashSetFreeFp(pCreateTbHash, (_hash_free_fn_t)tdDestroySVCreateTbReq); + + RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.rsp, pCreateTbHash)); uDebug(LOG_ID_TAG " write raw metadata block num:%d", LOG_ID_VALUE, rspObj.rsp.common.blockNum); while (++rspObj.common.resIter < rspObj.rsp.common.blockNum) { @@ -1854,40 +1895,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) (void)strcpy(pName.tname, tbName); // find schema data info - for (int j = 0; j < rspObj.rsp.createTableNum; j++) { - void** dataTmp = taosArrayGet(rspObj.rsp.createTableReq, j); - RAW_NULL_CHECK(dataTmp); - int32_t* lenTmp = taosArrayGet(rspObj.rsp.createTableLen, j); - RAW_NULL_CHECK(dataTmp); - - SDecoder decoderTmp = {0}; - SVCreateTbReq pCreateReq = {0}; - tDecoderInit(&decoderTmp, *dataTmp, *lenTmp); - if (tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq) < 0) { - tDecoderClear(&decoderTmp); - tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); - code = TSDB_CODE_TMQ_INVALID_MSG; - SET_ERROR_MSG("decode create table:%s req failed", tbName); - goto end; - } - - if (pCreateReq.type != TSDB_CHILD_TABLE) { - code = TSDB_CODE_TSC_INVALID_VALUE; - tDecoderClear(&decoderTmp); - tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); - SET_ERROR_MSG("create table req type is not child table: %s, type: %d", tbName, pCreateReq.type); - goto end; - } - if (strcmp(tbName, pCreateReq.name) == 0) { - RAW_RETURN_CHECK(cloneSVreateTbReq(&pCreateReq, &pCreateReqDst)); - tDecoderClear(&decoderTmp); - tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); - break; - } - tDecoderClear(&decoderTmp); - tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); - } - + SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); SVgroupInfo vg = {0}; RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg)); if (pCreateReqDst) { // change stable name to get meta @@ -1920,7 +1928,15 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) } void* rawData = getRawDataFromRes(pRetrieve); char err[ERR_MSG_LEN] = {0}; - code = 
rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqDst, fields, pSW->nCols, true, err, ERR_MSG_LEN); + SVCreateTbReq* pCreateReqTmp = NULL; + if (pCreateReqDst){ + RAW_RETURN_CHECK(cloneSVreateTbReq(pCreateReqDst, &pCreateReqTmp)); + } + code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqTmp, fields, pSW->nCols, true, err, ERR_MSG_LEN); + if (pCreateReqTmp != NULL) { + tdDestroySVCreateTbReq(pCreateReqTmp); + taosMemoryFree(pCreateReqTmp); + } taosMemoryFree(fields); taosMemoryFreeClear(pTableMeta); if (code != TSDB_CODE_SUCCESS) { @@ -1936,16 +1952,14 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) end: uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); + taosHashCleanup(pCreateTbHash); tDeleteSTaosxRsp(&rspObj.rsp); tDecoderClear(&decoder); qDestroyQuery(pQuery); destroyRequest(pRequest); taosHashCleanup(pVgHash); taosMemoryFreeClear(pTableMeta); - if (pCreateReqDst) { - tdDestroySVCreateTbReq(pCreateReqDst); - taosMemoryFree(pCreateReqDst); - } + return code; } From 33cab144faa092a93f77d30dbe2ecfeba65fb34d Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 25 Sep 2024 11:13:03 +0800 Subject: [PATCH 004/142] enh:[TS-5441] cost too long in tmq write meta data --- source/client/src/clientRawBlockWrite.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index efd2eec678..a97440eed7 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1870,8 +1870,6 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) RAW_NULL_CHECK(pVgHash); pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); RAW_NULL_CHECK(pCreateTbHash); - taosHashSetFreeFp(pCreateTbHash, (_hash_free_fn_t)tdDestroySVCreateTbReq); - RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.rsp, pCreateTbHash)); uDebug(LOG_ID_TAG " write raw metadata block num:%d", LOG_ID_VALUE, rspObj.rsp.common.blockNum); @@ -1952,7 +1950,13 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) end: uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); + void* pIter = taosHashIterate(pCreateTbHash, NULL); + while (pIter) { + tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); + pIter = taosHashIterate(pCreateTbHash, pIter); + } taosHashCleanup(pCreateTbHash); + tDeleteSTaosxRsp(&rspObj.rsp); tDecoderClear(&decoder); qDestroyQuery(pQuery); From 5399bffe993bc462938e44100bef90d5bf321a8d Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 25 Sep 2024 18:47:02 +0800 Subject: [PATCH 005/142] enh:[TS-5441] cost too long in tmq write meta data --- source/client/src/clientRawBlockWrite.c | 188 ++++++++++++++---------- 1 file changed, 112 insertions(+), 76 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index a97440eed7..856f4f59c1 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -165,7 +165,7 @@ static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sche } RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); -end: + end: *pJson = json; } @@ -199,7 +199,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) { return code; } -end: + end: return code; } static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** pJson) { @@ 
-340,7 +340,7 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** break; } -end: + end: tFreeSMAltertbReq(&req); *pJson = json; } @@ -360,7 +360,7 @@ static void processCreateStb(SMqMetaRsp* metaRsp, cJSON** pJson) { } buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE, &req.colCmpr, pJson); -end: + end: uDebug("create stable return, sql json:%s", cJSON_PrintUnformatted(*pJson)); tDecoderClear(&coder); } @@ -380,7 +380,7 @@ static void processAlterStb(SMqMetaRsp* metaRsp, cJSON** pJson) { } buildAlterSTableJson(req.alterOriData, req.alterOriDataLen, pJson); -end: + end: uDebug("alter stable return, sql json:%s", cJSON_PrintUnformatted(*pJson)); tDecoderClear(&coder); } @@ -485,7 +485,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag)); } -end: + end: RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); taosArrayDestroy(pTagVals); } @@ -514,7 +514,7 @@ static void buildCreateCTableJson(SVCreateTbReq* pCreateReq, int32_t nReqs, cJSO } RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "createList", createList)); -end: + end: *pJson = json; } @@ -542,7 +542,7 @@ static void processCreateTable(SMqMetaRsp* metaRsp, cJSON** pJson) { } } -end: + end: uDebug("create table return, sql json:%s", cJSON_PrintUnformatted(*pJson)); tDeleteSVCreateTbBatchReq(&req); tDecoderClear(&decoder); @@ -585,7 +585,7 @@ static void processAutoCreateTable(STaosxRsp* rsp, char** string) { *string = cJSON_PrintUnformatted(pJson); cJSON_Delete(pJson); -end: + end: uDebug("auto created table return, sql json:%s", *string); for (int i = 0; decoder && pCreateReq && i < rsp->createTableNum; i++) { tDecoderClear(&decoder[i]); @@ -771,7 +771,7 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { break; } -end: + end: uDebug("alter table return, sql json:%s", cJSON_PrintUnformatted(json)); tDecoderClear(&decoder); *pJson = json; @@ -806,7 +806,7 @@ static void processDropSTable(SMqMetaRsp* metaRsp, cJSON** pJson) { RAW_NULL_CHECK(tableName); RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName)); -end: + end: uDebug("processDropSTable return, sql json:%s", cJSON_PrintUnformatted(json)); tDecoderClear(&decoder); *pJson = json; @@ -842,7 +842,7 @@ static void processDeleteTable(SMqMetaRsp* metaRsp, cJSON** pJson) { RAW_NULL_CHECK(sqlJson); RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", sqlJson)); -end: + end: uDebug("processDeleteTable return, sql json:%s", cJSON_PrintUnformatted(json)); tDecoderClear(&coder); *pJson = json; @@ -879,7 +879,7 @@ static void processDropTable(SMqMetaRsp* metaRsp, cJSON** pJson) { } RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableNameList", tableNameList)); -end: + end: uDebug("processDropTable return, json sql:%s", cJSON_PrintUnformatted(json)); tDecoderClear(&decoder); *pJson = json; @@ -990,7 +990,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; -end: + end: uDebug(LOG_ID_TAG " create stable return, msg:%s", LOG_ID_VALUE, tstrerror(code)); destroyRequest(pRequest); tFreeSMCreateStbReq(&pReq); @@ -1027,9 +1027,9 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { SCatalog* pCatalog = NULL; RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); SRequestConnInfo conn = {.pTrans = pRequest->pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = 
getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)}; SName pName = {0}; toName(pRequest->pTscObj->acctId, pRequest->pDb, req.name, &pName); STableMeta* pTableMeta = NULL; @@ -1092,7 +1092,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; -end: + end: uDebug(LOG_ID_TAG " drop stable return, msg:%s", LOG_ID_VALUE, tstrerror(code)); destroyRequest(pRequest); tDecoderClear(&coder); @@ -1148,9 +1148,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); RAW_NULL_CHECK(pRequest->tableList); @@ -1243,7 +1243,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; -end: + end: uDebug(LOG_ID_TAG " create table return, msg:%s", LOG_ID_VALUE, tstrerror(code)); tDeleteSVCreateTbBatchReq(&req); @@ -1304,9 +1304,9 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); RAW_NULL_CHECK(pRequest->tableList); // loop to create table @@ -1371,7 +1371,7 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { } code = pRequest->code; -end: + end: uDebug(LOG_ID_TAG " drop table return, msg:%s", LOG_ID_VALUE, tstrerror(code)); taosHashCleanup(pVgroupHashmap); destroyRequest(pRequest); @@ -1412,7 +1412,7 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { } taos_free_result(res); -end: + end: uDebug("connId:0x%" PRIx64 " delete data sql:%s, code:%s", *(int64_t*)taos, sql, tstrerror(code)); tDecoderClear(&coder); return code; @@ -1455,9 +1455,9 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { SCatalog* pCatalog = NULL; RAW_RETURN_CHECK(catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog)); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; SVgroupInfo pInfo = {0}; SName pName = {0}; @@ -1525,7 +1525,7 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { code = handleAlterTbExecRes(pRes->res, pCatalog); } } -end: + end: uDebug(LOG_ID_TAG " alter table return, meta:%p, len:%d, msg:%s", LOG_ID_VALUE, meta, metaLen, tstrerror(code)); taosArrayDestroy(pArray); if (pVgData) taosMemoryFreeClear(pVgData->pData); @@ -1590,7 +1590,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat 
(void)launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; -end: + end: uDebug(LOG_ID_TAG " write raw block with field return, msg:%s", LOG_ID_VALUE, tstrerror(code)); taosMemoryFreeClear(pTableMeta); qDestroyQuery(pQuery); @@ -1650,7 +1650,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha (void)launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; -end: + end: uDebug(LOG_ID_TAG " write raw block return, msg:%s", LOG_ID_VALUE, tstrerror(code)); taosMemoryFreeClear(pTableMeta); qDestroyQuery(pQuery); @@ -1769,7 +1769,7 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { (void)launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; -end: + end: uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code)); tDeleteMqDataRsp(&rspObj.rsp); tDecoderClear(&decoder); @@ -1790,23 +1790,28 @@ static int32_t buildCreateTbMap(STaosxRsp* rsp, SHashObj* pHashObj) { void** dataTmp = taosArrayGet(rsp->createTableReq, j); RAW_NULL_CHECK(dataTmp); int32_t* lenTmp = taosArrayGet(rsp->createTableLen, j); - RAW_NULL_CHECK(dataTmp); + RAW_NULL_CHECK(lenTmp); tDecoderInit(&decoderTmp, *dataTmp, *lenTmp); - RAW_RETURN_CHECK (tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq)); + RAW_RETURN_CHECK(tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq)); if (pCreateReq.type != TSDB_CHILD_TABLE) { code = TSDB_CODE_INVALID_MSG; goto end; } - if (taosHashGet(pHashObj, pCreateReq.name, strlen(pCreateReq.name)) == NULL){ - RAW_RETURN_CHECK(taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), &pCreateReq, sizeof(SVCreateTbReq))); - } else{ - tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); - pCreateReq = (SVCreateTbReq){0}; + SVCreateTbReq** pCreateReqDst = taosHashGet(pHashObj, pCreateReq.name, strlen(pCreateReq.name)); + if (pCreateReqDst == NULL){ + RAW_RETURN_CHECK(cloneSVreateTbReq(&pCreateReq, pCreateReqDst)); + code = taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), &pCreateReqDst, POINTER_BYTES); + if (code != 0){ + tdDestroySVCreateTbReq(*pCreateReqDst); + goto end; + } } tDecoderClear(&decoderTmp); + tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); + pCreateReq = (SVCreateTbReq){0}; } return 0; @@ -1816,19 +1821,20 @@ end: return code; } +static threadlocal SHashObj* pVgHash = NULL; +static threadlocal SHashObj* pCreateTbHash = NULL; +static threadlocal SHashObj* pNameHash = NULL; +static threadlocal SHashObj* pMetaHash = NULL; + static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { if (taos == NULL || data == NULL) { SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); return TSDB_CODE_INVALID_PARA; } int32_t code = TSDB_CODE_SUCCESS; - SHashObj* pVgHash = NULL; - SHashObj* pCreateTbHash = NULL; SQuery* pQuery = NULL; SMqTaosxRspObj rspObj = {0}; SDecoder decoder = {0}; - STableMeta* pTableMeta = NULL; - SRequestObj* pRequest = NULL; RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); @@ -1856,6 +1862,23 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) goto end; } + if (pVgHash == NULL){ + pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + RAW_NULL_CHECK(pVgHash); + } + if (pCreateTbHash == NULL){ + pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + RAW_NULL_CHECK(pCreateTbHash); + } + if (pNameHash == NULL){ + pNameHash = taosHashInit(16, 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + RAW_NULL_CHECK(pNameHash); + } + if (pMetaHash == NULL){ + pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + RAW_NULL_CHECK(pMetaHash); + taosHashSetFreeFp(pMetaHash, taosMemoryFree); + } struct SCatalog* pCatalog = NULL; RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); @@ -1866,11 +1889,6 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); RAW_RETURN_CHECK(smlInitHandle(&pQuery)); - pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pVgHash); - pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pCreateTbHash); - RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.rsp, pCreateTbHash)); uDebug(LOG_ID_TAG " write raw metadata block num:%d", LOG_ID_VALUE, rspObj.rsp.common.blockNum); while (++rspObj.common.resIter < rspObj.rsp.common.blockNum) { @@ -1894,21 +1912,37 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) // find schema data info SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); - SVgroupInfo vg = {0}; - RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg)); - if (pCreateReqDst) { // change stable name to get meta - (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); + if (pCreateReqDst == NULL) { + RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.rsp, pCreateTbHash)); + pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); } - RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); - if (pCreateReqDst) { - pTableMeta->vgId = vg.vgId; - pTableMeta->uid = pCreateReqDst->uid; - pCreateReqDst->ctb.suid = pTableMeta->suid; + int32_t vgId = 0; + SVgroupInfo* vg = (SVgroupInfo*)taosHashGet(pNameHash, tbName, strlen(tbName)); + if (vg == NULL) { + SVgroupInfo vgTmp = {0}; + RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgTmp)); + RAW_RETURN_CHECK(taosHashPut(pNameHash, tbName, strlen(tbName), &vgTmp, sizeof(SVgroupInfo))); + code = taosHashPut(pVgHash, &vgTmp.vgId, sizeof(vgTmp.vgId), &vgTmp, sizeof(SVgroupInfo)); + code = (code == TSDB_CODE_DUP_KEY) ? 
0 : code; + RAW_RETURN_CHECK(code); + vgId = vgTmp.vgId; + } else { + vgId = vg->vgId; } - void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId)); - if (hData == NULL) { - RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); + + STableMeta** pTableMeta = (STableMeta**)taosHashGet(pMetaHash, tbName, strlen(tbName)); + if (pTableMeta == NULL) { + if (pCreateReqDst) { // change stable name to get meta + (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); + } + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, pTableMeta)); + RAW_RETURN_CHECK(taosHashPut(pMetaHash, tbName, strlen(tbName), pTableMeta, POINTER_BYTES)); + if (pCreateReqDst) { + (*pTableMeta)->vgId = vgId; + (*pTableMeta)->uid = pCreateReqDst->uid; + pCreateReqDst->ctb.suid = (*pTableMeta)->suid; + } } SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.common.blockSchema, rspObj.common.resIter); @@ -1930,13 +1964,12 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) if (pCreateReqDst){ RAW_RETURN_CHECK(cloneSVreateTbReq(pCreateReqDst, &pCreateReqTmp)); } - code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqTmp, fields, pSW->nCols, true, err, ERR_MSG_LEN); + code = rawBlockBindData(pQuery, *pTableMeta, rawData, &pCreateReqTmp, fields, pSW->nCols, true, err, ERR_MSG_LEN); if (pCreateReqTmp != NULL) { tdDestroySVCreateTbReq(pCreateReqTmp); taosMemoryFree(pCreateReqTmp); } taosMemoryFree(fields); - taosMemoryFreeClear(pTableMeta); if (code != TSDB_CODE_SUCCESS) { SET_ERROR_MSG("table:%s, err:%s", tbName, err); goto end; @@ -1948,23 +1981,26 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) (void)launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; -end: + end: uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); + tDeleteSTaosxRsp(&rspObj.rsp); + tDecoderClear(&decoder); + qDestroyQuery(pQuery); + destroyRequest(pRequest); + + return code; +} + +void tmqClean() { + taosHashCleanup(pMetaHash); + taosHashCleanup(pNameHash); void* pIter = taosHashIterate(pCreateTbHash, NULL); while (pIter) { tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); pIter = taosHashIterate(pCreateTbHash, pIter); } taosHashCleanup(pCreateTbHash); - - tDeleteSTaosxRsp(&rspObj.rsp); - tDecoderClear(&decoder); - qDestroyQuery(pQuery); - destroyRequest(pRequest); taosHashCleanup(pVgHash); - taosMemoryFreeClear(pTableMeta); - - return code; } static void processSimpleMeta(SMqMetaRsp* pMetaRsp, cJSON** meta) { @@ -2027,7 +2063,7 @@ static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) { *string = fullStr; return; -end: + end: cJSON_Delete(pJson); tDeleteMqBatchMetaRsp(&rsp); } @@ -2113,7 +2149,7 @@ static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, void* rspObj, tmq_ra raw->raw = buf; raw->raw_len = len; return code; -FAILED: + FAILED: tEncoderClear(&encoder); taosMemoryFree(buf); return code; @@ -2236,7 +2272,7 @@ static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen } } -end: + end: tDeleteMqBatchMetaRsp(&rsp); return code; } From 511e1c8f1ad6ec233a5d91837b1208adbe026da7 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 29 Sep 2024 16:33:36 +0800 Subject: [PATCH 006/142] valid iter --- source/libs/stream/src/streamBackendRocksdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 
a2c9012df5..3ec9f005f0 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -3337,7 +3337,7 @@ int32_t streamStateClear_rocksdb(SStreamState* pState) { return 0; } void streamStateCurNext_rocksdb(SStreamStateCur* pCur) { - if (pCur) { + if (pCur && pCur->iter && rocksdb_iter_valid(pCur->iter)) { rocksdb_iter_next(pCur->iter); } } From 14fcb4831860bce8f4b0600d69c8c5f6ed815cd6 Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Mon, 30 Sep 2024 10:11:48 +0800 Subject: [PATCH 007/142] docs: adjust the version function description --- docs/zh/14-reference/05-connector/50-odbc.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx b/docs/zh/14-reference/05-connector/50-odbc.mdx index 38dd88b86d..73e5592cf5 100644 --- a/docs/zh/14-reference/05-connector/50-odbc.mdx +++ b/docs/zh/14-reference/05-connector/50-odbc.mdx @@ -7,7 +7,7 @@ TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系 TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 -注意:TDengine ODBC 支持 32/64 位 Windows 系统,调用 TDengine ODBC 需要通过相应位数的 ODBC 驱动管理器进行。在 32 位 Windows 系统或者 64 位 Windows 系统的 32 位应用程序中,仅支持使用 WebSocket 连接方式访问 TDengine 数据库。 +注意:TDengine ODBC 支持 64 位 Windows 系统,支持 32 位 Websocket 连接方式(仅企业版支持),调用 TDengine ODBC 需要通过相应位数的 ODBC 驱动管理器进行。在 32 位 Windows 系统或者 64 位 Windows 系统的 32 位应用程序中,仅支持使用 WebSocket 连接方式访问 TDengine 数据库。 想更多了解 TDengine 时序时序数据库的使用,可访问 [TDengine官方文档](https://docs.taosdata.com/intro/)。 @@ -111,7 +111,7 @@ WebSocket 连接方式除此之外还支持 Windows X64系统上运行的 32 位 | taos_odbc版本 | 主要变化 | TDengine 版本 | | :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- | -| v1.1.0 | 1. 支持视图功能;
2. 支持 VARBINARY/GEOMETRY 数据类型; | 3.3.3.0及更高版本 | +| v1.1.0 | 1. 支持视图功能;
2. 支持 VARBINARY/GEOMETRY 数据类型;
3. 支持 ODBC 32 位 WebSocket 连接方式(仅企业版支持);
4. 支持 ODBC 数据源配置对话框设置对工业软件 KingSCADA、Kepware 等的兼容性适配选项(仅企业版支持); | 3.3.3.0及更高版本 | | v1.0.2 | 支持 CP1252 字符编码; | 3.2.3.0及更高版本 | | v1.0.1 | 1. 支持 DSN 设置 BI 模式,在 BI 模式下 TDengine 数据库不返回系统数据库和超级表子表信息;
2. 重构字符集转换模块,提升读写性能;
3. ODBC 数据源配置对话框中默认修改默认连接方式为“WebSocket”;
4. ODBC 数据源配置对话框增加“测试连接”控件;
5. ODBC 数据源配置支持中文/英文界面; | - | | v1.0.0.0 | 发布初始版本,支持与Tdengine数据库交互以读写数据,具体请参考“API 参考”一节 | 3.2.2.0及更高版本 | From f42a97e0a78fc25e8596370b24b33d49599cff2a Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Mon, 30 Sep 2024 10:25:13 +0800 Subject: [PATCH 008/142] update odbc doc --- docs/zh/14-reference/05-connector/50-odbc.mdx | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx b/docs/zh/14-reference/05-connector/50-odbc.mdx index ac317435df..69dcdc2a2a 100644 --- a/docs/zh/14-reference/05-connector/50-odbc.mdx +++ b/docs/zh/14-reference/05-connector/50-odbc.mdx @@ -3,11 +3,14 @@ sidebar_label: ODBC title: TDengine ODBC --- -TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库的数据表/视图。 +TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)以及用户自定义开发的应用程序,通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库。 -TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 +TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 -注意:TDengine ODBC 支持 32/64 位 Windows 系统,调用 TDengine ODBC 需要通过相应位数的 ODBC 驱动管理器进行。在 32 位 Windows 系统或者 64 位 Windows 系统的 32 位应用程序中,仅支持使用 WebSocket 连接方式访问 TDengine 数据库。 +TDengine ODBC 提供 64 位 和 32 位 两种驱动程序。但 32 位仅企业版支持,且仅支持 Websocket 连接方式。 +**注意:** +- 驱动管理器:确保使用与应用程序架构匹配的 ODBC 驱动管理器。32 位应用程序需要使用 32 位 ODBC 驱动管理器,64 位应用程序需要使用 64 位 ODBC 驱动管理器。 +- 数据源名称(DSN):32 位和 64 位 ODBC 驱动管理器都可以看到所有 DSN,因此需要在 DSN 名称上去区分。 想更多了解 TDengine 时序时序数据库的使用,可访问 [TDengine官方文档](https://docs.taosdata.com/intro/)。 From 56cb709f42dc10f438be6070fb83d6c4ae5bf998 Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Mon, 30 Sep 2024 10:39:45 +0800 Subject: [PATCH 009/142] docs: adjust DSN description --- docs/zh/14-reference/05-connector/50-odbc.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx b/docs/zh/14-reference/05-connector/50-odbc.mdx index 0a57a6308b..24c1a1e84d 100644 --- a/docs/zh/14-reference/05-connector/50-odbc.mdx +++ b/docs/zh/14-reference/05-connector/50-odbc.mdx @@ -7,10 +7,10 @@ TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系 TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 -TDengine ODBC 提供 64 位 和 32 位 两种驱动程序。但 32 位仅企业版支持,且仅支持 Websocket 连接方式。 +TDengine ODBC 提供 64 位 和 32 位 两种驱动程序。但 32 位仅企业版支持,且仅支持 WebSocket 连接方式。 **注意:** - 驱动管理器:确保使用与应用程序架构匹配的 ODBC 驱动管理器。32 位应用程序需要使用 32 位 ODBC 驱动管理器,64 位应用程序需要使用 64 位 ODBC 驱动管理器。 -- 数据源名称(DSN):32 位和 64 位 ODBC 驱动管理器都可以看到所有 DSN,因此需要在 DSN 名称上去区分。 +- 数据源名称(DSN):32 位和 64 位 ODBC 驱动管理器都可以看到所有 DSN,用户 DSN 标签页下的 DSN 如果名字相同会共用,因此需要在 DSN 名称上去区分。 想更多了解 TDengine 时序时序数据库的使用,可访问 [TDengine官方文档](https://docs.taosdata.com/intro/)。 From 80936446f912a71087a4ee1b39eec0e6dab1bbb7 Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Mon, 30 Sep 2024 10:43:12 +0800 Subject: [PATCH 010/142] docs: adjust WebSocket description --- docs/zh/14-reference/05-connector/50-odbc.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx b/docs/zh/14-reference/05-connector/50-odbc.mdx index 24c1a1e84d..ee57901e9b 100644 --- a/docs/zh/14-reference/05-connector/50-odbc.mdx +++ b/docs/zh/14-reference/05-connector/50-odbc.mdx @@ -27,17 +27,17 @@ TDengine ODBC 提供 64 位 和 32 位 两种驱动程序。但 
32 位仅企业 ### 数据源连接类型与区别 -TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 Native 连接,其区别如下: +TDengine ODBC 支持两种连接 TDengine 数据库方式:WebSocket 连接与 Native 连接,其区别如下: -1. 访问云服务仅支持使用 Websocket 连接方式。 +1. 访问云服务仅支持使用 WebSocket 连接方式。 2. 32 位应用程序仅支持使用 WebSocket 连接方式。 -3. Websocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。 +3. WebSocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。 4. Native 连接通常性能更好一点,但是必须与 TDengine 数据库服务端的版本保持一致。 -5. 对于一般用户,建议使用 **Websocket** 连接方式,性能与 Native 差别不大,兼容性更好。 +5. 对于一般用户,建议使用 **WebSocket** 连接方式,性能与 Native 差别不大,兼容性更好。 ### WebSocket 连接 @@ -49,11 +49,11 @@ TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 4. 点击完成,进入 TDengine ODBC 数据源配置页面,填写如下必要信息 - ![ODBC websocket connection config](./assets/odbc-ws-config-zh.webp) + ![ODBC WebSocket connection config](./assets/odbc-ws-config-zh.webp) 4.1 【DSN】:Data Source Name 必填,为新添加的 ODBC 数据源命名 - 4.2【连接类型】 : 必选,选择连接类型,这里选择 【Websocket】 + 4.2【连接类型】 : 必选,选择连接类型,这里选择 【WebSocket】 4.3【URL】必填,ODBC 数据源 URL,示例: `http://localhost:6041`, 云服务的 url 示例: `https://gw.cloud.taosdata.com?token=your_token` From 67fe6091298de92b1bb0876d09e985e2f74cbcbf Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Oct 2024 18:24:16 +0800 Subject: [PATCH 011/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- include/libs/parser/parser.h | 4 +-- source/client/src/clientRawBlockWrite.c | 43 ++++++++++++------------- source/libs/parser/src/parInsertUtil.c | 21 +++++++++--- utils/test/c/tmq_taosx_ci.c | 1 + 4 files changed, 39 insertions(+), 30 deletions(-) diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index c98b188d49..832e4f8863 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -176,8 +176,8 @@ int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsS STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int32_t msgBufLen); int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash); -int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD* fields, - int numFields, bool needChangeLength, char* errstr, int32_t errstrLen); +int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* fields, + int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw); int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray); int32_t serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap, SArray** pOut); diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 17ebd9b558..064c3bdb2e 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1618,7 +1618,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat RAW_NULL_CHECK(pVgHash); RAW_RETURN_CHECK( taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData))); - RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0)); + RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0, false)); RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); launchQueryImpl(pRequest, pQuery, true, NULL); @@ -1678,7 +1678,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha RAW_NULL_CHECK(pVgHash); RAW_RETURN_CHECK( taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), 
(char*)&vgData, sizeof(vgData))); - RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0)); + RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0, false)); RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); launchQueryImpl(pRequest, pQuery, true, NULL); @@ -1960,35 +1960,32 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da vgId = vg->vgId; } - STableMeta** pTableMeta = (STableMeta**)taosHashGet(pMetaHash, tbName, strlen(tbName)); - if (pTableMeta == NULL) { + STableMeta* pTableMeta = NULL; + STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, tbName, strlen(tbName)); + if (pTableMetaTmp == NULL) { if (pCreateReqDst) { // change stable name to get meta (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); } - RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, pTableMeta)); - RAW_RETURN_CHECK(taosHashPut(pMetaHash, tbName, strlen(tbName), pTableMeta, POINTER_BYTES)); - if (pCreateReqDst) { - (*pTableMeta)->vgId = vgId; - (*pTableMeta)->uid = pCreateReqDst->uid; - pCreateReqDst->ctb.suid = (*pTableMeta)->suid; + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); + code = taosHashPut(pMetaHash, tbName, strlen(tbName), &pTableMeta, POINTER_BYTES); + if (code != 0){ + taosMemoryFree(pTableMeta); + goto end; } + if (pCreateReqDst) { + pTableMeta->vgId = vgId; + pTableMeta->uid = pCreateReqDst->uid; + pCreateReqDst->ctb.suid = pTableMeta->suid; + } + }else{ + pTableMeta = *pTableMetaTmp; } -// SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); -// RAW_NULL_CHECK(pSW); -// TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD)); -// RAW_NULL_CHECK(fields); -// -// for (int i = 0; i < pSW->nCols; i++) { -// fields[i].type = pSW->pSchema[i].type; -// fields[i].bytes = pSW->pSchema[i].bytes; -// tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name)); -// } + SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); + RAW_NULL_CHECK(pSW); void* rawData = getRawDataFromRes(pRetrieve); char err[ERR_MSG_LEN] = {0}; - code = rawBlockBindData(pQuery, *pTableMeta, rawData, pCreateReqDst, NULL, 0, true, err, ERR_MSG_LEN); -// code = rawBlockBindData(pQuery, *pTableMeta, rawData, pCreateReqDst, fields, pSW->nCols, true, err, ERR_MSG_LEN); -// taosMemoryFree(fields); + code = rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); if (code != TSDB_CODE_SUCCESS) { SET_ERROR_MSG("table:%s, err:%s", tbName, err); goto end; diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index 838f797394..f29ed79412 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -886,8 +886,8 @@ static bool findFileds(SSchema* pSchema, TAOS_FIELD* fields, int numFields) { return false; } -int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD* tFields, - int numFields, bool needChangeLength, char* errstr, int32_t errstrLen) { +int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* tFields, + int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw) { int ret = 0; if(data == NULL) { uError("rawBlockBindData, data is NULL"); @@ -964,11 +964,16 @@ int rawBlockBindData(SQuery* query, STableMeta* 
pTableMeta, void* data, SVCreate goto end; } if (tFields != NULL && numFields > boundInfo->numOfBound) { - if (errstr != NULL) - snprintf(errstr, errstrLen, "numFields:%d bigger than num of bound cols:%d", numFields, boundInfo->numOfBound); + if (errstr != NULL) snprintf(errstr, errstrLen, "numFields:%d bigger than num of bound cols:%d", numFields, boundInfo->numOfBound); ret = TSDB_CODE_INVALID_PARA; goto end; } + if (tFields == NULL && numOfCols != boundInfo->numOfBound) { + if (errstr != NULL) snprintf(errstr, errstrLen, "numFields:%d not equal to num of bound cols:%d", numOfCols, boundInfo->numOfBound); + ret = TSDB_CODE_INVALID_PARA; + goto end; + } + if (tFields == NULL) { for (int j = 0; j < boundInfo->numOfBound; j++) { SSchema* pColSchema = &pSchema[j]; @@ -1006,7 +1011,13 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate for (int i = 0; i < numFields; i++) { for (int j = 0; j < boundInfo->numOfBound; j++) { SSchema* pColSchema = &pSchema[j]; - if (strcmp(pColSchema->name, tFields[i].name) == 0) { + char* fieldName = NULL; + if (raw) { + fieldName = ((SSchemaWrapper*)tFields)->pSchema[i].name; + } else { + fieldName = ((TAOS_FIELD*)tFields)[i].name; + } + if (strcmp(pColSchema->name, fieldName) == 0) { if (*fields != pColSchema->type && *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { if (errstr != NULL) snprintf(errstr, errstrLen, diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c index 3a79a3763c..d49c7e4ad4 100644 --- a/utils/test/c/tmq_taosx_ci.c +++ b/utils/test/c/tmq_taosx_ci.c @@ -79,6 +79,7 @@ static void msg_process(TAOS_RES* msg) { } else { taosFprintfFile(g_fp, result); taosFprintfFile(g_fp, "\n"); + taosFsyncFile(g_fp); } } } From a2fa41ccc77797da594464221768c43cabc51448 Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Wed, 9 Oct 2024 09:51:19 +0800 Subject: [PATCH 012/142] Update 01-arch.md --- docs/zh/26-tdinternal/01-arch.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/26-tdinternal/01-arch.md b/docs/zh/26-tdinternal/01-arch.md index 04e47797a8..8aa69e45d5 100644 --- a/docs/zh/26-tdinternal/01-arch.md +++ b/docs/zh/26-tdinternal/01-arch.md @@ -178,7 +178,7 @@ TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用 TDengine 存储的数据包括采集的时序数据以及库、表相关的元数据、标签数据等,这些数据具体分为三部分: -- 时序数据:TDengine 的核心存储对象,存放于 vnode 里,由 data、head 和 last 三个文件组成,数据量大,查询量取决于应用场景。允许乱序写入,但暂时不支持删除操作,并且仅在 update 参数设置为 1 时允许更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。 +- 时序数据:时序数据是 TDengine 的核心存储对象,它们被存储在 vnode 中。时序数据由 data、head、sma 和 stt 4 类文件组成,这些文件共同构成了时序数据的完整存储结构。由于时序数据的特点是数据量大且查询需求取决于具体应用场景,因此 TDengine 采用了“一个数据采集点一张表”的模型来优化存储和查询性能。在这种模型下,一个时间段内的数据是连续存储的,对单张表的写入是简单的追加操作,一次读取可以获取多条记录。这种设计确保了单个数据采集点的写入和查询操作都能达到最优性能。 - 数据表元数据:包含标签信息和 Table Schema 信息,存放于 vnode 里的 meta 文件,支持增删改查四个标准操作。数据量很大,有 N 张表,就有 N 条记录,因此采用 LRU 存储,支持标签数据的索引。TDengine 支持多核多线程并发查询。只要计算内存足够,元数据全内存存储,千万级别规模的标签数据过滤结果能毫秒级返回。在内存资源不足的情况下,仍然可以支持数千万张表的快速查询。 - 数据库元数据:存放于 mnode 里,包含系统节点、用户、DB、STable Schema 等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。 @@ -321,4 +321,4 @@ TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化 此外,TDengine 还提供了数据分级存储的功能,允许用户将不同时间段的数据存储在不同存储设备的目录中,以此实现将“热”数据和“冷”数据分开存储。这样做可以充分利用各种存储资源,同时节约成本。例如,对于最新采集且需要频繁访问的数据,由于其读取性能要求较高,用户可以配置将这些数据存储在高性能的固态硬盘上。而对于超过一定期限、查询需求较低的数据,则可以将其存储在成本相对较低的机械硬盘上。 -为了进一步降低存储成本,TDengine 还支持将时序数据存储在对象存储系统中。通过其创新性的设计,在大多数情况下,从对象存储系统中查询时序数据的性能接近本地硬盘的一半,而在某些场景下,性能甚至可以与本地硬盘相媲美。同时,TDengine 还允许用户对存储在对象存储中的时序数据执行删除和更新操作。 \ No newline 
at end of file +为了进一步降低存储成本,TDengine 还支持将时序数据存储在对象存储系统中。通过其创新性的设计,在大多数情况下,从对象存储系统中查询时序数据的性能接近本地硬盘的一半,而在某些场景下,性能甚至可以与本地硬盘相媲美。同时,TDengine 还允许用户对存储在对象存储中的时序数据执行删除和更新操作。 From 0bcaf6f4e5324553b7beee173824a2c8ece4de03 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 9 Oct 2024 15:30:28 +0800 Subject: [PATCH 013/142] enh:[TS-5441] change poll flag to consumer for multi consumers --- source/client/src/clientTmq.c | 16 +++++++++------- source/dnode/mnode/impl/src/mndConsumer.c | 1 + source/dnode/vnode/src/tq/tqRead.c | 2 +- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 975d14f3ee..4cbb808187 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -134,6 +134,7 @@ struct tmq_t { // poll info int64_t pollCnt; int64_t totalRows; + int8_t pollFlag; // timer tmr_h hbLiveTimer; @@ -287,7 +288,6 @@ typedef struct { static TdThreadOnce tmqInit = PTHREAD_ONCE_INIT; // initialize only once volatile int32_t tmqInitRes = 0; // initialize rsp code static SMqMgmt tmqMgmt = {0}; -static int8_t pollFlag = 0; tmq_conf_t* tmq_conf_new() { tmq_conf_t* conf = taosMemoryCalloc(1, sizeof(tmq_conf_t)); @@ -977,7 +977,8 @@ void tmqSendHbReq(void* param, void* tmrId) { SMqHbReq req = {0}; req.consumerId = tmq->consumerId; req.epoch = tmq->epoch; - req.pollFlag = atomic_load_8(&pollFlag); + req.pollFlag = atomic_load_8(&tmq->pollFlag); + tqDebugC("consumer:0x%" PRIx64 " send hb, pollFlag:%d", tmq->consumerId, req.pollFlag); req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows)); if (req.topics == NULL) { goto END; @@ -1057,13 +1058,13 @@ void tmqSendHbReq(void* param, void* tmrId) { if (code != 0) { tqErrorC("tmqSendHbReq asyncSendMsgToServer failed"); } - (void)atomic_val_compare_exchange_8(&pollFlag, 1, 0); + (void)atomic_val_compare_exchange_8(&tmq->pollFlag, 1, 0); END: tDestroySMqHbReq(&req); if (tmrId != NULL) { bool ret = taosTmrReset(tmqSendHbReq, tmq->heartBeatIntervalMs, param, tmqMgmt.timer, &tmq->hbLiveTimer); - tqDebugC("reset timer fo tmq hb:%d", ret); + tqDebugC("consumer:0x%" PRIx64 " reset timer for tmq hb:%d, pollFlag:%d", tmq->consumerId, ret, tmq->pollFlag); } int32_t ret = taosReleaseRef(tmqMgmt.rsetId, refId); if (ret != 0){ @@ -1422,7 +1423,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) { tqDebugC("consumer:0x%" PRIx64 " retrieve ep from mnode in 1s", pTmq->consumerId); bool ret = taosTmrReset(tmqAssignAskEpTask, DEFAULT_ASKEP_INTERVAL, (void*)(pTmq->refId), tmqMgmt.timer, &pTmq->epTimer); - tqDebugC("reset timer fo tmq ask ep:%d", ret); + tqDebugC("reset timer for tmq ask ep:%d", ret); } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { tmq_commit_cb* pCallbackFn = (pTmq->commitCb != NULL) ? 
pTmq->commitCb : defaultCommitCbFn; asyncCommitAllOffsets(pTmq, pCallbackFn, pTmq->commitCbUserParam); @@ -1430,7 +1431,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) { pTmq->autoCommitInterval / 1000.0); bool ret = taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, (void*)(pTmq->refId), tmqMgmt.timer, &pTmq->commitTimer); - tqDebugC("reset timer fo commit:%d", ret); + tqDebugC("reset timer for commit:%d", ret); } else { tqErrorC("consumer:0x%" PRIx64 " invalid task type:%d", pTmq->consumerId, *pTaskType); } @@ -1640,6 +1641,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { pTmq->status = TMQ_CONSUMER_STATUS__INIT; pTmq->pollCnt = 0; pTmq->epoch = 0; + pTmq->pollFlag = 0; // set conf tstrncpy(pTmq->clientId, conf->clientId, TSDB_CLIENT_ID_LEN); @@ -2441,7 +2443,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { return NULL; } - (void)atomic_val_compare_exchange_8(&pollFlag, 0, 1); + (void)atomic_val_compare_exchange_8(&tmq->pollFlag, 0, 1); while (1) { tmqHandleAllDelayedTask(tmq); diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index a96b8b22f5..a91d2379ae 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -239,6 +239,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { MND_TMQ_RETURN_CHECK(mndAcquireConsumer(pMnode, consumerId, &pConsumer)); MND_TMQ_RETURN_CHECK(checkPrivilege(pMnode, pConsumer, &rsp, pMsg->info.conn.user)); atomic_store_32(&pConsumer->hbStatus, 0); + mDebug("consumer:0x%" PRIx64 " receive hb pollFlag:%d %d", consumerId, req.pollFlag, pConsumer->pollStatus); if (req.pollFlag == 1){ atomic_store_32(&pConsumer->pollStatus, 0); } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 6dc5453d50..7ca749b5a3 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -953,7 +953,7 @@ END: } int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet) { - tqDebug("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk); + tqTrace("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk); SSDataBlock* block = NULL; SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk); From e6bf8dcfde498c8e73f3ce723d23230366e88c78 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 9 Oct 2024 18:10:43 +0800 Subject: [PATCH 014/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- include/common/tmsg.h | 23 +++++----- include/libs/executor/executor.h | 3 +- source/client/src/clientRawBlockWrite.c | 55 +++++++++++++++++++--- source/common/src/tmsg.c | 40 ++++++++++++++++ source/dnode/vnode/src/tq/tqScan.c | 61 ++++++++++++------------- source/dnode/vnode/src/tq/tqUtil.c | 10 +++- source/libs/executor/inc/querytask.h | 1 + source/libs/executor/src/executor.c | 6 +++ 8 files changed, 145 insertions(+), 54 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 1a10f02c96..fb118de907 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -4049,18 +4049,17 @@ void tDeleteMqMetaRsp(SMqMetaRsp* pRsp); #define MQ_DATA_RSP_VERSION 100 typedef struct { - struct { - SMqRspHead head; - STqOffsetVal rspOffset; - STqOffsetVal reqOffset; - int32_t blockNum; - int8_t withTbName; - int8_t withSchema; - SArray* blockDataLen; - SArray* blockData; - SArray* blockTbName; - SArray* 
blockSchema; - }; + SMqRspHead head; + STqOffsetVal rspOffset; + STqOffsetVal reqOffset; + int32_t blockNum; + int8_t withTbName; + int8_t withSchema; + SArray* blockDataLen; + SArray* blockData; + SArray* blockTbName; + SArray* blockSchema; + SArray* blockSuid; union{ struct{ diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index ae26d5f2ae..6b02d8f985 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -209,7 +209,8 @@ SMqBatchMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo); const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo); -const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); +const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); +const int64_t qExtractSuidFromTask(qTaskInfo_t tinfo); void* qExtractReaderFromStreamScanner(void* scanner); diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 064c3bdb2e..4b2e9e234d 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1856,6 +1856,42 @@ static threadlocal SHashObj* pCreateTbHash = NULL; static threadlocal SHashObj* pNameHash = NULL; static threadlocal SHashObj* pMetaHash = NULL; +static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW){ + char* p = (char*)rawData; + // | version | total length | total rows | blankFill | total columns | flag seg| block group id | column schema | each + // column length | + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(uint64_t); + int8_t* fields = p; + + if (pSW->nCols != pTableMeta->tableInfo.numOfColumns) { + return true; + } + for (int i = 0; i < pSW->nCols; i++) { + int j = 0; + for (; j < pTableMeta->tableInfo.numOfColumns; j++) { + SSchema* pColSchema = &pTableMeta->schema[j]; + char* fieldName = pSW->pSchema[i].name; + + if (strcmp(pColSchema->name, fieldName) == 0) { + if (*fields != pColSchema->type && *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { + return true; + } + break; + } + } + fields += sizeof(int8_t) + sizeof(int32_t); + + if (j == pTableMeta->tableInfo.numOfColumns) + return true; + } + return false; +} + static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t dataLen) { if (taos == NULL || data == NULL) { SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); @@ -1905,7 +1941,7 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da RAW_NULL_CHECK(pNameHash); } if (pMetaHash == NULL){ - pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); RAW_NULL_CHECK(pMetaHash); taosHashSetFreeFp(pMetaHash, taosMemoryFree); } @@ -1931,6 +1967,9 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); RAW_NULL_CHECK(tbName); + int64_t* suid = taosArrayGet(rspObj.dataRsp.blockSuid, rspObj.resIter); + RAW_NULL_CHECK(suid); + uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; (void)strcpy(pName.dbname, pRequest->pDb); @@ -1960,14 +1999,19 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da vgId = vg->vgId; } + SSchemaWrapper* pSW = 
(SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); + RAW_NULL_CHECK(pSW); + void* rawData = getRawDataFromRes(pRetrieve); + RAW_NULL_CHECK(rawData); + STableMeta* pTableMeta = NULL; - STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, tbName, strlen(tbName)); - if (pTableMetaTmp == NULL) { + STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, suid, POINTER_BYTES); + if (pTableMetaTmp == NULL || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) { if (pCreateReqDst) { // change stable name to get meta (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); } RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); - code = taosHashPut(pMetaHash, tbName, strlen(tbName), &pTableMeta, POINTER_BYTES); + code = taosHashPut(pMetaHash, suid, POINTER_BYTES, &pTableMeta, POINTER_BYTES); if (code != 0){ taosMemoryFree(pTableMeta); goto end; @@ -1981,9 +2025,6 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da pTableMeta = *pTableMetaTmp; } - SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); - RAW_NULL_CHECK(pSW); - void* rawData = getRawDataFromRes(pRetrieve); char err[ERR_MSG_LEN] = {0}; code = rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); if (code != TSDB_CODE_SUCCESS) { diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 986747fe58..41516d325a 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -10716,12 +10716,42 @@ _exit: return code; } +int32_t tEncodeSuidArray(SEncoder *pEncoder, const SMqDataRsp *pRsp){ + for (int32_t i = 0; i < pRsp->blockNum; i++) { + if (pRsp->withTbName) { + int64_t* suid = taosArrayGet(pRsp->blockSuid, i); + if (suid != NULL){ + TAOS_CHECK_RETURN(tEncodeI64(pEncoder, *suid)); + } + } + } + return 0; +} int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_RETURN(tEncodeMqDataRspCommon(pEncoder, pRsp)); TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pRsp->sleepTime)); + TAOS_CHECK_RETURN(tEncodeSuidArray(pEncoder, pRsp)); + return 0; } +int32_t tDecodeSuidArray(SDecoder *pDecoder, SMqDataRsp *pRsp){ + if (!tDecodeIsEnd(pDecoder)) { + if (pRsp->withTbName) { + if ((pRsp->blockSuid = taosArrayInit(pRsp->blockNum, sizeof(int64_t))) == NULL) { + TAOS_CHECK_RETURN(terrno); + } + } + for (int32_t i = 0; i < pRsp->blockNum; i++) { + int64_t suid = 0; + TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &suid)); + if (taosArrayPush(pRsp->blockSuid, &suid) == NULL) { + TAOS_CHECK_RETURN(terrno); + } + } + } + return 0; +} int32_t tDecodeMqDataRspCommon(SDecoder *pDecoder, SMqDataRsp *pRsp) { int32_t code = 0; int32_t lino; @@ -10798,6 +10828,9 @@ int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { if (!tDecodeIsEnd(pDecoder)) { TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &pRsp->sleepTime)); } + if (!tDecodeIsEnd(pDecoder)) { + TAOS_CHECK_RETURN(tDecodeSuidArray(pDecoder, pRsp)); + } return 0; } @@ -10811,6 +10844,8 @@ static void tDeleteMqDataRspCommon(SMqDataRsp *pRsp) { pRsp->blockSchema = NULL; taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree); pRsp->blockTbName = NULL; + taosArrayDestroy(pRsp->blockSuid); + pRsp->blockSuid = NULL; tOffsetDestroy(&pRsp->reqOffset); tOffsetDestroy(&pRsp->rspOffset); } @@ -10830,6 +10865,8 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, createTableReq, createTableLen)); } } + 
TAOS_CHECK_EXIT(tEncodeSuidArray(pEncoder, pRsp)); + _exit: return code; } @@ -10860,6 +10897,9 @@ int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { } } } + if (!tDecodeIsEnd(pDecoder)) { + TAOS_CHECK_EXIT(tDecodeSuidArray(pDecoder, pRsp)); + } _exit: return code; diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c index dbc1b16cf5..a95437ab0d 100644 --- a/source/dnode/vnode/src/tq/tqScan.c +++ b/source/dnode/vnode/src/tq/tqScan.c @@ -77,6 +77,14 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp, i tqError("failed to push tbName to blockTbName:%s", tbName); continue; } + int64_t suid = 0; + if(mr.me.type == TSDB_CHILD_TABLE){ + suid = mr.me.ctbEntry.suid; + } + if(taosArrayPush(pRsp->blockSuid, &suid) == NULL){ + tqError("failed to push suid to blockSuid:%"PRId64, suid); + continue; + } } metaReaderClear(&mr); return 0; @@ -210,36 +218,26 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat if (pDataBlock != NULL && pDataBlock->info.rows > 0) { if (pRsp->withTbName) { - if (pOffset->type == TMQ_OFFSET__LOG) { - int64_t uid = pExec->pTqReader->lastBlkUid; - if (tqAddTbNameToRsp(pTq, uid, pRsp, 1) < 0) { - tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); - continue; - } - } else { - char* tbName = taosStrdup(qExtractTbnameFromTask(task)); - if (tbName == NULL) { - tqError("vgId:%d, failed to add tbname to rsp msg, null", pTq->pVnode->config.vgId); - return terrno; - } - if (taosArrayPush(pRsp->blockTbName, &tbName) == NULL){ - tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); - continue; - } + char* tbName = taosStrdup(qExtractTbnameFromTask(task)); + if (tbName == NULL) { + tqError("vgId:%d, failed to add tbname to rsp msg, null", pTq->pVnode->config.vgId); + return terrno; + } + if (taosArrayPush(pRsp->blockTbName, &tbName) == NULL){ + tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); + continue; + } + int64_t suid = qExtractSuidFromTask(task); + if (taosArrayPush(pRsp->blockSuid, &suid) == NULL){ + tqError("vgId:%d, failed to add suid to rsp msg", pTq->pVnode->config.vgId); + continue; } } if (pRsp->withSchema) { - if (pOffset->type == TMQ_OFFSET__LOG) { - if (tqAddBlockSchemaToRsp(pExec, pRsp) != 0){ - tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId); - continue; - } - } else { - SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); - if(taosArrayPush(pRsp->blockSchema, &pSW) == NULL){ - tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId); - continue; - } + SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); + if(taosArrayPush(pRsp->blockSchema, &pSW) == NULL){ + tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId); + continue; } } @@ -249,12 +247,9 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat continue; } pRsp->blockNum++; - if (pOffset->type == TMQ_OFFSET__LOG) { - continue; - } else { - rowCnt += pDataBlock->info.rows; - if (rowCnt <= tmqRowSize) continue; - } + rowCnt += pDataBlock->info.rows; + if (rowCnt <= tmqRowSize) continue; + } // get meta diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index b4866b8c65..891e55d91d 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -50,8 +50,11 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) 
{ pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t)); pRsp->blockTbName = taosArrayInit(0, sizeof(void*)); pRsp->blockSchema = taosArrayInit(0, sizeof(void*)); + pRsp->blockSuid = taosArrayInit(0, sizeof(int64_t)); - if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) { + if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || + pRsp->blockTbName == NULL || pRsp->blockSchema == NULL || + pRsp->blockSuid == NULL) { if (pRsp->blockData != NULL) { taosArrayDestroy(pRsp->blockData); pRsp->blockData = NULL; @@ -71,6 +74,11 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { taosArrayDestroy(pRsp->blockSchema); pRsp->blockSchema = NULL; } + + if (pRsp->blockSuid != NULL) { + taosArrayDestroy(pRsp->blockSuid); + pRsp->blockSuid = NULL; + } return terrno; } diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index e3bb9a1361..3c52f8080e 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -69,6 +69,7 @@ typedef struct { SVersionRange fillHistoryVer; STimeWindow fillHistoryWindow; SStreamState* pState; + int64_t suid; // for tmq } SStreamTaskInfo; struct SExecTaskInfo { diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index cbf392f67e..08b7ba0e05 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1212,6 +1212,11 @@ const char* qExtractTbnameFromTask(qTaskInfo_t tinfo) { return pTaskInfo->streamInfo.tbName; } +const int64_t qExtractSuidFromTask(qTaskInfo_t tinfo) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + return pTaskInfo->streamInfo.suid; +} + SMqBatchMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; return &pTaskInfo->streamInfo.btMetaRsp; @@ -1494,6 +1499,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond); tstrncpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName, TSDB_TABLE_NAME_LEN); + pTaskInfo->streamInfo.suid = mtInfo.suid; tDeleteSchemaWrapper(pTaskInfo->streamInfo.schema); pTaskInfo->streamInfo.schema = mtInfo.schema; From 5f9de6ac70b4848c088226ccf2448c598527bf10 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 9 Oct 2024 18:34:14 +0800 Subject: [PATCH 015/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 4b2e9e234d..94162ff768 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1892,6 +1892,11 @@ static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrappe return false; } +static void tmqFreeMeta(void *data){ + STableMeta* pTableMeta = *(STableMeta**)data; + taosMemoryFree(pTableMeta); +} + static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t dataLen) { if (taos == NULL || data == NULL) { SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); @@ -1943,7 +1948,7 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da if (pMetaHash == NULL){ pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); RAW_NULL_CHECK(pMetaHash); - taosHashSetFreeFp(pMetaHash, taosMemoryFree); + 
taosHashSetFreeFp(pMetaHash, tmqFreeMeta); } struct SCatalog* pCatalog = NULL; RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); @@ -2005,13 +2010,13 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da RAW_NULL_CHECK(rawData); STableMeta* pTableMeta = NULL; - STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, suid, POINTER_BYTES); + STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, suid, LONG_BYTES); if (pTableMetaTmp == NULL || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) { if (pCreateReqDst) { // change stable name to get meta (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); } RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); - code = taosHashPut(pMetaHash, suid, POINTER_BYTES, &pTableMeta, POINTER_BYTES); + code = taosHashPut(pMetaHash, suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); if (code != 0){ taosMemoryFree(pTableMeta); goto end; From 42b7520d34356355d8d45c645a2f730c43a2c8fc Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 10 Oct 2024 18:27:42 +0800 Subject: [PATCH 016/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- include/libs/executor/executor.h | 2 +- source/client/src/clientRawBlockWrite.c | 271 ++++++++++++++---------- source/client/src/clientTmq.c | 2 +- source/common/src/tmsg.c | 78 +++---- source/dnode/vnode/src/tq/tqScan.c | 28 +-- source/dnode/vnode/src/tq/tqUtil.c | 13 +- source/libs/executor/src/executor.c | 10 +- source/libs/parser/src/parInsertUtil.c | 15 +- tests/system-test/7-tmq/tmq_taosx.py | 6 +- utils/test/c/tmq_taosx_ci.c | 45 +++- 10 files changed, 281 insertions(+), 189 deletions(-) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 6b02d8f985..8501d88be0 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -210,7 +210,7 @@ SMqBatchMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo); const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo); const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); -const int64_t qExtractSuidFromTask(qTaskInfo_t tinfo); +//const int64_t qExtractSuidFromTask(qTaskInfo_t tinfo); void* qExtractReaderFromStreamScanner(void* scanner); diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 94162ff768..578f8148da 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -53,9 +53,7 @@ #define TMQ_META_VERSION "1.0" static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); - static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); } - static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t, SColCmprWrapper* pColCmprRow, cJSON** pJson) { int32_t code = TSDB_CODE_SUCCESS; @@ -1856,6 +1854,12 @@ static threadlocal SHashObj* pCreateTbHash = NULL; static threadlocal SHashObj* pNameHash = NULL; static threadlocal SHashObj* pMetaHash = NULL; +typedef struct{ + SVgroupInfo vgInfo; + int64_t uid; + int64_t suid; +}tbInfo; + static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW){ char* p = (char*)rawData; // | version | total length | total rows | blankFill | total columns | flag seg| block group id | column schema | each @@ -1878,7 +1882,7 @@ static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrappe char* fieldName = 
pSW->pSchema[i].name; if (strcmp(pColSchema->name, fieldName) == 0) { - if (*fields != pColSchema->type && *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { + if (*fields != pColSchema->type || *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { return true; } break; @@ -1892,11 +1896,6 @@ static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrappe return false; } -static void tmqFreeMeta(void *data){ - STableMeta* pTableMeta = *(STableMeta**)data; - taosMemoryFree(pTableMeta); -} - static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t dataLen) { if (taos == NULL || data == NULL) { SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); @@ -1933,23 +1932,6 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da goto end; } - if (pVgHash == NULL){ - pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pVgHash); - } - if (pCreateTbHash == NULL){ - pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - RAW_NULL_CHECK(pCreateTbHash); - } - if (pNameHash == NULL){ - pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - RAW_NULL_CHECK(pNameHash); - } - if (pMetaHash == NULL){ - pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pMetaHash); - taosHashSetFreeFp(pMetaHash, tmqFreeMeta); - } struct SCatalog* pCatalog = NULL; RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); @@ -1959,90 +1941,119 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da conn.requestObjRefId = pRequest->self; conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); - RAW_RETURN_CHECK(smlInitHandle(&pQuery)); + int retry = 0; + while(1){ + RAW_RETURN_CHECK(smlInitHandle(&pQuery)); - uDebug(LOG_ID_TAG " write raw data type:%d block num:%d", LOG_ID_VALUE, type, rspObj.dataRsp.blockNum); - while (++rspObj.resIter < rspObj.dataRsp.blockNum) { - void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); - RAW_NULL_CHECK(pRetrieve); - if (!rspObj.dataRsp.withSchema) { - goto end; - } - - const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); - RAW_NULL_CHECK(tbName); - - int64_t* suid = taosArrayGet(rspObj.dataRsp.blockSuid, rspObj.resIter); - RAW_NULL_CHECK(suid); - - uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); - SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; - (void)strcpy(pName.dbname, pRequest->pDb); - (void)strcpy(pName.tname, tbName); - - // find schema data info - SVCreateTbReq* pCreateReqDst = NULL; - if (type == RES_TYPE__TMQ_METADATA){ - pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); - if (pCreateReqDst == NULL) { - RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); - pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); - } - } - - int32_t vgId = 0; - SVgroupInfo* vg = (SVgroupInfo*)taosHashGet(pNameHash, tbName, strlen(tbName)); - if (vg == NULL) { - SVgroupInfo vgTmp = {0}; - RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgTmp)); - RAW_RETURN_CHECK(taosHashPut(pNameHash, tbName, strlen(tbName), &vgTmp, sizeof(SVgroupInfo))); - code = taosHashPut(pVgHash, &vgTmp.vgId, sizeof(vgTmp.vgId), &vgTmp, 
sizeof(SVgroupInfo)); - code = (code == TSDB_CODE_DUP_KEY) ? 0 : code; - RAW_RETURN_CHECK(code); - vgId = vgTmp.vgId; - } else { - vgId = vg->vgId; - } - - SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); - RAW_NULL_CHECK(pSW); - void* rawData = getRawDataFromRes(pRetrieve); - RAW_NULL_CHECK(rawData); - - STableMeta* pTableMeta = NULL; - STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, suid, LONG_BYTES); - if (pTableMetaTmp == NULL || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) { - if (pCreateReqDst) { // change stable name to get meta - (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); - } - RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); - code = taosHashPut(pMetaHash, suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); - if (code != 0){ - taosMemoryFree(pTableMeta); + uDebug(LOG_ID_TAG " write raw data type:%d block num:%d", LOG_ID_VALUE, type, rspObj.dataRsp.blockNum); + while (++rspObj.resIter < rspObj.dataRsp.blockNum) { + void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); + RAW_NULL_CHECK(pRetrieve); + if (!rspObj.dataRsp.withSchema) { goto end; } - if (pCreateReqDst) { - pTableMeta->vgId = vgId; - pTableMeta->uid = pCreateReqDst->uid; - pCreateReqDst->ctb.suid = pTableMeta->suid; + + const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); + RAW_NULL_CHECK(tbName); + +// int64_t* suid = taosArrayGet(rspObj.dataRsp.blockSuid, rspObj.resIter); +// RAW_NULL_CHECK(suid); + + uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); + SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; + (void)strcpy(pName.dbname, pRequest->pDb); + (void)strcpy(pName.tname, tbName); + + // find schema data info + SVCreateTbReq* pCreateReqDst = NULL; + if (type == RES_TYPE__TMQ_METADATA){ + pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); + if (pCreateReqDst == NULL) { + RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); + pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); + } } - }else{ - pTableMeta = *pTableMetaTmp; - } + STableMeta* pTableMeta = NULL; + tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, tbName, strlen(tbName)); + if (tmpInfo == NULL || retry > 0) { + tbInfo info = {0}; - char err[ERR_MSG_LEN] = {0}; - code = rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); - if (code != TSDB_CODE_SUCCESS) { - SET_ERROR_MSG("table:%s, err:%s", tbName, err); - goto end; + RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &info.vgInfo)); + if (pCreateReqDst) { // change stable name to get meta + (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); + } + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); + info.uid = pTableMeta->uid; + if (pTableMeta->tableType == TSDB_CHILD_TABLE){ + info.suid = pTableMeta->suid; + } else { + info.suid = pTableMeta->uid; + } + code = taosHashPut(pMetaHash, &info.suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); + if (code != 0){ + taosMemoryFree(pTableMeta); + goto end; + } + if (pCreateReqDst) { + pTableMeta->vgId = info.vgInfo.vgId; + pTableMeta->uid = pCreateReqDst->uid; + pCreateReqDst->ctb.suid = pTableMeta->suid; + } + + code = taosHashPut(pNameHash, pName.tname, strlen(pName.tname), &info, sizeof(tbInfo)); + code = (code == TSDB_CODE_DUP_KEY) ? 
0 : code; + RAW_RETURN_CHECK(code); + code = taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo)); + code = (code == TSDB_CODE_DUP_KEY) ? 0 : code; + RAW_RETURN_CHECK(code); + } + + SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); + RAW_NULL_CHECK(pSW); + void* rawData = getRawDataFromRes(pRetrieve); + RAW_NULL_CHECK(rawData); + + if (pTableMeta == NULL || retry > 0){ + STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, &tmpInfo->suid, LONG_BYTES); + if (pTableMetaTmp == NULL || retry > 0 || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) { + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); + code = taosHashPut(pMetaHash, &tmpInfo->suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); + if (code != 0){ + taosMemoryFree(pTableMeta); + goto end; + } + + }else{ + pTableMeta = *pTableMetaTmp; + pTableMeta->uid = tmpInfo->uid; + pTableMeta->vgId = tmpInfo->vgInfo.vgId; + } + } + + char err[ERR_MSG_LEN] = {0}; + code = rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); + if (code != TSDB_CODE_SUCCESS) { + SET_ERROR_MSG("table:%s, err:%s", tbName, err); + goto end; + } } + RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); + launchQueryImpl(pRequest, pQuery, true, NULL); + code = pRequest->code; + + if (NEED_CLIENT_HANDLE_ERROR(code)) { + uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code)); + if (retry++ >= 3) { + break; + } + qDestroyQuery(pQuery); + pQuery = NULL; + rspObj.resIter = -1; + continue; + } + break; } - RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); - - launchQueryImpl(pRequest, pQuery, true, NULL); - code = pRequest->code; - end: uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); if (type == RES_TYPE__TMQ_METADATA){ @@ -2056,18 +2067,6 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da return code; } -void tmqClean() { - taosHashCleanup(pMetaHash); - taosHashCleanup(pNameHash); - void* pIter = taosHashIterate(pCreateTbHash, NULL); - while (pIter) { - tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); - pIter = taosHashIterate(pCreateTbHash, pIter); - } - taosHashCleanup(pCreateTbHash); - taosHashCleanup(pVgHash); -} - static void processSimpleMeta(SMqMetaRsp* pMetaRsp, cJSON** meta) { if (pMetaRsp->resMsgType == TDMT_VND_CREATE_STB) { processCreateStb(pMetaRsp, meta); @@ -2263,7 +2262,53 @@ void tmq_free_raw(tmq_raw_data raw) { (void)memset(terrMsg, 0, ERR_MSG_LEN); } +static void tmqFreeMeta(void *data){ + STableMeta* pTableMeta = *(STableMeta**)data; + taosMemoryFree(pTableMeta); +} + +void freeHash() { + taosHashCleanup(pMetaHash); + taosHashCleanup(pNameHash); + void* pIter = taosHashIterate(pCreateTbHash, NULL); + while (pIter) { + tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); + pIter = taosHashIterate(pCreateTbHash, pIter); + } + taosHashCleanup(pCreateTbHash); + taosHashCleanup(pVgHash); +} + +static int32_t initHash(){ + int32_t code = 0; + if (pVgHash == NULL){ + pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + RAW_NULL_CHECK(pVgHash); + } + if (pCreateTbHash == NULL){ + pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + RAW_NULL_CHECK(pCreateTbHash); + } + if (pNameHash == NULL){ + pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, 
HASH_NO_LOCK); + RAW_NULL_CHECK(pNameHash); + } + if (pMetaHash == NULL){ + pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + RAW_NULL_CHECK(pMetaHash); + taosHashSetFreeFp(pMetaHash, tmqFreeMeta); + } + return code; +end: + freeHash(); + return code; +} + static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) { + int32_t code = initHash(); + if (code != 0) { + return code; + } if (type == TDMT_VND_CREATE_STB) { return taosCreateStb(taos, buf, len); } else if (type == TDMT_VND_ALTER_STB) { diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 4cbb808187..6d0323e20a 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1064,7 +1064,7 @@ END: tDestroySMqHbReq(&req); if (tmrId != NULL) { bool ret = taosTmrReset(tmqSendHbReq, tmq->heartBeatIntervalMs, param, tmqMgmt.timer, &tmq->hbLiveTimer); - tqDebugC("consumer:0x%" PRIx64 " reset timer for tmq hb:%d, pollFlag:%d", tmq->consumerId, ret, tmq->pollFlag); + tqDebugC("consumer:0x%" PRIx64 " reset timer for tmq heartbeat:%d, pollFlag:%d", tmq->consumerId, ret, tmq->pollFlag); } int32_t ret = taosReleaseRef(tmqMgmt.rsetId, refId); if (ret != 0){ diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 41516d325a..e585212372 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -10716,42 +10716,42 @@ _exit: return code; } -int32_t tEncodeSuidArray(SEncoder *pEncoder, const SMqDataRsp *pRsp){ - for (int32_t i = 0; i < pRsp->blockNum; i++) { - if (pRsp->withTbName) { - int64_t* suid = taosArrayGet(pRsp->blockSuid, i); - if (suid != NULL){ - TAOS_CHECK_RETURN(tEncodeI64(pEncoder, *suid)); - } - } - } - return 0; -} +//int32_t tEncodeSuidArray(SEncoder *pEncoder, const SMqDataRsp *pRsp){ +// for (int32_t i = 0; i < pRsp->blockNum; i++) { +// if (pRsp->withTbName) { +// int64_t* suid = taosArrayGet(pRsp->blockSuid, i); +// if (suid != NULL){ +// TAOS_CHECK_RETURN(tEncodeI64(pEncoder, *suid)); +// } +// } +// } +// return 0; +//} int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_RETURN(tEncodeMqDataRspCommon(pEncoder, pRsp)); TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pRsp->sleepTime)); - TAOS_CHECK_RETURN(tEncodeSuidArray(pEncoder, pRsp)); +// TAOS_CHECK_RETURN(tEncodeSuidArray(pEncoder, pRsp)); return 0; } -int32_t tDecodeSuidArray(SDecoder *pDecoder, SMqDataRsp *pRsp){ - if (!tDecodeIsEnd(pDecoder)) { - if (pRsp->withTbName) { - if ((pRsp->blockSuid = taosArrayInit(pRsp->blockNum, sizeof(int64_t))) == NULL) { - TAOS_CHECK_RETURN(terrno); - } - } - - for (int32_t i = 0; i < pRsp->blockNum; i++) { - int64_t suid = 0; - TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &suid)); - if (taosArrayPush(pRsp->blockSuid, &suid) == NULL) { - TAOS_CHECK_RETURN(terrno); - } - } - } - return 0; -} +//int32_t tDecodeSuidArray(SDecoder *pDecoder, SMqDataRsp *pRsp){ +// if (!tDecodeIsEnd(pDecoder)) { +// if (pRsp->withTbName) { +// if ((pRsp->blockSuid = taosArrayInit(pRsp->blockNum, sizeof(int64_t))) == NULL) { +// TAOS_CHECK_RETURN(terrno); +// } +// } +// +// for (int32_t i = 0; i < pRsp->blockNum; i++) { +// int64_t suid = 0; +// TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &suid)); +// if (taosArrayPush(pRsp->blockSuid, &suid) == NULL) { +// TAOS_CHECK_RETURN(terrno); +// } +// } +// } +// return 0; +//} int32_t tDecodeMqDataRspCommon(SDecoder *pDecoder, SMqDataRsp *pRsp) { int32_t code = 0; int32_t lino; @@ -10828,9 +10828,9 @@ int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp 
*pRsp) { if (!tDecodeIsEnd(pDecoder)) { TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &pRsp->sleepTime)); } - if (!tDecodeIsEnd(pDecoder)) { - TAOS_CHECK_RETURN(tDecodeSuidArray(pDecoder, pRsp)); - } +// if (!tDecodeIsEnd(pDecoder)) { +// TAOS_CHECK_RETURN(tDecodeSuidArray(pDecoder, pRsp)); +// } return 0; } @@ -10844,8 +10844,8 @@ static void tDeleteMqDataRspCommon(SMqDataRsp *pRsp) { pRsp->blockSchema = NULL; taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree); pRsp->blockTbName = NULL; - taosArrayDestroy(pRsp->blockSuid); - pRsp->blockSuid = NULL; +// taosArrayDestroy(pRsp->blockSuid); +// pRsp->blockSuid = NULL; tOffsetDestroy(&pRsp->reqOffset); tOffsetDestroy(&pRsp->rspOffset); } @@ -10865,7 +10865,7 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, createTableReq, createTableLen)); } } - TAOS_CHECK_EXIT(tEncodeSuidArray(pEncoder, pRsp)); +// TAOS_CHECK_EXIT(tEncodeSuidArray(pEncoder, pRsp)); _exit: return code; @@ -10897,9 +10897,9 @@ int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { } } } - if (!tDecodeIsEnd(pDecoder)) { - TAOS_CHECK_EXIT(tDecodeSuidArray(pDecoder, pRsp)); - } +// if (!tDecodeIsEnd(pDecoder)) { +// TAOS_CHECK_EXIT(tDecodeSuidArray(pDecoder, pRsp)); +// } _exit: return code; diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c index a95437ab0d..68702754aa 100644 --- a/source/dnode/vnode/src/tq/tqScan.c +++ b/source/dnode/vnode/src/tq/tqScan.c @@ -77,14 +77,16 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp, i tqError("failed to push tbName to blockTbName:%s", tbName); continue; } - int64_t suid = 0; - if(mr.me.type == TSDB_CHILD_TABLE){ - suid = mr.me.ctbEntry.suid; - } - if(taosArrayPush(pRsp->blockSuid, &suid) == NULL){ - tqError("failed to push suid to blockSuid:%"PRId64, suid); - continue; - } +// int64_t suid = 0; +// if(mr.me.type == TSDB_CHILD_TABLE){ +// suid = mr.me.ctbEntry.suid; +// }else{ +// suid = mr.me.uid; +// } +// if(taosArrayPush(pRsp->blockSuid, &suid) == NULL){ +// tqError("failed to push suid to blockSuid:%"PRId64, suid); +// continue; +// } } metaReaderClear(&mr); return 0; @@ -227,11 +229,11 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); continue; } - int64_t suid = qExtractSuidFromTask(task); - if (taosArrayPush(pRsp->blockSuid, &suid) == NULL){ - tqError("vgId:%d, failed to add suid to rsp msg", pTq->pVnode->config.vgId); - continue; - } +// int64_t suid = qExtractSuidFromTask(task); +// if (taosArrayPush(pRsp->blockSuid, &suid) == NULL){ +// tqError("vgId:%d, failed to add suid to rsp msg", pTq->pVnode->config.vgId); +// continue; +// } } if (pRsp->withSchema) { SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 891e55d91d..6acfe6b074 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -50,11 +50,10 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t)); pRsp->blockTbName = taosArrayInit(0, sizeof(void*)); pRsp->blockSchema = taosArrayInit(0, sizeof(void*)); - pRsp->blockSuid = taosArrayInit(0, sizeof(int64_t)); +// pRsp->blockSuid = taosArrayInit(0, sizeof(int64_t)); if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || - pRsp->blockTbName == NULL || 
pRsp->blockSchema == NULL || - pRsp->blockSuid == NULL) { + pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) { if (pRsp->blockData != NULL) { taosArrayDestroy(pRsp->blockData); pRsp->blockData = NULL; @@ -75,10 +74,10 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { pRsp->blockSchema = NULL; } - if (pRsp->blockSuid != NULL) { - taosArrayDestroy(pRsp->blockSuid); - pRsp->blockSuid = NULL; - } +// if (pRsp->blockSuid != NULL) { +// taosArrayDestroy(pRsp->blockSuid); +// pRsp->blockSuid = NULL; +// } return terrno; } diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 08b7ba0e05..b5af516f75 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1212,10 +1212,10 @@ const char* qExtractTbnameFromTask(qTaskInfo_t tinfo) { return pTaskInfo->streamInfo.tbName; } -const int64_t qExtractSuidFromTask(qTaskInfo_t tinfo) { - SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - return pTaskInfo->streamInfo.suid; -} +//const int64_t qExtractSuidFromTask(qTaskInfo_t tinfo) { +// SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; +// return pTaskInfo->streamInfo.suid; +//} SMqBatchMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; @@ -1499,7 +1499,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond); tstrncpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName, TSDB_TABLE_NAME_LEN); - pTaskInfo->streamInfo.suid = mtInfo.suid; + pTaskInfo->streamInfo.suid = mtInfo.suid == 0 ? mtInfo.uid : mtInfo.suid; tDeleteSchemaWrapper(pTaskInfo->streamInfo.schema); pTaskInfo->streamInfo.schema = mtInfo.schema; diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index f29ed79412..8adf32d2dd 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -963,11 +963,11 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate ret = TSDB_CODE_INVALID_PARA; goto end; } - if (tFields != NULL && numFields > boundInfo->numOfBound) { - if (errstr != NULL) snprintf(errstr, errstrLen, "numFields:%d bigger than num of bound cols:%d", numFields, boundInfo->numOfBound); - ret = TSDB_CODE_INVALID_PARA; - goto end; - } +// if (tFields != NULL && numFields > boundInfo->numOfBound) { +// if (errstr != NULL) snprintf(errstr, errstrLen, "numFields:%d bigger than num of bound cols:%d", numFields, boundInfo->numOfBound); +// ret = TSDB_CODE_INVALID_PARA; +// goto end; +// } if (tFields == NULL && numOfCols != boundInfo->numOfBound) { if (errstr != NULL) snprintf(errstr, errstrLen, "numFields:%d not equal to num of bound cols:%d", numOfCols, boundInfo->numOfBound); ret = TSDB_CODE_INVALID_PARA; @@ -1037,6 +1037,11 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate pStart += numOfRows * sizeof(int32_t); } else { pStart += BitmapLen(numOfRows); +// for(int k = 0; k < numOfRows; k++) { +// if(!colDataIsNull_f(offset, k) && pColSchema->type == TSDB_DATA_TYPE_INT){ +// printf("colName:%s,val:%d", fieldName, *(int32_t*)(pStart + k * sizeof(int32_t))); +// } +// } } char* pData = pStart; diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py index 4e90aefe7c..5047ada1d1 100644 --- a/tests/system-test/7-tmq/tmq_taosx.py +++ b/tests/system-test/7-tmq/tmq_taosx.py @@ -131,14 +131,14 @@ class TDTestCase: tdSql.checkData(0, 2, 1) 
tdSql.query("select * from ct3 order by c1 desc") - tdSql.checkRows(2) + tdSql.checkRows(5) tdSql.checkData(0, 1, 51) tdSql.checkData(0, 4, 940) tdSql.checkData(1, 1, 23) tdSql.checkData(1, 4, None) tdSql.query("select * from st1 order by ts") - tdSql.checkRows(8) + tdSql.checkRows(14) tdSql.checkData(0, 1, 1) tdSql.checkData(1, 1, 3) tdSql.checkData(4, 1, 4) @@ -180,7 +180,7 @@ class TDTestCase: tdSql.checkData(6, 8, None) tdSql.query("select * from ct1") - tdSql.checkRows(4) + tdSql.checkRows(7) tdSql.query("select * from ct2") tdSql.checkRows(0) diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c index d49c7e4ad4..cd70dd88f5 100644 --- a/utils/test/c/tmq_taosx_ci.c +++ b/utils/test/c/tmq_taosx_ci.c @@ -133,7 +133,7 @@ int buildDatabase(TAOS* pConn, TAOS_RES* pRes) { pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)"); if (taos_errno(pRes) != 0) { - printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes)); + printf("failed to create child table ct0, reason:%s\n", taos_errstr(pRes)); return -1; } taos_free_result(pRes); @@ -176,7 +176,7 @@ int buildDatabase(TAOS* pConn, TAOS_RES* pRes) { pRes = taos_query( pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, " - "'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')"); + "'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(1626006833703, 23, 32, 's21ds')"); if (taos_errno(pRes) != 0) { printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes)); return -1; @@ -190,6 +190,41 @@ int buildDatabase(TAOS* pConn, TAOS_RES* pRes) { } taos_free_result(pRes); + pRes = taos_query(pConn, "insert into ct1 values(1736006813600, -32222, 43, 'ewb', 99)"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "alter table st1 drop column c4"); + if (taos_errno(pRes) != 0) { + printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into ct1 values(1736006833600, -4223, 344, 'bfs')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "alter table st1 add column c4 bigint"); + if (taos_errno(pRes) != 0) { + printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into ct1 values(1766006833600, -4432, 4433, 'e23wb', 9349)"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)"); if (taos_errno(pRes) != 0) { printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); @@ -597,6 +632,7 @@ tmq_t* build_consumer() { tmq_conf_set(conf, "enable.auto.commit", "true"); tmq_conf_set(conf, "auto.offset.reset", "earliest"); tmq_conf_set(conf, "msg.consume.excluded", "1"); +// tmq_conf_set(conf, "session.timeout.ms", "1000000"); // tmq_conf_set(conf, "max.poll.interval.ms", "20000"); if (g_conf.snapShot) { @@ -637,6 +673,7 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 5000); if (tmqmessage) { cnt++; + printf("cnt:%d\n", 
cnt); msg_process(tmqmessage); taos_free_result(tmqmessage); } else { @@ -845,6 +882,8 @@ void initLogFile() { "{\"name\":\"t1\",\"type\":4,\"value\":3000}],\"createList\":[]}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," "\"colType\":5}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\"," "\"colType\":8,\"colLength\":64}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\"," @@ -992,6 +1031,8 @@ void initLogFile() { "{\"name\":\"t1\",\"type\":4,\"value\":3000}],\"createList\":[]}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," "\"colType\":5}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\"," "\"colType\":8,\"colLength\":64}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\"," From 52f003e47408bb6aacdea7cfe0969ad7368a8b8e Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Oct 2024 15:50:28 +0800 Subject: [PATCH 017/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 358df25481..94e0b9bec0 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1821,6 +1821,7 @@ static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { int32_t code = 0; SVCreateTbReq pCreateReq = {0}; SDecoder decoderTmp = {0}; + SVCreateTbReq *pCreateReqTmp = NULL; for (int j = 0; j < rsp->createTableNum; j++) { void** dataTmp = taosArrayGet(rsp->createTableReq, j); @@ -1836,8 +1837,9 @@ static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { goto end; } if (taosHashGet(pHashObj, pCreateReq.name, strlen(pCreateReq.name)) == NULL) { - RAW_RETURN_CHECK( - taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), &pCreateReq, sizeof(SVCreateTbReq))); + RAW_RETURN_CHECK(cloneSVreateTbReq(&pCreateReq, &pCreateReqTmp)); + RAW_RETURN_CHECK(taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), pCreateReqTmp, sizeof(SVCreateTbReq))); + pCreateReqTmp = NULL; } else { tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); pCreateReq = (SVCreateTbReq){0}; @@ -1850,6 +1852,7 @@ static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { end: tDecoderClear(&decoderTmp); tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); + tDestroySVCreateTbReq(pCreateReqTmp, TSDB_MSG_FLG_DECODE); return code; } @@ -1944,6 +1947,7 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da conn.requestId = pRequest->requestId; conn.requestObjRefId = pRequest->self; conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); + RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); int retry = 
0; while(1){ @@ -1971,10 +1975,6 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da SVCreateTbReq* pCreateReqDst = NULL; if (type == RES_TYPE__TMQ_METADATA){ pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); - if (pCreateReqDst == NULL) { - RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); - pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); - } } STableMeta* pTableMeta = NULL; tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, tbName, strlen(tbName)); From ddc3676752b46b156dccbb68bfd77c4aeb555fed Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Oct 2024 16:26:48 +0800 Subject: [PATCH 018/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 38 +++++++++++++------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 94e0b9bec0..800973196a 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1821,7 +1821,6 @@ static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { int32_t code = 0; SVCreateTbReq pCreateReq = {0}; SDecoder decoderTmp = {0}; - SVCreateTbReq *pCreateReqTmp = NULL; for (int j = 0; j < rsp->createTableNum; j++) { void** dataTmp = taosArrayGet(rsp->createTableReq, j); @@ -1837,9 +1836,7 @@ static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { goto end; } if (taosHashGet(pHashObj, pCreateReq.name, strlen(pCreateReq.name)) == NULL) { - RAW_RETURN_CHECK(cloneSVreateTbReq(&pCreateReq, &pCreateReqTmp)); - RAW_RETURN_CHECK(taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), pCreateReqTmp, sizeof(SVCreateTbReq))); - pCreateReqTmp = NULL; + RAW_RETURN_CHECK(taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), &pCreateReq, sizeof(SVCreateTbReq))); } else { tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); pCreateReq = (SVCreateTbReq){0}; @@ -1852,12 +1849,11 @@ static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { end: tDecoderClear(&decoderTmp); tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); - tDestroySVCreateTbReq(pCreateReqTmp, TSDB_MSG_FLG_DECODE); return code; } static threadlocal SHashObj* pVgHash = NULL; -static threadlocal SHashObj* pCreateTbHash = NULL; +//static threadlocal SHashObj* pCreateTbHash = NULL; static threadlocal SHashObj* pNameHash = NULL; static threadlocal SHashObj* pMetaHash = NULL; @@ -1912,7 +1908,8 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da SQuery* pQuery = NULL; SMqRspObj rspObj = {0}; SDecoder decoder = {0}; - SRequestObj* pRequest = NULL; + SHashObj* pCreateTbHash = NULL; + SRequestObj* pRequest = NULL; RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); @@ -1947,7 +1944,12 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da conn.requestId = pRequest->requestId; conn.requestObjRefId = pRequest->self; conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); - RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); + + if (type == RES_TYPE__TMQ_METADATA) { + pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + RAW_NULL_CHECK(pCreateTbHash); + 
RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); + } int retry = 0; while(1){ @@ -2064,6 +2066,12 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da }else { tDeleteMqDataRsp(&rspObj.dataRsp); } + void* pIter = taosHashIterate(pCreateTbHash, NULL); + while (pIter) { + tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); + pIter = taosHashIterate(pCreateTbHash, pIter); + } + taosHashCleanup(pCreateTbHash); tDecoderClear(&decoder); qDestroyQuery(pQuery); destroyRequest(pRequest); @@ -2273,12 +2281,6 @@ static void tmqFreeMeta(void *data){ void freeHash() { taosHashCleanup(pMetaHash); taosHashCleanup(pNameHash); - void* pIter = taosHashIterate(pCreateTbHash, NULL); - while (pIter) { - tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); - pIter = taosHashIterate(pCreateTbHash, pIter); - } - taosHashCleanup(pCreateTbHash); taosHashCleanup(pVgHash); } @@ -2288,10 +2290,10 @@ static int32_t initHash(){ pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); RAW_NULL_CHECK(pVgHash); } - if (pCreateTbHash == NULL){ - pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - RAW_NULL_CHECK(pCreateTbHash); - } +// if (pCreateTbHash == NULL){ +// pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); +// RAW_NULL_CHECK(pCreateTbHash); +// } if (pNameHash == NULL){ pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); RAW_NULL_CHECK(pNameHash); From 471871bff53126f30af89d1ce5a8ee3eecee86bd Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Oct 2024 17:02:22 +0800 Subject: [PATCH 019/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 800973196a..d3c2b53fb6 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1984,7 +1984,7 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da tbInfo info = {0}; RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &info.vgInfo)); - if (pCreateReqDst) { // change stable name to get meta + if (pCreateReqDst && tmpInfo == NULL) { // change stable name to get meta (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); } RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); @@ -2005,12 +2005,13 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da pCreateReqDst->ctb.suid = pTableMeta->suid; } - code = taosHashPut(pNameHash, pName.tname, strlen(pName.tname), &info, sizeof(tbInfo)); - code = (code == TSDB_CODE_DUP_KEY) ? 0 : code; - RAW_RETURN_CHECK(code); - code = taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo)); - code = (code == TSDB_CODE_DUP_KEY) ? 0 : code; - RAW_RETURN_CHECK(code); + RAW_RETURN_CHECK(taosHashPut(pNameHash, pName.tname, strlen(pName.tname), &info, sizeof(tbInfo))); + tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName.tname, strlen(pName.tname)); +// code = (code == TSDB_CODE_DUP_KEY) ? 0 : code; +// RAW_RETURN_CHECK(code); + RAW_RETURN_CHECK(taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo))); +// code = (code == TSDB_CODE_DUP_KEY) ? 
0 : code; +// RAW_RETURN_CHECK(code); } SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); From 3cb60211de79c8cad72c811e1bb60075936a2ae7 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Sat, 12 Oct 2024 15:43:58 +0800 Subject: [PATCH 020/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/libs/executor/src/scanoperator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index bae9926f63..3713a2c071 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2877,6 +2877,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock return 0; } + blockDataFreeRes((SSDataBlock*)pBlock); code = calBlockTbName(pInfo, pInfo->pRes, 0); QUERY_CHECK_CODE(code, lino, _end); From 88aac4ff677e8007f4e7890190733413005f301f Mon Sep 17 00:00:00 2001 From: yingzhao Date: Mon, 14 Oct 2024 15:38:31 +0800 Subject: [PATCH 021/142] docs(datain): add version information for every datasource --- docs/zh/06-advanced/05-data-in/index.md | 30 ++++++++++++++++--------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/docs/zh/06-advanced/05-data-in/index.md b/docs/zh/06-advanced/05-data-in/index.md index 7e5c467010..50c66cc377 100644 --- a/docs/zh/06-advanced/05-data-in/index.md +++ b/docs/zh/06-advanced/05-data-in/index.md @@ -14,18 +14,26 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx ## 支持的数据源 -目前 TDengine 支持的数据源如下: +目前 TDengine 支持的数据源如下表: -1. Aveva PI System:一个工业数据管理和分析平台,前身为 OSIsoft PI System,它能够实时采集、整合、分析和可视化工业数据,助力企业实现智能化决策和精细化管理 -2. Aveva Historian:一个工业大数据分析软件,前身为 Wonderware Historian,专为工业环境设计,用于存储、管理和分析来自各种工业设备、传感器的实时和历史数据。 -3. OPC DA/UA:OPC 是 Open Platform Communications 的缩写,是一种开放式、标准化的通信协议,用于不同厂商的自动化设备之间进行数据交换。它最初由微软公司开发,旨在解决工业控制领域中不同设备之间互操作性差的问题。OPC 协议最初于 1996 年发布,当时称为 OPC DA (Data Access),主要用于实时数据采集和控制;2006 年,OPC 基金会发布了 OPC UA (Unified Architecture) 标准,它是一种基于服务的面向对象的协议,具有更高的灵活性和可扩展性,已成为 OPC 协议的主流版本。 -4. MQTT:Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。 -5. Kafka:由 Apache 软件基金会开发的一个开源流处理平台,主要用于处理实时数据,并提供一个统一、高通量、低延迟的消息系统。它具备高速度、可伸缩性、持久性和分布式设计等特点,使得它能够在每秒处理数十万次的读写操作,支持上千个客户端,同时保持数据的可靠性和可用性。 -6. OpenTSDB:基于 HBase 的分布式、可伸缩的时序数据库。它主要用于存储、索引和提供从大规模集群(包括网络设备、操作系统、应用程序等)中收集的指标数据,使这些数据更易于访问和图形化展示。 -7. CSV:Comma Separated Values 的缩写,是一种以逗号分隔的纯文本文件格式,通常用于电子表格或数据库软件。 -8. TDengine 2:泛指运行 TDengine 2.x 版本的 TDengine 实例。 -9. TDengine 3:泛指运行 TDengine 3.x 版本的 TDengine 实例。 -10. MySQL, PostgreSQL, Oracle 等关系型数据库。 +| 数据源 | 支持版本 | 描述 | +| --- | --- | --- | +| Aveva PI System | PI AF Server Version 2.10.9.593 或以上 | 工业数据管理和分析平台,前身为 OSIsoft PI System,它能够实时采集、整合、分析和可视化工业数据,助力企业实现智能化决策和精细化管理 | +| Aveva Historian | AVEVA Historian 2020 RS SP1 | 工业大数据分析软件,前身为 Wonderware Historian,专为工业环境设计,用于存储、管理和分析来自各种工业设备、传感器的实时和历史数据 | +| OPC DA | Matrikon OPC version: 1.7.2.7433 | OPC 是 Open Platform Communications 的缩写,是一种开放式、标准化的通信协议,用于不同厂商的自动化设备之间进行数据交换。它最初由微软公司开发,旨在解决工业控制领域中不同设备之间互操作性差的问题;OPC 协议最初于 1996 年发布,当时称为 OPC DA (Data Access),主要用于实时数据采集和控制。 | +| OPC UA | KeepWare KEPServerEx 6.5 | 2006 年,OPC 基金会发布了 OPC UA (Unified Architecture) 标准,它是一种基于服务的面向对象的协议,具有更高的灵活性和可扩展性,已成为 OPC 协议的主流版本 | +| MQTT | emqx: 3.0.0 到 5.7.1
hivemq: 4.0.0 到 4.31.0
mosquitto: 1.4.4 到 2.0.18 | Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。 | +| Kafka | 2.11 ~ 3.8.0 | 由 Apache 软件基金会开发的一个开源流处理平台,主要用于处理实时数据,并提供一个统一、高通量、低延迟的消息系统。它具备高速度、可伸缩性、持久性和分布式设计等特点,使得它能够在每秒处理数十万次的读写操作,支持上千个客户端,同时保持数据的可靠性和可用性。 | +| InfluxDB | 1.7、1.8、2.0-2.7 | InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量时间序列数据进行了优化。| +| OpenTSDB | 2.4.1 | 基于 HBase 的分布式、可伸缩的时序数据库。它主要用于存储、索引和提供从大规模集群(包括网络设备、操作系统、应用程序等)中收集的指标数据,使这些数据更易于访问和图形化展示。 | +| MySQL | 5.6,5.7,8.0+ | MySQL是最流行的关系型数据库管理系统之一,由于其体积小、速度快、总体拥有成本低,尤其是开放源码这一特点,一般中小型和大型网站的开发都选择 MySQL 作为网站数据库。 | +| Oracle | 11G/12c/19c | Oracle 数据库系统是世界上流行的关系数据库管理系统,系统可移植性好、使用方便、功能强,适用于各类大、中、小微机环境。它是一种高效率的、可靠性好的、适应高吞吐量的数据库方案。 | +| PostgreSQL | v15.0+ | PostgreSQL 是一个功能非常强大的、源代码开放的客户/服务器关系型数据库管理系统, 有很多在大型商业RDBMS中所具有的特性,包括事务、子选择、触发器、视图、外键引用完整性和复杂锁定功能。| +| SQL Server | 2012/2022 | Microsoft SQL Server 是一种关系型数据库管理系统,由 Microsoft 公司开发,具有使用方便可伸缩性好与相关软件集成程度高等优点。 | +| MongoDB | 3.6+ | MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品,被广泛应用于内容管理系统、移动应用与物联网等众多领域。 | +| CSV | - | Comma Separated Values 的缩写,是一种以逗号分隔的纯文本文件格式,通常用于电子表格或数据库软件。 | +| TDengine 2.x | 2.4 或 2.6+ | TDengine 旧版本,已不再维护,推荐升级到 3.0 最新版本。 | +| TDengine 3.x | 源端版本+ | 使用 TMQ 进行 TDengine 指定从数据库或超级表的订阅。 | ## 数据提取、过滤和转换 From 10436cacffbf07ddcccca22b392f521d6bada41b Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 15 Oct 2024 16:17:58 +0800 Subject: [PATCH 022/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientTmq.c | 10 ++++++---- source/libs/executor/inc/querytask.h | 2 +- source/libs/executor/src/executor.c | 2 +- source/libs/executor/src/scanoperator.c | 1 - 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 020bf25e40..ce701a755c 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -831,8 +831,8 @@ static int32_t innerCommitAll(tmq_t* tmq, SMqCommitCbParamSet* pParamSet){ } code = innerCommit(tmq, pTopic->topicName, &pVg->offsetInfo.endOffset, pVg, pParamSet); - if (code != 0){ - tqDebugC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d", + if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){ + tqErrorC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d", tmq->consumerId, pTopic->topicName, pVg->vgId, tstrerror(code), pVg->offsetInfo.endOffset.version, j + 1, numOfVgroups); } } @@ -857,7 +857,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us return; } code = innerCommitAll(tmq, pParamSet); - if (code != 0){ + if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){ tqErrorC("consumer:0x%" PRIx64 " innerCommitAll failed, code:%s", tmq->consumerId, tstrerror(code)); } @@ -1270,7 +1270,9 @@ static int32_t askEpCb(void* param, SDataBuf* pMsg, int32_t code) { } if (code != TSDB_CODE_SUCCESS) { - tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code)); + if (code != TSDB_CODE_MND_CONSUMER_NOT_READY){ + tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code)); + } goto END; } diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index 3c52f8080e..c9e65bacaf 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -69,7 +69,7 @@ typedef struct { SVersionRange fillHistoryVer; 
STimeWindow fillHistoryWindow; SStreamState* pState; - int64_t suid; // for tmq +// int64_t suid; // for tmq } SStreamTaskInfo; struct SExecTaskInfo { diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 4e5a1d2e1e..4cfa12be5b 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1499,7 +1499,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond); tstrncpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName, TSDB_TABLE_NAME_LEN); - pTaskInfo->streamInfo.suid = mtInfo.suid == 0 ? mtInfo.uid : mtInfo.suid; +// pTaskInfo->streamInfo.suid = mtInfo.suid == 0 ? mtInfo.uid : mtInfo.suid; tDeleteSchemaWrapper(pTaskInfo->streamInfo.schema); pTaskInfo->streamInfo.schema = mtInfo.schema; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 3713a2c071..bae9926f63 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2877,7 +2877,6 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock return 0; } - blockDataFreeRes((SSDataBlock*)pBlock); code = calBlockTbName(pInfo, pInfo->pRes, 0); QUERY_CHECK_CODE(code, lino, _end); From 0002ede469c58c20a8064f44f42d2b0d2c388354 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 16 Oct 2024 10:19:40 +0800 Subject: [PATCH 023/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 116 ++++++++++++++---------- 1 file changed, 70 insertions(+), 46 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 6764dbfea5..6c54c3aa69 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1852,10 +1852,14 @@ end: return code; } -static threadlocal SHashObj* pVgHash = NULL; -//static threadlocal SHashObj* pCreateTbHash = NULL; -static threadlocal SHashObj* pNameHash = NULL; -static threadlocal SHashObj* pMetaHash = NULL; +static SHashObj* writeRawCache = NULL; +static int8_t initFlag = 0; + +typedef struct{ + SHashObj* pVgHash; + SHashObj* pNameHash; + SHashObj* pMetaHash; +}rawCacheInfo; typedef struct{ SVgroupInfo vgInfo; @@ -1863,6 +1867,29 @@ typedef struct{ int64_t suid; }tbInfo; +static void tmqFreeMeta(void *data){ + STableMeta* pTableMeta = *(STableMeta**)data; + taosMemoryFree(pTableMeta); +} + +static void freeRawCache(void *data) { + rawCacheInfo* pRawCache = (rawCacheInfo*)data; + taosHashCleanup(pRawCache->pMetaHash); + taosHashCleanup(pRawCache->pNameHash); + taosHashCleanup(pRawCache->pVgHash); +} + +static int32_t initRawCacheHash(){ + if (writeRawCache == NULL){ + writeRawCache = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + if (writeRawCache == NULL){ + return terrno; + } + taosHashSetFreeFp(writeRawCache, freeRawCache); + } + return 0; +} + static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW){ char* p = (char*)rawData; // | version | total length | total rows | blankFill | total columns | flag seg| block group id | column schema | each @@ -1899,6 +1926,34 @@ static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrappe return false; } +static int32_t getRawCache(SHashObj **pVgHash, SHashObj **pNameHash, SHashObj **pMetaHash, void *key) { + int32_t code = 0; + void* cacheInfo = taosHashGet(writeRawCache, 
&key, POINTER_BYTES); + if (cacheInfo == NULL){ + *pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + RAW_NULL_CHECK(*pVgHash); + *pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + RAW_NULL_CHECK(*pNameHash); + *pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + RAW_NULL_CHECK(*pMetaHash); + taosHashSetFreeFp(*pMetaHash, tmqFreeMeta); + rawCacheInfo info = {*pVgHash, *pNameHash, *pMetaHash}; + RAW_RETURN_CHECK(taosHashPut(writeRawCache, &key, POINTER_BYTES, &info, sizeof(rawCacheInfo))); + } else { + rawCacheInfo *info = (rawCacheInfo *)cacheInfo; + *pVgHash = info->pVgHash; + *pNameHash = info->pNameHash; + *pMetaHash = info->pMetaHash; + } + + return 0; +end: + taosHashCleanup(*pMetaHash); + taosHashCleanup(*pNameHash); + taosHashCleanup(*pVgHash); + return code; +} + static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t dataLen) { if (taos == NULL || data == NULL) { SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); @@ -1951,6 +2006,10 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); } + SHashObj *pVgHash = NULL; + SHashObj *pNameHash = NULL; + SHashObj *pMetaHash = NULL; + RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos)); int retry = 0; while(1){ RAW_RETURN_CHECK(smlInitHandle(&pQuery)); @@ -1965,9 +2024,6 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); RAW_NULL_CHECK(tbName); -// int64_t* suid = taosArrayGet(rspObj.dataRsp.blockSuid, rspObj.resIter); -// RAW_NULL_CHECK(suid); - uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; (void)strcpy(pName.dbname, pRequest->pDb); @@ -2274,47 +2330,15 @@ void tmq_free_raw(tmq_raw_data raw) { (void)memset(terrMsg, 0, ERR_MSG_LEN); } -static void tmqFreeMeta(void *data){ - STableMeta* pTableMeta = *(STableMeta**)data; - taosMemoryFree(pTableMeta); -} - -void freeHash() { - taosHashCleanup(pMetaHash); - taosHashCleanup(pNameHash); - taosHashCleanup(pVgHash); -} - -static int32_t initHash(){ - int32_t code = 0; - if (pVgHash == NULL){ - pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pVgHash); - } -// if (pCreateTbHash == NULL){ -// pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); -// RAW_NULL_CHECK(pCreateTbHash); -// } - if (pNameHash == NULL){ - pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pNameHash); - } - if (pMetaHash == NULL){ - pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pMetaHash); - taosHashSetFreeFp(pMetaHash, tmqFreeMeta); - } - return code; -end: - freeHash(); - return code; -} - static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) { - int32_t code = initHash(); - if (code != 0) { - return code; + int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1); + if (old == 0) { + int32_t code = initRawCacheHash(); + if (code != 0) { + return code; + } } + if (type == TDMT_VND_CREATE_STB) { return taosCreateStb(taos, buf, len); 
} else if (type == TDMT_VND_ALTER_STB) { From 4f399749384955e256e0ecc08abd450b041beaf8 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 16 Oct 2024 17:17:44 +0800 Subject: [PATCH 024/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- include/util/tlog.h | 2 +- source/client/src/clientTmq.c | 11 +-- source/common/src/tglobal.c | 4 +- source/dnode/mnode/impl/src/mndConsumer.c | 2 +- source/dnode/vnode/inc/vnode.h | 2 +- source/dnode/vnode/src/inc/vnodeInt.h | 2 +- source/dnode/vnode/src/meta/metaQuery.c | 7 +- source/dnode/vnode/src/meta/metaSnapshot.c | 2 +- source/dnode/vnode/src/tq/tqRead.c | 10 +-- source/dnode/vnode/src/tq/tqScan.c | 92 +++++++++++++--------- source/dnode/vnode/src/vnd/vnodeQuery.c | 2 +- source/util/src/tlog.c | 2 +- 12 files changed, 76 insertions(+), 62 deletions(-) diff --git a/include/util/tlog.h b/include/util/tlog.h index e80e94de32..1a3b687e40 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -69,7 +69,7 @@ extern int32_t tdbDebugFlag; extern int32_t sndDebugFlag; extern int32_t simDebugFlag; -extern int32_t tqClientDebug; +extern int32_t tqClientDebugFlag; int32_t taosInitLog(const char *logName, int32_t maxFiles, bool tsc); void taosCloseLog(); diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index ce701a755c..0233372012 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -24,12 +24,9 @@ #include "tref.h" #include "ttimer.h" -#define tqFatalC(...) do { if (cDebugFlag & DEBUG_FATAL || tqClientDebug) { taosPrintLog("TQ FATAL ", DEBUG_FATAL, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebug) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqWarnC(...) do { if (cDebugFlag & DEBUG_WARN || tqClientDebug) { taosPrintLog("TQ WARN ", DEBUG_WARN, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebug) { taosPrintLog("TQ ", DEBUG_INFO, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebug) { taosPrintLog("TQ ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqTraceC(...) do { if (cDebugFlag & DEBUG_TRACE || tqClientDebug) { taosPrintLog("TQ ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0) +#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebugFlag & DEBUG_ERROR) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0) +#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0) +#define tqDebugC(...) 
do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ ", DEBUG_DEBUG, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) #define EMPTY_BLOCK_POLL_IDLE_DURATION 10 #define DEFAULT_AUTO_COMMIT_INTERVAL 5000 @@ -957,7 +954,7 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { } } - tqClientDebug = rsp.debugFlag; + tqClientDebugFlag = rsp.debugFlag; tDestroySMqHbRsp(&rsp); END: diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index b6fdc2c3c7..ea76adef1a 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -542,7 +542,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebug", tqClientDebug, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebugFlag", tqClientDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "udfDebugFlag", udfDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); @@ -1959,7 +1959,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"smaDebugFlag", &smaDebugFlag}, {"idxDebugFlag", &idxDebugFlag}, {"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"metaDebugFlag", &metaDebugFlag}, - {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, {"tqClientDebug", &tqClientDebug}, + {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, {"tqClientDebugFlag", &tqClientDebugFlag}, }; static OptionNameAndVar options[] = {{"audit", &tsEnableAudit}, diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index e80654b65a..0538a5fe83 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -245,7 +245,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { } storeOffsetRows(pMnode, &req, pConsumer); - rsp.debugFlag = tqClientDebug; + rsp.debugFlag = tqClientDebugFlag; code = buildMqHbRsp(pMsg, &rsp); END: diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 204311aa98..610ba43673 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -243,7 +243,7 @@ int32_t extractMsgFromWal(SWalReader *pReader, void **pItem, int64_t maxVer, con int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver); bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids); int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char *idstr); -int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet); +int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet, int64_t *createTime); int32_t tqGetStreamExecInfo(SVnode *pVnode, int64_t streamId, 
int64_t *pDelay, bool *fhFinished); // sma diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 1bd4317234..4497651151 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -160,7 +160,7 @@ int32_t metaDropTables(SMeta* pMeta, SArray* tbUids); int metaTtlFindExpired(SMeta* pMeta, int64_t timePointMs, SArray* tbUids, int32_t ttlDropMaxCount); int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp); int metaUpdateChangeTimeWithLock(SMeta* pMeta, tb_uid_t uid, int64_t changeTimeMs); -SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); +SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, int64_t *createTime); int32_t metaGetTbTSchemaNotNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema); int32_t metaGetTbTSchemaMaybeNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema); STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 484c5c0a16..e2ba8d9ccb 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -371,7 +371,7 @@ int32_t metaTbCursorPrev(SMTbCursor *pTbCur, ETableType jumpTableType) { return 0; } -SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) { +SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock, int64_t *createTime) { void *pData = NULL; int nData = 0; int64_t version; @@ -407,6 +407,9 @@ _query: } } else if (me.type == TSDB_CHILD_TABLE) { uid = me.ctbEntry.suid; + if (createTime != NULL){ + *createTime = me.ctbEntry.btime; + } tDecoderClear(&dc); goto _query; } else { @@ -617,7 +620,7 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) { STSchema *pTSchema = NULL; SSchemaWrapper *pSW = NULL; - pSW = metaGetTableSchema(pMeta, uid, sver, lock); + pSW = metaGetTableSchema(pMeta, uid, sver, lock, NULL); if (!pSW) return NULL; pTSchema = tBuildTSchema(pSW->pSchema, pSW->nCols, pSW->version); diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index 0936d8f092..b2826ec45a 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -552,7 +552,7 @@ int32_t setForSnapShot(SSnapContext* ctx, int64_t uid) { void taosXSetTablePrimaryKey(SSnapContext* ctx, int64_t uid) { bool ret = false; - SSchemaWrapper* schema = metaGetTableSchema(ctx->pMeta, uid, -1, 1); + SSchemaWrapper* schema = metaGetTableSchema(ctx->pMeta, uid, -1, 1, NULL); if (schema && schema->nCols >= 2 && schema->pSchema[1].flags & COL_IS_KEY) { ret = true; } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index f594c2f229..fedcc0e82d 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -263,7 +263,7 @@ bool tqGetTablePrimaryKey(STqReader* pReader) { return pReader->hasPrimaryKey; } void tqSetTablePrimaryKey(STqReader* pReader, int64_t uid) { bool ret = false; - SSchemaWrapper* schema = metaGetTableSchema(pReader->pVnodeMeta, uid, -1, 1); + SSchemaWrapper* schema = metaGetTableSchema(pReader->pVnodeMeta, uid, -1, 1, NULL); if (schema && schema->nCols >= 2 && schema->pSchema[1].flags & COL_IS_KEY) { ret = true; } @@ -669,7 +669,7 @@ int32_t 
tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char* (pReader->cachedSchemaVer != sversion)) { tDeleteSchemaWrapper(pReader->pSchemaWrapper); - pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1); + pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1, NULL); if (pReader->pSchemaWrapper == NULL) { tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", uid:%" PRId64 "version %d, possibly dropped table", @@ -961,10 +961,8 @@ END: return code; } -int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet) { +int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet, int64_t *createTime) { tqTrace("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk); - SSDataBlock* block = NULL; - SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk); if (pSubmitTbData == NULL) { return terrno; @@ -980,7 +978,7 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas pReader->lastBlkUid = uid; tDeleteSchemaWrapper(pReader->pSchemaWrapper); - pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1); + pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1, createTime); if (pReader->pSchemaWrapper == NULL) { tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", version %d, possibly dropped table", pReader->pWalReader->pWal->cfg.vgId, uid, pReader->cachedSchemaVer); diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c index 68702754aa..3e4895378b 100644 --- a/source/dnode/vnode/src/tq/tqScan.c +++ b/source/dnode/vnode/src/tq/tqScan.c @@ -293,6 +293,54 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat return code; } +static int32_t buildCreateTbInfo(SMqDataRsp* pRsp, SVCreateTbReq* pCreateTbReq){ + int32_t code = 0; + void* createReq = NULL; + if (pRsp->createTableNum == 0) { + pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t)); + if (pRsp->createTableLen == NULL) { + code = terrno; + goto END; + } + pRsp->createTableReq = taosArrayInit(0, sizeof(void*)); + if (pRsp->createTableReq == NULL) { + code = terrno; + goto END; + } + } + + uint32_t len = 0; + tEncodeSize(tEncodeSVCreateTbReq, pCreateTbReq, len, code); + if (TSDB_CODE_SUCCESS != code) { + goto END; + } + createReq = taosMemoryCalloc(1, len); + if (createReq == NULL){ + code = terrno; + goto END; + } + SEncoder encoder = {0}; + tEncoderInit(&encoder, createReq, len); + code = tEncodeSVCreateTbReq(&encoder, pCreateTbReq); + tEncoderClear(&encoder); + if (code < 0) { + goto END; + } + if (taosArrayPush(pRsp->createTableLen, &len) == NULL){ + code = terrno; + goto END; + } + if (taosArrayPush(pRsp->createTableReq, &createReq) == NULL){ + code = terrno; + goto END; + } + pRsp->createTableNum++; + + return 0; +END: + taosMemoryFree(createReq); + return code; +} static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded){ int32_t code = 0; @@ -312,7 +360,8 @@ static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int } SSubmitTbData* pSubmitTbDataRet = NULL; - code = tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet); + int64_t createTime = INT64_MAX; + code = tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, 
&pSubmitTbDataRet, &createTime); if (code != 0) { tqError("vgId:%d, failed to retrieve block", pTq->pVnode->config.vgId); goto END; @@ -330,46 +379,13 @@ static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int } } if (pHandle->fetchMeta != WITH_DATA && pSubmitTbDataRet->pCreateTbReq != NULL) { - if (pRsp->createTableNum == 0) { - pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t)); - if (pRsp->createTableLen == NULL) { - code = terrno; - goto END; - } - pRsp->createTableReq = taosArrayInit(0, sizeof(void*)); - if (pRsp->createTableReq == NULL) { - code = terrno; + if (pSubmitTbDataRet->ctimeMs - createTime <= 1000) { // judge if table is already created to avoid sending crateTbReq + code = buildCreateTbInfo(pRsp, pSubmitTbDataRet->pCreateTbReq); + if (code != 0){ + tqError("vgId:%d, failed to build create table info", pTq->pVnode->config.vgId); goto END; } } - - uint32_t len = 0; - tEncodeSize(tEncodeSVCreateTbReq, pSubmitTbDataRet->pCreateTbReq, len, code); - if (TSDB_CODE_SUCCESS != code) { - goto END; - } - void* createReq = taosMemoryCalloc(1, len); - if (createReq == NULL){ - code = terrno; - goto END; - } - SEncoder encoder = {0}; - tEncoderInit(&encoder, createReq, len); - code = tEncodeSVCreateTbReq(&encoder, pSubmitTbDataRet->pCreateTbReq); - tEncoderClear(&encoder); - if (code < 0) { - taosMemoryFree(createReq); - goto END; - } - if (taosArrayPush(pRsp->createTableLen, &len) == NULL){ - taosMemoryFree(createReq); - goto END; - } - if (taosArrayPush(pRsp->createTableReq, &createReq) == NULL){ - taosMemoryFree(createReq); - goto END; - } - pRsp->createTableNum++; } if (pHandle->fetchMeta == ONLY_META && pSubmitTbDataRet->pCreateTbReq == NULL) { goto END; diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 7c6a2e7313..0929953e1c 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -702,7 +702,7 @@ int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { } int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) { - SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 0); + SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 0, NULL); if (pSW) { *num = pSW->nCols; tDeleteSchemaWrapper(pSW); diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 45c8a2f6c2..2b80eaf85d 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -125,7 +125,7 @@ int32_t idxDebugFlag = 131; int32_t sndDebugFlag = 131; int32_t simDebugFlag = 131; -int32_t tqClientDebug = 0; +int32_t tqClientDebugFlag = 0; int64_t dbgEmptyW = 0; int64_t dbgWN = 0; From 47742ce5ed6e0a84229ddcb9c875607dccf7a09c Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 16 Oct 2024 17:54:53 +0800 Subject: [PATCH 025/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientTmq.c | 6 ++++-- source/util/src/tlog.c | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 0233372012..72d709d962 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -24,8 +24,8 @@ #include "tref.h" #include "ttimer.h" -#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebugFlag & DEBUG_ERROR) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0) -#define tqInfoC(...) 
do { if (cDebugFlag & DEBUG_INFO || tqClientDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0) +#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebugFlag & DEBUG_ERROR) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) +#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) #define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ ", DEBUG_DEBUG, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) #define EMPTY_BLOCK_POLL_IDLE_DURATION 10 @@ -955,6 +955,8 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { } tqClientDebugFlag = rsp.debugFlag; + tqDebugC("consumer:0x%" PRIx64 ", tqClientDebugFlag:%d", tmq->consumerId, rsp.debugFlag); + tDestroySMqHbRsp(&rsp); END: diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 2b80eaf85d..3b9d100da9 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -125,7 +125,7 @@ int32_t idxDebugFlag = 131; int32_t sndDebugFlag = 131; int32_t simDebugFlag = 131; -int32_t tqClientDebugFlag = 0; +int32_t tqClientDebugFlag = 131; int64_t dbgEmptyW = 0; int64_t dbgWN = 0; From 4a240288172a56620e9e455af7fd6960f075e013 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 16 Oct 2024 19:34:19 +0800 Subject: [PATCH 026/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- tests/system-test/2-query/db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py index 588609e524..1964cea51f 100644 --- a/tests/system-test/2-query/db.py +++ b/tests/system-test/2-query/db.py @@ -57,7 +57,7 @@ class TDTestCase: tdSql.checkData(0, 2, 0) tdSql.query("show dnode 1 variables like '%debugFlag'") - tdSql.checkRows(23) + tdSql.checkRows(24) tdSql.query("show dnode 1 variables like '____debugFlag'") tdSql.checkRows(2) From 3ae6f79290731acf6ef8048fe161bd661de0d7c9 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 17 Oct 2024 09:36:32 +0800 Subject: [PATCH 027/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientTmq.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 72d709d962..950e0f7f34 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -955,7 +955,6 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { } tqClientDebugFlag = rsp.debugFlag; - tqDebugC("consumer:0x%" PRIx64 ", tqClientDebugFlag:%d", tmq->consumerId, rsp.debugFlag); tDestroySMqHbRsp(&rsp); From 2e5501c354bd56464aba0fc8cafe1e88a05d9d0c Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 17 Oct 2024 16:56:01 +0800 Subject: [PATCH 028/142] feat(tools): add taoskeeper --- tools/keeper | 1 + 1 file changed, 1 insertion(+) create mode 160000 tools/keeper diff --git a/tools/keeper b/tools/keeper new file mode 160000 index 0000000000..1b291d7930 --- /dev/null +++ b/tools/keeper @@ -0,0 +1 @@ +Subproject commit 1b291d7930180b49a0765805a34b3fb56f41b9fa From cbd262541e916ff1507cfc3219856a363abd50fb Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 17 Oct 2024 17:04:34 +0800 Subject: [PATCH 029/142] feat(tools): add taoskeeper --- tools/keeper | 1 - tools/keeper/.dockerignore | 1 + 
tools/keeper/.github/workflows/build.yaml | 59 + .../.github/workflows/release-pr-title.yaml | 33 + tools/keeper/.github/workflows/release.yaml | 45 + tools/keeper/.gitignore | 22 + tools/keeper/CHANGELOG.md | 10 + tools/keeper/Dockerfile | 16 + tools/keeper/DockerfileCloud | 24 + tools/keeper/README-CN.md | 267 + tools/keeper/README.md | 273 + tools/keeper/api/adapter2.go | 260 + tools/keeper/api/adapter2_test.go | 98 + tools/keeper/api/audit.go | 336 ++ tools/keeper/api/audit_test.go | 153 + tools/keeper/api/checkhealth.go | 21 + tools/keeper/api/common.go | 89 + tools/keeper/api/exporter_test.go | 297 + tools/keeper/api/gen_metric.go | 770 +++ tools/keeper/api/gen_metric_test.go | 358 ++ tools/keeper/api/https_test.go | 127 + tools/keeper/api/nodeexporter.go | 32 + tools/keeper/api/report.go | 478 ++ tools/keeper/api/tables.go | 286 + tools/keeper/api/zabbix.go | 113 + tools/keeper/ci/changelog-generate.sh | 31 + tools/keeper/ci/post-release.sh | 22 + tools/keeper/ci/release.sh | 31 + tools/keeper/cmd/command.go | 461 ++ tools/keeper/cmd/empty_test.go | 8 + tools/keeper/config/metrics.sample | 38 + tools/keeper/config/taoskeeper.toml | 53 + .../keeper/config/taoskeeper_enterprise.toml | 65 + ...ngine-taoskeeper-prometheus-dashboard.json | 5365 +++++++++++++++++ ...engine-taoskeeper-prometheus-dashboard.png | Bin 0 -> 233143 bytes tools/keeper/db/connector.go | 177 + tools/keeper/db/empty_test.go | 8 + tools/keeper/docker-compose.yml | 29 + tools/keeper/examples/metrics.toml | 38 + tools/keeper/go.mod | 80 + tools/keeper/go.sum | 764 +++ tools/keeper/infrastructure/config/audit.go | 6 + tools/keeper/infrastructure/config/config.go | 296 + tools/keeper/infrastructure/config/log.go | 29 + .../infrastructure/config/metric_test.go | 85 + tools/keeper/infrastructure/config/metrics.go | 29 + tools/keeper/infrastructure/log/empty_test.go | 8 + tools/keeper/infrastructure/log/log.go | 278 + tools/keeper/infrastructure/log/log_test.go | 23 + tools/keeper/infrastructure/log/web.go | 55 + tools/keeper/main.go | 11 + tools/keeper/monitor/collect.go | 99 + tools/keeper/monitor/empty_test.go | 8 + tools/keeper/monitor/monitor.go | 89 + tools/keeper/monitor/monitor_test.go | 58 + tools/keeper/monitor/system.go | 97 + tools/keeper/process/builder.go | 55 + tools/keeper/process/empty_test.go | 8 + tools/keeper/process/handle.go | 666 ++ tools/keeper/prometheus/prometheus.yml | 13 + tools/keeper/system/empty_test.go | 8 + tools/keeper/system/program.go | 146 + tools/keeper/system/program_test.go | 22 + tools/keeper/taoskeeper.service | 19 + tools/keeper/telegraf.conf | 6 + tools/keeper/telegraf.yml | 9 + tools/keeper/util/empty_test.go | 8 + tools/keeper/util/pool/antpool.go | 15 + tools/keeper/util/pool/bytes.go | 23 + tools/keeper/util/pool/empty_test.go | 8 + tools/keeper/util/util.go | 154 + tools/keeper/version/version.go | 9 + tools/keeper/zbx_taos_keeper_templates.xml | 111 + 73 files changed, 13789 insertions(+), 1 deletion(-) delete mode 160000 tools/keeper create mode 100644 tools/keeper/.dockerignore create mode 100644 tools/keeper/.github/workflows/build.yaml create mode 100644 tools/keeper/.github/workflows/release-pr-title.yaml create mode 100644 tools/keeper/.github/workflows/release.yaml create mode 100644 tools/keeper/.gitignore create mode 100644 tools/keeper/CHANGELOG.md create mode 100644 tools/keeper/Dockerfile create mode 100644 tools/keeper/DockerfileCloud create mode 100644 tools/keeper/README-CN.md create mode 100644 tools/keeper/README.md create mode 100644 
tools/keeper/api/adapter2.go create mode 100644 tools/keeper/api/adapter2_test.go create mode 100644 tools/keeper/api/audit.go create mode 100644 tools/keeper/api/audit_test.go create mode 100644 tools/keeper/api/checkhealth.go create mode 100644 tools/keeper/api/common.go create mode 100644 tools/keeper/api/exporter_test.go create mode 100644 tools/keeper/api/gen_metric.go create mode 100644 tools/keeper/api/gen_metric_test.go create mode 100644 tools/keeper/api/https_test.go create mode 100644 tools/keeper/api/nodeexporter.go create mode 100644 tools/keeper/api/report.go create mode 100644 tools/keeper/api/tables.go create mode 100644 tools/keeper/api/zabbix.go create mode 100755 tools/keeper/ci/changelog-generate.sh create mode 100755 tools/keeper/ci/post-release.sh create mode 100755 tools/keeper/ci/release.sh create mode 100644 tools/keeper/cmd/command.go create mode 100644 tools/keeper/cmd/empty_test.go create mode 100644 tools/keeper/config/metrics.sample create mode 100644 tools/keeper/config/taoskeeper.toml create mode 100644 tools/keeper/config/taoskeeper_enterprise.toml create mode 100644 tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json create mode 100644 tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.png create mode 100644 tools/keeper/db/connector.go create mode 100644 tools/keeper/db/empty_test.go create mode 100644 tools/keeper/docker-compose.yml create mode 100644 tools/keeper/examples/metrics.toml create mode 100644 tools/keeper/go.mod create mode 100644 tools/keeper/go.sum create mode 100644 tools/keeper/infrastructure/config/audit.go create mode 100644 tools/keeper/infrastructure/config/config.go create mode 100644 tools/keeper/infrastructure/config/log.go create mode 100644 tools/keeper/infrastructure/config/metric_test.go create mode 100644 tools/keeper/infrastructure/config/metrics.go create mode 100644 tools/keeper/infrastructure/log/empty_test.go create mode 100644 tools/keeper/infrastructure/log/log.go create mode 100644 tools/keeper/infrastructure/log/log_test.go create mode 100644 tools/keeper/infrastructure/log/web.go create mode 100644 tools/keeper/main.go create mode 100644 tools/keeper/monitor/collect.go create mode 100644 tools/keeper/monitor/empty_test.go create mode 100644 tools/keeper/monitor/monitor.go create mode 100644 tools/keeper/monitor/monitor_test.go create mode 100644 tools/keeper/monitor/system.go create mode 100644 tools/keeper/process/builder.go create mode 100644 tools/keeper/process/empty_test.go create mode 100644 tools/keeper/process/handle.go create mode 100644 tools/keeper/prometheus/prometheus.yml create mode 100644 tools/keeper/system/empty_test.go create mode 100644 tools/keeper/system/program.go create mode 100644 tools/keeper/system/program_test.go create mode 100644 tools/keeper/taoskeeper.service create mode 100644 tools/keeper/telegraf.conf create mode 100644 tools/keeper/telegraf.yml create mode 100644 tools/keeper/util/empty_test.go create mode 100644 tools/keeper/util/pool/antpool.go create mode 100644 tools/keeper/util/pool/bytes.go create mode 100644 tools/keeper/util/pool/empty_test.go create mode 100644 tools/keeper/util/util.go create mode 100644 tools/keeper/version/version.go create mode 100644 tools/keeper/zbx_taos_keeper_templates.xml diff --git a/tools/keeper b/tools/keeper deleted file mode 160000 index 1b291d7930..0000000000 --- a/tools/keeper +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1b291d7930180b49a0765805a34b3fb56f41b9fa diff --git a/tools/keeper/.dockerignore 
b/tools/keeper/.dockerignore new file mode 100644 index 0000000000..cff5a58d80 --- /dev/null +++ b/tools/keeper/.dockerignore @@ -0,0 +1 @@ +!taoskeeper diff --git a/tools/keeper/.github/workflows/build.yaml b/tools/keeper/.github/workflows/build.yaml new file mode 100644 index 0000000000..3b0db21ccb --- /dev/null +++ b/tools/keeper/.github/workflows/build.yaml @@ -0,0 +1,59 @@ +name: Go + +on: [push] + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + go: ["1.18"] + name: Go ${{ matrix.go }} + steps: + - name: Build tools + run: | + sudo apt-get update -y + sudo apt-get install -y build-essential cmake libgeos-dev + + - name: checkout + uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + + - name: checkout TDengine + uses: actions/checkout@v3 + with: + repository: "taosdata/TDengine" + path: "TDengine" + ref: "main" + + - name: install TDengine + run: | + cd TDengine + mkdir debug + cd debug + cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off + make -j 4 + sudo make install + which taosd + which taosadapter + + - name: start taosd + run: | + cp /etc/taos/taos.cfg ./ + sudo echo "supportVnodes 256" >> taos.cfg + nohup sudo taosd -c taos.cfg & + + - name: start taosadapter + run: nohup sudo taosadapter & + + - name: test + run: go mod tidy && go test -v ./... + + - name: Build + run: | + go mod tidy + go build diff --git a/tools/keeper/.github/workflows/release-pr-title.yaml b/tools/keeper/.github/workflows/release-pr-title.yaml new file mode 100644 index 0000000000..b20e1e49d9 --- /dev/null +++ b/tools/keeper/.github/workflows/release-pr-title.yaml @@ -0,0 +1,33 @@ +name: "Release PR Check" + +on: + pull_request: + branches: + - develop + + types: + - opened + - edited + - synchronize + - labeled + - unlabeled +jobs: + check: + name: "PR check if release" + if: contains(github.event.pull_request.title, '') + runs-on: ubuntu-latest + steps: + - uses: Slashgear/action-check-pr-title@v3.0.0 + with: + regexp: '.*:?\s*(\d+\.\d+\.\d+)(-\S+)?.*' # Regex the title should match. + + - name: Check version + run: | + version=$(echo "${{ github.event.pull_request.title }}" | grep -o -P ':?\s*(\d+\.\d+\.\d+)(-\S+)?' |sed -E 's/:?\s*//') + echo Seems you want to release $version + if git show-ref --tags $version --quiet; then + echo "bug tag exists" + exit 1 + else + echo "tag is valid" + fi diff --git a/tools/keeper/.github/workflows/release.yaml b/tools/keeper/.github/workflows/release.yaml new file mode 100644 index 0000000000..2a156634d4 --- /dev/null +++ b/tools/keeper/.github/workflows/release.yaml @@ -0,0 +1,45 @@ +name: Release + +on: + pull_request: + branches: + - develop + types: + - closed + +jobs: + release: + if: github.event.pull_request.merged == true && contains(github.event.pull_request.title, '') + runs-on: ubuntu-20.04 + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + ref: develop + fetch-depth: 0 + + - name: Extract version + id: extract-version + run: | + version=$(echo "${{ github.event.pull_request.title }}" | grep -o -P ':?\s*(\d+\.\d+\.\d+)(-\S+)?' 
|sed -E 's/:?\s*//') + echo $version + echo ::set-output name=version::$version + + - name: Version bump + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + ./ci/release.sh ${{ steps.extract-version.outputs.version }} + + - name: Release + uses: softprops/action-gh-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: v${{ steps.extract-version.outputs.version }} + body_path: CHANGELOG.tmp + + - name: Post-release + run: | + ./ci/post-release.sh ${{ steps.extract-version.outputs.version }} diff --git a/tools/keeper/.gitignore b/tools/keeper/.gitignore new file mode 100644 index 0000000000..2cba3f06c8 --- /dev/null +++ b/tools/keeper/.gitignore @@ -0,0 +1,22 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +*.html +*.data + +# Dependency directories (remove the comment below to include it) +vendor +/debug/ +/.idea/ +/taoskeeper +/test_data +/.vscode diff --git a/tools/keeper/CHANGELOG.md b/tools/keeper/CHANGELOG.md new file mode 100644 index 0000000000..6775343b0f --- /dev/null +++ b/tools/keeper/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Conventional Changelog](https://www.conventionalcommits.org/en/v1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Footnote + +This changelog is automatically generated. diff --git a/tools/keeper/Dockerfile b/tools/keeper/Dockerfile new file mode 100644 index 0000000000..c38bdc1acb --- /dev/null +++ b/tools/keeper/Dockerfile @@ -0,0 +1,16 @@ +FROM golang:1.18.6-alpine as builder +LABEL maintainer = "Linhe Huo " + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct +RUN go mod tidy && go build + +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +RUN chmod u+rw /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] diff --git a/tools/keeper/DockerfileCloud b/tools/keeper/DockerfileCloud new file mode 100644 index 0000000000..11137f61c2 --- /dev/null +++ b/tools/keeper/DockerfileCloud @@ -0,0 +1,24 @@ +FROM golang:1.18.6-alpine as builder +LABEL maintainer = "TDengine" + +ARG latestv +ARG gitinfo +ARG buildinfo + +RUN apk --no-cache add upx && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct + +RUN echo "$latestv $gitinfo $buildinfo" +RUN go mod tidy && go build -ldflags="-s -w -X 'github.com/taosdata/taoskeeper/version.Version=${latestv}' -X 'github.com/taosdata/taoskeeper/version.Gitinfo=${gitinfo}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${buildinfo}'" -o taoskeeper . 
&& upx -9 taoskeeper +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +RUN chmod u+rw /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] diff --git a/tools/keeper/README-CN.md b/tools/keeper/README-CN.md new file mode 100644 index 0000000000..770e9513c1 --- /dev/null +++ b/tools/keeper/README-CN.md @@ -0,0 +1,267 @@ +# TaosKeeper + +taosKeeper 是 TDengine 各项监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。并且 taosKeeper 企业版支持多种收集器,可以方便进行监控数据的展示。 + +taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。 + +## 构建 + +### 获取源码 + +从 GitHub 克隆源码: + +```sh +git clone https://github.com/taosdata/TDengine +cd TDengine/tools/keeper +``` + +### 编译 + +taosKeeper 使用 `GO` 语言编写,在构建前需要配置好 `GO` 语言开发环境。 + +```sh +go mod tidy +go build +``` + +## 安装 + +如果是自行构建的项目,仅需要拷贝 `taoskeeper` 文件到你的 `PATH` 中。 + +```sh +sudo install taoskeeper /usr/bin/ +``` + +## 启动 + +在启动前,应该做好如下配置: +在 `/etc/taos/taoskeeper.toml` 配置 TDengine 连接参数以及监控指标前缀等其他信息。 + +```toml +# gin 框架是否启用 debug +debug = false + +# 服务监听端口, 默认为 6043 +port = 6043 + +# 日志级别,包含 panic、error、info、debug、trace等 +loglevel = "info" + +# 程序中使用协程池的大小 +gopoolsize = 50000 + +# 查询 TDengine 监控数据轮询间隔 +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" + +# 需要被监控的 taosAdapter +[taosAdapter] +address = ["127.0.0.1:6041"] + +[metrics] +# 监控指标前缀 +prefix = "taos" + +# 存放监控数据的数据库 +database = "log" + +# 指定需要监控的普通表 +tables = [] + +[environment] +# 是否在容器中运行,影响 taosKeeper 自身的监控数据 +incgroup = false +``` + +现在可以启动服务,输入: + +```sh +taoskeeper +``` + +如果你使用 `systemd`,复制 `taoskeeper.service` 到 `/lib/systemd/system/`,并启动服务。 + +```sh +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +``` + +让 taosKeeper 随系统开机自启动。 + +```sh +sudo systemctl enable taoskeeper +``` + +如果使用 `systemd`,你可以使用如下命令完成安装。 + +```sh +go mod tidy +go build +sudo install taoskeeper /usr/bin/ +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +sudo systemctl enable taoskeeper +``` + +## Docker + +如下介绍了如何在 docker 中构建 taosKeeper: + +在构建前请配置好 `./config/taoskeeper.toml` 中合适的参数,并编辑 Dockerfile ,示例如下。 + +```dockerfile +FROM golang:1.18.6-alpine as builder + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct +RUN go mod tidy && go build + +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +如果已经有 taosKeeper 可执行文件,在配置好 `taoskeeper.toml` 后你可以使用如下方式构建: + +```dockerfile +FROM ubuntu:18.04 +RUN mkdir -p /etc/taos +COPY ./taoskeeper /usr/bin/ +COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +## 使用(**企业版**) + +### Prometheus (by scrape) + +taosKeeper 可以像 `node-exporter` 一样向 Prometheus 提供监控指标。\ +在 `/etc/prometheus/prometheus.yml` 添加配置: + +```yml +global: + scrape_interval: 5s + +scrape_configs: + - job_name: "taoskeeper" + static_configs: + - targets: ["taoskeeper:6043"] +``` + +现在使用 PromQL 查询即可以显示结果,比如要查看指定主机(通过 FQDN 正则匹配表达式筛选)硬盘使用百分比: + +```promql +taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"} +``` + +你可以使用 `docker-compose` 测试完整的链路。 +`docker-compose.yml`示例: + +```yml +version: "3.7" + +services: + tdengine: + image: tdengine/tdengine + environment: + TAOS_FQDN: tdengine + volumes: + - 
taosdata:/var/lib/taos + taoskeeper: + build: ./ + depends_on: + - tdengine + environment: + TDENGINE_HOST: tdengine + TDENGINE_PORT: 6041 + volumes: + - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml + ports: + - 6043:6043 + prometheus: + image: prom/prometheus + volumes: + - ./prometheus/:/etc/prometheus/ + ports: + - 9090:9090 +volumes: + taosdata: +``` + +启动: + +```sh +docker-compose up -d +``` + +现在通过访问 来查询结果。访问[simple dashboard](https://grafana.com/grafana/dashboards/15164) 来查看TaosKeeper + Prometheus + Grafana 监控 TDengine 的快速启动实例。 + +### Telegraf + +如果使用 telegraf 来收集各个指标,仅需要在配置中增加: + +```toml +[[inputs.prometheus]] +## An array of urls to scrape metrics from. +urls = ["http://taoskeeper:6043/metrics"] +``` + +可以通过 `docker-compose` 来测试 + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper +``` + +由于可以在 `telegraf.conf` 设置日志为标准输出: + +```toml +[[outputs.file]] +files = ["stdout"] +``` + +所以你可以通过 `docker-compose logs` 在标准输出中追踪 TDengine 各项指标。 + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf +``` + +### Zabbix + +1. 导入 zabbix 临时文件 `zbx_taos_keeper_templates.xml`。 +2. 使用 `TDengine` 模板来创建主机,修改宏 `{$TAOSKEEPER_HOST}` 和 `{$COLLECTION_INTERVAL}`。 +3. 等待并查看到自动创建的条目。 + +### 常见问题 + +* 启动报错,显示connection refused + + **解析**:taosKeeper 依赖 restful 接口查询数据,请检查 taosAdapter 是否正常运行或 taoskeeper.toml 中 taosAdapter 地址是否正确。 + +* taosKeeper 监控不同 TDengine 显示的检测指标数目不一致? + + **解析**:如果 TDengine 中未创建某项指标,taoskeeper 不能获取对应的检测结果。 + +* 不能接收到 TDengine 的监控日志。 + + **解析**: 修改 `/etc/taos/taos.cfg` 文件并增加如下参数: + + ```cfg + monitor 1 // 启用monitor + monitorInterval 30 // 发送间隔 (s) + monitorFqdn localhost // 接收消息的FQDN,默认为空 + monitorPort 6043 // 接收消息的端口号 + monitorMaxLogs 100 // 每个监控间隔缓存的最大日志数量 + ``` diff --git a/tools/keeper/README.md b/tools/keeper/README.md new file mode 100644 index 0000000000..18e351f160 --- /dev/null +++ b/tools/keeper/README.md @@ -0,0 +1,273 @@ +# TaosKeeper + +TDengine Metrics Exporter for Kinds of Collectors, you can obtain the running status of TDengine by performing several simple configurations. + +This tool uses TDengine RESTful API, so you could just build it without TDengine client. + +## Build + +### Get the source codes + +```sh +git clone https://github.com/taosdata/TDengine +cd TDengine/tools/keeper +``` + +### compile + +```sh +go mod tidy +go build +``` + +## Install + +If you build the tool by your self, just copy the `taoskeeper` binary to your `PATH`. + +```sh +sudo install taoskeeper /usr/bin/ +``` + +## Start + +Before start, you should configure some options like database ip, port or the prefix and others for exported metrics. + +in `/etc/taos/taoskeeper.toml`. + +```toml +# Start with debug middleware for gin +debug = false + +# Listen port, default is 6043 +port = 6043 + +# log level +loglevel = "info" + +# go pool size +gopoolsize = 50000 + +# interval for TDengine metrics +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" + +# list of taosAdapter that need to be monitored +[taosAdapter] +address = ["127.0.0.1:6041"] + +[metrics] +# metrics prefix in metrics names. +prefix = "taos" + +# database for storing metrics data +database = "log" + +# export some tables that are not super table +tables = [] + +[environment] +# Whether running in cgroup. +incgroup = false +``` + +Now you could run the tool: + +```sh +taoskeeper +``` + +If you use `systemd`, copy the `taoskeeper.service` to `/lib/systemd/system/` and start the service. 
+ +```sh +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +``` + +To start taoskeeper whenever os rebooted, you should enable the systemd service: + +```sh +sudo systemctl enable taoskeeper +``` + +So if use `systemd`, you'd better install it with these lines all-in-one: + +```sh +go mod tidy +go build +sudo install taoskeeper /usr/bin/ +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +sudo systemctl enable taoskeeper +``` + +## Docker + +Here is an example to show how to build this tool in docker: + +Before building, you should configure `./config/taoskeeper.toml` with proper parameters and edit Dockerfile. Take following as example. + +```dockerfile +FROM golang:1.18.2 as builder + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct +RUN go mod tidy && go build + +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +If you already have taosKeeper binary file, you can build this tool like: + +```dockerfile +FROM ubuntu:18.04 +RUN mkdir -p /etc/taos +COPY ./taoskeeper /usr/bin/ +COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +## Usage (**Enterprise Edition**) + +### Prometheus (by scrape) + +It's now act as a prometheus exporter like `node-exporter`. + +Here's how to add this in scrape configs of `/etc/prometheus/prometheus.yml`: + +```yml +global: + scrape_interval: 5s + +scrape_configs: + - job_name: "taoskeeper" + static_configs: + - targets: [ "taoskeeper:6043" ] +``` + +Now PromQL query will show the right result, for example, to show disk used percent in an specific host with FQDN regex +match expression: + +```promql +taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"} +``` + +You can use `docker-compose` with the current `docker-compose.yml` to test the whole stack. + +Here is the `docker-compose.yml`: + +```yml +version: "3.7" + +services: + tdengine: + image: tdengine/tdengine + environment: + TAOS_FQDN: tdengine + volumes: + - taosdata:/var/lib/taos + taoskeeper: + build: ./ + depends_on: + - tdengine + environment: + TDENGINE_HOST: tdengine + TDENGINE_PORT: 6041 + volumes: + - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml + ports: + - 6043:6043 + prometheus: + image: prom/prometheus + volumes: + - ./prometheus/:/etc/prometheus/ + ports: + - 9090:9090 +volumes: + taosdata: + +``` + +Start the stack: + +```sh +docker-compose up -d +``` + +Now you point to (if you have not started a prometheus server by yourself) and query. + +For a quick demo with TaosKeeper + Prometheus + Grafana, we provide +a [simple dashboard](https://grafana.com/grafana/dashboards/15164) to monitor TDengine. + +### Telegraf + +If you are using telegraf to collect metrics, just add inputs like this: + +```toml +[[inputs.prometheus]] + ## An array of urls to scrape metrics from. 
+ urls = ["http://taoskeeper:6043/metrics"] +``` + +You can test it with `docker-compose`: + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper +``` + +Since we have set an stdout file output in `telegraf.conf`: + +```toml +[[outputs.file]] + files = ["stdout"] +``` + +So you can track with TDengine metrics in standard output with `docker-compose logs`: + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf +``` + +### Zabbix + +1. Import the zabbix template file `zbx_taos_keeper_templates.xml`. +2. Use the template `TDengine` to create the host and modify the macros `{$TAOSKEEPER_HOST}` + and `{$COLLECTION_INTERVAL}`. +3. Waiting for monitoring items to be created automatically. + +### FAQ + +* Error occurred: Connection refused, while taosKeeper was starting + + **Answer**: taoskeeper relies on restful interfaces to query data. Check whether the taosAdapter is running or whether + the taosAdapter address in taoskeeper.toml is correct. + +* Why detection metrics displayed by different TDengine's inconsistent with taoskeeper monitoring? + + **Answer**: If a metric is not created in TDengine, taoskeeper cannot get the corresponding test results. + +* Cannot receive log from TDengine server. + + **Answer**: Modify `/etc/taos/taos.cfg` file and add parameters like: + + ```cfg + monitor 1 // start monitor + monitorInterval 30 // send log interval (s) + monitorFqdn localhost + monitorPort 6043 // taosKeeper port + monitorMaxLogs 100 + ``` diff --git a/tools/keeper/api/adapter2.go b/tools/keeper/api/adapter2.go new file mode 100644 index 0000000000..645b9a176b --- /dev/null +++ b/tools/keeper/api/adapter2.go @@ -0,0 +1,260 @@ +package api + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var adapterLog = log.GetLogger("ADP") + +type adapterReqType int + +const ( + rest adapterReqType = iota // 0 - rest + ws // 1 - ws +) + +type Adapter struct { + username string + password string + host string + port int + usessl bool + conn *db.Connector + db string + dbOptions map[string]interface{} +} + +func NewAdapter(c *config.Config) *Adapter { + return &Adapter{ + username: c.TDengine.Username, + password: c.TDengine.Password, + host: c.TDengine.Host, + port: c.TDengine.Port, + usessl: c.TDengine.Usessl, + db: c.Metrics.Database.Name, + dbOptions: c.Metrics.Database.Options, + } +} + +func (a *Adapter) Init(c gin.IRouter) error { + if err := a.createDatabase(); err != nil { + return fmt.Errorf("create database error:%s", err) + } + if err := a.initConnect(); err != nil { + return fmt.Errorf("init db connect error:%s", err) + } + if err := a.createTable(); err != nil { + return fmt.Errorf("create table error:%s", err) + } + c.POST("/adapter_report", a.handleFunc()) + return nil +} + +func (a *Adapter) handleFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + adapterLog := adapterLog.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if a.conn == nil { + adapterLog.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + adapterLog.Errorf("get adapter report data error, 
msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get adapter report data error. %s", err)}) + return + } + if adapterLog.Logger.IsLevelEnabled(logrus.TraceLevel) { + adapterLog.Tracef("received adapter report data:%s", string(data)) + } + + var report AdapterReport + if err = json.Unmarshal(data, &report); err != nil { + adapterLog.Errorf("parse adapter report data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse adapter report data error: %s", err)}) + return + } + sql := a.parseSql(report) + adapterLog.Debugf("adapter report sql:%s", sql) + + if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil { + adapterLog.Errorf("adapter report error, msg:%s", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func (a *Adapter) initConnect() error { + conn, err := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl) + if err != nil { + adapterLog.Dup().Errorf("init db connect error, msg:%s", err) + return err + } + a.conn = conn + return nil +} + +func (a *Adapter) parseSql(report AdapterReport) string { + // reqType: 0: rest, 1: websocket + restTbName := a.tableName(report.Endpoint, rest) + wsTbName := a.tableName(report.Endpoint, ws) + ts := time.Unix(report.Timestamp, 0).Format(time.RFC3339) + metric := report.Metric + return fmt.Sprintf("insert into %s using adapter_requests tags ('%s', %d) "+ + "values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d) "+ + "%s using adapter_requests tags ('%s', %d) "+ + "values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)", + restTbName, report.Endpoint, rest, ts, metric.RestTotal, metric.RestQuery, metric.RestWrite, metric.RestOther, + metric.RestInProcess, metric.RestSuccess, metric.RestFail, metric.RestQuerySuccess, metric.RestQueryFail, + metric.RestWriteSuccess, metric.RestWriteFail, metric.RestOtherSuccess, metric.RestOtherFail, + metric.RestQueryInProcess, metric.RestWriteInProcess, + wsTbName, report.Endpoint, ws, ts, metric.WSTotal, + metric.WSQuery, metric.WSWrite, metric.WSOther, metric.WSInProcess, metric.WSSuccess, metric.WSFail, + metric.WSQuerySuccess, metric.WSQueryFail, metric.WSWriteSuccess, metric.WSWriteFail, metric.WSOtherSuccess, + metric.WSOtherFail, metric.WSQueryInProcess, metric.WSWriteInProcess) +} + +func (a *Adapter) tableName(endpoint string, reqType adapterReqType) string { + var tbname string + if reqType == rest { + tbname = fmt.Sprintf("adapter_req_%s_%s", endpoint, "rest") + } else { + tbname = fmt.Sprintf("adapter_req_%s_%s", endpoint, "ws") + } + + if len(tbname) <= util.MAX_TABLE_NAME_LEN { + return util.ToValidTableName(tbname) + } else { + sum := md5.Sum([]byte(fmt.Sprintf("%s%d", endpoint, reqType))) + return fmt.Sprintf("adapter_req_%s", hex.EncodeToString(sum[:])) + } +} + +func (a *Adapter) createDatabase() error { + qid := util.GetQidOwn() + + adapterLog := adapterLog.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl) + if err != nil { + return fmt.Errorf("connect to database error, msg:%s", err) + } + defer func() { _ = conn.Close() }() + sql := a.createDBSql() + adapterLog.Infof("create database, sql:%s", sql) + _, err = conn.Exec(context.Background(), sql, util.GetQidOwn()) + if err != nil { + adapterLog.Errorf("create database error, msg:%s", err) + return err + } + + return err +} + 
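// Illustrative sketch of what createDBSql (defined just below) produces; the
// example values are assumed for illustration and are not taken from this patch:
// with db = "log" and dbOptions = {"vgroups": 2, "precision": "ms"}, the
// generated statement would look roughly like
//
//	create database if not exists log vgroups 2 precision 'ms'
//
// String-valued options are single-quoted, any other value is written with %v,
// and option order follows Go map iteration, so it is not deterministic.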
+func (a *Adapter) createDBSql() string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("create database if not exists %s ", a.db)) + + for k, v := range a.dbOptions { + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + + return buf.String() +} + +var adapterTableSql = "create stable if not exists `adapter_requests` (" + + "`ts` timestamp, " + + "`total` int unsigned, " + + "`query` int unsigned, " + + "`write` int unsigned, " + + "`other` int unsigned, " + + "`in_process` int unsigned, " + + "`success` int unsigned, " + + "`fail` int unsigned, " + + "`query_success` int unsigned, " + + "`query_fail` int unsigned, " + + "`write_success` int unsigned, " + + "`write_fail` int unsigned, " + + "`other_success` int unsigned, " + + "`other_fail` int unsigned, " + + "`query_in_process` int unsigned, " + + "`write_in_process` int unsigned ) " + + "tags (`endpoint` varchar(32), `req_type` tinyint unsigned )" + +func (a *Adapter) createTable() error { + if a.conn == nil { + return errNoConnection + } + _, err := a.conn.Exec(context.Background(), adapterTableSql, util.GetQidOwn()) + return err +} + +type AdapterReport struct { + Timestamp int64 `json:"ts"` + Metric AdapterMetrics `json:"metrics"` + Endpoint string `json:"endpoint"` +} + +type AdapterMetrics struct { + RestTotal int `json:"rest_total"` + RestQuery int `json:"rest_query"` + RestWrite int `json:"rest_write"` + RestOther int `json:"rest_other"` + RestInProcess int `json:"rest_in_process"` + RestSuccess int `json:"rest_success"` + RestFail int `json:"rest_fail"` + RestQuerySuccess int `json:"rest_query_success"` + RestQueryFail int `json:"rest_query_fail"` + RestWriteSuccess int `json:"rest_write_success"` + RestWriteFail int `json:"rest_write_fail"` + RestOtherSuccess int `json:"rest_other_success"` + RestOtherFail int `json:"rest_other_fail"` + RestQueryInProcess int `json:"rest_query_in_process"` + RestWriteInProcess int `json:"rest_write_in_process"` + WSTotal int `json:"ws_total"` + WSQuery int `json:"ws_query"` + WSWrite int `json:"ws_write"` + WSOther int `json:"ws_other"` + WSInProcess int `json:"ws_in_process"` + WSSuccess int `json:"ws_success"` + WSFail int `json:"ws_fail"` + WSQuerySuccess int `json:"ws_query_success"` + WSQueryFail int `json:"ws_query_fail"` + WSWriteSuccess int `json:"ws_write_success"` + WSWriteFail int `json:"ws_write_fail"` + WSOtherSuccess int `json:"ws_other_success"` + WSOtherFail int `json:"ws_other_fail"` + WSQueryInProcess int `json:"ws_query_in_process"` + WSWriteInProcess int `json:"ws_write_in_process"` +} diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go new file mode 100644 index 0000000000..e6fd263c43 --- /dev/null +++ b/tools/keeper/api/adapter2_test.go @@ -0,0 +1,98 @@ +package api + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func TestAdapter2(t *testing.T) { + c := &config.Config{ + InstanceID: 64, + Port: 6043, + TDengine: config.TDengineRestful{ + Host: "127.0.0.1", + Port: 6041, + Username: "root", + Password: "taosdata", + Usessl: false, + }, + Metrics: config.MetricsConfig{ + Database: config.Database{ + Name: "adapter_report_test", + Options: map[string]interface{}{}, + }, + }, + } + a := 
NewAdapter(c) + err := a.Init(router) + assert.NoError(t, err) + + w := httptest.NewRecorder() + body := strings.NewReader(" {\"ts\": 1696928323, \"metrics\": {\"rest_total\": 10, \"rest_query\": 2, " + + "\"rest_write\": 5, \"rest_other\": 3, \"rest_in_process\": 1, \"rest_fail\": 5, \"rest_success\": 3, " + + "\"rest_query_success\": 1, \"rest_query_fail\": 2, \"rest_write_success\": 2, \"rest_write_fail\": 3, " + + "\"rest_other_success\": 1, \"rest_other_fail\": 2, \"rest_query_in_process\": 1, \"rest_write_in_process\": 2, " + + "\"ws_total\": 10, \"ws_query\": 2, \"ws_write\": 3, \"ws_other\": 5, \"ws_in_process\": 1, \"ws_success\": 3, " + + "\"ws_fail\": 3, \"ws_query_success\": 1, \"ws_query_fail\": 1, \"ws_write_success\": 2, \"ws_write_fail\": 2, " + + "\"ws_other_success\": 1, \"ws_other_fail\": 2, \"ws_query_in_process\": 1, \"ws_write_in_process\": 2 }, " + + "\"endpoint\": \"adapter-1:6041\"}") + req, _ := http.NewRequest(http.MethodPost, "/adapter_report", body) + req.Header.Set("X-QID", "0x1234567890ABCD00") + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + conn, err := db.NewConnectorWithDb(c.TDengine.Username, c.TDengine.Password, c.TDengine.Host, c.TDengine.Port, c.Metrics.Database.Name, c.TDengine.Usessl) + defer func() { + _, _ = conn.Query(context.Background(), "drop database if exists adapter_report_test", util.GetQidOwn()) + }() + + assert.NoError(t, err) + data, err := conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=0", util.GetQidOwn()) + + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, uint32(10), data.Data[0][1]) + assert.Equal(t, uint32(2), data.Data[0][2]) + assert.Equal(t, uint32(5), data.Data[0][3]) + assert.Equal(t, uint32(3), data.Data[0][4]) + assert.Equal(t, uint32(1), data.Data[0][5]) + assert.Equal(t, uint32(3), data.Data[0][6]) + assert.Equal(t, uint32(5), data.Data[0][7]) + assert.Equal(t, uint32(1), data.Data[0][8]) + assert.Equal(t, uint32(2), data.Data[0][9]) + assert.Equal(t, uint32(2), data.Data[0][10]) + assert.Equal(t, uint32(3), data.Data[0][11]) + assert.Equal(t, uint32(1), data.Data[0][12]) + assert.Equal(t, uint32(2), data.Data[0][13]) + assert.Equal(t, uint32(1), data.Data[0][14]) + assert.Equal(t, uint32(2), data.Data[0][15]) + + data, err = conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=1", util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, uint32(10), data.Data[0][1]) + assert.Equal(t, uint32(2), data.Data[0][2]) + assert.Equal(t, uint32(3), data.Data[0][3]) + assert.Equal(t, uint32(5), data.Data[0][4]) + assert.Equal(t, uint32(1), data.Data[0][5]) + assert.Equal(t, uint32(3), data.Data[0][6]) + assert.Equal(t, uint32(3), data.Data[0][7]) + assert.Equal(t, uint32(1), data.Data[0][8]) + assert.Equal(t, uint32(1), data.Data[0][9]) + assert.Equal(t, uint32(2), data.Data[0][10]) + assert.Equal(t, uint32(2), data.Data[0][11]) + assert.Equal(t, uint32(1), data.Data[0][12]) + assert.Equal(t, uint32(2), data.Data[0][13]) + assert.Equal(t, uint32(1), data.Data[0][14]) + assert.Equal(t, uint32(2), data.Data[0][15]) + + conn.Exec(context.Background(), "drop database "+c.Metrics.Database.Name, util.GetQidOwn()) +} diff --git a/tools/keeper/api/audit.go b/tools/keeper/api/audit.go new file mode 100644 index 0000000000..fd9fc4f667 --- /dev/null +++ b/tools/keeper/api/audit.go @@ -0,0 +1,336 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" 
+ "errors" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var auditLogger = log.GetLogger("AUD") + +const MAX_DETAIL_LEN = 50000 + +type Audit struct { + username string + password string + host string + port int + usessl bool + conn *db.Connector + db string + dbOptions map[string]interface{} +} + +type AuditInfo struct { + Timestamp string `json:"timestamp"` + ClusterID string `json:"cluster_id"` + User string `json:"user"` + Operation string `json:"operation"` + Db string `json:"db"` + Resource string `json:"resource"` + ClientAdd string `json:"client_add"` // client address + Details string `json:"details"` +} + +type AuditArrayInfo struct { + Records []AuditInfo `json:"records"` +} + +type AuditInfoOld struct { + Timestamp int64 `json:"timestamp"` + ClusterID string `json:"cluster_id"` + User string `json:"user"` + Operation string `json:"operation"` + Db string `json:"db"` + Resource string `json:"resource"` + ClientAdd string `json:"client_add"` // client address + Details string `json:"details"` +} + +func NewAudit(c *config.Config) (*Audit, error) { + a := Audit{ + username: c.TDengine.Username, + password: c.TDengine.Password, + host: c.TDengine.Host, + port: c.TDengine.Port, + usessl: c.TDengine.Usessl, + db: c.Audit.Database.Name, + dbOptions: c.Audit.Database.Options, + } + if a.db == "" { + a.db = "audit" + } + return &a, nil +} + +func (a *Audit) Init(c gin.IRouter) error { + if err := a.createDatabase(); err != nil { + return fmt.Errorf("create database error, msg:%s", err) + } + if err := a.initConnect(); err != nil { + return fmt.Errorf("init db connect error, msg:%s", err) + } + if err := a.createSTables(); err != nil { + return fmt.Errorf("create stable error, msg:%s", err) + } + c.POST("/audit", a.handleFunc()) + c.POST("/audit_v2", a.handleFunc()) + c.POST("/audit-batch", a.handleBatchFunc()) + return nil +} + +func (a *Audit) handleBatchFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + auditLogger := auditLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if a.conn == nil { + auditLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + auditLogger.Errorf("get audit data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. 
%s", err)}) + return + } + + if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + auditLogger.Tracef("receive audit request, data:%s", string(data)) + } + var auditArray AuditArrayInfo + + if err := json.Unmarshal(data, &auditArray); err != nil { + auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)}) + return + } + + if len(auditArray.Records) == 0 { + if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + auditLogger.Trace("handle request successfully (no records)") + } + c.JSON(http.StatusOK, gin.H{}) + return + } + + err = handleBatchRecord(auditArray.Records, a.conn, qid) + + if err != nil { + auditLogger.Errorf("process records error, error:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. %s", err)}) + return + } + + c.JSON(http.StatusOK, gin.H{}) + } +} + +func (a *Audit) handleFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + auditLogger := auditLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if a.conn == nil { + auditLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + auditLogger.Errorf("get audit data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. %s", err)}) + return + } + if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + auditLogger.Tracef("receive audit request, data:%s", string(data)) + } + sql := "" + + isStrTime, _ := regexp.MatchString(`"timestamp"\s*:\s*"[^"]*"`, string(data)) + if isStrTime { + var audit AuditInfo + if err := json.Unmarshal(data, &audit); err != nil { + auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)}) + return + } + + sql = parseSql(audit) + } else { + var audit AuditInfoOld + if err := json.Unmarshal(data, &audit); err != nil { + auditLogger.Errorf("parse old audit error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)}) + return + } + + sql = parseSqlOld(audit) + } + + if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil { + auditLogger.Errorf("save audit data error, sql:%s, error:%s", sql, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("save audit data error: %s", err)}) + return + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func handleDetails(details string) string { + if strings.Contains(details, "'") { + details = strings.ReplaceAll(details, "'", "\\'") + } + if strings.Contains(details, "\"") { + details = strings.ReplaceAll(details, "\"", "\\\"") + } + if len(details) > MAX_DETAIL_LEN { + details = details[:MAX_DETAIL_LEN] + } + return details +} + +func parseSql(audit AuditInfo) string { + details := handleDetails(audit.Details) + + return fmt.Sprintf( + "insert into %s using operations tags ('%s') values (%s, '%s', '%s', '%s', '%s', '%s', '%s') ", + getTableName(audit), audit.ClusterID, audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details) +} + +func parseSqlOld(audit AuditInfoOld) string { + details := handleDetails(audit.Details) + + return fmt.Sprintf( + "insert into %s using operations tags ('%s') values (%s, '%s', 
'%s', '%s', '%s', '%s', '%s') ", + getTableNameOld(audit), audit.ClusterID, strconv.FormatInt(audit.Timestamp, 10)+"000000", audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details) +} + +func handleBatchRecord(auditArray []AuditInfo, conn *db.Connector, qid uint64) error { + var builder strings.Builder + var head = fmt.Sprintf( + "insert into %s using operations tags ('%s') values", + getTableName(auditArray[0]), auditArray[0].ClusterID) + + builder.WriteString(head) + var qid_counter uint8 = 0 + for _, audit := range auditArray { + + details := handleDetails(audit.Details) + valuesStr := fmt.Sprintf( + "(%s, '%s', '%s', '%s', '%s', '%s', '%s') ", + audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details) + + if (builder.Len() + len(valuesStr)) > MAX_SQL_LEN { + sql := builder.String() + if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil { + return err + } + builder.Reset() + builder.WriteString(head) + } + builder.WriteString(valuesStr) + qid_counter++ + } + + if builder.Len() > len(head) { + sql := builder.String() + if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil { + return err + } + } + + return nil +} + +func getTableName(audit AuditInfo) string { + return fmt.Sprintf("t_operations_%s", audit.ClusterID) +} + +func getTableNameOld(audit AuditInfoOld) string { + return fmt.Sprintf("t_operations_%s", audit.ClusterID) +} + +func (a *Audit) initConnect() error { + conn, err := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl) + if err != nil { + auditLogger.Errorf("init db connect error, msg:%s", err) + return err + } + a.conn = conn + return nil +} + +func (a *Audit) createDatabase() error { + conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl) + if err != nil { + return fmt.Errorf("connect to database error, msg:%s", err) + } + defer func() { _ = conn.Close() }() + sql := a.createDBSql() + auditLogger.Infof("create database, sql:%s", sql) + _, err = conn.Exec(context.Background(), sql, util.GetQidOwn()) + if err != nil { + auditLogger.Errorf("create database error, msg:%s", err) + return err + } + return err +} + +var errNoConnection = errors.New("no connection") + +func (a *Audit) createDBSql() string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("create database if not exists %s precision 'ns' ", a.db)) + + for k, v := range a.dbOptions { + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + + return buf.String() +} + +func (a *Audit) createSTables() error { + var createTableSql = "create stable if not exists operations " + + "(ts timestamp, user_name varchar(25), operation varchar(20), db varchar(65), resource varchar(193), client_address varchar(25), details varchar(50000)) " + + "tags (cluster_id varchar(64))" + + if a.conn == nil { + return errNoConnection + } + _, err := a.conn.Exec(context.Background(), createTableSql, util.GetQidOwn()) + if err != nil { + auditLogger.Errorf("## create stable error, msg:%s", err) + return err + } + return nil +} diff --git a/tools/keeper/api/audit_test.go b/tools/keeper/api/audit_test.go new file mode 100644 index 0000000000..99beae7a54 --- /dev/null +++ b/tools/keeper/api/audit_test.go @@ -0,0 +1,153 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "strings" + 
"testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func TestAudit(t *testing.T) { + cfg := util.GetCfg() + cfg.Audit = config.AuditConfig{ + Database: config.Database{ + Name: "keepter_test_audit", + }, + Enable: true, + } + + a, err := NewAudit(cfg) + assert.NoError(t, err) + err = a.Init(router) + assert.NoError(t, err) + + longDetails := strings.Repeat("0123456789", 5000) + + cases := []struct { + name string + ts int64 + detail string + data string + expect string + }{ + { + name: "1", + ts: 1699839716440000000, + data: `{"timestamp": "1699839716440000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "detail"}`, + expect: "detail", + }, + { + name: "2", + ts: 1699839716441000000, + data: `{"timestamp": "1699839716441000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "` + longDetails + `"}`, + expect: longDetails[:50000], + }, + { + name: "3", + ts: 1699839716442000000, + data: "{\"timestamp\": \"1699839716442000000\", \"cluster_id\": \"cluster_id\", \"user\": \"user\", \"operation\": \"operation\", \"db\":\"dbnameb\", \"resource\":\"resourcenameb\", \"client_add\": \"localhost:30000\", \"details\": \"create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'\"}", + expect: "create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'", + }, + } + + cases2 := []struct { + name string + ts int64 + detail string + data string + expect string + }{ + { + name: "1", + ts: 1699839716445000000, + data: `{"timestamp":1699839716445, "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "details"}`, + expect: "details", + }, + } + conn, err := db.NewConnectorWithDb(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.Audit.Database.Name, cfg.TDengine.Usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn()) + }() + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(c.data) + req, _ := http.NewRequest(http.MethodPost, "/audit_v2", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, c.expect, data.Data[0][1]) + }) + } + + for _, c := range cases2 { + t.Run(c.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(c.data) + req, _ := http.NewRequest(http.MethodPost, "/audit", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, 
c.expect, data.Data[0][1]) + }) + } + + for _, c := range cases2 { + t.Run(c.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(c.data) + req, _ := http.NewRequest(http.MethodPost, "/audit", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, c.expect, data.Data[0][1]) + }) + } + + MAX_SQL_LEN = 300 + // test audit batch + input := `{"records":[{"timestamp":"1702548856940013848","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d630302"},{"timestamp":"1702548856939746458","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45230","db":"test","resource":"","details":"d130277"},{"timestamp":"1702548856939586665","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5268"},{"timestamp":"1702548856939528940","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50222","db":"test","resource":"","details":"d255282"},{"timestamp":"1702548856939336371","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45126","db":"test","resource":"","details":"d755297"},{"timestamp":"1702548856939075131","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d380325"},{"timestamp":"1702548856938640661","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45152","db":"test","resource":"","details":"d255281"},{"timestamp":"1702548856938505795","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d130276"},{"timestamp":"1702548856938363319","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45178","db":"test","resource":"","details":"d755296"},{"timestamp":"1702548856938201478","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d380324"},{"timestamp":"1702548856937740618","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5266"}]}` + + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn()) + }() + + t.Run("testbatch", func(t *testing.T) { + //test empty array + w1 := httptest.NewRecorder() + body1 := strings.NewReader(`{"records": []}`) + + req1, _ := http.NewRequest(http.MethodPost, "/audit-batch", body1) + router.ServeHTTP(w1, req1) + assert.Equal(t, 200, w1.Code) + + //test 2 items array + w := httptest.NewRecorder() + body := strings.NewReader(input) + req, _ := http.NewRequest(http.MethodPost, "/audit-batch", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), "select ts, details from "+cfg.Audit.Database.Name+".operations where cluster_id='8468922059162439502'", util.GetQidOwn()) + assert.NoError(t, 
err) + assert.Equal(t, 11, len(data.Data)) + }) +} diff --git a/tools/keeper/api/checkhealth.go b/tools/keeper/api/checkhealth.go new file mode 100644 index 0000000000..c5d5a2d24a --- /dev/null +++ b/tools/keeper/api/checkhealth.go @@ -0,0 +1,21 @@ +package api + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +func NewCheckHealth(version string) *CheckHealth { + return &CheckHealth{version: version} +} + +type CheckHealth struct { + version string +} + +func (h *CheckHealth) Init(c gin.IRouter) { + c.GET("check_health", func(context *gin.Context) { + context.JSON(http.StatusOK, map[string]string{"version": h.version}) + }) +} diff --git a/tools/keeper/api/common.go b/tools/keeper/api/common.go new file mode 100644 index 0000000000..d02a30eb8b --- /dev/null +++ b/tools/keeper/api/common.go @@ -0,0 +1,89 @@ +package api + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var commonLogger = log.GetLogger("CMN") + +func CreateDatabase(username string, password string, host string, port int, usessl bool, dbname string, databaseOptions map[string]interface{}) { + qid := util.GetQidOwn() + + commonLogger := commonLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + ctx := context.Background() + + conn, err := db.NewConnector(username, password, host, port, usessl) + if err != nil { + commonLogger.Errorf("connect to adapter error, msg:%s", err) + return + } + + defer closeConn(conn) + + createDBSql := generateCreateDBSql(dbname, databaseOptions) + commonLogger.Warningf("create database sql: %s", createDBSql) + + for i := 0; i < 3; i++ { + if _, err := conn.Exec(ctx, createDBSql, util.GetQidOwn()); err != nil { + commonLogger.Errorf("try %v times: create database %s error, msg:%v", i+1, dbname, err) + time.Sleep(5 * time.Second) + continue + } + return + } + panic(err) +} + +func generateCreateDBSql(dbname string, databaseOptions map[string]interface{}) string { + var buf bytes.Buffer + buf.WriteString("create database if not exists ") + buf.WriteString(dbname) + + for k, v := range databaseOptions { + buf.WriteString(" ") + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + return buf.String() +} + +func CreatTables(username string, password string, host string, port int, usessl bool, dbname string, createList []string) { + ctx := context.Background() + conn, err := db.NewConnectorWithDb(username, password, host, port, dbname, usessl) + if err != nil { + commonLogger.Errorf("connect to database error, msg:%s", err) + return + } + defer closeConn(conn) + + for _, createSql := range createList { + commonLogger.Infof("execute sql:%s", createSql) + if _, err = conn.Exec(ctx, createSql, util.GetQidOwn()); err != nil { + commonLogger.Errorf("execute sql: %s, error: %s", createSql, err) + } + } +} + +func closeConn(conn *db.Connector) { + if err := conn.Close(); err != nil { + commonLogger.Errorf("close connection error, msg:%s", err) + } +} diff --git a/tools/keeper/api/exporter_test.go b/tools/keeper/api/exporter_test.go new file mode 100644 index 0000000000..f9ef6b169a --- /dev/null +++ b/tools/keeper/api/exporter_test.go @@ -0,0 +1,297 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + 
"net/http/httptest" + "strings" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/cmd" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/process" + "github.com/taosdata/taoskeeper/util" +) + +var router *gin.Engine +var conf *config.Config +var dbName = "exporter_test" + +func TestMain(m *testing.M) { + conf = config.InitConfig() + log.ConfigLog() + + conf.Metrics.Database.Name = dbName + conn, err := db.NewConnector(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl) + if err != nil { + panic(err) + } + defer conn.Close() + ctx := context.Background() + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn()) + + if _, err = conn.Exec(ctx, fmt.Sprintf("create database if not exists %s", dbName), util.GetQidOwn()); err != nil { + logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("create database %s", dbName), err) + } + gin.SetMode(gin.ReleaseMode) + router = gin.New() + reporter := NewReporter(conf) + reporter.Init(router) + + var createList = []string{ + CreateClusterInfoSql, + CreateDnodeSql, + CreateMnodeSql, + CreateDnodeInfoSql, + CreateDataDirSql, + CreateLogDirSql, + CreateTempDirSql, + CreateVgroupsInfoSql, + CreateVnodeRoleSql, + CreateSummarySql, + CreateGrantInfoSql, + CreateKeeperSql, + } + CreatTables(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl, conf.Metrics.Database.Name, createList) + + processor := process.NewProcessor(conf) + node := NewNodeExporter(processor) + node.Init(router) + m.Run() + if _, err = conn.Exec(ctx, fmt.Sprintf("drop database if exists %s", dbName), util.GetQidOwn()); err != nil { + logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("drop database %s", dbName), err) + } +} + +func TestGetMetrics(t *testing.T) { + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodGet, "/metrics", nil) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) +} + +var now = time.Now() +var nowStr = now.Format(time.RFC3339Nano) + +var report = Report{ + Ts: nowStr, + DnodeID: 1, + DnodeEp: "localhost:7100", + ClusterID: "6980428120398645172", + Protocol: 1, + ClusterInfo: &ClusterInfo{ + FirstEp: "localhost:7100", + FirstEpDnodeID: 1, + Version: "3.0.0.0", + MasterUptime: 2.3090276954462752e-05, + MonitorInterval: 1, + VgroupsTotal: 2, + VgroupsAlive: 2, + VnodesTotal: 2, + VnodesAlive: 2, + ConnectionsTotal: 1, + Dnodes: []Dnode{ + { + DnodeID: 1, + DnodeEp: "localhost:7100", + Status: "ready", + }, + }, + Mnodes: []Mnode{ + { + MnodeID: 1, + MnodeEp: "localhost:7100", + Role: "master", + }, + }, + }, + VgroupInfos: []VgroupInfo{ + { + VgroupID: 1, + DatabaseName: "test", + TablesNum: 1, + Status: "ready", + Vnodes: []Vnode{ + { + DnodeID: 1, + VnodeRole: "LEADER", + }, + { + DnodeID: 2, + VnodeRole: "FOLLOWER", + }, + }, + }, + }, + GrantInfo: &GrantInfo{ + ExpireTime: 2147483647, + TimeseriesUsed: 800, + TimeseriesTotal: 2147483647, + }, + DnodeInfo: DnodeInfo{ + Uptime: 0.000291412026854232, + CPUEngine: 0.0828500414250207, + CPUSystem: 0.4971002485501243, + CPUCores: 12, + MemEngine: 9268, + MemSystem: 54279816, + MemTotal: 65654816, + DiskEngine: 0, + DiskUsed: 39889702912, + DiskTotal: 210304475136, + NetIn: 
4727.45292368682, + NetOut: 2194.251734390486, + IoRead: 3789.8909811694753, + IoWrite: 12311.19920713578, + IoReadDisk: 0, + IoWriteDisk: 12178.394449950447, + ReqSelect: 2, + ReqSelectRate: 0, + ReqInsert: 6, + ReqInsertSuccess: 4, + ReqInsertRate: 0, + ReqInsertBatch: 10, + ReqInsertBatchSuccess: 8, + ReqInsertBatchRate: 0, + Errors: 2, + VnodesNum: 2, + Masters: 2, + HasMnode: 1, + HasQnode: 1, + HasSnode: 1, + HasBnode: 1, + }, + DiskInfos: DiskInfo{ + Datadir: []DataDir{ + { + Name: "/root/TDengine/sim/dnode1/data", + Level: 0, + Avail: decimal.NewFromInt(171049893888), + Used: decimal.NewFromInt(39254581248), + Total: decimal.NewFromInt(210304475136), + }, + { + Name: "/root/TDengine/sim/dnode2/data", + Level: 1, + Avail: decimal.NewFromInt(171049893888), + Used: decimal.NewFromInt(39254581248), + Total: decimal.NewFromInt(210304475136), + }, + }, + Logdir: LogDir{ + Name: "/root/TDengine/sim/dnode1/log", + Avail: decimal.NewFromInt(171049771008), + Used: decimal.NewFromInt(39254704128), + Total: decimal.NewFromInt(210304475136), + }, + Tempdir: TempDir{ + Name: "/tmp", + Avail: decimal.NewFromInt(171049771008), + Used: decimal.NewFromInt(39254704128), + Total: decimal.NewFromInt(210304475136), + }, + }, + LogInfos: LogInfo{ + Summary: []Summary{ + { + Level: "error", + Total: 0, + }, { + Level: "info", + Total: 114, + }, { + Level: "debug", + Total: 117, + }, { + Level: "trace", + Total: 126, + }, + }, + }, +} + +func TestPutMetrics(t *testing.T) { + w := httptest.NewRecorder() + b, _ := json.Marshal(report) + body := strings.NewReader(string(b)) + req, _ := http.NewRequest(http.MethodPost, "/report", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, + conf.TDengine.Port, dbName, conf.TDengine.Usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn()) + }() + + ctx := context.Background() + data, err := conn.Query(ctx, "select info from log_summary", util.GetQidOwn()) + if err != nil { + logger.Errorf("execute sql:%s, error:%s", "select * from log_summary", err) + t.Fatal(err) + } + for _, info := range data.Data { + assert.Equal(t, int32(114), info[0]) + } + + var tenMinutesBefore = now.Add(-10 * time.Minute) + var tenMinutesBeforeStr = tenMinutesBefore.Format(time.RFC3339Nano) + + conf.FromTime = tenMinutesBeforeStr + conf.Transfer = "old_taosd_metric" + + var cmd = cmd.NewCommand(conf) + cmd.Process(conf) + + type TableInfo struct { + TsName string + RowNum int + } + + tables := map[string]*TableInfo{ + "taosd_cluster_basic": {"ts", 1}, + "taosd_cluster_info": {"_ts", 1}, + "taosd_vgroups_info": {"_ts", 1}, + "taosd_dnodes_info": {"_ts", 1}, + "taosd_dnodes_status": {"_ts", 1}, + "taosd_dnodes_data_dirs": {"_ts", 1}, + "taosd_dnodes_log_dirs": {"_ts", 2}, + "taosd_mnodes_info": {"_ts", 1}, + "taosd_vnodes_info": {"_ts", 1}, + } + + for table, tableInfo := range tables { + data, err = conn.Query(ctx, fmt.Sprintf("select %s from %s", tableInfo.TsName, table), util.GetQidOwn()) + if err != nil { + logger.Errorf("execute sql:%s, error:%s", "select * from "+table, err) + t.Fatal(err) + } + + assert.Equal(t, tableInfo.RowNum, len(data.Data)) + assert.Equal(t, now.UnixMilli(), data.Data[0][0].(time.Time).UnixMilli()) + } + + conf.Transfer = "" + conf.Drop = "old_taosd_metric_stables" 
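+	// With Transfer cleared and Drop set to "old_taosd_metric_stables", the next
+	// Process call removes the legacy taosd metric stables; the query below then
+	// verifies that the old m_info stable no longer exists.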
+ cmd.Process(conf) + + data, err = conn.Query(ctx, "select * from information_schema.ins_stables where stable_name = 'm_info'", util.GetQidOwn()) + if err != nil { + logger.Errorf("execute sql:%s, error:%s", "m_info is not droped", err) + t.Fatal(err) + } + assert.Equal(t, 0, len(data.Data)) + logger.Infof("ALL OK !!!") +} diff --git a/tools/keeper/api/gen_metric.go b/tools/keeper/api/gen_metric.go new file mode 100644 index 0000000000..5534fe453d --- /dev/null +++ b/tools/keeper/api/gen_metric.go @@ -0,0 +1,770 @@ +package api + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "regexp" + + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var re = regexp.MustCompile("'+") +var gmLogger = log.GetLogger("GEN") + +var MAX_SQL_LEN = 1000000 + +var STABLE_NAME_KEY = "priv_stn" + +type ColumnSeq struct { + tagNames []string + metricNames []string +} + +var ( + mu sync.RWMutex + gColumnSeqMap = make(map[string]ColumnSeq) +) + +type GeneralMetric struct { + client *http.Client + conn *db.Connector + username string + password string + host string + port int + usessl bool + database string + url *url.URL +} + +type Tag struct { + Name string `json:"name"` + Value string `json:"value"` +} + +type Metric struct { + Name string `json:"name"` + Value float64 `json:"value"` +} + +type MetricGroup struct { + Tags []Tag `json:"tags"` + Metrics []Metric `json:"metrics"` +} + +type StableInfo struct { + Name string `json:"name"` + MetricGroups []MetricGroup `json:"metric_groups"` +} + +type StableArrayInfo struct { + Ts string `json:"ts"` + Protocol int `json:"protocol"` + Tables []StableInfo `json:"tables"` +} + +type ClusterBasic struct { + ClusterId string `json:"cluster_id"` + Ts string `json:"ts"` + FirstEp string `json:"first_ep"` + FirstEpDnodeId int32 `json:"first_ep_dnode_id"` + ClusterVersion string `json:"cluster_version"` +} + +type SlowSqlDetailInfo struct { + StartTs string `json:"start_ts"` + RequestId string `json:"request_id"` + QueryTime int32 `json:"query_time"` + Code int32 `json:"code"` + ErrorInfo string `json:"error_info"` + Type int8 `json:"type"` + RowsNum int64 `json:"rows_num"` + Sql string `json:"sql"` + ProcessName string `json:"process_name"` + ProcessId string `json:"process_id"` + Db string `json:"db"` + User string `json:"user"` + Ip string `json:"ip"` + ClusterId string `json:"cluster_id"` +} + +func (gm *GeneralMetric) Init(c gin.IRouter) error { + c.POST("/general-metric", gm.handleFunc()) + c.POST("/taosd-cluster-basic", gm.handleTaosdClusterBasic()) + c.POST("/slow-sql-detail-batch", gm.handleSlowSqlDetailBatch()) + + conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl) + if err != nil { + gmLogger.Errorf("init db connect error, msg:%s", err) + return err + } + gm.conn = conn + + err = gm.createSTables() + if err != nil { + gmLogger.Errorf("create stable error, msg:%s", err) + return err + } + + err = gm.initColumnSeqMap() + if err != nil { + gmLogger.Errorf("init gColumnSeqMap error, msg:%s", err) + return err + } + + return err +} + +func NewGeneralMetric(conf *config.Config) *GeneralMetric { + + client := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: 
(&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableCompression: true, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + var protocol string + if conf.TDengine.Usessl { + protocol = "https" + } else { + protocol = "http" + } + + imp := &GeneralMetric{ + client: client, + username: conf.TDengine.Username, + password: conf.TDengine.Password, + host: conf.TDengine.Host, + port: conf.TDengine.Port, + usessl: conf.TDengine.Usessl, + database: conf.Metrics.Database.Name, + url: &url.URL{ + Scheme: protocol, + Host: fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port), + Path: "/influxdb/v1/write", + RawQuery: fmt.Sprintf("db=%s&precision=ms&table_name_key=%s", conf.Metrics.Database.Name, STABLE_NAME_KEY), + }, + } + return imp +} + +func (gm *GeneralMetric) handleFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if gm.client == nil { + gmLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + gmLogger.Errorf("get general metric data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get general metric data error. %s", err)}) + return + } + + var request []StableArrayInfo + + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("data:%s", string(data)) + } + + if err := json.Unmarshal(data, &request); err != nil { + gmLogger.Errorf("parse general metric data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse general metric data error: %s", err)}) + return + } + + if len(request) == 0 { + c.JSON(http.StatusOK, gin.H{}) + return + } + + err = gm.handleBatchMetrics(request, qid) + + if err != nil { + gmLogger.Errorf("process records error. msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. 
%s", err)}) + return + } + + c.JSON(http.StatusOK, gin.H{}) + } +} + +func (gm *GeneralMetric) handleBatchMetrics(request []StableArrayInfo, qid uint64) error { + var buf bytes.Buffer + + for _, stableArrayInfo := range request { + if stableArrayInfo.Ts == "" { + gmLogger.Error("ts data is empty") + continue + } + + for _, table := range stableArrayInfo.Tables { + if table.Name == "" { + gmLogger.Error("stable name is empty") + continue + } + + table.Name = strings.ToLower(table.Name) + if _, ok := Load(table.Name); !ok { + Init(table.Name) + } + + for _, metricGroup := range table.MetricGroups { + buf.WriteString(table.Name) + writeTags(metricGroup.Tags, table.Name, &buf) + buf.WriteString(" ") + writeMetrics(metricGroup.Metrics, table.Name, &buf) + buf.WriteString(" ") + buf.WriteString(stableArrayInfo.Ts) + buf.WriteString("\n") + } + } + } + + if buf.Len() > 0 { + return gm.lineWriteBody(&buf, qid) + } + return nil +} + +func (gm *GeneralMetric) lineWriteBody(buf *bytes.Buffer, qid uint64) error { + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + header := map[string][]string{ + "Connection": {"keep-alive"}, + } + req_data := buf.String() + + //build new URL,add qid to URL + urlWithQid := *gm.url + query := urlWithQid.Query() + query.Set("qid", fmt.Sprintf("%d", qid)) + urlWithQid.RawQuery = query.Encode() + + req := &http.Request{ + Method: http.MethodPost, + URL: &urlWithQid, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: header, + Host: gm.url.Host, + } + req.SetBasicAuth(gm.username, gm.password) + + req.Body = io.NopCloser(buf) + + startTime := time.Now() + resp, err := gm.client.Do(req) + + endTime := time.Now() + latency := endTime.Sub(startTime) + + if err != nil { + gmLogger.Errorf("latency:%v, req_data:%v, url:%s, resp:%d, err:%s", latency, req_data, urlWithQid.String(), resp.StatusCode, err) + return err + } + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("latency:%v, req_data:%v, url:%s, resp:%d", latency, req_data, urlWithQid.String(), resp.StatusCode) + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body)) + } + return nil +} + +func (gm *GeneralMetric) handleTaosdClusterBasic() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if gm.conn == nil { + gmLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + gmLogger.Errorf("get taosd cluster basic data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get general metric data error. 
%s", err)}) + return + } + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("receive taosd cluster basic data:%s", string(data)) + } + + var request ClusterBasic + + if err := json.Unmarshal(data, &request); err != nil { + gmLogger.Errorf("parse general metric data error, data:%s, msg:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse general metric data error: %s", err)}) + return + } + + sql := fmt.Sprintf( + "insert into %s.taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values (%s, '%s', %d, '%s') ", + gm.database, request.ClusterId, request.ClusterId, request.Ts, request.FirstEp, request.FirstEpDnodeId, request.ClusterVersion) + + if _, err = gm.conn.Exec(context.Background(), sql, qid); err != nil { + gmLogger.Errorf("insert taosd_cluster_basic error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taosd_cluster_basic error. %s", err)}) + return + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func processString(input string) string { + // remove number in the beginning + re := regexp.MustCompile(`^\d+`) + input = re.ReplaceAllString(input, "") + + // replage "." to "_" + input = strings.ReplaceAll(input, ".", "_") + + // remove special characters + re = regexp.MustCompile(`[^a-zA-Z0-9_]`) + input = re.ReplaceAllString(input, "") + + return input +} + +func (gm *GeneralMetric) handleSlowSqlDetailBatch() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if gm.conn == nil { + gmLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + gmLogger.Errorf("get taos slow sql detail data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get taos slow sql detail data error. 
%s", err)}) + return + } + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("receive taos slow sql detail data:%s", string(data)) + } + + var request []SlowSqlDetailInfo + + if err := json.Unmarshal(data, &request); err != nil { + gmLogger.Errorf("parse taos slow sql detail error, msg:%s", string(data)) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse taos slow sql detail error: %s", err)}) + return + } + + var sql_head = "INSERT INTO `taos_slow_sql_detail` (tbname, `db`, `user`, `ip`, `cluster_id`, `start_ts`, `request_id`, `query_time`, `code`, `error_info`, `type`, `rows_num`, `sql`, `process_name`, `process_id`) values " + var buf bytes.Buffer + buf.WriteString(sql_head) + var qid_counter uint8 = 0 + for _, slowSqlDetailInfo := range request { + if slowSqlDetailInfo.StartTs == "" { + gmLogger.Error("start_ts data is empty") + continue + } + + // cut string to max len + slowSqlDetailInfo.Sql = re.ReplaceAllString(slowSqlDetailInfo.Sql, "'") // 将匹配到的部分替换为一个单引号 + slowSqlDetailInfo.Sql = strings.ReplaceAll(slowSqlDetailInfo.Sql, "'", "''") + slowSqlDetailInfo.Sql = util.SafeSubstring(slowSqlDetailInfo.Sql, 16384) + slowSqlDetailInfo.ClusterId = util.SafeSubstring(slowSqlDetailInfo.ClusterId, 32) + slowSqlDetailInfo.Db = util.SafeSubstring(slowSqlDetailInfo.Db, 1024) + if slowSqlDetailInfo.Db == "" { + slowSqlDetailInfo.Db = "unknown" + } + slowSqlDetailInfo.User = util.SafeSubstring(slowSqlDetailInfo.User, 32) + slowSqlDetailInfo.Ip = util.SafeSubstring(slowSqlDetailInfo.Ip, 32) + slowSqlDetailInfo.ProcessName = util.SafeSubstring(slowSqlDetailInfo.ProcessName, 32) + slowSqlDetailInfo.ProcessId = util.SafeSubstring(slowSqlDetailInfo.ProcessId, 32) + slowSqlDetailInfo.ErrorInfo = util.SafeSubstring(slowSqlDetailInfo.ErrorInfo, 128) + + // max len 192 + var sub_table_name = slowSqlDetailInfo.User + "_" + util.SafeSubstring(slowSqlDetailInfo.Db, 80) + "_" + slowSqlDetailInfo.Ip + "_clusterId_" + slowSqlDetailInfo.ClusterId + sub_table_name = strings.ToLower(processString(sub_table_name)) + + var sql = fmt.Sprintf( + "('%s', '%s', '%s', '%s', '%s', %s, %s, %d, %d, '%s', %d, %d, '%s', '%s', '%s') ", + sub_table_name, + slowSqlDetailInfo.Db, slowSqlDetailInfo.User, slowSqlDetailInfo.Ip, slowSqlDetailInfo.ClusterId, slowSqlDetailInfo.StartTs, slowSqlDetailInfo.RequestId, + slowSqlDetailInfo.QueryTime, slowSqlDetailInfo.Code, slowSqlDetailInfo.ErrorInfo, slowSqlDetailInfo.Type, slowSqlDetailInfo.RowsNum, slowSqlDetailInfo.Sql, + slowSqlDetailInfo.ProcessName, slowSqlDetailInfo.ProcessId) + if (buf.Len() + len(sql)) < MAX_SQL_LEN { + buf.WriteString(sql) + } else { + if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil { + gmLogger.Errorf("insert taos_slow_sql_detail error, sql:%s, error:%s", buf.String(), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. %s", err)}) + return + } + buf.Reset() + buf.WriteString(sql_head) + buf.WriteString(sql) + qid_counter++ + } + } + + if buf.Len() > len(sql_head) { + if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil { + gmLogger.Errorf("insert taos_slow_sql_detail error, data:%s, msg:%s", buf.String(), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. 
%s", err)}) + return + } + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func writeTags(tags []Tag, stbName string, buf *bytes.Buffer) { + var nameArray []string + if columnSeq, ok := Load(stbName); ok { + if len(columnSeq.tagNames) < len(tags) { + // add column, only schema change will hit here + for _, tag := range tags { + if !contains(columnSeq.tagNames, tag.Name) { + columnSeq.tagNames = append(columnSeq.tagNames, tag.Name) + } + } + Store(stbName, columnSeq) + } + nameArray = columnSeq.tagNames + } + + // 将 Tag 切片转换为 map + tagMap := make(map[string]string) + for _, tag := range tags { + tagMap[tag.Name] = tag.Value + } + + for _, name := range nameArray { + if value, ok := tagMap[name]; ok { + if value != "" { + buf.WriteString(fmt.Sprintf(",%s=%s", name, util.EscapeInfluxProtocol(value))) + } else { + buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown")) + gmLogger.Errorf("tag value is empty, tag name:%s", name) + } + } else { + buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown")) + } + } + + // have sub table name + if _, ok := tagMap[STABLE_NAME_KEY]; ok { + return + } + + subTableName := get_sub_table_name_valid(stbName, tagMap) + if subTableName != "" { + buf.WriteString(fmt.Sprintf(",%s=%s", STABLE_NAME_KEY, subTableName)) + } else { + gmLogger.Errorf("get sub stable name error, stable name:%s, tag map:%v", stbName, tagMap) + } +} + +func checkKeysExist(data map[string]string, keys ...string) bool { + for _, key := range keys { + _, ok := data[key] + if !ok { + return false + } + } + return true +} + +func get_sub_table_name_valid(stbName string, tagMap map[string]string) string { + stbName = get_sub_table_name(stbName, tagMap) + return util.ToValidTableName(stbName) +} + +func get_sub_table_name(stbName string, tagMap map[string]string) string { + if strings.HasPrefix(stbName, "taosx") { + switch stbName { + case "taosx_sys": + if checkKeysExist(tagMap, "taosx_id") { + return fmt.Sprintf("sys_%s", tagMap["taosx_id"]) + } + case "taosx_agent": + if checkKeysExist(tagMap, "taosx_id", "agent_id") { + return fmt.Sprintf("agent_%s_%s", tagMap["taosx_id"], tagMap["agent_id"]) + } + case "taosx_connector": + if checkKeysExist(tagMap, "taosx_id", "ds_name", "task_id") { + return fmt.Sprintf("connector_%s_%s_%s", tagMap["taosx_id"], tagMap["ds_name"], tagMap["task_id"]) + } + default: + if strings.HasPrefix(stbName, "taosx_task_") { + ds_name := stbName[len("taosx_task_"):] + if checkKeysExist(tagMap, "taosx_id", "task_id") { + return fmt.Sprintf("task_%s_%s_%s", tagMap["taosx_id"], ds_name, tagMap["task_id"]) + } + } + return "" + } + } + + switch stbName { + case "taosd_cluster_info": + if checkKeysExist(tagMap, "cluster_id") { + return fmt.Sprintf("cluster_%s", tagMap["cluster_id"]) + } + case "taosd_vgroups_info": + if checkKeysExist(tagMap, "cluster_id", "vgroup_id", "database_name") { + return fmt.Sprintf("vginfo_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["vgroup_id"], tagMap["cluster_id"]) + } + case "taosd_dnodes_info": + if checkKeysExist(tagMap, "cluster_id", "dnode_id") { + return fmt.Sprintf("dinfo_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"]) + } + case "taosd_dnodes_status": + if checkKeysExist(tagMap, "cluster_id", "dnode_id") { + return fmt.Sprintf("dstatus_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"]) + } + case "taosd_dnodes_log_dirs": + if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name") { + subTableName := fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"], tagMap["data_dir_name"], 
tagMap["cluster_id"]) + if len(subTableName) <= util.MAX_TABLE_NAME_LEN { + return subTableName + } + return fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"], + util.GetMd5HexStr(tagMap["data_dir_name"]), + tagMap["cluster_id"]) + } + case "taosd_dnodes_data_dirs": + if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name", "data_dir_level") { + subTableName := fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"], tagMap["data_dir_name"], tagMap["data_dir_level"], tagMap["cluster_id"]) + if len(subTableName) <= util.MAX_TABLE_NAME_LEN { + return subTableName + } + return fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"], + util.GetMd5HexStr(tagMap["data_dir_name"]), + tagMap["data_dir_level"], + tagMap["cluster_id"]) + } + case "taosd_mnodes_info": + if checkKeysExist(tagMap, "cluster_id", "mnode_id") { + return fmt.Sprintf("minfo_%s_cluster_%s", tagMap["mnode_id"], tagMap["cluster_id"]) + } + case "taosd_vnodes_info": + if checkKeysExist(tagMap, "cluster_id", "database_name", "vgroup_id", "dnode_id") { + return fmt.Sprintf("vninfo_%s_dnode_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"]) + } + case "taosd_sql_req": + if checkKeysExist(tagMap, "username", "sql_type", "result", "dnode_id", "vgroup_id", "cluster_id") { + return fmt.Sprintf("taosdsql_%s_%s_%s_%s_vgroup_%s_cluster_%s", tagMap["username"], + tagMap["sql_type"], tagMap["result"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"]) + } + case "taos_sql_req": + if checkKeysExist(tagMap, "username", "sql_type", "result", "cluster_id") { + return fmt.Sprintf("taossql_%s_%s_%s_cluster_%s", tagMap["username"], + tagMap["sql_type"], tagMap["result"], tagMap["cluster_id"]) + } + case "taos_slow_sql": + if checkKeysExist(tagMap, "username", "duration", "result", "cluster_id") { + return fmt.Sprintf("slowsql_%s_%s_%s_cluster_%s", tagMap["username"], + tagMap["duration"], tagMap["result"], tagMap["cluster_id"]) + } + + default: + return "" + } + return "" +} + +func contains(array []string, item string) bool { + for _, value := range array { + if value == item { + return true + } + } + return false +} + +func writeMetrics(metrics []Metric, stbName string, buf *bytes.Buffer) { + var nameArray []string + if columnSeq, ok := Load(stbName); ok { + if len(columnSeq.metricNames) < len(metrics) { + // add column, only schema change will hit here + for _, metric := range metrics { + if !contains(columnSeq.metricNames, metric.Name) { + columnSeq.metricNames = append(columnSeq.metricNames, metric.Name) + } + } + Store(stbName, columnSeq) + } + nameArray = columnSeq.metricNames + } + + // 将 Metric 切片转换为 map + metricMap := make(map[string]float64) + for _, metric := range metrics { + metricMap[metric.Name] = metric.Value + } + + for i, name := range nameArray { + if value, ok := metricMap[name]; ok { + buf.WriteString(fmt.Sprintf("%s=%sf64", name, strconv.FormatFloat(value, 'f', -1, 64))) + if i != len(nameArray)-1 { + buf.WriteString(",") + } + } + } +} + +// 存储数据 +func Store(key string, value ColumnSeq) { + mu.Lock() + defer mu.Unlock() + gColumnSeqMap[key] = value +} + +// 加载数据 +func Load(key string) (ColumnSeq, bool) { + mu.RLock() + defer mu.RUnlock() + value, ok := gColumnSeqMap[key] + return value, ok +} + +// 初始化单表的列序列 +func Init(key string) { + mu.Lock() + defer mu.Unlock() + if _, ok := gColumnSeqMap[key]; !ok { + columnSeq := ColumnSeq{ + tagNames: []string{}, + metricNames: []string{}, + } + gColumnSeqMap[key] = 
columnSeq + } +} + +// 初始化所有列序列 +func (gm *GeneralMetric) initColumnSeqMap() error { + query := fmt.Sprintf(` + select stable_name + from information_schema.ins_stables + where db_name = '%s' + and ( + stable_name like 'taos_%%' + or stable_name like 'taosd_%%' + or stable_name like 'taosx_%%' + ) + order by stable_name asc; + `, gm.database) + + data, err := gm.conn.Query(context.Background(), query, util.GetQidOwn()) + + if err != nil { + return err + } + + //get all stables, then init gColumnSeqMap + for _, row := range data.Data { + stableName := row[0].(string) + Init(stableName) + } + //set gColumnSeqMap with desc stables + for tableName, columnSeq := range gColumnSeqMap { + data, err := gm.conn.Query(context.Background(), fmt.Sprintf(`desc %s.%s;`, gm.database, tableName), util.GetQidOwn()) + + if err != nil { + return err + } + + if len(data.Data) < 1 || len(data.Data[0]) < 4 { + return fmt.Errorf("desc %s.%s error", gm.database, tableName) + } + + for i, row := range data.Data { + if i == 0 { + continue + } + + if row[3].(string) == "TAG" { + columnSeq.tagNames = append(columnSeq.tagNames, row[0].(string)) + } else { + columnSeq.metricNames = append(columnSeq.metricNames, row[0].(string)) + } + } + Store(tableName, columnSeq) + } + + gmLogger.Infof("gColumnSeqMap:%v", gColumnSeqMap) + return nil +} + +func (gm *GeneralMetric) createSTables() error { + var createTableSql = "create stable if not exists taosd_cluster_basic " + + "(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " + + "tags (cluster_id varchar(50))" + + if gm.conn == nil { + return errNoConnection + } + _, err := gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn()) + if err != nil { + return err + } + + createTableSql = "create stable if not exists taos_slow_sql_detail" + + " (start_ts TIMESTAMP, request_id BIGINT UNSIGNED PRIMARY KEY, query_time INT, code INT, error_info varchar(128), " + + "type TINYINT, rows_num BIGINT, sql varchar(16384), process_name varchar(32), process_id varchar(32)) " + + "tags (db varchar(1024), `user` varchar(32), ip varchar(32), cluster_id varchar(32))" + + _, err = gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn()) + return err +} diff --git a/tools/keeper/api/gen_metric_test.go b/tools/keeper/api/gen_metric_test.go new file mode 100644 index 0000000000..88987d6544 --- /dev/null +++ b/tools/keeper/api/gen_metric_test.go @@ -0,0 +1,358 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/util" +) + +var router_inited bool = false + +func TestClusterBasic(t *testing.T) { + cfg := util.GetCfg() + + CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options) + + gm := NewGeneralMetric(cfg) + if !router_inited { + err := gm.Init(router) + assert.NoError(t, err) + router_inited = true + } + + testcfg := struct { + name string + ts int64 + tbname string + data string + expect string + }{ + name: "1", + tbname: "taosd_cluster_basic", + ts: 1705655770381, + data: `{"ts":"1705655770381","cluster_id":"7648966395564416484","protocol":2,"first_ep":"ssfood06:6130","first_ep_dnode_id":1,"cluster_version":"3.2.1.0.alp"}`, + expect: "7648966395564416484", + } + + conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, 
gm.usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn()) + }() + + t.Run(testcfg.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(testcfg.data) + req, _ := http.NewRequest(http.MethodPost, "/taosd-cluster-basic", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, cluster_id from %s.%s where ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, testcfg.expect, data.Data[0][1]) + }) + + testcfg = struct { + name string + ts int64 + tbname string + data string + expect string + }{ + name: "1", + tbname: "taos_slow_sql_detail", + ts: 1703226836762, + data: `[{ + "start_ts": "1703226836762", + "request_id": "1", + "query_time": 100, + "code": 0, + "error_info": "", + "type": 1, + "rows_num": 5, + "sql": "select * from abc;", + "process_name": "abc", + "process_id": "123", + "db": "dbname", + "user": "root", + "ip": "127.0.0.1", + "cluster_id": "1234567" + }, + { + "start_ts": "1703226836763", + "request_id": "2", + "query_time": 100, + "code": 0, + "error_info": "", + "type": 1, + "rows_num": 5, + "sql": "insert into abc ('a', 'b') values ('aaa', 'bbb');", + "process_name": "abc", + "process_id": "123", + "db": "dbname", + "user": "root", + "ip": "127.0.0.1", + "cluster_id": "1234567" + }]`, + expect: "1234567", + } + + conn, err = db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn()) + }() + + t.Run(testcfg.name, func(t *testing.T) { + MAX_SQL_LEN = 1000000 + w := httptest.NewRecorder() + body := strings.NewReader(testcfg.data) + req, _ := http.NewRequest(http.MethodPost, "/slow-sql-detail-batch", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select start_ts, cluster_id from %s.%s where start_ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, testcfg.expect, data.Data[0][1]) + }) +} + +func TestGenMetric(t *testing.T) { + cfg := util.GetCfg() + + CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options) + + gm := NewGeneralMetric(cfg) + if !router_inited { + err := gm.Init(router) + assert.NoError(t, err) + router_inited = true + } + + testcfg := struct { + name string + ts []int64 + tbname []string + data string + expect string + }{ + name: "1", + tbname: []string{"taosd_cluster_info", "taosd_dnodes_info"}, + ts: []int64{1703226836761, 1703226836762}, + data: `[{ + "ts": "1703226836761", + "protocol": 2, + "tables": [{ + "name": "taosd_cluster_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }], + "metrics": [{ + "name": "dbs_total", + "value": 1 + }, { + "name": "master_uptime", + "value": 0 + }] + }] + }, { + "name": "taosd_dnodes_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }, { + "name": "dnode_id", + "value": "1" + }, { + "name": "dnode_ep", + "value": "ssfood06:6130" + }], + 
"metrics": [{ + "name": "uptime", + "value": 0 + }, { + "name": "cpu_engine", + "value": 0 + }] + }] + }] + }, { + "ts": "1703226836762", + "protocol": 2, + "tables": [{ + "name": "taosd_cluster_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }], + "metrics": [{ + "name": "dbs_total", + "value": 1 + }, { + "name": "master_uptime", + "value": 0 + }] + }] + }, { + "name": "taosd_dnodes_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }, { + "name": "dnode_id", + "value": "1" + }, { + "name": "dnode_ep", + "value": ", =\"ssfood06:6130" + }], + "metrics": [{ + "name": "uptime", + "value": 0 + }, { + "name": "cpu_engine", + "value": 0 + }] + }] + }] + }]`, + expect: "1397715317673023180", + } + + conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn()) + }() + + t.Run(testcfg.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(testcfg.data) + req, _ := http.NewRequest(http.MethodPost, "/general-metric", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + for _, tbname := range testcfg.tbname { + for _, ts := range testcfg.ts { + data, err := conn.Query(context.Background(), fmt.Sprintf("select _ts, cluster_id from %s.%s where _ts=%d", gm.database, tbname, ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, testcfg.expect, data.Data[0][1]) + } + } + }) +} +func TestGetSubTableName(t *testing.T) { + tests := []struct { + stbName string + tagMap map[string]string + want string + }{ + { + stbName: "taosx_sys", + tagMap: map[string]string{"taosx_id": "123"}, + want: "sys_123", + }, + { + stbName: "taosx_agent", + tagMap: map[string]string{"taosx_id": "123", "agent_id": "456"}, + want: "agent_123_456", + }, + { + stbName: "taosx_connector", + tagMap: map[string]string{"taosx_id": "123", "ds_name": "ds", "task_id": "789"}, + want: "connector_123_ds_789", + }, + { + stbName: "taosx_task_example", + tagMap: map[string]string{"taosx_id": "123", "task_id": "789"}, + want: "task_123_example_789", + }, + { + stbName: "taosd_cluster_info", + tagMap: map[string]string{"cluster_id": "123"}, + want: "cluster_123", + }, + { + stbName: "taosd_vgroups_info", + tagMap: map[string]string{"cluster_id": "123", "vgroup_id": "456", "database_name": "db"}, + want: "vginfo_db_vgroup_456_cluster_123", + }, + { + stbName: "taosd_dnodes_info", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123"}, + want: "dinfo_123_cluster_123", + }, + { + stbName: "taosd_dnodes_status", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123"}, + want: "dstatus_123_cluster_123", + }, + { + stbName: "taosd_dnodes_log_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "log"}, + want: "dlog_123_log_cluster_123", + }, + { + stbName: "taosd_dnodes_log_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": 
"loglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglog"}, + want: "dlog_123_9cdc719961a632a27603cd5ed9f1aee2_cluster_123", + }, + { + stbName: "taosd_dnodes_data_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "data", "data_dir_level": "5"}, + want: "ddata_123_data_level_5_cluster_123", + }, + { + stbName: "taosd_dnodes_data_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "datadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadata", "data_dir_level": "5"}, + want: "ddata_123_03bf8dffdf6b97e08f347c6ae795998b_level_5_cluster_123", + }, + { + stbName: "taosd_mnodes_info", + tagMap: map[string]string{"cluster_id": "123", "mnode_id": "12"}, + want: "minfo_12_cluster_123", + }, + { + stbName: "taosd_vnodes_info", + tagMap: map[string]string{"cluster_id": "123", "database_name": "db", "vgroup_id": "456", "dnode_id": "789"}, + want: "vninfo_db_dnode_789_vgroup_456_cluster_123", + }, + { + stbName: "taosd_sql_req", + tagMap: map[string]string{"username": "user", "sql_type": "select", "result": "success", "dnode_id": "123", "vgroup_id": "456", "cluster_id": "123"}, + want: "taosdsql_user_select_success_123_vgroup_456_cluster_123", + }, + { + stbName: "taos_sql_req", + tagMap: map[string]string{"username": "user", "sql_type": "select", "result": "success", "cluster_id": "123"}, + want: "taossql_user_select_success_cluster_123", + }, + { + stbName: "taos_slow_sql", + tagMap: map[string]string{"username": "user", "duration": "100ms", "result": "success", "cluster_id": "123"}, + want: "slowsql_user_100ms_success_cluster_123", + }, + } + + for _, tt := range tests { + t.Run(tt.stbName, func(t *testing.T) { + if got := get_sub_table_name_valid(tt.stbName, tt.tagMap); got != tt.want { + panic(fmt.Sprintf("get_sub_table_name() = %v, want %v", got, tt.want)) + } + }) + } +} diff --git a/tools/keeper/api/https_test.go b/tools/keeper/api/https_test.go new file mode 100644 index 0000000000..c73cbfc2e4 --- /dev/null +++ b/tools/keeper/api/https_test.go @@ -0,0 +1,127 @@ +package api + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + crand "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "log" + "math/big" + "net/http" + "net/http/httputil" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/util" +) + +func TestHttps(t *testing.T) { + server := startProxy() + defer server.Shutdown(context.Background()) + + cfg := util.GetCfg() + cfg.TDengine.Usessl = true + cfg.TDengine.Port = 34443 + + CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, 
cfg.Metrics.Database.Name, cfg.Metrics.Database.Options) + + conn, err := db.NewConnectorWithDb(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.Metrics.Database.Name, cfg.TDengine.Usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Metrics.Database.Name), util.GetQidOwn()) + }() + + data, err := conn.Query(context.Background(), "select server_version()", util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) +} + +func generateSelfSignedCert() (tls.Certificate, error) { + priv, err := ecdsa.GenerateKey(elliptic.P384(), crand.Reader) + if err != nil { + return tls.Certificate{}, err + } + + notBefore := time.Now() + notAfter := notBefore.Add(365 * 24 * time.Hour) + + serialNumber, err := crand.Int(crand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) + if err != nil { + return tls.Certificate{}, err + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Your Company"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(crand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return tls.Certificate{}, err + } + + certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) + keyPEM, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return tls.Certificate{}, err + } + + keyPEMBlock := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyPEM}) + + return tls.X509KeyPair(certPEM, keyPEMBlock) +} + +func startProxy() *http.Server { + // Generate self-signed certificate + cert, err := generateSelfSignedCert() + if err != nil { + log.Fatalf("Failed to generate self-signed certificate: %v", err) + } + + target := "http://127.0.0.1:6041" + proxyURL, err := url.Parse(target) + if err != nil { + log.Fatalf("Failed to parse target URL: %v", err) + } + + proxy := httputil.NewSingleHostReverseProxy(proxyURL) + proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, e error) { + http.Error(w, "Proxy error", http.StatusBadGateway) + } + mux := http.NewServeMux() + mux.Handle("/", proxy) + + server := &http.Server{ + Addr: ":34443", + Handler: mux, + TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}}, + // Setup server timeouts for better handling of idle connections and slowloris attacks + WriteTimeout: 10 * time.Second, + ReadTimeout: 10 * time.Second, + IdleTimeout: 30 * time.Second, + } + + log.Println("Starting server on :34443") + go func() { + err = server.ListenAndServeTLS("", "") + if err != nil && err != http.ErrServerClosed { + log.Fatalf("Failed to start HTTPS server: %v", err) + } + }() + return server +} diff --git a/tools/keeper/api/nodeexporter.go b/tools/keeper/api/nodeexporter.go new file mode 100644 index 0000000000..7b87a14336 --- /dev/null +++ b/tools/keeper/api/nodeexporter.go @@ -0,0 +1,32 @@ +package api + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/taosdata/taoskeeper/process" +) + +type NodeExporter struct { + processor *process.Processor +} + +func NewNodeExporter(processor *process.Processor) *NodeExporter { + return &NodeExporter{processor: processor} 
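+	// (The processor is registered as a Prometheus collector in Init below,
+	// not here, so the constructor itself has no side effects.)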
+} + +func (z *NodeExporter) Init(c gin.IRouter) { + reg := prometheus.NewPedanticRegistry() + reg.MustRegister(z.processor) + c.GET("metrics", z.myMiddleware(promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))) +} + +func (z *NodeExporter) myMiddleware(next http.Handler) gin.HandlerFunc { + return func(c *gin.Context) { + z.processor.Process() + // call Prometheus handler + next.ServeHTTP(c.Writer, c.Request) + } +} diff --git a/tools/keeper/api/report.go b/tools/keeper/api/report.go new file mode 100644 index 0000000000..eb9c3856f8 --- /dev/null +++ b/tools/keeper/api/report.go @@ -0,0 +1,478 @@ +package api + +import ( + "bytes" + "context" + "fmt" + "strconv" + "strings" + "sync/atomic" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/go-utils/json" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var logger = log.GetLogger("REP") + +var createList = []string{ + // CreateClusterInfoSql, + // CreateDnodeSql, + // CreateMnodeSql, + // CreateDnodeInfoSql, + // CreateDataDirSql, + // CreateLogDirSql, + // CreateTempDirSql, + // CreateVgroupsInfoSql, + // CreateVnodeRoleSql, + // CreateSummarySql, + // CreateGrantInfoSql, + CreateKeeperSql, +} + +type Reporter struct { + username string + password string + host string + port int + usessl bool + dbname string + databaseOptions map[string]interface{} + totalRep atomic.Value +} + +func NewReporter(conf *config.Config) *Reporter { + r := &Reporter{ + username: conf.TDengine.Username, + password: conf.TDengine.Password, + host: conf.TDengine.Host, + port: conf.TDengine.Port, + usessl: conf.TDengine.Usessl, + dbname: conf.Metrics.Database.Name, + databaseOptions: conf.Metrics.Database.Options, + } + r.totalRep.Store(0) + return r +} + +func (r *Reporter) Init(c gin.IRouter) { + c.POST("report", r.handlerFunc()) + r.createDatabase() + r.creatTables() + // todo: it can delete in the future. + if r.shouldDetectFields() { + r.detectGrantInfoFieldType() + r.detectClusterInfoFieldType() + r.detectVgroupsInfoType() + } +} + +func (r *Reporter) getConn() *db.Connector { + conn, err := db.NewConnector(r.username, r.password, r.host, r.port, r.usessl) + if err != nil { + qid := util.GetQidOwn() + + logger := logger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + logger.Errorf("connect to database error, msg:%s", err) + panic(err) + } + return conn +} + +func (r *Reporter) detectGrantInfoFieldType() { + // `expire_time` `timeseries_used` `timeseries_total` in table `grant_info` changed to bigint from TS-3003. + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + r.detectFieldType(ctx, conn, "grants_info", "expire_time", "bigint") + r.detectFieldType(ctx, conn, "grants_info", "timeseries_used", "bigint") + r.detectFieldType(ctx, conn, "grants_info", "timeseries_total", "bigint") + if r.tagExist(ctx, conn, "grants_info", "dnode_id") { + r.dropTag(ctx, conn, "grants_info", "dnode_id") + } + if r.tagExist(ctx, conn, "grants_info", "dnode_ep") { + r.dropTag(ctx, conn, "grants_info", "dnode_ep") + } +} + +func (r *Reporter) detectClusterInfoFieldType() { + // `tbs_total` in table `cluster_info` changed to bigint from TS-3003. 
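+	// The change is applied by detectFieldType below, which drops the old int
+	// column and re-adds it as bigint; note that values stored in the old
+	// column are discarded by that drop.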
+ ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + r.detectFieldType(ctx, conn, "cluster_info", "tbs_total", "bigint") + + // add column `topics_total` and `streams_total` from TD-22032 + // if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "topics_total"); !exists { + // logger.Warningf("## %s.cluster_info.topics_total not exists, will add it", r.dbname) + // r.addColumn(ctx, conn, "cluster_info", "topics_total", "int") + // } + // if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "streams_total"); !exists { + // logger.Warningf("## %s.cluster_info.streams_total not exists, will add it", r.dbname) + // r.addColumn(ctx, conn, "cluster_info", "streams_total", "int") + // } +} + +func (r *Reporter) detectVgroupsInfoType() { + // `tables_num` in table `vgroups_info` changed to bigint from TS-3003. + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + r.detectFieldType(ctx, conn, "vgroups_info", "tables_num", "bigint") +} + +func (r *Reporter) detectFieldType(ctx context.Context, conn *db.Connector, table, field, fieldType string) { + _, colType := r.columnInfo(ctx, conn, table, field) + if colType == "INT" { + logger.Warningf("%s.%s.%s type is %s, will change to %s", r.dbname, table, field, colType, fieldType) + // drop column `tables_num` + r.dropColumn(ctx, conn, table, field) + + // add column `tables_num` + r.addColumn(ctx, conn, table, field, fieldType) + } +} + +func (r *Reporter) shouldDetectFields() bool { + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + version, err := r.serverVersion(ctx, conn) + if err != nil { + logger.Errorf("get server version error:%s", err) + return false + } + + // if server version is less than v3.0.3.0, should not detect fields. + versions := strings.Split(version, ".") + if len(versions) < 4 { + logger.Errorf("get server version error. version:%s", version) + return false + } + + v1, _ := strconv.Atoi(versions[0]) + v2, _ := strconv.Atoi(versions[1]) + v3, _ := strconv.Atoi(versions[2]) + + if v1 > 3 || v2 > 0 || v3 >= 3 { + return true + } + + return false +} + +func (r *Reporter) serverVersion(ctx context.Context, conn *db.Connector) (version string, err error) { + res, err := conn.Query(ctx, "select server_version()", util.GetQidOwn()) + if err != nil { + logger.Errorf("get server version error, msg:%s", err) + return + } + + if len(res.Data) == 0 { + logger.Errorf("get server version error. response:%+v", res) + return + } + + if len(res.Data) != 1 && len(res.Data[0]) != 1 { + logger.Errorf("get server version error. response:%+v", res) + return + } + + version = res.Data[0][0].(string) + + return +} + +func (r *Reporter) columnInfo(ctx context.Context, conn *db.Connector, table string, field string) (exists bool, colType string) { + res, err := conn.Query(ctx, fmt.Sprintf("select col_type from information_schema.ins_columns where table_name='%s' and db_name='%s' and col_name='%s'", table, r.dbname, field), util.GetQidOwn()) + if err != nil { + logger.Errorf("get %s field type error, msg:%s", r.dbname, err) + panic(err) + } + + if len(res.Data) == 0 { + return + } + + if len(res.Data) != 1 && len(res.Data[0]) != 1 { + logger.Errorf("get field type for %s error. response:%+v", table, res) + panic(fmt.Sprintf("get field type for %s error. 
response:%+v", table, res)) + } + + exists = true + colType = res.Data[0][0].(string) + colType = strings.ToUpper(colType) + return +} + +func (r *Reporter) tagExist(ctx context.Context, conn *db.Connector, stable string, tag string) (exists bool) { + res, err := conn.Query(ctx, fmt.Sprintf("select tag_name from information_schema.ins_tags where stable_name='%s' and db_name='%s' and tag_name='%s'", stable, r.dbname, tag), util.GetQidOwn()) + if err != nil { + logger.Errorf("get %s tag_name error, msg:%s", r.dbname, err) + panic(err) + } + + if len(res.Data) == 0 { + exists = false + return + } + + if len(res.Data) != 1 && len(res.Data[0]) != 1 { + logger.Errorf("get tag_name for %s error. response:%+v", stable, res) + panic(fmt.Sprintf("get tag_name for %s error. response:%+v", stable, res)) + } + + exists = true + return +} + +func (r *Reporter) dropColumn(ctx context.Context, conn *db.Connector, table string, field string) { + if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s drop column %s", r.dbname, table, field), util.GetQidOwn()); err != nil { + logger.Errorf("drop column %s from table %s error, msg:%s", field, table, err) + panic(err) + } +} + +func (r *Reporter) dropTag(ctx context.Context, conn *db.Connector, stable string, tag string) { + if _, err := conn.Exec(ctx, fmt.Sprintf("alter stable %s.%s drop tag %s", r.dbname, stable, tag), util.GetQidOwn()); err != nil { + logger.Errorf("drop tag %s from stable %s error, msg:%s", tag, stable, err) + panic(err) + } +} + +func (r *Reporter) addColumn(ctx context.Context, conn *db.Connector, table string, field string, fieldType string) { + if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s add column %s %s", r.dbname, table, field, fieldType), util.GetQidOwn()); err != nil { + logger.Errorf("add column %s to table %s error, msg:%s", field, table, err) + panic(err) + } +} + +func (r *Reporter) createDatabase() { + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + createDBSql := r.generateCreateDBSql() + logger.Warningf("create database sql: %s", createDBSql) + + if _, err := conn.Exec(ctx, createDBSql, util.GetQidOwn()); err != nil { + logger.Errorf("create database %s error, msg:%v", r.dbname, err) + panic(err) + } +} + +func (r *Reporter) generateCreateDBSql() string { + var buf bytes.Buffer + buf.WriteString("create database if not exists ") + buf.WriteString(r.dbname) + + for k, v := range r.databaseOptions { + buf.WriteString(" ") + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + return buf.String() +} + +func (r *Reporter) creatTables() { + ctx := context.Background() + conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + defer r.closeConn(conn) + + for _, createSql := range createList { + logger.Infof("execute sql:%s", createSql) + if _, err = conn.Exec(ctx, createSql, util.GetQidOwn()); err != nil { + logger.Errorf("execute sql:%s, error:%s", createSql, err) + } + } +} + +func (r *Reporter) closeConn(conn *db.Connector) { + if err := conn.Close(); err != nil { + logger.Errorf("close connection error, msg:%s", err) + } +} + +func (r *Reporter) handlerFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + logger := logger.WithFields( + logrus.Fields{config.ReqIDKey: 
qid}, + ) + + r.recordTotalRep() + // data parse + data, err := c.GetRawData() + if err != nil { + logger.Errorf("receiving taosd data error, msg:%s", err) + return + } + var report Report + + logger.Tracef("report data:%s", string(data)) + if e := json.Unmarshal(data, &report); e != nil { + logger.Errorf("error occurred while unmarshal request, data:%s, error:%s", data, err) + return + } + var sqls []string + if report.ClusterInfo != nil { + sqls = append(sqls, insertClusterInfoSql(*report.ClusterInfo, report.ClusterID, report.Protocol, report.Ts)...) + } + sqls = append(sqls, insertDnodeSql(report.DnodeInfo, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)) + if report.GrantInfo != nil { + sqls = append(sqls, insertGrantSql(*report.GrantInfo, report.DnodeID, report.ClusterID, report.Ts)) + } + sqls = append(sqls, insertDataDirSql(report.DiskInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...) + for _, group := range report.VgroupInfos { + sqls = append(sqls, insertVgroupSql(group, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...) + } + sqls = append(sqls, insertLogSummary(report.LogInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)) + + conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + defer r.closeConn(conn) + ctx := context.Background() + + for _, sql := range sqls { + logger.Tracef("execute sql:%s", sql) + if _, err := conn.Exec(ctx, sql, util.GetQidOwn()); err != nil { + logger.Errorf("execute sql error, sql:%s, error:%s", sql, err) + } + } + } +} + +func (r *Reporter) recordTotalRep() { + old := r.totalRep.Load().(int) + for i := 0; i < 3; i++ { + r.totalRep.CompareAndSwap(old, old+1) + } +} + +func (r *Reporter) GetTotalRep() *atomic.Value { + return &r.totalRep +} + +func insertClusterInfoSql(info ClusterInfo, ClusterID string, protocol int, ts string) []string { + var sqls []string + var dtotal, dalive, mtotal, malive int + for _, dnode := range info.Dnodes { + sqls = append(sqls, fmt.Sprintf("insert into d_info_%s using d_info tags (%d, '%s', '%s') values ('%s', '%s')", + ClusterID+strconv.Itoa(dnode.DnodeID), dnode.DnodeID, dnode.DnodeEp, ClusterID, ts, dnode.Status)) + dtotal++ + if "ready" == dnode.Status { + dalive++ + } + } + + for _, mnode := range info.Mnodes { + sqls = append(sqls, fmt.Sprintf("insert into m_info_%s using m_info tags (%d, '%s', '%s') values ('%s', '%s')", + ClusterID+strconv.Itoa(mnode.MnodeID), mnode.MnodeID, mnode.MnodeEp, ClusterID, ts, mnode.Role)) + mtotal++ + //LEADER FOLLOWER CANDIDATE ERROR + if "ERROR" != mnode.Role { + malive++ + } + } + + sqls = append(sqls, fmt.Sprintf( + "insert into cluster_info_%s using cluster_info tags('%s') (ts, first_ep, first_ep_dnode_id, version, "+ + "master_uptime, monitor_interval, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, "+ + "mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, "+ + "topics_total, streams_total, protocol) values ('%s', '%s', %d, '%s', %f, %d, %d, %d, %d, %d, %d, %d, %d, "+ + "%d, %d, %d, %d, %d, %d, %d, %d)", + ClusterID, ClusterID, ts, info.FirstEp, info.FirstEpDnodeID, info.Version, info.MasterUptime, info.MonitorInterval, + info.DbsTotal, info.TbsTotal, info.StbsTotal, dtotal, dalive, mtotal, malive, info.VgroupsTotal, info.VgroupsAlive, + info.VnodesTotal, info.VnodesAlive, info.ConnectionsTotal, info.TopicsTotal, 
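+		// the value list must stay aligned with the column list in the format
+		// string above (and with the cluster_info schema defined in tables.go),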
info.StreamsTotal, protocol)) + return sqls +} + +func insertDnodeSql(info DnodeInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) string { + return fmt.Sprintf("insert into dnode_info_%s using dnodes_info tags (%d, '%s', '%s') values ('%s', %f, %f, %f, %f, %d, %d, %d, %d, %d, %d, %f, %f, %f, %f, %f, %f, %d, %f, %d, %d, %f, %d, %d, %f, %d, %d, %d, %d, %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, info.Uptime, info.CPUEngine, info.CPUSystem, info.CPUCores, info.MemEngine, info.MemSystem, info.MemTotal, + info.DiskEngine, info.DiskUsed, info.DiskTotal, info.NetIn, info.NetOut, info.IoRead, info.IoWrite, + info.IoReadDisk, info.IoWriteDisk, info.ReqSelect, info.ReqSelectRate, info.ReqInsert, info.ReqInsertSuccess, + info.ReqInsertRate, info.ReqInsertBatch, info.ReqInsertBatchSuccess, info.ReqInsertBatchRate, info.Errors, + info.VnodesNum, info.Masters, info.HasMnode, info.HasQnode, info.HasSnode, info.HasBnode) +} + +func insertDataDirSql(disk DiskInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string { + var sqls []string + for _, data := range disk.Datadir { + sqls = append(sqls, + fmt.Sprintf("insert into data_dir_%s using data_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, data.Name, data.Level, data.Avail.IntPart(), data.Used.IntPart(), data.Total.IntPart()), + ) + } + sqls = append(sqls, + fmt.Sprintf("insert into log_dir_%s using log_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, disk.Logdir.Name, disk.Logdir.Avail.IntPart(), disk.Logdir.Used.IntPart(), disk.Logdir.Total.IntPart()), + fmt.Sprintf("insert into temp_dir_%s using temp_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, disk.Tempdir.Name, disk.Tempdir.Avail.IntPart(), disk.Tempdir.Used.IntPart(), disk.Tempdir.Total.IntPart()), + ) + return sqls +} + +func insertVgroupSql(g VgroupInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string { + var sqls []string + sqls = append(sqls, fmt.Sprintf("insert into vgroups_info_%s using vgroups_info tags (%d, '%s', '%s') "+ + "(ts, vgroup_id, database_name, tables_num, status, ) values ( '%s','%d', '%s', %d, '%s')", + ClusterID+strconv.Itoa(DnodeID)+strconv.Itoa(g.VgroupID), DnodeID, DnodeEp, ClusterID, + ts, g.VgroupID, g.DatabaseName, g.TablesNum, g.Status)) + for _, v := range g.Vnodes { + sqls = append(sqls, fmt.Sprintf("insert into vnodes_role_%s using vnodes_role tags (%d, '%s', '%s') values ('%s', '%s')", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, v.VnodeRole)) + } + return sqls +} + +func insertLogSummary(log LogInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) string { + var e, info, debug, trace int + for _, s := range log.Summary { + switch s.Level { + case "error": + e = s.Total + case "info": + info = s.Total + case "debug": + debug = s.Total + case "trace": + trace = s.Total + } + } + return fmt.Sprintf("insert into log_summary_%s using log_summary tags (%d, '%s', '%s') values ('%s', %d, %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, e, info, debug, trace) +} + +func insertGrantSql(g GrantInfo, DnodeID int, ClusterID string, ts string) string { + return fmt.Sprintf("insert into grants_info_%s using grants_info tags ('%s') (ts, expire_time, "+ + "timeseries_used, 
timeseries_total) values ('%s', %d, %d, %d)", ClusterID+strconv.Itoa(DnodeID), ClusterID, ts, g.ExpireTime, g.TimeseriesUsed, g.TimeseriesTotal) +} diff --git a/tools/keeper/api/tables.go b/tools/keeper/api/tables.go new file mode 100644 index 0000000000..90f0e09721 --- /dev/null +++ b/tools/keeper/api/tables.go @@ -0,0 +1,286 @@ +package api + +import ( + "strconv" + + "github.com/shopspring/decimal" +) + +type Report struct { + Ts string `json:"ts"` + DnodeID int `json:"dnode_id"` + DnodeEp string `json:"dnode_ep"` + ClusterID string `json:"cluster_id"` + Protocol int `json:"protocol"` + ClusterInfo *ClusterInfo `json:"cluster_info"` // only reported by master + StbInfos []StbInfo `json:"stb_infos"` + VgroupInfos []VgroupInfo `json:"vgroup_infos"` // only reported by master + GrantInfo *GrantInfo `json:"grant_info"` // only reported by master + DnodeInfo DnodeInfo `json:"dnode_info"` + DiskInfos DiskInfo `json:"disk_infos"` + LogInfos LogInfo `json:"log_infos"` +} + +type ClusterInfo struct { + FirstEp string `json:"first_ep"` + FirstEpDnodeID int `json:"first_ep_dnode_id"` + Version string `json:"version"` + MasterUptime float32 `json:"master_uptime"` + MonitorInterval int `json:"monitor_interval"` + DbsTotal int `json:"dbs_total"` + TbsTotal int64 `json:"tbs_total"` // change to bigint since TS-3003 + StbsTotal int `json:"stbs_total"` + VgroupsTotal int `json:"vgroups_total"` + VgroupsAlive int `json:"vgroups_alive"` + VnodesTotal int `json:"vnodes_total"` + VnodesAlive int `json:"vnodes_alive"` + ConnectionsTotal int `json:"connections_total"` + TopicsTotal int `json:"topics_total"` + StreamsTotal int `json:"streams_total"` + Dnodes []Dnode `json:"dnodes"` + Mnodes []Mnode `json:"mnodes"` +} + +var dnodeEpLen = strconv.Itoa(255) + +var CreateClusterInfoSql = "create table if not exists cluster_info (" + + "ts timestamp, " + + "first_ep binary(134), " + + "first_ep_dnode_id int, " + + "version binary(12), " + + "master_uptime float, " + + "monitor_interval int, " + + "dbs_total int, " + + "tbs_total bigint, " + // change to bigint since TS-3003 + "stbs_total int, " + + "dnodes_total int, " + + "dnodes_alive int, " + + "mnodes_total int, " + + "mnodes_alive int, " + + "vgroups_total int, " + + "vgroups_alive int, " + + "vnodes_total int, " + + "vnodes_alive int, " + + "connections_total int, " + + "topics_total int, " + + "streams_total int, " + + "protocol int " + + ") tags (cluster_id nchar(32))" + +type Dnode struct { + DnodeID int `json:"dnode_id"` + DnodeEp string `json:"dnode_ep"` + Status string `json:"status"` +} + +var CreateDnodeSql = "create table if not exists d_info (" + + "ts timestamp, " + + "status binary(10)" + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type Mnode struct { + MnodeID int `json:"mnode_id"` + MnodeEp string `json:"mnode_ep"` + Role string `json:"role"` +} + +var CreateMnodeSql = "create table if not exists m_info (" + + "ts timestamp, " + + "role binary(10)" + + ") tags (mnode_id int, mnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type DnodeInfo struct { + Uptime float32 `json:"uptime"` + CPUEngine float32 `json:"cpu_engine"` + CPUSystem float32 `json:"cpu_system"` + CPUCores float32 `json:"cpu_cores"` + MemEngine int `json:"mem_engine"` + MemSystem int `json:"mem_system"` + MemTotal int `json:"mem_total"` + DiskEngine int64 `json:"disk_engine"` + DiskUsed int64 `json:"disk_used"` + DiskTotal int64 `json:"disk_total"` + NetIn float32 `json:"net_in"` + NetOut float32 `json:"net_out"` + IoRead 
float32 `json:"io_read"` + IoWrite float32 `json:"io_write"` + IoReadDisk float32 `json:"io_read_disk"` + IoWriteDisk float32 `json:"io_write_disk"` + ReqSelect int `json:"req_select"` + ReqSelectRate float32 `json:"req_select_rate"` + ReqInsert int `json:"req_insert"` + ReqInsertSuccess int `json:"req_insert_success"` + ReqInsertRate float32 `json:"req_insert_rate"` + ReqInsertBatch int `json:"req_insert_batch"` + ReqInsertBatchSuccess int `json:"req_insert_batch_success"` + ReqInsertBatchRate float32 `json:"req_insert_batch_rate"` + Errors int `json:"errors"` + VnodesNum int `json:"vnodes_num"` + Masters int `json:"masters"` + HasMnode int8 `json:"has_mnode"` + HasQnode int8 `json:"has_qnode"` + HasSnode int8 `json:"has_snode"` + HasBnode int8 `json:"has_bnode"` +} + +var CreateDnodeInfoSql = "create table if not exists dnodes_info (" + + "ts timestamp, " + + "uptime float, " + + "cpu_engine float, " + + "cpu_system float, " + + "cpu_cores float, " + + "mem_engine int, " + + "mem_system int, " + + "mem_total int, " + + "disk_engine bigint, " + + "disk_used bigint, " + + "disk_total bigint, " + + "net_in float, " + + "net_out float, " + + "io_read float, " + + "io_write float, " + + "io_read_disk float, " + + "io_write_disk float, " + + "req_select int, " + + "req_select_rate float, " + + "req_insert int, " + + "req_insert_success int, " + + "req_insert_rate float, " + + "req_insert_batch int, " + + "req_insert_batch_success int, " + + "req_insert_batch_rate float, " + + "errors int, " + + "vnodes_num int, " + + "masters int, " + + "has_mnode int, " + + "has_qnode int, " + + "has_snode int, " + + "has_bnode int " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type DiskInfo struct { + Datadir []DataDir `json:"datadir"` + Logdir LogDir `json:"logdir"` + Tempdir TempDir `json:"tempdir"` +} + +type DataDir struct { + Name string `json:"name"` + Level int `json:"level"` + Avail decimal.Decimal `json:"avail"` + Used decimal.Decimal `json:"used"` + Total decimal.Decimal `json:"total"` +} + +var CreateDataDirSql = "create table if not exists data_dir (" + + "ts timestamp, " + + "name nchar(200), " + + "`level` int, " + + "avail bigint, " + + "used bigint, " + + "total bigint" + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type LogDir struct { + Name string `json:"name"` + Avail decimal.Decimal `json:"avail"` + Used decimal.Decimal `json:"used"` + Total decimal.Decimal `json:"total"` +} + +var CreateLogDirSql = "create table if not exists log_dir (" + + "ts timestamp, " + + "name nchar(200), " + + "avail bigint, " + + "used bigint, " + + "total bigint" + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type TempDir struct { + Name string `json:"name"` + Avail decimal.Decimal `json:"avail"` + Used decimal.Decimal `json:"used"` + Total decimal.Decimal `json:"total"` +} + +var CreateTempDirSql = "create table if not exists temp_dir(" + + "ts timestamp, " + + "name nchar(200), " + + "avail bigint, " + + "used bigint, " + + "total bigint " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type StbInfo struct { + StbName string `json:"stb_name"` + DataBaseName string `json:"database_name"` +} + +type VgroupInfo struct { + VgroupID int `json:"vgroup_id"` + DatabaseName string `json:"database_name"` + TablesNum int64 `json:"tables_num"` + Status string `json:"status"` + Vnodes []Vnode `json:"vnodes"` +} + +var CreateVgroupsInfoSql = "create table 
if not exists vgroups_info (" + + "ts timestamp, " + + "vgroup_id int, " + + "database_name binary(33), " + + "tables_num bigint, " + // change to bigint since TS-3003 + "status binary(512) " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type Vnode struct { + DnodeID int `json:"dnode_id"` + VnodeRole string `json:"vnode_role"` +} + +var CreateVnodeRoleSql = "create table if not exists vnodes_role (" + + "ts timestamp, " + + "vnode_role binary(10) " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type LogInfo struct { + Summary []Summary `json:"summary"` +} + +type Log struct { + Ts string `json:"ts"` + Level string `json:"level"` + Content string `json:"content"` +} + +type Summary struct { + Level string `json:"level"` + Total int `json:"total"` +} + +var CreateSummarySql = "create table if not exists log_summary(" + + "ts timestamp, " + + "error int, " + + "info int, " + + "debug int, " + + "trace int " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type GrantInfo struct { + ExpireTime int64 `json:"expire_time"` + TimeseriesUsed int64 `json:"timeseries_used"` + TimeseriesTotal int64 `json:"timeseries_total"` +} + +var CreateGrantInfoSql = "create table if not exists grants_info(" + + "ts timestamp, " + + "expire_time bigint, " + + "timeseries_used bigint, " + + "timeseries_total bigint " + + ") tags (cluster_id nchar(32))" + +var CreateKeeperSql = "create table if not exists keeper_monitor (" + + "ts timestamp, " + + "cpu float, " + + "mem float, " + + "total_reports int " + + ") tags (identify nchar(50))" diff --git a/tools/keeper/api/zabbix.go b/tools/keeper/api/zabbix.go new file mode 100644 index 0000000000..8b7cb75992 --- /dev/null +++ b/tools/keeper/api/zabbix.go @@ -0,0 +1,113 @@ +package api + +import ( + "net/http" + "sort" + "strings" + + "github.com/gin-gonic/gin" + "github.com/taosdata/taoskeeper/process" + "github.com/taosdata/taoskeeper/util/pool" +) + +type Zabbix struct { + processor *process.Processor + floatGroup []*process.Metric + strGroup []*process.Metric +} + +func NewZabbix(processor *process.Processor) *Zabbix { + z := &Zabbix{processor: processor} + z.processorMetrics() + return z +} + +type zabbixMetric struct { + Data []*ZMetric `json:"data"` +} + +type ZMetric struct { + Metric string `json:"{#METRIC}"` + Key string `json:"key"` + Value interface{} `json:"value"` +} + +const ( + FloatType = iota + 1 + StringType +) + +func (z *Zabbix) Init(c gin.IRouter) { + api := c.Group("zabbix") + api.GET("float", z.getFloat) + api.GET("string", z.getString) +} + +func (z *Zabbix) getFloat(c *gin.Context) { + z.returnData(c, FloatType) +} + +func (z *Zabbix) getString(c *gin.Context) { + z.returnData(c, StringType) +} + +func (z *Zabbix) returnData(c *gin.Context, valueType int) { + var metrics []*process.Metric + switch valueType { + case FloatType: + metrics = z.floatGroup + case StringType: + metrics = z.strGroup + } + var d zabbixMetric + b := pool.BytesPoolGet() + defer pool.BytesPoolPut(b) + for _, metric := range metrics { + values := metric.GetValue() + for _, value := range values { + label := z.sortLabel(value.Label) + b.Reset() + b.WriteString(metric.FQName) + if len(label) > 0 { + b.WriteByte(',') + b.WriteString(label) + } + metricName := b.String() + d.Data = append(d.Data, &ZMetric{ + Metric: metricName, + Key: metricName, + Value: value.Value, + }) + } + } + c.JSON(http.StatusOK, d) +} + +func (z *Zabbix) sortLabel(labels 
map[string]string) string { + if len(labels) == 0 { + return "" + } + result := make([]string, 0, len(labels)) + b := pool.BytesPoolGet() + defer pool.BytesPoolPut(b) + for k, v := range labels { + b.Reset() + b.WriteString(k) + b.WriteByte('=') + b.WriteString(v) + result = append(result, b.String()) + } + sort.Strings(result) + return strings.Join(result, "_") +} + +func (z *Zabbix) processorMetrics() { + metrics := z.processor.GetMetric() + for _, metric := range metrics { + if metric.Type == process.Gauge || metric.Type == process.Counter { + z.floatGroup = append(z.floatGroup, metric) + } else if metric.Type == process.Info { + z.strGroup = append(z.strGroup, metric) + } + } +} diff --git a/tools/keeper/ci/changelog-generate.sh b/tools/keeper/ci/changelog-generate.sh new file mode 100755 index 0000000000..516ebc5272 --- /dev/null +++ b/tools/keeper/ci/changelog-generate.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +last=$(git describe --tags --abbrev=0 2>/dev/null) + +if [ "$last" = "" ]; then + git log --pretty=format:'%s' | sort -k2n | uniq >./releaseNotes.tmp +else + git log --pretty=format:'%s' $last..HEAD | sort -k2n | uniq >./releaseNotes.tmp +fi + +function part() { + name=$1 + pattern=$2 + changes=$(grep -P '\[\w+-\d+\]\s*<('$pattern')>:' ./releaseNotes.tmp | sed -E 's/ *<('$pattern')>//' | sed 's/[ci skip]\s*//' | awk -F: '{print "- " $1 ": " $2}' | sort | uniq) + lines=$(printf "\\$changes\n" | wc -l) + # echo $name $pattern $lines >&2 + if [ $lines -gt 0 ]; then + echo "### $name" + echo "" + echo "$changes" + echo "" + fi +} + +part "Features" "feature|feat" +part "Bug Fixes" "bugfix|fix" +part "Enhancements" "enhance" +part "Tests" "test" +part "Documents" "docs|doc" + +rm -f ./releaseNotes.tmp diff --git a/tools/keeper/ci/post-release.sh b/tools/keeper/ci/post-release.sh new file mode 100755 index 0000000000..1b5d4201d6 --- /dev/null +++ b/tools/keeper/ci/post-release.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e +ci=$(realpath $(dirname $0)) +v=$1 +if [ "$v" = "" ]; then + echo "$0 " + exit 1 +fi + +newv=$(awk -F. '/[0-9]+\./{$NF+=1;print}' OFS=. 
<<<"$v") +tee version/version.go <" + exit 1 +fi + +tee version/version.go <CHANGELOG.md2 +printf "## v$newv - $(date +%F)\n\n" >>CHANGELOG.md2 +$ci/changelog-generate.sh >CHANGELOG.tmp +cat CHANGELOG.tmp >>CHANGELOG.md2 +sed "1,7d" CHANGELOG.md >>CHANGELOG.md2 +mv CHANGELOG.md2 CHANGELOG.md + +git config user.name github-actions +git config user.email github-actions@github.com +git add version/version.go CHANGELOG.md +git commit -m "release: v$newv" +git push + +git tag v$newv +git push origin v$newv:$newv --force diff --git a/tools/keeper/cmd/command.go b/tools/keeper/cmd/command.go new file mode 100644 index 0000000000..82d3efea1f --- /dev/null +++ b/tools/keeper/cmd/command.go @@ -0,0 +1,461 @@ +package cmd + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strconv" + "sync" + "time" + + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" + "github.com/taosdata/taoskeeper/util/pool" +) + +var logger = log.GetLogger("CMD") + +var MAX_SQL_LEN = 1000000 + +type Command struct { + fromTime time.Time + client *http.Client + conn *db.Connector + username string + password string + url *url.URL +} + +func NewCommand(conf *config.Config) *Command { + client := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableCompression: true, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl) + if err != nil { + logger.Errorf("init db connect error, msg:%s", err) + panic(err) + } + + imp := &Command{ + client: client, + conn: conn, + username: conf.TDengine.Username, + password: conf.TDengine.Password, + url: &url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port), + Path: "/influxdb/v1/write", + RawQuery: fmt.Sprintf("db=%s&precision=ms", conf.Metrics.Database.Name), + }, + } + return imp +} + +func (cmd *Command) Process(conf *config.Config) { + if len(conf.Transfer) > 0 && len(conf.Drop) > 0 { + logger.Errorf("transfer and drop can't be set at the same time") + return + } + + if len(conf.Transfer) > 0 && conf.Transfer != "old_taosd_metric" { + logger.Errorf("transfer only support old_taosd_metric") + return + } + + if conf.Transfer == "old_taosd_metric" { + cmd.ProcessTransfer(conf) + return + } + + if len(conf.Drop) > 0 && conf.Drop != "old_taosd_metric_stables" { + logger.Errorf("drop only support old_taosd_metric_stables") + return + } + + if conf.Drop == "old_taosd_metric_stables" { + cmd.ProcessDrop(conf) + return + } +} + +func (cmd *Command) ProcessTransfer(conf *config.Config) { + fromTime, err := time.Parse("2006-01-02T15:04:05Z07:00", conf.FromTime) + if err != nil { + logger.Errorf("parse fromTime error, msg:%s", err) + return + } + cmd.fromTime = fromTime + + funcs := []func() error{ + cmd.TransferTaosdClusterBasicInfo, + cmd.TransferTaosdClusterInfo, + cmd.TransferTaosdVgroupsInfo, + cmd.TransferTaosdDnodesInfo, + cmd.TransferTaosdDnodesStatus, + cmd.TransferTaosdDnodesLogDirs1, + 
cmd.TransferTaosdDnodesLogDirs2, + cmd.TransferTaosdDnodesDataDirs, + cmd.TransferTaosdMnodesInfo, + cmd.TransferTaosdVnodesInfo, + } + wg := sync.WaitGroup{} + wg.Add(len(funcs)) + + for i := range funcs { + index := i + err := pool.GoroutinePool.Submit(func() { + defer wg.Done() + funcs[index]() + }) + + if err != nil { + panic(err) + } + } + + wg.Wait() + logger.Info("transfer all old taosd metric success!!") +} + +func (cmd *Command) TransferTaosdClusterInfo() error { + sql := "select a.cluster_id, master_uptime * 3600 * 24 as cluster_uptime, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, topics_total, streams_total, b.expire_time as grants_expire_time, b.timeseries_used as grants_timeseries_used, b.timeseries_total as grants_timeseries_total, a.ts from cluster_info a, grants_info b where a.ts = b.ts and a.cluster_id = b.cluster_id and" + dstTable := "taosd_cluster_info" + return cmd.TransferTableToDst(sql, dstTable, 1) +} + +func (cmd *Command) TransferTaosdVgroupsInfo() error { + sql := "select cluster_id, vgroup_id, database_name, tables_num, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from vgroups_info a where " + dstTable := "taosd_vgroups_info" + return cmd.TransferTableToDst(sql, dstTable, 3) +} + +func (cmd *Command) TransferTaosdDnodesInfo() error { + sql := "select a.cluster_id, a.dnode_id, a.dnode_ep, uptime * 3600 * 24 as uptime, cpu_engine, cpu_system, cpu_cores, mem_engine, mem_system as mem_free, mem_total, disk_used, disk_total, disk_engine, net_in as system_net_in, net_out as system_net_out, io_read, io_write, io_read_disk, io_write_disk, vnodes_num, masters, has_mnode, has_qnode, has_snode, has_bnode, errors, b.error as error_log_count, b.info as info_log_count, b.debug as debug_log_count, b.trace as trace_log_count, a.ts as ts from dnodes_info a, log_summary b where a.ts = b.ts and a.dnode_id = b.dnode_id and a. 
dnode_ep = b.dnode_ep and " + dstTable := "taosd_dnodes_info" + return cmd.TransferTableToDst(sql, dstTable, 3) +} +func (cmd *Command) TransferTaosdDnodesStatus() error { + sql := "select cluster_id, dnode_id, dnode_ep, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from d_info a where " + dstTable := "taosd_dnodes_status" + return cmd.TransferTableToDst(sql, dstTable, 3) +} + +func (cmd *Command) TransferTaosdDnodesLogDirs1() error { + sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from log_dir a where " + dstTable := "taosd_dnodes_log_dirs" + return cmd.TransferTableToDst(sql, dstTable, 4) +} +func (cmd *Command) TransferTaosdDnodesLogDirs2() error { + sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from temp_dir a where " + dstTable := "taosd_dnodes_log_dirs" + return cmd.TransferTableToDst(sql, dstTable, 4) +} + +func (cmd *Command) TransferTaosdDnodesDataDirs() error { + sql := "select cluster_id, dnode_id, dnode_ep, name as data_dir_name, `level` as data_dir_level, avail, used, total, ts from data_dir a where " + dstTable := "taosd_dnodes_data_dirs" + return cmd.TransferTableToDst(sql, dstTable, 5) +} + +func (cmd *Command) TransferTaosdMnodesInfo() error { + sql := "select cluster_id, mnode_id, mnode_ep, CASE role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from m_info a where " + dstTable := "taosd_mnodes_info" + return cmd.TransferTableToDst(sql, dstTable, 3) +} + +func (cmd *Command) TransferTaosdVnodesInfo() error { + sql := "select cluster_id, 0 as vgroup_id, 'UNKNOWN' as database_name, dnode_id, CASE vnode_role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from vnodes_role a where " + dstTable := "taosd_vnodes_info" + return cmd.TransferTableToDst(sql, dstTable, 4) +} + +func (cmd *Command) ProcessDrop(conf *config.Config) { + var dropStableList = []string{ + "log_dir", + "dnodes_info", + "data_dir", + "log_summary", + "m_info", + "vnodes_role", + "cluster_info", + "temp_dir", + "grants_info", + "vgroups_info", + "d_info", + "taosadapter_system_cpu_percent", + "taosadapter_restful_http_request_in_flight", + "taosadapter_restful_http_request_summary_milliseconds", + "taosadapter_restful_http_request_fail", + "taosadapter_system_mem_percent", + "taosadapter_restful_http_request_total", + } + ctx := context.Background() + logger.Infof("use database:%s", conf.Metrics.Database.Name) + + for _, stable := range dropStableList { + if _, err := cmd.conn.Exec(ctx, "DROP STABLE IF EXISTS "+stable, util.GetQidOwn()); err != nil { + logger.Errorf("drop stable %s, error:%s", stable, err) + panic(err) + } + } + logger.Info("drop old taosd metric stables success!!") +} + +func (cmd *Command) TransferDataToDest(data *db.Data, dstTable string, tagNum int) { + + var buf bytes.Buffer + + if len(data.Data) < 1 { + return + } + + for _, row := range data.Data { + // get one row here + buf.WriteString(dstTable) + + // write tags + var tag string + for j := 0; j < tagNum; j++ { + switch v := row[j].(type) { + case int: + tag = fmt.Sprint(v) + case int32: + tag = fmt.Sprint(v) + case int64: + tag = fmt.Sprint(v) + case string: + tag = v + default: + panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j])) + } + + if tag != "" { + buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], 
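+				// the tag value is escaped (util.EscapeInfluxProtocol) so special
+				// line-protocol characters do not corrupt the generated line,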
util.EscapeInfluxProtocol(tag))) + } else { + buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], "unknown")) + logger.Errorf("tag value is empty, tag_name:%s", data.Head[j]) + } + } + buf.WriteString(" ") + + // write metrics + for j := tagNum; j < len(row)-1; j++ { + + switch v := row[j].(type) { + case int: + buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v))) + case int32: + buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v))) + case int64: + buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v))) + case float32: + buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(float64(v), 'f', -1, 64))) + case float64: + buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(v, 'f', -1, 64))) + default: + panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j])) + } + + if j != len(row)-2 { + buf.WriteString(",") + } + } + + // write timestamp + buf.WriteString(" ") + buf.WriteString(fmt.Sprintf("%v", row[len(row)-1].(time.Time).UnixMilli())) + buf.WriteString("\n") + + if buf.Len() >= MAX_SQL_LEN { + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + logger.Tracef("buf:%v", buf.String()) + } + err := cmd.lineWriteBody(&buf) + if err != nil { + logger.Errorf("insert data error, msg:%s", err) + panic(err) + } + buf.Reset() + } + } + + if buf.Len() > 0 { + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + logger.Tracef("buf:%v", buf.String()) + } + err := cmd.lineWriteBody(&buf) + if err != nil { + logger.Errorf("insert data error, msg:%s", err) + panic(err) + } + } +} + +// cluster_info +func (cmd *Command) TransferTaosdClusterBasicInfo() error { + + ctx := context.Background() + + endTime := time.Now() + delta := time.Hour * 24 * 10 + + var createTableSql = "create stable if not exists taosd_cluster_basic " + + "(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " + + "tags (cluster_id varchar(50))" + + if _, err := cmd.conn.Exec(ctx, createTableSql, util.GetQidOwn()); err != nil { + logger.Errorf("create taosd_cluster_basic error, msg:%s", err) + return err + } + + logger.Tracef("fromeTime:%d", cmd.fromTime.UnixMilli()) + + for current := cmd.fromTime; current.Before(endTime); current = current.Add(time.Duration(delta)) { + querySql := fmt.Sprintf("select cluster_id, first_ep, first_ep_dnode_id, `version` as cluster_version, ts from cluster_info where ts > %d and ts <= %d", + current.UnixMilli(), current.Add(time.Duration(delta)).UnixMilli()) + logger.Tracef("query sql:%s", querySql) + data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn()) + if err != nil { + logger.Errorf("query cluster_info error, msg:%s", err) + return err + } + + // transfer data to new table, only this table need use insert statement + var buf bytes.Buffer + + // 使用 map 将二维数组切分为多个二维数组 + result := make(map[string][][]interface{}) + for _, row := range data.Data { + key := row[0].(string) // 使用第一列的值作为 key + result[key] = append(result[key], row) + } + + // 按照不同 tag 来迁移数据 + for _, dataByCluster := range result { + buf.Reset() + + for _, row := range dataByCluster { + if len(buf.Bytes()) == 0 { + sql := fmt.Sprintf( + "insert into taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values ", + row[0].(string), row[0].(string)) + + buf.WriteString(sql) + } + + sql := fmt.Sprintf( + "(%d, '%s', %d, '%s')", + row[4].(time.Time).UnixMilli(), row[1].(string), row[2].(int32), row[3].(string)) + buf.WriteString(sql) + + if buf.Len() >= MAX_SQL_LEN { + rowsAffected, err := 
cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn()) + if err != nil { + logger.Errorf("insert taosd_cluster_basic error, msg:%s", err) + return err + } + if rowsAffected <= 0 { + logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected) + } + buf.Reset() + } + } + + if buf.Len() > 0 { + rowsAffected, err := cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn()) + if err != nil { + logger.Errorf("insert taosd_cluster_basic error, msg:%s", err) + return err + } + if rowsAffected <= 0 { + logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected) + } + } + } + } + + logger.Info("transfer stable taosd_cluster_basic success!!") + return nil +} + +// cluster_info +func (cmd *Command) TransferTableToDst(sql string, dstTable string, tagNum int) error { + + ctx := context.Background() + + endTime := time.Now() + delta := time.Hour * 24 * 10 + + logger.Tracef("fromTime:%d", cmd.fromTime.UnixMilli()) + + for current := cmd.fromTime; current.Before(endTime); current = current.Add(time.Duration(delta)) { + querySql := fmt.Sprintf(sql+" a.ts > %d and a.ts <= %d", + current.UnixMilli(), current.Add(time.Duration(delta)).UnixMilli()) + logger.Tracef("query sql:%s", querySql) + data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn()) + if err != nil { + logger.Errorf("query cluster_info error, msg:%s", err) + return err + } + + // transfer data to new table, only this table need use insert statement + cmd.TransferDataToDest(data, dstTable, tagNum) + } + + logger.Info("transfer stable " + dstTable + " success!!") + return nil +} + +func (cmd *Command) lineWriteBody(buf *bytes.Buffer) error { + header := map[string][]string{ + "Connection": {"keep-alive"}, + } + + req := &http.Request{ + Method: http.MethodPost, + URL: cmd.url, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: header, + Host: cmd.url.Host, + } + req.SetBasicAuth(cmd.username, cmd.password) + + req.Body = io.NopCloser(buf) + resp, err := cmd.client.Do(req) + + if err != nil { + logger.Errorf("writing metrics exception, msg:%s", err) + return err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body)) + } + return nil +} diff --git a/tools/keeper/cmd/empty_test.go b/tools/keeper/cmd/empty_test.go new file mode 100644 index 0000000000..143df6893c --- /dev/null +++ b/tools/keeper/cmd/empty_test.go @@ -0,0 +1,8 @@ +package cmd + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/config/metrics.sample b/tools/keeper/config/metrics.sample new file mode 100644 index 0000000000..9dbfea2323 --- /dev/null +++ b/tools/keeper/config/metrics.sample @@ -0,0 +1,38 @@ +prefix = "taos" +cluster = "localhost" +database = "log" +explicit = false + +[tables.restful_info] +[tables.slowquery] +[tables.cluster_info] +[tables.grants_info] +[tables.disks_info] + +[tables.logs] +explicit = true +[tables.logs.metrics.content] +type = "info" +help = "login types or messages" +[tables.logs.metrics.level] +type = "gauge" +help = "login level" + +[tables.dnodes_info] +[tables.dnodes_info.metrics.has_mnode] +type = "gauge" +help = "check if the node has mnode" + +[tables.vgroups_info] +column_as_variables = ["database_name", "dnode_roles", "dnode_ids"] +explicit = false + +[tables.vgroups_info.metrics.tables_num] +type = "gauge" +help = "Tables count of the vgroup" 
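+# Each [tables.<table>.metrics.<column>] block maps a column of the source
+# table to an exported metric; "type" selects the metric type (gauge and info
+# in this sample; counter is also recognized by the processor) and "help" is
+# the description attached to it. A hypothetical extra column could be
+# exposed the same way, for example:
+# [tables.vgroups_info.metrics.leader_dnode_id]
+# type = "gauge"
+# help = "Dnode id of the current v-group leader"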
+[tables.vgroups_info.metrics.online_vnodes] +type = "gauge" +help = "Online v-nodes of the v-group" +[tables.vgroups_info.metrics.status] +type = "info" +help = "Status of the v-group" diff --git a/tools/keeper/config/taoskeeper.toml b/tools/keeper/config/taoskeeper.toml new file mode 100644 index 0000000000..89847db2d5 --- /dev/null +++ b/tools/keeper/config/taoskeeper.toml @@ -0,0 +1,53 @@ +instanceId = 64 + +# Listen port, default is 6043 +port = 6043 + +# go pool size +gopoolsize = 50000 + +# interval for metrics +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" +usessl = false + +[metrics] +# metrics prefix in metrics names. +prefix = "taos" + +# export some tables that are not super table +tables = [] + +# database for storing metrics data +[metrics.database] +name = "log" +# database options for db storing metrics data +[metrics.database.options] +vgroups = 1 +buffer = 64 +keep = 90 +cachemodel = "both" + +[environment] +# Whether running in cgroup. +incgroup = false + +[log] +# The directory where log files are stored. +# path = "/var/log/taos" +level = "info" +# Number of log file rotations before deletion. +rotationCount = 30 +# The number of days to retain log files. +keepDays = 30 +# The maximum size of a log file before rotation. +rotationSize = "1GB" +# If set to true, log files will be compressed. +compress = false +# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit. +reservedDiskSize = "1GB" diff --git a/tools/keeper/config/taoskeeper_enterprise.toml b/tools/keeper/config/taoskeeper_enterprise.toml new file mode 100644 index 0000000000..6601b60cd8 --- /dev/null +++ b/tools/keeper/config/taoskeeper_enterprise.toml @@ -0,0 +1,65 @@ +instanceId = 64 + +# Listen port, default is 6043 +port = 6043 + +# go pool size +gopoolsize = 50000 + +# interval for TDengine metrics +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" +usessl = false + +[metrics] +# metrics prefix in metrics names. +prefix = "taos" + +# cluster identifier for multiple TDengine clusters +cluster = "" + +# export some tables that are not super table +tables = [] + +# database for storing metrics data +[metrics.database] +name = "log" +# database options for db storing metrics data +[metrics.database.options] +vgroups = 1 +buffer = 64 +keep = 90 +cachemodel = "both" + +[environment] +# Whether running in cgroup. +incgroup = false + +[audit] +enable = true +[audit.database] +name = "audit" +[audit.database.options] +vgroups = 1 +buffer = 16 +cachemodel = "both" + +[log] +# The directory where log files are stored. +# path = "/var/log/taos" +level = "info" +# Number of log file rotations before deletion. +rotationCount = 30 +# The number of days to retain log files. +keepDays = 30 +# The maximum size of a log file before rotation. +rotationSize = "1GB" +# If set to true, log files will be compressed. +compress = false +# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit. 
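+# Accepts the same size notation as rotationSize above (for example "1GB").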
+reservedDiskSize = "1GB" diff --git a/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json new file mode 100644 index 0000000000..153778915f --- /dev/null +++ b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json @@ -0,0 +1,5365 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": null, + "graphTooltip": 0, + "id": 3, + "iteration": 1643173897059, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": "Prometheus", + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 32, + "options": { + "content": "
TDengine Cluster Dashboard (First EP: ${firstEp}, Version: ${version})
", + "mode": "markdown" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "mnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "-- OVERVIEW --", + "transformations": [ + { + "id": "calculateField", + "options": { + "binary": { + "left": "Time", + "operator": "+", + "reducer": "sum", + "right": "" + }, + "mode": "binary", + "reduce": { + "reducer": "sum" + } + } + } + ], + "type": "text" + }, + { + "collapsed": false, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 57, + "panels": [], + "title": "Cluster Status", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 4 + }, + "id": 73, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "/.*/", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_first_ep{cluster=\"$cluster\"}", + "format": "table", + "formatType": "Time series", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "label_values(taos_cluster_info_first_ep{cluster=\"$cluster\"}, value)", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "First EP", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "value" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 4 + }, + "id": 74, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/.*/", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_version{cluster=\"$cluster\"}", + "format": "table", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(version) from log.cluster_info", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Version", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "value" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "MNode 被选举后经过的时长", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + 
}, + "gridPos": { + "h": 4, + "w": 3, + "x": 6, + "y": 4 + }, + "id": 72, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_master_uptime{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(master_uptime) from log.cluster_info", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Master Uptime", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "企业版授权到期时间", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 4 + }, + "id": 99, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_grants_info_expire_time{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(expire_time) from log.grants_info", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Expire Time", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "企业版授权已用测点数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "A" + }, + "properties": [ + { + "id": "noValue", + "value": "unlimited" + } + ] + } + ] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 12, + "y": 4 + }, + "id": 100, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_disk_engine", + "formatType": "Time series", + "interval": "", + "legendFormat": "used", + "queryType": "SQL", + "refId": "A", + "sql": "select max(timeseries_used) as used ,max(timeseries_total) as total from log.grants_info where ts >= $from and ts <= $to interval(30s)", + "target": "select metric", + "type": "timeserie" + }, + { + "exemplar": true, + "expr": "taos_dnodes_info_disk_total", + "hide": false, + "interval": "", + "legendFormat": "total", + "refId": "B" + } + ], + "title": "Used Meassuring Points", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "数据库个数", + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 16, + "y": 4 + }, + "id": 65, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "count(taos_vgroups_info_status{cluster=\"$cluster\"})", + "format": "time_series", + "formatType": "Time series", + "instant": false, + "interval": "", + "legendFormat": "databases", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Databases", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Time", + "databases" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "所有数据库的表数量之和", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 18, + "y": 4 + }, + "id": 68, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_tables_per_database{cluster=\"$cluster\"})", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Tables", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 21, + "y": 4 + }, + "id": 82, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/.*/", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select connections_total from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Connections", + "type": 
"stat" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群DNode数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 8 + }, + "id": 75, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_dnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select dnodes_total as total,dnodes_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "DNodes", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群MNode数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 6, + "y": 8 + }, + "id": 101, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_mnodes_total{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select mnodes_total as total,mnodes_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "MNodes", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群 VGroups 数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 8 + }, + "id": 102, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "count(count(taos_vgroups_info_status{cluster=\"$cluster\"}) by (vgroup_id))", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select vgroups_total as total, vgroups_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VGroups", + 
"transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群 VNodes 数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 8 + }, + "id": 103, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_vgroups_info_online_vnodes{cluster=\"$cluster\"})", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select vnodes_total as total, vnodes_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VNodes", + "transformations": [], + "type": "bargauge" + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "DNodes Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 12 + }, + "hiddenSeries": false, + "id": 84, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_dnodes_alive / taos_cluster_info_dnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(dnodes_alive)/avg(dnodes_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNodes Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:71", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:72", + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "MNodes Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 12 + }, + "hiddenSeries": false, + "id": 87, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_mnodes_alive / taos_cluster_info_mnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mnodes_alive)/avg(mnodes_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "MNodes Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:221", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:222", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "VGroups Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 12 + }, + "hiddenSeries": false, + "id": 85, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": 
false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_vgroups_alive / taos_cluster_info_vgroups_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(vgroups_alive)/avg(vgroups_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "VGroups Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:256", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:257", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "VNodes Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 12 + }, + "hiddenSeries": false, + "id": 86, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_vnodes_alive / taos_cluster_info_vnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(vnodes_alive)/avg(vnodes_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "VNodes Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + 
"yaxes": [ + { + "$$hashKey": "object:291", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:292", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 0.95 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + }, + { + "evaluator": { + "params": [ + 0, + 1 + ], + "type": "within_range" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "keep_state", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "DNodes Alive Percent alert", + "noDataState": "ok", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 18 + }, + "hiddenSeries": false, + "id": 104, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_grants_info_timeseries_used / taos_grants_info_timeseries_total {cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "percent", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(timeseries_used)/avg(timeseries_total) as percent from log.grants_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 0.95, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Messuring Points Used Percent Alert", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:333", + "decimals": null, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:334", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 86400 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "keep_state", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "Grants Expire Time 
alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 18 + }, + "hiddenSeries": false, + "id": 105, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:368", + "alias": "percent", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_grants_info_expire_time", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(expire_time) as expire_time from log.grants_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 86400, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Grants Expire Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:375", + "decimals": null, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:376", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "Error Rate alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "错误率(每秒错误数)", + "fieldConfig": { + "defaults": { + "unit": "cps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 18 + }, + "hiddenSeries": false, + "id": 106, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:410", + "alias": "percent", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"deriv(taos_dnodes_info_errors{cluster=\"$cluster\"}[1m])", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select derivative(errors, 1s, 1) as errors from (select sum(errors) as errors from log.dnodes_info where ts >= $from and ts <= $to interval(1s))", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Error Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:417", + "decimals": null, + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:418", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 24, + "panels": [], + "repeat": null, + "title": "DNodes Overview", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 0, + "y": 25 + }, + "id": 90, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value_and_name" + }, + "pluginVersion": "8.2.2", + "repeat": null, + "targets": [ + { + "alias": "", + "colNameFormatStr": "{{groupValue}}", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "taos_dnodes_info_uptime{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "{{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select last(uptime) from log.dnodes_info where ts >= now -1m and ts <= now group by dnode_ep", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "DNodes Lifetime", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 10, + "x": 4, + "y": 25 + }, + "hiddenSeries": false, + "id": 88, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"taos_cluster_info_dnodes_total{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(dnodes_total) as total, avg(dnodes_alive) as alive from log.cluster_info where ts >= $from and ts <= $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_dnodes_alive{cluster=\"$cluster\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "alive", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(dnodes_total) as total, avg(dnodes_alive) as alive from log.cluster_info where ts >= $from and ts <= $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNodes Number", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:128", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:129", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 10, + "x": 14, + "y": 25 + }, + "hiddenSeries": false, + "id": 89, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_mnodes_total{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mnodes_total) as total, avg(mnodes_alive) as alive from log.cluster_info where ts >= $from and ts <= $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "exemplar": true, + "expr": "taos_cluster_info_mnodes_alive{cluster=\"$cluster\"}", + "hide": false, + "interval": "", + "legendFormat": "alive", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "MNodes Number", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:452", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:453", + "format": "short", + "label": null, + "logBase": 1, + "max": 
null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 108, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Include two parts:", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 110, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:66", + "alias": "/success_rate/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_count - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_insert) as insert_count, sum(req_insert_success) as insert_success, sum(req_insert_batch) as insert_batches, sum(req_insert_batch_success) as insert_batch_success from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_success{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_success - {{dnode_ep}}", + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_insert_success) / sum(req_insert) as success_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_batch{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_batches - {{dnode_ep}}", + "queryType": "SQL", + "refId": "C", + "sql": "select sum(req_insert) as total_inserts, sum(req_insert_batch) as total_batches from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_batch_success{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_batches_success - {{dnode_ep}}", + "queryType": "SQL", + "refId": "D", + "sql": "select sum(req_insert_success) / sum(req_insert) as success_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_success{cluster=\"$cluster\"}) by (dnode_ep) / sum(taos_dnodes_info_req_insert{cluster=\"$cluster\"}) by (dnode_ep)", + "hide": false, + "interval": "", + "legendFormat": "success_rate - {{dnode_ep}}", + 
"refId": "E" + }, + { + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_batch_success{cluster=\"$cluster\"}) by (dnode_ep) / sum(taos_dnodes_info_req_insert_batch{cluster=\"$cluster\"}) by (dnode_ep)", + "hide": false, + "interval": "", + "legendFormat": "batch_success_rate - {{dnode_ep}}", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (Inserts)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "cps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 41 + }, + "hiddenSeries": false, + "id": 112, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "interval": "", + "legendFormat": "insert_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(req_insert_rate) as insert_rate, avg(req_insert_batch_rate) as batch_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_batch_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "batch_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(insert_rate) as cluster_insert_rate, avg(batch_rate) as cluster_batch_rate from (select sum(req_insert_rate) as insert_rate, sum(req_insert_batch_rate) as batch_rate from log.dnodes_info where ts >= $from and ts <= $to interval(1s)) where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_rate{cluster=\"$cluster\"})", + "hide": false, + "interval": "", + "legendFormat": "cluster_insert_rate", + "refId": "C" + }, + { + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_batch_rate{cluster=\"$cluster\"})", + "hide": false, + "interval": "", + "legendFormat": "cluster_batch_rate", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests Rate (Inserts per Second)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + 
"type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 50 + }, + "hiddenSeries": false, + "id": 114, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:227", + "alias": "/rate/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_select{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "interval": "", + "legendFormat": "req_select - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_select) as req_select from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_select{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_select", + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_select) as total from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_select_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "req_select_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(req_select_rate) as req_select_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_select_rate{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_select_rate", + "queryType": "SQL", + "refId": "D", + "sql": "select avg(req_select_rate) as req_select_rate from (select sum(req_select_rate) as req_select_rate from log.dnodes_info where ts >= $from and ts <= $to interval(1s)) where ts >= $from and ts <= $to interval($interval) fill(null)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (Selects)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 58 + }, + "hiddenSeries": false, + "id": 111, + "interval": null, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:100", + "alias": "/.*rate.*/", + "dashes": true, + "fill": 4, + "spaceLength": 1, + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_http{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "interval": "", + "legendFormat": "req_http - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_http) as req_http from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_http{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_http", + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_http) as req_http from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_http_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "req_http_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(req_http_rate) as req_http_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_http_rate{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_http_rate", + "queryType": "SQL", + "refId": "D", + "sql": "select avg(req_http_rate) as req_http_rate from (select sum(req_http_rate) as req_http_rate from log.dnodes_info where ts >= $from and ts <= $to interval(1s)) where ts >= $from and ts <= $to interval($interval) fill(null)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (HTTP)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + 
"show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Requests", + "type": "row" + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 92, + "panels": [ + { + "datasource": "Prometheus", + "description": "超级表的个数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 67 + }, + "id": 96, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_stables_per_database{cluster=\"$cluster\", database=\"$database\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "show $database.stables;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "STables", + "transformations": [], + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 9, + "x": 3, + "y": 67 + }, + "hiddenSeries": false, + "id": 94, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colName": "vgroup_id", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_vgroups_info_tables_num{cluster=\"$cluster\", database_name=\"$database\"})", + "formatType": "Time series", + "groupDataByColName": "", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(tables_num) as tables_num from (select last(ts) as ts, sum(tables_num) as tables_num from log.vgroups_info where ts > $from and ts <= $to and database_name = '$database' group by ts) where ts >= $from and ts <= $to interval($interval) fill(null)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Tables", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 
0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 67 + }, + "hiddenSeries": false, + "id": 95, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colName": "vgroup_id", + "colNameFormatStr": "vgoup{{groupValue}}", + "colNameToGroup": "vgroup_id", + "exemplar": true, + "expr": "taos_vgroups_info_tables_num", + "formatType": "Time series", + "groupDataByColName": "", + "interval": "", + "legendFormat": "vgroup_{{vgroup_id}}", + "queryType": "SQL", + "refId": "A", + "sql": "select max(tables_num) as tables_num from log.vgroups_info where ts > $from and ts <= $to and database_name = '$database' interval($interval) fill(null) group by vgroup_id" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Tables Number Foreach VGroups", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "description": "所有普通表的个数(包括超级表的子表)", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 70 + }, + "id": 98, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_tables_per_database{cluster=\"$cluster\",database=\"$database\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(tables_num) from (select last(ts) as ts, sum(tables_num) as tables_num from log.vgroups_info where ts > $from and ts <= $to and database_name = '$database' group by ts)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Tables", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "超级表的所有子表个数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 73 + }, + "id": 97, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + 
"orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_tables_per_database{cluster=\"$cluster\",database=\"$database\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "show $database.stables;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Sub Tables", + "transformations": [], + "type": "stat" + } + ], + "repeat": "database", + "title": "Database: [ $database ]", + "type": "row" + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 20, + "panels": [ + { + "datasource": "Prometheus", + "description": "启动时长", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "Down", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 0, + "y": 69 + }, + "id": 120, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_uptime{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(uptime) as uptime from log.dnodes_info where dnode_ep = '$fqdn'", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Uptime", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "是否有MNode", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 1, + "text": "No" + }, + "1": { + "index": 0, + "text": "Yes" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 4, + "y": 69 + }, + "id": 121, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_has_mnode{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(has_mnode) as has_mnode from log.dnodes_info where dnode_ep = 
'$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Has MNode?", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "CPU 核数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "Down", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 6, + "y": 69 + }, + "id": 122, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_cpu_cores{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(cpu_cores) as uptime from log.dnodes_info where dnode_ep = '$fqdn' interval ($interval)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Cores", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "VNodes 数量", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 8, + "y": 69 + }, + "id": 123, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_vnodes_num{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(vnodes_num) as vnodes_num from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VNodes Number", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Master VNodes 数量", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 10, + "y": 69 + }, + "id": 124, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + 
"targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_masters{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(masters) as masters from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VNodes Masters", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "当前 taosd 线程 CPU 使用率", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 0.5 + }, + { + "color": "red", + "value": 0.8 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "last(cpu_taosd)" + }, + "properties": [ + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 12, + "y": 69 + }, + "id": 45, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "mem_taosd", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_cpu_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Table", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(cpu_engine) from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current CPU Usage of taosd", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "当前 taosd 线程 内存 使用率", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decmbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 16, + "y": 69 + }, + "id": 66, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^taosd$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "memory", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_mem_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "taosd", + "queryType": "SQL", + "refId": "A", + "sql": "select last(mem_engine) as taosd, last(mem_total) as total from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "memory", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"taos_dnodes_info_mem_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "B", + "sql": "select last(mem_engine) as taosd, last(mem_total) as total from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Memory Usage of taosd", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 75 + }, + { + "color": "red", + "value": 80 + }, + { + "color": "dark-red", + "value": 95 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 69 + }, + "id": 51, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_disks_info_datadir_l0_used/taos_disks_info_datadir_l0_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "level0", + "queryType": "SQL", + "refId": "A", + "sql": "select last(disk_used) as used, last(disk_total) as total, last(disk_used) / last(disk_total) as percent from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_disks_info_datadir_l1_used/taos_disks_info_datadir_l1_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1", + "queryType": "SQL", + "refId": "B", + "sql": "select last(disk_used) as used, last(disk_total) as total, last(disk_used) / last(disk_total) as percent from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_disks_info_datadir_l2_used/taos_disks_info_datadir_l2_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2", + "queryType": "SQL", + "refId": "C", + "sql": "select last(disk_used) as used, last(disk_total) as total, last(disk_used) / last(disk_total) as percent from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Used", + "transformations": [], + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "CPU 资源占用情况。", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + 
"fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 75 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideZero": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_cpu_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "taosd", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(cpu_engine) as taosd, avg(cpu_system) as system from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_cpu_system{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "system", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(cpu_engine) as taosd, avg(cpu_system) as system from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percent", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "内存资源占用情况", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 75 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 42, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_mem_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time 
series", + "hide": false, + "interval": "", + "legendFormat": "taosd", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mem_engine) as taosd, avg(mem_system) as system, avg(mem_total) as total from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_mem_system{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "system", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(mem_engine) as taosd, avg(mem_system) as system, avg(mem_total) as total from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_mem_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(mem_engine) as taosd, avg(mem_system) as system, avg(mem_total) as total from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "RAM Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "decmbytes", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 86 + }, + "hiddenSeries": false, + "id": 117, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "hideZero": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "sideWidth": 400, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:84", + "alias": "/percent/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l0_used{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level0_used", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as 
level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l1_used{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1_used", + "queryType": "SQL", + "refId": "D", + "sql": "select avg(datadir_l0_used)/avg(datadir_l0_total) as level0_percent, avg(datadir_l1_used)/avg(datadir_l1_total) as level1_percent , avg(datadir_l2_used)/avg(datadir_l2_total) as level2_percent from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l2_used{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2_used", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(datadir_l0_used)/avg(datadir_l0_total) as level0_percent, avg(datadir_l1_used)/avg(datadir_l1_total) as level1_percent , avg(datadir_l2_used)/avg(datadir_l2_total) as level2_percent from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l0_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level0_total", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l1_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1_total", + "queryType": "SQL", + "refId": "E", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"avg_over_time(taos_disks_info_datadir_l2_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2_total", + "queryType": "SQL", + "refId": "F", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Used", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "gbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 86 + }, + "hiddenSeries": false, + "id": 64, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "sideWidth": 400, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "level0", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "deriv(rate(taos_disks_info_datadir_l0_used{cluster=\"$cluster\", dnode_ep=\"$fqdn\"}[1m])[5m:1s]) * 1024", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level0", + "queryType": "SQL", + "refId": "A", + "sql": "select derivative(value, 1m, 0) from (select avg(datadir_l0_used) * 1024 as value from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "level1", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "deriv(rate(taos_disks_info_datadir_l1_used{cluster=\"$cluster\", dnode_ep=\"$fqdn\"}[1m])[5m:1s]) * 1024", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1", + "queryType": "SQL", + "refId": "B", + "sql": "select derivative(value, 1m, 0) from (select avg(datadir_l1_used) * 1024 as value from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "level1", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "deriv(rate(taos_disks_info_datadir_l2_used{cluster=\"$cluster\", 
dnode_ep=\"$fqdn\"}[1m])[5m:1s]) * 1024", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2", + "queryType": "SQL", + "refId": "C", + "sql": "select derivative(value, 1m, 0) from (select avg(datadir_l1_used) * 1024 as value from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Used Increasing Rate per Minute", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "MBs", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "io", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 96 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 119, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:302", + "alias": "/last/", + "dashLength": 5, + "dashes": true, + "spaceLength": 5 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_io_write_disk{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "io_write_disk", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(io_read_disk) as io_read_taosd, avg(io_write_disk) as io_write_taosd from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_io_read_disk{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "io_read_disk", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(io_read_disk) as io_read_taosd, avg(io_write_disk) as io_write_taosd from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "Disk IO", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + 
"buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "MBs", + "label": "IO Rate", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "io", + "fieldConfig": { + "defaults": { + "links": [], + "unit": "Mbits" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 96 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 118, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:347", + "alias": "/in/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_net_in{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "net_in", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(net_in) as net_in, avg(net_out) as net_out from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_net_out{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "net_out", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(net_in) as net_in, avg(net_out) as net_out from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "Net", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "Mbits", + "label": "IO Rate", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "repeat": "fqdn", + "title": "DNode Usage [ $fqdn ]", + "type": "row" + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 63, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "displayName": "Logins Per Minute", + 
"unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 10 + }, + "hiddenSeries": false, + "id": 61, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:67", + "alias": "logins", + "nullPointMode": "null as zero" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "logins", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "count_over_time(taos_logs_content{cluster=\"$cluster\"}[1m])", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select count(*) from log.logs where ts >= $from and ts < $to interval (1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Login Counts per Minute", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:74", + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:75", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Login History", + "type": "row" + } + ], + "schemaVersion": 31, + "style": "dark", + "tags": [ + "TDengine", + "Prometheus", + "TaosKeeper" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "localhost", + "value": "localhost" + }, + "datasource": null, + "definition": "label_values(taos_dn_cpu_taosd, cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(taos_dn_cpu_taosd, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "tdengine:6030", + "value": "tdengine:6030" + }, + "datasource": null, + "definition": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, fqdn)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "fqdn", + "options": [], + "query": { + "query": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, fqdn)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "1", + "value": "1" + }, + "datasource": null, + "definition": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, dnodeid)", + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": "with DNode ID:", + "multi": false, + "name": "dnodeid", + "options": [], + "query": { + "query": 
"label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, dnodeid)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": null, + "definition": "label_values(taos_vgroups_info_status{cluster=\"$cluster\"}, database_name)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "database", + "options": [], + "query": { + "query": "label_values(taos_vgroups_info_status{cluster=\"$cluster\"}, database_name)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "tdengine:6030", + "value": "tdengine:6030" + }, + "datasource": null, + "definition": "label_values(taos_cluster_info_first_ep{cluster=\"$cluster\"}, value)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "firstEp", + "options": [], + "query": { + "query": "label_values(taos_cluster_info_first_ep{cluster=\"$cluster\"}, value)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "2.4.0.4", + "value": "2.4.0.4" + }, + "datasource": null, + "definition": "label_values(taos_cluster_info_version{cluster=\"$cluster\"}, value)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "version", + "options": [], + "query": { + "query": "label_values(taos_cluster_info_version{cluster=\"$cluster\"}, value)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "auto": true, + "auto_count": 100, + "auto_min": "10s", + "current": { + "selected": false, + "text": "5s", + "value": "5s" + }, + "description": null, + "error": null, + "hide": 0, + "label": null, + "name": "interval", + "options": [ + { + "selected": false, + "text": "auto", + "value": "$__auto_interval_interval" + }, + { + "selected": true, + "text": "5s", + "value": "5s" + }, + { + "selected": false, + "text": "10s", + "value": "10s" + }, + { + "selected": false, + "text": "15s", + "value": "15s" + }, + { + "selected": false, + "text": "20s", + "value": "20s" + }, + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "2m", + "value": "2m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "5s,10s,15s,20s,30s,1m,2m,5m,10m,30m,1h,6h,12h,1d", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + 
"timezone": "", + "title": "TaosKeeper Prometheus Dashboard", + "uid": "rSFM0Fxnk", + "version": 62 +} \ No newline at end of file diff --git a/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.png b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..a608c98b9305db20cf3f5d056847ca5045007de3 GIT binary patch literal 233143 zcmagGbyyrh*XTKeJHZ`-yE_C3?(RN#aCe(PAUMHYgS)#-kPtjba1ZVfTz8W9y}RGD z_qn@&bX8Z?DLEyls(;-xk!mWk=qSV}00018UQS8_06+u-05~*Au+SJ}kopwVgR>D= z76$<8<58c?;GtolyN0X;pk{*f004jms3~bl6OvFcFthV>GwNw5sHzB4Gtmq3aA08* zP|-58F;g)!a`U|7g2xL;xT3SQP zC^R$genfT<1hh$82HcSYO}!mX$|UQ9dQ5psK2aiG@p74`lJtkYDiq z$Z)?$uPraXNJ2u+uW~*SF==rD8WA}`dPeqWM=2I!@`O;skHzLK1rp>WG~%tuG`jE@mb&I@m-c9=+BqTsOL+_qEQ}>x-=>vnn;k=wQLTkAO$E zdCI^_m5Y!^msLvedkz^cZ5O48&XtmqCIXW85@ck$igb!%w45rs-5m;*of@BeKp9>7 z+^%gDbkzJ>eC7oPRzV75?MVy{B}UE?eC*U#UzbPHK*RaGW7WZWi5d<`Aoav4CSle% zWt!o9djcL`)er?AbLM8S!nUwtRTOt?A`eH#;#(=3ix5#6SBbdzED3O*w=j8ctTr*P z42?m_*F>qp(G^lLo!)ew6kj%aZYD~t6eC))5Gg8U_d-Zx856jfkcKObmc%KlKE3W| zYhiLh2P2B;Qw|a^eXL)6eSI~}ddM{73-+=5sITCb zKIq<~n+6VSfh0m@8T9n{n$M19V?Q4izg$zB15)(7Wi7`F3-SAsO zjgKRcyBgff7$V`fD#ypiEZPz>MPsM{z*~U4l(?49(s7Qf9Em>p5Ul@~K6L>%L|ER@ z@rs=9)ELX5<#O0UZ@o!FEbEK((>>S~M{X!^euzbVU>W(C-uPM+?9){Iw%kE*-))Jx zGgyl``axDvs)dErX*FEJu^z5Ad5dD4;@01pZdl4@e8A4I{cPyhz_7pb8p(XX?&DgF zr@+mGIz4oW0&YIR!XQBXx1hfvm~b!<>LC$A9Wa;@1oZ&_Bl$h#Zv-6LU-!Q<{}KNm zGXE(842xU$rIX@cabRzjjg`4M%(TDd`P`mOY^fWPx-QhZs##&;JPOrxlc^cP%(X<2 z{w??{n>wU>@V$7GsfDwZ^f%WJ0v)wOmDcY5!ueisq!U~~XDjzjBQFeSjbQH|QyuI> zf)6hvd}y|_qCu{S+va6L=BZ&kU0d)(vv%@6SJrsn46{^T>;_fBZNDg?sh ze$YgXo=PBw$z6MT0***3V#FAO9S$SOvnbp?Ml z9NP{COW1zJExWU%)sloXNMs(COy*PRqUtq1{8kmKJ$lWBOy>U12^+7YR$sWfsXEj9 ziR(nR?pr$Ut9n=sw_+UC^UJuIRJXE-aF4g3qR$_zPL+#*L)y+CW+KtU-Gigwa4>~A zGnE?Nbe5F+_tF~tyI-$Sk(W-v-7}5L(16%idb@|Sp@Phhne-dN^aj0P~jjar0yz<4t(@^v34r0xf_Fdk*~w&Ah2)PqmMiIbutQ-Y9P z4X}W6rH1)#UnGS_#Y`M>#VQ4V>(-j;Ih~4(O8cLE8~PKfBphrUv04-oJ>L7u)vXDc znOB@1PlJ0}?Tg-jBK7yO7o59}usXi$Mx1rhTX!~nt!#R+`@=X_#NB?r^_vzYC8MBa zaJOc%zeG83oINe8kq)?5>hB)N*^|(*@a{FH@ajs9{$&5p`TgLoX7J1iE4{2So9;YX zWKyj21W(!PCEfinz>~A1$zbPhn*p6OxaUg3jS@d6_&dBq6Zfirzj~cA*?Sf*yq*I1yRwD)w^-wY(jK& z@hDQclYFGV6M6TT+j#tV2YG;J@>jeG#-rmb{VkG3fTESgFMSz!eEW!azrx#H(+b_P zLN#_Oxb&+%vA6!*6SKlSOu)bE(WY6*7*gOI$1G2!quz5|3AZ%*gvZ0ycgUeks;bpz z7bNsh_FGV%I*8ZL&H}`#;6?1eaNXdTQ3x_$Xr_71!9!*lkW*TQ%O~pPGY|kmVCfXs@n+XjCQz4ZUjfw4~wY0qj&hn-tS5Fa*v_%)DK!$h&G0+7? zdGw@3O%zzzsOAR0_#;ncQ&Uzje#+f)kjBfWF-1;+LaJAo^8ysT1R8eQ4V`|b(8XHz znrf@wiPKE5p)zyVVNK6I-L(dc11ba1tzy^!-zxM0rfJFUp~MbdKz-q9i&U{AH=YfT zufG;?F4*cRt7XUp(;>Va{F?=%ktuiG6aVR`$Nfbc`#i%ZK6pk~Pc@#%_I_Q=U(_yY zCJiUdCUeT9Ikf9|w(sP^05yHOtIRZVMbEQF?N5d4pSHNUmGu{tJCvC-9oOtGVGS0? zBBXatd!idiLkDP=Lkfq)7L=1jVu24=Ru+YPVj?^wy(3M6*Q^;k7$T$bB3&vE=2-mg zKXm*U1DpuXjb|LHC^yCeRPt(-;~EyX%AIR%k>_Z-v&}bM+q_GXx&KTEQ`r1T+U8;M zN;-Zwbe&e|a2TDU6OAtXy%wYRDmG(ZbI!40pC^l(+`q4hzgL$c|M~KYx>gFp;(IoT?^tz06?}dM5B%V%MEVRyJOB&HOS? 
z09)z)AR3rv2T*i?7mM5N1c8XdJ8haa+4x5!*UZDKE>7?BdM|(`4EK z_voEJEWA9jCZbALvm=bg_okm_V`x5Kk8%FCZ|;#VI{ZeVBV zH;^ZB%Z+p$)rJ!Z3`SY?S`N(10ipM7F%UCLPzCqh&`{VVF-Q0LkRt4na-X0G5Vcs} zzJ)D&8%DIBKks%d6}t{1_KYUzsp#zdsI1>$nh^iohwJf`@kd3jUpCQQ7@R}9SmpPM zGD*l1(Q2=>7oLaaR2z|ABjW~!1&UBxAc|ki=b3r*HF}(bry^I_DFtarzS2 z*qqQ}$RQt)KKylP?LMq2Wykf&t8}axlT~tI6>LptSUs+iQ-!aQK*;xV(f149VhRtY zw@_B128B=p9=$n^2a{FwR8X@_ zzVeP=ufFn}de0iNC|4nO5PcQ~w0Oc9{dVO9o}tF(Qr?KTTk|Nwt&AXi6LLhJa#^DL zU=y0vEG6u28=_*YRtt<@Rt@IsA*L#DsV+S*?a9Xn&fI2_H(JpeV9=qorLoPF@mIZ@ zF{PZw6*3pTL>2cLm{x32Yo{Lmj9{U27&zKBA(@f0XfTsG(?y1onaDycYN2)`$A?pYCq(*4kZG{)2O(Vjr%wB8q(`medd~eOc-KH#)Mtkm% z)q}Kd{#XDc-*I)iig15GT|1~4-Ab?wX9h;{p)Sp+8{p~xQW#K%j^om_Q(s|KjcjmK zG}t_&EIB+&x4%*6`U#VOgMcy?y%ZY!{7ooQA^XO3I-F9u+>I-)wwNfxT>0k+(^s&p z)bEkE!iF4t;*~IS8Y)rXYY9)pWF)Qrm`za*D&nh{Pbrd8g|YnNuu8QEk>>|D&}J-OTz*S>QSQ+{L<6EbryA*=cn z_baX0rd{q=gf5m#Z-+zda`2)P{gJ$Z<2Rg}&}N%>OvZy9+6{%$#*mar?#*H7xvJU& zuXRc%H2jtyCzVD!?4j(V^VEEzYT~DYKO~lezV!g0cO66WUDP{I#8YfBubTTSr1V=h z>>173VZJ)rE>4*=`S97jRVlm1>G~RsUQs*d?7$ zezY_tmcVsOD<=Xs!O5v*HRwBvM$+Y9p~ZrCEZ9-+u9G<-!)*x>>Fy0Q}&hgZqlC2 zo$dyRu}fxYBkUS#-eTQTLNdMGKMVw4MtOVZ|*^mgKiKQHgN3W?yHI=G7q zlbKk(OXS+>_|l$_*+y(fY_$FDcg46j+Xr=QXXiKgpFg}|LF##w>Q(x&3(ul@?a}>W z=b0Lpk1X@-j@X;myHYC)X;QsN^tawoq&+?AT4>l#ufB<=L-9Fx3yuO0GXLUyRD!rA zA+*ca-y}ExW`CPS?lhk*`b#D2##mhXkD`Mryz?=O|3c7*8TzvhM!cqkqOo=3T zWi*wOQ6J(r;=!ZI?u=8iHH#uGcBoh8)ZQ;ca>1$TK#;aNbf=_uiT+Bu*vBQ)fbD-^ zLPs|@Lv#f_f~WAIMed0+^VV{hUtC8WDo!nR3<6H%lP7 zgnxwt^ZE-r;ZqR}4GzDB!284Twd3ZFSOgVaL?@DNU&+{hlQQsuFUrZawyk%18woaY z$htagFKTn-bnYAfMLrsN_|+J*0PDeN0fNMG!9^AlQFUwG@O}c@$=&m-E-|ka`b)6N zv7Vl-Akix;);W$bWRu>)yaH_XFswV3_i$i||I4&4QlO9fPwIkSuMh!GFPhStqnW0# zH%sl^p3{`qa%dw^^jlcRVJzK-iZNtloj@{~>8(CcffrsT$O?F5f0%lf3 z=)1X8n_ePVG6y5gc6(hmev9-4PJRouPm?u>^VW>IJCtkY0dnU&dZ_qypLG+7fH>q1 zG}A4^i?S8q?vWFu>H{ZPhSFCHoc$FY^Y5x2?peEE(i1^;b`2N(Qv;0TR-Ey(I0+h; zY^A4MX-i-DO)0Y;Uaw2K`N zeK;lIMIwPDLv!KS>@zO7e~ie87>KF`nP(Ge^D?yszr2Ny`OAr*IA~b`lE|>u@eF!j zgx(_=yt2^79+3VQh#?WetT`$Fyy~XBY25q9TWJ1Yu+#>p{4*->Ky|75FLZ-qwhb!O z^mRnNi#h-h7VzGdv2nMA)ZXeekE7MG_*xR%Mbz^Pub?|#rZU7g#|uie7`P;M*0LFJ z++GKC96OU3?Aph)8U9hZu1R;6|8Fq&Ke^rC>i+5mb^aIs`wt5IKNPX?6~9i+`X&6k zj`J%1W;2~lqg&SYnAF^8l*j9S0IGI&c7On$!BZmcStnmgYw{{Az$+CzT4kMBpE@<^ z7skq5IPbt$onn~P$U5d%U)d=jxOiCG;qkv^xyApYvO1SiKjFdGDr94rer--rAs~3~ z!qjv}%edz4$mHxPKnSDOrVk_QMjjI7h;bq z8TFvKQHzO_8x(GUe|_wuw))}RL9Ji(;=1%MsQcc~E#PKF@{*@}%k&U$@O(^RRHL`Y zVK@J2cksP}arnC5rRY0ZKfPE?lUu8@M+SFDWQht2wLb3cu+w@j z&Wg7i;?$YcZ?>ZQD!Tl1OA|HbWCR&Ru8OtL22QTBGjwvF8a_^QA?XFR+#d{U{eA%4 zD@Ke_v`SB2M$z`Hj1F8v=mMMw+T;B~ZCgJRv+d}5PC`?u2#Q^JOv0H(P9}Hc zt6P8WuVNE%RPEgYV~P&8Rj#|YkKI#`U1w*{xeeQ-uj>G#lk`e@^)aXS0LR~#y{*6) zW5q=GV&v^5O2F8WO-^*Cf&Xs+KK~&~4I-%4_XHNmQL8ERq)3PZ6Hfe{sLNdK1SdzH z>WfVONkhT3CNF?=9MN4-M*<5cI(?%Prv>-Cr$hJ>!M&BadK3~H5zfx|HRyFF9MPC( z-(#SN7|oWg2nYxT!q%E+=RVq50g~dU0*;tZkX5h; zV)L5B$3o3>iN|QdBc9tFpwgX3RiSdpm)M?G~ zIsbd$hT&ETGx%$J>vH2~U5L*`64*#2VRmWsnY0*_RfxWPZrjcAxQS!Y;2c*@D^~yD7ku_FB{$KJgY_OOWh8#)C_P&`MYt#hRyB3) za0nL!=Z8GMdR(%2bGJtTbO)$JBoLyfgw@-PrL1i;z}T9MZAD9ZGToMWIGrCHk84CO zP&2+@JQ#Nc$l`36!^GFJw8;zYP0}nqOnK;DrLFVW9%eclbDH+`WP=Qv%NFkaAg`?7 zPQf|^!X}52y{#FM1=1b#5J?1Jm1l-W)T^%!W7@7Dm&l8;`jPPtS33+{!u7^{o@^*a!*;MHopK%S8s|vW)g=jJR)Pm0(mM)MiL$jrAzW4QrWE@Fu{=Ps|9x z-5Tn+JeP0+W+-!V6O~U=e@&K74Tyad?3i<+o3P$U^@Mk^+?oufeh%^MUvmE_#CGH5{7Wk8##7~)!{$p z8WFh8DvDIeiJP{2J>E1kd)9}};{;M+RD92U1}m0fix^v=6S6BMD^@D$g2jd8II(C5 zFxqKsVSTWZ5-kKwL4xYIV%@^)kbrM6Z00JN=l#yf@gzK}BbO?}lkmWhSXnpqH?bHV z@A-tJ$}_<3*e4DNs&ZHFT)m?X#c!o0N*Gk+WzpsuteP+{`T1L~Gu zTOF9AsqQb7+SW$>$L&poGXcBRA1k=vbBdPqNlScj5Uo!fcQkgO;NXMM^HsIvuh}Ih 
zjy5#|SOkXxLMt_s72_B0Dm(PMk>=l)*+Cx}OuA61FJBf!E^*g=eA19#Cz zty$juT5ZobTjH+aKG?pY9|(n0CiuVCL=-*J0cY{)yH z`gg^6HDFdMj0!xJH5IuP%3YTE#ZZO$>Me`_7blU7wh<7d^JPqCDNWM8yHQ(OkAU6n z8odJ#rEPq_4v)i>9;|CES@1KsO#$GJ2zY0L6ejBIs)erQh<(E1}@ zUBl&fSG#;(!mHZ`xzu?u^4NkwXi#vwM5oD21(1cfAGC#x0Gg6>`tB>~m|zFHkc<39 z_VCnbHQ^$2zROEW^QXEI>?s@o)w+;bvXl0=I2J-Y!--(cPsDPicq2_H+!9aHOCS}Rdl{)y1a=D9QOK?aO0xWNaRkV=kToTc z>`4?*d`AsQGO4S~mq!Z94Vas*v&93<;TXpkjB4&GBY^VcIVq}3lpdJ90qj)%W;K@` zV9(%sKlcBaFD6hQlbiS#WUSDNCPOubUlS-;N#xY(RczJhph~18{y!^QKUCbk0+ce- zAGw6v)Rquq60xG|^u{Mzl?P+tzudzue21R9|9uOPj(P#zym6i3D)SGdt3u{t;>=HY zHzN5@Ed8vHtvb9@vd9;at#13H`M}@4FYA56V};dQ$!5fD7G&w$O0Il%$YWZ*^%sAG z^B6ct^Mm?GV5ROrB3T0TC{-s^-3$;xA+*hJNGN{eybqR|8&RuNrfQ2wnG!ono)9IM z3O^X2tjy<($X0oLqoyN9J%%DQx*Md`h?42KwDki!kH~1RaQ?K(;kkvhYanRTq(=dm zrsDey!@sb$9)gU4G`V{Jm-vZbB zC%G>-`Dk9%$wZ~ zz)rOmA%94!L;A>pLZ}PF=YEGqLh%M3G$iwhqr7=WIOTTlp+0l!p^gI+ zV)AF{b56($yZ;`+v`#AG#;8+<7b@AxV;zBmq7|J#YqY21H?QL4glQ|Y4@gY{j7B5Y zKR9we%R|i6SE?l}3-(n^FU@Uf-V*;%EGTE52yGFcC9uUBAV^S$8!8q(PTjIFn{02t zcTXylSGsC>7PX#US=-+B4&oOv4cg0HIqrWU2ryDzSFiXv-_73hwkj*LM^RB>hN+{h z#@z5(Ga_NW@%Lb>BDx4(b;5Xw>q_oQm3#9seH5`V>Jo$B2Wcwa$6^&P-;--|{V%~O z?M;Jh$h9~A;zbp5x^8SbcF9?T*A-!@vG%JBI~3N68D?)m;CM-h$|EO&pl;C5A0fdV z!Ix7zd!NZ3$dCC{C=TN}pV+zd7VQyVf=W-b!d`x3C@_dY7c5fSA@#B!9+hF1X42QV z>p^v7Jx!+S9*bI|y%$>nz5?j8^BWgd*_KNxLw1|K3u-J5G1&+(5CEJvf~efU!^l9O z5s{RH4hS0#!;QuSJ3lm1PKzlmIOG30&vRqp?B~GD#me`I&A6>H6tD_P} zz8dp-leXzWqfra@g;c%$HF zTxi^J-}S>1Qd`01hOPn>j@*H!h!5?lHjaw$0RRwtjqt(z7pRmdzRJH3i2k1v{(VYx zmkxK_?qxDRY5M0M{*c6Rk!`FI<9)S1`&aj8YLIH>+S0!5hjX4NVb;G*+2DX%nW|_F zHd}R^H=Km#vccBNa;NRRWHR&AbU;;=l8 zWk*M6${|s&#YT%Ivc!Uj!#K??Hxi^c%r9ecfT^eYYAlbkg-&CyV|?+74OhYgD+$<) z?wpMWo;&VS#DoMk4+$^>if+6!VO02{yP2ttXZnT%XSRr*jo#x!3%l8`CTnq92n7SP zCIZ3=V9m4`xVSCspAzw#7cTtjJFQMl#dofNNi_(f88H{c%)xfK zN=i)?qTJs6SbVWlpOjD_{;!B5Kgm<;&>CtG;IkV0^@E|Ux_F_RsinA7y&tPK<$86;Q-RC3Of!{*h(RGCqlT+RWgD&fxKkM2N6kBZgl!Aazl7wqD zQv1M1JzZD=p24f@>SWnV>hIy#t$jIleP0uzlf=AhdDV%yHN^d$_IAYcyUhDYDfL&(g`JS5L`5~7387D24?xUI^h_vt^~YbZmP z>S#F~rZvzQu6_wp9)>0=wY>`PED&`h2$@xhIM_XS?0IO;So9>pxirD_r1m0JVnO@r z4Qn@w!G|Vy$&_{YIX42cu$}BC>@2MvrDkPFJ-O$~o3L6VVOBj|T+Qw~f{h6H<0-@5 zA5NZ4hc<7Zvjv^hi*~XXn+@+4$8KIxd9HKr790i^<`4JV*%8#zN8Ss<)N?3{z6@}k zjdllS(}2dkPGY^Dqe8$yQu)v>>^U<1Uv{fOz6N@YHJ^A{;T@Fjb_Cbzh;@^*rS;6+ zzjwW#OAaP|g%MDI>=rXrE4q3#iT!HFr3G8qgCwWs9D~i)J}h-4bTm%R48H|8!c{dD z1;=cT>KM;wk^IGY5K1IH!I>IEBf*)j=#PIF?fVq?st>vLh9PaHB4 zlSUe_4;Lhf@i(6fu_o$QnoY^l$ICJqd}vJg?HflaGU9icx)>@I&z(*>@AXcyS{}WZ z0|#I*!e<4}>N7PcvOJs<5uoO>fCeE|i^NshkngOGttQA`%e>+I2GdG_=X~50->7H% zD42){Nb>7I-#dOWzGa)eRk0yywN0DAg=HJRS$!`w+lZYBmb#}+-6>uv;OixA)gyuK zS=zJ$EzGHjTut)DIi!0rx~7aDPZ@NgopqSC{;;xiZ+X+Oo)(ZKN_yWp^G+jB|FbP} z^Xw$+4(*KjZVgglH3!n4HyxLMQszUuLa86CJ#RfkEcC>oHXefpH2kC>jozV1@iFK@ z)qu)0OA^G0cMh2xza5=Uc~n8BH?{Ek{T+h=#HtGU7(qOb3-u-b_+PjFp%`cGME2oi z1`RLwS?BV+a`}LO^p@$g-VejsF|?U}i|_kVNDMc^#F`hcyB1Efhd+N2N)-fAnH5h& z9w&`${UImOc?tK6?xsGzpQfuKzpBQ4_Qp+7pQoRGMuhgHSsvy7WEvc~x{brIa2mNH z@U}X@`%j~PW6hPGK0XeA51Vhj`uRtOk3HSjtHzuzuip&K;aR?KUtlwl6;$4hXYs2B zJ2wY5;hjs_MG(}-`?Bc=u+ez@X117QW7)lCcwSKq4?w=1D{XaJeNZYnLv%KSGiJwR z*;rT!a2}@TE%nn{KRkQAsr0N&)LtjAT+X;I#Yn50f2uw5It`ocNt5>43((P@Kn@r& zbsKx6aZhVad>oIbr-*usBKi$YYI|>LrDw=*c3Wq4BKkyP%KmajN}($UGvGOf8Ho@m zDUZ;CaHO=z1n1DvR~C#tA{MWYVioi8Bi650+lWm+<24f2dKflii22*uxs;4g_OI>3 z6?w2K3vTo6W_`UU(yn!sqNIHIM7+CnY?&GB*t! 
z2bmokX5|koCT+{RuE8yN)Wwsfgy9;eXiY!z+pQJ4o>%Vkcq!T^mUh_5lZA?Kg4WHy zC0sg|GJzI9B)Ad(o&O>-g#ke3%YalNlTqE26Ov48^fimMX?(L3T9Ezrn5KHYMs0`Y zPhY(AWHCJP9`Nmnge{DYc|o5SrXj>Svsr0);p8@U&PEgZERa#dvV-QncG?B&(5@6t zTD_XqijXRGWKcgl67#V3$pNoUDUav^5-w!D&3`p7=XWSb$aK#AcWGOp=so;wklVX; ze`(oH6~tF*>nQQICNMhHu3X7z7v2-fS`i{j_}#hlGX!xl^9N6!`X_A@JyjK1?j8Ok z^LDN8XTtPp#F2YNs`>cg*A9*oDMYo678R7=@7nMVDjly^o>CK#hj$cuUZzO-!VnP# zTG*R)on8zF)>9)*4gH%>I^X5#pI*%zqxbq;I@$xs*D_Z5LIbH@x}F|ds$no#N>%K4 zbs^>qX<5jg`$+_-HEpa#Do0nAbA|p$3tEv}DaM?%s-Vn0K15B#AM6h)Sal0)qFG&k z+V-ckq{NJeg*}lq19uC4h9vb&Wp8iuVr6sdzC`Ulp1kC#4ABYQ2jA4N@l?>Dn#vAE zbTYdUloW9)rLLe)91FFH3Sh8RMQ{s9W(K%nazVtwj_yd zu%uvok<&lmP%IT;39~Cpy#FNLSAxZbylaL;*~99P^B|&qyQWeosv|Nsro3)=yCTH9 zS9zALS=u4?`qV{=`kv{?W<>+%u%Cg*DK5vyutx5}Wi*B7g&1UAakQD1vDoM9IZ4Ij!h6ph6iL4X(Fcu5$*@e>!p!z$sW zQGMg)tT8BAWrq5)&DuE~;ILxpRj?sDlXVvNq@{*w2&mGq`Y!aN+xo{Uawb8cd9m0O z(R&1=9XR0wd`7hl=Huo--h`s&G39fr*Nm|9jfN8T1KkmidS(_w7Je3ui1gQFgL{RP zQaaR(OKmg=N>(C3z|;!aMXxAp680fVsZInC&i-j|7z_gTO~wX1^L*+Mt2r;uEPhzBS~kImvYm{pKpLz9!;vkrxrN zb})khHeP0g#UB6@4~0{Am$Bv^<~ih5i>I) zg>g-NE6_@*_O&_Mq_5j0I_EP)5+pN+IfW_NXhCbggC+Hn^g zLH8-$LuqzOlP!heo0pHy2gTNqn~ikiH{ZZ#_uiek)l)<4&BQ!YR54nmqv%1_{g*~n zWBWc&rPS&(WV-`z07?gCV*dL_H5oxSf%%zZTeYt%`aAjvyMze~V4$V>Oo~kWF!)#1 zRaA+4UeA{17wH;0JZ-OWV4Z`bj4(qWeTHr%j9r@q&bU?P8T1V@$sAb_SQEyL=;DfOE$I+q6t zvcrfuJtye3z@^4KVXGk@^AG7hri7#|KnPYn=Q3@2NVM2Uhw($mXFFtyw~FdorwI~K z7}z`6-YHBA8*g|8bO>+<568yU%58HMaH(gJvO4N|qeB=h3OU7OOD!T}of1o`BPkJ* zUg}%%z-?$m2u8dT36bTnOkMV$&_IO+Vs;Cx85w5%Ky6-g$XPaIFB2BTUxxv{ugUnb zGx}qcGZx!~XpB-zAsww4PZEuA-O@V&4syW>ij=fG_@PP`Bh95qOpPD87hN>);rO>E zh3DHOQ{0%{gW&ioh6eV_JWqTN6bOP*7Ywa{-(I*%{SVH9^}4E+ij@%@sSaLPb~ywV zlBX)dy0-Z@j2PgHeg#|Bct+D$>opXB^@jsn_-|s8v0SNzBjVN{{6 zF9XMud0Wr{DX$bbtlTw;+I!DL~2X!@0O|+I9q#4-4W=fx7>ct>;Ei zDsGEY5RrdOrO`oyWh3fT+F*tE>i>TKMX;CC5sRZ(4=o8ud*IT9JG@NwS(g@jxQcV8 zyqpsO=<>kiP!#e#XiN{d|O*9?JCW9;4ZXk-PWM`k)EtBw*0QZ+I z7u6Ox(1RF2&2Iqw)z<32wsW~XU)`1kpCI`ro+GI`r-BbwhJVr6VHg}s=%QD1{}MTv zcjl8Q4FjN$#6yH!{^7F;YAiveP_|@r>0Von`l1yW-tj505|t1o?3#96r1HX&0u3T9 z7$-x^vg1r!_GXA(5*Id=+9{&R6HV{i*+YIB9E9OlTA)4_ItkvCMzVzD82IE{eTSMI z){mj>mjc)V8~W$z1)Q|^O7 z^gv;6AO#5j+t>Vo8(iBfkfoqA_%*N9o3(J@Z#K3SogT|BblWEeEjAS>fEYu6{z_ce z@gxQ7Ql-b~Li;XrVS4-Zml#YxJpCx2@XDR(I#I(F9c{0K-h$g`?-F(v1-qc z9+6WWXwS!K%>NZZo#2%S-%SmeP`wmiZaz|7Ya!%_1k@dF9brbAD9Hh=oo63of;i42 zT-p)20{n=I^*HgbRKiab=MFG<F53<~)5nlWFxw22BpV+U>|UhdGf2T!@lHULYi`o(E3Buqxa~9?4?V(2h>U&5ceM1Fxxyj6BB{{8 zLssG}|LvCRTwS@y^5t^;wtL3AQB5~t@N)PzV4*$V*q<)Z+1eXqM>RRaxcX0yZ*UtG7l2rT~S|}Oi6&Lr13C4i=ZhJE$)j_q#< z=y|zZ*sPhjui|Jdcf&`a7{=DXw+Yz_BI#WIe5|1NU0H|VhqYu%;rQsELfkcMn^>2(~$wa*Zkm9J19e2WKYM*YFo%4U-00aZO zE?Ywh?tXJ}g2Z}X(#A)-u<;An$Bnw1DZEy0E*m~)RGL0xOaOTRgj;d<*RWYSv@FAs zEm1%cs`??m9y{kjQxBs&D?)gX(cVJk%tTGVE9$Nv=9dr&6rqFbKQ{R_xMV7ZS<*~> zBWsj$P6{!=EaM*vdX#}4zkdcXb&0*XoN?#Bc~Y>AWvj=(&AxB=gNM4XBqpmzecYaC zn4L(MFUV<0%hEBiATXec&LRa|ApGj2=X8w23eRn+GX$fB?=e})BfR(Ju%*Gx8t|Q5??v90mNGkTu5E z1fM(@z`c^eFae7%Y{I>F*Iy{X#ak6ap)o^tH%cSVV);#aJx$7FN~Fhm|KXh^8zl36 zw{RbryAaD$XxLzSKD##DN-=BWSx57?eiRi%UZMS@3Fe}|(--=x`H^UMOi9d7=T8 zDdjhLg@ETbed_WQlP04gp#nt+8*ijaEp!>~eUtUHN8AKX`G9SE+JH16#<4e%aV#g5 zTm*;^t%VXKK$d4*_v!t{>+!1d2OMAWp$k${{Q-_e1aa_dH9tTwqI_i!ZVDSm_IwuQsESLf2! 
z#M0W(7PuAqIIi?!lj#06qni6{9(Fl>n5XytStu{aCWu>*Wm;KXfkOBCp*$t`L5yt| zGrB}+@up-!t`I5cdihcibR@G_@BpYW4$B=+Q=wtdua8toS?_`#(mrqPr7HLYxA@TD zPM+lII^;Ta1b&_B82Px8Dq<|mF;Z$B%f=9K6Leo+%LqPdP0$by({EDiY;Oe_0x4S7 zvg*wRll`rlK@Bx97^0Q;0)4M_ij-hvkVoH(v-j#sBQHyNG6CP|Or_Qte(@rkWh=_j zkZq<;_Z!Vdoig5+tB!8>d&i~PMr&!m)vT!}&uUE``9^*J`&j1PN%bo?YTApWtHZwU z$9flP&I=z5c0xDple}uKI3+Sy=jJMFoqZJjWOJ+M~_X&5Sa) zJd|||AEvduET9q%%zl%|9L^iuIse+fOV@C|i?ybiWFRPhs0(K1MzuDWy z%xn;Oqeb4Ay6(Qmfy5RLL6x^z*B2VPZMs^lu3KgeUiDZW54By*{L0f~r=sXRvK3?h z;?qggkz>|{0}*qkh2s$7F$>~5j&x9uV|U%I_8h>?jISqu{~b?;Ajo|`FN;#vxzxw& zcl;@#@W@vGY@m$r--c0I+)wkQDy{%pjcqepYTk%$n569$zxu2Vdmr6){@#%HD&ZL> zf#U$8(ZB5H_3EueQ!Hi_zBtMnu0OtYn#>Q@erK!1f-!AH=FoMW#D35F7CLSM>5wS% zF^joH>=T^3;~|QH$K6*x7sAW&YlJ0DARJ=%5JLr3dDpCkihA8+r#`SA%lk_wZ-{uq;eE z3iO2y)8A6CAV%TPw?lZ)x^mGsEE;z7R$Kp8dd3dE&4Sfqt9(oaAOP|!@mv_4ssT06 z6flGUu#*ntuZ$M7nQFkl4et7k;*$rJ+W=`n#zOwe1G~Dr#yz1)q@Pgk9CaoBFXrAd zAdY5h7wwtBJt4S-5Fl7^3l>6f0s(@DFnDlx8$y8K7Thf)xVsDq?(XjHu4l-*_xJ7Z zoO|y5dw=zGSJkSup7qq~o?6w_2;By3kn8W~z|P^=n80fR9}A`hd0f$JAB;po0H*pq zt;qs;X7f37r^p0iDqE8nONN`^;LNQK3mpK;)e2zeu~p~@g~@RiR(CxRO8iy7{yk{l z>Old%PaxK5iZ6TCgj|TX*Xs~(H}j*;Y(DhAK*6{mzyB4n)5Vm4?&?0eBIut61}zv1 zkEX>29t@dXWTNmsv)ETgAhhri9G1k#z7+Kn0ow(w?E_RWou>~dR+uYO0Kh+M0f02b z1UsA)RIl)l1A=aD-Ojv>taqEfShoUSAHO>;M7BQC z;y=|8ZA>AIniV(-qEcVpE<5Z#r6=%(J8U^T75Dnm6a0= z-t`++%PCVE`Zc>L1B8i$?0+3*e@qO#24r=TNF%e-6`I)WxIFrKVB>FOYfEi*N*w&0 zI1ZP8mdB_+2GwV2$ZqrE+nTS}et0D-NL5p~op)F_M!g{eunO1bNN;=9%CdqHoL|jU z78A=6R+>US(0g@yRIk983~8k9zq2<|WUgft`IS%SlKr$@oKkBYR@VBJImmb$;_UhI zY^zp+>Y+K4x9sKg5nlP-GuG}JNmY-$qOKp)4j|^-g%T3Q;{<{O#JKP7&U*)9e)q(X z-%kH@?@L{^H;m7M;)Wi7Lg~53c}@f?Qua4VxJht zI~4N0-kuqmg4LNXRfO-p{uNMRf1Gc*%JDp+A)%ZO?0mXH(r=|5T}vw6P0q}BS> zUG{s+ui4&)yk`=_B?u-J@N-io6rdEB^WpawtcYcJ^2El;wfC)t*ugukmbK19R{IH7 zeC2FkRv!Du6Gs&z0*kfJW$&984!R$PLV&UMqx6~AS252+2+sAi!e~w0_4jjKD69p~ zvyCWBUeZ=MCrix@J-!Nh<8CHKn7!w^c%W^PLHw#l!1yET&Fm+^bKX~v5bmBHIIWdj zYtp>^vo*e^7Tcs-%L%~AK!W4}DL9fev6o!W5d#==0S?pTz<+t-~aGIpN&iat}4KflzgN51$BOeZrG({o_^wxIquW$pgiD`C)RrwEM2;Rr{{Gt3Qk-N(^*bVVJ%Ts(j~;~EY)rIu_W83T;?`>*kxKGH z`n5K?J4agO1>5sP;0*VydNgg`tT&5uQ?7`=8uG_YY>ceeoB-bV7t@0VLjct$u)pAY z+Pqfp$$2G_y`-^gX1t|kHZO|bx?jGeW0?gZY3SQOi=PduSHbT0{7-N|=Mr%7x#&MMyF*@BZc z_`yUtE#g)PuC6*ijUHC^pQ-A$WTyW(C+Ac(LzCrGyG`H2ruP?D>;M z--w##P8*YDQgA$XcXrPB-gD{t;Dd3~YR0rB$v!_neSc%`k=bFL@ceysP3-_rA+zAF zpFMhFcHZlKy|EAZKN9h_-h;K(BYokeb>^D`zRdA(s9W%x>!n9OarM8_c)x9ydd})u z^3z9D9i{0))G_!tZ6YK1w0jxDdg7x0!EH=GW-EGLAyI;{qW=BJ7#ZU`FJ$=!A9B zM6{)f_O$)sK`d(-)W7Q9ezU`epnj%oOSmuRv%Pw(TzRKr(DH zMM|e_V7sXm^wl3&^dE0;@0`2ePTUMzn9AP%Pdb{b&MALiCu&Dk8%TK7GQ_h&!8g27kLhln{qTrnU zLEiLnd0mNQ&+LY$W}d33I__3kt|QBLYc-`aabpfW(_fjZbm0vQ_ERv8kW_6K1-IS; zW)pdgwMj-ONi$2gZRsw@m!$6_LTn4lIl(yZ$uK|snWRB1&ooR#8C?6&ga)BwFpQBp zLrb=VH~4Hmg~Yfs$-WNzt)rS*o(&5TyuyYGMlkOGqMONv4Iecb*QTL2}ZAh zYs`llV#XKuVaY;s<5D)JTt)T`!+lN(ch`02bJyb!8ul6J{CqlD^ZG3Zfa2(#Vqkb2 z5tv-~ZKbNKODfoR#1j5Zi}3JW{I;F9l4#!yOhH76N#tjl=@^Qf8_Qdl&(C|rcl2AL zz1E^1g(0Ec;4AyS4_~YxV>7fzqZIKQ(EHW&{^0k?$KOyMRe0kUqCxAz-yN$TW5|jl zk@dL(t>%{M)D1<~>IdG^?^?*03PkBhU7dolVnP~-yFUNMiWX4^mI4HsV? 
zw7WLhKbUvyao}1gC!%D^%i8Fg?c}le;3Qhs-_(tC8h{*_q>2KEDPMIw9XV?}IP8UZ zoR50jp0K?tAlI177A76igz(H4B8O_Y{{kiYKSiq%&`qLh;7UjhaL_asD2`WMT%76J zt$|09I=!1Mhw_V4{E2pR4_O@D!Y0n#eDhT{fi5#MV6Zb~_ zJ-0aGmRG5(B34~3K(sjQDu7n?f}?24cw2qD*Q_GpHeHCqpH{`nNzBtS z&kHF0*`YHMg#oJ@yX@@hI>$yb>36S ztiESXbJY3o!&Tth#g#b7ol#XKTa;`gY!l{*MexlS3MKp}p%Q%yjeK3pQ^{)W%W`$3KLZ#Sf>M!jFH$ z7|KXM2A@D>S%t+qa`ZPk82nZG^Y zd=)ff;s$@4+ZFx%mMbCJDsL1+eFuc=b+}2OV!OrgnVFi|kkH;_i%LvPY+sooGQ$UX z|HHv=>F%}n z#ZoLc%>@vY`nK*>1Hw-Pv2xa%+Ck_(!^Q2W;J53KQIzX51oUv@nAKf`o>glaBbsGe zBQb>{i9MM`zuoHQg~kT=CZQBy^S!}VFQavkg{+7CC|k1v%n3nmJL!Yy4gE0Jg25WTXR+c6zso!P#wQZXN9 znZZ!n2VE_Pg2J!dsHZ2)N61=QOWCs5nlK6)TMBdm5;3~k0_iTv0by}ngiS@J^Wjk^J7`|44I1_{C`f^K*!Ree?(R#*|lxd%2or z8XktDr13O90v+mcwMb`Y{e5^VuC8boX8k*+1#L~x_3yRC&ow^GjdM2N;k{a_BnqWp zg-NjL-}9^83am|6*k*%clwJzXL>hcMdgL`RBM>eo+GivW=?g@Gw27%NyOf~z}B-XHbIef~Mv7BP?zQTz-M#>J68 z^18pl)h%gReqeWiu!%y}G>o&JpbeFv8v0ZIcFiOYL3tYHf;^+K zB3}U8q$U)pb4g=~Eun!|1)NOo#VZvtLNhyJZksyd>5>HtoJkSgtcC}p$$ovTTi>oKos^d>ixBladqP|++*5jD6% zu%USnJJ~{^hf%QQiY~k7_cNNJATz0IyRL&H4B{eMRpO9Y>3rBVA;wWV&AiTUsHzU0 zX!ctxxlUIGiOQ-8l>}ro4R483+-Wj9Z*9$@$9_^|-CH$ZE~nDW$B?iIOI_RaP~^-} zu;juyeWx(gmv*X14l=l6khk~uxU+_T1b^_vk108`@iNh)I}?++Q#EuRkL|rap~QVZ zeIv+LG%8QJIBc!^-Qs9U0y1Xm-<0e)XeJfs{8e&5lz{CMXQW-?+bi!+V$XiVO4frN z8W%RYxjXvPBn(j_6_2Q#uR(%ClVPakuL5Uun=m!Y`swc^xs+TOF;L#rY5?aevENMz zUQ9c(fnUJxUT-mW{7Bff6fuWM3{MPHb>2@3c#T(AFo!bvN`w1<*0b4Avvn}U(Dt$w zHye;vaa`zDzOOnpvdY%m4oY}e8-e9m^2K{}TK*W0i95r53E`2e$+D?{vK9WYiEa>i z9%7Vr7#1KgB&D+|Bss{sz2Vk(xiRBLYe@LmLQw-b$1B%kmsVvIOC+=*LeT8SI?H&T zP;5jCYp16)zk!wL%k*G$!PcHJ&n@Zj%g! z%lDU}wh~^GvDlMmUv4T-JZs**#P@`@ccxV-0%T#o3Z=(m2+_+VyXMSXEQVl&j~c88 zV=~flVzo)CF>@3~!dw<6wPK8lq91+j_$;R#*y2a4I4+w{Llt!*JJcl0Lr$gt`8n*E zE4R02ws!7F{!Z~CCHq}K4LfJIJEl@u>vP)uk?Yg>`|D$!`@1UFO!s72qoPXNOCBGt z0mH;_-vf3OfH)-r+b(m9^Cv1iJMprJ4m>e|L3PvZwH9((0h#!$4z)~-lPW{Z5b+_w z>TnjeGLt?quKnnFj-*#86i{zJi`7H&4zs5KAK%2$0!YLMs+R$tsh39dJu|K6UR87K zuhvR|;zOg)@_trWK=mJhN0arT5cywa$*2dP=9A*q42HZ=0VhJZ#C7Y=*Z8`H{V@TF z_K%g@z}{(Zb4fY{Zf_$9aC!olKyCS-<-%;JrbeF|QDJrH&`eA{$KO5L2Tt1*nffoM z9RS>3eQrB*9MFP1cw*ygTr!&-(ao%D`$k;1BL(~iNk8!>IWWTjkj>%^!{Xv9oBwKS z?TG{IJ%&-!wd`=fY>e3O0YBrX#$s~c(AxnR2nrDMlj*&Y<3s7gGO{Fu@AvkcPy!Nt zesOsCKwErs4QDG+d<|BMX?PQqrVrXz>E#N_NoKA2vie$GyjXa$0-4M1#+!pVyZs;2Xc>@N?>_sD;fCQlvwsn%pj z)1I1JlS1TdYefEj%MD(V-|JF;W|MMKqDMexw*rRxW?XS2> zc{xcc%#lsS+aLTUB*J49++q@HICcQ^EggW>{~Ccb3sSN9xdka~IYO;4dt2U&0s^2Q zJo5l6FfM|ThXB_54A{Au|4~jLwk6@7cyp+R0m6X z0C5C5@P`wCew!p^A66W|&V#k;cs})%K*00w+Ghci-`8P5069kq@NVFn9Ae}=STy~@ zC=2z|#8-CpOW_432?|u7V!Z#dM5(8(9y5V&%$DjvrQ_#*+Ia!kaHz;HAGVax;aaST5ww2fz| zW&(TMKPTcxDbORVz(PB)+G`o1 z$ag>Ofz*Vx&0H_ge+yeRlIV1kz5PVPxbP07F2IHBOd+_F_(%VpKwKI+Dc~3Ppj)iP zTV!(Kl$;efFnj)?gs^380e!+X_x`IqP~C+JZXOhdD;&`_X30 z%cJ4yz{!0)(W6ct2op=?Abl$!&J;j&6koED>tI6s zOcw%vGtfZ?y1T^f0Ie3r0;(7ye`PoI$uJgGHIES*Ix8kdmf+Jc?9D%dB@e|1UeW@t z=SSN)!S}q^CZjyyj2*%LLII(<^R?mJWu2oxTD;fXs_U!G8jYNp7P7*&}TOfN8G^ygn&1`Y2!H0L&S2Iw@Gy}ri z^DuA%d$i6p{g2WN%HGIRGJH!RvUMT!2y5{;%`fhtJ-ToW zA+T1cHP~=|#PlYZynvW51?%}K*ZhZ(o$or4?}Zn~&0fcea5oRFXdO*{J{^e))cwvx z`q4_5-#szy6(?w63&c#`d{m>)lr#Sd5P95H0pW8~P@X{*v=wSUoEcQsEN%H=+x-TJ zV%OPq)<1d+C}^)o%-?*c+vvpQ5j_svUCUqwywYpA-5hnD?3t3+1#ffAw+xv117s6o zGOw8)ENJ}ZlN#bm8j*eQP8Xe=sJ7b!URo_w#+}%q%)IqXHMzY)6&{L)M3Se*d|yAu z)o4eXkk~jtx6*c@9GTWVO$qd4ew8Z^^_{%SoMg;x`v6q)0QirB(xsZ;>gi7*#j>z| zH#7axH?lHGrk=XW2wg%pF{?P1qPpRGe?G)aOa#PLiw;Ri{;^TKZhPjN6CrZno9O_! zbFh{96r*fo|E}$WT)0{Q_&89W>t)XK+N`6lS@-UPFvUkCfgXWP!uOp^J_uKE18==w z)fMZz+2{StY6`Bnjwz#UNIG{ttf+2OVSQXw0zO-}5XvHV! 
zUOOok3{|WNBf5qK+&?Fi_$Vin%ZU#ZlEe4vg(QF8E>~%(KrR;F7gm5(KO4^Jbx(m| z+Ve8Hz4liFqx<5fSb+(H04+sVt{9CTRctetF?}hsM)d8_Xmya; zywAZ>&1sSK4P4gh&0Y=N`gLylSyLq=WBdzR@w}ofRqeT~8DK%}?UNGQ$H=?m?%7kz z-Nw2k_3h32K5`~_uTy5E;S0-5y}y)IR2pnFZ}UCJg_zx1OAKd5Q&9Mkaj zDO*PP;;IsX}6Z6qJ=K z;6gdnSDJH?xrCvvooiy5K~`_43;Y5QBSb!yX&&QXEC!Yvow|Dvmeb^IWUQ*#wC{^@Dg!55IoIqvNVp}EXBI14FFLccS z?xPPbB!#)K*v+3>dHAx`<%@dAJ1g3>PxF`-B2wq)JcMBiJ$H2%D&J92bKJJ;1CKaJ zz7$Q#guTffE=%3hkO{SU(e^62Nxr$Bwdzp0xY~PWRVm_tMr1Da1Lkh;`ll7;wVYEL z#pNGK)x!0LW#iJayE)lk)OYQEjk()vTQL%S%fx)L&NHbF;Grwl{roIahMd$y09
    j;gzzuRNpUR!4^Ut5RmGFw0*&0JJ6`Pb}@DY6tKvC)o=t46Y^c_Mk-yq$9|y;WI@ zaw{Ey>PM_g2i?wV`B&}Xlo+d;Obd!~Gxi1(O2|9eqo%}H=<1o#+Q>Hdg;eh=<$7)E zTfzic1B3KsB`S#yqzg`4^J|+i2;dqU&ho^#j}P6pxkS%fr4VV%tW)8(a4f4xAXIXF zi&s>smZX!D`zS)Sl1Ywgo_(8UI>Dwtw)5;4bt`+J zRcsr{eawyRAjpj6L)?k(V2eSx=n4cI}IW4T&|XWA|7a<9yz!nuW9c-*qkmdkq7DxP2Ku7S*(`s zTw?Hk1QRS_8sqhp<0A`t2$$)T$KQ|l)o{nVXFKJb>fG8@;c`{GXL&cj_V+cOEU2Nc zWswMcDqi&lurf79HUe@?k}al$Ohm=XAS z;WKjF7s7f_a_?5Zgi$w~v4JyFz}rntOKtKPfMeoGaEQpp*9Eu-05APQM8vW7ZZ8!I zVc0>{@?5s&QX>~eThH`#a(ClRI0xD8J%KvMU+l&QwmF%6d1uN|1}F#U0C95gF1k?Z zHAz=gT-q;iF%NcL_tqMy>amZ-QbI(gn7rYx8qrt{4)C&icb&JHl2)w&cKpVyJX zW^RPQJDWUdn%>}@6m?qNPdNOlloVxs-yl3kRN8_MCVpL3D?|B-IQPScjG1*m@`PomX+zZKZ3HHC6J|F?!Q%WQtIup=Fb&ufM}ogg`cSS22_l9 zZZM(}1Bsts?kCrhCsNx^e|)C^$~j-8Y&P;_#2_|KU(DA~cy?`)DUv5Nk3sPPPIG;JH^ymRHw z*A*TZ+5U82RJkzBEbK(D{LQ#LK)--~w;Wr2ey#$Tzx}jX&Cpuj?VFveOZcUv&&Gv`(>Jxy@!QHR4$PP^k3L}G z*80~I>$eR#QDEoY-lAzf(pmTHX(OuSigRPnn>a-!BoXEsk2$+S#xD&$GC+Z z?l($O1vC3#uN5a2D=M{^(h&#hSrwcdn0HKsUcb($ocG&|G^ZEPdZ_hEPC5+z`#4k= z{e0+U;c_r4l4`O$Dy*~IiW3!n`ko5V!Bdnh;!8=30}1lOnv2Db#iBN0HxwjY!*OY- zK%Q6kw-=J2pm5hrnR4{{@@ZMcQhUT6Rr_^|i?3e73@M9cs#Fe@FyK$+&Afw797!D? z{|a&Va0DRM#4(vLHvzuPCyhIB!vo}bvsvsf>@TgTC?l7|>BlG~L75~T6$ge}6rXZ@ z1)(AsuinEgaSqEWiD#0LVB!NsZ#7lZ(UAqG40cE9;I&RFi(uzT4%Nm ze?p)3MtSjWC63MM9%MF(zFg80QL&qES~i>tc|Sy&T;ELv>QAPCc?*UeHSFjDZ)bZ* z;F2h8!W&B;>!?F;S33!caVmZXewU}s4@CL!!O`%s1MUXJ9++*DsK!cJ!;B23dm1T^ z2F--soBfV(pv}i7blV|PIW~EpVyX@A(CPbRPIV9>ZuB)`GCDbl)8J^XlX>38MfR%U zs3|lXCYp65#cmuY1FcpkRD?UOaIWDApqQ@;#*F*R<3Qt*jtp;(9plJUW8#@!s5gC; z638WItJwh~gKi7VIa^+Ui7nlD5SFRIQEK(xUViOVbz!8^QQf|*Kq(I9&OdkJGzO@M z;1dIBbg1;h%`qkEfoZ1O>=BCkl^BzW5jH7zli1tP^}@e zQZ>;Z+%Yhmb(t1Ij(3V8UxOepwXsia1ZMEt-g<>&r(B%`gW zmobvcT-|ZWB9o>QnRZ>f=11+g_+UP&vPJlfqDRUkzJkfxmixHIXap_ly&QyKL8^06 zaU3JA3aL$dIzPuFu|^mw3koyf)`uFd>l9VOzBt!E*G68?0VS=G7%1Ai)x~qd!Y~%V zDHpYTg7G*NEA1wUm!|k!707wv`?c9-UqYoPEak>3Mai-e8>*FhFl3iV$UOq0l3dIb z3Sj7TD_3-%^CE{0--5{Kcf~d!j!zE$K<6Ea^7gpCJKgkywewR$pgRO~41vG#uqe8k z8Lz2;$R?^kY`@kQvKQ3TML~J|((lo?XL*a5Wna<4+7lTkc64Kc%GU``>!z5fih=fG zIL1)cJAQTy^@Oy%&UqtI(n!+KtdKg_V>VXmScg07LeRW13OvnYT;u=+5eyh$5lt#K zNE{TO`Hj53^D-?M{FE|L`yJR^Nen4Qd;DDxsLM_s-FCo)k;NPh`kOsNHYf2>q0KMy z#EU3`u~Z%6Am>B!{vbbDZSZ4&UpHL7R=mmcuDNS~MmONXC{wuRxCH#(8>*Mz>z6j=^ns?-Hc?i-sVIJjKY&g!ZAC4G#OUr*+7w*gM zkiM?#K> zA$C>IYJaB+M^l)RNF7_Jd19^Q^Y4LP1BiMCI`g1xKdQSP(C7Ej=x1tT+cG@wxeEns zm;}C#wZK-qSP#@&yihRs;t0w=L`*1FJ4d0vnzj6*D$>zf+AUSeiJ>{ZkbCrljY5@P z84eFn=-QG=S8Wqe{szhT#&)>AN_K1ah!Gn9&W?u3&Rh*XG4vVv-q|Rw#g0CAGo6od)|k1QH@ezMYQMXJ<>5ug}iq9$=2Nr$OMf>br(R=f5hbYaaBMh$2-S$ zNk;vdgIG;X<$bx2qHNJK2kPS_ItL^;_Nd|jiF-;^Mk}}=2NlXEmRm+3qxUj^axwe` z`L{qDWI@P;7_+Gsr(alHECFj0N*i~F&nBgYOex7ssx!&Kl5b(NlENG{o$sqQi1 z!3dWy2hx}dbCcg255$%O_4q4O>V77jFeUDl3lqkLyK0AfPE{Nk zDSA>Kt|7FzU{u{+x(yO|6qrwl1qe#Bl)wj9V`UTvB3`MJejhjlvc@xLtwhmf@kke9 zhf)7|QB!`0f6f0Ems<6_Q+9ZrXeQx(3iye(*WjTw=1m!|yq<_z!+g@{LM&xu$#QsE z?gb;N61;Pl_v++YuLt#}XK2bmqk)KkOPG=lm25}M#uQ^hb6`eORcw1F>q)SU>U#XL z3OZ9w5U;P zS{U&J`{@>@aC3VM5xd-=+rWb(AA|D8k7|gM^O*)sBP+VO*hF!K7r5yzOd{=OsQhr6LMkb2k=`~vFJ1A-F;c7n4%rzhG=y4C%x z^fRgWiY{SH>Z&EG&QM@I9Q2X?lC*qWtcBY=WpjT~#6V;Fi~7n95WEn5*m?6S4i%2I zaB0*Bq<{n=d_ZwY^l_IlN72`xH9A6nv`JO!V@q=j{n9wmq48@YcIESm1Pm?d8F~D9 z>K0TOdE&it3>)edS5azkypa4ii6Wo+AW)P8cf{jIKXBJ6X@rN2Et}&OjH}pZ;p`(R zpts?`P^I4(I~SU?)z`*QPgyAr5CvYcpyXAiA=5x z*Yuwd&Z7R>ABdw@xJ?;>5e_VJ3C!1d^E28qt|25PPuj( z=FXS_^~aIl!s{yxlhmu7J;K$hf0he#;rLCo;h`=;&DX1?JDRDfy*g=I=E!Y7my*du zc9(kNot1tyXRzlrSC?U!)=J-(uGFuyS%T^J)?a$!4h7TFAFo|sH}==Z^DfJ3(S`L{ zBq+IF3{`qf58Oo+s~SIxl|jrpV{txAEqA&vqg6XP-NszIuOsR;-&QhgxG&FZRqy`l 
z{g|S4Z~aH=Y(}T@?skE59mlHCZMn;@?v?!Pnk253-PtrcAOQdm^WcaS!GV?}JkJR= zdFmuBgwuGu=q@GExt%$KDes&ixz^tIcsdca;HGP;$Cu2Pp zj@C(C7BHpz?GAac6VDPMB3^Ov1^qn>J6nZ29OzF3ASb=IO(Fb zVv7bN#3AhV!UN{{5brbg?$?-K2nX@X&c zh-5-E2>8>3YhtvV#)1M5A*+m-hKksrK0!ff@>fs$^7s(c?XY>DkQZ18ebIqTjt7K6 z@V}P5PQ~WbrNe;GVj^m9l)2 z-)tpHIeP-8ZZu!B72yp(Hqr)W+KJ8RO^>$k=(>WU*)E5ArSl#k1VjVYO}Y;GEN(2B zRDc9R!C4$A6CR6G+b@OfQ$HHYzzWHts?i;Wwg|+(Hx5ZHu8_jpH!4B|YktpCCSCN7@QX1lvDA z;49qNX0XpA9CG`0js%Pl51yV6RPgWHET*DI{`GGfc?$Y~PaU$rwHfe|5K|C>fHfD0 zbJzIu*@8Pbe?PenBib7QZ?2iqQ`wQ04*adB9QpBT@UUbv-0CW_lJFky47w@3 z^@Z)>I*sM%Hm|*P5V*0>qs{XDDUyzS9sGR~RN!c}ab!Jo!p@Q~{YeET}+59iPGb(_a z`^QO&HV;ZCZhS-^7&kM_j@~LJX#Vj$#joe<}{7fQhTF1#VGnaC^SSI&dIqsgf91(I-4#58PMeA5xCR-BO@x>U^JGS9p|8#tZb^%M;D#7(l zm*~mV(!=4TFsF+QFnE&-sb)Kia+q24WmnNf{6ikLU7W%joS4PT4xtL`?@p~{U&G;2 zHYT|oYoB=0sve|{RkrcNs$A78w=q6cVQr+mhq_77Hky#Ck<{ft3>|Q^pSEve6tY=DpD@x{kEb>cy z!wp<(V?o>L?cDELz zEc=j<(5U=aZ1Ff}7Hgs0P$M&xM|oUBwBEr)1t?QJ2TvS!t}yBC$a2JDQ)3Vl z+mYg+t+AH{4dN=C6sbMGx_Td@V;pz;3lj<69bNXEZnlRNViy=4mx<{NTy$dI9IZsD z`3o})PDFL3&!@tR}(VOb)f}kQn8jz^mn?7X;D?yg;1UwNYy#jj#+SQx-a498Y3pnZr@0J1`fDVP7@Yx9(x`y%&0m0nYtTfAuOi~Jqk%{VJ0m9)=uncq*6d?WCjZ74A^ken zr@ZUNg37~7!y1hjOVyab&IeA1OqCWvy-`4JwpI<8$ZTCHacawK=Udglw0e2v?lP82 z)FA3r4>6iL=}>=rbTuoGEsnm>gtK=qMUFd9t=YZy3Z^iVQxZkA5}iTAddG*vxtsf> z`+Zi_a~P_+CYZ0U#hQ1J8RuK~$|IVEy|tu$;_j0YX709b;K1##wqzh>2P8ozl;2G? zZr)xrVKr)WBC-3=ChwDtm3WC_B`9yUoiI|!6x-w);)a(q^I1MYtUu@w0-=Ggqx`cQ zjvAsXobxP8f4s?=OFnFttK0fycvcpXceFL&;Tg~Fy}X%Np3)lvd~$cR`RprnaX9_B zr@X-3H9ss~1&C_x0>e?3ZGRgyNA5=e;<+_g$AGMX3jfPFCP1K?Eth7Y6|tMNhOi^A zQbc;nuh0P2BGAQ+Rrl%<>qbW^Wn;q48y7%eTuw-0c6ZSzr2rHsH`bRyagPesSAQWH z5^pIK=@ZX0dU4Zg{H1Wz3@(DIQwVluNlXN8&V$5m#{LvZtkf{anJ5#S7Ly;z07T`; zAgJuRZH$3GZKNi$b!<;|taF#l)*l2~z(ljJA{l5tuR~*?tVNADFZoMMY|7g{TG%Dm{8 zW35GRk9qxhZ=Y(;B;9^o;^5eJh;J`rkr*xR?JH4`@&_dGr*9mp`R0Tv7nF%YEp@JS zJ*nl4W=px{iJt3&a3H-R$P|p%KBJ3&R;k1l5puaco;On6+IPl$mUS#(h7V&~r{lJ@ z^n>UxvkyWWY|h@P`}EQr#6(w!Mdo5Qg>4YqL1Pf|xmxsu{4U zj1-Xgroxe(J?iyFyO8bvoE}`YG96p}Hk}@*tToZDH|YIg&mVu<)w)-xwW!gbT31*R zEJ$Ae9o5-AarDl6F(%Ij&Dy=-@>6>!g&S(q^%;Cno?|=ajH}3$a^c4q-ohC&lNw7` zA>zY)m4zKm=w{2gw1-;LLzUbtlVNxm2V9Vk8$J=iB9B!$?sf-K9jM-sdO30KUP}`n z&u$+VX$y^MG$h{ND4q>VT?Tc#r)qzWr-lz^L=Q#*^oe4ysI_Plt&691CW^H+wAbue zrv;m4MQi;yVRkcCQmQt`R2`Ln`~cME7s>boCfyF{yFY7+4egy*LAir*)ud2h9ip*U&e-% z?$1d@8Z1>U#*UhvmC9r%7_eeC_3MzSL_JOkt|?nrG>R&{Yn5R+Uft--NiMZmvkPIA zroB!$$bSCo`6701w9FeiBQ2P$D;wi#(|U@4nL*!=_*liYk>&0tl#j^t4@lWx6(f_}z)59$%9{0{zh|+a@Mf|-GrHePfCpmF>i&+)-noTvz4hy#6;90xU=%`P0Bqw@)(@Sm%QLS-28MCa2exo~t)Ays7h zuzznoGgh+jJ0t#!MJu;+@~hG}bn)PinhAYpJY3{2FPr07I7OV#-i!GFu|5iJRH?U< z8!7W2I-V=Ty1UjJa{OKA9a6P1l~Nb(lg4RF9ESHejq%KDEaGXqtz?}ykqOtXmBWnEsFpJ}T9*E(GI3U;~|8V?!ieT}-dp5ve z>q+`|rEo;QI|kthj{n~XxnqDJ=09r3Bl!PUbU31`|IHYAl>fgd;ujx<{?EvHpd$k2 z2M_;sbmvz9@ip6jogxP9za9P;
    ~<$<8||C@3DZiB;t5b@s_{|BfKUi}{uS`{t&a z)jYx6{+82OqRZ|F36uJlZj~&d^I?b^&Tm$&rPcWh{Gj7l`J{ z7F7|K^)D5$4UV^P6{_~PoQuy~=ZS{BbUF_&I{He0)T_%?gGj(h@G}VsaZ+F1&gDgi zJV)yM`dqU$=N_8EY;WHlfIEAED|V@lYyS~P;dlDRerVXv{d^Q+l7iz3Rn-r-?>LIx zch}lF(oUNmxmuM`3^wd6>s)E$Nx9&Bcmk>a#ce@}d+D^vQBMyu}Lih@rZCehel6xf9tP!GB;Yjk9bJKx3fIh-P6*iU;a2xg!&tO z!f9W9Jv73fLO&I^Q$fO>eU>7mfti2K$?kO3gJ!&LZzsvw{}!@8Bj;~*j@xpW28c9u zy_h|*I_qP7Ubqvw+;o=CRJU6y#F`^VZZuLC+7{1FbKKsHPQtZQJ=8qF6S}HtM*R5* z4>^d0ac0$Gfc%s^P z@g)>HPn%yn24TWaLAC(__V~v`B^bu~<2bNi&*W4!Aea?&KlA4JW+fbbM;Bv?yCsc@ z^=;x@-m@BJAu;R+3&s*tXl%-b(Bo^lFeTmuKsTVT=0ybQo;}B}kCC5ZijeJ#B{v=TpNIimSsXm(;%1Uwni?z27 zi>m7yhtH7G(jW-Z(nvQd0wN{SjihvUjfqG}gF`5t(j7BMw{(Z3bmtJ?LA~RBp7)>c z`n}h6U=I82y?U*^nZ3_iw3^ zs)fAuRffzN^KS{Azsez0DVs^AFxn|HA_b`VrbwJcYjlb-X1v@n5kIg?I&V=ZB#N)w zhN5|}@n z7rQ6;f$8VGw!Cb)+)e(Q13=~*RW4w|5hG}AOg@lsap)TxrgrmYvP*wLHY`lWRihC| zGQUKDt~YOt3djXHmipY^UjI;70F!3%JC)^n))hy+8%EVOoF0=bg*16G> zj)=KtEG0$M=R>Q{NJp!|EG(~gWec^Xo2m~N|;u)#+W%% z-$Y^k+No`dtt3gHuq)iCWd(f47tMfLYNdTr@Qk?0?R~U7kUl7W|m95(cd@PANB3QjPw*365{X{R>|GFKCe?gKmJ)I`gD_A zb#h;O4}IKRO>6tTs-Iz^g8SRO0$X>$Q8k|gkQ}sq#8p)D9>ekVJr-c<4*8qg9GYB4 z68QsZT#_!MlwUt;rFg`??d)1QD?vA=3B@a7#?D5)xP^G;f?unsaf>&9we)L|Ze2YB zJhz(aDZk%Qt3pDNrFPLrNxVw*w7gWL2GuyCzePuqo26J(21mBR&tkXA3NqHYtf=`Z z-JUGh;Cpwx(q@#BWJV{m#)-@<0FZa|Ww>6})DZaIo!+3Cd{P0Ez~(b9mrz-a^oOt0 zI3^!+VhkY5LW)&tYHI@&>sLTu^f(-NzvnJ$UKx_G0h{9BJUGuOsYNzLTT`0aDW#~_ zTgFx6&}ntu%Y6t`RcS^Nra!8mZCSX5P;Xk(X$q_T}Ing7_A~$stK!yP^{fT8(F13xbF@{7_`uekc>vKV!2J2p+PSuhi7^*>i4l z*(Nq$9u^aXx(Q;I!pTK8zCHtb>umRC=8)!5D&~eCn*0X|0P<0ldqNt>HT*2tw5eW) zd#M}*G36vLglpqdH-pZZB1p_B?b|f=wLGF&HPVKG_?!2o!oFtD&7Pl|o(iijN=kNx zyab!E_xA3xt?5LH;mO+nbmLhciP2~^Z$~<}Dd|p7l(3;egRL`A9I1jwKH|+;kelMA z$9>kTQsq?D^Ljb_UZt~Rh3><>L)dUlouLElZY5;H9P)Dc%`R&{UO5OC( z5WhqIBrpQHzmlfvwn|XG4`Nx!Oil9U10!_^Lgq-VSJhm|LwK5Fv?Al+_QJDGi|}UL zv0I4-^CSB&M|}i+ZCYv7^znNZRX? z(9+n5eK5MTd=?Ru9NKl9rB(~EjqCV$+{+@wRcIfep3T!|-u08V*B2e2oqB?0TuH}V zA_&cq0l(yW7*VutX(CB|{AMYcl`Oan!>advS$Stc-vMiIM0V=87CS&JagQ>S%U$$Y zS_f4z#5N{}a);H@hOu#N1KSP&-^PD(J)fX^=`7R55htWSe&H?31X0u!l$m)HsXa5n z?v~}?xv?;7?vOczeIUwG>ad%MXPl(ezbds;kV6&dO@44Wp1;;*ZfGKH^~pLsx;pYCnued=5u*& zg9}I&fni$xcIIthUlU@yg+tZoPVZ6PzeWz&hsh)bIkN>)+oq2--@1@NyI%F)=oZh8V?@b$)+z%X+k;WzR~LV96%?WMis^k* zlGk9-O!-gE_V!zJy?&@X(*f8($wNXGTQ{d z(#3eDq-m5MOHK>!APJ@}i>{^f*@V$dao3_0{(e{n2}Y#^6o~9YFE0n-1LM`Rk@p@dohjrb|55pPL)k8!P|@l&+cj`VQs#<`uXX zbG@1S`woP5;~Loif3VfyQUiW%C`ac>U z0!7hj1r^WgTs0mMOVNu`Iu1KpJLDhF55yUFKL=?sW>yyk+>@|^V;h?c<&@q}7~q+Q z(&XmYe9fvbtvbsf16jPr9D@&4QGNS@5;ZI6R_YKD!(Mm9EYix<<>SIyobEr6n1^}8 z1c<5=nzq$ulOGScx79fADsV0T-hW~f0DNqMsPpq>%R*|fbxxk=?|8|z4Tgoatr;n( zY!+6|N=;u~TxR*^v59>!S{iK1P0fb?oNe&?1s~j>vLGAm-ZsJ5(3CFqR&BNE%CmtT ztRy9cd-%x*#mzPx`mgjz%7l9)?HRdmy~rpKz(d^`jS5v{F>il~Gw?y)sf{Bf6RZD` z;#f<&GW)|P58_c*cN+k16Qf>zMTh71&%-QR%;K!pzgLA61bF$639rC6F&%eFFB3}09p{;;&*AUhh#8b;| zG_XqwQifxe3kCOG@frnOE$x?|wjfmkswXGT&!knhtFK}$PY)(1;cjb#db6{2T`u)Y zIt2|^4j%BncUZ*Cit+2szGEo8H7q)Ll=Tbat&Y@ApDd%C5-Xn~g&U$K=TKBRWmvI!)Sx!cV|QXdjvlMwP|>o?UF zqDBx{NXk!1Di9;j^E2!*v-cv`ci>xd9Lqe{oT))zxW93r+wwM3|LVtP97Xr*X%1BB zUHmodOcYH7vvl`=49JJJ{MP`|qF)1;75@fDTGRo0(>1bxffJ-O!M_#*w)TJHCoMt& zZS#6Sf(c}Md&729o!dm8Q&W)2eS(=`{5SmWvIdmoB$+zNY<0H&39-#)u!LL0mY?7? 
zci$AVpmI_3@afaE*GH2-tl$>B1EIh8{-&4-WuK%X98?y9hiMI-H~s9iVOVYb%|)gf zqEN2|*0E&^7lX6-2%a4T^-PWw!jAQ%JOe~ku8#8+e@TFNd4h+w23ndnzSrL`vuk*n znvhZ@pm6X<%9;D5J6-{jG_U8Cj%%u9sIJX~nW6s=*{;~0yTrSp(;JJ~WLaK^5^r>i7_T(_exD_js|#3SXQC{ZVZu%HJaVOR3+~ z|651#|5EwiO8r|S{~Ew<{KwM%HQ@i$Mwm4b3)aOYRo2uhDb2QHR!t5!oT$rY@l}ulQ4ueGE5bq)U3}AZf8A3u zBwNd-F!MC9KW0?+*Uf35T$qnUFwIKUs8$=>({~2i{M;@$K>isAd{$iOW zn80DC!hgDM_N);(=ye8L77+K+@YNF&NUSv&FpFOmd$; zukLuv_s9H`Q0(DhRieQZo4vDQFW|S6k3jKR{7YZ2Rh!EOdkPnZ`LB3t_9h{ z{8qeu4^3l%jwbz|9)9rz`iMiRmD$*Kj276In^UkL4S}_TqU@)?keM02>dh>4xU}7{ zV38{q?kVjM?p2hH{KMla$&R60p0-c_|8Qc)zfIj3p1Ne|{RQVg1MUB5-27Ld{XaDy zi&S*qVc}%SpflGh2j0l=zhi3YFV|&3?ykr>%$hOY{u92HAx0@O2VrS`9|hJ1wU`-J z{|VvIBn$&rvKqTSMrB9FY4|C7Q1JmXfZuZPh2fjS7h34dia42v20ZXSl?NUwC7Pw4 z->>>)`0!_u$11|0p22~x*upbHVaT|al*9K;&-wPZQyf7K%`A}u;3Zkc*z)mTPS z#I#JN7$1A>WcH2VGwd;OQRnFe#aOsi zYMS0dGB{|lQ869K>AT~M53F2|y40E^M|RS}#XW|*j2!;bW0tHndZlBn*KxQ1>KhfN#1mNm**{)!+lpKNE4&eg}9Zk{-^69f0X|f zD);&)hmoG}S3w5d7O5V?wDZgSa-nr0g^j+M^Luf>u&=EI6#v2=eBn`bj`@2Lay=Q( z!aF28TBCeRs)SafXQhnl7l@FUX(z5WBdqX%!+p5-8JO^~`W)VCu)8-(`?p!qZf$I+ zdtkKqa9a{CSSaE%xX;GSUd=Pk4{HWp$O(mC-o54-#M`oBMOfE1e5#>ccl*gkcvW>d+kX$o^!($rnVJZei|o*fO1<*= zRGi8khm_kYv$13FXpmbYq?Y7W@Mf1^M!3fkFY zKU#2s6eI%LX$F+@UZt@v7~a8E&$U%OLyQEBi>2IbD-a92T8{kmR;Q*${JZvJ!PeMf zPQBI&j#i7?Nb#l@d|b4F3B{PlwD!s2|> zhf>M#tq3t&`wpH!#r8Mg)!hze3SsWc!L4Qrv^AnNxm_-5s=kVohYV_x!~|ied(iflN1Mal7gOt* zPd(GaUKo$)8a9m!hjy3gtb|>dTWhdV73ln^t$WKh>owGm!>E_)Xf7g%?;YoK8jzXW zHI%Qufq5$_$xTBww=|hXIR=}%RI$gNXW}`2%y0q4@<*(M>x(i8=oOzUJlET6Sk=NH zj_KzaLmVtW{PZbq5dDdzLicLinV*+MWy~}u%%@aBtT#+_mo06So1Gz-!nv15jiv8k ztuyh7`a5_8TvM?pgiV2sS1Wrn&vmf3!?!vn^mZ=(LTIdguchY$=?4q`La-|5BzM4#blhGk0&DJz|SDT%U zVe2NV`LiyQ$p>%yiZF&2@YMI%E45#}J8yk$FIaoqUtF?}zN@N($|7ehTb5DbUO<9y zz7Pv6_DPc88dDJVyZ8w2nCJ4HyUynP%|2wg9uADv2a2za z#9@tak1B88T4Qy2#vXIv-)_bZolUsXPW-f)NzwmkIfhO-!3g?A(F=w}iX?Sl|M85< zW%*dP88>Sa!>7VPN&mZ-`NjuI*ko)VBh{8y3}HTL%nx0~u_JbdKG@J#we9cgG!8Cp z30i-MwYYervhKQlT2(wR@%j#%^nx1SRYLV=UeE3J0g2sVcV;HXq{i_x9rO2C)8CIW zEvlR6b%Hy0eAs4IjMqAZm6bDYEB4@94gPH!3~o4QNlQKq(Lc2A*c3}d?SjXNLgX?Z z9FmD?n#JDYmFp=@T$=9DuCOt`?UdCxTRJC7TrN^8^v%-1Kw240@B6JX@1t;U ze&a+CWm?UWS-R>|%6kGG*c#Rv)NMbxB@MwBKlw@cx7?zf-fAW(bw^C~s#e^Ia}#{B zWX_<_$ot%%B#WVRYI{j1lZJg#dT%A{Q@xyso0qk_HkH*^Wu?PMbt1+0**P&KhEFt# zy`%Y%C3j=Uca*_X7QaK=79B@x!-c0IJXNtPl~-FML6RpsT0_)JGC>u8^-P@=UwS6d zShm0d-Df~J&oyskd?=*#>+~eU6L*r`=tSYLs*={ddcuGLeXXC_rt63DPbvB3@2*m$ zR19x0-J9q6?Vw)fWeFz!f1L$rKO{33{jmOYw!8-tx{+(Jh==TYWd7buXV6DGyhrl$ z@-&q^b9ByC(UqB~kGR8kLKQ|qgYDfqbXft9k->#jPwbBqxk*PtUJv?oe{=EB!WG3c zMn6gLjem!VYAK6-hmXsABTG)mT5=VI>_X73 zp^9as`<-9@PRGXvQDh3XYmJkI<%M*mL^6YE?^KSMuR1Dx7dZZs8Vi*2Wq}zv7$EL5 zkG=0ECk5ZV!*c-=0A4Wjx^X-zI34|&)gWdlXgmneCBcUfT%K<5Z*VeXxSlh?yMN|P z6mVyBWA2+Q4d!1!EpH)8-VylAQDt_3^aH$&vSyNlz-8FR#;dyvFFty?nRR{)iomMS zT`t7~D?k81J~|3`qye~227g@(=zgwvM(a(nos5}bk0tgWx zH8&B)r4V0JOL5stdMGHlJJ3c8)Oa{xSdSihE(4&V@!SVquTUdK4Nu#w0fxT@#Vzix zuC6X@Cnx)d;OZbrckp&^jtDGrUjsJgTLw zCo4JW<&=+{IiJb1#2F&!6Zy8jeR-zQKPC5^{p#Q~fiyuS+3{?^eqm~G%K5GU+79}8 z+{*TstDu3E937B+6uT6-c^dA0o)LX!k@mz&&q9%*rK<-Pcy z!XikEc-s1Htka%ThOWu^xorn9i$BVV$WBjFAi>qX>E!~<3-D>$Js!}7oS&to1Y+(O z;a)_zlJ&xq{5zMW&$JvkF?YBx1As#6EtIV(Z=aE6mVwFVkn@>F+M5U*Fn2sFI4?Ay-j&nj zwAMl4nIOCT(WNa1jj!rm63Hwal#@A}g?`R^vRZBAzoZ!HL$ERhLof22_Pk2TDMt~e zUJe~V1|U~cqyrDEffOWm{3*ou$B$luxnulG&Sjz-a{?$QyQbVwmDe&g(JZ17mu)kr zZ_8qqoX%Y--D_NHHxm7XZLQliww1eIq_PTHeJ>e!7aD`ng>Q#pZ=?3i{p@>{nH9qT z8(3zny*n6rGlA<>*(>CoA<=nBZ9IkfV7=9?d~SN1o3`or9M9ZmIybfxkt|wvV&kjy*=-m2N+N8ejt06kDy79IacuAUU|(LlM}cg?fmA>}qs3&Kz*El& zjf#r}v@dK(o#mMyC1TbZ=vB%PxoL3IWp^A*4oO6(h3&lO`$&)R46p%;Betr)NqnZ# 
z>o$ZD1dzQ<6Z)p2mZls1D()8MKqzOkXTQ~{Q|h*kKSOow&otw&UOm-%on)Aa9S-kH ztz?MSi>F>~LCe?1o;>KCrk@{*;PMy2mR}V57KiLGuFAZ_eM~?hVV&Uct~H849e{Ol zej~K1fQujtFn3&FWgVf8b$12P{a(e=%%{_&q*T?rk5UdOPlq~R^z-eUcD(WOefjh? zC4%Jzl~t<$yvy5-MqT=T3+L{w;;UHdoHAN?5juiq!ZCV;e}cVl2+db{Z0;DNH@SiY zDs|Q-hl!t9F?Ql$PKo%!2`C?!lY6J^$I)<}VKjZgqhVSjjOuR_=b-BxJ{cfJMWJ{L z%!FRrM`FxL%|f7|$Gz_?$2?t6C(fS+fCm9xBx*7_JAts?7&Pc=Vir)99MTvWb}Kdp zkA>DPGg4(_sf-?=HfRu;o!notE>V}bUF&MCf!iN@d&kprKUG+-7u;PS*pGry^Ay_G z`1Be)mL$reN$9L5|K`Ex@eOu5_J;SdivW)E+4}fc2Em(!+pEe=DcHs?)!d^^u#{BP zk&I@A+~W(JI`tP+ISP7C&yd9vWNS{|^?>pC!WX)nFdC_cy4kao+2H#K0Dbx}JNj$E z`Lr!Thq=!H&!M1@>-Sc*!y^|lfWyZ+d+H}aC+6ppTky0QU7%wAoZUJSe=g(QyKRyx z_GILSNZt8*HCfSX{yN_-M+9nr?af$tr_F8#3&t<2V)wIgMhM~|UU$d^4F|p4QKUbV zFGuV8y2amhj*P9ii|YCCdno6TB;#PQz>VQRGc)UGtS-@snT7!gl9vOFE-aG_fN$Za zm%*e4-+hrrdJQ~cb0;Pa1hso9)??BV<~a%xhzEqrVZsPl<);mQZ0-}K~P zrM`B&t6yj08qvt~3M*_!Aq3X7dm9_2N^!3J*5}IRERz_1RNDoN)tzdWKnTL_jlRS~ zXawozkaW9K$^^Qf;GGBte^LbfmlQ^g1HtX;quVU$VHG(523MdjOUC*^Gd>_~Ub#tR zl~&&Eq#3E)?;eaY#(U26l{diP7W7oKSqzPiI3gQ!!}!1{!s@-fQ9|F5*_x!lyn+hu z4Pz1%Pa%`QPCznU#Q45ZO&*e~dGxUmLEqi@?icuqY1e{L$8;pvW+a;w%i_oER#=X0 zmgG(L{frVH2O|Ww6kn1T%!p!$*Ap6E8oa>^M!Q$`Z94BBM!sfSZQ+Bo^C9cuH@F{^ zIs#vRBp}N1!GQA3n#HlbgA6zZEjtTi)NQgq4KWjZS#J!DI3umrG$GxOB<}QGvmYwiyH`v=k4Wq%D?}^$0p%_q_Pa@EtKID4i3ycoo`0yQ*xH$YtMv1g zPr;o4B1+}hxpXNSR@AXD2vq6gL+YXs5dnM3 zKc8i0zc5`Wz?T)ZuD=D9DA~{Up@PcGDAWsne4+TwxqtE&l)W3kHv|Ogh!C{<(#pC- zh{zbk!qDkD<76w;4OoZhVK|l?cws+uHCr3!Zso_cW&CaAc^fvhU-qqfg$1ve4o^8ohB^Q^P0l<>P}vilR?=G=%Q5gjr+;-%s}qidEm5HPBB&#A#-1 zt><%Idpv-375lA=m4ekpi?tVND9#J}`DW>BLf8d>N=>4Y+X#D>tkvgyP}qz2Ebg>V ztCY51EjMUODz&@q(F3*W0I?4{E3FA9pfExo?t&9w1Q<&gcLDDhFct-zxbr<8tOr3L zArFD=Ak2nFZAwHE>-&c^K?|7SY>X!;3;C7rQ?ems=Wa11#B24<=+qC8=lX;(7aCX@>hn zuJ|pao!>LEBhqv|wfK?z11eXu@&sFGM2Ba@w_8W?8*dD1ZtyD84dV;u5R#b3InhDU z5DxU!;#lNR2Fi(~TdleGG*C3!|81-5vO=?P1bq`zV7#s%e};~bR$|!$jS&?={^Gll zH1`G?4?`$EDT1?XSKah}$sH(D-j_D3<-%R+B8V}AF-s4ioNqygwWy1&9?A`irMQW> zi?(CvIk%0)Ct_R?zX`z>ihxdllrxTlVq7fHK!q4#^R9g&SZR7 z|5$?P)f3F|uH;}1l1t2i5*Jlu)?KmS=74Y2EOl&rN8dhKhW39 zR-9~0rA^eP@3iW>*(!KtlHgT8AGotV&v5ne09-$ScGf3TqkhQ4dNoxG%TggO~5 zQFCxijc}w*BROVEY2bnud@3b??_XV^_>5~k@LIV={1yNwd!r0Re@Rh~F4?ECE(o8T zV+;h5f736RdT9h$rr5H_KKOGU4fi78*HIdr>1FHh{aSP1y`IoQCIbFn|F~*>1TOPc ze?t!qdI+R}=G8qu7XKT;x5UtIt&ixSgy26b-Q#y{6@L@)yjS+QsflUo_5U#c+58*Y zvMq@qGe{u_kjK&#?EAkqg8;7axCmlXf%1?AwjoeEyRU58`q{W|@NYVQThD%;e&OkN zyEP)i#a)f(yEbG~{(t$vGGC5SrUkDuz^1K(JBNp{OV_oos}ixf)%u zA272uhLAcvU-?y@Zhz0qIkdG9dX{4|=>T1(HiD{iHBB!pUU3J$lEz;`!IuhXh=?g8 z3XOcrASV5(zcb3^)fbZSzKw^@h%zqKL2=2;%gaZ!Du&UJBiRjSYT~{-fb2ua*s0y! 
zND^{ZOFw8IYfIpMS(8**-q2TKbIGWL&B+4#*xWQ^LXsWP^v-Bm@-7udtmdYE*r0n# z8MDApvA+5awJedbL-Pj;2`>&&*DP0!Xdhzna4=EzT?DF+Fh^~`Ni*RH@j8m)mZM~G zP;`~q@>HIsf!{NDb+!g<8 zQpe=VdlFyP1hy|d!k0E{H;J6DZuCe_Ztx+MSjOHN(VPHO=P_5QAZWkO=imuc!S5@7NPt zUvm-)4ypBL- zj(T=jSaU^VZ0`i#n&&a<+)qe%jnZ~`Pm!*UcLfRtLKXewu2-isll6ztQ}?L)IHo(; z&4}2CMGqfE=kY1J^!<`uC;iTA^tY z04@x*ij0(E9}-*9>T|7*LQe%vfC3@WH+wSbWig{UH{=am75aK{1o)0~(H)0>-`HN} zL#_gI?GRt??b`b!00Snb=REBN(v9^}@JFLEi z4{8t{02dnJ2iZtMvaQA-N>7=M`@vk&JOIY^QwARt z;iXMSf%8jYhNTu!`3|$W5ll4s$p{)JaGsq4*`ibbuzE}umR%!^r z1vBw)Bp^)LBXx+%xwnTEOMZgN-i?2?qZN(a@SUACX5R@@k-_>KKk8hD@GR|{MXW^q z<1Pv?$H6sKeh{7#3`9CX96YE|!S|w&0qr*<*8&m+uFUY@lI`Em+5~=ezQz%--B#hPSAkS{yMgCutLdkG!JsQ9&!iA~v4DHjYpY%(kMTrH!&E+gnToPvqf3 zn^59oN+UHt&6f0@H+sRRLrqasfsjYkbKbBkvt|Ll$x1yLFwu)KKuv~FYY^AZ?hOSK zwY%g@|LidB|00%=mpTOlu1Ap0Ei4k!c6uqK4CrX33^^2G-vUtQz600s%(I9jQ3?-Z zCH11tx~hCfBYeS@w+#g(D~JuD6lzq)mzKzc;gR3qDJ2=JXj}VcO6p-L0n~^Ep_BCZ z1MTVLYoGnb>K7g#sEqCeGWzvFU7cPg9NXzE;QF zM}S8A5&C$ozoy3WYnT4yC27qd)kZQnr)otf^e!NqidEz>r^?o3Z62g6Y9o}5daZAK z8}1i*t^6@s-x4*{^l*DWCuPTon_C2ZN)@euZPZ`e1d$Tdfy3i~%vFTxsNvbqCzb5K z&=rNDHYjQYYYnl%=`ucP-%2#BZD(Z4!#?bl3C-_H;<$;9cx*->?XC2x{wb76&O-=o zka9&%d&&}v?G3>muk63d_MYlh(SD{_Y^SF_r=Z(Z#?MB zSfAm)N($KTl9JN*t)1KDmNb}xNlJ=IT-~2-b~vvdFqQEA9KPQ+U#XSCx=nt$bk#H~ zsn4-2Kc&6;a!U$JMN$ra%{KUR9tn|*V@fSM4t3i))ksD?ISZ5ncfTthzeu{eTDg_? z`L_&%G2%an5bKDEGcaEalarsNc( zw=YC7;~`+p;-5Ish-n~^MjhuOIs(Us8gYUbhPl=#*Pa;ymJbGF{x$$JynyxdZ#xYC zid$*m#07>OAw(Qa6=gAb?AV3IyO=j3#2x(6jy(k^T%}K!^wkuUtOv z;k*DdyMP$cudTR54yuq#^mxM#S96F6xP}cn83v;L3xJP*A3Q?~n#C`SB6Dm%^|xj| z{XB%&H+xY`r{_)!@t5rDTw`{OCoK{plfgD|Ovbk^ zQu`pj(bU(->%~|qcupAf%RH2$wp1KjtDNWm6|AU<=H?!O>Cj*)@6%wF2dqhB)Vo!# zBf~6*RQaz!&V2-`hs$;FKHA{NiwgTE}Kd3NeAsROK|1ZoNas+SMM`! zrQe0ZQx|{l<%4vW3KA2h2d+fyLPSf;*f&U0do}1EcH-W%c&{!lokN3qNIaBxKsBUx zP~{hjnx6Zrl8$l3hy}euCAP9>@2IWSSZrJQNyoDg6G{j%ElyE7n}`$}YY=vk*?#Wi zlU!XbQfGPzHG?DtS}249Ds-s#-QP(3*)tIZ!z|hh^hW2}xXf^k4A!`n-n%Eyrl|m2 zM6(>s(lG*&+)M0d|Ac5C)Et`xHoYEavMP zq9^hFOr!VEVTgyUZehyqe;38_BSn`n&un{yVTm;Zva2G;R z08m_0b$!KyApqmo#r&r+ULW}DmI8|P=ONhTe{K9d6vOx?x-l4OPxt`exbj;w(yN)b z^SrqSHuXwRgPYU9%wjy%fFc+++iny+i91IgDA;i#XKo+k~IIaXCs}@Ir+dp8AcfjW)AxcATJ7yd(_*9JTB&< zDZJGsIF6|>5iwQ^XHs(Gg^E!TOptE!YcA%rzd~qPhzgR0#+50KkFs=9x4GggHi{iJ z8Kw4R5%e+G|GT*B^gaU9@A* z;KBsXtpE!&Tm+bmX~*6WmF76KR~=v1$2n1;VKyjnG~^{RurVA5N(%6y{Nce95(Xx9 zLM)>26stmO{qiD8QhTO7rQ9u>WDWUkn$}xPlz}IlzwS0Er;v}FS)VhO>Xg)&G zS5eV;e#coyL|FLa!(y$NT|$s@zyaHMn2iZX$s<=#(^TO!iBF%#&?_5sb7DPQ$JK$$ zuv%{CSdV444ArdBr`Og~#|ow0+|sU0%<1ozJuzGK=yj|n32g0_rx5R~dycz(p9C6I zP>(SsSz7B{ZXwyZil`H-U6T6_j-^5sK8NwL*q=RE!HqN0mhs>ar=6G;(Hg44NCgya z*%71V5$zh6HTo{1$Im@+u9`F=q=tg=Rvh}5QFo+>=OS4n6*rJtUV(Dg0K%yR8%%vp zGal0kMfMjWZTzL%&IFggCV@0k(;|E><~}*tu%&jZ_ipf_T+5j1HpH@Sz47&4s>{2s z!gKbQq9A7RSpJY6ER4yuLl=TPQ}HSqZh}U`}^+BH?F;f3dK{GIEQNHK!C9iE-Mul3C%&%B3%zw4r0Ze;spH7%sii~B!7~Hqg{)Q330{W2s*CEqO$fe z>TfT+_G&#<6dql0Dgc%I>-z2dpmeUi@K?)khy4AV_*%EWufLxI|7!f}DQHWevHk<~ z@B9BV@XcJ1Up5>`>Dmd@`2aq{=yRJGCvK#Ce&AY9(N+SHtMP>F(*S4J@iGN<-d|?{ zF1#tB9yrGE8lY|%D9oO(0$13Iwo-_2zla*9j1Qd_7+FA_9)KKnqX$>}B!B}0LP3KA zKQtkV$1~ciiDKqjnUlCXs&b4`k8}CggIx2CY(S2R>QUJ+y0k! 
z9hp#@Yrhgi)#sIo4D$1_o>j!af%|%$>@DRXcPGh@QzQ<{;*ZJv{O(CPk-yy!)_AiW z5W838Od4hM)uhBZZ^Uq^X|3RFkd0hS)GK1oZd1c^U&UtoXTSnjFMVt3Ztex{HIdIr z+wf{5O(XIYA|Bc^rhSI{`yB3tWmPuRq3*N(>>)H%*=m#D@m5<8w!1t!(i%&lZ#JtJ zyafLXJ?CODUg_4q8>@6hzthw-8W_lOys+4a=Wy8pOQ{1%liUm8fMsYDfb^bdUKg6&H-T+Rd=mfJpxM?bbm1=(zzPN^Pe}5} zqqTPuPARoCkL_=r&jj9G^4-ZIr=7+^reIq{A2(#2&Pg=FhUr^^TLyL{3~ipo-ny9) zOP(!LYORsm9&B-OMYt7m#@eSAH&gAozJGOLT^Lu{342;%l6NkUyTnOedEw}Ck!kMr z5<9jTb0@WiS4FEVu_zI;}pxBVaDm!^w}ZDxL4wA=!1RcH=&C! zODTml!SGw<+JnKp9M++JMl&H9YN4})8lGPrEmH!sV=lClQ=G^q36E-5t|X+SFZ4xO zF7&GhR^&nFxd+L&HqdAvCl0+*av7-m(OpHnvJp2NV`3MbSWarx%*kP)-EwhI%qFz7 zH${nVlnYj{XrSuY{U%@x_-jFzm~8}>k5)n+mqw+vZIU2@*FNZH6zIPz{Gmz4mp^YX zauV_P3~wX{Fjw3%Hi?5UyBEbDqc-?nyEO!X){-9o0v(*=MPQ%f(Le`QbnIom%Y1z! zw34XwR1pP?&JbY{!Fu>XN;1iG-;!oc>Z4wp)mH=UeN04Hd`}wMV_f7K+BcD>HWgQF z(|y5Hbt&+zE%?5OUS5rE#&B+)vT8ZoEfUL%NOpW(-$6+ zmNpt%d1bn8!>qZQGNxRX=fFCNdRDUN($HF}=u$}KC(s%9dR0_bO1z-ljO#S%r~2hT zr{1ibj9m6sacEf{rPB8AXBdT#G(0Y~JMO)vfBl;NuHc!v*{zz|tKJpcf;HKdqLCGKhxLp?fYqp3=qpet*ydOl~Q|K-^x{PFQ#s?hcl{Q0pI%3SvFFk6A9f!l{%jehN}Q+q&;BU@R*tutp> z#;udmQUKx^H$Q}Wau5ah@~H*pdAeSCWxbyy zo-|D`Vc>Es1%A8sKLHy8a-9DOm_Fvi56kbnkSTEy5&6Wj&FTf4i2X#QV%a%|w|_3X zv-P8PhibLsh{@yqQHLw9niTSCFR81mjid(-^EJeXX+5sZmkR08?T{}w(bARBUS@yZ z19G4riRhg2gpV&zFt!K6_a&#V&S@u!=bw(r$=9wu;S1ClZCxm~6o-dMdq@?Z&jNFK z^?IG<8RxpKIT_+-8C&OEfYJJQ-b;>!-e+=UA!6_W7@9H4&auIR*9f)w&3Aa-?*^$5 z#3m-VZKL6{d*!PcS|OH-k~BsVQekl4%Xvp?kHZ7uOX9Ap+gjg8y(^<_6i&-S)D*l9 z6sAw@w7zSJmW4=lRh14AaxU~19x#Ke)AMP-f9;-4bG2pT!qQv#4Ig`7OBy&wAw_uB z{@Q8jE_q)7KXsddganA-6i$FhIksmVjoq`M%!=Ln8CSCpgPdCh#_n2Ddc=yRnopg^ z%rFAPP&hh*QA++NtqQ$m{M8A_{L>{0 z1a#Lj>LVW(;u>q`IOC$?HSRy~66OEkY zzAwp5`|g@PI5}cdaCb)qaNN(G^59<+DJr4WLtTes*rag1QSfk_Z$i%Pf?wFFjUV(- zy-h(YiubOvPA=X|6((!y^V=~2wtbG?6_4s-S?WHxz=akBxwUTfL9~BTl0+Nf7&{Kp zh4cbdhiAn-iD<0xKX!rqTDJutxi7Z9$~{YD_ZeV{SSolXJ)QK~>)DnO*La6fK^PcO zFExRb^TGnZ6webD;N-{UEn3>}PCoOZO9%79tfma_wJVWr_i;*KRw}V*+m82>KM}%Y z&2?DBbW+BN<6R1J4jqVXmD<}S@edJYO7xO4vjmekCKA^&ZCp}A-zGP533Q=SP`lko z;{*I%l*fj3n-&*hiCgK`eJm)Xct?d&P}E%J--5*+y7b<@a~0lrvhO$j?wWoU1Gcv=ayLag^;x_gP!<%6X|2RBc0g_BwBL{!JHQZP3g3_GF=`&>XuIT z!TgIvHo^RjCjegMV*P-&AelI1_i&_Z9;%xUk znMoXP?Bi?gSSNdliqgH?5G`8=x zC^P;nh9tINnJg^w`5V!LMY_pXk3?}b@}`rEsQmLDW8LF3Rf@MCBYWPM%IHB)wIef& zj*;cw*P*#`uwa-M1oQW7M`+qfB_|McRfgDI-hKU{EJ>XOqoC!01=RpYx>^);!jmq@ zl&5(VnTuzeb8WYs#HVR;h9A#IinQOkKP25rQG+;a)(^vq45BHxCH3%<7+b&R?jJ6E zkC8k=T1-~}iM_;eX|cNt2VW9Q^LX8%!>K6Adn`7QF$b+l^HC$SV_ zM$t}MOm-+=;DmW7j&+(`jP8YMFmY_<5_;G}5W)#;w{W^fmdONC2UHblN;o@izpIyz zakH)$emh^%!^lGE6Plca{le$_3^2q%Sz(HF6Q#DygO%9})`3a@cq*&~%|DdaqNzu3 zGbU4%X=X^_9*E;E_0xmlt`Ku)Ho$7H(jRW75rUuoq1MAIfG-epQ8`{9#QY~AG=KuC zBBoZ=Noa165_)GlUu6A$Ol6BcV{S!pl}AJ3nAh#@hK`3_M1JTC6zO7Zz5c1haYXi8;0p?_BC4wO>n#omUnUY(&*WO$ z7%VNNthL%wJTh1D$4Vd#RAOG}8`TOG(aM7s+=ktKjUN+&87iMDH}Eq-m*fp!F=&fg zl)p5UAcu&P8i<2Jjz=F!cX;YF-jXFXr23df?H-njbKp{^#ctkyVH=rS##(_Y(96YMenr+s0UgP#Xx z(|2C6;9G_YhoMr|=Nb>(dUqRTM+J4q%CeK*Y&ciu>zfovOS31T*(|!7B_=Q%VqN~B zyL@MdNp4tgY7oFx?f(i9J+@^wl&mwAiJWlfn2~igRf7oI(L+Z`-^a-wqCx{di+QTm z{}4AXiCES8ZfyaX8w)e7Wo39aeep%$#W%%zj0ck%l9>B^s4>X|Kx<4kXuuBK^3}OyKB69FF$2x&lJ4h)?}3~XF#DH2*D&g&aThGVV@i9rEWYMeeezst!P1xz^&SD zV&l_v5xdjfJ3$ORTRxoj7TeIkB)9ZOK}mWU$J!?KRNQcM^ny8PfXpE z%})C${w>}LAj>7_v|!kk@d3Z5Wuw~EK+mstBsB)#QOX!zdn5V9(C+`^>n-4-YNGz} zySsFUbg7im(v5=Bq0$Y~4br_Tf=YLXQj#LwtWqM~4U2Sl*YBb}&-=W;e|&a7+_`t= zoH_G7XU+_J&%Ngif*8vmcR@qw`~Hii73iRjTa2$g0u0*=&$_-_rsv*8aTMly!qN$;FDkWiI3h&YoTA>2&cssj z9dw}-3n78emSK6=B)6V@1&lWMGN6HOl`$6~j1Wh2ZHNP2ctDkIL>DFRN7$X!DYHtt zJt1)N^8jtWaD(XWu`|HP3>U4Ao~ipw#_;1OhxYFEC4Emf-yz!h=66NR8V#`TTLdp%#cbSFZ!)pm1;p{9( 
zrMY198{B9Sr^>(He^bjHphNSb4cxQ5S@yEOWLyi?zP5K^YiiA@H>pJ5vvS`174Ym6 z-$ONJy`O*?N{0?l84X+W$4>rdV6^LcPLH!=uCV*FyR#7Nc)Xl$`6!51Xeo-L zxCR0b<-5_VGxtX?x+iQG7=1cDM%TvBpa~VV8Y_1Ko>bogQVx&WGd``AFKl;;cI8BW zc*C;|eWT5Z; zyydoCGR4Sq=POmAeZlajey+I4gw{&ld-@YNrV%mq2dxj6?%b`z`Uy8NGTeLdh%oEt znN~O2@uNx6g!*@PWerI`e(Y11CMEsZ*ek>=@Zjs$35_EDS#+rvTQ#`?%M!~zr`ux3 z$|}y8BQsXwm6M92Vr8cAxb)Zd1;_lSjTJ|>D~Cry_9V)Wf+MGhNl- zbnqcLHb1PJdlneH6y+D0*0F-c`fm={{Ls%)mpNT{HtUhcW;8OA=J0saP{f9+yI?ck z-fPQ>9oIbiym3Uw;(SE^rIxSwm!j-@M(dNVBT4Q_4%(rO{tK zDJ%JF$Z%e}jS_G{uNIzYnV*W?6HC+R4Nczr6mr?taIw8A9He(qbNy;1R&2{4zypuW zmNRqIsDg~?aZUWl;?7W`GuA{* z)g7_MXL>ViS;(?uuWj$f!8IK|nV`B7EFG5&k%l!6<@Iv;Z3skHcadd9w>wSQ>_elu zOr~gj{*{?-oj!w^y~>-TVmNkCpQefb5*xL*lQ^es_o(VhUR!pl!;6YHQ(m2OTL(hH zxjDoA3nJdDB?PEN{+|oI`aP7w8ZTav2Fh;QX+P=sHmVm{Re836wU(rUXAq3f@YQ~qbWz?6$9hrOEGI?AB9xzMdW+viW%E%(LtTZP1Z;%%z{|dq6;Hgb{`k=d zy}$kEK{Tb!?uxBsbe+Sb-qAS@hm6uM`ZK3kt}#lq)@Cm6;YCGz3(fe}V&#cV_|8~HW_2LnEaaJ0E!K=%_b?F1$o zZ|QOUDv5c-&23-JMD@A%rh3`;Z^`MjT{fE*!-H!RVd{knXrt~sHgvf~ilc8-u|8?n zI5c)1#;KAg!r_EmP;v9m-lW<7fb#K&Pj=vc@JG>vF_z;oKW~BNipP*!X#nC;@s9a(xvB}rHH)qq2 z>9)~B1R@0&g%|>ZqAl6pPJqK95STOnw=8x7uG>aNJ!r2nL4@eR64O6*1UJ+tnJ61( zF?t^LDA7jWJ@J@sBY#3Gg=&JYUziBn{pZ?<-At!rHxOs5rrW!y2NkGBC(*J9dDJy+jDKCNUlX(7#qhn{|bj^L9y znzqQ!Vo=F+y4qW@Jd_3dfsmnY7>+vU?YQV+X?b}dVcf{8j=s-1GVOT15yz}|GH&Ei zK%v$pbMv`>D~Wr9BQhr+NAQ?s{)5;pA)BpM9ZPRbEdJWsPan zZ*%velxvoTi(j6<6s#jW?;hLvKy)&Nw9P27^0D!d-C+69b3*o>2LROU&kOZI_*`!>+h_ftd3nSLlRBHRB6`64`4$QQ$bW*+Pove}w%VHLg}?QQ-jhh|mxstm zKMtex-83Zg^W#wAl~DiCgPFQCIC|u%t}3a|H@G$goDS9r5t;66G_OL$k%y%BsqIj=08Vjx`XV zR~U41#j5cixIS>lPpg)0rJMA}(BcytCo7S)8AboT|Gm1b>2mA(OHfdpI!*uA_a2fb zx>(iK_e*0!LTvHvIf7&`Z}16)XTM^-R{@TB4+Foj9Ef?0W zcNOH`I|BEJ4^N8Nj@54)2Az0&-_O&fD5;P$d}qe=3%fZHlLIu}hkTeGbdeBFQU@vd zpH#s(QU7v z*BiBk>U|Xy*DtOW?@d-4YmqrDz7bqQ-^c$YTN4vRT&(4waH8@SM^L~-b~9hS@2%1c z0rkwUaxtJ5bx>`|1J5I$W;GTP4P2p~MtMhK*3h9XCO15F6FAVTc604by9z44K5b2k zyb_O?)ekP-qF4=z3mK+ZoGJ+U%w_NcUWD#_YYV24d~n6?>g^b!fB=A}+0ZM3^6v`8 z7hg5#_9F`PBQybD6N@&ba0QGUU+_c^G2~gS5n)N@xfEXn@)( zAV>l+%rOq{F#Ah40J-tupiBDQ83cCQ=*CfjeL$rVH_}7sjZknvKLxJ#U=si%;akAX z34-iC@=C55!_58luh8qf6YpYR~oN8z=B>u?;YW= z8^7bS=`8M^KM46?#@>}UM?y&oF~mUoewxQ+jD!C~+O(Z!Ht8MRbSi5+`v$jwpvvG~ zgVH7(Bso3mK^|PEIQ9}e=ZkvvM!U!}0nQrp1Y8$B8Dl{@Lrm>EvQgNUT1~L4CAGPY zP<&2!3)q8Xo_sIke~^VTb^U@phD6)WMIdE{V6gD8%17>+T`LNLeTs)ed3U)ZK4Lp4 zi1fv}^P5Sk;sebB@2j9!FYGW*pjcWUIE2=uyK&k@r!4dG+bFZ6JRi&qKTM;4&+PwN zRW~;lc5injd`WR*JOdoQaTD{^R>QNaUI@mCk2&Iyn?kOI`#Iw4w{dvXnuNdEww~l< z?SP709 zp;NCfjn~J6?pqBN#nxoJx(+1G*!z@-st>y{#V=D^t9>T{L2_@QX?mjLm-?{&=KZbH z>DH5}kS#dbQgaTq*M3w(&5J-2{bPVOt^Q+rN@%zsCBk}0zwwiC%8$BEGVHAgPv5Ux zV*(@+{w~98Txc{U13*ujlEn4BrBJ~NZlTZVR6y>?R?GBT-|4)%Z?@pga7ZCf^J=*G zrXMYj{-&DGM>BEZ{i7~|?5}3P`U5L>d&7R;&wI~%IeV!h`WFkRtpL4bnw_#O!pH`{ z{WQoJAGg9~iW{aN zB1_EV#_<{KaMvY{Rez69qEvu`Jm9Va^d8g&Q|2GBr;={>;5gV)Al^LS%SXpi30uKC zDHc*8?4oicqw51;NXkt(-4Y)cGo2UU*Pnbo{*Z5mUQswX_G3#!S9#B`r=`a^SHubz z=ME3>5#PoD#8$*nWTm1wPmw}q=5l!K67JC)0EmhT*{9*C_$7shCD!*_@!HezNd^r* z@1$AVa3iDLCggNSkRlcd%>d@&<+GN@)x4mX19%=&fM88hGA?Hlw~6>zQxgwQJqRt8 zmQ1@#X~qZ!1t1Ur~$tAfU{H|B~zv1S`0$Ui4O_>_h#)`6wbl2%E0S&6Ch zTjMjKD!19RfcxRTm^8 zetcy6AZ^`C3Y|K{gBY$*2H7!3De1vEceP=e;V^{l=yErQ6`_>)xhtvxf(htwh$F5$ zLabwJUqL{kehVU47sFByLFBayM_>Q?tm}?d+>rKB^e7cLS{nb7OTF%eUgAztu5S<% ztI#(eM$UEFjiik{N!9VZHJDeXwFOrIj%3~mKm^Co-~#mUhJuX4+|&jRvsR)55uWC- zijq{X3YfAIYbwJdWxxo~>!d098Pu@P;U^a}LsVX((1*_sKkqK0uR3<2ZM1IGK2zws z(rUtK#4>WgoahN=T!pXGBKj*>XM7PkbF2942CaGleCdhG3z@w=ZtDvavDoabikeN{ zALrluJJ;4t4Gipoi75AE7`#=(kLsaFBPw9V1vuhq*4?g78F_lh@tx93(>$i_C|Tju zv<3zOy!gG*)BWB;FL*z?I!?r>`hjwz^SUgTY$i_O)+t 
zJo)@`{2-JFzB!gPbHs4P0?)K@A3;Cgs6V6?HUey~{bla>w?11QPB`gka=LfUAnso1 zB+F=yJ!jnhx(nR=0&DL z5Hi*q5>Yi186PZJ)FqH~%frSzrkzEM$(75F^ZP(-JKLl1^|-knuR#fF{~29-Mroj$ zU)U&egT~zl5cy7#*5#TgvO~#v@E*NtsTsSqtGrenM__gI?3@0PgA}*YrlauavLeaU z;;`Om06sFpJ!q+(qgcK-c-?b^=5rY2!s}NeUAuUa#LOsevq?5h-Lp2MtG9>iCRbXges=_0T>%!0yzD!&JAqPFrjsP3haz@ zx#B^99PlCmApJXNJmha70N|1YV-I0Qe+%>|aKhgqj{g+y05?qm^4~`^1P0`$CF(n= zz#slNa4@>$pG!AZ5N#Ww+%0Cgu_k9`hzpDM&-A)g4 zn>A7uHfu>N=OaD}L%7f$f@9UhMtoB9Zxo&JtC!jkWN1;Q_?7VS4ES3~he)Rp8YUD! z^eiK|b9$#8XP659001GW)&cm>PQZtNH_E?Mo=?>IHb0Sd+lybXf{??E;6>H|pmz>z z3EpX$%VgGlE7*6k_imHw+E7u}?PaEj5r{)&J_K+Kg?R)Ko#O>5^XWe?GR#vE&mn*6 zzlIGC3QRo+=rIBUk1O9x(Ae5cv$j;#O*9HBJ;=BkA^}A}13(!l0VbA za)3B06$KGs$_(Ou!wKLTe+T)kmEYIyjDxWX@LkM9bYTCHF#v=BTsN0Mnf8j%#18rCq#FLwo4HI3V*Kf4Ck}NA>;-HP`F}Jcyi-= zk3*RXx6fhxjA0u#mX;YeFx=?&Hw^56Kh1^k5b^8^p~fToSx3UC)QiQlBA^W*H?pAu zfKe{^@K@4n?FEL}HY;GQ7zS3r@qU{|5Ps6#gY2tK(46&42TF$b$8P3r=y99Nw8qwRQ0;ZjZDS=$TSRa$thgs*#OQ%zh%5Z zk^w|nLmCHw`{Tl+8~Pvoz=mVWT|y86EkFeh@_l9GF=hNoBRMALM6Pu< zxf?y%*=sU8d(+b-fwiMn$uWJ8|Iskq3n=;7nP!NMtgol=4majZ`%y9}Y?6N5Zl-FZ zx0Hh$FVoCD(8!_6V_>>kS*%di4ndp*fc;~>hX6YwlZ%<7L;iHJ`cXCEK2Pygr0IlN zY8cx2%yWO`B}SZBsV%uk6-qDeb7V6O?JvQ%L6PaEO;XZNRFDMEFD z*|oU-qw21yDlsSLF}o=4Q8UMS1~Xl)%3M}JuSVb^4pc6P8L<#u6PJ4_{|*rgJ2~W{4G{oM((fvrnZ(~b*s@gn$wX9KPQ3< zU7~F(Eg6o-cq;M=!cH1*gI#>pS8U-qrcrtZn^=hT7c%eLtzIqlO_;o@eK~!8MC>i; z@o=U@@Cx7Cf^4EKwsn_kEK0P6w5!u@h|>S@2~5Ooq2=XcYN8ip1#NLsrP`c7Y}M=W zafb*wG{ncAV(6%*Y^vH;jHBIMdmb78iEpw5)m`9Qx#t6`k(l}zNy&_uhzK*QBjed4 zzfj&NX+!PaAC#B9Z^UZpSG#t*t}lGH8l0E+n|2-0;2((>=m2t#JAegD%VKT=7D`eS zkk%j?$sr?E$luVOK|xwl{}WTlUFbqp7C8YJ&0paQdP=E*@+I_1R&Pc>t*i;X>9~Nt$Pmas}+q>Olg;pHLeLZ9TmV&?7MFTadsH!Lt z1ON(>4Wfl_^k7yV-LckMsVXAn!Hgtu<;8@bUXWj|KWvY+^HLa_fYXoX4Ay@vh!jV< zT#Xmm_S#S4auAxnm9A=pwhd!I1Qmc0n8(qplukDDM}q}PDN=myEa-^RIzbE4a^vgq z&bjoKD&^e!URR$Uo==%vbrih7(I)K+9z9eSI&n~U?KS7@LItFle@Fma%C}IEPl2+t z{V!{6pXlysU#m@_`y7=XjWFNYYpvRz?L=eKAKuctdvF}j=L|#_q&Z6j{W#l9Opal@ zdd2y!HGjrXoFVvSs|DK1tYRc*lV;Ub+Bv#&&Dz{-YvZX&Bk4g`4VX71P&vXMe9-3} z2I7=tXOt)DS}aUM0GUaOR2vHRt}&%LAM!?g55%-q2{--_>?mjP5DXzoxr03hXA zkmHjo%?=$E85S23jb8f7(PzB^26n%;vbD!il?H(&n@2(07e^n((G;DJ_tCGswl2=G z7H?fYaWUm=teZv_5oIl)-}?9$G( zjsn1m{~P<6m}3JW7jG_Le?+pSf_F^S&3aw9`@0bXues~t^E+I5B+1D2_ z90od!d96R<;oNND_UQu-&JQ(Bm@z2Ue%Q%?fI6|k?*JI!9spi|iX6ow9Qd^A{^+=Z zkb7czGpX15|Is9kLvm_@gO8fjNFq>#>DY6+%?+IQbnVc8#Hb~44EE{wdt7nW7P{#j zb7>R4i58DhDrsYDTUZYTeHYw|2mtE!0nshStR^}8?c*{ZDfIC;_nEP<>$YcrVC79C zKn(#n4x2yn#vkl))4(Vh9GUFU)e%`SPS?~zfbF-K-R>C7+Y3#N*h|vlP zXfSQ>Uju-ViKbIh5h^RvvIS#s05EvJxDy}W(+P07yatVJ{_!Kw3d<{E2#ZkL;8xsJ z*^H3-TOG}u%sc<9w=O{nB~vtOY)yp1d+Mk-N?49uYSUL!me|B#67>I$K#t{$G#?Et z=e?p3m7H`unMRzv>%)SpWvv))dKc}W3-14-Lfs21(MDAqObN^8?{rS>T4HBpjY51C z{BIjus8MvLj$0m7@atns?X>v4^;OHn)X_; +}?Kh28*Lz9sl0n>>{{|J9-oImAb&H2+x}!369(J)b@QbD9BnMOXrRYB%FCorNUm6Z9bKff~n&M{?_gCUz1}6 zx>h>*>Ay<)D8|-GF1PxThCATJ&u zKd1CjJH>C06`_TeZIRBXy*<#egI4DTw`OntN}g_KY8Q>p|JD}dp9}r-Z&=k&{Kv%$DML0xxwP1|7fHYsuz(pdhJl2dAZ*r-X0fo0 zN;9-{nSR+o5%UI;XUPi$W61$HxCP*daF%d*ZWh}D35Z$~RHF)?(bJe}G5l@_!}Dy? zPeVc8N(!rL?3b6g(GMGftSm%WQSPnMyj0=uEkJ9aL36&DKjeU|Cu)iST1uZtm?qJq zUA0H%#U4&5td=re5Jj7nZHJTSRNF&3QKhhRw}?2mrm8pjXqRL-OP1iMxPY-MEUltD zHHlHg1qXchqq8B@!3m#4wUbG>a5QFtz;A$20V{F}?**T`P~16h;HTsL97R^6iVPP# z_^8k+Cj`Mo4&c1`2<4x6T@aBi4-q66D1g5i2Xdf@V~7zs4#;Ay4%$NlGvL5L-YpQ; z)Pr1O{^m=LLsEjN;JHdE$IixH^khCeeKVPb8_iY&g8mHBYy^M;ER#6q--bu8Q6SX( zhcax8J6VuPbTGGY*NXR{PAXL=4ZWjN3Yb=-yhWW!CSA^QKe!ADcivUiL{F>_^xTW( z6v}r zIep7pDa3hM@Y^<(Z^1W79Jk0zC~Obgd8(&A!((X}i+&|-T5hj1iJ{9)@)kgj2)PFw z#vd%zGME}gFus|)Gjc$tt3N@RePCu2!y>x=NsTt>aj`M2x{C%{&G2vYWyUh9Q$u{! 
z3<>Qg^6dj<2E$AseZih!dik?}@CQ`vDu7);V8}jwD4pKB+DY8R;eef-> zx&`;|ZZf3;JS-y)@*3|qjr3eKtZdLqjM?WY4sci@Y0_|n$Dd42TmK7VZ8@*}oKv;Q z%XBblJBTCh0MX5agJ+_YD6t@oz{EWDC!aJ(3IiBfk+NR-qr|Q<15{R5rPG{8q-Z;u z7iGWphtYRB0HtNR(o7qzixD~v`|c;nPC|l@2T&iT7jbNcJU=bvk(cs_dvgE$BH^@c z9G@h$;*k#I#%Vdm-3+U{9qzoWnmE#018_L7uN=P9v1Uyq(B+h@cRix;$8ySX%C|%< z6!guX0BCnSM3XEc8myBU7Gy;{FS+iVCl|)ZwPmZ=Ht>i(lx@cZ6-kl3XJ!%eGdug) z%kUVH{hodKhteRD8o#$!DU5JyO$4u*SFc0q3GWJ4S|Eew_m{f{=4Wj@57Jmqex9E9 z5B12H>hsQULXgbtm#B!dm>6+Ty#w;sMU?7FpIOeLA-$gbJTazmjEQYSi7ux2?N;l| z=5^EZ!j+~fUyaxC>C=tHo=#6P|E4cZ=Mqx~2YD64c#ii@Wah=f+r4Uu*!9J}PIpVF z6dCp#Klk~e#kT)tw`b;K%Se#s;T_F=?Il_pzI3y#JDLaD=Cog3^{sW?O?KYZSX7m& zykvyhf{vOiv{$eTlPdzHIe;W6RQCQwq`TC z!0U6fnSf$yB$)WRT@!iOlznQUt66B~>%4!4Ty@ykp02SW3GApWzAz=wt%_>zc_Oi3 zE#5h0QnXaoCAJ_h-q}U(Rw+=i)h+sBYrLq=I}FAT;J|gLfnfs(nevmwGY(edevL?~ zVfjetl&i417t7D|Mb7IlF46XL!JmQLtAP?|ZfX4`k1-emUW4Jzg+K!;1a3x9??Sa> zyYN2UVXxzLWKn40OFzz)$f(ZK`eOYv5td^3VDFcq^=(BT3@!{uR}Dm-%h2KctRMOF zg{7Gl+~&L}Vy9c2vl#Bav>lgmFab(53I97sc0qSAxiGk<7Fm&{_!OcvU&%%`x|Pwy z$eePqyp`kqsEw~)vP|u+ZJz=`hPdJsWl%Iops)^vj325Jf6S-vB+~10N;S&Nx?!$g zid*d{q-cNEp(O{WVkv^FY9MYSo`OMv*^_XS(3;eE3*|U=IIADI6kBZFpx$m>w}9!F zTFM%zK`HKu^c)80fD9c>_r47s0XVWVHmBR@OYm&nlf>Y#m(Ko>JDFY?=@D z_lM1(=_O4l#tm||+QLAIC=DR-b}+m+L`_1EDYmzDePOml0tGb=X*J_+W0FMVg3pl_ zQgu~6IW)5pVi6`PFGCx6P(uQae5f6(y5x}nm>&K@Z?3$1_1Es>n#+QL)~fh@i$1L4 zlw{5t)$zKK$Vqrv@XVBVU#P%dJ3}lr%@omAf*I8fgDxzOyD!@FB^BSS;NmTDrm;x# z>R+wOGFzZg{R~YHXSYF3(MIMN>Le@JZT;*L5->P1X1jhS_Zyr}R^%_z=@P^#HsiK> zdPp^go1f@i#9}-969-sN8btW=bZScP1pkeTTc^X^&p}*ce{{Q>@g`|QdS(1FPqP`7 zII&#q)0d;3IOQN#v)w-k+Y5T3riihc2_O6JGq?UsVXZv@v3vyoSgDElI=0dEDW-=_ zywmxLm`a($V5*0GYkReTa8}TZ%F(YTdGiSS?e@9Z9P#U>a$Is0a%(G8(Pyg*l)1{e z*mdekoTtuw=Jr&ax=4dm&U$-BNR{aUZn`ErL&jbg+L5;8+2@_;ELQIh{uL^)=mO#5l^4mbcBZ?-HFgJ2;=g zT+hQejn zR8#%YL~~hVAD@rxpFs&Ss~;IFhVvb^9L>%S`8h1TSo@eGBY>ORB{8J+cc9cppVxsm zU@~a%4|o7g7Uci;aC3zooTcEXKn+1dfcfnv2psbFRPPv~JVp71xq|R_e_-xq@Iz{} zj}N+4unF5ALffnV>fuH+2_i&DyBoj+|BO`~%h8bQ+IGt7>Z3C3H>qAX$xd+t*uvv4 zq(31BgR>&_tSSDluZ3UV|LpDmLSN?C4OGaVzL6}bCo?k*b#?rUOX-KnL@Ec&X)81S z4%d`m6M{c*#NzEx;m3Jo3SMu_q{|*m#V7moQ!Cg`#Ar1MMUFMjTu8B_jsXwhsMR_U zKgY-@5=3oL(e&vlbN}>st-yn+p~=be)_)tFoRcCOXi{5IAO8B-eff=Za`TO31IllD zk8GIUDou4LO=?i(m5{`Bq19xHtnJEJQ=LU7XaZROn1Eitlu}W0(0OG=kc=eLS&~1I z-l3!a;0_ElgZqD=nXLmvi0NEu{%?RcP@nv3+BcAr|HYob2uy*^e|`H8^>3x=SwI47 z@%}HR{x5BRV*oYvG!!`YG>*^fRS0alMgv^Mfh|iomF{zc2{X{ZygGJ{XMv0!bOa5q zWBu1{TKnQJt$$h`f-P;~5V#qk5kPSBg`fd9U(&y)AfgcP`}^8|F8^)C;d(J|$}Qxz z8t+}-1C%5|XdCLwd|6J9+|PM@hmR`hZ~&+e6@>QW9xa`SfajdZW#SBp5rA@12^FP4 z@tDJhk7$)rHWVcpz;r67ycA(lN}2(OtXm?e*lDTEVded_}$NI)@PBx#Bi;2kFEn zGR#dL{)S@~AYC$=hs^d1O%| zx4#)8h3QqVo;bq{B_xP??O=X()_a47f_<0(#9&Fc@6jvg%kC}I%SQ=QU!4oL^}jM z_>&2`!W@~(Eqkx3)A1d0=%eW+ zIE8(NVD3;5kx-1Aa@nbQWz!@k?8sEzjHqbtRaElZy*i{1lLF;*Fh=#^r)7H;-P=Ys zQ$N7(cq+Lhi0oij{j!D0QSR!$t4DzRu7oa8C+@YXvU~FCxR|bnz<=%M;j7M>S7;#p zb~8EY2eRhW5|$~oy~B6uc%bDlOW;~r-PoVz9fS!Kyg#Zrw+~OfnN7u#HKtm;Q)Up@6Z);4~%%^3KqkW(+V6m8 z;>KCcjr7psZ%yDy$LDcE2;KMJMZxk+C`ZGQ;RQATL})`a&;5!v+HPtn3?NNS>f@6q&5EQ)Ut8bh1b~O;sD-2mDwRj`h5!JkA$1H}ZgrHZ;0jfi-cE`?xx3+{-@w+8R_Zkk4FLYib`uLw7VdJ9uvQgX zU)PxcGyMqHS0ZEaKh?(rX)@u{4)SRgII~I>rwwVi(smJa1dg`pM^_KpsBdGV+Rg;1 z*TDTD1NV_4Vv=Fix#G%*%>Cn5HxxFs?F&Q(%rqSye&Mo!c*S}`cDox&N}RU*$=7Yz zcfZ&&*}%2w)KBqBCf2?3Ag^~nEf;%aNzUE7se37nGC`j&_^vn~>eS4&y<5e6EWuey z**qt1q=A9^=2~~Pc&2_Yg$tbR7T2gh#LW~{bfrduYr#)sa<967-N(zThm@p*`j~Ad ziLOy(d~bMhG3>2BhkGG4m3xz2QkAk!XXIR930y3Awrh5^X8OE%r(#(>D}HoIypd=$ zx8d4}J`*pZ8wx68m1yUCMfRz;SgO7+UQlWA{e_<5&+tia6>4Xk+T0;J!jlc%eV3QX z`B7&M?P}Yv(yynD${b%t|sk@4yo`i~c&zThsHV>uqB 
zx9gWiMs{=k7=QG;Lc-k=Thq>~yv0;B!;18KQ}tXquet|T5PS2V;ZygcdCmWirr_a%A4iw%bIEo7tpxYN=2-(k^vMHsuVUB4I(Mvb^ zQ*SYTf`%P^jll)9fAcedRB^IC!Sf_-O7!mIGA^_KPV{C;U%gWNL9w_*-5}3OjrQ@x zrEFOBtdGdRZe`we+Tp@Z^+XF+b=kW9oUK85JR`DNyvqBW+y!n+!Ufu68Jvn)=PbbA zZMdxZ_M5?it%>3X4|Zy^=OyD0$)on=sv-vWE16kCLsOl%@{SJMBbcVcU!Y#dvULlI zTul&cF)}Mc+7BOy01w@;$giPUWcx8CF(V4an_+I+*9OC#@ivE{pPU-9hjo5P*QpOQ zidJe^pkWAjTRnN;El07Bn=kvJk`RotH7$Veaw3%Jo1x0BF~ll)dY5=MXoOk;-#svV0WZ<9x;72{k4V>RgH^ktS z=LD4j`UKBEb6cN)3kIIT!PWUsDC%t^kQC*2xcYzG&wrv<|J41{+*A^g+p|bEPGJFz zd5^7s2+!{hef?nqCaIR!A^*W<(L(tFcL^to594;*w_m{_Jivz7-k13Mkj_%oqfgks zbo^Go?MXg!DP0Qr+-c@``yAD0wVpuc`kNpOF_Wot>mJ1avW6g=lPpEe5M=xz?&x$~{#*al;6(R5iwYj>;wImH+{v@vSbXJ@V!uB)q? zt1Hl7-fCbSJ2+WrJ7I6O&d(|Y;K5;I1?YWA4-vT^-j+<_k=}=-vvFbLYkoo-!-tZ( znwc>gK9=C)<5Q~Qw=?w$ZrIe|-*hzV>K#jjCG;G@Zo^EC!YwjZOIk;llBdd{Zd$BJ zo_s~p>s^ms#*K(j6exyAvxzL(=(TWSv}$@;;u-tVrv>Tbd;v`jnNGdP)Ux$&Ytbl^ zYwm?XMYFexyqw$1YVOlK^tDEhNFIPJWVr#%&d-Ga?uTgNMK}yv5JNHM$5v_8K9Amg zU(erosXqMVoa_f1)r0beepa{{%Bp$Rm2#Bxal} zVsDC3+eI%%Lr+A@AUmh)T= zF%pbS;W!)WkYG-1mY&gq#V?1{B?NfS4E1`NqqW}*@xA?MP^$w8rs$qfKqzL}J@go^ z(`;C;onZK8ZNXvUI}tUzk&imc+6`@{drIiS6Y-(IfTy@mC+}2$$=i}l+l*EZm%Cv_I+(DPVc_Jr=6;&ei0@vHz?%dG^avk1K zHvWw2J(!?Plj<%Xl>%A;aK{82C*eUGNp9Ct6{?lQQh}4GI8a`0=&pS1^04OC2lZ8 zaT{_lXzowZ0n*V=P>s-l>vo!!TFZP=UFOFRs_PgS^sUUJ*}9Kh@~sBBZJ5voMTf)* zK85mZkjmI;r>4@L3vEwi@hyhcF#L>=3H_OTVr#czY7~{-IGCbA5jst(y=-zM$9sgy zOtb5RD3!fe-RBMXylpFHiEhNmp2{p*c$sz%Ls*u62D$*`H6HS;lyRkcR|nG}YWOKG zD)aa=-xT<{8_y1Z3q`(fng+lT<|Pg}U&<9*k4%)?)_!X4I_9=P0*oBJs$$EOe=}5z z%`Y$8+!?qU!Z+_4gu@;s0bO)>` zuB3OY^Hwx8iI> zZarK`v58gyw)ghG>Q*KEkOw;<0W_$Uu;qa=26V2`0F^IY1I@WWWuO`Fq<3X&|8#W% zoTos=TuTu_j}-79WXP(1KZL2TxhGj=^FMvG16Su4;QN3Dnj+qN%kwQxo%90yEhy9~ z;7gE71R3g2aS&M-jN7LuKKGcA=bze_wWZdn0J8oK7F#6};HsR2I?XaGdckupCSnHf zgpon`hp7W~4RLMbf_3rfziJw_eRS>Xid_rOs<83>DnUH>oH;NGHa6Oc4aajUGdn6B zY^D)s%H-PV1vF|WnN5bi<@~zG=;qzx{@RE*pCa>jo zw(Lxd#8mlQh2keCWl!hF8zKC$Q?+AJZyz1rZ~6?906}~imsBe@2vC3cT;I~3e|3x3 zTU8P9URUX(LUQoa+WB>9X>EQ{SOWRPvW&p+pe*@hpb%~OSU!`;t6PXCE*q7y z8IPq`$R-(Ww-Ow z%cp7_SErk(qQHg&coS2Jlmt!zT=1}tSF{}gKfqU#8oAi20p`F?kURzVD0(3P-s-2J zx@m;-2-H9Rghba<$j$8(h*82v=w~q6;{WIJZ)+ic4H)s#CCs^gf=ckf@1ry%g6G;d zeQ<$6AqY!xuIBZ}r~r6V2<;C#(>QQ^yRDXhjY*%w_Y5=cesp*lkUtHR1F`BE;UxS| zaZ>MXJ><)2SwW{SwYmxQ2w8&h)GMnYeIeBDtpwDOs*isJM9E5!wdx?OZ7|Lqod@l_d318^UJNh?s$Sj5O(k$ zigdd)s?m`MaB`r1Z`SJS7wu4@sxpH80b#PzbczB{ZIwG4ii>vr<=JTAgaSZYSS(uY zUJlE?`oY97eNa~6)UydV0Unx z@>yvRuX++5IW^NF$O&cChJF_iXSEo%L zR|L!mYDhDkqXY3pwp!E=Y~E06h@%DbH!U8N6XwyTvO{a9rtkR^Yg!)}c4bXvHY~h+ z>Hm+hv6a8)bx`=>n=F!d^7YB{!DKmTHEm9yviHn%q(dH$qQ&cwji;H?e31B$P=u1W zY^%qb_G()W`teu@C2!Iw=^)GI3#fhd@%vm&Pg_a{a+>{ak7s}un9|Ug#ID6ftYTA1 zTHndBQh;`yltxD=$U#*~fyuk;MdHD$`!z}5|6LsvrZCsjCDK;X6#$<84^0C#39pl_ z%BTi2sFv&6mf2VCszIxnYu(BORzGwo!gaGw(Cp5gCJF~QfD}fIGVbuy6@9U^aLc1C z3i#oL_Rr4*>21F3goH^W?J-+x{etNq&^IR+wrP@oWSzg%{n;b^Qs}J&5W*CUg}?#g zfE_xrT9gu9fJ1MR$IEaz#U@t)Ap1y!D6?v8vR!_a%2b-m>uFSwC7(GoRPKanB-RDd zTfzi?#5OJksi&d;AN6V8;U=AscZLm}uBPF1feKI!^}H0&^)vexX2Cn==%Eh=>^#$3^1 zBlql=0WQ!TsUbB{cNEBsYaWfP4&HZXmuF_RKgI&+ruQc1xreI8x$27Zi^N)7e(ii= zevl+R&e$I+yRWQ1kr}e}>dfAc#3fP_NWimo-*tg29ExYFWC6mB5QY0bG)Z2kZVlhv562pm0OX8zd6o#(`L1_9@WY zozO${W;UsTKnfN(){S$#__z!&t4n46gBAb;-k!q5{^R;_Zo*NL@Vj<|pS?<_KoA@# zL;qQ$2vC5`Qv}WzVo*%bh5oavlKSgxt!|t@_+CyfVg8#Tmi&;M|5;3Y@;EEcrnQ>Gtchg2{ul+B1(l*v$*5Sao?`W7MA@ta`#v+wwRPt!36bcuFQS1!h=Fw%eaBHu) zbo;pam--T9iO*cjtnPN-ooOeg#KdGDi+;O!6<_rut5m~u7Fcc>rzMSgtQwzhQC(u) zsOA^fQFZ&m?=HpbFobOBJD>$X3QR0zW8=Qi)`a+2U!lUo{Wt-SQO}v2)dDm+NrHL|@VK{1JVPUFBca3RIaN$~=ql0s4-+Py%qVt$wYwEH@mpjA>=bMot{n&Ct-D 
zmwVF4aC%q8n^wfpKp8g9@Wk1fVRwx;GC0k26@^%`@qSd5$8(O02`T=tL+JA z&GR_$GwrjlCLMPP<5_tuNOKT5{7jrzk?UnL9g|4&eOC6yoaFdf1v<$W7Uks{aUg^t z7~>%h;3ooKpAQ>h>@;RJw&N~KQcVq6)ZZ_J#%c2PGDuTT|CszVp3?u=?MbgIa;C+d zfuAzzS_-vL*G1bs{L=svW!U??*}_F4#Qs}W#$RjAf#e_B2i{E`ds>v;>wObcR`$hm zc&naikY$U#VXY}{8GMo@itR4&061AbYTvdvYG;I-rG40b|IKE+@HE3sRw}#o(?0l6 zS*j$rK~#^|6X$xy7RM(lp;9l52+jJe#Py(pzpNj-^*;8@9JrgM3r*0RI%ao@Jx(Z9 zTF!_(rC}DOGtK=!oPBjvRZaKsxtEfbk`4g@B}9-e=?3X8>F%zpsDPk!m!x!u^i{gM zQ$)JE`40N{yzlp0zqP)#zJFNf%*>gcGkecv_TJ;QE&E&6b_hKoPj>K%BI^j%lluvL z0V3%KI4~mM%K7s8I}3#D{;&qO*=m1u;YR4QE?+D|teN-cxuv-92Gs}N@CNfI`z(T% zl*?N4KJ?PDKq(`kH{3jfEL+$K>JS#5{;+rjpujm`X0g^UAT#uLJ3-pcJ{>Tatj3O% zA*~b6oVF=m|5rzD@!{JjOURdUR>`=xNU++D=iA|xhk_QQ zzRl0Q5M;-xSEK}Z4BJwi!cNaAN)se>JawNdnvru!i47D*J^$kS#I|dZUv$;)B6>D& zCxHQFPi$a(3j@AnWh+lzJGQQ!#g)Y(8)VM8We6&5Wt^m%=6$aDYZTEGD}=#5+Q1AAffy61-|;?6febbnTQ%!On>r*4`9<%)P3-(t^kh``UB49`agf1gI zo291)fEMt2+aFCOLIxrEP`$qtL%bOPC@*K>p`q zi=qJk?yCQev52mln~5f7HhaZ?_tF5mDv}4jBHE%NyvJ7)-fc?8Sz&PqOv3-bPk#vA zy|;MHW!Gk3QM~bU#ZUnwCEQG4AXHIO)gB7}P5LVjiy(NQD%?9T{Cyq6=6ru1v_Gt} zDKBXkVbSu`>yqUx9^+Q2?muJsAZNl>PQXGTcc5M#LvWa6y&W6P+*k=P>sC%!Lfozf zq$Ms;M*qAXa4G4p+U^Mms-sZZVVJ|1`WZXi^u6I^fzqWW|4E+Jg|@rOFZG3nao_lm z3bYZPe^-W~{BGq5 z|EwuKA3Sxes*C3%b(LHVb7cg_g*Kl@@*0YiwpAfLq%##3L^HOv2G4coU(uI{4cL}i zcQBI#Rz>9U>BUZ-{-l8gnpL}q#l0gl(~J64nY&6B*pLget*+#Oy<0Ha*~um!W3Ja& z5a?42{i>s6%ei{{SaXzJNpRw9858SQ#xJ~hQ}zAoU*ZJrq9xo(%Ao5TDUL{m3}^s* zY5plwX>mZT=M%rIhb2riS~_}zQhN9y{e-fE%h>Sy1BYUUkX>t623gv}S~-;k%gi{fA8r-iZZ75jq#~ zwEM%wlF?YA`k4j8vG0=^wdQUxt{P+Jp6o7}O=D^<3%eMLl zTltMP`URY%>Ho3d1aRr3od9{A4+$2Ofcg zh^=oppL5Q%C(`UdH;ZhE!=%Y`6D6KyY=TmKK=UjHs2-b<1>$H{a^yeMF@5naEjuKP zUA}&g=YPv{0{iJlu&ZD7=+_W!{(nO^uvK!=eRH!K^Jop%YK6&~&t0{~${P#>_w4LH zfyrBCyUC_L=v%QVLvzb8UzjpQFn)NP`I?@Skh~^VA=g#&ZNBiaG(K-e2yv(GK4 zb21#ymZR1KrbXtJ**||TL1axEeWfde;J`yY_vp%Ew_kL_y2|vvY47Lv$>ROxIpl%} z{)g{^(cCE2rw2{dgz_K#N250x7uRYQn*47&PIniQ&JD1K9>FsCN6ZD$_r#%`1|)%iH$xez7!2XLY^e zXVc_lz%*k;*72Eyj&^?Ddq_(|tNt;_s{_Hrzh-K|w)uq{^D*h{m__Q6-3ozObxn8l zYs4+f3|hRzTj^+OxAU+~vh^$OUuhNhSFUE`=I!EhB(44fCg4HX!<^2dSv zYbHYR?u;DxL&-mm9rE2vBzEiKb1~41zjMCXIPV;>S-(YV*rq3?;PVfO$y>tqtb*D)j*_z4y0v16Q!15Jap01;?Pe_Kj> z%*9V-p4aDx+uN(;^>c?-jiv+reK`hEZhqb;{j^idco$7Wjd2hvXb@al96+UXvjSo_ zzpbRr%*@!=@KsM8J+fRak>)#{j5FW8a$D?gh4}EC{2q>t`?_aPfpx%Na(lN2{v+(( zqLS-o+Tr10+JL)F#k!gMLf|3dC6DDo-_AUCQp!Ng-1PYmZ{%<*#wU1irWrSn?wjbX=p`?;7=BZGaBeu8$sl~^phV~NZaCV^ZR)zK}5;#?~#FO zvhHiT@XFCWX;>)|NZ#0;^3YKGB6b)$VXkX?($E!3Zf`B2I)aSp$iaGz2oITxmGSed zB;vC^n$#o^tvKAw(Q-?P-@{8HJRvE&`g<3kaX0pr7h|SK>^PPl2kO%7 z!njFhv(?g(0Xf`R2l)@9ZyP4P^I)0L7FzX}M~nif@L>;k9dYZ+EA6yAA`Yj@7o6Jo z7N3?^GEKRm4+wst!uV#aEH^dJ>qoUUl0PEN?I-7UtvwD2ol4!o<9chgq@N{)kUzV;4_e}-& zOM1erN>y*M=Y1X=DQ0S}rBqXU8`~Ckwc&Y6HN`QxIr$us%q4NtiTB2fEBUM}0!R?{ zI*Timb=B7 zMxDiTb{OI%+r z<^s1M%B4>gtR|>+(3ulafRY-2YiljrwsmekOx>#0Xkf%6 zC529JkZfw<8O4E9(efIH+k^ISMPbIRY+%nqEE@(EtK)*9ghoALw2J4Dz{g%h_Y1Nv zo|8}(mIg{4*4j(TmY{GEgXfO_obJj-a$#vgNY{INd%J6=HG$j5{78L4z--aGB=u~& z)9FM0B$onxv-Ns`(t4eqq+i3n?K+T26#>P*3VAiPVv$4?X+@0 z9is-nc!arv5mT|Uane&Jl0*MRC{m?)2KdT1WJ&#Gas6kN78$a-xf-M-he-qQ(}f(0 z*!wOTeuMB=2b0CTc{Lg4Pt>W{Dl)l40`Ja&#&Qvq+Jtg1Pm#I7j3 ziJ~VYwdBaTvJ@d(2Ok(0V6Q`NfR=JQq*rJRXLChw=h!S;=qvW52=eGt)xMh3sH;R~ zVWI|lR^mgy^rMw?R zlQ}F;`K33V0a4!syL~Z)aMZ6zSlCzOL$+o1whjm8aU{vH@F&-v)6w9kfZXHyn|tu3 z7sx0l@3#cb$~Ub%5K22VW>&aPAwBOak@q@2MXZA9t&NcQZHsoqC~ENM<%R5ko&r*s zxM;b8bKJmf2jDS&$~ICp6NK5cva-w#NMaGlzwtmcOn;h4^w-OLvdC2$++9fEuj4Qr z>!9OS+B~!18@!zR)i-E%8dB6^ZWCwwrm+eCYU zFkFR?@%7P8v_BFd#RH_@r@^0Mi2CANahZM?EuAM;!|Y-5C9;byU&e?9=jN;`zIzBF 
zV}ZwP${?fUAUHM;66W3c^!M}?&?e#Ufp@3X`z_o!uWJas?G;7e+!^-Y=Ttf2-$z#L zR#pWu0E`U-z(Ci#^D$_}!4NTU!f%#A@WZwf=I?qvJ8>~iUV#VI zJ8y!o(Q&5{dr~cgc0&G!${=$s&N3SMj~@X}yIA$*^I@%1#E#ch&q)_sV6lPP-!n64 zG(3jTJlxTyYXULg1?JzM#P}HvB^slFnLzH&z;{%u+qe_B6nslt=%>_gq2v~GnF8yL z{5b*NeV+1otzOpb2j1HpgamNUq!8Sv(y;h@b|s>x-peO&*uJ>9*ron z7sVjSe(8e$p6%f`XlG?*XqweY9+X7BfUD0hIF0T?NLIq>k>4w9`S)L{&Lc3J6JH?pIW_DQ_9R5@t54*&ca~s1vFwmLKW64-L(_MDsbU2IZ`|dzG*Ql`_*N|FziD>}!&_CKRf2F7InREYV>}oywLxZA0 z7j%h>+$n->^{d|34JGrTb!lSJAK$Vh%}Y4NzBlnIG1vbzeX!j7VF<5LX^JxV({4eN z;2wQ>l}oGX;XMeM#3$$;-B%Y)p3Ls5^COh09BPo(1Rol{OAkIPMf3cM|tP_rPcBC~WB*VF#NShpjJP5n=w9!(SP2+tU@ z<)qg?a)9Ddkty+C0(_Ca8b!Dq%tn>{59T$GW{8_pr%B9%zihx(KZ-Y{c~3(4<~t(* zEcETQe5Uc14P}FS1xrLzn6S+sx#5C!MCi_}(YxqN<65b2?-QScS5+LQl0R7NR(ODYt!Upji z!j3r4DZx3urwtpwdGYvuQIS8});WUV2PR_p7UEBz^^%9E`vXrG-286{_%+{rl2ND( zPuW$&Ff>q~(y~*U)CuRDP*akJ%r?g!ds=p)rEnc&zeLO~W%mB%L;GQlwR?BUj-LP~ zAKY7fejlEn9TS-M!|Bm@c|cPcd6ll-B;fJ5zuf2`SKCcbp6)t==t;EH7c=QJ``j@r zdisO~CSF;9ae2jSzi5XEX|Iq8!_y)sl}8((|Car%yMn9L2&9EkT|%o(xucs5(c!*O z8%s`$^9m{q?{<#%%MB1uEsM-*jC!#Ox!Ru*gf;t|5|_SB8G6FW38cCWUvjx>Z!bL~ zEzm_XoZ83_L;b0K%DL&8+JxVjK7;?NoF&WpbJ=#g3dAG7r)8P_O>(gYE^u1B)ROqf z^2Y~PNwxEA`V)YuERsL5P{94#7KIEkQv#pMoJ$&Ii|-js4P~~=7iBSfH;m1%Nm&i} zbR)@Y$dg_rUR3Q-jnEX#w^*^2P&dM&^Q(ot)TPpX_y^D7HP%`Cyu8!KAI)OtT{KvO z5xC!`T%G<(I%`5mtsSdG;50?i)e{8vm~d^MN&!qi80-$W6AK69JOy^L={?JAdNXt) z@-W=0gTFva(~3VL3)4yQ88QmmsD@BZ0+WwZ1P~n#awn5x5lg+i9mZTj`(}`j%|1Vq z#fsnJT|B`fFI^+HLUGX)nt!w=@32l~zo6eoI3fxtWTbVS((jYTosVVEjaP`0m&zEJ zhhq&S&;ttQTq(;%LO@PuB&2r@=1&1X#g6@aTXsua>ZopsI#a}=V;$j4?zpWWQg8^s zi;CkniR;svT^Td@?{$29r_zZO!)T<&CaW?}@N2H;WiwV{6*&xV;Ij*{kevy$DtFN0WHW>CX*Lbq~nK!HL}ZrPH#Ky+%hIyt*0h+k*&oz!SwZ*D~47cTP| zERSR#R?CwBc1Sz_|EcZ+=)EWiS5G?2*N$kvQKb~J>t&~Q&0qdBj zwmH6&13tWKD<`Vq_|IN8h1!08Ag_6PSyw*;`MOIu%3@*gR6U2<%5y_Rsf3itq-zf9 z@oL?7N_G5rtP)qu@0%LAnUXn|<+T|#b?P$}?h}xi_G$$6`p@1v zz-phq4_r$xC0T%1A6yBK0`5^edOAn(-CLx+=sDvxoSNyus4B+tLuTT&`^G!#tY-3;@gZz48VA{rIq~ybDJlqMdXBK9hn_KXWK5G(QHivMns$T>4?&x&|$umiiDc7;=hHCGR4cvB|U?BA-usZ??;IbMc{jx@0hXV!^{x?42jrmhJ zY16^+&n!(AHF|GOB$>Jec?p19z2

    2ISryyyhv4qYONRJI}kWKc-z#^AEnn7zSas{z5GK2h&9OUJ0my}9JL8b0rl@fU|POtUReFYixxHn&r9RNTOEom{ zW7nt!&A#GG%ON(m*3T|AQ4pP^NTV51m3#Q&X86%ak_l0SJudgN2paI)t3|y_$+4$p z^DrV57kVZd5s1?x*(|Uu9 z7XJ-}xI-FxN%d1@@CAhZjOr)1G`Ii&w0PlI#PDYb$e&@vKH-kZv~Ua}2E#wC1pST& z51IaUl%BHrVS|0hlqO{KsHKRQ;540)Ksj{+qyiBWpEUrR?Nb!uiE#Uu&#$3od&1xi zSwMCW06Umasf>{^{)I}+`V#)9l`;9}vZ&_Y4#Qp3D+;g=L$H%u0oM?f1O_z=e*`fR zXG1VUCHV_@D9R3F?yWwZp_}^Z-&Xl`ORLg9&mk?(@HZCsByg-n@UsxX3ALtPJmzf*~H*p#^L?q6XB zPv3s0_%r0DyZV;GLH_0AFr|zdnKu;E09px*nWTQA1`7&oF~$|$_I@NT)v!mx3VhRm zit$bzZWeX()F%NC6F8S}*UY{(6QXQ+Bf@A10Kn(Fpbv(-C$#vx zDg}Su7aN%k6*WrPDu%ZCl4~(JVkZ)15<0#&I#pLYc*0$nRq|W*I=kf>128G_1bXJB zI^W{n7qIkeVSQ81mX4S#5G4(dKz&5zFwDu~yrM~KT8ukcZO$hHe3nB0+E6n@!IId+ z)*`N<=Pj)lr~KNm=CRr4*Vk)y!JXRpEtoJF4D#QQ=HrTIwii!x3pb zDJo4BZEflp7&D#>JQnrQA_#Yq+mcU#$~%F5b+lfFN@L~8Yu%}ST!3L1DwzKS1X9(! zQ<@)~+dbknO`?p_EsJE!jB_nx>&mE-pDx{8%~zRwVg67{<>O+isXeWU2r4&cI^w4Z z^BitA)Jc#Qn}nCpYPlznnUyDyOEz=um1Hjs)a7p7N8yY!T2Me-8{X|lUKQ9p+R`@E zYp~Dll%YJPDwff>tz0eo@p!wBx!CmDRsVXr7<0;BU0uwq&w*M`r2Lo^CqWfjEzj_} z+%WSt7krfKqFwAX!7VjaMxEr;K%I=^8DaaEBHMCuI^bn5((H8BM)ISc+#eHB$!nIo z1CYl_ykYoQ%q&8h{?l3( zoPCk^VZu$YfR7kzSGfWz$e2&^bdZpXT<}4T4kAex1*3`b#?<8GLN)2sy_?()caqLI z3eGvDfCn%vLGAX=eclgTP)V4EDcj!kdTY=tN=?lc#@lw1uH8vVXYYZG zajYz;76GAW;mEj{imMmQVoSK~~IDyQc@?0?DP0dU`RY+*9PEl7GF< z)P;4u7kx9rIfYgy!-&VbliusSW@(T!#&|NHS3D{YW&a4GKlM)DZ*fa)i*ZYzI-7MR ztL)Of)=elttllO@!oUa9kpeH_;Gf@5NOEA=g}?82l}BFudH>V-_Z_(Fc^Qs0+aC;u ze4X9ju2+Urlyv2RFCLUR6BUd|S9xC=@xe*wMe9#T<{le@k7o>j7C)Fw6$Z`uUpIrh zVUr`!6>LZF!DkWM(ZlTvtnuox4ERaotxB|RD7b|z25j}W5q%|l2mo-DA6{^9=>G%t z7lIC_VNJfC^X7`&r{LfhH<5K#L=s!;dv*f6*FJ&*g6xmW3h;VL@MNtiRaRHFIvWjQ zDI!=vGQ&HN$h_6+6R##eG(5jhSqP*Mh$oxLPxIwFZ${(pIG?|1O!Oeplx{fdgevsu zhv7ZF`=}X-pREv`(ak>)Dfw`17MkkqA&*M1H=Cm48@owkHTJ}iL(?zK12jLd1{T9( z*(EMlV-B7@HQj6jy13Y$;_QxNM5iZiD%o)c9sTlVG6g>=~4 zWd1s9z^8gi=R$$-w8vyiSC?lXmcqaO%In2In0?^|MWIRRi8|#u6jTRK0fbZ;-TLQs zs+Vm0)Er437uzjj)iC8B(fz<+|6SN6k&}#LcC7)hrWKLsheeG zU`andSI0GB3voU_%E5)Sn$+!?z>$`9$4xgA?(11EIz2G0I-9)!!kztx@8Iu6rJC2l zb6%Ya)VV(WxUtk&t^^MdHP+w#8DVriaJ3R9ZC%T4YOi7IcO=%gz%JW0j_cxebogXf z)lAE4=``&k8JfB)tCx~m+2nINiM=^K^;+@-_lWL`-^!D!_$%8UUnV*~d6Ke{rM!-j z7K-b>wq_AO{CX?aupS2Nd@L!C_|o3W*%z3idZ zfT|}|c2)BQeXeGyo;&?lv6q_hE8Fah!RJRFE2M`STkfgM73#B2Zf8x$zk0plb+Fxt ze}MCsy7{@Zdlk4AH8_Be|k1sJYA01c)y+}K6zIKPe?&mS7qfpoTh+jqJ~RITf@x_ z^i96uW(-!wOqbESRwX@i9OFNR;sEfiTtg-s7 z>5s+xtf<@^T5KqkiSJzGyG3dSbt8Amcv_!-d8fA~{3eItb3jd*T%_!`BV}?KD!S6C za-QfLl3~H&=w7-kcqMJjab~@H^>iov69J?5W3Ys&hR(*-S&)>}&KtY`a*l7~~3B z@Y}ya*;HEdw5f1IIEk=%a8HvIa=U-=<=o$1vT-TopeX)ZOe!FNC)bd{7x*HN8FFa$guj^4T2QzUB8c2H(R`UV{;6FTBw`_V+*X)qS0^^}nEZ z-CX~8ET3l{GEiJ1)Oe<$=XJ4i5#pa<{-(D@FeGe&|Lj10(^x9!w2$8H>|K!CPo>k6 zK4YmHXOZuTp5fBd%^U=KEhq-vI_bxh4+3(hZ_7t@p4rdoEi{zf=**RWOg}o>O`p*y zSu2VgsOv*eNwaOL^*CPjYVpUAo$Eql%xUu}o z8(wuRZabgfm|5jnrKuC+Iyb75$E;=3b+P1o=;T;doqnzu9ddJW+(*i%T6gYBlk+Uk zZps-x9(xsUanP>LJfA`N#G=5yKAWlHCSC`u7DKf7Qv?L+5wW|tcN;nN)D)Y))%~8c zr1O)z8Zwe?LpKtXzy96@Ky0XgOnJQ&?*3|FyIG#%)(;$W=Y+nvGaq327SQNX8G?_9 zU@ZDSp~-jM|AZ$0Zul8-~-5}%jkyu zc|mYPxA_L9{I=_XqXG(u&Eij-JsF9-EaUM)qsLC7>Vtz88rQXZk4B8l#F;VPIm;%4 z_VopPmvPaVrpYb9$)m1he$XP#;o@4`lbr9pFR|& zUT9DriIO=!>CM+tK>H0X4@M)5L?(}tZJ#wHNIr{7!Sj@P|LBFr_OGcH=O0`9j<~NP zSdpc_&qE*i7k@wcMmFLTaEu%$l4da-R`?{u#{HLTR9?DMx znFY7y!e~R^K3t4aIq@iWyr1(|AXYYs`Zd3ve(EBeA;#Q$QB7qnXZ0R4b5^#G|sonOlP zQY@DUySO`J;{momL)AR#iWC|tQq9`7@A=pE{LSF48YJDvX1_{c2lLbc4bT= zcFvGPOLH$;BU%hd+929jh84!gH81fT#tX?gMDo^(f;nV&gg0gzKNlSrOFgXDc4GAU z`B?}F4QZ;rhIuh#I7d>rQkMa-h;omD6>9cpo|dL#Yx%Tywh%%z$xn+0T*VBJ4JJOh zF!=zQ7RCPS($G|CUg0?Zjn~<=%{t6vsu0;C 
zUDt^n*{Q7y=y($>#4f$u*POfjbj$&a*_C1f^=z>P$FdfsFw6n_>*F^p!RNEz^Eai5 zz1BLy-^@?5LDZ_1b176II{HX}9KG1uVRfx6h<-!DwO#jp0920e?Z+e)GT<6afnlu1DNq*o23NYCUjzap_+D z=;@YC1zpiKV#6kj`up#3F^S|(yrvA9#H+(^lqYyo5>#w=NDC$uMwLGsTk<`ki4xZ> zU>8uJUZ`7NKKmxze>Aynq_*2A8{Tx@_y)1o8Zm*?5jnPoFc6b%g0TPC{uHZU3NIq` zuN=wE3$48y)7ALrukzb#U#I9uh(!1@F6y~Gvj|h{%AhZn#N)Lb*qncsJ)%^=HMRn! z*bSjg$s2fk9C27QKyM}uP~E@-W3`eO%I&DLs7R9eiK#i z9Jukw6iSse7ps*(I9NqyMw$$?jsb2<98(T3xR z;o+0S1YXmpV1jc8gAP;g{3_^<%mC)6quieL})pVkKa5%zdm}a)oRAOV*A+MTXL+BotwkK zsF})a?c_XX-X3W^d5%$v#{DCecE7H1x&&R((A$FmM0)pIsyKG}#y(;kx@5oG@>Vd` zo&piVE`S>%99epOeicP0XireOZJ_l@Cb-T}-bFGCcVn-Fjth~g2z@S zs4aV0G52a>KZr8R&FK^yw<~32rtzBn)q9IiFK&9{Q)g-J{asdpI&|_jk;77(_P`?NcQ_4`>`6g-xqqfri;jmuTVf27nx7+HofMV2a^i#0{(BsGgeZkxdgM|nPeQvs zD@zA8oaJ@a$;$feTq~Ho-Vi^SjG-P%L`t<^%iE4p@w9msakVT+fXZN1%Oyn9ilGc~ zZ$f2he0cQ|5x`i-sWc3~U%DErD13YDfX4eJvv$)5?5~@0;)sB7lU0J1o9fqcd!@$$ z-wuXQGd|{My^-K&$ryKfkyumY(FvLTCgF(9eS2@-YwJGh5hne2P3u0N{YBYVU-8Av z>8wg_o|N&5v&^FvGiIT?t-3Fzf8xU7SE=-MGs0!zD{eNm-^NJEs!AIDKt}!JtD)oa z{3*30$w%G@ez$BXl$v+x!%L(&Bp+-gO{lMy$Ac@y*|5s+f0)0;)mjf8=l0LuNWTXV z4bxbBqk3p4WVm!0XerBE4NVZtNZwjItigcm_=6#oV6G+)>dKt*!E*ezLK({gE~BLq z1{A^K(?A~07I#;&qx7pVW8_l&MyHa0{;tlCsZm! zRw>rf1$6i?v>)eAV`c_Br%9wn^R#a45Q7?B4xgEa0wQqG(KciS_RJx=#rO4@G~{jS z@4p?idTxjQtL|6+j~T3W95AOV0uTU!3861Ky>n>N<+$bu>KbtM?C*9O%cC#(v}YT5 zbPo~$gTTH4aB$EJ!S};o`J`ynU&(oRbqKQRbX^UDtx=k`!zmsbg84VVW?r_td^jr;2x$yg}? z;LMGdsK^g)t>Zdhro8}ZZmxLn0%3~_$)QZ-%(d1^n5c|^w?3cS*uMt#vB1!BkD8+B zvnZYLVXFUdND&%ea(|v7u0W7Gw56EE9lNljjM3oCX{?&%DTb8ge-ZZuausl5Qfu{p zHEKYJ{+k2;kJtdZaEUra_ z5Hu|iCiF+zypkLu$qFp7xW@qnW3b+8U=1I|IBJtM_sogHWxP(EXtb zUYv=RIeoQd*0$B3u7_GUOVw<}^P5QX(p&;j;)nn;^F1OAT^ z)Zn_XJ8~q@wVF^ao#UL9YU`bqp{b~FcC-YrU_1H6ZVhE|pE(Y4|JatFh`BE7?Lm$03^~QVBS?^2}rTeo-s?52te+1|I7)F4ZhKfF& zwJiN=*8P6s*$45Hq;4yNto0BfcUPbO!Hh6tm#w9JoyX5&e?eqWH0haQV#}L^_e}#E z5gQ|Xdx$LQ>f+7ix?3AuR~||mIV2+DB<{Yf@1Sa;ZCmg-xWkk!IU)&r4_QVi*?rOWL>{i^KAqwjz-1WP zK|xSG&Fzuij<0r9NSURde08Mzk%?zu0l4-2UTF6wHGr?R3cZgb_Z}Y0sY6KnB@*+s z*cGaTA%?!BDaIxng;0fPEne1wUrdib$?0u8c3#$67n3ZTt&ps2c2RW#N@k;y_to

zd|TdNEqML2&>}Zu)hP+sJjr|-g{iCdvN@nR`{rW(OA00Bm(qC5J z>t(B|>}(L%o=mTK)rml<7H&~qan_d%G|+^0Uug`_Mddq}@)v_Wx1!yIcFx|<;vG}W zTpAvXYIqYi$z`%BT4jRmG3R(K$NXnOB2t!+G%?J18%X3eO%ul1y04u`<7rK35?@*` z&rFuP6;0E0;d1TgS-)`+*fSkOQrQ%#S$S7={el7Ms`E_Og$h4p(Spriv1w%uENQ)! zfTh>-f>J&`8eYR*K&({_nV+k_YW7Ahs;&}dpg(!tZ7I0Y7x^hTu73Yj<6S_*I>}j> zi$UJi)eB}nc?(FR!14a=%X14r4|nTgE4SAQjhhN65>b8Ri|addm<>Ui^!!H|gU|MotzYET^S?`b zFOg9-F8dbY)I9lgx-V>QR;%rDO;RGkjb)6j{{aAhU7pyUnN6CO9jz>3>v62=^CEkE zEpEuRT+P#>rYMu?b_Hdp?OnSdUGR9K-v-FiEz8ou?7_*?^nDe}Igx7FGJj&R%|@iz zHO`(mqXJN#5#mzxJmJDb2pB9Q9zB=tO`4N3R(^1<(vZ@+`CN#PICH45A@-oSB&>dFk**7vm6}4_A6+CQSid;W`A5zoo%u-||Vbj+#qd5qOC5}Km z(jmPp37vguaqrWj)J@ZwzZp&em)0QJ_5jVx@)pO~~}_UIw@jyC-8ACs%ep_#2=Fj`R%+ z=Hc<*PS*jvPqS5BMZi> zNX)c7y(pw;Pt-EP`H`a%RK^$MoivsqL92&uH%y~sZOB|(c z5CfX$#t+{IjAe7Bt|acXen^3(z^lQ%x!{!G*iYgKCxc_Gy|cwC*;{4X*hFIDJ%5ly zM&e!Mn&$J4morz-0(+y95d;nQcVEurgS$zZL zrXDf0JStR%?CWLFkc=}zrL9#E4`OpDbzz-=g?~?cOY6Z96(;t@#l*Qg$YE8{Xr_xIQcN}C zd%J5dmdWW1cL>(uvEKy$UxHW5Pr=vK-{*;Y8-SdzCQZ%VsIW+_xk{g8`{;Qdo0EM@ z!&ycT?)tZ_bf!%mYT(J4Y2v)j-<>}XRro2YZjxue<$%&yEz7T^f>9$8U>4?W?Cfly zN}0>Ju}vow0|lDO-(XYTjkBWW;1#0A04Zid7@JH=9?=VHzO(h|xpX~VLmyd*?>v#; zY6t%ip@wZ6R@b?0g9PtV!l?GEU$QrGmfvuF@hOkD(=2Zc6maN5gq+JVvEO~5gGi9V z+SHrg-;)NJ+;=gca3^T*BV&W1_w+!p1uRNV#PegU3$92p-z36L9p)hl5Je>4c1DZ1f{37wLmV=C5a=y@7$(T{LKYB#z0?7fk|!B7 zFjXhbIe~{FG7%%2K_Kg`M-)t&`d($~jrp9T9oLcCpm1_P0XIX4Vl8q@`sJ$D@Q)n_ zzV61Lo{h(iEx%M()z}sd5fUBTU(G+7n^SpknLphtx*4}-zg7}};cn!x;FCu~44(It z16~B01BBjhvpX7;8rjRF`RM2WoF%sKmTusRGG;^7?zj#A?+^rcSln?Mn|ar_Zl4_C z2&a}6QTY~DZJx1PwlUV&0|Jed6El=iuie~~0t;8yg84M)N8b~dDGJVg_3xu^=EJVO zdJ{PGA~P>IBayWXHD7)`S=id^4NT9wGHWE+(87^6#=$gTX1r_(dPY0xIfwS`W_9mu z?P`n*($tHD}@2bB#!?=Hh5=WCT`wBVQKhO;C&9QD9?7N`4X-br8C@DQJNP z(PY;DnGMRD$H$-I`*d|qU^xP74Zk;y?6zJi){DO*zuZ|LX{L(X?XG^F?59sZcS?_< zHjFK$2Rluq{}f`FwTrX3fV%GPy_GG&=+HL0^Rny2T}i|Y^p`#gf?xtc3rAS@Gp5yB z7wvRr7d2I7e6%UX&kFd5pINfU+7)}GV>pk55*?{|mU?27T?HH-+S;9K5Cp4SNI9qK zZt5v^Sx0$|;NN_g{vI0}ETLg$#zHO;{#!7LRpo*$c_C}M@h5Y#ZCph=85g0fGT~t) z`$%j-53#vkT0v||Uyddn*~5|8nb&Rx7M|XYnlgzz{VHm7g9nl0HdZ#H!}6N#iZmLh zWJE08qS%id-B)!>6$oh#82oD^S9$bA^O;s6dOdvAHJQ5ipT;W+2g%bMMJlONzh7St z4X2dip!PJoJ6XO=rk!Fa?Hl=_rg#-9jJN9ekwW)@d9+BhG+8MDN=9sQHrgMbzy2_N zEm5lSV@aB1D2vkPu6&r|QAxkC=xyd}9V0^u1+q>VEuJ!o$fX#2=S&U2^Bn<`yIEdN$4 zcX}_IO-Y+rUBJNcsilublFR%@uKm{JxKMxkaV}`3jn+u7>U; zqtS9s;l5L0S&ryq+)hu|Cc*a`^}1}@VC%;fj`GU8p>J`IMYB$NgdIH>;(gAClEsm@F>vBA=1_T+_OQKF;c*l2hHqUiqj63Au>EN44X-0B^w-8QPgT*8V!2= zA@U51@3Ts7Rc%(BHYYY!6#36wRnfG!&OPSV6YG!31b-}Lq*bb>yD>H`3u`s#xIZHE z){Ec#b^;`qD32I7r_-&|&SkcC7Tt0wXm~&8X#22=Pdo9I`+Q+;-qAXm2z@U>d~e%h zbH1LQtm4gYMhp|rRR&ob{O4?Fry0`|RIt*4KZ%af$Zi>j3o`Dg*pp7QKkwBHNwFC) zKR6xz{z7}IT*SISKF|Ju>|HVQZc3J@yfuj10|CHI97G2P1*g{0B`}!=h>==_eV(V-lUY-~q1i2~YIP%^I`h`7R zpi}gkw7-oNM9T9Au|MEpo$ica&_HA{VHhD>l(1E=?uVn3txsKZV@TRpx5kR|f6=k< znzjqrT6z-vk+GFR(CqQsrgJ{xhVxTd;C;#V3Q8D_exz2aZK5gvogv|%*13#_Wun+4 z;M^gD@SjPHZ`(QVNR|-AuNXGx!5E-H$7Tymn<}Ne$Q{nY12mY3xA}hri@|hr#EX!J z=-+`riQ$pD9ZCVFZcJ?Gy-3(*ZC!Fe26nP|U+Xk#iLDeML8rH3&5hv~_=4YoL&XgEjvjq$e&tjk%MCW} zh9_dug&R=54Xx466pbOu9qMzatp5R?d9`|V?3^*3hF}j`=)!V44Lhf3s6V%OMh&xP zi_{vit?AIYecoJbR5Jsz7{t2Q5h)5(;Ar#5PpN>t?qeAoYys6_~;vNVf zS<5n2%W|Vj2Vzf9YCKkDSzkf_+vE;K&q4v~E8YexMX;d8z$+9?GJ0-e(B6$Ys~;n% zg_Y-pHQ{R%(FxJjItYqmH~T11R#cGZZE~WlFQ}7q;0w0NDd)7olDl*B*&fv0y5Hr1 z>ISu!d$|1}=r5&JoE{=Yh==v(9_*vk-y#b+Os1)cR2hf*wHf8rY4Oa0E17dFv#NZ0 zx-KYJA;QZOmHu@jQ#F~MQKrcI>G%o$fPNlbqkG*H|8pK2lKpv9OvKM19-eX6TqY2W zMdJojBikPAZF!Y(yeQ@3{>MG9qrkS~7oQJfKt5$g^JeI<$h~(*<{d>feR^VufLgy| zy16+LVbI?nEO?|%-ia*O&duoLMSnWeW$@huoqtk))sE0%F}$bii`58*GVn}hCYHCl 
zj4}K>15`phGNq9VO7rj6DdxPTDpQ-XxH6W$c*QpRHBbQLAdTGMfp1g zx`-t{(4}*r=cvyg0OnqyAtM99_K|5UU-n(_zS~7ahQ;}t0kOA@!Y3`DeA<8JqEM7i z50rEm9f=7t@pv1}%RO$T2r3fg7yhr#W(twL2g4^G+M}TVko4u}KNo@7v3!KJXD0V~Gg8^1!^H5vG6X}<85){iG=e=^eBCkN%(_5uWeNa0wKx{pE z2mdX^59RUg2N~d^$DM%RwC0Kpt8gK>GA)ft;E(a?@fsUQ>dK& zkJcbO#YO9j#6^jq7XwXaF!;JLK?o)uQj!K{?rWwH9icNHDpHWfZ4B51glUi3<&d1G zUuJ}$Q@4|W@UMx$$ug@O#6_owge6BBnf?R+F0bP+^ z34&Rx6zn0 z(B7OIjnBTC^E@w5K6)5E}CHBdF}-x)fEIZ8+-4Jdv|^7}dJ^owo@?>`Kg?#6I$7g*Kx_&JyF>0YYbYih z##?s@66O4sDkuh6XCe#Fr?#6e+I??s%)bC>tpjccV*=^`|8JzDl`_Yn&U+4|u`{+6 zRawc3(Ie`9^X3sqyt?H&`;A^|q%Zcb(DX*cvh%qHffQ^&k)IHP1#HTV(W{hI8isE(?gMqhNj>@1cJug`NRWijDS=;UN%`(W5`%pQo;l!lYm zln=%O(ns`5`Q8~wSHPDAq#T;yRS8MuJ9*vYu27#ArWNfi@G`%^&H@bRa{&RG!!Qr# z?hrSS?68LBfL?^B=XLw`?OE{Bry47vY&)vm#>50{fUZ{^A49i>zJKOLmDNZuU78rJ zI4>wD?7ovI4wdVE%f-9YY+Pa8&C^amAQdLTgO%gYbQO@yF^XxrVLm+`3PNN|6}V2AQ;be+BQUR8b; zYWo>w!C>Z>j1x9A=xY%MUctK`#%f~jKup;8e#~!yAPRwn4{y#~tufqBeSyMGbs=s$ zh-r`Z@+%z*tTPw4yI8su0h@%g$Px|XhkWq_eH$S4Tx8S~C>pVB*m_2F)zqvEzVUP= z!G9(z>HWmY@5Kh59?^9q=bD;;nm($sZF18ip|7`k=XRBOJE40=akFqjaCdp2dU4nK zOi)%5LiS;eiX# z+fkZ_cI`2R)aYlwj&h~yC#ZUNxp*0-&1GGBw>ZHO!arPh>wSXv!cXYTMuhv+R4J84 znD&eFIx$qennm;R+25@$UoS2F_hEne%T0e1cz8uDDPaZy0XVP+=O zAeGbc{g}PB)4te9SBH|b5-`o!VJg!QN?DokqDPN)w5s$2-ySk;l_vxr5)n_metS7& zr_Spbb*DoB1gltpOl}u1GR%j}A*D}^2j`T40p6Fl-S$G0g8xY{kS=RhV9 z9@>3G(7<%t*Khc6V=NIKm%0*jEOTanDNE7w5MF5UX%Ndw>`b)0I4)*y1j7)20I&yY zq@U)6V_wf#2}tROu%q-3&b|r=yI1)dld;ir*E(W-2>}B@?YNv z@PF!r56+PRp%iXHZ0I8yNuQ5RjAbmK5CWS42s{WrK`L`u76EmTf)5l=0|CC2(b?E( zNcZH?;p9P)k08HOw@Ct;K($UCtgnHke?ri4%RvK~0&H0*a3?XKrpWhRCir*npoT_Q zSVFwP`-6=Ct3R`%qskIgx)nn4hkJk+5(mX!!Q%P!GWG(EW?4lr-~fz71Uv0I^R2nx z$A~|)lUH$*_Q3ZM<f zr8MS&`@I1D*D4mjvqS<=LOL###fXIN!|Z<+k{lIXf=EqCJ|X?Yj29kh*=s=w?|p!r zdd3b+MNDTMVqU!z-5{U7SMR|B>Xvk~uL3^bUQODZ% z-UPukX=Qy4gmB{DOHyYqPCH2xDz6~-rXc;trOblGu^ILkMm0H!)%wSP5F=Zaz`Qy1 zeevT_U`*(GT?(#+VN!vfpdkd-)dj`*SI)}ggONQ%KR=r-TE@uZ@uwgImC~XjRE|tc zCNF^y#y+hjS{i)qVD9MlTRotBJp+O`NJsduPDeZE(XOxGA0cJ0d ztR&5b?Xib#=3tqWH{HiI8jwL9y1(;ZPq)}w%4<4x0F_P&m{27WLl)ZX$&O^+arBW} zcgZ+r-^t*>u0mKrk&&3^poQ#`hUmX4JuSZoh7Fdkna0RTbLgaAAe`UFLwSwR5B0WU z#x?JMw)`6vy)v(UH{ag!40(}MNZze=!3@ma`YQk94Zk`q^29J`fy&~yZ7tPzT> zPD`c`3V6(+1pzx=S=hwP3>Jd@?@DJnkD8A1UK&f`?}l_#B9&EJ8NY27>edMX5A4v% zY%eoG+ugMg0L{XV|D)NngE8H82@s5)e4_e@j-#@e$;&_Q@y*c>9j?(U5Wl+GJ(uXz zGPC=*hT665J6c424%F}(AI);58~74rU@r}QG4P|HvnjPqQ%k@GeuDWof+Ctxi&rXdBW_?rS!o;|~5eD@Yg zc4tB(11t4gzWOD`+qV{*sB}isjb1CTy(@rR=Yk|^Fglid?zwQwNGhh3l2~}W!EwUB zx%TRAhJxY4*-R(K$$U%8Ey+FeaHw%pYUw=h82y>v&EAzJh7PuD5Cexv@(byXE9)?y zhsEpfPp~k=A?10u?dVAdIKfckM$7T=ytV;3jM@~OXBZw_wq35*Lpvuy?&LnN;9^eg zMBVE`mZS{x%TpoGaQB}C0)jCwF2xs~PRAN4zd7}qeGT(y7eEYzFU7^A2I5y&vBVBn z3o69o`t49aCWlD}TQeOlV&6GR76K|M{DurH(q9+~B`ZaIN{b3eB7=g<4IdH-b)=w@ zghgsY{*0Fb5}5oM{~!h%$RQ#{`-cxIDR_y(4in5^!VQp70|n5U0Oa!!09t_25#j1s z2SGaEQsHqgWdc(>*aH3v1V)U64`DDtR06mz#Y_GfkIVhfc!lggQUXLfBuD{S&x0j_ zyE0)eAaE84@&pEOQH4+aca48Zwiv=#mh6%UHbe?$Jl<+^F1OXmttdQvKVrB|_phJ} zh8VS4Np&l{F;5s`<^6R_mK)XL;we-gYLHQ%;&PJ$%YOis#N97JGN1@Gh9utr`3YvArA>hekMz)3S03)y^O8jwR zON)X3^= zw`M8LsJ)Jd@j`!uLjnvocjF8TZvX2zfUxjwwgG}zzL0K0H31J&V7PiwyG3k(@*|ft1kl@EDOs$4Wa4Z^ z8UC05iV*I#XW@l@n|>Jg~-uY;I(M!FUyrC zY%lfG{V+7Zid?q|yOko2y|Z5j|7+W54}v=(r)foUl_{vTqbJMX^c9e1&__FZ!}Ji{ z8$AU&|7&arWZ;J}(yx3bNC}4mkF}~P_^O$pz&H|lQ!lp8z=Evw-d~q!BC)Tvzzr|O zC4A~3;&Ovd&3VMxosK z1&Mtm{F|_~^RA}e2GN7?UoY>Gpwkak@7KzOU?u63%-H#@RhOnnZ$aP&%#hl^<--0T%nXgy!u{lNP&}XZH(vmI4iD7L43^O8L zOtYQwNQiD$AEaFL8=v)_;Kf`S$KRblr%XhwCfqCH$x+iQiPfck*QNZKB*H`g9{Yl!?izXON*10QWxDZtUpcFy$DpDm`K_as0IQ;}O;1-j 
zt~~IY>!WQ(YvYSr2-Pl`tCL)5SuH5!6m(HOzOo27#;!Ag_5qCW_hkufV zCeB79HFDe|uN17Izzh}ZtutS*YN>Qd>H&*lXK$2)b(C1zlv>rRNkhzHdv=nl;B*4# zhsI&psyS*TLhGR%7F2TKQ^+6H#{;sHbZPH_Mnf2_P4dMfc&n6r!3zKEm6XN@+vD8h zF77@y(u~NsvE{mo-7z%H&A?_G<&9KSz4ly9#f>;NCCj%@J!85a56tVLBUt%4I0#{{?4_^i$dyLv1aB@U25(K{ zbGG&izdWw$BtUq8hDLS3YruzIb&G{ngb&M;B3Fcy0WBG+LIM7%i~I$tQS7g=MM(DL z&$(&0RE**+Bc{$*(Ok8*t3$@G*2lAU7k{P|L^W8hSvywl7-moaUVB^&OlF-1yqEpM z3nt@6g$LzB^cF%U7~0sa}?f;)a0#aP9Pn^U<#w`bM^Hi3*$?~h{C zf5JEb3N5kFy^hN_8x?zNqgMW^%oF9%6@!-Qs6QtJfIvEipr5|;CC_0);9MNKpvZdJ zeXL0Og1aw*R{H|4TliqYR*QvRxl>3Eal3GMPf4HI^=fXg^F-_l%p-tLpV{oVL+ z15Gp7_sS^bZo=*=aj$tNg8?r7v^FfG-X*38%7WsBwCy)OPwLu`KN_l*H$Ott%tAr! zr@(2Se@@=Za$NkVNmF}9E{}r1xAEuKF@DaWHx*h{|8ozZygFsLgnDY0~Aq z_sv4Aud=_M_*4+^c)X8T;|j(ihq2)G$|YS1m0tx*Z-0Q;{5PMY`GK|!d%^3SW2X~) zOV0+nM(x`}>XKTYgZ)f80043KZQPG^X@`3{6Gi<`HDUCvClPn`Tj5s{rvsS+jyX}k zE5}lq^?yeGKnCNXe*)>!qm(_Rv;zfwlKUi=AF8hGc-27j>S}tQ&ejS8oy2NEs&@8D z4ag~RG+$(yaFaNDGy>^3@Jted;zU=r+WwGforpM}@R2@CbZ?QSO$c;2!b`PT+q`ol zMl;KL-tS4fBC69Z8=>rz;3_b7+vZfw559AI{7dZc?z{&v5G7UBxIWp4uFjJPPZGDZu`5nGU^-hB zA3AyM+R&bUJ~419Dwx_ptNk#Qp@21dbz|Ii*Xb~MXA1FUnt^J* zB5>wVADG$tx_~_1H}Cv2Zkx`A{K@d)3%-dhlsh!Gn648A4^EII2muPZH3DP8zkzUJ zAX?y=V^kC1;bYS4C$*e&$-sA@_u9dh?EEdXP z!oH`o>t~K==$%~7^p0=N-qy6>u|)=7Rteuyx2(e}LIUzYF-Li(<)smab)FrsRy`qU z&z+_YkP41Xt6yUbde%<4LPN*()Kb5$p?-Dk&=^tpnSn9CTqaGLp;cFSPFov-@pzX_ z?>8|tGsleWj~jXwX{mus#XXCk@6gekLMzSSLMkVJVbSN|Q~ko6x70nqk&F3TM->;g zFoCr&NT~U{@&q5T%;R&8Z;!*YTV4WhW!SEfRI1KMn5C;Rd#CD1{#Lsm|EEd!*KM94 z)pSm*9xDCJo2m@TBpYSR(nruz-SPS8HPrzX-R@605yCsypy8A2&^hSbBEmytmS&iUL9<)bYu)J*TrSi$r$VNw+xNN zR|E?+4kb{H4cL<>l2>S!yE*kgTv=eNowQc_RNL7DcUtZwt1O2dfpO0P#jd(A;BYHjPl`@uK z<&EF)Nt?P}7aDNvEoFa>KagrXLj<>=BWeO-Iv)14LucUb* zkz+JP*z?=o8(Dp~84+R9>QVEL&b+VR415Dt`tDvK^$G8I_!NDt4-Ke3lN=;SZ;gsh zea!c)V7lK3TiwWEH75 z+x88w;v((Esrp~?)M{!? 
zjd|am>n#tST~V8-P5ZW`h_|-W)IoxYEmP)C=fbw2b^EsD%}cr&m?oDX$+w3SDh-6N zyV$~aVk@QmP^M8iD)BC)sZ>$7_boEjjYA`|+4JTq4}VqMc5W$E=5u;0%^!XLK;GW^ zWJy6kcT4_3#2KS{u8sTyjc<&pYl&5i-A}oGbA*kvdR4IgYrTnBZYYr`fEyE_yOkb} z?+$5;8f?4vaYAeAcxjt)d*p=vO;irGG%b%;8#jwPDoJJ$ zu2Ao6Y|3h8-o+7R0DSjdbS?pmjmJE^jTlTbF7gNhX=Q~e82%i+ysSS2-Ke;nQNI{6)Ali20uyb5CGZd zXnRUP?;p4ZLE)W|fA0JX=J#;@52C|Kz~|kAKJq=(x7dPcm`5idaFB=YQNvVqk4~{D zkrdKcLKl9pzwP1H(N>}CPE{4^*a)c#zcY_+Zyg|~iH!4qFO9WbWo^tU^Iw)8Y z#D>CM8vii}Ll8Yq(2kOt`f_Mw7c=4|XwB^TY_=I_zC24MbGPZPw(`f-v6e@5S>dbI z8BybW-9E9AlZLc%_57BjxKC_cvoEvMPD#jF(HD-(_;E~lr38q`P3pQox=P5U?lnJd zeG!CQX{2GJ11PGQtmKLUXB{x4~SQt8ZvEtUb#0#k^hDD)K z58ExLfpW!s=C)(gktBV3qhuu*D;L8bK5l<$Qm=g7)vg*`uD)@K;Qd&=X)RR6>teXc zf~dv%c6nQKu6F=o@)LGy9+m`jDqWP#sv_3RzLIr`(<`w5+Q-Vsd993|B@~3bkor=; zJM(P8(B_f@K`@q+NwtUls&7;I1sAnww*Bj!2AONCDllsPnQ)U|Z6oSJcwa;G4J~xA zkT8JVyYn)t4<`DHbEx6#3$7a`vIIh>-jsGx*P;oFIh_|?+iJhHJX-1vU+?5>g{&x_ zsEnKSf_6tp6{DX11lN!|jUsgjGc|909o*5aIu9rwoe?YQLZgR*Co6Y|N^mQ~KJ89a zCQF?eyrD})Ws(1dX82`LQaDy(eYl$75nXc9&u4uT!Y;ht<)WV{V0YJ5-z99o247|e zr=yFsUbbtDXQhX#eYCN{iC3ar&_#KeR*if!V%odO8wCF*{Uc}e_G`l3fdwk2<=&(|4!y}x+}R$cv<+(;zj zljR#v57tojBPw}O-O=jaG-&kFmCH`3MRFYYpAHz-ykWwE`4{j;VZOrxt06K4e2@#x zenuTmvW`!k62OTo*$NeY5|42rYyY9+hTk*96oFGcTW;wH*ZPd3dXCJ<+z~+Sw25<$%w%WibEPksd7ij3&RP#c%h9m({TRm$7%W|6L?hqS)9Cai+SGRkE6r zzji>OlNrkN+n!1&6i*+)m)>MkH9r}Rbh5}^`e=9DQOpU~)rWBVh*BaWH*vzITX z4g9zkn)-iv2Lj1}g!m9FR<9vV4Vo~TDeX?J<(8S5=k4DU=bX1{-5)Ra_v^vVU>a{m z1{><~c2yMpEwBVr18=`sq6^18ZeFZVE`;C&Pz85<*a_sHZd7kG7Iwmw$s<)%ObTj% z*oE?&LHsN!xGhQI#995hxiW%LZJicz0*v0dM(f-PMkgm>y9}-H0`h~WSC)~kpZW+# z#~Id}Rde+2@tW-aAp17Mh%o-c>d=D_Z*Ae%m1>##+O$ecm7PA!JGJTz5y+6_WR0BW zb!^A6(9mGY=BL6^a@V}&JMGV{Sm5cW4g2WM4kg-bSz|&uNC}WCZd;W9_{qqgNN$bPg*@ud@d;+hf$P8UT-{JsI>lndMF?l+?^tFqlkaUk%aD}4Y@%YrKQ^h25unu3oIN`;~Y(%!xN-JSXXCZK*u=eWols5LmlAx?S`dE8w4Erz30;`Ep7x*lia_383q#F*ncd1{l#vs z-g@egUrIyPxbDPBbUn;yyw-)*95KALTuVMOmV@V+&`_$c#8`Z zGAdI(TqqX*@-^o$UgO;}vQfNuRRdgv+&2z%(wVffPC3|#w>)@dMU^)4W{_$*%BXes zgPp|~>d5b1N}Nx$EY1SoOwEOB3cP=#yZ#MBfsLq4BHKP#32BB-Ljcbz&v(_L=@Hmi z`v<`K%2A#g2clA|bmt>fpWnaPZw2rH`x#0JPcJnb$@zM7_07}L!EqwC ziDaBka<&ql0GTMuEkDrPx;WNfVY+pl!{#r+3NS=Wsof2Ki~%-ZC_2%8ZZvM)k6nbh zR+9ll%r|@Nkc^Rx3%`vJGrAzU9XCC7J$j(jOib3s1e#S{81zkSbG_R`li`=b*arA+ zK5LsF;%*)jIZRFr z(k*-*%@}vdo&F>RsnU&4v1&0i(gFR=3t(i>ib*Pzq92u;AW#04Vw-rkIs@Iq%6dDc zLT=W(Wxo?Bi03r&n4Hfr(V~c{#m1gK9(mM5K}P_`K6%yriB(*^f`+!LJfgu*v4k#l z0@PIv~!S1n1z(- z2_i)`qJeR~V!1MOznZ3Xm@%g}QAwx+8R0qvELm|x6>m9da0?E|A+l=}aG`mO$dV)c zdLpPIho~emPD#za(Be1$yOauCv$3zy#8m%OGYWH^_En8DI7{V&DcR4$4!Hy=*Y@I= zWHMgxNa0v+jq?OQ-|Rw4TrL-iwbQr2#6FC|9r?uC;`*krW#o4caEQyX?WaN&+FS9pK0JF&?fRD8D9iAiiU3R&C z1mnD8N8m)>Y=zwVhERbU;hQJv#rQZ% zXHZG`0?*{!6j!fA9Xy~=d)RXOXl}T(=MQE2Ug*5@9syWC*yu2%*acnhLrQk{c)$`$ zK27FK(KVv6JS=mZO9Wy!J+)1ye@<>-nPW--!#^r!apjjI>U{o~d5SJ23L-NX3Y|Jh zE(S|%qml}@VnC2v5Es~|YBtBJflz&c0r#>B(K%x7$3nN55b7!2%`Y|UimsJ>7`!IS z9DsJ^JNH2J8Cs;qzd{PEOqgy0W&Pq!org zEmF0uFFt9o*;GjW5UO=^LyaN!fdbM8Lbs^PeR3s9FSX5dDxJZdxu+kY`U>ScjP-vvg>Jo(^9C0W z*yf8e4>-{_=rOg9qrV0#F*%q#cg+w6`~b7qhnn)%S@&9>6pZ=OPEifS%4 zo?-Gj-+X`KTnG5}qbt~mwK?E{-)08SKUoo8&_qo1GXm;`dib(xUlJk^3*{}3dgwT$ zI*sh~e|l_~Jfeg-#>pY3IPgI4%8H76&{ie~`ix5HwF7Q9x%S{~f4f&LY>;3dt?~J> z3d!T!O?k0wh=he*XWL4sReGnUv72ud5!9kr&8mfdA^lC&N=gL7`wM^#6ZpOyqEZFrSxm z+xG+{36o%0NOAId2)s?jyxLV_x!4kCZr>9ClotscnB8CSngkG*39Xjqb0Uhe zM6s`V&n+IO374dt=T!A`_>c7g^459Vb&qzHoFV5xbz|)fj3h0kH_Hh(J^PZ=HsEOW-%33F&WMyzA|L!RQPgk<7ojt6P!x8TpO3FTOUI zD-|Ou5VX#MCA@_J9`W(O1DAUCFGuAnBm@^6rsRidMhRr3Vv2(;D}IxcD`i8a7W3tG z(32u5Gl*3~f0+)=vI=cb)5TyW6+ibl@+KAUwW?=%_I0p2FVS|;ehnHA!S?)6LjIYM 
zJDJUuy3W$|uqr3yNPuK(9m%gAx+bIULOJv$Qr5-R^G(}Q{+E}@iC7wBVEv}=7Fe67 z4|ZIwQ;Z~G3j{IvzB51l7_S2JVmnoXpzTywOU^lpb#IrP5_i_6ct{-j$*6DUHnWMN zp+TM^PVT0fnF)~7++8NbnaA5+opi+8Bj(q00ye@rPdQxhuIY>chPYn zQJ!BT45!5UOd3=WAPb~Cji48>tLKc?X(_imkJEUx)ZsifBhh5`tE?w(?W6v?$u=W0 zaP$Srk6pZvQ`ThkE%`dKT>GYo*H<)y=Qlx6g1MjHa7H)OdUJWHAH55c`+v;6bySpH z^gsFxLwAFuD4}#pN+_k23eqJhA>BEYpeP~TCEeXHC?VZlqjV3_eFxC@eZRl^UF+U| z?ppVsS)B7cXV=;Lv-k7Nuur9=5vT*G;UX#zCsSfaUtk|)MqqCx=ymw-q;CY^-w~OoiC)r3nZiC zpS67U@$_N;`*Z&^HpJ`mIwS5M-?I}L4WGCr2?{QdR@ORfU`8Qb8Xy~uQrm)pg(gVD z&)$pCwDz7>-)`C`$dyYA7N~>KuqV5|hsQj!+4xk41T_DEKa>E@NccIF1!ac3LK6W| zMGug4WhhHA0s9w?Sa@v+G1#>kg>@ImkJo z?RcMkRZkE81WKgAgXmO`6UCV{-cv*jkCFFNgeplvUqSmG7>`i%ccBaD?3PRreE;gq z<8Aju+BoOojs(lfpoN82Ks_RZMgGWVrL`ssFeyQa>2GOs5;;(D`$J=V&KgpXb1kzlH?or| z2ja&4H%(pY@O~WR@%?980+}J5 z(?m@BBK-K}sTU1nXS&pxCp0*-@$JzgC%OYaC+d}*5V{b80u|rg>GWEJwCN6D&>~tY zMyMfcb0$o^{{$iW-ZWX+o(UP|9i%Llgl@eJ7gDa>s2bycWTA${o;b!(m#X|-^{h0u zA}`m7Dl1obC3oEcI}Wn9hU1zy3@`k-zUl zMu)kvQ24NVj~zwGXp*j2tx;Y|PNO1&KKBT}5q>xN#28ks$j5ap!a!^Gu|ceJd{FlM z_LZl%UUM^9f}Yp26dCAs`lWn6{)zu-W%8RE`B-|p$*ow8YT#iX<4*Av)Fn(GY;B2G|E3_PvDbrB)#yDw3TJ3-Fd zDT|%-NQPBt?PpPJS@dQYK^B&LF*aX!qKQGySowaBXvSL$_Jm&&(Ahf#1aS2ciG-e| z-#JcMcz(Crx5>IHw$pFERFz&tt0jj4o%h)%YCe01Bsv87MhEl6XYpkJzCFY;g81S* z_@ER?agY%VuO{XMV837;__DPCVX?on+oDD zDSXcVIfRFzB>uv~f*ypWn;_Zr^6jM~74h$GqygF(=H(&r7*M|Po?y&ly@Lu=paye@ z(VP)bQ|F<*aDM{=0{{S{zpg881O#r26X1r^b<0VlOAg@`qA6*oj@=mX97ox=9f8k` zu{!U(VL3=CV7p>?#P=a*D@>o|-r`+r&aaVBE7Didl0HUwX<-6Gxk5&9Qdn;uVhkD& z<9~djFE`ms^DR1{EXGPHprpk8f!tc-PNVDn?oz6i(1ad&G1~6K27aEM{+-6+rak)4 z2#GkaJEqw|Qg4P0pysGJ z(SR1BgY!^8MlQ$Zm z{C+_>u&(Ek@SFL^SECnEOte|W#GgUkjn%=2wcfuUuKpnB?7~g~1uIAMv4`ZJh2uKz z5%QuoWP&n&+Y~?=&miy2pr+QzSG!yJ{3C%Ss$xUkx;vsUYgHNr@6z9`jGBX-h+UEf zlgdS(;z#p6<`QGBD+J4+W1*g7BSfIn0Bu-*D~ke=^{-8bSvu0fY1uEQ!x0{pg1#Hm zPs4QC!~aI=+ES}3c{nw~Y&gdQ6DkHpT0!y#A8PcW36z}7cps`pQ_`E+ygybo552Cf zdcKyJf}j5uA^0a>BM{gt8kAuNxl3^y8ULUE*#II^zvfnYbFvH=RXh%HP8Z@N6U3)J z;y)reQL!NiSh}4+{R9FdCT}PHo#sY>!0qi1h`^{&#Mz^BqUqT9j9}gExcKR`YEtQJ zw?lMWPJX_r*`Aa;iYAto&MA(P7YU`&ZK=kYOriNBo7jG6C5@;J5?>gfGTX07Du3CE zoaBXhHu_WEJw~+*#cSm;oLkxp^R$_uc}7#oWAAloVE5JVRlb{^Tl1+i8yk*`W-F5& zGhaV5rM^3kiyys0yR6+{pZwVu%lw`!kdiZj6$C~JKO2&j$=Wyh#YWd2>eSoCvYnXX z?V=gfq*U31N553(V%s1?S)yL@>j{+}p@EW;frAen@&qH;OY=)Gqv2vIzD`6F{WNpA zvzw^kgYsXs0^g9)n?Q0>!I9j`J3BsJZ?L$ywpQ?P+yWoOnJyj(O0S$E^>o6lZ>77*0Yr*C&gh@Nj0?2>dm51Py!=YZTjh2o%(&|HQOlXT^zu>+p z#b0OVGmJ&O9VV(G?hBeh%{oQtHnpDzC4W#kkHMycKDq-(efEeax?qOx4gI8Q+&aUm zgjthZH#*T>UKW_jo?X3M5Or;lNmEmwXsU5C(fH|-a!ZLWX!`6+y;MvcbbDK&&(Ym@ z%hl}kuz{l3?J|J_LL-@tHeH5f6>cQgKJevQP{|Us@y81I zWV3|TgpK8~*xe9p*OL#O1j`Q|r=m?8K`H6iENo2#xT*&tjf9O4adVXO~?=!Tfp~L0CKR&$3eu2dR%nK zh;=5{m&n2Gp6DP%TxD>(K|nnD=%B7vNSwhw0B(oa(msKJ459mo{eVJr#tUl#?9yJ$ z-3e|B9#!X44c91DuT*D?_xCyuT$4$IwI8+{2pbkXt6@%~`Q2(>NzRYgA)Km_FtgR( z*qe8@_f6A>q%-rIqE1wdjZU$+j$@k5Gn3a{)m=-!Gwq}pFiZPPP`olUu_7~n+_&!h zWyCd_e)+%!aXz(~&uWD+s7|Qa+&Me%o!ZN-%KRepqWRKF$uv#JUu9O_FC5!+W6$r; znoMB!BJI;Sn@aKZZ}^0yRqKt-5c-ZyjRmHrqIKq~zAjE&FaEgut0-?+)5lp|;&owr zI(dw+k!tNPDaI}t(2EV*`+%w1JN3q9J#P>8+ZWl{iPrAH1@70|YlSK!= z3#VA_NAJZi``2q8jhMNGEpoaVuhuP|FpKaNn~x6E3BE>AH8E|xFDHb|V7=P#EHzL$ zfH~oHcGSq-F4Sm>rp$OB{`(V_UzrCIXqiuc^S)MlIK3Hr$4sL#UR7gu<$XwKPt*NC zmCup<281M@wS{O3{q#hFv(!43xPi|W8!@8QzULku-!BzCW!r);C#pd#ukvz6-ElL(OX>;3=+BLIn9((Ewt!Cp5iN?K<`fcK)fI59e+v$-dD;m%8 zO9vG^D%0WeDV(%v51glL;lk6Q@8sKL3R_?6=!tU1DB(4P2Le-g58IA*6Gv5j z=Gas*uELRE>E&y%OdF=y8`D@4xa74woN@h0U_&#r8|j+gf*H%?jfleLXJD_FSMF9c z9=UmWkzE{G(ELZR=FM4QCCX`v_T3nrNfhENbs58g-I5O+zuBC6ChkuoeF(P2Ym4~os0mlZJtds?lbI5Zs?|Py zmz#mv_+rEPEJlhon#r7ry;7^bWoP+P_6&GD; 
z(?b@rGrT_}hT@Xa!1k2T?xCghkrBZB2E~>&{8N%$QT5-8VE`^~fTm+7JMOw9IZrQJ zsyHSRr3EZJoAW6VXE$m+NdyhX>339+JD2nT35CTk9N=b&J|IoY4Cn*zIS2vH(-$BB%=tY98Ni0FK1D%%(%(g# z&WuNb==i{M{tSQupF~vJ~R=L1dCVZa~4 z*QWZbH{<`5xyjk6>{dwx#V}Yn5K3KKPprK)_Sn?8Zs2$0X}oN<`O3&XeOdAZPqw0g z2O8WB^@i-V%7o{n&@ML3+FlzOE!D!jc-E24rY=Wg{c(d{O9$==tHs_m%LN+f7X7WH z2+PnPvq3^V!{f=cvqV4E_;o(9)7lB}oHPqIYL;OF+q8|H{qos(3z>dg|22kn{tF4y z>v@zLeGvA8AmbqmOcFn1VvTDkBu+pvE$c7m?0;R*=qsQ->2wDM#b>;yKuH6-rq;DEM`=$AK^DnHHYVC~eLt}c9E zCkn+YK0m>-0W-T)aoqYFOwZf&F#leqip`pt#O%UbI!_a?c5_o@df`usbkL+88xAD( z9Y{HaX+DRl>kr)6En1Qi@U5~J4_D3Qmbl%#^Nn~+E4BGFUd+DR`x(z4#3wxX5LsY( zat(kjr@7(^h~a<8Y?95u{YPO8no}W|XGcD5S6MxMpici}C@JQLNKwo$IC?;0QWNe3 z8C#{37Fhd{7)ql%DSLK2->sJ-!cP|d<%%%6&UyCj`M!iS$906C$J5;5Ctb#}3z3z?10AtJaa~rq4g=NB< z5Qc4mJqqXwU0aOTGlGP}*le+(TaekLD%nGKz@|-?=uV9yP_oI$8h(*RBczzu)D359 z(R#)yRCnowa|0>xLy+PlsUg&1_`A&1VkAg?l=-E4S0{C*JYY@S@WLP zO9iI^L|``fP-^S6T7zzoVi|xL=(av@?I8<)aj(|^8LsH`B2N=_)|UvF*-(orvuHwf zc}jcA)upt7#y>Ph`Y#@x2T|L&)qxeIfrRf{Xu`1PKnaH_O0Of|1SsFpUE_<%N_})` zy+y>y-s=7O-xUkFDOacbcU$ad_4miuL$5ZQO>O}ShUS@Y^v9=VW*0D^d1)MZ9l}!+ zn>I)`Ogu9QK!Mny)FUjnqZzV%5u+n1U;aHRI7s*ggJQ8DuPZa{?lktkE{iMIEc)mK zRaC9i&R@nc=#hPYkP}^`h#vUH{8Rl{fi%RRH%-rnnA{zxaFXhK3;u{sYn;;rfUCC5pUIF$B=xz$(EesX`fBC=Wo5>$->X1zlUlPCSl z3GN1@^z%avJ2u!+)ao7kHR9OS-Cg$*>V$t5uWrkGD?1zXCklPorG{?YxkUnmJo47? zjv4T6oH$n?;w0~F3r!sb%tt4fuY9mmzdoElyZo~|P#tj#mvAZHxH%Z&euv5EqyrR1(17vly`9-(PLryhO#gpu>n4Bz-t7KmQC!Asj`^-e`%*+mHxn ztYK;!2E+H;+Wi?N<_)qEN9jLA0XRLvVF94a zM?1jZXh0x*eE>`lrVzf#DHH%WbAN9{*KY*(nI{%DsV~t9>sNWJKyk^NWDz~~{ttL3 zVe<#Z8zc%QKOph~SHB_}z>n8!4?XdbWj&ChaQtUmK3Um_9{rCX*p$H{f-q1v0RqCF z)Fm96=37;$HI`nOZbz)zihLIT?9Lq|56*ZC2(5jN`}pT}C0e7~dIh1x5~#^F2DtjI z|99p;o<_>)a~pbqKEf$jW?(8{4_BoBcq!63_}^Ig8!}M%fi4i@{qM@_LhH8N@?dlG z-OvJ;{Clls6Lg9FFU2^phXLq6aRYe&8zuiC(})IN%{?B>Va#PVPg5`Xk2Bqe3PCs0Ej36W~Ai$-{*2;dF6j^TR< z^Q8L@fSPV#2(pYK)lqjHkJMz)G`*glRAYEHb=@t#p4G(J$qJNmc zj7L&@MaijyVsQ~>&O~v!OzIp87L~;3g5q4YFB0=2STq31Z%m!W5k+>;T`wTUVlAFx zRUcck0Lh+&p`NSvqm652qY(v*kA{zZgQxdG{cg-xk$#JfstqxRnO z@4GMC6>FoD(_Z}E5hWvrQopG>z1(p^Se~R63wB`yI)7^_%9Zt*1&1Sl+dRM;pc$J0 zA$F*Bi+DeDA;NbgSsN*QjyPSA&{~T9%BCS(S6fT(;7Mt?hB8Naf*?)_Ie8X7ZM*qk zXJ!&8Sd63<1Gcd6HOcpKiR+!VnZPxH+WB8UTUdollm zy*>{Ec}g{k?_cmqs&jz8%|<(w1e#6E!Q9R_)YrrfKsNPZk3_y-Ts|WitxP6-if@OZ3oqM+4=2R5LH;=l}UmkeSvGaM}(`%pTMaw5f zfs^r#@#3pX;Vw)s0+@`@-iK?Q3XCkds)Qnx2V6&s8rcrkT|DplDl6~IHFzx-dyAdL zMd2oOkO*gWR2OV>ga-)o!Saa|6)ouC?x;JE?dsBNWe{j_cl)a|hx zrGDVtWTD3+Y_W-mGe(j`k7rZ3$KieF&4{~k;Hj#Q;bosOpc$Xss0)R-wJfX_nY@qM z$~zLsng!=1&(Bd}A?PNDU`dP;CMiX$D|mpjyCdD^{-SkG(K|0p05{|)vUE%UsHqpF zK?U-S7i4+6)}@Yb&|KuUAS`QO&)lcXp7ffy z*Z%O;SIiT_VI8qgSCDe;y-l`!&~FN~<`ABg^CQT?sLyuvdF(Z8_^JbzV`Hd>NAN&X z&mXRRJ`+|&L4MR6*~QxjAlwkFjh{lWCQW7`x0MY&v<5887xrCo0P!&1q5n4%i!^Ge^#0ypL8KO#Dh;Dvl&y@d~1(4!G=6oP4xo(iWNL@5Gn zL;eRP*r0wbsc>3HFaoiEkTiG{gPNY>Lh1g{tN`Zm5NHa75dAat73e+#LE$J+E+m0Z zeMZ+}yAT9!Kn#Uo!I;s+FizBKX+SsN>F&!xCJHqj|34wSWDzh0kvEYx`d-i@iR|1` z2_l76X>np&Q6kXq4$$=MUn#x3p*{2ROw)h?nek^W~c(HD`lV)u}_Z2@~s3eTYHS0|XHX}-!h@ED^1 zq%|U#@V}$K85O}{33R-a@_*X-HZ|(B7y~8$$+q~QrX=8;?7Hl@-6wxs_y>$XxfG$@ zPw+Y3zZY+cY($C#zK?SQ=a&rsUh(H|+2#6o_}?2r=P1{e4?>Hd!I)3bj*YC(O03&pY~Z;V!Ud8HQyYiutEuuyZ^V8KmxpQ zp|h{X4lNr?dnX=_*NF{mgvzbM+!U%dPAQA!0`5E3YaXM*diyu8!T6?fz=T+T!GpUz zkjz*STX*y?X_}*TVPF=8t|`#PyQOH^QLG>4X8?V~jX3k0(Qe73`$B~OtWCt&MuPMc z;lZUff;30H7w3!WhQXpxjqi!)Bq$|I#%)^tuK>y|%hA_?c3>{}qiTNEe?`6IgE;R! 
zQL(tpNPktGm_P#uc^T)-7HSOwYVFaheD4u3Zt_NJQ}7AZ*KY2pB>98o$&9mQei{B> zD;!W{NzQWG8xt;L!E;fMg$4mRrlh$s%DE{7l0Oxpirt2&@j;(fKJKH|XmN+$(NYOq z7uD+`F5iSqBD(zxUl(34oT1|@+`BsBi>c_%Ta1kqQO@h5kF3u58&X& z?^Sr!N`9VX&YNLJiBn-N@ou4O=peufH4Cicz!fa~RKmIQIsW@eVuxL)>1DhJ+aq?fiFo9Z!hR)s`MP5%5yu#g$7&JBb)%-nS0Q z)cEQoVF*{*xt9k&OWSgY_N4^DgJXg}eqmQ3hhbPL1w}r8kE=Y~^@35|+pBKr(zDuW z{KESJ2ukG#i(G_SUU_)$ZQKK(185HcD1R=5=Ov1W)1-uo$3wY!S4=BpD9I^ruPjFw zrn>7Fnk=qe(%%stZp?n`SOnOzC{!{iH#%{y0?pd=%wHF0hXf+dL|0#^)|l>IkS@ee zXT#djTnz2JnSfY+SGn$C$-sq*+KPZSS^{iIWK00{nlHHrB&v7xdbMSXvfk#_Es5v$*NpP?^ zie2FsA@4o%Pw3{iwuONJkCnsoon#!8CVUAc&?Pzq#JMklNy7tuYzf06*f)Y4O8{`e zThRe1VhWLgusy@Bx3`7t?=(Q}fp`Y{v*8HOaakoud=KdTb1Q=O6(G6?5=Fh0{Q7?1 zOB^_&gX!&7T(k0qjMG7{pU)5u2Le#_5QmL`^;i9fucNR5kX&PVZYtr)_#B^>a3S662%< zJ<&-`n@WUAbV}p(@kR zHRQcaO`T3sIMBbj^47w&qZv6VapG%?oMd3Gnel@KJigQ6cZT~R?-s~*uFz4%G2I@4 z5Zi#?(uj0Ei*~#(0WM(8e3eiJOA2O&xqcK%ULyI;0^`Df73=@1ZyfVmSzCT5^Zhu0Dntr_Xqeltxhh>KhVxx!Z_W@@gjc1z_`@1%_#D zBcPZdr}gh_$_HQcXHRZ<;K#^9=T zydUHSZV=+yEscRCLj9#QFqM$Uh6X)LX9x{zqvj61omT^GD z=>Ox$saL=_qE`Cf{Mri*g7@n-7H@aE7&O;--N^ip8~cjl*nGqJ8wl_K^ivV&v@z6lkW7n*>;2k*-(2a7J^Tq2H=r@6P%})rr8*qgN(E?p!{`Xd4$T^WL?H?Gq6}=y{SllcmOak@@ z1w;|&Qe5j$wq;&rf;iJA0Db!npmA7Mg9Hl&LCJucd4Qj$I<_`)D3!i~z0UM|QD=%&D-|4IX5pgn_k&{8DqIBo+*`c>^g=ae zc4*sxm6s?UpD=5hfoUPyhd%*WJ@$(OCq89oT02aY>8ipOH`3Mj9>(U6u7M?W1x(do zVmps8v@=@G%W6j}r^?B?-?=lHcebzeoT*RH?sk6;(baKo4XPWidcgEa=dp*Rvv-L2gLQ#t)q=#r_NN-D$@1dje~>q{7J}~g9!n>;fCnS=LM%f4aRLWX}Jp3 z*<51eU)CU1_2@6<2M2e{NcO7t!V9e0ina1b&mWIg{W4YaBEYomJ{wXi!AEuPTQLUJji z_~5=Z;n&oke41oeItxK7)5IT7#*xasj)t(BWL3$W@e zu*ovy3a4uu_(nKWWSd@3-R_XVQSognh&N+FILw8nEoyRdqA+byxp zt#)oXCFPviRBuo6A>zPNElKBC?T&lTqlM=ZVW5%UmD#NE%7twe@Hf5q4_`=PQXMcu zMHCehU$+XR4d!Y+6=A8<^Q<~aOK9c>T_>+<%U@(~+7>#<3?RXVz|5@vkzK7SHqXzq ze8RQ{^^SBZJ}0vgJ;Dr>%N(!Q`lFV)tWzIT;8Q7YJMDP1^@VYjN2l}G%pEG+Qk61T zHD2*vrRQdBn2_U=#r)*=s!g8$=87L;E)=px-+K9zP|r~ajB^xjO3_l9Va-GtMKV9? 
zUllRR9U8>YxS%me^B`hftsZyhj`@zGSIUd;d&&cm&v1kLn0E+$?fIylS~RPRP&_8g z5>L*P>|Ktm8ZOGe_h$5k{=C>QMtSkcqRb9+>^TZXh*?|gS#x}isay3!!MN@Bk{}PiWXg6_- z#^16O?pzbnE5SaLQ>OH}^aRxk9Uoa+yG7da*FDI@F$9cGA_#j`(*y@27a&ECFJ(|& zt%ncUT#W5bg(&&IDNafL5l0u(eA4^keqfEY7c1!aj2f~Rt^RT%Jcq#t*L&KMGLsz2 z(7d;Xtv?-+2{HW`z{x0hNxEn{*3m=%A->G@amfJKP4m2*AdAdd;ZxYzMybJhH{d>uU$WLYfu zM1CakZm@ASwuxum6!(iy1sSiDO?Pv1_;QvfspyHq-aM8kpy{xEh@G?rWIhndj}yn2 z`)Y_rMIwQt=w?!^+qQGRR^VX`(<`2HTsq=UMAQ|AqCMoZiLpUp=(aXX$9ExNJs-QJ zl5;!WqQR(}oN#C=z?y2r3Xbyp#ZVxRcY?^j9ry#g9P zs=k&puB2e0h}OF7Er&Qo8q*wxEvg@0EmpGfc)iiNy~AWcx181k%>`O5oGie542xgFl)Vf_>Sw-NDcXM zmqd4`>-4L7`Ird6NL%Si15@1RvrSu5g%LDf{=v>kkV{~(j$h#V-6rtyfdfg3>=ejl znvmuBOveMmDFezV`vA{E=eg;_FkC3%>AY4fsjVg219?IMtam9Wbh|(Dr+>&j3WUG+ zjnSv5o(o@997ggaWY;M)+k0e`;GczZ-s?t=D#2#9fGzYmp(9mLFmXv;rmk3gtU#zd z69%z0QH_Oa@?DO+H-g&A#Smxg$5!JM8)#tC>^3+uUOdglRn9c#R{yk%w#n1`CEdH)*+LAYD@STeG7a0=hSSqzHqSrWF+{AIo-wtdT*%5Iso6(r*e1G>4PYj zfQRxPg$qhKQSwoRf$uv>x`VMFAfR@N9+_7`5x?7waml<}Ui{ZX3+OPU9L7FollPWl zj2+SwX!Ft2=j0!ue|4_t&D{w70IgNPkdVACo$8F@4j|@(1kN3dU2T19@g+IAUs)ne zKB&8s5^N}6Bjc#N)Ro>h8vh7Pu*nmz6p) zgH%VP51O3($L8CXYhuhoz%;I&So$iSFauwZ$tulHs#7h_;^RW_y!TY$gwl)ZqDzVqq0J{F30LFMvK6KPUN) zb>#l4_UDU&X=6x885Gis>{LcU$sU$J|NC12o_9ftfsW`t4-Y<|!yua4$`p3FCPzWk zv$uYawhblwW|!!U2CLUh_0E$Pmxz4&3fs~jsbu@)`Li@z0UBf?$TQ-67l2Cur%f*u zo3S_C)OWw1=OH)JzO}x=U35at9}>{@5vC|Zu6-l(H-mj%-LEk|73IkUD2Arm4-A5Q z$V?uihe$ZuBpWpiAb&DM)!Ef}J7H~0w9+RoGtsDH2NK1=CVbIf!NnV*7dXJ}F_k1Q zo!$KaLN2f=pfJ3@@ALQ)(Ms@#&{ck+9w zY%M{4!M#3@RxK+4FB+B=TfP-0U}P z!CqO2N0yT(uL=HbE}me2iQA5)P~tBeE>kPvOHq+xqH$`unkIc^@?f0mOWfV)#$zBY zoNszJnQ?;ZtsZt>p%9Gzg-J1Fhx`flk()0)`H3*@(=5kB^dBZNI4Srw9;(j3C!2DHfyx9Gl+)R`iAx+H@h@-v-WYK{X^#diWk`g~OyA{irRi zQMI+?AGV5R>j`KLB2V}PPd+j0gZV+sApJX<)go$4)=Gg>Q5$I{zHU5BbTGSKd7fU{ z0c2sod--hE@Mo0Dx9o6)ZRUynO{_i3LRo01IgVVRRS?-aDgFe(TB=|E-xC!NNs z;wU1ev+>IR>24hScPJsAtYH?Rm%pX9qNHaHs$-RWf_3)jE{-$oV1kdV*Jr4_+m0m0 z&62{8eQ}^D@`8t14li|{Mk%*XV(bZBK}YDkNOk_|!E`r3J_Lj5DcGc8`2@M^_^>+% z%#qYz7=UV~7}b7NpCAL;@1n^mI6005OOe+(cEois2%GgkdLid-{yBsFb=;d!j>eVlarRxLP-=G26&{ zCdQp|-<=ok%-jgMZD?M+wGUvU@Lw{PyYA^bqC!bES<4QW#Z%7OVfkvDiG^ z)$3xTIFv^0@(l<^v4X@aV6!GY?D2Sk?Z;%hJ<7LCJ)xkYuyVgYbNp+g z;R0r>RrKs>*^X=?{t0aILXN3M=&3D&4Pj>ZKds+BiaPpM?!@Q}%CqO>94=6@Zx3AZ z!Q_wDFI-OjT-WPL?6IzxFL_r0ZJHG84G!WkgiJHtX4kWg`T7`ZB4ApWPvr!C0D?;P zB;Z-{kfs(#$|Ah=*NFzi_b9syBY_$+Kb-thqIZYVGkz8kE?1!n5BW~T(uLKiQ?Pf! z8V!yrmNMRRZ#J3K!WQQ|*5p3N(Xj9ZiVDQZD_(78n`I}^)5uI&7)6#jv^7G9{a+c& zs9`=vPmSh(rfAEX5z;N4&w6>l?J@aT_EqViRf8OO#Fj``Do%{E=t=!E=VU%vB;H}! zuO%r4(g|(oSmXAOBUG%YGOa?M0W3wqG}_o9(?uod-j?SHh8f;CL= z9(8Yj^WA-eMuja%{xi0*fP*nxD#kcau;3~(H?PR5Qu9k;UR8F+@!6LLukz>f)picX z^!iCzvaoa6-}-XdMu zNW83SZw+HRHjAS_OMfe#Bl4dK#|w| zh{YiZrE@6>&EK8BHi@j%>=4|6pL`3rcgBlvCh+k|WnMd<FbO$vCxiF z=n{oqzKJ|{?^-L!o1G`=>DKJ*U?`A4L$Qft5e&8fqB=-~oG&Z*XYu{5hD~uQjPW+{ zdDItVZByP(t@^S-?r1>|<}0_F%{C%PJd==lr|s?1 z1u}zYg(e$~>b3p$Yf(0KNSmQ35ic*cRJh59KcO^+Yk&D-O_#JW?#!}_v|`HmjsSK{ zh)j;KokJsF5l823DNhE|4!Yzg%tF$>J0-0j>D4uFO|1WVzfz+wo2cHP>7^mR?PNjc z1poF+=j2ytL^Q9_9xeUinTWTIRhjcCg8kx(w>z{xtgcGl9-%rE_ixD_j(Tp&7nb*i zBiMcqhADh8bJfzc6lb`J?Ik1NdANJ=CKP%yo&_8gL@7PHDBaq05;SRY%B+5ZSFbdr z+BQ7>5_4*1=!qsI`&XI@dmCC~OQf@iIV*XvS?{7vE1_=5IpYB0+2r2rjWN(z<@m*S5B7x@&>e1GAP5*Ksp z(|A#-CJfM=XL)U{#;!*QQcu6?+=YsL7Ob7XQqAp!SZhym#Ih#N@#uusuhRMC#y(BM z_|vU&XH^ip+T-R6=?1-%J=Y7ta{lCR(rDuj=t2@QHTS=7QoCnL7I3Kd%iF&yV^D!) 
z)Z)kWN#z1(j8DHumVWj2VO^9qC49N*mL@7IL52m#C(&u{??yRRF7M_XN9)tqPNl(?WTq5Nua95I_Vr z3H0%(+Q`aU6kknIVUA^C%K)94B`yP`VtD8<9L0IqsyU3mp zl7puMmvC$id7?53@{tC*TSdnEAy2g-WYX`|?9;d|@b~?`-Q6vFI*2EVyh6LmVr^}$ zR$VP-Wld{kKY+J$q6fNoI(lB#v`lxkT(TAau1Cl^)%+c7rixML{7D)pkMgSI^lkI< z8++~js5M=*$2oZisZ+Keb>W=a1ua)^kvWLX;lq3Gs+I#%dcj8=-fByt9EB|k@1#Od z)aB5Scs#xd8}vblzBcJU+|x_}?a6WelIj||{G20zCLh@OeG{|O$Ayy-`IrU(jjO=|kFs1T?c~HM=y8?flELWozEy_g9Un z-s@Ig_8Q>DkErbO)pf23hfa157r2)_TXlG)Yw~O|=zhlDTN4 z3oPd1pU>;-qid$eV84clbH`G>s&lbU3Rhp#w zT2`7W@Ko05 zIy$IaC*7qMnzg>4(eML0prSd+|05vN(U?0!sF6aqFSc_WS$g+z!Do}?h|*YZ?hHK? z9s2J+tYrFn;Yr{*eSd0s_T!lH2N-}uDG;%8QM;c<2OVT`z)m)91|KrehNE0+1^PVE z7YQIl67S&M35tuqET2F2%xgP$h8nc2daw&hnbz#pdR;s#Re>n~e8~k0Z4Ri76FlC+fI6Rk^GMS?R;j)|VV}_!+1n8mei74%+&FL0@^m}XI$QaI-OqF3- zOe4<4wV*&OjF+0^7jnO3ny4)PEAMMOae)^~J|!weK7(bJK&Z5v`^gx)X0)QutdAv* zynE@LYTQ&7Wb#%AdHIeWhosnTp5cdFByq0p$S(2wdzdo!KS*##GSW52>@|eFatNbN zpX{Sl3VB(jhNrKegc}w354-aDx61K(Xm_b{vc=Vz$DwaE9zjG*i5>6$zhV}M67=u=jCD?8Nb&FJ-L2I$GA#rO$T(hQG$_C6_dt(?jqAba| zjK4G5P|1b~_|U!z6y3R2$(c~RkBp8RRN-`&Kq8~@aFHP+*A4jQsG<)n+my1hFF2#~;SvqUITPG^p#B&u&#QSbj$YTT!ZzPUiV) zXO8>mOU819VdPd4viNqZsWH;qE%AH4(r1>*u?v=&fInE~p?);3xqcNZ5g=m{9y=PN zAQ1uXq!`&MT-cN(!$Rl~^gnf|!howIa5|v;;9Mk@-iSAKQU77*Z`NC5L7{|n$^^I8 zN*xsaKHsSFuNRcW;D{Smb&embcW0$``WPWvM;Y)~QHzsd{NK5ZQTt}4J}sDAk@iyc z3)ZyMXlpTlBTh@R%hd5;Iv*`e!Tsr4fjm%1Ei0+7L`Tb%kJx>I93QJ(UGS`~5aj== z?z^L+YL*J8^B@S8xeyy zF$lBH=h$5T2=Pw?nG;wfsPWpuVPyyEt7jI1tQNEy~O3PIKZ9Xd_Ak zeFx>Fr88EOu(~5d4-hjqhm$1XQ67i=p%*obkrzIMy4A02KE=Q|=i&qT;95p!oDwym+~ zu)ZX|$|oGB@!ttGo9Wls)9jf%;2aB9xP=?LeJ=F6o>+?$0zIFzJooS3YgB@ZRkW7NIzkjwCDFvBdWnKb=$3 ze)&DRk@=HOJVySbeR&KPf3hNn!COldc`r=Nl6$NoUlM{C!I6v`Zac33aM zQq0Q=)~&-DJ8buFXuEJ@VadCnI5R8$qU1Sv;8|owUrRRJ>{yLhAk|ERJ$f5ygqC+;xqORjc^K`F(UZp< z5{|1c^t|WqFV@Gqk)1(>|g7$wh*QXE<*Qd)%Xjna2a3N5a2j@(;ZJ)SLtGOc%NC%cT7 z1|iO_s6V@;-bw-wjrAsc13bTzAL&NNH&V!hgSgkHkUn#3a9%I)F~IbB$U@k)KK_vi zXrw#q&tA=YHA*|7!$_80Z$e_89y?koblv-mjkre=1h#*^>`emJ-?;&oy}kfIE8Rom z3VM16h>*Fu0GPllkVDW@ko(oeWzS0%%a#yLR`x;_^JCI*u%S<^e9T#}ZpK!(X@w(j z^7+{iASN)_X`3kzZj2X2e;4p|*=rx_otj}8`>vtK&Qow-t{i^ho;Im>1(Z2oq%Mz5hJWr~VQ)N-9`gBUh4B4U^_YS*NWl3?cv z{|M;~F0QX`4?6su57h!f1S1!7wWSE&()y5mrg5O z*P0cnqocz9A|~(DdJRhf2fN*#L$<^i|>_o;s zns_6$I((XObvJQm?+HMGTgb(>yz0hHvM}ycWn|#}$bH)(25S{%iBIipQR6njGDNQ6 z{2^bn*L`6CK&(ExhgCO|A}2|!UHOrEc&n-_bkriHJ#_&G@bc)MtioqwTBssCUreEX zrSKd;CJQ@)-_mFOs5`;jp5=gxG<}HC5^-j_pN(egf}i`Vp!S!QgwJi|N-A|6@%@7v zOgyN-!iiPm#O~aCr|Y?Q_Bx`tJ$%1-e=WlN6ym%|*{wN9;gaq8xICQf$}rT{F5uox z{4oVB)aO@A8nVBS6!~fdauSUabCG zsj0jK;96z`${uI_FxmOw7S|`f)h1eAWl%Q&a|*BA*9U3q2;%S!m>>;a8Dz){w)G)V`e7Pt)@| z-3i~wz$YozyPs%K9|B+YJR33aelp^B&#dM~aGY7TN#32*+999Shea`zStRw}dw%k^ zo2&+&Q<-hxA=uZS*Wdr0ECL-`Eq)TlSFS?_euoxm^TgVWA`9I=CJh^Kb)e}BQ57~z z3U?GMbb6kdNWT5Z7i(Oq(5y>yO4ypLKmJ)@p?@9Q+v7EcfnuB9j>#aR0jdTFE%u*Rk7hUF zoS(Lh1jfk$R0$rk5YEPr*Z8;9J5pSjBDj|6xB zpb1y~D&M{~OLXebrD#GFpmqx$g68_X;yY-lR8Gj}H!36Mn}@L~TIi=4-ZrCn~-DL*}@zOB836i z($9Csajs4-iK5fo-u+8NCJW|q8%o`zXRA9fpOV5_gTK&l44zz10VUu zP%CPhnUImI5#8=ihBGN!#=XKep=7aD@-UYY!nse5 z(D!ImCT8{S@Yi5NtDxU8)w!DqHS2+X8bfqCGltGdUg6I>U?j#@2%J!;-od7-z9XAkFrbfVuzr~hYgh8^%o(P>Grh*C9$UaZj9k@zo z2wnL8t1V*qBi8OO@poNZjpsvFv)zC4z?K*`>)daBBE9>?g433Zms{{bOv(9ptOyqB z;%ziG+}?gV&KKUQZR+Ya$Y*2#{tzcRaGPCJk{D3Odd~h)lVxhH_u!#;De8Hq$D-$3 zjYPtT#|8>fktpN)_FZy=wUONf&)ZtVQ<#jUls3OKWz{kO0xEZ<(P9!MoH4?>Nb=Nd zY^iAPb<9kM=^yTyk55wF4b?B@-Hh?!R4jy@HeHlD^4I2ADAO!Gb9Al)~sBK{YVM8l0gOnWY!3pM($8I{U14;p7a!Q-J!PpO#Rh0qJUiL+Q1R z2Ep|o^&7AJb3a%x_YXF=bSCA4 zgh|em$oNC3Wpy875=f6{Ku-D+pAp2|7x@Kl>orICIYBYczL0zCu+T%J@A1~^52f?B zn~@WAyiWytm%u-YuJ#4^Q`R`CV#iaxJ=l~_Nnq9TsFJH 
zA5w`$>*M=)N{^r4=PYB=+DMk@J?Z4)ixEp5oU`jFQz5nc>Ac zG@!XYlNQKwMbc#b`NsPLKJjE?o6d(zuUzNXP5#=_;gEFB&Kd$qnhH$c-3-0nOa7<~B#Fn4DvoLWus8g?K8vbJ~GqwgUM+ zAJy}=STg+CC*}y&=IAJD7wEkRvOl_E>`$gL#y9XA2zS}UR zqW{r4UkM}RVj$hdS~@X(+lZ-zg*BcutXh-LdMYzI zW&{E-OS;j1`VftE^@on}gDNJZO_P`ehl&QD@2JV{ROCG|BzgxCk6)NzI3eSef84J0 zaz_ew^8Hp0)TN=~cKMAQi$eQn--Oc|-!`;exft50Gs0}4xSs`2xjq_|*RSVhm)=(A zP1yz|4U_ax^2(EgWm506F0rjnY-5>=`=*gdm<@gZwKOvqg|#|F&UtWw56(+ zzlA+(yrl892nE<{1Z4C!b=hC8oA=JO#!uYxbjpo=_uPul(EI&7xBM&jXP6~T!`%Qu0Vjh+>7-6uVSdOoH<(nF zWye1>33wW)7=SjZsXMsyOCTi41)BU*&BYA$W#4ekmKM_u_T0l|q zjSRKqA3iF}``k@Dgg;8d%LPl+Y<<7Mo;U0nP1)Xx;;7rxtx%NYi6F zN{fzt;V?x;E`RBIxrIQ=B1TdNSL_Mr^Cw}GUzRGcO#F@BVa0ro$)T9P@w+Ak!mYde zOIP}hETvbKh;!Jum&F6rgeTEQoamWq`|6ZW@3rG=8SQU+{IWFLdNZ;dFqn#TIbhH- zCFenYc_IY0GrPH3<*u}eA_v(` zQ5wd@;R}Qq@{ZWKZ-dgFOngboIO5jM{)jGvh{|P4dXR@6GavODmme}@J(3Xqjqp*t z_9h1C0PQJ{d8H50Iao7>^@XmXX}v6Xb z-5(d3Q_ZCdF{rJxGk_Hwyf_!?^(BL5^2sC&7w^85AgU2r{TxWuLqtDgj8kJ&Vg`q} znzrXjg}cYy=1$i3UjI?Tsp8+|M-)w8_7{$1_4= zRYOvZF0hlc|s6yK4RVV(Z|Y&{{6%m=R`>3(*->fa#k2y)K`rB*9iTD<{e&r4klwf z$A`X8IJRbHz#+8AMJ!R!Ljkn4c^PsCCkBsFZE{LAlZ%(InUa$FTz zk;;0IO>#b$R|j<|K+i2g?P-UO=&$kLq}YU&7tBu-wraZI`gbBEux6v-#ayr_tW+op zvn{qxBZ_cK%TkQtsNrtY4XwETH}Ro^ypYl)0ShsG2oKjz{tO)ExH+g0r}8of;u;Z* zpQwgNb9fqaYt8GIG8?0#FXyzuKH4VN!0hGPfzslY?2Tj#Sj?N8;ZH}(n9moV*!w&P zQb@nEU)!g-9+BTl$Igmu;6kpw83=F;M;}Aos6HA`wR|`k#dE{cL+)J)@x<)|CzIC` zcfuwuqp7^PdY2P)VI}_a521QcYs)`lW2gAsutX;TVF8#MeiCn{yqV=4pT~PIH(@`X z*Z54{Yz3uEX^Z6BUy0R>cid095EnTh?Nd(z>_pe*>*Wd0IkX<)1nu|!987-}9wSY{ zB-(Tw_@R$+Hf7>DqRAr$7o)iC<6FINO-UYGJumO;q(ozknZyb?jnX~i%rN;n*8cMT zO_!{Wged=Nvi$>3U&GXv(cb(VAMu`+IPr#t1D~UXv(m4%kRg@iC8;Ap*YdN+JF7Bi zYxpheKyBI{8boZw$iCIWW^6|UbMUrUregq3*^c|c%m zRkIrB*)RA~4L*;#vvOd_``;O%!eFZjRKdN#EV z%4%$!nkR9yARK6DVa9aB97~^NCiSU?Dl6fhFZ(0kOcJIstG;jZ%=Jk1!FOv~Q6%_X zEl9G_i`BOxt|4s3Z6jt8Tjk5UnlMP+A2A(H@kL2^;}zW(%5U#p>TbG4?kw!eDEK_v z=*=I<7H^ow5WHDE7pAzhBAB`1U9iD(m+aACnvWi1Z;osBD%3;e>^$gYd(P@ge{gk! zmg8t`j$*m1c3?eG7UzOjxN3hgo~sa~BF{_ESbqjtP(lCNht`3jj1*?Z_51Up4S7{s zd3W|cQT|et{hMjm?UvM6k?|TkFGnZeKWAozmM|_b)@x1{$Jx%U86oy@V_265Q5&oM z<8P`TJ)NMowc3wmusq2$Pyc3wFr8|+wBLx5ZM^ee$ALHPb<2Lh~apt0gzt zqgR;?ygm5iOS|7jC5Qf~)*^$kY8Z}t4Mxc3ni1@&7=RJ{a8AJ zb8xcpnwD1u<4B+7U9F~{;;80YdqAAV&Ihi!CE4s;gWCBOYS#E_JQ4MskScHTvU;>o z*T8m9U5+{YjsS+0aW|vVxrEo7lf0p-kHVGAo@|N)jN>TWtFJ^mV9H{u*QxB=&rv&< z_!(Ok@SR6DG=9-^f;C}s(;5-aP1_Z!Ddd|VYZ=4sN4l@ogGCwJdKzDJ$Kt_H7cbLo zF+GjJ#A*9yZObodg^!2F>znyi&wz`_H+@*i`qCgF(*QAB(aR*c-l3+&3w~JsU0TT@ z5tqSi)&xQxBSP_qN4h--oF9d$=weBqW#JGsd@!p7=BOY{;2RUnQWL}A04eysti-$)j-S1cO&m<~PjVt8 zQ+bR&(7llVu=Y%S0BV&U_-<=C2~zOE;Q>2t@0&as)$%8wJHM#8I7%e2s{Jg=jC}fW z9t+G3hCXq8)!(Ax_X5&zuV!9AN(+xFvjC%X4`@$qkt(EyMN=cwvn?8SKb7JM6Z5BO z-)7WFBz5-`Ii#>|eqVA5TH?=*HQv_N&Mue=oYZijz;yR*+iR8_J-CS{mQ*i+o#5TX zJv31k_3fHWAxOb!w@O%d(1+7+_dnE78g7BdHrq+QsXrfr(opDbZxYxyXjt!5;G#l} z5YQV8HIfSOo&B8;hCKv!12SQw%2=T7JTdl<$BHo4HUyF;Z1LQr;e6Ko48JGI?Y>WZ zazV?>2w}L}HUkBSk@aqnf^Vczk|jk?oY^X;DN$mOjV{bVyu-CQ%n%vXoyd6&8@RN8 z3~c#QieNx7d$#6GTRYGA4ofA@=ST4*SU7gdPEik8#&3OQwP5_r@?~&d#s&@zXe-?X z)vl6)TR+mi9qU{ORfrq;S%keoSk&N1;biqkrg>f+)g~v%aG|ze0Ih)L&3^>si9nc% z?vFC7P>0fZ)UdW-;$79S7(Ksv#}HK%kr*VHWe&L{D#J+V2+$z(-Q|xDDPH?mx4X>c z{-1g&su)x7^%0~DgSG91+Rg)wCIr(97?rp!_vTGMDJ)pv(N}NHy6=!ICWO!AHhh12 zk$~_j@<6`_hq{Wc%b@sgLN079>##LA@Z5YR(<6j+_~&*3 z{Z)vQA{%YldCz@%2C2)Vw>n1~wHvzjichfcP^sTG{2tsw(uzyOYSp-ZHUT#iCq}B! 
zAZh8IPY|FNysF*XGl0IzJ-cmdOWm@qcR&ie4E^K^;9_rNVB9Q1Y$~^Nw0*deX|wZD zy~BFr)P<;-?JlNG_oGN(@ndral!eOr2g15finYS!Yhp|&3G8C8quz+o*+LHCFyH;1 zKV}r(`%R78`Q5hGld-De>st$*vQv?kIy>zR0rd82=R4cM<>H-q#Z_PROjncHsIG<& z0J_;a&rU*$z3k88tLJ!{2<+D9qZQX`>!mT~GEwS=j%(8{@WfWH6Vo+uUpKD3Bc!F8 z#VzD$`=9txeJ`Sg`KHCBg6`o8pE7;3^^@jJFQOI_J$PbPEyU+!C0?g7u};JDs5xS2 zY!pNT!h~Q3zs254cdTxk$Nr~sk{p%D^Tu<(V197X5^szsS*wOei>T-%W_3R~u)$W| zw*tz>BU0+5^30jx{%Vcbg}}kG1M5K`!uqI^8n(D>1E(0^SYnlL6d_gP#JE!E5E#=b zdl+<}CZv(<2?0;OWz5^mu4rWDIo`X}BOBEB*ZMe*7$;)eWKxgx_r%^&tsZ;h?j^c&ZldeE(qsW)TqBw*5L~Udx zj|^mGN>)jZq-Vlmc!sQWu;UcBgT%&dM_B-G43f5*e`u6PukI8id@$& zcDk241r@9oH+_V0*wMTyPL$=l(uj!&2kI&z=gqEz7JE}>9On%lm7CqgajqY_FR)iJ z5E|#@$70AKdV{kEUPA{+frDc`;I#P{R^!>)Tp_h$|M17O!sBygZyNEneQDREacf8| zCTf}UghOfydN|%3xtY{L(Y<(Hv^}>Xu+?x-xw~*5qKU=@_pJje^)fn7=Y4$UHGYkK z+DP=W+d3Yk5U!Y<@|@o&-Hj+p4fO4vJW{-=>werTw>~T1P4Bp|^=r;r#yelJ{X>Jc zxtNZYH+A3dK1lLr5F~J9^KrT245_l!LU8XYC2V1eF;NYQ69$qWmF92JP z4zL&p?u#2JJ?T?>R^wJPRRaR?vJw!xiAf62!m|{=-)$1Vbr$Qv#2f55^7*%$%@FPb z5Gs^HBW41cm$6fx69b0(w}b~n64^H;Uq>xalyoK;9o>>ozZ7r+aH}ZNq;3B>#k(J9 zS9fE!md3X^x;ZV9xP_eUGpHHh3Pq-G{Tvz5KK538R0brig+asyJsxicka<)N^1F_$ zpY88YKQ}p00MA2(>!S(Ay>GlFpX68YQv(65Pf=P2P%V2ln{^e{V>X+Wl|*JN7Z?5L zMY~M;5F+wazu8|ZJ&&qWvbFItq=2**RZ!topi%!fAxBm`GiIp()&eKib-j6|IF6;^ zkJJxTDv+XdZ}Hu*)Uxcj4>`c4S1KE^TpD~J=C3ks7sv?*kWJTym4&YV$ z;*R%GkXtSZ2;**m1lgsjL8*^bdhGLAkxAo9s`>tXcdl_M^8^A({YK)UJJHa;YQ~b^ z@o^hm%hxAHFBNnN>ODY5^+huX9E9?k9a@pppz3oB(61mLuL(d8xh}8ST_d+24+07K zbL|!2-taBvXka|Df6Fuk9Rk1yJ>t6Ef(4`pjm|Pa%u;D)7j)7DeDTfO%?{DsP*2yUb!7Nt|H5hgULUhnm9~*8K zh01lF3EOK%e}^eiK#zFr`B%|s>y`tvmDnD(E)!{%*=nup?j&&6V?HE0vg*yW1@5-0 zkV>Av(u05=woXpt_(!eVJb$C@ih(rMe`HcoEdFjb#LZLaoaG&tHjaVS_iODzTy!B} zybyf~)?-( z{fVEkGJIq&qjSH#>~*~Prqo&>SNzl|B3^-ZeHT$0PXpUqXn7N&IKRfUXy-Jma^w`D zBGmXRCqWzr9??>sLj&$*h*SVB()AQ7O)ZySC~kW>p4k`j{2l2^ z&68OX8|!I10ll;K(XdKc+JkfB_$)(^%lU#8Cpcj?g89>V3Cq8uz8s3MwI1jY;aOf^ zw5`|6t*v+2tUG2%-b8NkkL542t8ho=uSeNqfO|QZz|-yqYE+s!NGg|;%LNxgQR?c| zwEy9g!k0d|0oYY@Qb&v7cpGwgI>OE!WFVp1;67Ma(FaBeB~szVbEo4RDYDCX8wa6_ zQRVoz+xS@lO1p1Wf(do)e0-~x(s$vqwyrIjs^L{CDP8R@=#GA}FC5#RsYz*DpS=3ng3 zYr&O5q=2U&HT{=(KjP6&;)VRx_}_JQm7WaOY|-j^f$n35@$T?Lv3uhCl1mkmOA~D3 zI}?gR-UlptLY}J=jVE?_b%W+&HkStRy@70ha;y~d&IBIj8V-n8LtP46e;0P0J7M8t zqx3PV62~FK6-_?U@QeU5&L>c}T0mcwI-~1$nGMFZ)V35zlxCq00eJW>XKoSKJo0LU6Ej#US6`$Xo$aR}v%XhulXe!=T&pN7N2dzU!cQ{j{J{tU@^U#JZT0^Htp6)~FYOHC1_z`ut0r>!8ym$2m>B;CDFpMdW?SiXNQ0+^ z%M#&VK}vuM^uY!X5e~%a|LwJPqCx*}*uFq_i$Xult~p$?@=~%Pl&DG$;Ih@7QeE(` zoxuAC?>h7|&BIq`;WH;%*wKr!@Pq4Ez4jwGU^~NU&>7Iwv^?Y2{(U=W_Mjc0-@FI$ z(H5&DKvEEc9_oA8{plI=vVNe#2HbLNdPy)3ixR;^3`1TBBPdEOjwKvZM(;agPM@lR z6BB097BHoz9o8O3w;I`9&h>~Ce*~qyf?FE zR_n$(vSpZWT>6^z=-h2&jrVBI@YsB0aj~qVrrpwH-R&qZuhT(0=xjE>(}y}^N@k7m zGLTWMIbDJ6mWTF?tYhAs)hrvXo18UihZDnrZCa(yDjv<|lj>o(6aDYCa_h_0WXxJm z7(6I!O{#+{!aGy*MK87-G=;>+N98Vveml*TUk&9P-T!-(Ei01#Y|AiDR85hb!Ke3f zVm_7Yl6X`fu(|0>)K}S!>!<9XMpGUk-z$5%u_`&fR=F~itKhgbk$k?wJ8T+}w%fuQ zyXH&S3o6#GTNXLbHu>8DOTc*V<{Lx0u5NMX{=n1=`4K)Z zCz2?Q=GIC2!av@Lt)1kBJJyObM8h!+oE_ae-Q+YO(XM(`zz!jT;LWqKy0 ziHBAQl;1&HYiz!>gumU~#G1l#eoO>e0OC@Y0}#2mx(3$5 z%zR{aFc|;RF=?D}k~~yVa+hONt|GYn($$P>6Vz8uGKiWS9=89pr>(FRMh=3Kupxx5 zmJy9~NTM@Njb?@7aHZf$sf>mA5lL^+?B-(hG~pU>TF&IiaxDeWyI) zbNu)Cy>7i+wtx#3$fZyI72SUq|EcdqCxnCSLesZ|N89t8G8=g=vwx(SPQ-2__gY}- zg~u(Vl1<~Bkc2jy^gV^skKtRV8{p#P)asqxKGc~80~m^#FeiGp__D# zu)janYrpvbQTmJ%U)!@^n|qQzqa6^~;eEB($$~i1ShhE!L%ya%&f`>3Re0Zk;SstB zA;K!D>GZ)B9nN&Yjg_4pi!@x68n!;6g!iYG;ni+9^{{~;eckJ@liY6jScGZSYRETW zIEtcs5XbEn(&Ep)6r5e~54cmMjr$E`9kqlA-r1HgqLaA9MbK)Xfv%wND}|qI5_6|B 
zTpCL&TRcwzu|uJ_5)UZ|u+~Ta^VUz72z9X66bulBEifZxvy-i`1kcH5x{P@c;LT!7 z2%+q&8juf6?IkH9RN^VnDPKLAD%d$0BisF|mJk^|oyix!u*$lWS063(k>1^H6Nh=Qo7~NESj*H|MzHwC>8s(^{f-T+3fyJqNu62HYV{JJ<=xO&H_*x z*W;t*792q*@nq{{c$(h!tQqr9tr99sNCgr+QmOL2dJ8h;6%Vyx7Hi YvKuAv^7rd30Q@H_rSP;s0`~TQ0pO*=PXGV_ literal 0 HcmV?d00001 diff --git a/tools/keeper/db/connector.go b/tools/keeper/db/connector.go new file mode 100644 index 0000000000..a8f106e952 --- /dev/null +++ b/tools/keeper/db/connector.go @@ -0,0 +1,177 @@ +package db + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "os" + "strings" + "time" + + "github.com/sirupsen/logrus" + "github.com/taosdata/driver-go/v3/common" + + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +type Connector struct { + db *sql.DB +} + +type Data struct { + Head []string `json:"head"` + Data [][]interface{} `json:"data"` +} + +var dbLogger = log.GetLogger("DB ") + +func NewConnector(username, password, host string, port int, usessl bool) (*Connector, error) { + var protocol string + if usessl { + protocol = "https" + } else { + protocol = "http" + } + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: util.GetQidOwn()}) + dbLogger.Tracef("connect to adapter, host:%s, port:%d, usessl:%v", host, port, usessl) + + db, err := sql.Open("taosRestful", fmt.Sprintf("%s:%s@%s(%s:%d)/?skipVerify=true", username, password, protocol, host, port)) + if err != nil { + dbLogger.Errorf("connect to adapter failed, host:%s, port:%d, usessl:%v, error:%s", host, port, usessl, err) + return nil, err + } + + dbLogger.Tracef("connect to adapter success, host:%s, port:%d, usessl:%v", host, port, usessl) + return &Connector{db: db}, nil +} + +func NewConnectorWithDb(username, password, host string, port int, dbname string, usessl bool) (*Connector, error) { + var protocol string + if usessl { + protocol = "https" + } else { + protocol = "http" + } + + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: util.GetQidOwn()}) + dbLogger.Tracef("connect to adapter, host:%s, port:%d, usessl:%v", host, port, usessl) + + db, err := sql.Open("taosRestful", fmt.Sprintf("%s:%s@%s(%s:%d)/%s?skipVerify=true", username, password, protocol, host, port, dbname)) + if err != nil { + dbLogger.Errorf("connect to adapter failed, host:%s, port:%d, db:%s, usessl:%v, error:%s", host, port, dbname, usessl, err) + return nil, err + } + + dbLogger.Tracef("connect to adapter success, host:%s, port:%d, db:%s, usessl:%v", host, port, dbname, usessl) + return &Connector{db: db}, nil +} + +func (c *Connector) Exec(ctx context.Context, sql string, qid uint64) (int64, error) { + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid}) + ctx = context.WithValue(ctx, common.ReqIDKey, int64(qid)) + + dbLogger.Tracef("call adapter to execute sql:%s", sql) + startTime := time.Now() + res, err := c.db.ExecContext(ctx, sql) + + endTime := time.Now() + latency := endTime.Sub(startTime) + + if err != nil { + if strings.Contains(err.Error(), "Authentication failure") { + dbLogger.Error("Authentication failure") + ctxLog, cancelLog := context.WithTimeout(context.Background(), 3*time.Second) + defer cancelLog() + log.Close(ctxLog) + os.Exit(1) + } + dbLogger.Errorf("latency:%v, sql:%s, err:%s", latency, sql, err) + return 0, err + } + + rowsAffected, err := res.RowsAffected() + if err != nil { + dbLogger.Errorf("latency:%v, err:%s", latency, err) + 
return rowsAffected, err + } + + dbLogger.Tracef("response ok, rowsAffected:%v, latency:%v", rowsAffected, latency) + + return rowsAffected, err +} + +func logData(data *Data, logger *logrus.Entry) { + if data == nil { + logger.Tracef("No data to display") + return + } + + jsonData, err := json.Marshal(data) + if err != nil { + logger.Errorf("Failed to marshal data to JSON: %v", err) + return + } + logger.Tracef("query result data:%s", jsonData) +} + +func (c *Connector) Query(ctx context.Context, sql string, qid uint64) (*Data, error) { + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid}) + ctx = context.WithValue(ctx, common.ReqIDKey, int64(qid)) + + dbLogger.Tracef("call adapter to execute query, sql:%s", sql) + + startTime := time.Now() + rows, err := c.db.QueryContext(ctx, sql) + + endTime := time.Now() + latency := endTime.Sub(startTime) + + if err != nil { + if strings.Contains(err.Error(), "Authentication failure") { + dbLogger.Error("Authentication failure") + ctxLog, cancelLog := context.WithTimeout(context.Background(), 3*time.Second) + defer cancelLog() + log.Close(ctxLog) + os.Exit(1) + } + dbLogger.Errorf("latency:%v, sql:%s, err:%s", latency, sql, err) + return nil, err + } + + dbLogger.Tracef("response ok, latency:%v, sql:%s", latency, sql) + + data := &Data{} + data.Head, err = rows.Columns() + columnCount := len(data.Head) + if err != nil { + dbLogger.Errorf("get columns error, msg:%s", err) + return nil, err + } + scanData := make([]interface{}, columnCount) + for rows.Next() { + tmp := make([]interface{}, columnCount) + for i := 0; i < columnCount; i++ { + scanData[i] = &tmp[i] + } + err = rows.Scan(scanData...) + if err != nil { + rows.Close() + dbLogger.Errorf("rows scan error, msg:%s", err) + return nil, err + } + data.Data = append(data.Data, tmp) + } + + if dbLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + logData(data, dbLogger) + } + return data, nil +} + +func (c *Connector) Close() error { + return c.db.Close() +} diff --git a/tools/keeper/db/empty_test.go b/tools/keeper/db/empty_test.go new file mode 100644 index 0000000000..52e32e1f89 --- /dev/null +++ b/tools/keeper/db/empty_test.go @@ -0,0 +1,8 @@ +package db + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/docker-compose.yml b/tools/keeper/docker-compose.yml new file mode 100644 index 0000000000..f7f43fe112 --- /dev/null +++ b/tools/keeper/docker-compose.yml @@ -0,0 +1,29 @@ +version: "3.7" + +services: + tdengine: + image: tdengine/tdengine:3.0.1.6 + environment: + TZ: Asia/Shanghai + TAOS_FQDN: tdengine + volumes: + - taosdata:/var/lib/taos + taoskeeper: + build: ./ + depends_on: + - tdengine + environment: + TDENGINE_HOST: tdengine + TDENGINE_PORT: 6041 + volumes: + - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml + ports: + - 6043:6043 + prometheus: + image: prom/prometheus + volumes: + - ./prometheus/:/etc/prometheus/ + ports: + - 9090:9090 +volumes: + taosdata: diff --git a/tools/keeper/examples/metrics.toml b/tools/keeper/examples/metrics.toml new file mode 100644 index 0000000000..9dbfea2323 --- /dev/null +++ b/tools/keeper/examples/metrics.toml @@ -0,0 +1,38 @@ +prefix = "taos" +cluster = "localhost" +database = "log" +explicit = false + +[tables.restful_info] +[tables.slowquery] +[tables.cluster_info] +[tables.grants_info] +[tables.disks_info] + +[tables.logs] +explicit = true +[tables.logs.metrics.content] +type = "info" +help = "login types or messages" +[tables.logs.metrics.level] +type = "gauge" +help = "login level" + 
+[tables.dnodes_info] +[tables.dnodes_info.metrics.has_mnode] +type = "gauge" +help = "check if the node has mnode" + +[tables.vgroups_info] +column_as_variables = ["database_name", "dnode_roles", "dnode_ids"] +explicit = false + +[tables.vgroups_info.metrics.tables_num] +type = "gauge" +help = "Tables count of the vgroup" +[tables.vgroups_info.metrics.online_vnodes] +type = "gauge" +help = "Online v-nodes of the v-group" +[tables.vgroups_info.metrics.status] +type = "info" +help = "Status of the v-group" diff --git a/tools/keeper/go.mod b/tools/keeper/go.mod new file mode 100644 index 0000000000..f520ceb774 --- /dev/null +++ b/tools/keeper/go.mod @@ -0,0 +1,80 @@ +module github.com/taosdata/taoskeeper + +go 1.18 + +require ( + github.com/BurntSushi/toml v0.4.1 + github.com/gin-gonic/gin v1.9.1 + github.com/kardianos/service v1.2.1 + github.com/panjf2000/ants/v2 v2.4.6 + github.com/prometheus/client_golang v1.12.2 + github.com/shirou/gopsutil/v3 v3.22.4 + github.com/shopspring/decimal v1.3.1 + github.com/sirupsen/logrus v1.8.1 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.12.0 + github.com/stretchr/testify v1.9.0 + github.com/taosdata/driver-go/v3 v3.5.8 + github.com/taosdata/file-rotatelogs/v2 v2.5.2 + github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/bytedance/sonic v1.9.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gin-contrib/cors v1.3.1 // indirect + github.com/gin-contrib/gzip v0.0.3 // indirect + github.com/gin-contrib/pprof v1.3.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.14.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/lestrrat-go/strftime v1.0.6 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.5.0 // indirect + 
github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/subosito/gotenv v1.3.0 // indirect + github.com/tklauser/go-sysconf v0.3.10 // indirect + github.com/tklauser/numcpus v0.4.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.11 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect + golang.org/x/arch v0.3.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/tools/keeper/go.sum b/tools/keeper/go.sum new file mode 100644 index 0000000000..9c7721c4d7 --- /dev/null +++ b/tools/keeper/go.sum @@ -0,0 +1,764 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod 
h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= +github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deathowl/go-metrics-prometheus v0.0.0-20200518174047-74482eab5bfb/go.mod h1:kZ9Xvhj+PTMJ415unU/sutrnWDVqG0PDS/Sl4Rt3xkE= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gin-contrib/cors v1.3.1 h1:doAsuITavI4IOcd0Y19U4B+O0dNWihRyX//nn4sEmgA= +github.com/gin-contrib/cors v1.3.1/go.mod h1:jjEJ4268OPZUcU7k9Pm653S7lXUGcqMADzFA61xsmDk= +github.com/gin-contrib/gzip v0.0.3 h1:etUaeesHhEORpZMp18zoOhepboiWnFtXrBZxszWUn4k= +github.com/gin-contrib/gzip v0.0.3/go.mod h1:YxxswVZIqOvcHEQpsSn+QF5guQtO1dCfy0shBPy4jFc= +github.com/gin-contrib/pprof v1.3.0 h1:G9eK6HnbkSqDZBYbzG4wrjCsA4e+cvYAHUZw6W+W9K0= +github.com/gin-contrib/pprof v1.3.0/go.mod h1:waMjT1H9b179t3CxuG1cV3DHpga6ybizwfBaM5OXaB0= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/gin-gonic/gin v1.6.2/go.mod 
h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= +github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk= +github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= +github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= +github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ= +github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= 
+github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/panjf2000/ants/v2 v2.4.6 h1:drmj9mcygn2gawZ155dRbo+NfXEfAssjZNU1qoIb4gQ= +github.com/panjf2000/ants/v2 v2.4.6/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= 
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI= +github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= +github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/taosdata/driver-go/v2 v2.0.1-0.20211018081904-0a2a3ef6c829/go.mod h1:W7pu74rSvDmGjJPO6fzp+GCtwOelrMgXEhPD0aQJ1xw= +github.com/taosdata/driver-go/v3 v3.5.8 h1:JT5lNFUCOHD9Hs4Phjg8RBkGOWlePRnpGqq8kIRHT98= +github.com/taosdata/driver-go/v3 v3.5.8/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU= +github.com/taosdata/file-rotatelogs/v2 v2.5.2 h1:6ryjwDdKqQtWrkVq9OKj4gvMING/f+fDluMAAe2DIXQ= +github.com/taosdata/file-rotatelogs/v2 v2.5.2/go.mod h1:Qm99Lh0iMZouGgyy++JgTqKvP5FQw1ruR5jkWF7e1n0= +github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a h1:WGFREiuYBrTXTS9GVQQpDvVgGRyByfo0V5//o7tv/ho= +github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a/go.mod h1:hlvGgM/HN3AqWMajvMQe80qoLNJ4KIxs8YOVqEqnxUo= +github.com/tidwall/gjson v1.9.1/go.mod h1:jydLKE7s8J0+1/5jC4eXcuFlzKizGrCKvLmBVX/5oXc= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod 
h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= +golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto 
v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= 
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= 
+gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/tools/keeper/infrastructure/config/audit.go b/tools/keeper/infrastructure/config/audit.go new file mode 100644 index 0000000000..10f3a6aa1e --- /dev/null +++ b/tools/keeper/infrastructure/config/audit.go @@ -0,0 +1,6 @@ +package config + +type AuditConfig struct { + Enable bool `toml:"enable"` + Database Database `toml:"database"` +} diff --git a/tools/keeper/infrastructure/config/config.go b/tools/keeper/infrastructure/config/config.go new file mode 100644 index 0000000000..f4bbe1b274 --- /dev/null +++ b/tools/keeper/infrastructure/config/config.go @@ -0,0 +1,296 @@ +package config + +import ( + "fmt" + "io/fs" + "os" + "runtime" + "time" + + "github.com/spf13/pflag" + "github.com/spf13/viper" + "github.com/taosdata/go-utils/web" + "github.com/taosdata/taoskeeper/util/pool" + "github.com/taosdata/taoskeeper/version" +) + +var IsEnterprise = "false" + +var Name = fmt.Sprintf("%skeeper", version.CUS_PROMPT) + +const ReqIDKey = "QID" +const ModelKey = "model" + +type Config struct { + InstanceID uint8 + Cors web.CorsConfig `toml:"cors"` + Port int `toml:"port"` + LogLevel string `toml:"loglevel"` + GoPoolSize int `toml:"gopoolsize"` + RotationInterval string `toml:"RotationInterval"` + TDengine TDengineRestful `toml:"tdengine"` + Metrics MetricsConfig 
`toml:"metrics"` + Env Environment `toml:"environment"` + Audit AuditConfig `toml:"audit"` + Log Log `mapstructure:"-"` + + Transfer string + FromTime string + Drop string +} + +type TDengineRestful struct { + Host string `toml:"host"` + Port int `toml:"port"` + Username string `toml:"username"` + Password string `toml:"password"` + Usessl bool `toml:"usessl"` +} + +var Conf *Config + +func InitConfig() *Config { + viper.SetConfigType("toml") + viper.SetConfigName(Name) + viper.AddConfigPath("/etc/taos") + + var cp *string + switch runtime.GOOS { + case "windows": + viper.AddConfigPath(fmt.Sprintf("C:\\%s\\cfg", version.CUS_NAME)) + cp = pflag.StringP("config", "c", "", fmt.Sprintf("config path default C:\\%s\\cfg\\%s.toml", version.CUS_NAME, Name)) + default: + viper.AddConfigPath(fmt.Sprintf("/etc/%s", version.CUS_PROMPT)) + cp = pflag.StringP("config", "c", "", fmt.Sprintf("config path default /etc/%s/%s.toml", version.CUS_PROMPT, Name)) + } + + transfer := pflag.StringP("transfer", "", "", "run "+Name+" in command mode, only support old_taosd_metric. transfer old metrics data to new tables and exit") + fromTime := pflag.StringP("fromTime", "", "2020-01-01T00:00:00+08:00", "parameter of transfer, example: 2020-01-01T00:00:00+08:00") + drop := pflag.StringP("drop", "", "", "run "+Name+" in command mode, only support old_taosd_metric_stables. ") + + v := pflag.BoolP("version", "V", false, "Print the version and exit") + help := pflag.BoolP("help", "h", false, "Print this help message and exit") + + pflag.Parse() + + if *help { + fmt.Fprintf(os.Stderr, "Usage of %s v%s:\n", Name, version.Version) + pflag.PrintDefaults() + os.Exit(0) + } + + if *v { + fmt.Printf("%s version: %s\n", Name, version.Version) + fmt.Printf("git: %s\n", version.Gitinfo) + fmt.Printf("build: %s\n", version.BuildInfo) + os.Exit(0) + } + + if *cp != "" { + viper.SetConfigFile(*cp) + } + + viper.SetEnvPrefix(Name) + err := viper.BindPFlags(pflag.CommandLine) + if err != nil { + panic(err) + } + viper.AutomaticEnv() + + gotoStep := false +ReadConfig: + if err := viper.ReadInConfig(); err != nil { + _, isConfigNotFoundError := err.(viper.ConfigFileNotFoundError) + _, isPathError := err.(*fs.PathError) + if isConfigNotFoundError || isPathError { + fmt.Println("config file not found") + + if !gotoStep { + fmt.Println("use keeper.toml instead") + viper.SetConfigName("keeper") + gotoStep = true + goto ReadConfig + } + } else { + panic(err) + } + } + + // if old format, change to new format + if !viper.IsSet("metrics.database.name") { + databaseName := viper.GetString("metrics.database") + viper.Set("metrics.database.name", databaseName) + viper.Set("metrics.database.options", viper.Get("metrics.databaseoptions")) + } + + var conf Config + if err = viper.Unmarshal(&conf); err != nil { + panic(err) + } + + conf.Transfer = *transfer + conf.FromTime = *fromTime + conf.Drop = *drop + + conf.Cors.Init() + pool.Init(conf.GoPoolSize) + conf.Log.SetValue() + + // set log level default value: info + if conf.LogLevel == "" { + conf.LogLevel = "info" + } + if viper.IsSet("log.level") { + conf.LogLevel = conf.Log.Level + } else { + viper.Set("log.level", "") + } + + if !viper.IsSet("logLevel") { + viper.Set("logLevel", "") + } + + Conf = &conf + return &conf +} + +func init() { + viper.SetDefault("instanceId", 64) + _ = viper.BindEnv("instanceId", "TAOS_KEEPER_INSTANCE_ID") + pflag.Int("instanceId", 64, `instance ID. 
Env "TAOS_KEEPER_INSTANCE_ID"`) + + viper.SetDefault("port", 6043) + _ = viper.BindEnv("port", "TAOS_KEEPER_PORT") + pflag.IntP("port", "P", 6043, `http port. Env "TAOS_KEEPER_PORT"`) + + _ = viper.BindEnv("logLevel", "TAOS_KEEPER_LOG_LEVEL") + pflag.String("logLevel", "info", `log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL"`) + + viper.SetDefault("gopoolsize", 50000) + _ = viper.BindEnv("gopoolsize", "TAOS_KEEPER_POOL_SIZE") + pflag.Int("gopoolsize", 50000, `coroutine size. Env "TAOS_KEEPER_POOL_SIZE"`) + + viper.SetDefault("RotationInterval", "15s") + _ = viper.BindEnv("RotationInterval", "TAOS_KEEPER_ROTATION_INTERVAL") + pflag.StringP("RotationInterval", "R", "15s", `interval for refresh metrics, such as "300ms", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Env "TAOS_KEEPER_ROTATION_INTERVAL"`) + + viper.SetDefault("tdengine.host", "127.0.0.1") + _ = viper.BindEnv("tdengine.host", "TAOS_KEEPER_TDENGINE_HOST") + pflag.String("tdengine.host", "127.0.0.1", `TDengine server's ip. Env "TAOS_KEEPER_TDENGINE_HOST"`) + + viper.SetDefault("tdengine.port", 6041) + _ = viper.BindEnv("tdengine.port", "TAOS_KEEPER_TDENGINE_PORT") + pflag.Int("tdengine.port", 6041, `TDengine REST server(taosAdapter)'s port. Env "TAOS_KEEPER_TDENGINE_PORT"`) + + viper.SetDefault("tdengine.username", "root") + _ = viper.BindEnv("tdengine.username", "TAOS_KEEPER_TDENGINE_USERNAME") + pflag.String("tdengine.username", "root", `TDengine server's username. Env "TAOS_KEEPER_TDENGINE_USERNAME"`) + + viper.SetDefault("tdengine.password", "taosdata") + _ = viper.BindEnv("tdengine.password", "TAOS_KEEPER_TDENGINE_PASSWORD") + pflag.String("tdengine.password", "taosdata", `TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD"`) + + viper.SetDefault("tdengine.usessl", false) + _ = viper.BindEnv("tdengine.usessl", "TAOS_KEEPER_TDENGINE_USESSL") + pflag.Bool("tdengine.usessl", false, `TDengine server use ssl or not. Env "TAOS_KEEPER_TDENGINE_USESSL"`) + + viper.SetDefault("metrics.prefix", "") + _ = viper.BindEnv("metrics.prefix", "TAOS_KEEPER_METRICS_PREFIX") + pflag.String("metrics.prefix", "", `prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX"`) + + viper.SetDefault("metrics.database.name", "log") + _ = viper.BindEnv("metrics.database.name", "TAOS_KEEPER_METRICS_DATABASE") + pflag.String("metrics.database.name", "log", `database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE"`) + + viper.SetDefault("metrics.database.options.vgroups", 1) + _ = viper.BindEnv("metrics.database.options.vgroups", "TAOS_KEEPER_METRICS_VGROUPS") + pflag.Int("metrics.database.options.vgroups", 1, `database option vgroups for audit database. Env "TAOS_KEEPER_METRICS_VGROUPS"`) + + viper.SetDefault("metrics.database.options.buffer", 64) + _ = viper.BindEnv("metrics.database.options.buffer", "TAOS_KEEPER_METRICS_BUFFER") + pflag.Int("metrics.database.options.buffer", 64, `database option buffer for audit database. Env "TAOS_KEEPER_METRICS_BUFFER"`) + + viper.SetDefault("metrics.database.options.keep", 90) + _ = viper.BindEnv("metrics.database.options.keep", "TAOS_KEEPER_METRICS_KEEP") + pflag.Int("metrics.database.options.keep", 90, `database option buffer for audit database. Env "TAOS_KEEPER_METRICS_KEEP"`) + + viper.SetDefault("metrics.database.options.cachemodel", "both") + _ = viper.BindEnv("metrics.database.options.cachemodel", "TAOS_KEEPER_METRICS_CACHEMODEL") + pflag.String("metrics.database.options.cachemodel", "both", `database option cachemodel for audit database. 
Env "TAOS_KEEPER_METRICS_CACHEMODEL"`) + + viper.SetDefault("metrics.tables", []string{}) + _ = viper.BindEnv("metrics.tables", "TAOS_KEEPER_METRICS_TABLES") + pflag.StringArray("metrics.tables", []string{}, `export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES"`) + + viper.SetDefault("environment.incgroup", false) + _ = viper.BindEnv("environment.incgroup", "TAOS_KEEPER_ENVIRONMENT_INCGROUP") + pflag.Bool("environment.incgroup", false, `whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP"`) + + initLog() + + if IsEnterprise == "true" { + initAudit() + } +} + +func initLog() { + switch runtime.GOOS { + case "windows": + viper.SetDefault("log.path", fmt.Sprintf("C:\\%s\\log", version.CUS_NAME)) + _ = viper.BindEnv("log.path", "TAOS_KEEPER_LOG_PATH") + pflag.String("log.path", fmt.Sprintf("C:\\%s\\log", version.CUS_NAME), `log path. Env "TAOS_KEEPER_LOG_PATH"`) + default: + viper.SetDefault("log.path", fmt.Sprintf("/var/log/%s", version.CUS_PROMPT)) + _ = viper.BindEnv("log.path", "TAOS_KEEPER_LOG_PATH") + pflag.String("log.path", fmt.Sprintf("/var/log/%s", version.CUS_PROMPT), `log path. Env "TAOS_KEEPER_LOG_PATH"`) + } + + _ = viper.BindEnv("log.level", "TAOS_KEEPER_LOG_LEVEL") + pflag.String("log.level", "info", `log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL"`) + + viper.SetDefault("log.rotationCount", 5) + _ = viper.BindEnv("log.rotationCount", "TAOS_KEEPER_LOG_ROTATION_COUNT") + pflag.Uint("log.rotationCount", 5, `log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT"`) + + viper.SetDefault("log.keepDays", 30) + _ = viper.BindEnv("log.keepDays", "TAOS_KEEPER_LOG_KEEP_DAYS") + pflag.Uint("log.keepDays", 30, `log retention days, must be a positive integer. Env "TAOS_KEEPER_LOG_KEEP_DAYS"`) + + viper.SetDefault("log.rotationTime", time.Hour*24) + _ = viper.BindEnv("log.rotationTime", "TAOS_KEEPER_LOG_ROTATION_TIME") + pflag.Duration("log.rotationTime", time.Hour*24, `deprecated: log rotation time always 24 hours. Env "TAOS_KEEPER_LOG_ROTATION_TIME"`) + + viper.SetDefault("log.rotationSize", "1GB") + _ = viper.BindEnv("log.rotationSize", "TAOS_KEEPER_LOG_ROTATION_SIZE") + pflag.String("log.rotationSize", "1GB", `log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE"`) + + viper.SetDefault("log.compress", false) + _ = viper.BindEnv("log.compress", "TAOS_KEEPER_LOG_COMPRESS") + pflag.Bool("log.compress", false, `whether to compress old log. Env "TAOS_KEEPER_LOG_COMPRESS"`) + + viper.SetDefault("log.reservedDiskSize", "1GB") + _ = viper.BindEnv("log.reservedDiskSize", "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE") + pflag.String("log.reservedDiskSize", "1GB", `reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE"`) +} + +func initAudit() { + viper.SetDefault("audit.enable", "true") + _ = viper.BindEnv("audit.enable", "TAOS_KEEPER_AUDIT_ENABLE") + pflag.String("audit.enable", "true", `database for enable audit data. Env "TAOS_KEEPER_AUDIT_ENABLE"`) + + viper.SetDefault("audit.database.name", "audit") + _ = viper.BindEnv("audit.database.name", "TAOS_KEEPER_AUDIT_DATABASE") + pflag.String("audit.database.name", "audit", `database for storing audit data. 
Env "TAOS_KEEPER_AUDIT_DATABASE"`) + + viper.SetDefault("audit.database.options.vgroups", 1) + _ = viper.BindEnv("audit.database.options.vgroups", "TAOS_KEEPER_AUDIT_VGROUPS") + pflag.Int("audit.database.options.vgroups", 1, `database option vgroups for audit database. Env "TAOS_KEEPER_AUDIT_VGROUPS"`) + + viper.SetDefault("audit.database.options.buffer", 16) + _ = viper.BindEnv("audit.database.options.buffer", "TAOS_KEEPER_AUDIT_BUFFER") + pflag.Int("audit.database.options.buffer", 16, `database option buffer for audit database. Env "TAOS_KEEPER_AUDIT_BUFFER"`) + + viper.SetDefault("audit.database.options.cachemodel", "both") + _ = viper.BindEnv("audit.database.options.cachemodel", "TAOS_KEEPER_AUDIT_CACHEMODEL") + pflag.String("audit.database.options.cachemodel", "both", `database option cachemodel for audit database. Env "TAOS_KEEPER_AUDIT_CACHEMODEL"`) +} diff --git a/tools/keeper/infrastructure/config/log.go b/tools/keeper/infrastructure/config/log.go new file mode 100644 index 0000000000..dc67d877e7 --- /dev/null +++ b/tools/keeper/infrastructure/config/log.go @@ -0,0 +1,29 @@ +package config + +import ( + "time" + + "github.com/spf13/viper" +) + +type Log struct { + Level string + Path string + RotationCount uint + RotationTime time.Duration + RotationSize uint + KeepDays uint + Compress bool + ReservedDiskSize uint +} + +func (l *Log) SetValue() { + l.Level = viper.GetString("log.level") + l.Path = viper.GetString("log.path") + l.RotationCount = viper.GetUint("log.rotationCount") + l.RotationTime = viper.GetDuration("log.rotationTime") + l.RotationSize = viper.GetSizeInBytes("log.rotationSize") + l.KeepDays = viper.GetUint("log.keepDays") + l.Compress = viper.GetBool("log.compress") + l.ReservedDiskSize = viper.GetSizeInBytes("log.reservedDiskSize") +} diff --git a/tools/keeper/infrastructure/config/metric_test.go b/tools/keeper/infrastructure/config/metric_test.go new file mode 100644 index 0000000000..5d20cdc5ec --- /dev/null +++ b/tools/keeper/infrastructure/config/metric_test.go @@ -0,0 +1,85 @@ +package config_test + +import ( + "fmt" + "io" + "os" + "runtime" + "testing" + + "github.com/BurntSushi/toml" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/version" +) + +func TestConfig(t *testing.T) { + data := ` +# Start with debug middleware for gin +debug = true +# Listen port, default is 6043 +port = 9000 +# log level +loglevel = "error" +# go pool size +gopoolsize = 5000 +# interval for TDengine metrics +RotationInterval = "10s" +[tdengine] +address = "http://localhost:6041" +authtype = "Basic" +username = "root" +password = "taosdata" +` + var c config.Config + _, err := toml.Decode(data, &c) + if err != nil { + t.Error(err) + return + } + assert.EqualValues(t, c, c) + fmt.Print(c) +} + +func TestBakConfig(t *testing.T) { + isOk := copyConfigFile() + if isOk { + config.Name = "aaa" + config.InitConfig() + config.Name = "taoskeeper" + } +} + +func copyConfigFile() bool { + var sourceFile string + var destinationFile string + switch runtime.GOOS { + case "windows": + sourceFile = fmt.Sprintf("C:\\%s\\cfg\\%s.toml", version.CUS_NAME, "taoskeeper") + destinationFile = fmt.Sprintf("C:\\%s\\cfg\\%s.toml", version.CUS_NAME, "keeper") + default: + sourceFile = fmt.Sprintf("/etc/%s/%s.toml", version.CUS_PROMPT, "taoskeeper") + destinationFile = fmt.Sprintf("/etc/%s/%s.toml", version.CUS_PROMPT, "keeper") + } + _, err := os.Stat(sourceFile) + if os.IsNotExist(err) { + return false + } + + source, err := 
os.Open(sourceFile) //open the source file + if err != nil { + panic(err) + } + defer source.Close() + + destination, err := os.Create(destinationFile) //create the destination file + if err != nil { + panic(err) + } + defer destination.Close() + _, err = io.Copy(destination, source) //copy the contents of source to destination file + if err != nil { + panic(err) + } + return true +} diff --git a/tools/keeper/infrastructure/config/metrics.go b/tools/keeper/infrastructure/config/metrics.go new file mode 100644 index 0000000000..c41544fc39 --- /dev/null +++ b/tools/keeper/infrastructure/config/metrics.go @@ -0,0 +1,29 @@ +package config + +type MetricsConfig struct { + Cluster string `toml:"cluster"` + Prefix string `toml:"prefix"` + Database Database `toml:"database"` + Tables []string `toml:"tables"` +} + +type TaosAdapter struct { + Address []string `toml:"address"` +} + +type Metric struct { + Alias string `toml:"alias"` + Help string `toml:"help"` + Unit string `toml:"unit"` + Type string `toml:"type"` + Labels map[string]string `toml:"labels"` +} + +type Environment struct { + InCGroup bool `toml:"incgroup"` +} + +type Database struct { + Name string `toml:"name"` + Options map[string]interface{} `toml:"options"` +} diff --git a/tools/keeper/infrastructure/log/empty_test.go b/tools/keeper/infrastructure/log/empty_test.go new file mode 100644 index 0000000000..468c02173b --- /dev/null +++ b/tools/keeper/infrastructure/log/empty_test.go @@ -0,0 +1,8 @@ +package log + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/infrastructure/log/log.go b/tools/keeper/infrastructure/log/log.go new file mode 100644 index 0000000000..0a54e99eb9 --- /dev/null +++ b/tools/keeper/infrastructure/log/log.go @@ -0,0 +1,278 @@ +package log + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/sirupsen/logrus" + rotatelogs "github.com/taosdata/file-rotatelogs/v2" + "github.com/taosdata/taoskeeper/infrastructure/config" + + "github.com/taosdata/taoskeeper/version" +) + +var logger = logrus.New() +var ServerID = randomID() +var globalLogFormatter = &TaosLogFormatter{} +var finish = make(chan struct{}) +var exist = make(chan struct{}) + +var bufferPool = &defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + buf.Reset() + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +type FileHook struct { + formatter logrus.Formatter + writer io.Writer + buf *bytes.Buffer + sync.Mutex +} + +func NewFileHook(formatter logrus.Formatter, writer io.WriteCloser) *FileHook { + fh := &FileHook{formatter: formatter, writer: writer, buf: &bytes.Buffer{}} + ticker := time.NewTicker(time.Second * 5) + go func() { + for { + select { + case <-ticker.C: + //can be optimized by tryLock + fh.Lock() + if fh.buf.Len() > 0 { + fh.flush() + } + fh.Unlock() + case <-exist: + fh.Lock() + fh.flush() + fh.Unlock() + writer.Close() + ticker.Stop() + close(finish) + return + } + } + }() + return fh +} + +func (f *FileHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (f *FileHook) Fire(entry *logrus.Entry) error { + if entry.Buffer == nil { + entry.Buffer = bufferPool.Get() + defer func() { + bufferPool.Put(entry.Buffer) + entry.Buffer = nil + }() + } + data, err := f.formatter.Format(entry) + if err != nil { + return err + } + f.Lock() 
+ f.buf.Write(data) + if f.buf.Len() > 1024 || entry.Level == logrus.FatalLevel || entry.Level == logrus.PanicLevel { + err = f.flush() + } + f.Unlock() + return err +} + +func (f *FileHook) flush() error { + _, err := f.writer.Write(f.buf.Bytes()) + f.buf.Reset() + return err +} + +var once sync.Once + +func ConfigLog() { + once.Do(func() { + err := SetLevel(config.Conf.LogLevel) + if err != nil { + panic(err) + } + writer, err := rotatelogs.New( + filepath.Join(config.Conf.Log.Path, fmt.Sprintf("%skeeper_%d_%%Y%%m%%d.log", version.CUS_PROMPT, config.Conf.InstanceID)), + rotatelogs.WithRotationCount(config.Conf.Log.RotationCount), + rotatelogs.WithRotationTime(time.Hour*24), + rotatelogs.WithRotationSize(int64(config.Conf.Log.RotationSize)), + rotatelogs.WithReservedDiskSize(int64(config.Conf.Log.ReservedDiskSize)), + rotatelogs.WithRotateGlobPattern(filepath.Join(config.Conf.Log.Path, fmt.Sprintf("%skeeper_%d_*.log*", version.CUS_PROMPT, config.Conf.InstanceID))), + rotatelogs.WithCompress(config.Conf.Log.Compress), + rotatelogs.WithCleanLockFile(filepath.Join(config.Conf.Log.Path, fmt.Sprintf(".%skeeper_%d_rotate_lock", version.CUS_PROMPT, config.Conf.InstanceID))), + rotatelogs.ForceNewFile(), + rotatelogs.WithMaxAge(time.Hour*24*time.Duration(config.Conf.Log.KeepDays)), + ) + if err != nil { + panic(err) + } + fmt.Fprintln(writer, "==================================================") + fmt.Fprintln(writer, " new log file") + fmt.Fprintln(writer, "==================================================") + fmt.Fprintf(writer, "config:%+v\n", config.Conf) + + fmt.Fprintf(writer, "%-45s%v\n", "version", version.Version) + fmt.Fprintf(writer, "%-45s%v\n", "gitinfo", version.CommitID) + fmt.Fprintf(writer, "%-45s%v\n", "buildinfo", version.BuildInfo) + + hook := NewFileHook(globalLogFormatter, writer) + logger.AddHook(hook) + }) +} + +func SetLevel(level string) error { + l, err := logrus.ParseLevel(level) + if err != nil { + return err + } + logger.SetLevel(l) + return nil +} + +func GetLogger(model string) *logrus.Entry { + return logger.WithFields(logrus.Fields{config.ModelKey: model}) +} + +func init() { + logrus.SetBufferPool(bufferPool) + logger.SetFormatter(globalLogFormatter) + logger.SetOutput(os.Stdout) +} + +func randomID() string { + return fmt.Sprintf("%08d", os.Getpid()) +} + +type TaosLogFormatter struct { +} + +func (t *TaosLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + b.Reset() + b.WriteString(entry.Time.Format("01/02 15:04:05.000000")) + b.WriteByte(' ') + b.WriteString(ServerID) + b.WriteByte(' ') + v, exist := entry.Data[config.ModelKey] + if exist && v != nil { + b.WriteString(v.(string)) + b.WriteByte(' ') + } else { + b.WriteString("CLI ") + } + switch entry.Level { + case logrus.PanicLevel: + b.WriteString("PANIC ") + case logrus.FatalLevel: + b.WriteString("FATAL ") + case logrus.ErrorLevel: + b.WriteString("ERROR ") + case logrus.WarnLevel: + b.WriteString("WARN ") + case logrus.InfoLevel: + b.WriteString("INFO ") + case logrus.DebugLevel: + b.WriteString("DEBUG ") + case logrus.TraceLevel: + b.WriteString("TRACE ") + } + + // request id + v, exist = entry.Data[config.ReqIDKey] + if exist && v != nil { + b.WriteString(config.ReqIDKey) + b.WriteByte(':') + fmt.Fprintf(b, "0x%x ", v) + } + if len(entry.Message) > 0 && entry.Message[len(entry.Message)-1] == '\n' { + b.WriteString(entry.Message[:len(entry.Message)-1]) + } else { + 
b.WriteString(entry.Message) + } + // sort the keys + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + if k == config.ModelKey || k == config.ReqIDKey { + continue + } + keys = append(keys, k) + } + for _, k := range keys { + v := entry.Data[k] + if k == config.ReqIDKey && v == nil { + continue + } + b.WriteString(", ") + b.WriteString(k) + b.WriteByte(':') + fmt.Fprintf(b, "%v", v) + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func IsDebug() bool { + return logger.IsLevelEnabled(logrus.DebugLevel) +} + +func GetLogLevel() logrus.Level { + return logger.Level +} + +var zeroTime = time.Time{} +var zeroDuration = time.Duration(0) + +func GetLogNow(isDebug bool) time.Time { + if isDebug { + return time.Now() + } + return zeroTime +} +func GetLogDuration(isDebug bool, s time.Time) time.Duration { + if isDebug { + return time.Since(s) + } + return zeroDuration +} + +func Close(ctx context.Context) { + close(exist) + select { + case <-finish: + return + case <-ctx.Done(): + return + } +} diff --git a/tools/keeper/infrastructure/log/log_test.go b/tools/keeper/infrastructure/log/log_test.go new file mode 100644 index 0000000000..656cda4bbc --- /dev/null +++ b/tools/keeper/infrastructure/log/log_test.go @@ -0,0 +1,23 @@ +package log + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/infrastructure/config" +) + +func TestConfigLog(t *testing.T) { + config.InitConfig() + config.Conf.LogLevel = "debug" + ConfigLog() + debug, _ := logrus.ParseLevel("debug") + assert.Equal(t, logger.Level, debug) + assert.Equal(t, true, IsDebug()) + fmt.Print(GetLogNow(true), GetLogDuration(true, time.Now())) + Close(context.Background()) +} diff --git a/tools/keeper/infrastructure/log/web.go b/tools/keeper/infrastructure/log/web.go new file mode 100644 index 0000000000..4aa244448b --- /dev/null +++ b/tools/keeper/infrastructure/log/web.go @@ -0,0 +1,55 @@ +package log + +import ( + "time" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func GinLog() gin.HandlerFunc { + logger := GetLogger("WEB") + + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + logger := logger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + statusCode := c.Writer.Status() + + startTime := time.Now() + c.Next() + endTime := time.Now() + latencyTime := endTime.Sub(startTime) + reqMethod := c.Request.Method + reqUri := c.Request.RequestURI + + clientIP := c.ClientIP() + + if statusCode != 200 { + logger.Errorf("finish request, status_code:%3d, latency:%v, client_ip:%s, method:%s, uri:%s", statusCode, latencyTime, clientIP, reqMethod, reqUri) + return + } + logger.Infof("finish request, status_code:%3d, latency:%v, client_ip:%s, method:%s, uri:%s", statusCode, latencyTime, clientIP, reqMethod, reqUri) + } +} + +type recoverLog struct { + logger logrus.FieldLogger +} + +func (r *recoverLog) Write(p []byte) (n int, err error) { + r.logger.Errorln(string(p)) + return len(p), nil +} + +func GinRecoverLog() gin.HandlerFunc { + logger := GetLogger("WEB") + return func(c *gin.Context) { + writer := &recoverLog{logger: logger} + gin.RecoveryWithWriter(writer)(c) + } +} diff --git a/tools/keeper/main.go b/tools/keeper/main.go new file mode 100644 index 0000000000..43432bde3d --- /dev/null +++ b/tools/keeper/main.go @@ -0,0 +1,11 @@ +package main + +import ( + 
"github.com/taosdata/taoskeeper/system" +) + +func main() { + r := system.Init() + system.Start(r) + // config.IsEnterprise +} diff --git a/tools/keeper/monitor/collect.go b/tools/keeper/monitor/collect.go new file mode 100644 index 0000000000..652ae1f1ce --- /dev/null +++ b/tools/keeper/monitor/collect.go @@ -0,0 +1,99 @@ +package monitor + +import ( + "math" + "os" + "runtime" + + "github.com/shirou/gopsutil/v3/mem" + "github.com/shirou/gopsutil/v3/process" + "github.com/taosdata/taoskeeper/util" +) + +type SysCollector interface { + CpuPercent() (float64, error) + MemPercent() (float64, error) +} + +type NormalCollector struct { + p *process.Process +} + +func NewNormalCollector() (*NormalCollector, error) { + p, err := process.NewProcess(int32(os.Getpid())) + if err != nil { + return nil, err + } + return &NormalCollector{p: p}, nil +} + +func (n *NormalCollector) CpuPercent() (float64, error) { + cpuPercent, err := n.p.Percent(0) + if err != nil { + return 0, err + } + return cpuPercent / float64(runtime.NumCPU()), nil +} + +func (n *NormalCollector) MemPercent() (float64, error) { + memPercent, err := n.p.MemoryPercent() + if err != nil { + return 0, err + } + return float64(memPercent), nil +} + +const ( + CGroupCpuQuotaPath = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + CGroupCpuPeriodPath = "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + CGroupMemLimitPath = "/sys/fs/cgroup/memory/memory.limit_in_bytes" +) + +type CGroupCollector struct { + p *process.Process + cpuCore float64 + totalMemory uint64 +} + +func NewCGroupCollector() (*CGroupCollector, error) { + p, err := process.NewProcess(int32(os.Getpid())) + if err != nil { + return nil, err + } + cpuPeriod, err := util.ReadUint(CGroupCpuPeriodPath) + if err != nil { + return nil, err + } + cpuQuota, err := util.ReadUint(CGroupCpuQuotaPath) + if err != nil { + return nil, err + } + cpuCore := float64(cpuQuota) / float64(cpuPeriod) + limitMemory, err := util.ReadUint(CGroupMemLimitPath) + if err != nil { + return nil, err + } + machineMemory, err := mem.VirtualMemory() + if err != nil { + return nil, err + } + totalMemory := uint64(math.Min(float64(limitMemory), float64(machineMemory.Total))) + return &CGroupCollector{p: p, cpuCore: cpuCore, totalMemory: totalMemory}, nil +} + +func (c *CGroupCollector) CpuPercent() (float64, error) { + cpuPercent, err := c.p.Percent(0) + if err != nil { + return 0, err + } + cpuPercent = cpuPercent / c.cpuCore + return cpuPercent, nil +} + +func (c *CGroupCollector) MemPercent() (float64, error) { + memInfo, err := c.p.MemoryInfo() + if err != nil { + return 0, err + } + return 100 * float64(memInfo.RSS) / float64(c.totalMemory), nil +} diff --git a/tools/keeper/monitor/empty_test.go b/tools/keeper/monitor/empty_test.go new file mode 100644 index 0000000000..689acfac4c --- /dev/null +++ b/tools/keeper/monitor/empty_test.go @@ -0,0 +1,8 @@ +package monitor + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/monitor/monitor.go b/tools/keeper/monitor/monitor.go new file mode 100644 index 0000000000..6f3083e866 --- /dev/null +++ b/tools/keeper/monitor/monitor.go @@ -0,0 +1,89 @@ +package monitor + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/taosdata/taoskeeper/api" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" + "github.com/taosdata/taoskeeper/util/pool" +) + +var logger = log.GetLogger("MON") + +func 
StartMonitor(identity string, conf *config.Config, reporter *api.Reporter) { + if len(identity) == 0 { + hostname, err := os.Hostname() + if err != nil { + logger.Errorf("can not get hostname, error:%s", err) + } + if len(hostname) > 40 { + hostname = hostname[:40] + } + identity = fmt.Sprintf("%s:%d", hostname, conf.Port) + } + + systemStatus := make(chan SysStatus) + _ = pool.GoroutinePool.Submit(func() { + var ( + cpuPercent float64 + memPercent float64 + totalReport int + ) + + for status := range systemStatus { + if status.CpuError == nil { + cpuPercent = status.CpuPercent + } + if status.MemError == nil { + memPercent = status.MemPercent + } + + totalResp := reporter.GetTotalRep() + for i := 0; i < 3; i++ { + totalReport = totalResp.Load().(int) + if totalResp.CompareAndSwap(totalReport, 0) { + break + } + logger.Warn("Reset keeper_monitor total resp via cas fail! Maybe to many concurrent ") + reporter.GetTotalRep().Store(0) + } + + var kn string + if len(identity) <= util.MAX_TABLE_NAME_LEN { + kn = util.ToValidTableName(identity) + } else { + kn = util.GetMd5HexStr(identity) + } + + sql := fmt.Sprintf("insert into `km_%s` using keeper_monitor tags ('%s') values ( now, "+ + " %f, %f, %d)", kn, identity, cpuPercent, memPercent, totalReport) + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, + conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + + ctx := context.Background() + if _, err = conn.Exec(ctx, sql, util.GetQidOwn()); err != nil { + logger.Errorf("execute sql:%s, error:%s", sql, err) + } + + if err := conn.Close(); err != nil { + logger.Errorf("close connection error, msg:%s", err) + } + } + }) + SysMonitor.Register(systemStatus) + interval, err := time.ParseDuration(conf.RotationInterval) + if err != nil { + panic(err) + } + Start(interval, conf.Env.InCGroup) +} diff --git a/tools/keeper/monitor/monitor_test.go b/tools/keeper/monitor/monitor_test.go new file mode 100644 index 0000000000..b2b860dcaa --- /dev/null +++ b/tools/keeper/monitor/monitor_test.go @@ -0,0 +1,58 @@ +package monitor + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/go-utils/web" + "github.com/taosdata/taoskeeper/api" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/util" + + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" +) + +func TestStart(t *testing.T) { + conf := config.InitConfig() + if conf == nil { + panic("config error") + } + conf.Env.InCGroup = true + cpuCgroupDir := "/sys/fs/cgroup/cpu" + if _, err := os.Stat(cpuCgroupDir); os.IsNotExist(err) { + conf.Env.InCGroup = false + } + log.ConfigLog() + router := web.CreateRouter(false, &conf.Cors, false) + conf.Metrics.Database.Name = "monitor" + reporter := api.NewReporter(conf) + reporter.Init(router) + conf.RotationInterval = "1s" + StartMonitor("", conf, reporter) + time.Sleep(2 * time.Second) + for k, _ := range SysMonitor.outputs { + SysMonitor.Deregister(k) + } + + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl) + assert.NoError(t, err) + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn()) + +} + +func TestParseUint(t *testing.T) { + 
num, err := util.ParseUint("-1", 10, 8) + assert.Equal(t, nil, err) + assert.Equal(t, uint64(0), num) + num, err = util.ParseUint("0", 10, 8) + assert.Equal(t, nil, err) + assert.Equal(t, uint64(0), num) + num, err = util.ParseUint("257", 10, 8) + assert.Equal(t, "strconv.ParseUint: parsing \"257\": value out of range", err.Error()) + assert.Equal(t, uint64(0), num) +} diff --git a/tools/keeper/monitor/system.go b/tools/keeper/monitor/system.go new file mode 100644 index 0000000000..7d5ef5bd54 --- /dev/null +++ b/tools/keeper/monitor/system.go @@ -0,0 +1,97 @@ +package monitor + +import ( + "math" + "runtime" + "sync" + "time" + + "github.com/taosdata/taoskeeper/util/pool" +) + +type SysStatus struct { + CollectTime time.Time + CpuPercent float64 + CpuError error + MemPercent float64 + MemError error + GoroutineCounts int + ThreadCounts int +} + +type sysMonitor struct { + sync.Mutex + collectDuration time.Duration + collector SysCollector + status *SysStatus + outputs map[chan<- SysStatus]struct{} + ticker *time.Ticker +} + +func (s *sysMonitor) collect() { + s.status.CollectTime = time.Now() + s.status.CpuPercent, s.status.CpuError = s.collector.CpuPercent() + s.status.MemPercent, s.status.MemError = s.collector.MemPercent() + s.status.GoroutineCounts = runtime.NumGoroutine() + s.status.ThreadCounts, _ = runtime.ThreadCreateProfile(nil) + // skip when inf or nan + if math.IsInf(s.status.CpuPercent, 0) || math.IsNaN(s.status.CpuPercent) || + math.IsInf(s.status.MemPercent, 0) || math.IsNaN(s.status.MemPercent) { + return + } + + s.Lock() + for output := range s.outputs { + select { + case output <- *s.status: + default: + } + } + s.Unlock() +} + +func (s *sysMonitor) Register(c chan<- SysStatus) { + s.Lock() + if s.outputs == nil { + s.outputs = map[chan<- SysStatus]struct{}{ + c: {}, + } + } else { + s.outputs[c] = struct{}{} + } + s.Unlock() +} + +func (s *sysMonitor) Deregister(c chan<- SysStatus) { + s.Lock() + if s.outputs != nil { + delete(s.outputs, c) + } + s.Unlock() +} + +var SysMonitor = &sysMonitor{status: &SysStatus{}} + +func Start(collectDuration time.Duration, inCGroup bool) { + SysMonitor.collectDuration = collectDuration + if inCGroup { + collector, err := NewCGroupCollector() + if err != nil { + logger.Errorf("new normal group controller error, msg:%s", err) + } + SysMonitor.collector = collector + } else { + collector, err := NewNormalCollector() + if err != nil { + logger.Errorf("new normal controller error, msg:%s", err) + } + SysMonitor.collector = collector + } + SysMonitor.collect() + SysMonitor.ticker = time.NewTicker(SysMonitor.collectDuration) + pool.GoroutinePool.Submit(func() { + for range SysMonitor.ticker.C { + SysMonitor.collect() + } + }) +} diff --git a/tools/keeper/process/builder.go b/tools/keeper/process/builder.go new file mode 100644 index 0000000000..d6e37534bf --- /dev/null +++ b/tools/keeper/process/builder.go @@ -0,0 +1,55 @@ +package process + +import ( + "context" + "fmt" + + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var builderLogger = log.GetLogger("BLD") + +func ExpandMetricsFromConfig(ctx context.Context, conn *db.Connector, cfg *config.MetricsConfig) (tables map[string]struct{}, err error) { + tables = make(map[string]struct{}) + for _, name := range cfg.Tables { + builderLogger.Debug("normal table: ", name) + + _, exist := tables[name] + if exist { + builderLogger.Debug(name, "is exist in 
config") + continue + } + tables[name] = struct{}{} + } + + sql := fmt.Sprintf(GetStableNameListSql(), cfg.Database.Name) + data, err := conn.Query(ctx, sql, util.GetQidOwn()) + if err != nil { + return nil, err + } + builderLogger.Debugf("show stables:%s", sql) + + for _, info := range data.Data { + name := info[0].(string) + builderLogger.Debug("stable:", info) + + _, exist := tables[name] + if exist { + builderLogger.Debug(name, "is exist in config") + continue + } + tables[name] = struct{}{} + } + return +} + +func GetStableNameListSql() string { + return "select stable_name from information_schema.ins_stables " + + " where db_name = '%s' " + + " and (stable_name not like 'taosx\\_%%')" + + " and (stable_name not like 'taosadapter%%')" + + " and (stable_name != 'temp_dir' and stable_name != 'data_dir')" +} diff --git a/tools/keeper/process/empty_test.go b/tools/keeper/process/empty_test.go new file mode 100644 index 0000000000..6718d12525 --- /dev/null +++ b/tools/keeper/process/empty_test.go @@ -0,0 +1,8 @@ +package process + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/process/handle.go b/tools/keeper/process/handle.go new file mode 100644 index 0000000000..980902daca --- /dev/null +++ b/tools/keeper/process/handle.go @@ -0,0 +1,666 @@ +package process + +import ( + "context" + "errors" + "fmt" + "math" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + taosError "github.com/taosdata/driver-go/v3/errors" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" + "github.com/taosdata/taoskeeper/util/pool" +) + +var logger = log.GetLogger("HND") + +var metricNameMap = map[string]string{ + "taosd_cluster_basic_first_ep": "cluster_info_first_ep", + "taosd_cluster_basic_first_ep_dnode_id": "cluster_info_first_ep_dnode_id", + "taosd_cluster_basic_cluster_version": "cluster_info_version", + + "taosd_cluster_info_cluster_uptime": "cluster_info_master_uptime", + "taosd_cluster_info_dbs_total": "cluster_info_dbs_total", + "taosd_cluster_info_tbs_total": "cluster_info_tbs_total", + "taosd_cluster_info_stbs_total": "cluster_info_stbs_total", + "taosd_cluster_info_dnodes_total": "cluster_info_dnodes_total", + "taosd_cluster_info_dnodes_alive": "cluster_info_dnodes_alive", + "taosd_cluster_info_mnodes_total": "cluster_info_mnodes_total", + "taosd_cluster_info_mnodes_alive": "cluster_info_mnodes_alive", + "taosd_cluster_info_vgroups_total": "cluster_info_vgroups_total", + "taosd_cluster_info_vgroups_alive": "cluster_info_vgroups_alive", + "taosd_cluster_info_vnodes_total": "cluster_info_vnodes_total", + "taosd_cluster_info_vnodes_alive": "cluster_info_vnodes_alive", + "taosd_cluster_info_connections_total": "cluster_info_connections_total", + "taosd_cluster_info_topics_total": "cluster_info_topics_total", + "taosd_cluster_info_streams_total": "cluster_info_streams_total", + + "taosd_cluster_info_grants_expire_time": "grants_info_expire_time", + "taosd_cluster_info_grants_timeseries_used": "grants_info_timeseries_used", + "taosd_cluster_info_grants_timeseries_total": "grants_info_timeseries_total", + + "taosd_dnodes_info_uptime": "dnodes_info_uptime", + "taosd_dnodes_info_cpu_engine": "dnodes_info_cpu_engine", + "taosd_dnodes_info_cpu_system": "dnodes_info_cpu_system", + "taosd_dnodes_info_cpu_cores": "dnodes_info_cpu_cores", + "taosd_dnodes_info_mem_engine": "dnodes_info_mem_engine", + 
"taosd_dnodes_info_mem_free": "dnodes_info_mem_system", + "taosd_dnodes_info_mem_total": "dnodes_info_mem_total", + "taosd_dnodes_info_disk_engine": "dnodes_info_disk_engine", + "taosd_dnodes_info_disk_used": "dnodes_info_disk_used", + "taosd_dnodes_info_disk_total": "dnodes_info_disk_total", + "taosd_dnodes_info_system_net_in": "dnodes_info_net_in", + "taosd_dnodes_info_system_net_out": "dnodes_info_net_out", + "taosd_dnodes_info_io_read": "dnodes_info_io_read", + "taosd_dnodes_info_io_write": "dnodes_info_io_write", + "taosd_dnodes_info_io_read_disk": "dnodes_info_io_read_disk", + "taosd_dnodes_info_io_write_disk": "dnodes_info_io_write_disk", + "taosd_dnodes_info_vnodes_num": "dnodes_info_vnodes_num", + "taosd_dnodes_info_masters": "dnodes_info_masters", + "taosd_dnodes_info_has_mnode": "dnodes_info_has_mnode", + "taosd_dnodes_info_has_qnode": "dnodes_info_has_qnode", + "taosd_dnodes_info_has_snode": "dnodes_info_has_snode", + "taosd_dnodes_info_has_bnode": "dnodes_info_has_bnode", + "taosd_dnodes_info_errors": "dnodes_info_errors", + "taosd_dnodes_info_error_log_count": "log_summary_error", + "taosd_dnodes_info_info_log_count": "log_summary_info", + "taosd_dnodes_info_debug_log_count": "log_summary_debug", + "taosd_dnodes_info_trace_log_count": "log_summary_trace", + + "taosd_dnodes_status_status": "d_info_status", + + "taosd_mnodes_info_role": "m_info_role", +} + +var metricTypeMap = map[string]CollectType{ + "taosd_cluster_basic_first_ep": Info, + "taosd_cluster_basic_first_ep_dnode_id": Counter, + "taosd_cluster_basic_cluster_version": Info, + + "taosd_cluster_info_cluster_uptime": Gauge, + "taosd_cluster_info_dbs_total": Counter, + "taosd_cluster_info_tbs_total": Counter, + "taosd_cluster_info_stbs_total": Counter, + "taosd_cluster_info_dnodes_total": Counter, + "taosd_cluster_info_dnodes_alive": Counter, + "taosd_cluster_info_mnodes_total": Counter, + "taosd_cluster_info_mnodes_alive": Counter, + "taosd_cluster_info_vgroups_total": Counter, + "taosd_cluster_info_vgroups_alive": Counter, + "taosd_cluster_info_vnodes_total": Counter, + "taosd_cluster_info_vnodes_alive": Counter, + "taosd_cluster_info_connections_total": Counter, + "taosd_cluster_info_topics_total": Counter, + "taosd_cluster_info_streams_total": Counter, + + "taosd_cluster_info_grants_expire_time": Counter, + "taosd_cluster_info_grants_timeseries_used": Counter, + "taosd_cluster_info_grants_timeseries_total": Counter, + + "taosd_dnodes_info_uptime": Gauge, + "taosd_dnodes_info_cpu_engine": Gauge, + "taosd_dnodes_info_cpu_system": Gauge, + "taosd_dnodes_info_cpu_cores": Gauge, + "taosd_dnodes_info_mem_engine": Counter, + "taosd_dnodes_info_mem_free": Counter, + "taosd_dnodes_info_mem_total": Counter, + "taosd_dnodes_info_disk_engine": Counter, + "taosd_dnodes_info_disk_used": Counter, + "taosd_dnodes_info_disk_total": Counter, + "taosd_dnodes_info_system_net_in": Gauge, + "taosd_dnodes_info_system_net_out": Gauge, + "taosd_dnodes_info_io_read": Gauge, + "taosd_dnodes_info_io_write": Gauge, + "taosd_dnodes_info_io_read_disk": Gauge, + "taosd_dnodes_info_io_write_disk": Gauge, + "taosd_dnodes_info_vnodes_num": Counter, + "taosd_dnodes_info_masters": Counter, + "taosd_dnodes_info_has_mnode": Counter, + "taosd_dnodes_info_has_qnode": Counter, + "taosd_dnodes_info_has_snode": Counter, + "taosd_dnodes_info_has_bnode": Counter, + "taosd_dnodes_info_errors": Counter, + "taosd_dnodes_info_error_log_count": Counter, + "taosd_dnodes_info_info_log_count": Counter, + "taosd_dnodes_info_debug_log_count": Counter, + 
"taosd_dnodes_info_trace_log_count": Counter, + + "taosd_dnodes_status_status": Info, + + "taosd_mnodes_info_role": Info, +} + +type CollectType string + +const ( + Counter CollectType = "counter" + Gauge CollectType = "gauge" + Info CollectType = "info" + Summary CollectType = "summary" +) + +type Processor struct { + prefix string + db string + tableMap map[string]*Table //tableName:*Table{} + metricMap map[string]*Metric //Fqname:*Metric{} + tableList []string + ctx context.Context + rotationInterval time.Duration + exitChan chan struct{} + dbConn *db.Connector + summaryTable map[string]*Table + tables map[string]struct{} +} + +func (p *Processor) Describe(descs chan<- *prometheus.Desc) { + for _, metric := range p.metricMap { + descs <- metric.Desc + } +} + +func (p *Processor) Collect(metrics chan<- prometheus.Metric) { + for _, metric := range p.metricMap { + logger.Tracef("metric name:%v", metric.FQName) + + switch metric.Type { + case Gauge: + gv := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: metric.FQName, + Help: metric.Help, + ConstLabels: metric.ConstLabels, + }, metric.Variables) + for _, value := range metric.GetValue() { + if value.Value == nil { + continue + } + g := gv.With(value.Label) + g.Set(value.Value.(float64)) + metrics <- g + } + case Counter: + cv := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: metric.FQName, + Help: metric.Help, + ConstLabels: metric.ConstLabels, + }, metric.Variables) + for _, value := range metric.GetValue() { + if value.Value == nil { + continue + } + v := i2float(value.Value) + if v < 0 { + logger.Warningf("negative value for prometheus counter. label %v value %v", + value.Label, value.Value) + continue + } + c := cv.With(value.Label) + c.Add(v) + metrics <- c + } + case Info: + lbs := []string{"value"} + lbs = append(lbs, metric.Variables...) 
+ gf := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: metric.FQName, + Help: metric.Help, + ConstLabels: metric.ConstLabels, + }, lbs) + for _, value := range metric.GetValue() { + if value == nil { + continue + } + v := make(map[string]string, len(value.Label)+1) + v["value"] = value.Value.(string) + for k, l := range value.Label { + v[k] = l + } + g := gf.With(v) + g.Set(1) + metrics <- g + } + case Summary: + } + } +} + +type Table struct { + tsName string + Variables []string + ColumnList []string +} + +type Metric struct { + sync.RWMutex + FQName string + Help string + Type CollectType + ColType int + ConstLabels map[string]string + Variables []string + Desc *prometheus.Desc + LastValue []*Value +} + +func (m *Metric) SetValue(v []*Value) { + m.Lock() + defer m.Unlock() + m.LastValue = v +} + +func (m *Metric) GetValue() []*Value { + m.RLock() + defer m.RUnlock() + return m.LastValue +} + +type Value struct { + Label map[string]string + Value interface{} +} + +func NewProcessor(conf *config.Config) *Processor { + + conn, err := db.NewConnector(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl) + if err != nil { + panic(err) + } + interval, err := time.ParseDuration(conf.RotationInterval) + if err != nil { + panic(err) + } + ctx := context.Background() + tables, err := ExpandMetricsFromConfig(ctx, conn, &conf.Metrics) + if err != nil { + panic(err) + } + p := &Processor{ + prefix: conf.Metrics.Prefix, + db: conf.Metrics.Database.Name, + tableMap: map[string]*Table{}, + metricMap: map[string]*Metric{}, + ctx: ctx, + rotationInterval: interval, + exitChan: make(chan struct{}), + dbConn: conn, + summaryTable: map[string]*Table{"taosadapter_restful_http_request_summary_milliseconds": nil}, + tables: tables, + } + p.Prepare() + p.Process() + return p +} + +func (p *Processor) Prepare() { + locker := sync.RWMutex{} + wg := sync.WaitGroup{} + wg.Add(len(p.tables)) + + for tn := range p.tables { + tableName := tn + + err := pool.GoroutinePool.Submit(func() { + defer wg.Done() + data, err := p.dbConn.Query(p.ctx, fmt.Sprintf("describe %s", p.withDBName(tableName)), util.GetQidOwn()) + if err != nil { + var tdEngineError *taosError.TaosError + if errors.As(err, &tdEngineError) { + logger.Errorf("table %s not exist, skip it, error:%s", tableName, err) + } else { + logger.Errorf("could not get table %s metadata, skip it, error:%s", tableName, err) + } + return + } + + tags := make([]string, 0, len(data.Data)) + columns := make([]string, 0, len(data.Data)) + typeList := make([]string, 0, len(data.Data)) + columnMap := make(map[string]struct{}, len(data.Data)) + variablesMap := make(map[string]struct{}, len(data.Data)) + for _, info := range data.Data { + if info[3].(string) != "" { + variable := info[0].(string) + tags = append(tags, variable) + variablesMap[variable] = struct{}{} + } else { + column := info[0].(string) + columns = append(columns, column) + typeList = append(typeList, info[1].(string)) + columnMap[column] = struct{}{} + } + } + + // metrics := make([]*Metric, 0, len(columns)) + // newMetrics := make(map[string]*Metric, len(columns)) + columnList := make([]string, 0, len(columns)) + + timestampColumn := "ts" + _, exist := p.summaryTable[tableName] + for i, column := range columns { + if _, columnExist := variablesMap[column]; columnExist { + continue + } + + if typeList[i] == "TIMESTAMP" { + timestampColumn = column + continue + } + + columnName, metricType := "", Summary + if !exist { + columnName = column + + if _, 
ok := metricTypeMap[tableName+"_"+columnName]; ok {
+						metricType = metricTypeMap[tableName+"_"+columnName]
+					} else {
+						metricType = exchangeDBType(typeList[i])
+					}
+
+					// Hard-coded for compatibility; to be optimized later
+					if strings.HasSuffix(columnName, "role") {
+						metricType = Info
+					}
+				}
+
+				labels := make(map[string]string)
+
+				fqName := p.buildFQName(tableName, columnName)
+				pDesc := prometheus.NewDesc(fqName, "", nil, labels)
+				metric := &Metric{
+					Type: metricType,
+					Desc: pDesc,
+					FQName: fqName,
+					Help: "",
+					ConstLabels: labels,
+					Variables: tags,
+				}
+				// metrics = append(metrics, metric)
+				// newMetrics[column] = metric
+
+				locker.Lock()
+				p.metricMap[fqName] = metric
+				locker.Unlock()
+
+				columnList = append(columnList, column)
+			}
+
+			t := &Table{
+				tsName: timestampColumn,
+				Variables: tags,
+				ColumnList: columnList,
+			}
+			locker.Lock()
+			p.tableMap[tableName] = t
+			p.tableList = append(p.tableList, tableName)
+			locker.Unlock()
+
+		})
+		if err != nil {
+			panic(err)
+		}
+	}
+
+	wg.Wait()
+}
+
+func (p *Processor) withDBName(tableName string) string {
+	b := pool.BytesPoolGet()
+	b.WriteString(p.db)
+	b.WriteByte('.')
+	b.WriteString(tableName)
+	return b.String()
+}
+
+func (p *Processor) Process() {
+	// Clear all metric values first
+	for _, metric := range p.metricMap {
+		metric.SetValue(nil)
+	}
+
+	for _, tableName := range p.tableList {
+		tagIndex := 0
+		hasTag := false
+		b := pool.BytesPoolGet()
+		b.WriteString("select ")
+
+		table := p.tableMap[tableName]
+		columns := table.ColumnList
+
+		for i, column := range columns {
+			b.WriteString("last_row(`" + column + "`) as `" + column + "`")
+			if i != len(columns)-1 {
+				b.WriteByte(',')
+			}
+		}
+
+		if len(table.Variables) > 0 {
+			tagIndex = len(columns)
+			for _, tag := range table.Variables {
+				b.WriteString(", last_row(`" + tag + "`) as `" + tag + "`")
+			}
+		}
+
+		b.WriteString(" from ")
+		b.WriteString(p.withDBName(tableName))
+
+		b.WriteString(" WHERE " + p.tableMap[tableName].tsName + " > (NOW() - 1m) ")
+
+		if len(table.Variables) > 0 {
+			tagIndex = len(columns)
+			b.WriteString(" group by ")
+			for i, tag := range table.Variables {
+				b.WriteString("`" + tag + "`")
+				if i != len(table.Variables)-1 {
+					b.WriteByte(',')
+				}
+			}
+		}
+		sql := b.String()
+		pool.BytesPoolPut(b)
+		data, err := p.dbConn.Query(p.ctx, sql, util.GetQidOwn())
+		logger.Debug(sql)
+		if err != nil {
+			logger.WithError(err).Errorln("select data sql:", sql)
+			continue
+		}
+		if tagIndex > 0 {
+			hasTag = true
+		}
+		if len(data.Data) == 0 {
+			continue
+		}
+		values := make([][]*Value, len(table.ColumnList))
+		for _, row := range data.Data {
+			label := map[string]string{}
+			valuesMap := make(map[string]interface{})
+			colEndIndex := len(columns)
+			if hasTag {
+				for i := tagIndex; i < len(data.Head); i++ {
+					if row[i] != nil {
+						label[data.Head[i]] = fmt.Sprintf("%v", row[i])
+					}
+				}
+			}
+			// values array to map
+			for i := 0; i < colEndIndex; i++ {
+				valuesMap[columns[i]] = row[i]
+			}
+			for i, column := range table.ColumnList {
+				var v interface{}
+				metric := p.metricMap[p.buildFQName(tableName, column)]
+				switch metric.Type {
+				case Info:
+					_, isFloat := valuesMap[column].(float64)
+					if strings.HasSuffix(column, "role") && valuesMap[column] != nil && isFloat {
+						v = getRoleStr(valuesMap[column].(float64))
+						break
+					}
+					if strings.HasSuffix(column, "status") && valuesMap[column] != nil && isFloat {
+						v = getStatusStr(valuesMap[column].(float64))
+						break
+					}
+
+					if valuesMap[column] != nil {
+						v = i2string(valuesMap[column])
+					} else {
+						v = nil
+					}
+				case Counter, Gauge, Summary:
+					if valuesMap[column] != nil { 
+ v = i2float(valuesMap[column]) + if column == "cluster_uptime" { + v = i2float(valuesMap[column]) / 86400 + } + } else { + v = nil + } + } + values[i] = append(values[i], &Value{ + Label: label, + Value: v, + }) + } + } + + for i, column := range table.ColumnList { + metric := p.metricMap[p.buildFQName(tableName, column)] + for _, value := range values[i] { + logger.Tracef("set metric:%s, Label:%v, Value:%v", column, value.Label, value.Value) + } + if metric.GetValue() != nil { + values[i] = append(values[i], metric.GetValue()...) + } + metric.SetValue(values[i]) + } + } +} + +func (p *Processor) buildFQName(tableName string, column string) string { + + // keep same metric name + tempFQName := tableName + "_" + column + if _, ok := metricNameMap[tempFQName]; ok { + return p.prefix + "_" + metricNameMap[tempFQName] + } + + b := pool.BytesPoolGet() + b.WriteString(p.prefix) + b.WriteByte('_') + + b.WriteString(tableName) + + if column != "" { + b.WriteByte('_') + b.WriteString(column) + } + + fqName := b.String() + pool.BytesPoolPut(b) + + return fqName +} + +func (p *Processor) GetMetric() map[string]*Metric { + return p.metricMap +} + +func (p *Processor) Close() error { + close(p.exitChan) + return p.dbConn.Close() +} + +func getRoleStr(v float64) string { + rounded := math.Round(v) + integer := int(rounded) + + switch integer { + case 0: + return "offline" + case 100: + return "follower" + case 101: + return "candidate" + case 102: + return "leader" + case 103: + return "error" + case 104: + return "learner" + } + return "unknown" +} + +func getStatusStr(v float64) string { + rounded := math.Round(v) + integer := int(rounded) + + switch integer { + case 0: + return "offline" + case 1: + return "ready" + } + return "unknown" +} + +func exchangeDBType(t string) CollectType { + switch t { + case "BOOL", "FLOAT", "DOUBLE": + return Gauge + case "TINYINT", "SMALLINT", "INT", "BIGINT", "TINYINT UNSIGNED", "SMALLINT UNSIGNED", "INT UNSIGNED", "BIGINT UNSIGNED": + return Counter + case "BINARY", "NCHAR", "VARCHAR": + return Info + default: + panic("unsupported type") + } +} + +func i2string(value interface{}) string { + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + panic("unexpected type to string") + } +} + +func i2float(value interface{}) float64 { + switch v := value.(type) { + case int8: + return float64(v) + case int16: + return float64(v) + case int32: + return float64(v) + case int64: + return float64(v) + case uint8: + return float64(v) + case uint16: + return float64(v) + case uint32: + return float64(v) + case uint64: + return float64(v) + case float64: + return v + case float32: + return float64(v) + case bool: + if v { + return 1 + } + return 0 + default: + panic("unexpected type to float64") + } +} diff --git a/tools/keeper/prometheus/prometheus.yml b/tools/keeper/prometheus/prometheus.yml new file mode 100644 index 0000000000..397d566d91 --- /dev/null +++ b/tools/keeper/prometheus/prometheus.yml @@ -0,0 +1,13 @@ +global: + scrape_interval: 5s + +scrape_configs: + - job_name: "prometheus" + static_configs: + - targets: ["localhost:9090"] + - job_name: "taoskeeper" + static_configs: + - targets: ["taoskeeper:6043"] + - job_name: "node" + static_configs: + - targets: ["nodeexporter:9100"] diff --git a/tools/keeper/system/empty_test.go b/tools/keeper/system/empty_test.go new file mode 100644 index 0000000000..a4d4777d32 --- /dev/null +++ b/tools/keeper/system/empty_test.go @@ -0,0 +1,8 @@ +package system + +import ( + "testing" +) 
+ +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/system/program.go b/tools/keeper/system/program.go new file mode 100644 index 0000000000..437da68f08 --- /dev/null +++ b/tools/keeper/system/program.go @@ -0,0 +1,146 @@ +package system + +import ( + "context" + "fmt" + "net/http" + "os" + "strconv" + "time" + + "github.com/kardianos/service" + "github.com/taosdata/go-utils/web" + "github.com/taosdata/taoskeeper/api" + "github.com/taosdata/taoskeeper/cmd" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/monitor" + "github.com/taosdata/taoskeeper/process" + "github.com/taosdata/taoskeeper/version" +) + +var logger = log.GetLogger("PRG") + +func Init() *http.Server { + conf := config.InitConfig() + log.ConfigLog() + + if len(conf.Transfer) > 0 || len(conf.Drop) > 0 { + cmd := cmd.NewCommand(conf) + cmd.Process(conf) + os.Exit(0) + return nil + } + + router := web.CreateRouter(false, &conf.Cors, false) + router.Use(log.GinLog()) + router.Use(log.GinRecoverLog()) + + reporter := api.NewReporter(conf) + reporter.Init(router) + monitor.StartMonitor(conf.Metrics.Cluster, conf, reporter) + + go func() { + // wait for monitor to all metric received + time.Sleep(time.Second * 35) + + processor := process.NewProcessor(conf) + node := api.NewNodeExporter(processor) + node.Init(router) + + if config.IsEnterprise == "true" { + zabbix := api.NewZabbix(processor) + zabbix.Init(router) + } + }() + + checkHealth := api.NewCheckHealth(version.Version) + checkHealth.Init(router) + + if config.IsEnterprise == "true" { + if conf.Audit.Enable { + audit, err := api.NewAudit(conf) + if err != nil { + panic(err) + } + if err = audit.Init(router); err != nil { + panic(err) + } + } + } + + adapter := api.NewAdapter(conf) + if err := adapter.Init(router); err != nil { + panic(err) + } + + gen_metric := api.NewGeneralMetric(conf) + if err := gen_metric.Init(router); err != nil { + panic(err) + } + + server := &http.Server{ + Addr: ":" + strconv.Itoa(conf.Port), + Handler: router, + } + + return server +} + +func Start(server *http.Server) { + prg := newProgram(server) + svcConfig := &service.Config{ + Name: "taoskeeper", + DisplayName: "taoskeeper", + Description: "taosKeeper is a tool for TDengine that exports monitoring metrics", + } + s, err := service.New(prg, svcConfig) + if err != nil { + logger.Fatal(err) + } + err = s.Run() + if err != nil { + logger.Fatal(err) + } +} + +type program struct { + server *http.Server +} + +func newProgram(server *http.Server) *program { + return &program{server: server} +} + +func (p *program) Start(s service.Service) error { + if service.Interactive() { + logger.Info("Running in terminal.") + } else { + logger.Info("Running under service manager.") + } + + server := p.server + go func() { + if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + panic(fmt.Errorf("taoskeeper start up fail! 
%v", err)) + } + }() + return nil +} + +func (p *program) Stop(s service.Service) error { + logger.Println("Shutdown WebServer ...") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := p.server.Shutdown(ctx); err != nil { + logger.Println("WebServer Shutdown error:", err) + } + + logger.Println("Server exiting") + ctxLog, cancelLog := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelLog() + logger.Println("Flushing Log") + log.Close(ctxLog) + return nil +} diff --git a/tools/keeper/system/program_test.go b/tools/keeper/system/program_test.go new file mode 100644 index 0000000000..eabc4fff35 --- /dev/null +++ b/tools/keeper/system/program_test.go @@ -0,0 +1,22 @@ +package system + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func TestStart(t *testing.T) { + server := Init() + assert.NotNil(t, server) + + conn, err := db.NewConnectorWithDb(config.Conf.TDengine.Username, config.Conf.TDengine.Password, config.Conf.TDengine.Host, config.Conf.TDengine.Port, config.Conf.Metrics.Database.Name, config.Conf.TDengine.Usessl) + assert.NoError(t, err) + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", config.Conf.Metrics.Database.Name), util.GetQidOwn()) + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", config.Conf.Audit.Database.Name), util.GetQidOwn()) +} diff --git a/tools/keeper/taoskeeper.service b/tools/keeper/taoskeeper.service new file mode 100644 index 0000000000..d8478bc59b --- /dev/null +++ b/tools/keeper/taoskeeper.service @@ -0,0 +1,19 @@ +[Unit] +Description=TaosKeeper - TDengine Metrics Exporter for Kinds of Collectors +Documentation=https://www.taosdata.com +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/taoskeeper +TimeoutSec=0 +RestartSec=2 +StandardOutput=null +StandardError=journal +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/tools/keeper/telegraf.conf b/tools/keeper/telegraf.conf new file mode 100644 index 0000000000..aa2e8e0b35 --- /dev/null +++ b/tools/keeper/telegraf.conf @@ -0,0 +1,6 @@ +[[inputs.prometheus]] +# An array of urls to scrape metrics from. 
+urls = ["${TAOSKEEPER}"] + +[[outputs.file]] +files = ["stdout"] diff --git a/tools/keeper/telegraf.yml b/tools/keeper/telegraf.yml new file mode 100644 index 0000000000..a02e9f669b --- /dev/null +++ b/tools/keeper/telegraf.yml @@ -0,0 +1,9 @@ +version: "3.6" +services: + telegraf: + image: telegraf:1.20-alpine + hostname: telegraf + volumes: + - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro + environment: + TAOSKEEPER: http://taoskeeper:6043/metrics diff --git a/tools/keeper/util/empty_test.go b/tools/keeper/util/empty_test.go new file mode 100644 index 0000000000..5d82866721 --- /dev/null +++ b/tools/keeper/util/empty_test.go @@ -0,0 +1,8 @@ +package util + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/util/pool/antpool.go b/tools/keeper/util/pool/antpool.go new file mode 100644 index 0000000000..7a4ecd46de --- /dev/null +++ b/tools/keeper/util/pool/antpool.go @@ -0,0 +1,15 @@ +package pool + +import ( + "github.com/panjf2000/ants/v2" +) + +var GoroutinePool *ants.Pool + +func Init(size int) { + var err error + GoroutinePool, err = ants.NewPool(size) + if err != nil { + panic(err) + } +} diff --git a/tools/keeper/util/pool/bytes.go b/tools/keeper/util/pool/bytes.go new file mode 100644 index 0000000000..0fc44f77b8 --- /dev/null +++ b/tools/keeper/util/pool/bytes.go @@ -0,0 +1,23 @@ +package pool + +import ( + "bytes" + "sync" +) + +var bytesBufferPool sync.Pool + +func init() { + bytesBufferPool.New = func() interface{} { + return &bytes.Buffer{} + } +} + +func BytesPoolGet() *bytes.Buffer { + return bytesBufferPool.Get().(*bytes.Buffer) +} + +func BytesPoolPut(b *bytes.Buffer) { + b.Reset() + bytesBufferPool.Put(b) +} diff --git a/tools/keeper/util/pool/empty_test.go b/tools/keeper/util/pool/empty_test.go new file mode 100644 index 0000000000..dcbca2d11d --- /dev/null +++ b/tools/keeper/util/pool/empty_test.go @@ -0,0 +1,8 @@ +package pool + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/util/util.go b/tools/keeper/util/util.go new file mode 100644 index 0000000000..a739c23760 --- /dev/null +++ b/tools/keeper/util/util.go @@ -0,0 +1,154 @@ +package util + +import ( + "crypto/md5" + "encoding/hex" + "os" + "strconv" + "strings" + "sync/atomic" + "time" + "unicode" + + "github.com/taosdata/taoskeeper/infrastructure/config" +) + +// https://github.com/containerd/cgroups/blob/main/utils.go +var globalCounter64 uint64 +var globalCounter32 uint32 + +var MAX_TABLE_NAME_LEN = 190 + +func init() { + atomic.StoreUint64(&globalCounter64, 0) + atomic.StoreUint32(&globalCounter32, 0) +} + +func ReadUint(path string) (uint64, error) { + v, err := os.ReadFile(path) + if err != nil { + return 0, err + } + return ParseUint(strings.TrimSpace(string(v)), 10, 64) +} + +func ParseUint(s string, base, bitSize int) (uint64, error) { + v, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values greater than MinInt64 (and) + // 2. 
Handle negative values lesser than MinInt64 + if intErr == nil && intValue < 0 { + return 0, nil + } else if intErr != nil && + intErr.(*strconv.NumError).Err == strconv.ErrRange && + intValue < 0 { + return 0, nil + } + return 0, err + } + return v, nil +} + +func EscapeInfluxProtocol(s string) string { + s = strings.TrimSuffix(s, "\\") + s = strings.ReplaceAll(s, ",", "\\,") + s = strings.ReplaceAll(s, "=", "\\=") + s = strings.ReplaceAll(s, " ", "\\ ") + s = strings.ReplaceAll(s, "\"", "\\\"") + return s +} + +func GetCfg() *config.Config { + c := &config.Config{ + InstanceID: 64, + Port: 6043, + LogLevel: "trace", + TDengine: config.TDengineRestful{ + Host: "127.0.0.1", + Port: 6041, + Username: "root", + Password: "taosdata", + Usessl: false, + }, + Metrics: config.MetricsConfig{ + Database: config.Database{ + Name: "keeper_test_log", + Options: map[string]interface{}{}, + }, + }, + Log: config.Log{ + Level: "trace", + Path: "/var/log/taos", + RotationCount: 10, + RotationTime: 24 * time.Hour, + RotationSize: 1073741824, + Compress: true, + ReservedDiskSize: 1073741824, + }, + } + return c +} + +func SafeSubstring(s string, n int) string { + if len(s) > n { + return s[:n] + } + return s +} + +func GetQid(qidStr string) uint64 { + if qidStr == "" || !strings.HasPrefix(qidStr, "0x") { + qid32 := atomic.AddUint32(&globalCounter32, 1) + qid64 := uint64(qid32) << 8 + return qid64 + } + + qid, err := strconv.ParseUint(qidStr[2:], 16, 64) + if err != nil { + qid32 := atomic.AddUint32(&globalCounter32, 1) + qid64 := uint64(qid32) << 8 + return qid64 + } + + // clear the last byte + qid = qid &^ 0xFF + + return qid +} + +func GetQidOwn() uint64 { + + id := atomic.AddUint64(&globalCounter64, 1) + + if id > 0x00ffffffffffffff { + atomic.StoreUint64(&globalCounter64, 1) + id = 1 + } + qid64 := uint64(config.Conf.InstanceID)<<56 | id + return qid64 +} + +func GetMd5HexStr(str string) string { + sum := md5.Sum([]byte(str)) + return hex.EncodeToString(sum[:]) +} + +func isValidChar(r rune) bool { + return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' +} + +func ToValidTableName(input string) string { + var builder strings.Builder + + for _, r := range input { + if isValidChar(r) { + builder.WriteRune(unicode.ToLower(r)) + } else { + builder.WriteRune('_') + } + } + + result := builder.String() + return result +} diff --git a/tools/keeper/version/version.go b/tools/keeper/version/version.go new file mode 100644 index 0000000000..a9e33f402b --- /dev/null +++ b/tools/keeper/version/version.go @@ -0,0 +1,9 @@ +package version + +var Version = "0.0.0.0" +var Gitinfo = "unknown" +var BuildInfo = "1970-01-01 00:00:00 +08:00" +var CommitID = "unknown" + +var CUS_NAME = "TDengine" +var CUS_PROMPT = "taos" diff --git a/tools/keeper/zbx_taos_keeper_templates.xml b/tools/keeper/zbx_taos_keeper_templates.xml new file mode 100644 index 0000000000..04e260cd21 --- /dev/null +++ b/tools/keeper/zbx_taos_keeper_templates.xml @@ -0,0 +1,111 @@ + + + 5.0 + 2021-12-06T05:55:45Z + + + taos + + + + + + \ No newline at end of file From bc05289192b50ebda674fa77a510281bd2f497e5 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 18 Oct 2024 09:28:16 +0800 Subject: [PATCH 030/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- include/common/tmsg.h | 1 - include/libs/executor/executor.h | 1 - source/client/src/clientRawBlockWrite.c | 110 ------------------------ source/common/src/tmsg.c | 40 +-------- source/dnode/vnode/src/tq/tqScan.c | 15 ---- source/dnode/vnode/src/tq/tqUtil.c | 5 
-- source/libs/executor/inc/querytask.h | 1 - source/libs/executor/src/executor.c | 5 -- 8 files changed, 1 insertion(+), 177 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index eb7a08357b..9058dfe53f 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -4113,7 +4113,6 @@ typedef struct { SArray* blockData; SArray* blockTbName; SArray* blockSchema; -// SArray* blockSuid; union{ struct{ diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 8501d88be0..6d4b3df041 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -210,7 +210,6 @@ SMqBatchMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo); const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo); const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); -//const int64_t qExtractSuidFromTask(qTaskInfo_t tinfo); void* qExtractReaderFromStreamScanner(void* scanner); diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 6c54c3aa69..206bc63d19 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1706,116 +1706,6 @@ static void* getRawDataFromRes(void* pRetrieve) { return rawData; } -//static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { -// if (taos == NULL || data == NULL) { -// SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); -// return TSDB_CODE_INVALID_PARA; -// } -// int32_t code = TSDB_CODE_SUCCESS; -// SHashObj* pVgHash = NULL; -// SQuery* pQuery = NULL; -// SMqRspObj rspObj = {0}; -// SDecoder decoder = {0}; -// STableMeta* pTableMeta = NULL; -// -// SRequestObj* pRequest = NULL; -// RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); -// -// uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); -// pRequest->syncQuery = true; -// rspObj.resIter = -1; -// rspObj.resType = RES_TYPE__TMQ; -// -// int8_t dataVersion = *(int8_t*)data; -// if (dataVersion >= MQ_DATA_RSP_VERSION) { -// data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); -// dataLen -= sizeof(int8_t) + sizeof(int32_t); -// } -// tDecoderInit(&decoder, data, dataLen); -// code = tDecodeMqDataRsp(&decoder, &rspObj.dataRsp); -// if (code != 0) { -// SET_ERROR_MSG("decode mq data rsp failed"); -// code = TSDB_CODE_INVALID_MSG; -// goto end; -// } -// -// if (!pRequest->pDb) { -// code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; -// goto end; -// } -// -// struct SCatalog* pCatalog = NULL; -// RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); -// -// SRequestConnInfo conn = {0}; -// conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; -// conn.requestId = pRequest->requestId; -// conn.requestObjRefId = pRequest->self; -// conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); -// -// RAW_RETURN_CHECK(smlInitHandle(&pQuery)); -// pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); -// RAW_NULL_CHECK(pVgHash); -// while (++rspObj.resIter < rspObj.dataRsp.blockNum) { -// void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); -// RAW_NULL_CHECK(pRetrieve); -// if (!rspObj.dataRsp.withSchema) { -// goto end; -// } -// -// const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); -// RAW_NULL_CHECK(tbName); -// -// SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; -// (void)strcpy(pName.dbname, pRequest->pDb); -// 
(void)strcpy(pName.tname, tbName); -// -// RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); -// -// SVgroupInfo vg = {0}; -// RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg)); -// -// void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId)); -// if (hData == NULL) { -// RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); -// } -// -// SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); -// RAW_NULL_CHECK(pSW); -// TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD)); -// RAW_NULL_CHECK(fields); -// for (int i = 0; i < pSW->nCols; i++) { -// fields[i].type = pSW->pSchema[i].type; -// fields[i].bytes = pSW->pSchema[i].bytes; -// tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name)); -// } -// void* rawData = getRawDataFromRes(pRetrieve); -// char err[ERR_MSG_LEN] = {0}; -// code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, fields, pSW->nCols, true, err, ERR_MSG_LEN); -// taosMemoryFree(fields); -// taosMemoryFreeClear(pTableMeta); -// if (code != TSDB_CODE_SUCCESS) { -// SET_ERROR_MSG("table:%s, err:%s", tbName, err); -// goto end; -// } -// } -// -// RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); -// -// launchQueryImpl(pRequest, pQuery, true, NULL); -// code = pRequest->code; -// -// end: -// uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code)); -// tDeleteMqDataRsp(&rspObj.dataRsp); -// tDecoderClear(&decoder); -// qDestroyQuery(pQuery); -// destroyRequest(pRequest); -// taosHashCleanup(pVgHash); -// taosMemoryFreeClear(pTableMeta); -// return code; -//} - static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { // find schema data info int32_t code = 0; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index e0009177e5..9c8544fcd4 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -10980,42 +10980,13 @@ _exit: return code; } -//int32_t tEncodeSuidArray(SEncoder *pEncoder, const SMqDataRsp *pRsp){ -// for (int32_t i = 0; i < pRsp->blockNum; i++) { -// if (pRsp->withTbName) { -// int64_t* suid = taosArrayGet(pRsp->blockSuid, i); -// if (suid != NULL){ -// TAOS_CHECK_RETURN(tEncodeI64(pEncoder, *suid)); -// } -// } -// } -// return 0; -//} int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_RETURN(tEncodeMqDataRspCommon(pEncoder, pRsp)); TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pRsp->sleepTime)); -// TAOS_CHECK_RETURN(tEncodeSuidArray(pEncoder, pRsp)); return 0; } -//int32_t tDecodeSuidArray(SDecoder *pDecoder, SMqDataRsp *pRsp){ -// if (!tDecodeIsEnd(pDecoder)) { -// if (pRsp->withTbName) { -// if ((pRsp->blockSuid = taosArrayInit(pRsp->blockNum, sizeof(int64_t))) == NULL) { -// TAOS_CHECK_RETURN(terrno); -// } -// } -// -// for (int32_t i = 0; i < pRsp->blockNum; i++) { -// int64_t suid = 0; -// TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &suid)); -// if (taosArrayPush(pRsp->blockSuid, &suid) == NULL) { -// TAOS_CHECK_RETURN(terrno); -// } -// } -// } -// return 0; -//} + int32_t tDecodeMqDataRspCommon(SDecoder *pDecoder, SMqDataRsp *pRsp) { int32_t code = 0; int32_t lino; @@ -11092,9 +11063,6 @@ int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { if (!tDecodeIsEnd(pDecoder)) { TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &pRsp->sleepTime)); } -// if (!tDecodeIsEnd(pDecoder)) { -// TAOS_CHECK_RETURN(tDecodeSuidArray(pDecoder, pRsp)); -// } return 0; } @@ -11108,8 +11076,6 
@@ static void tDeleteMqDataRspCommon(SMqDataRsp *pRsp) { pRsp->blockSchema = NULL; taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree); pRsp->blockTbName = NULL; -// taosArrayDestroy(pRsp->blockSuid); -// pRsp->blockSuid = NULL; tOffsetDestroy(&pRsp->reqOffset); tOffsetDestroy(&pRsp->rspOffset); } @@ -11129,7 +11095,6 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, createTableReq, createTableLen)); } } -// TAOS_CHECK_EXIT(tEncodeSuidArray(pEncoder, pRsp)); _exit: return code; @@ -11161,9 +11126,6 @@ int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { } } } -// if (!tDecodeIsEnd(pDecoder)) { -// TAOS_CHECK_EXIT(tDecodeSuidArray(pDecoder, pRsp)); -// } _exit: return code; diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c index 3e4895378b..0e7e27fcff 100644 --- a/source/dnode/vnode/src/tq/tqScan.c +++ b/source/dnode/vnode/src/tq/tqScan.c @@ -77,16 +77,6 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp, i tqError("failed to push tbName to blockTbName:%s", tbName); continue; } -// int64_t suid = 0; -// if(mr.me.type == TSDB_CHILD_TABLE){ -// suid = mr.me.ctbEntry.suid; -// }else{ -// suid = mr.me.uid; -// } -// if(taosArrayPush(pRsp->blockSuid, &suid) == NULL){ -// tqError("failed to push suid to blockSuid:%"PRId64, suid); -// continue; -// } } metaReaderClear(&mr); return 0; @@ -229,11 +219,6 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); continue; } -// int64_t suid = qExtractSuidFromTask(task); -// if (taosArrayPush(pRsp->blockSuid, &suid) == NULL){ -// tqError("vgId:%d, failed to add suid to rsp msg", pTq->pVnode->config.vgId); -// continue; -// } } if (pRsp->withSchema) { SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 6acfe6b074..3f11937463 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -50,7 +50,6 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t)); pRsp->blockTbName = taosArrayInit(0, sizeof(void*)); pRsp->blockSchema = taosArrayInit(0, sizeof(void*)); -// pRsp->blockSuid = taosArrayInit(0, sizeof(int64_t)); if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) { @@ -74,10 +73,6 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { pRsp->blockSchema = NULL; } -// if (pRsp->blockSuid != NULL) { -// taosArrayDestroy(pRsp->blockSuid); -// pRsp->blockSuid = NULL; -// } return terrno; } diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index c9e65bacaf..e3bb9a1361 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -69,7 +69,6 @@ typedef struct { SVersionRange fillHistoryVer; STimeWindow fillHistoryWindow; SStreamState* pState; -// int64_t suid; // for tmq } SStreamTaskInfo; struct SExecTaskInfo { diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 4cfa12be5b..b9e103ebca 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1212,11 +1212,6 @@ const char* qExtractTbnameFromTask(qTaskInfo_t tinfo) { return pTaskInfo->streamInfo.tbName; } -//const 
int64_t qExtractSuidFromTask(qTaskInfo_t tinfo) { -// SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; -// return pTaskInfo->streamInfo.suid; -//} - SMqBatchMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; return &pTaskInfo->streamInfo.btMetaRsp; From 936efc7529f700bb097508ce77be9b17f7da0b84 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Fri, 18 Oct 2024 16:43:41 +0800 Subject: [PATCH 031/142] refactor(*): modify the position of IsEnterprise --- tools/keeper/infrastructure/config/config.go | 4 +--- tools/keeper/system/program.go | 4 ++-- tools/keeper/version/version.go | 2 ++ 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/keeper/infrastructure/config/config.go b/tools/keeper/infrastructure/config/config.go index f4bbe1b274..d3e884ba8f 100644 --- a/tools/keeper/infrastructure/config/config.go +++ b/tools/keeper/infrastructure/config/config.go @@ -14,8 +14,6 @@ import ( "github.com/taosdata/taoskeeper/version" ) -var IsEnterprise = "false" - var Name = fmt.Sprintf("%skeeper", version.CUS_PROMPT) const ReqIDKey = "QID" @@ -228,7 +226,7 @@ func init() { initLog() - if IsEnterprise == "true" { + if version.IsEnterprise == "true" { initAudit() } } diff --git a/tools/keeper/system/program.go b/tools/keeper/system/program.go index 437da68f08..b8f1d8943f 100644 --- a/tools/keeper/system/program.go +++ b/tools/keeper/system/program.go @@ -48,7 +48,7 @@ func Init() *http.Server { node := api.NewNodeExporter(processor) node.Init(router) - if config.IsEnterprise == "true" { + if version.IsEnterprise == "true" { zabbix := api.NewZabbix(processor) zabbix.Init(router) } @@ -57,7 +57,7 @@ func Init() *http.Server { checkHealth := api.NewCheckHealth(version.Version) checkHealth.Init(router) - if config.IsEnterprise == "true" { + if version.IsEnterprise == "true" { if conf.Audit.Enable { audit, err := api.NewAudit(conf) if err != nil { diff --git a/tools/keeper/version/version.go b/tools/keeper/version/version.go index a9e33f402b..c29a40c58e 100644 --- a/tools/keeper/version/version.go +++ b/tools/keeper/version/version.go @@ -7,3 +7,5 @@ var CommitID = "unknown" var CUS_NAME = "TDengine" var CUS_PROMPT = "taos" + +var IsEnterprise = "false" From 904d738d7e93fe6bd3bc49c8e2a4a3277497af88 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Fri, 18 Oct 2024 16:46:19 +0800 Subject: [PATCH 032/142] chore(*): add taoskeeper build process --- cmake/cmake.define | 13 ++++++ packaging/tools/make_install.sh | 72 +++++++++++++++++++++++++++++++++ tools/CMakeLists.txt | 46 +++++++++++++++++++++ 3 files changed, 131 insertions(+) diff --git a/cmake/cmake.define b/cmake/cmake.define index eb78b54cae..52b8a66a4a 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -57,6 +57,19 @@ IF (TD_BUILD_HTTP) ADD_DEFINITIONS(-DHTTP_EMBEDDED) ENDIF () +IF ("${BUILD_KEEPER}" STREQUAL "") + SET(TD_BUILD_KEEPER FALSE) +ELSEIF (${BUILD_KEEPER} MATCHES "false") + SET(TD_BUILD_KEEPER FALSE) +ELSEIF (${BUILD_KEEPER} MATCHES "true") + SET(TD_BUILD_KEEPER TRUE) +ELSEIF (${BUILD_KEEPER} MATCHES "internal") + SET(TD_BUILD_KEEPER FALSE) + SET(TD_BUILD_KEEPER_INTERNAL TRUE) +ELSE () + SET(TD_BUILD_KEEPER FALSE) +ENDIF () + IF ("${BUILD_TOOLS}" STREQUAL "") IF (TD_LINUX) IF (TD_ARM_32) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index ea19125bf5..193ec4f9d0 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -129,6 +129,13 @@ function kill_taosadapter() { fi } +function 
kill_taoskeeper() { + pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo}kill -9 $pid || : + fi +} + function kill_taosd() { pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then @@ -155,6 +162,7 @@ function install_bin() { ${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : + ${csudo}rm -f ${bin_link_dir}/taoskeeper || : ${csudo}rm -f ${bin_link_dir}/udfd || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${bin_link_dir}/taosdump || : @@ -169,6 +177,7 @@ function install_bin() { [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || : ${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || : @@ -183,6 +192,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || : + [ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo > /dev/null 2>&1 || : @@ -197,6 +207,7 @@ function install_bin() { [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/*explorer ] && ${csudo}cp -r ${binary_dir}/build/bin/*explorer ${install_main_dir}/bin || : @@ -208,6 +219,7 @@ function install_bin() { [ -x 
${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || : + [ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || : [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : @@ -407,6 +419,29 @@ function install_taosadapter_config() { fi } +function install_taoskeeper_config() { + if [ ! -f "${cfg_install_dir}/taoskeeper.toml" ]; then + ${csudo}mkdir -p ${cfg_install_dir} || : + [ -f ${binary_dir}/test/cfg/taoskeeper.toml ] && + ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_install_dir} && + ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || : + [ -f ${cfg_install_dir}/taoskeeper.toml ] && + ${csudo}chmod 644 ${cfg_install_dir}/taoskeeper.toml || : + [ -f ${binary_dir}/test/cfg/taoskeeper.toml ] && + ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \ + ${cfg_install_dir}/taoskeeper.toml.${verNumber} || : + [ -f ${cfg_install_dir}/taoskeeper.toml ] && + ${csudo}ln -s ${cfg_install_dir}/taoskeeper.toml \ + ${install_main_dir}/cfg/taoskeeper.toml > /dev/null 2>&1 || : + else + if [ -f "${binary_dir}/test/cfg/taoskeeper.toml" ]; then + ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \ + ${cfg_install_dir}/taoskeeper.toml.${verNumber} || : + ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || : + fi + fi +} + function install_log() { ${csudo}rm -rf ${log_dir} || : ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} @@ -526,6 +561,15 @@ function install_taosadapter_service() { fi } +function install_taoskeeper_service() { + if ((${service_mod} == 0)); then + [ -f ${binary_dir}/test/cfg/taoskeeper.service ] && + ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload + fi +} + function install_service_on_launchctl() { ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : ${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist @@ -534,6 +578,10 @@ function install_service_on_launchctl() { ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : ${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : + + ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || : + ${csudo}cp ${script_dir}/com.taosdata.taoskeeper.plist /Library/LaunchDaemons/com.taosdata.taoskeeper.plist + ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || : } function install_service() { @@ -549,6 +597,7 @@ function 
install_service() { install_service_on_launchctl fi } + function install_app() { if [ "$osType" = "Darwin" ]; then ${csudo}rm -rf /Applications/TDengine.app && @@ -573,6 +622,7 @@ function update_TDengine() { elif ((${service_mod} == 1)); then ${csudo}service ${serverName} stop || : else + kill_taoskeeper kill_taosadapter kill_taosd fi @@ -591,9 +641,11 @@ function update_TDengine() { install_service install_taosadapter_service + install_taoskeeper_service install_config install_taosadapter_config + install_taoskeeper_service echo echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" @@ -602,22 +654,31 @@ function update_TDengine() { echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}" else if [ "$osType" != "Darwin" ]; then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}" else echo -e "${GREEN_DARK}To start service ${NC}: launchctl start com.tdengine.taosd${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: launchctl start com.tdengine.taosadapter${NC}" + echo -e "${GREEN_DARK}To start Keeper ${NC}: launchctl start com.tdengine.taoskeeper${NC}" fi fi @@ -643,9 +704,11 @@ function install_TDengine() { install_service install_taosadapter_service + install_taoskeeper_service install_config install_taosadapter_config + install_taoskeeper_config # Ask if to start the service echo @@ -654,22 +717,31 @@ function install_TDengine() { echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" [ -f 
${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}" else if [ "$osType" != "Darwin" ]; then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}" else echo -e "${GREEN_DARK}To start service ${NC}: launchctl start com.tdengine.taosd${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: launchctl start com.tdengine.taosadapter${NC}" + echo -e "${GREEN_DARK}To start Keeper ${NC}: launchctl start com.tdengine.taoskeeper${NC}" fi fi diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index a16a03d30a..832bd68899 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -208,3 +208,49 @@ ELSE () ) ENDIF () ENDIF () + +IF(TD_BUILD_KEEPER) + MESSAGE("") + MESSAGE("${Green} build taoskeeper, current platform is ${PLATFORM_ARCH_STR} ${ColourReset}") + + EXECUTE_PROCESS( + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/keeper + COMMAND git rev-parse HEAD + OUTPUT_VARIABLE taoskeeper_commit_sha1 + ) + + IF("${taoskeeper_commit_sha1}" STREQUAL "") + SET(taoskeeper_commit_sha1 "unknown") + ELSE() + STRING(STRIP "${taoskeeper_commit_sha1}" taoskeeper_commit_sha1) + ENDIF() + + SET(taos_version ${TD_VER_NUMBER}) + MESSAGE("${Green} taoskeeper will use ${taos_version} and commit ${taoskeeper_commit_sha1} as version ${ColourReset}") + MESSAGE(" current source dir is ${CMAKE_CURRENT_SOURCE_DIR}") + + IF(TD_LINUX) + MESSAGE("Building taoskeeper on Linux") + INCLUDE(ExternalProject) + ExternalProject_Add(taoskeeper + PREFIX "taoskeeper" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper + BUILD_ALWAYS off + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + INSTALL_COMMAND + COMMAND cmake -E echo "Copy taoskeeper" + COMMAND cmake -E copy taoskeeper ${CMAKE_BINARY_DIR}/build/bin + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.toml" + COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./taoskeeper.service ${CMAKE_BINARY_DIR}/test/cfg/ + ) + ENDIF() +ELSEIF(TD_BUILD_KEEPER_INTERNAL) + MESSAGE("${Yellow} use taoskeeper internal ${ColourReset}") +ENDIF() From 399e2429f45b7a2052aeb845eb15241a9f724593 Mon Sep 17 
00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Fri, 18 Oct 2024 16:56:41 +0800 Subject: [PATCH 033/142] chore(packaging): add taoskeeper --- packaging/tools/make_install.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index c0e42e3ceb..f395909d38 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -678,6 +678,7 @@ function update_TDengine() { else echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}" fi fi @@ -740,6 +741,7 @@ function install_TDengine() { else echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}" fi fi From 51c7f8b4f003804990d3bb02dfe09cb2858b5973 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 18 Oct 2024 17:36:57 +0800 Subject: [PATCH 034/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 325 ++++++++++++++---------- 1 file changed, 192 insertions(+), 133 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 206bc63d19..eb4eed8b6d 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -888,9 +888,6 @@ end: } static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVCreateStbReq req = {0}; SDecoder coder; SMCreateStbReq pReq = {0}; @@ -1001,9 +998,6 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { } static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVDropStbReq req = {0}; SDecoder coder = {0}; SMDropStbReq pReq = {0}; @@ -1113,9 +1107,6 @@ static void destroyCreateTbReqBatch(void* data) { } static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVCreateTbBatchReq req = {0}; SDecoder coder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1302,9 +1293,6 @@ static void destroyDropTbReqBatch(void* data) { } static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVDropTbBatchReq req = {0}; SDecoder coder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1417,9 +1405,6 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { } static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SDeleteRes req = {0}; SDecoder coder = {0}; char sql[256] = {0}; @@ -1455,9 +1440,6 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { } static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVAlterTbReq req = {0}; SDecoder dcoder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1771,7 +1753,7 @@ static void freeRawCache(void *data) { static 
int32_t initRawCacheHash(){ if (writeRawCache == NULL){ - writeRawCache = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + writeRawCache = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK); if (writeRawCache == NULL){ return terrno; } @@ -1844,57 +1826,113 @@ end: return code; } -static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t dataLen) { - if (taos == NULL || data == NULL) { - SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); - return TSDB_CODE_INVALID_PARA; +static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo *conn){ + int32_t code = 0; + RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, pRequest)); + (*pRequest)->syncQuery = true; + if (!(*pRequest)->pDb) { + code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + goto end; } - int32_t code = TSDB_CODE_SUCCESS; - SQuery* pQuery = NULL; - SMqRspObj rspObj = {0}; - SDecoder decoder = {0}; - SHashObj* pCreateTbHash = NULL; - SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); - uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); - pRequest->syncQuery = true; - rspObj.resIter = -1; -// rspObj.resType = RES_TYPE__TMQ_METADATA; + RAW_RETURN_CHECK(catalogGetHandle((*pRequest)->pTscObj->pAppInfo->clusterId, pCatalog)); + conn->pTrans = (*pRequest)->pTscObj->pAppInfo->pTransporter; + conn->requestId = (*pRequest)->requestId; + conn->requestObjRefId = (*pRequest)->self; + conn->mgmtEps = getEpSet_s(&(*pRequest)->pTscObj->pAppInfo->mgmtEp); +end: + return code; +} + +typedef int32_t _raw_decode_func_(SDecoder *pDecoder, SMqDataRsp *pRsp); +static int32_t decodeRawData(SDecoder *decoder, void* data, int32_t dataLen, _raw_decode_func_ func, SMqRspObj* rspObj){ int8_t dataVersion = *(int8_t*)data; if (dataVersion >= MQ_DATA_RSP_VERSION) { data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); dataLen -= sizeof(int8_t) + sizeof(int32_t); } - tDecoderInit(&decoder, data, dataLen); - code = (type == RES_TYPE__TMQ_METADATA) ? 
tDecodeSTaosxRsp(&decoder, &rspObj.dataRsp) : tDecodeMqDataRsp(&decoder, &rspObj.dataRsp); + rspObj->resIter = -1; + tDecoderInit(decoder, data, dataLen); + int32_t code = func(decoder, &rspObj->dataRsp); if (code != 0) { SET_ERROR_MSG("decode mq taosx data rsp failed"); - code = TSDB_CODE_INVALID_MSG; - goto end; + } + return code; +} + +static int32_t processCacheMeta(SHashObj *pVgHash, SHashObj *pNameHash, SHashObj *pMetaHash, SVCreateTbReq* pCreateReqDst, + SCatalog* pCatalog, SRequestConnInfo* conn, SName* pName, + STableMeta** pMeta, SSchemaWrapper* pSW, void* rawData, int32_t retry){ + int32_t code = 0; + STableMeta* pTableMeta = NULL; + tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); + if (tmpInfo == NULL || retry > 0) { + tbInfo info = {0}; + + RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, conn, pName, &info.vgInfo)); + if (pCreateReqDst && tmpInfo == NULL) { // change stable name to get meta + tstrncpy(pName->tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN); + } + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta)); + info.uid = pTableMeta->uid; + if (pTableMeta->tableType == TSDB_CHILD_TABLE){ + info.suid = pTableMeta->suid; + } else { + info.suid = pTableMeta->uid; + } + code = taosHashPut(pMetaHash, &info.suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); + if (code != 0){ + taosMemoryFree(pTableMeta); + goto end; + } + if (pCreateReqDst) { + pTableMeta->vgId = info.vgInfo.vgId; + pTableMeta->uid = pCreateReqDst->uid; + pCreateReqDst->ctb.suid = pTableMeta->suid; + } + + RAW_RETURN_CHECK(taosHashPut(pNameHash, pName->tname, strlen(pName->tname), &info, sizeof(tbInfo))); + tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); + RAW_RETURN_CHECK(taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo))); } - if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; + if (pTableMeta == NULL || retry > 0){ + STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, &tmpInfo->suid, LONG_BYTES); + if (pTableMetaTmp == NULL || retry > 0 || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) { + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta)); + code = taosHashPut(pMetaHash, &tmpInfo->suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); + if (code != 0){ + taosMemoryFree(pTableMeta); + goto end; + } + + }else{ + pTableMeta = *pTableMetaTmp; + pTableMeta->uid = tmpInfo->uid; + pTableMeta->vgId = tmpInfo->vgInfo.vgId; + } } + *pMeta = pTableMeta; - struct SCatalog* pCatalog = NULL; - RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); +end: + return code; +} - SRequestConnInfo conn = {0}; - conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; - conn.requestId = pRequest->requestId; - conn.requestObjRefId = pRequest->self; - conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); +static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen){ + int32_t code = TSDB_CODE_SUCCESS; + SQuery* pQuery = NULL; + SMqRspObj rspObj = {0}; + SDecoder decoder = {0}; - if (type == RES_TYPE__TMQ_METADATA) { - pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pCreateTbHash); - RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); - } + SRequestObj* pRequest = NULL; + SCatalog* pCatalog = NULL; + SRequestConnInfo conn = {0}; + + uDebug(LOG_ID_TAG " write raw data, data:%p, 
dataLen:%d", LOG_ID_VALUE, data, dataLen); + RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); + RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeMqDataRsp, &rspObj)); SHashObj *pVgHash = NULL; SHashObj *pNameHash = NULL; @@ -1903,89 +1941,33 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da int retry = 0; while(1){ RAW_RETURN_CHECK(smlInitHandle(&pQuery)); - uDebug(LOG_ID_TAG " write raw data type:%d block num:%d", LOG_ID_VALUE, type, rspObj.dataRsp.blockNum); + uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); while (++rspObj.resIter < rspObj.dataRsp.blockNum) { - void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); - RAW_NULL_CHECK(pRetrieve); if (!rspObj.dataRsp.withSchema) { goto end; } const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); RAW_NULL_CHECK(tbName); - - uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); - SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; - (void)strcpy(pName.dbname, pRequest->pDb); - (void)strcpy(pName.tname, tbName); - - // find schema data info - SVCreateTbReq* pCreateReqDst = NULL; - if (type == RES_TYPE__TMQ_METADATA){ - pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); - } - STableMeta* pTableMeta = NULL; - tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, tbName, strlen(tbName)); - if (tmpInfo == NULL || retry > 0) { - tbInfo info = {0}; - - RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &info.vgInfo)); - if (pCreateReqDst && tmpInfo == NULL) { // change stable name to get meta - (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); - } - RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); - info.uid = pTableMeta->uid; - if (pTableMeta->tableType == TSDB_CHILD_TABLE){ - info.suid = pTableMeta->suid; - } else { - info.suid = pTableMeta->uid; - } - code = taosHashPut(pMetaHash, &info.suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); - if (code != 0){ - taosMemoryFree(pTableMeta); - goto end; - } - if (pCreateReqDst) { - pTableMeta->vgId = info.vgInfo.vgId; - pTableMeta->uid = pCreateReqDst->uid; - pCreateReqDst->ctb.suid = pTableMeta->suid; - } - - RAW_RETURN_CHECK(taosHashPut(pNameHash, pName.tname, strlen(pName.tname), &info, sizeof(tbInfo))); - tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName.tname, strlen(pName.tname)); -// code = (code == TSDB_CODE_DUP_KEY) ? 0 : code; -// RAW_RETURN_CHECK(code); - RAW_RETURN_CHECK(taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo))); -// code = (code == TSDB_CODE_DUP_KEY) ? 
0 : code; -// RAW_RETURN_CHECK(code); - } - SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); RAW_NULL_CHECK(pSW); + void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); + RAW_NULL_CHECK(pRetrieve); void* rawData = getRawDataFromRes(pRetrieve); RAW_NULL_CHECK(rawData); - if (pTableMeta == NULL || retry > 0){ - STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, &tmpInfo->suid, LONG_BYTES); - if (pTableMetaTmp == NULL || retry > 0 || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) { - RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); - code = taosHashPut(pMetaHash, &tmpInfo->suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); - if (code != 0){ - taosMemoryFree(pTableMeta); - goto end; - } - - }else{ - pTableMeta = *pTableMetaTmp; - pTableMeta->uid = tmpInfo->uid; - pTableMeta->vgId = tmpInfo->vgInfo.vgId; - } - } + uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); + SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; + tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); + tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); + STableMeta* pTableMeta = NULL; + processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, + &pName, &pTableMeta, pSW, rawData, retry); char err[ERR_MSG_LEN] = {0}; - code = rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); + code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); if (code != TSDB_CODE_SUCCESS) { - SET_ERROR_MSG("table:%s, err:%s", tbName, err); + SET_ERROR_MSG("table:%s, err:%s", pName.tname, err); goto end; } } @@ -1993,11 +1975,8 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; - if (NEED_CLIENT_HANDLE_ERROR(code)) { + if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) { uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code)); - if (retry++ >= 3) { - break; - } qDestroyQuery(pQuery); pQuery = NULL; rspObj.resIter = -1; @@ -2007,12 +1986,89 @@ static int32_t tmqWriteRawImpl(TAOS* taos, uint16_t type, void* data, int32_t da } end: - uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); - if (type == RES_TYPE__TMQ_METADATA){ - tDeleteSTaosxRsp(&rspObj.dataRsp); - }else { - tDeleteMqDataRsp(&rspObj.dataRsp); + uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code)); + tDeleteMqDataRsp(&rspObj.dataRsp); + tDecoderClear(&decoder); + qDestroyQuery(pQuery); + destroyRequest(pRequest); + return code; +} + +static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { + int32_t code = TSDB_CODE_SUCCESS; + SQuery* pQuery = NULL; + SMqRspObj rspObj = {0}; + SDecoder decoder = {0}; + SHashObj* pCreateTbHash = NULL; + + SRequestObj* pRequest = NULL; + SCatalog* pCatalog = NULL; + SRequestConnInfo conn = {0}; + + uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); + RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); + RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeSTaosxRsp, &rspObj)); + + pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + RAW_NULL_CHECK(pCreateTbHash); + RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); + + SHashObj 
*pVgHash = NULL; + SHashObj *pNameHash = NULL; + SHashObj *pMetaHash = NULL; + RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos)); + int retry = 0; + while(1){ + RAW_RETURN_CHECK(smlInitHandle(&pQuery)); + uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); + while (++rspObj.resIter < rspObj.dataRsp.blockNum) { + if (!rspObj.dataRsp.withSchema) { + goto end; + } + + const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); + RAW_NULL_CHECK(tbName); + SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); + RAW_NULL_CHECK(pSW); + void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); + RAW_NULL_CHECK(pRetrieve); + void* rawData = getRawDataFromRes(pRetrieve); + RAW_NULL_CHECK(rawData); + + uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); + SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; + tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); + tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); + + // find schema data info + SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, pName.tname, strlen(pName.tname)); + STableMeta* pTableMeta = NULL; + processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, + &pName, &pTableMeta, pSW, rawData, retry); + char err[ERR_MSG_LEN] = {0}; + code = rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); + if (code != TSDB_CODE_SUCCESS) { + SET_ERROR_MSG("table:%s, err:%s", pName.tname, err); + goto end; + } + } + RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); + launchQueryImpl(pRequest, pQuery, true, NULL); + code = pRequest->code; + + if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) { + uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code)); + qDestroyQuery(pQuery); + pQuery = NULL; + rspObj.resIter = -1; + continue; + } + break; } + +end: + uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); + tDeleteSTaosxRsp(&rspObj.dataRsp); void* pIter = taosHashIterate(pCreateTbHash, NULL); while (pIter) { tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); @@ -2243,8 +2299,10 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) return taosDropTable(taos, buf, len); } else if (type == TDMT_VND_DELETE) { return taosDeleteData(taos, buf, len); - } else if (type == RES_TYPE__TMQ_METADATA || type == RES_TYPE__TMQ) { - return tmqWriteRawImpl(taos, type, buf, len); + } else if (type == RES_TYPE__TMQ_METADATA){ + return tmqWriteRawMetaDataImpl(taos, buf, len); + } else if (type == RES_TYPE__TMQ) { + return tmqWriteRawDataImpl(taos, buf, len); } else if (type == RES_TYPE__TMQ_BATCH_META) { return tmqWriteBatchMetaDataImpl(taos, buf, len); } @@ -2252,7 +2310,8 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) } int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) { - if (!taos) { + if (taos == NULL || raw.raw == NULL || raw.raw_len <= 0) { + SET_ERROR_MSG("taos:%p or data:%p is NULL or raw_len <= 0", taos, raw.raw); return TSDB_CODE_INVALID_PARA; } From ccfe5246365487f3158a8ced9770bbea25a93b5e Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 18 Oct 2024 18:28:09 +0800 Subject: [PATCH 035/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 35 
++++++++++++++++++++----- 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index eb4eed8b6d..d955fad150 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1724,8 +1724,15 @@ end: return code; } +typedef enum { + WRITE_RAW_INIT_START = 0, + WRITE_RAW_INIT_OK, + WRITE_RAW_INIT_FAIL, +}; + static SHashObj* writeRawCache = NULL; static int8_t initFlag = 0; +static int8_t initedFlag = WRITE_RAW_INIT_START; typedef struct{ SHashObj* pVgHash; @@ -2276,15 +2283,31 @@ void tmq_free_raw(tmq_raw_data raw) { (void)memset(terrMsg, 0, ERR_MSG_LEN); } -static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) { - int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1); - if (old == 0) { - int32_t code = initRawCacheHash(); - if (code != 0) { - return code; +static int32_t writeRawInit(){ + while (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_OK) { + int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1); + if (old == 0) { + int32_t code = initRawCacheHash(); + if (code != 0) { + uError("tmq writeRawImpl init error:%d", code); + atomic_store_8(&initedFlag, WRITE_RAW_INIT_FAIL); + return code; + } + atomic_store_8(&initedFlag, WRITE_RAW_INIT_OK); } } + if (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_FAIL){ + return TSDB_CODE_INTERNAL_ERROR; + } + return 0; +} + +static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) { + if (writeRawInit() != 0) { + return TSDB_CODE_INTERNAL_ERROR; + } + if (type == TDMT_VND_CREATE_STB) { return taosCreateStb(taos, buf, len); } else if (type == TDMT_VND_ALTER_STB) { From a6f6d41297c1de50effe63a4003a92cd099f2a35 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 21 Oct 2024 09:25:22 +0800 Subject: [PATCH 036/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index d955fad150..59dcbdf292 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1728,7 +1728,7 @@ typedef enum { WRITE_RAW_INIT_START = 0, WRITE_RAW_INIT_OK, WRITE_RAW_INIT_FAIL, -}; +} WRITE_RAW_INIT_STATUS; static SHashObj* writeRawCache = NULL; static int8_t initFlag = 0; From c2a5325a6f36fcfd52017145940020f7142f7fda Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Mon, 21 Oct 2024 13:45:11 +0800 Subject: [PATCH 037/142] chore(*): modify taoskeeper build process --- packaging/tools/make_install.sh | 2 +- tools/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index f395909d38..b23c675e10 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -645,7 +645,7 @@ function update_TDengine() { install_config install_taosadapter_config - install_taoskeeper_service + install_taoskeeper_config echo echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 832bd68899..686beb39de 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -252,5 +252,5 @@ IF(TD_BUILD_KEEPER) ) ENDIF() ELSEIF(TD_BUILD_KEEPER_INTERNAL) - MESSAGE("${Yellow} use taoskeeper internal ${ColourReset}") + MESSAGE("${Yellow} taoskeeper community edition does not 
support ${ColourReset}") ENDIF() From 41a7dce0df12eb7e4daedbd772e58f2e4aa347ce Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Mon, 21 Oct 2024 14:00:46 +0800 Subject: [PATCH 038/142] chore(tools): modify taoskeeper message --- tools/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 686beb39de..e294067ff5 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -252,5 +252,5 @@ IF(TD_BUILD_KEEPER) ) ENDIF() ELSEIF(TD_BUILD_KEEPER_INTERNAL) - MESSAGE("${Yellow} taoskeeper community edition does not support ${ColourReset}") + MESSAGE("${Yellow} taoskeeper does not support internal option ${ColourReset}") ENDIF() From 02c16bddc6dd76f3efc8f0043bae29d720b32725 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 21 Oct 2024 14:40:49 +0800 Subject: [PATCH 039/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 59dcbdf292..22de6e4a75 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1969,8 +1969,8 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen){ tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); STableMeta* pTableMeta = NULL; - processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, - &pName, &pTableMeta, pSW, rawData, retry); + RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, + &pName, &pTableMeta, pSW, rawData, retry)); char err[ERR_MSG_LEN] = {0}; code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); if (code != TSDB_CODE_SUCCESS) { @@ -2050,8 +2050,8 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) // find schema data info SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, pName.tname, strlen(pName.tname)); STableMeta* pTableMeta = NULL; - processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, - &pName, &pTableMeta, pSW, rawData, retry); + RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, + &pName, &pTableMeta, pSW, rawData, retry)); char err[ERR_MSG_LEN] = {0}; code = rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); if (code != TSDB_CODE_SUCCESS) { From 4efae71832e95d1c96110aed371b37b27576d3b6 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Mon, 21 Oct 2024 16:28:06 +0800 Subject: [PATCH 040/142] chore(*): add taoskeeper CI for Windows and Darwin platforms --- packaging/tools/make_install.bat | 20 +++++++++++++- tools/CMakeLists.txt | 45 +++++++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 2 deletions(-) diff --git a/packaging/tools/make_install.bat b/packaging/tools/make_install.bat index 0b2a55b89c..55b55eacde 100644 --- a/packaging/tools/make_install.bat +++ b/packaging/tools/make_install.bat @@ -13,8 +13,10 @@ if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json ( rem // stop and delete service mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close) echo This might take a few moment to accomplish deleting service taosd/taosadapter ... 
+echo This might take a few moment to accomplish deleting service taosd/taoskeeper ... call :check_svc taosd call :check_svc taosadapter +call :check_svc taoskeeper set source_dir=%2 set source_dir=%source_dir:/=\\% @@ -46,6 +48,11 @@ if exist %binary_dir%\\test\\cfg\\taosadapter.toml ( copy %binary_dir%\\test\\cfg\\taosadapter.toml %target_dir%\\cfg\\taosadapter.toml > nul ) ) +if exist %binary_dir%\\test\\cfg\\taoskeeper.toml ( + if not exist %target_dir%\\cfg\\taoskeeper.toml ( + copy %binary_dir%\\test\\cfg\\taoskeeper.toml %target_dir%\\cfg\\taoskeeper.toml > nul + ) +) copy %source_dir%\\include\\client\\taos.h %target_dir%\\include > nul copy %source_dir%\\include\\util\\taoserror.h %target_dir%\\include > nul copy %source_dir%\\include\\libs\\function\\taosudf.h %target_dir%\\include > nul @@ -98,12 +105,15 @@ if %Enterprise% == TRUE ( copy %binary_dir%\\build\\bin\\*explorer.exe %target_dir% > nul ) ) - + copy %binary_dir%\\build\\bin\\taosd.exe %target_dir% > nul copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul if exist %binary_dir%\\build\\bin\\taosadapter.exe ( copy %binary_dir%\\build\\bin\\taosadapter.exe %target_dir% > nul ) +if exist %binary_dir%\\build\\bin\\taoskeeper.exe ( + copy %binary_dir%\\build\\bin\\taoskeeper.exe %target_dir% > nul +) mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close) @@ -116,6 +126,10 @@ if exist %binary_dir%\\build\\bin\\taosadapter.exe ( echo To start/stop taosAdapter with administrator privileges: %ESC%[92msc start/stop taosadapter %ESC%[0m ) +if exist %binary_dir%\\build\\bin\\taoskeeper.exe ( + echo To start/stop taosKeeper with administrator privileges: %ESC%[92msc start/stop taoskeeper %ESC%[0m +) + goto :eof :hasAdmin @@ -123,6 +137,7 @@ goto :eof call :stop_delete call :check_svc taosd call :check_svc taosadapter +call :check_svc taoskeeper if exist c:\\windows\\sysnative ( echo x86 @@ -141,6 +156,7 @@ if exist c:\\windows\\sysnative ( rem // create services sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND +sc create "taoskeeper" binPath= "C:\\TDengine\\taoskeeper.exe" start= DEMAND set "env=HKLM\System\CurrentControlSet\Control\Session Manager\Environment" for /f "tokens=2*" %%I in ('reg query "%env%" /v Path ^| findstr /i "\"') do ( @@ -181,6 +197,8 @@ sc stop taosd sc delete taosd sc stop taosadapter sc delete taosadapter +sc stop taoskeeper +sc delete taoskeeper exit /B 0 :check_svc diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index e294067ff5..a4867612e4 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -229,7 +229,50 @@ IF(TD_BUILD_KEEPER) MESSAGE("${Green} taoskeeper will use ${taos_version} and commit ${taoskeeper_commit_sha1} as version ${ColourReset}") MESSAGE(" current source dir is ${CMAKE_CURRENT_SOURCE_DIR}") - IF(TD_LINUX) + IF(TD_WINDOWS) + MESSAGE("Building taoskeeper on Windows") + INCLUDE(ExternalProject) + ExternalProject_Add(taoskeeper + PREFIX "taoskeeper" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper + BUILD_ALWAYS off + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 
'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + INSTALL_COMMAND + COMMAND cmake -E echo "Comparessing taoskeeper.exe" + COMMAND cmake -E time upx taoskeeper.exe + COMMAND cmake -E echo "Copy taoskeeper.exe" + COMMAND cmake -E copy taoskeeper.exe ${CMAKE_BINARY_DIR}/build/bin/taoskeeper.exe + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.toml" + COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ + ) + ELSEIF(TD_DARWIN) + MESSAGE("Building taoskeeper on macOS") + INCLUDE(ExternalProject) + ExternalProject_Add(taoskeeper + PREFIX "taoskeeper" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper + BUILD_ALWAYS off + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + INSTALL_COMMAND + COMMAND cmake -E echo "Copy taoskeeper" + COMMAND cmake -E copy taoskeeper ${CMAKE_BINARY_DIR}/build/bin + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.toml" + COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./taoskeeper.service ${CMAKE_BINARY_DIR}/test/cfg/ + ) + ELSE() MESSAGE("Building taoskeeper on Linux") INCLUDE(ExternalProject) ExternalProject_Add(taoskeeper From 4d26469ac51651e8f8abbbd47c326be40439ff07 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 21 Oct 2024 18:44:22 +0800 Subject: [PATCH 041/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 22de6e4a75..681d6e5fdb 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1936,9 +1936,8 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen){ SRequestObj* pRequest = NULL; SCatalog* pCatalog = NULL; SRequestConnInfo conn = {0}; - - uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); + uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeMqDataRsp, &rspObj)); SHashObj *pVgHash = NULL; @@ -2012,8 +2011,8 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) SCatalog* pCatalog = NULL; SRequestConnInfo conn = {0}; - uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); + uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeSTaosxRsp, &rspObj)); pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); @@ -2284,7 +2283,7 @@ void tmq_free_raw(tmq_raw_data raw) { } static int32_t writeRawInit(){ - while (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_OK) { + while 
(atomic_load_8(&initedFlag) == WRITE_RAW_INIT_START) { int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1); if (old == 0) { int32_t code = initRawCacheHash(); From 5ee1c9b70a9cbf99ec71bf2517753d6d8305ef53 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 21 Oct 2024 21:01:33 +0800 Subject: [PATCH 042/142] fix retry error --- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 3090903805..e63e17e584 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -29,9 +29,16 @@ static inline void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) { dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet); if (epSet.numOfEps <= 1) { - pMsg->pCont = NULL; - pMsg->code = TSDB_CODE_MNODE_NOT_FOUND; - return; + if (epSet.numOfEps == 0) { + pMsg->pCont = NULL; + pMsg->code = TSDB_CODE_MNODE_NOT_FOUND; + return; + } + if (strcmp(epSet.eps[0].fqdn, tsLocalFqdn) == 0 && epSet.eps[0].port == tsServerPort) { + pMsg->pCont = NULL; + pMsg->code = TSDB_CODE_MNODE_NOT_FOUND; + return; + } } int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); From d91b771d9c3b22619a0cf52bcb0fff6f395627ad Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 22 Oct 2024 09:13:45 +0800 Subject: [PATCH 043/142] fix retry error --- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index e63e17e584..1340ee9277 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -34,6 +34,8 @@ static inline void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) { pMsg->code = TSDB_CODE_MNODE_NOT_FOUND; return; } + // dnode is not the mnode or mnode leader and This ensures that the function correctly handles cases where the + // dnode cannot obtain a valid epSet and avoids returning an incorrect or misleading epSet. 
if (strcmp(epSet.eps[0].fqdn, tsLocalFqdn) == 0 && epSet.eps[0].port == tsServerPort) { pMsg->pCont = NULL; pMsg->code = TSDB_CODE_MNODE_NOT_FOUND; From a193644e8593982c76d2fa5203a158029016ec47 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Tue, 22 Oct 2024 11:20:56 +0800 Subject: [PATCH 044/142] chore(*): modify taoskeeper build process --- cmake/cmake.define | 1 + tools/CMakeLists.txt | 31 ++++++++----------------------- 2 files changed, 9 insertions(+), 23 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index 2aba4785be..a5f636c0fc 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -1,6 +1,7 @@ cmake_minimum_required(VERSION 3.0) set(CMAKE_VERBOSE_MAKEFILE FALSE) set(TD_BUILD_TAOSA_INTERNAL FALSE) +set(TD_BUILD_TAOSA_INTERNAL FALSE) #set output directory SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index a4867612e4..b540200b4d 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -251,29 +251,13 @@ IF(TD_BUILD_KEEPER) COMMAND cmake -E echo "Copy taoskeeper.toml" COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ ) - ELSEIF(TD_DARWIN) - MESSAGE("Building taoskeeper on macOS") - INCLUDE(ExternalProject) - ExternalProject_Add(taoskeeper - PREFIX "taoskeeper" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper - BUILD_ALWAYS off - BUILD_IN_SOURCE 1 - CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config" - PATCH_COMMAND - COMMAND git clean -f -d - BUILD_COMMAND - COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" - INSTALL_COMMAND - COMMAND cmake -E echo "Copy taoskeeper" - COMMAND cmake -E copy taoskeeper ${CMAKE_BINARY_DIR}/build/bin - COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ - COMMAND cmake -E echo "Copy taoskeeper.toml" - COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ - COMMAND cmake -E copy ./taoskeeper.service ${CMAKE_BINARY_DIR}/test/cfg/ - ) ELSE() - MESSAGE("Building taoskeeper on Linux") + IF(TD_DARWIN) + MESSAGE("Building taoskeeper on macOS") + ELSE() + MESSAGE("Building taoskeeper on Linux") + ENDIF() + INCLUDE(ExternalProject) ExternalProject_Add(taoskeeper PREFIX "taoskeeper" @@ -291,9 +275,10 @@ IF(TD_BUILD_KEEPER) COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E echo "Copy taoskeeper.toml" COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.service" COMMAND cmake -E copy ./taoskeeper.service ${CMAKE_BINARY_DIR}/test/cfg/ ) ENDIF() ELSEIF(TD_BUILD_KEEPER_INTERNAL) - MESSAGE("${Yellow} taoskeeper does not support internal option ${ColourReset}") + MESSAGE("${Yellow} use taoskeeper internal ${ColourReset}") ENDIF() From e7a90fe982945d70d5e125ffc78db803601708bb Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Tue, 22 Oct 2024 15:03:40 +0800 Subject: [PATCH 045/142] chore(tools): delete taoskeeper internal message --- tools/CMakeLists.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index b540200b4d..29aacd6bce 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -279,6 +279,4 @@ IF(TD_BUILD_KEEPER) COMMAND cmake -E copy ./taoskeeper.service ${CMAKE_BINARY_DIR}/test/cfg/ ) 
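A quick aside on the `-ldflags "-X ..."` options used in the go build commands above: the Go linker's `-X` flag overwrites package-level string variables at link time, which is how the taoskeeper binary gets its version, commit id, and build info stamped in without any source change. A minimal sketch of what the receiving side could look like is shown below; it is illustrative only — the real `github.com/taosdata/taoskeeper/version` package is not part of this patch, so the fallback values and the `Print` helper here are assumptions.

    // version/version.go — illustrative sketch, not taken from the taoskeeper sources.
    package version

    import "fmt"

    // Package-level string variables can be overridden at link time, e.g.
    //   go build -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=3.3.2.0'"
    // The values below are only fallbacks for builds done without -ldflags.
    var (
        Version   = "unknown"
        CommitID  = "unknown"
        BuildInfo = "unknown"
    )

    // Print reports the injected values, e.g. from a -V/--version flag handler (assumed helper).
    func Print() {
        fmt.Printf("version: %s\ncommit: %s\nbuild: %s\n", Version, CommitID, BuildInfo)
    }

Note that `-X` only works on package-level variables of plain type string; constants cannot be set this way, which is why version packages built for this pattern keep the fields as simple `var` declarations.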
ENDIF() -ELSEIF(TD_BUILD_KEEPER_INTERNAL) - MESSAGE("${Yellow} use taoskeeper internal ${ColourReset}") ENDIF() From 94a70c9389cd930a3f80dbfe66edbd272a67246c Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 22 Oct 2024 23:24:39 +0800 Subject: [PATCH 046/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 681d6e5fdb..2bd815b460 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -2344,9 +2344,9 @@ static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen if (taos == NULL || meta == NULL) { return TSDB_CODE_INVALID_PARA; } - SMqBatchMetaRsp rsp = {0}; + SMqBatchMetaRsp rsp = {0}; SDecoder coder = {0}; - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; // decode and process req tDecoderInit(&coder, meta, metaLen); From 2178dd53ace79b55071dbac8c80fff28c381d2ab Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Wed, 23 Oct 2024 09:04:22 +0800 Subject: [PATCH 047/142] chore(cmake): modify define --- cmake/cmake.define | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index d808f02413..998a969b65 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 3.0) set(CMAKE_VERBOSE_MAKEFILE FALSE) set(TD_BUILD_TAOSA_INTERNAL FALSE) -set(TD_BUILD_TAOSA_INTERNAL FALSE) +set(TD_BUILD_KEEPER_INTERNAL FALSE) #set output directory SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib) From 9a3184578dc0c352aa96bbb22179881a8aeae2dc Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Wed, 23 Oct 2024 09:26:28 +0800 Subject: [PATCH 048/142] docs:Update 07-supported.md --- docs/zh/14-reference/07-supported.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/07-supported.md b/docs/zh/14-reference/07-supported.md index 10ca237653..fcb20b8ac1 100644 --- a/docs/zh/14-reference/07-supported.md +++ b/docs/zh/14-reference/07-supported.md @@ -14,7 +14,7 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" | M1 | | | | | | | | ● | 注:1) ● 表示经过官方测试验证, ○ 表示非官方测试验证,E 表示仅企业版支持。 - 2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。 + 2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/CentOS Stream/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。 ## TDengine 客户端和连接器支持的平台列表 From 14faa7585d42b1c0610b6f7d79854fe721ba0d92 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Tue, 22 Oct 2024 10:05:44 +0800 Subject: [PATCH 049/142] enh: mndArbGroup replace unsafe func --- source/dnode/mnode/impl/src/mndArbGroup.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndArbGroup.c b/source/dnode/mnode/impl/src/mndArbGroup.c index 97bf661bc3..1dd21900e3 100644 --- a/source/dnode/mnode/impl/src/mndArbGroup.c +++ b/source/dnode/mnode/impl/src/mndArbGroup.c @@ -15,13 +15,10 @@ #define _DEFAULT_SOURCE #include "mndArbGroup.h" -#include "audit.h" #include "mndDb.h" #include "mndDnode.h" -#include "mndPrivilege.h" #include "mndShow.h" #include "mndTrans.h" -#include "mndUser.h" #include "mndVgroup.h" #define ARBGROUP_VER_NUMBER 1 @@ -245,11 +242,11 @@ static int32_t 
mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p } for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) { - (void)memcpy(pOld->members[i].state.token, pNew->members[i].state.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pOld->members[i].state.token, pNew->members[i].state.token, TSDB_ARB_TOKEN_SIZE); } pOld->isSync = pNew->isSync; pOld->assignedLeader.dnodeId = pNew->assignedLeader.dnodeId; - (void)memcpy(pOld->assignedLeader.token, pNew->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pOld->assignedLeader.token, pNew->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); pOld->assignedLeader.acked = pNew->assignedLeader.acked; pOld->version++; @@ -834,12 +831,12 @@ static int32_t mndProcessArbUpdateGroupBatchReq(SRpcMsg *pReq) { newGroup.dbUid = pUpdateGroup->dbUid; for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) { newGroup.members[i].info.dnodeId = pUpdateGroup->members[i].dnodeId; - (void)memcpy(newGroup.members[i].state.token, pUpdateGroup->members[i].token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(newGroup.members[i].state.token, pUpdateGroup->members[i].token, TSDB_ARB_TOKEN_SIZE); } newGroup.isSync = pUpdateGroup->isSync; newGroup.assignedLeader.dnodeId = pUpdateGroup->assignedLeader.dnodeId; - (void)memcpy(newGroup.assignedLeader.token, pUpdateGroup->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(newGroup.assignedLeader.token, pUpdateGroup->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); newGroup.assignedLeader.acked = pUpdateGroup->assignedLeader.acked; newGroup.version = pUpdateGroup->version; @@ -897,7 +894,7 @@ static void mndArbGroupSetAssignedLeader(SArbGroup *pGroup, int32_t index) { SArbGroupMember *pMember = &pGroup->members[index]; pGroup->assignedLeader.dnodeId = pMember->info.dnodeId; - (void)strncpy(pGroup->assignedLeader.token, pMember->state.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pGroup->assignedLeader.token, pMember->state.token, TSDB_ARB_TOKEN_SIZE); pGroup->assignedLeader.acked = false; } @@ -979,7 +976,7 @@ bool mndUpdateArbGroupByHeartBeat(SArbGroup *pGroup, SVArbHbRspMember *pRspMembe // update token mndArbGroupDupObj(pGroup, pNewGroup); - (void)memcpy(pNewGroup->members[index].state.token, pRspMember->memberToken, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pNewGroup->members[index].state.token, pRspMember->memberToken, TSDB_ARB_TOKEN_SIZE); pNewGroup->isSync = false; bool resetAssigned = false; From e4547fd5a7f225c03c029e3a0be0f6f8b8153e23 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 23 Oct 2024 16:56:11 +0800 Subject: [PATCH 050/142] tetst:add testecase for clear consumer with session and poll timout --- tests/army/frame/common.py | 8 +- tests/army/tmq/drop_lost_comsumers.py | 267 ++++++++++++++++++++++++++ tests/army/tmq/per_consumer.py | 144 ++++++++++++++ tests/parallel_test/cases.task | 2 +- 4 files changed, 417 insertions(+), 4 deletions(-) create mode 100644 tests/army/tmq/drop_lost_comsumers.py create mode 100644 tests/army/tmq/per_consumer.py diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index b816095817..bad86c828f 100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -803,11 +803,13 @@ class TDCom: else: tdLog.exit(f"getOneRow out of range: row_index={location} row_count={self.query_row}") - def killProcessor(self, processorName): + def kill_signal_process(self, signal=15, processor_name: str = "taosd"): if (platform.system().lower() == 'windows'): - os.system("TASKKILL /F /IM %s.exe"%processorName) + os.system(f"TASKKILL /F /IM {processor_name}.exe") else: - os.system("unset LD_PRELOAD; pkill %s " 
% processorName) + tdLog.debug(f"unset LD_PRELOAD; sudo pkill -f -{signal} '{processor_name}' ") + os.system(f"unset LD_PRELOAD; sudo pkill -f -{signal} '{processor_name}' ") + def gen_tag_col_str(self, gen_type, data_type, count): """ diff --git a/tests/army/tmq/drop_lost_comsumers.py b/tests/army/tmq/drop_lost_comsumers.py new file mode 100644 index 0000000000..b88aae8c03 --- /dev/null +++ b/tests/army/tmq/drop_lost_comsumers.py @@ -0,0 +1,267 @@ + +import taos +import sys +import time +import socket +import os +import threading +import multiprocessing +from multiprocessing import Process, Queue + +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * +from taos.tmq import * +from frame import etool +from datetime import datetime +from taos.tmq import Consumer +from frame.common import * + +class TaosConsumer: + def __init__(self): + pass + + def sub_consumer(self ,consumer ,group_id ,topic_name ): + group_id = int(group_id) + if group_id < 100 : + try: + consumer.subscribe([topic_name]) + except TmqError: + tdLog.exit(f"subscribe error") + nrows = 0 + while True: + start = datetime.now() + print(f"time:{start},consumer:{group_id}, start to consume") + message = consumer.poll(timeout=10.0) + + if message: + id = message.offset() + topic = message.topic() + database = message.database() + + for block in message: + addrows = block.nrows() + nrows += block.nrows() + ncols = block.ncols() + values = block.fetchall + end = datetime.now() + elapsed_time = end -start + print(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + consumer.commit() + print(f"consumer:{group_id},consumer_nrows:{nrows}") + # consumer.unsubscribe() + # consumer.close() + # break + # if nrows >= 1000000: + # break + + def set_conf(self,td_connect_ip="localhost",group_id=1,client_id="test_consumer_py",enable_auto_commit="false",auto_commit_interval_ms="1000",auto_offset_reset="earliest",msg_with_table_name="true",session_timeout_ms=10000,max_poll_interval_ms=180000,experimental_snapshot_enable="false"): + conf = { + # auth options + # consume options + "td.connect.ip": f"{td_connect_ip}", + "group.id": f"{group_id}", + "client.id": f"{client_id}", + "enable.auto.commit": f"{enable_auto_commit}", + "auto.commit.interval.ms": f"{auto_commit_interval_ms}", + "auto.offset.reset": f"{auto_offset_reset}", + "msg.with.table.name": f"{msg_with_table_name}", + "session.timeout.ms": f"{session_timeout_ms}", + "max.poll.interval.ms": f"{max_poll_interval_ms}", + "experimental.snapshot.enable" :f"{experimental_snapshot_enable}", + } + return conf + + def sub_consumer_once(self,consumer, group_id, topic_name, counter, stop_event): + group_id = int(group_id) + if group_id < 100 : + consumer.subscribe([topic_name]) + nrows = 0 + consumer_nrows = 0 + + while not stop_event.is_set(): + start = datetime.now() + tdLog.info(f"time:{start},consumer:{group_id}, start to consume") + #start = datetime.now() + #print(f"time:{start},consumer:{group_id}, start to consume") + tdLog.info(f"consumer_nrows:{consumer_nrows}") + message = consumer.poll(timeout=10.0) + + if message: + id = message.offset() + topic = message.topic() + database = message.database() + for block in message: + addrows = block.nrows() + nrows += block.nrows() + counter.rows(block.nrows()) + ncols = block.ncols() + values = block.fetchall + end = datetime.now() + elapsed_time = end -start + # 
tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + consumer.commit() + # tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") + consumer_nrows = nrows + # consumer.unsubscribe() + # consumer.close() + # break + + print("Consumer subscription thread is stopping.") + def taosc_consumer(self, conf, topic_name, counter,stop_event): + try: + print(conf) + from taos.tmq import Consumer + print("3333") + consumer = Consumer(conf) + print("456") + group_id = int(conf["group.id"]) + tdLog.info(f"{consumer},{group_id}") + except Exception as e: + tdLog.exit(f"{e}") + #counsmer sub: + # while True: + # try: + # self.sub_consumer_once(consumer,group_id) + # except Exception as e: + # print(str(e)) + # time.sleep(1) + # break + # only consumer once + try: + self.sub_consumer_once(consumer, group_id, topic_name, counter, stop_event) + + except Exception as e: + tdLog.exit(f"{e}") + + #consumer.close() + + +class ThreadSafeCounter: + def __init__(self): + self.counter = 0 + self.lock = threading.Lock() + + def rows(self, rows): + with self.lock: + self.counter += rows + + def get(self): + with self.lock: + return self.counter + + +class TDTestCase: + # updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.consumer_instance = TaosConsumer() + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + def caseDescription(self): + ''' + drop_lost_consmuers: + 1. verifying that the boundary and valid values of session_timeout_ms are in effect + 2. verifying that the boundary and valid values of max_poll_interval_ms are in effect + 3. 
verifying that consumer will be closed when the session_timeout_ms and max_poll_interval_ms is expired + ''' + return + + def check_consumer(self,count,rows): + time.sleep(count) + print(count) + try: + for ct in range(5): + tdSql.query(f'show consumers') + anser_rows=tdSql.getRows() + if tdSql.checkRows(rows): + break + else: + time.sleep(1) + tdLog.info(f"wait for {count} seconds to check that consumers number is {rows}") + if anser_rows != rows: + tdLog.exit(f"consumer number is not {rows}") + except Exception as e: + tdLog.exit(f"{e},check consumer error") + + def drop_session_timeout_consmuers(self, consumer_groups_num, session_timeout_ms, max_poll_interval_ms, topic_name, timeout): + tdSql.execute(f'drop topic if exists {topic_name};') + tdSql.execute(f'use db_sub') + tdSql.execute(f'create topic {topic_name} as select * from db_sub.meters;') + + # start consumer and config some parameters + os.system(f"nohup python3 ./tmq/per_consumer.py -c {consumer_groups_num} -s {session_timeout_ms} -p {max_poll_interval_ms} > consumer.log &") + # wait 4s for consuming data + time.sleep(4) + # kill consumer to simulate session_timeout_ms + tdLog.info("kill per_consumer.py") + tdCom.kill_signal_process(signal=9,processor_name="python3\s*./tmq/per_consumer.py") + self.check_consumer(timeout,0) + tdSql.execute(f'drop topic if exists {topic_name};') + os.system("rm -rf consumer.log") + + + def drop_max_poll_timeout_consmuers(self, consumer_groups_num, consumer_rows, topic_name, timeout): + tdSql.execute(f'drop topic if exists {topic_name};') + tdSql.execute(f'use db_sub') + tdSql.execute(f'create topic {topic_name} as select * from db_sub.meters;') + + threads = [] + counter = ThreadSafeCounter() + stop_event = threading.Event() + for id in range(consumer_groups_num): + conf = self.consumer_instance.set_conf(group_id=id, session_timeout_ms=self.session_timeout_ms, max_poll_interval_ms=self.max_poll_interval_ms) + threads.append(threading.Thread(target=self.consumer_instance.taosc_consumer, args=(conf, topic_name, counter, stop_event))) + for tr in threads: + tr.start() + + consumer_all_rows = consumer_rows * consumer_groups_num + while True: + if counter.get() < consumer_all_rows: + time.sleep(5) + print(f"consumer_all_rows:{consumer_all_rows},counter.get():{counter.get()}") + elif counter.get() >= consumer_all_rows: + self.check_consumer(timeout+20, 0) + stop_event.set() + tr.join() + break + time.sleep(2) + tdSql.execute(f'drop topic if exists {topic_name};') + + def case_session_12s(self): + #test session_timeout_ms=12s + session_timeout_ms=12000 + max_poll_interval_ms=180000 + topic_name = "select_d1" + self.drop_session_timeout_consmuers(consumer_groups_num=1, session_timeout_ms=session_timeout_ms, max_poll_interval_ms=max_poll_interval_ms, topic_name=topic_name , timeout=int(session_timeout_ms/1000)) + + + def case_max_poll_12s(self,consumer_rows): + #test max_poll_interval_ms=12s + self.session_timeout_ms=180000 + self.max_poll_interval_ms=12000 + topic_name = "select_d1" + self.drop_max_poll_timeout_consmuers(consumer_groups_num=1, topic_name=topic_name, consumer_rows=consumer_rows, timeout=int(self.max_poll_interval_ms/1000)) + + + def run(self): + table_number = 1000 + rows_per_table = 1000 + vgroups = 4 + etool.benchMark(command=f"-d db_sub -t {table_number} -n {rows_per_table} -v {vgroups} -y") + consumer_rows = table_number * rows_per_table # 消费的目标行数 + # self.case_session_12s() + self.case_max_poll_12s(consumer_rows) + remaining_threads = threading.Lock() + + def stop(self): + 
tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/tmq/per_consumer.py b/tests/army/tmq/per_consumer.py new file mode 100644 index 0000000000..67c82d5d3e --- /dev/null +++ b/tests/army/tmq/per_consumer.py @@ -0,0 +1,144 @@ +import os +import taos +import time +from datetime import datetime +import subprocess +from multiprocessing import Process +import threading +from taos.tmq import Consumer +import click + +try: + conn = taos.connect() +except Exception as e: + print(str(e)) + +@click.command() +@click.option('-c', '--consumer-groups-num', "consumer_group_num", default=1, help='Number of consumer group.') +@click.option('-s', '--session-timeout-ms', "session_timeout_ms", default=60000, help='session timeout:ms') +@click.option('-p', '--max-poll-interval-ms',"max_poll_interval_ms", default=180000, help='max poll interval timeout:ms') + +def test_timeout_sub(consumer_group_num,session_timeout_ms,max_poll_interval_ms): + threads = [] + print(consumer_group_num,session_timeout_ms,max_poll_interval_ms) + for id in range(consumer_group_num): + conf = set_conf(group_id=id,session_timeout_ms=session_timeout_ms,max_poll_interval_ms=max_poll_interval_ms) + print(conf) + threads.append(threading.Thread(target=taosc_consumer, args=(conf,))) + for tr in threads: + tr.start() + for tr in threads: + tr.join() + +def sub_consumer(consumer,group_id): + group_id = int(group_id) + if group_id < 100 : + try: + consumer.subscribe(["select_d1"]) + except Exception as e: + print(f"subscribe error") + exit(1) + + nrows = 0 + while True: + start = datetime.now() + print(f"time:{start},consumer:{group_id}, start to consume") + message = consumer.poll(timeout=10.0) + + if message: + id = message.offset() + topic = message.topic() + database = message.database() + + for block in message: + addrows = block.nrows() + nrows += block.nrows() + ncols = block.ncols() + values = block.fetchall + end = datetime.now() + elapsed_time = end -start + print(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + consumer.commit() + print(f"consumer:{group_id},consumer_nrows:{nrows}") + # consumer.unsubscribe() + # consumer.close() + # break + # if nrows >= 1000000: + # break +def sub_consumer_once(consumer,group_id): + group_id = int(group_id) + if group_id < 100 : + consumer.subscribe(["select_d1"]) + nrows = 0 + consumer_nrows = 0 + while True: + start = datetime.now() + print(f"time:{start},consumer:{group_id}, start to consume") + #start = datetime.now() + #print(f"time:{start},consumer:{group_id}, start to consume") + print(f"consumer_nrows:{consumer_nrows}") + if consumer_nrows < 1000000: + message = consumer.poll(timeout=10.0) + else: + print(" stop consumer when consumer all rows") + + if message: + id = message.offset() + topic = message.topic() + database = message.database() + + for block in message: + addrows = block.nrows() + nrows += block.nrows() + ncols = block.ncols() + values = block.fetchall + end = datetime.now() + elapsed_time = end -start + # print(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + consumer.commit() + # print(f"consumer:{group_id},consumer_nrows:{nrows}") + consumer_nrows = nrows + # consumer.unsubscribe() + # consumer.close() + # break + +def 
set_conf(td_connect_ip="localhost",group_id=1,client_id="test_consumer_py",enable_auto_commit="false",auto_commit_interval_ms="1000",auto_offset_reset="earliest",msg_with_table_name="true",session_timeout_ms=10000,max_poll_interval_ms=20000,experimental_snapshot_enable="false"): + conf = { + # auth options + # consume options + "td.connect.ip": f"{td_connect_ip}", + "group.id": f"{group_id}", + "client.id": f"{client_id}", + "enable.auto.commit": f"{enable_auto_commit}", + "auto.commit.interval.ms": f"{auto_commit_interval_ms}", + "auto.offset.reset": f"{auto_offset_reset}", + "msg.with.table.name": f"{msg_with_table_name}", + "session.timeout.ms": f"{session_timeout_ms}", + "max.poll.interval.ms": f"{max_poll_interval_ms}", + "experimental.snapshot.enable" :f"{experimental_snapshot_enable}", + } + return conf + +def taosc_consumer(conf): + consumer = Consumer(conf) + group_id = int(conf["group.id"]) + print(f"{consumer},{group_id}") + #counsmer sub: + # while True: + # try: + # self.sub_consumer_once(consumer,group_id) + # except Exception as e: + # print(str(e)) + # time.sleep(1) + # break + # only consumer once + try: + sub_consumer_once(consumer,group_id) + except Exception as e: + print(str(e)) + + #consumer.close() + + +if __name__ == '__main__': + test_timeout_sub() \ No newline at end of file diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index cfe88138ef..ef3e9c9c56 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -47,7 +47,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f query/window/base.py ,,y,army,./pytest.sh python3 ./test.py -f query/sys/tb_perf_queries_exist_test.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f query/test_having.py - +,,n,army,python3 ./test.py -f tmq/drop_lost_comsumers.py # # system test # From ea1da6db9e8fef7edf4ff0c8f87246590a4edf6e Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Thu, 24 Oct 2024 13:35:01 +0800 Subject: [PATCH 051/142] docs:Update 01-taosd.md --- docs/zh/14-reference/01-components/01-taosd.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index fbf086bf6b..b724d46bde 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -33,7 +33,7 @@ taosd 命令行参数如下 | secondEp | taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,缺省值:无 | | fqdn | 启动 taosd 后所监听的服务地址,缺省值:所在服务器上配置的第一个 hostname | | serverPort | 启动 taosd 后所监听的端口,缺省值:6030 | -| numOfRpcSessions | 允许一个客户端能创建的最大连接数,取值范围 100-100000,缺省值:30000 | +| numOfRpcSessions | 允许一个 dnode 能发起的最大连接数,取值范围 100-100000,缺省值:30000 | | timeToGetAvailableConn | 获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值:500000 | ### 监控相关 @@ -458,4 +458,4 @@ TDengine 的日志文件主要包括普通日志和慢日志两种类型。 3. 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy.mm.dd 文件里。 4. 慢日志文件不自动删除,不压缩。 5. 
使用和普通日志文件相同的三个参数 logDir, minimalLogDirGB, asyncLog。另外两个参数 numOfLogLines,logKeepDays 不适用于慢日志。 - \ No newline at end of file + From 4e016d85fa42442e8162157d9bf963a31ade0dca Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 14:21:10 +0800 Subject: [PATCH 052/142] chore(tools): modify the output of taoskeeper and taosadapter on Windows and macOS --- packaging/tools/make_install.bat | 11 +++++++++-- packaging/tools/make_install.sh | 6 ++++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/packaging/tools/make_install.bat b/packaging/tools/make_install.bat index 55b55eacde..04d342ea06 100644 --- a/packaging/tools/make_install.bat +++ b/packaging/tools/make_install.bat @@ -12,8 +12,15 @@ if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json ( rem // stop and delete service mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close) -echo This might take a few moment to accomplish deleting service taosd/taosadapter ... -echo This might take a few moment to accomplish deleting service taosd/taoskeeper ... + +if exist %binary_dir%\\build\\bin\\taosadapter.exe ( + echo This might take a few moment to accomplish deleting service taosd/taosadapter ... +) + +if exist %binary_dir%\\build\\bin\\taoskeeper.exe ( + echo This might take a few moment to accomplish deleting service taosd/taoskeeper ... +) + call :check_svc taosd call :check_svc taosadapter call :check_svc taoskeeper diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index b23c675e10..ec4332a7a0 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -740,8 +740,10 @@ function install_TDengine() { echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}" else echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" - echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" - echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}" fi fi From 6db4797eb8e4c1b747e1751e64b2142464a3c396 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 14:39:57 +0800 Subject: [PATCH 053/142] chore(tools): modify the output of taoskeeper and taosadapter on macOS --- packaging/tools/make_install.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index ec4332a7a0..1b8fa2fb70 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -677,8 +677,10 @@ function update_TDengine() { echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}" else echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" - echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" - echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl 
start com.tdengine.taoskeeper${NC}" fi fi From eab9f1a5d217f3d499c034cf4e5ed8e7526e4f4c Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 15:04:54 +0800 Subject: [PATCH 054/142] chore(.github): add taoskeeper github action workflow --- .github/workflows/taoskeeper-ci.yml | 56 +++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 .github/workflows/taoskeeper-ci.yml diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml new file mode 100644 index 0000000000..a9f48831c1 --- /dev/null +++ b/.github/workflows/taoskeeper-ci.yml @@ -0,0 +1,56 @@ +name: taoskeeper CI + +on: + push: + paths: + - "tools/keeper/**" + +jobs: + build: + runs-on: ubuntu-latest + name: Run taoskeeper unit tests + steps: + - name: Checkout the repository + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: 1.18 + + - name: Install system dependencies + run: | + sudo apt update -y + sudo apt install -y build-essential cmake libgeos-dev + + - name: Install TDengine + run: | + cd TDengine + mkdir debug + cd debug + cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true + make -j 4 + sudo make install + which taosd + which taosadapter + which taoskeeper + + - name: Start taosd + run: | + cp /etc/taos/taos.cfg ./ + sudo echo "supportVnodes 256" >> taos.cfg + nohup sudo taosd -c taos.cfg & + + - name: Start taosadapter + run: nohup sudo taosadapter & + + - name: Run tests + run: | + go mod tidy + go test -v ./... + + - name: Clean up + if: always() + run: | + sudo pkill taosd + sudo pkill taosadapter From 5085273360df493fcf2278b9bd10de4917c4aa85 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 15:36:53 +0800 Subject: [PATCH 055/142] chore(.github): modify taoskeeper github action workflow --- .github/workflows/taoskeeper-ci.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml index a9f48831c1..a6e41bc5f0 100644 --- a/.github/workflows/taoskeeper-ci.yml +++ b/.github/workflows/taoskeeper-ci.yml @@ -8,13 +8,14 @@ on: jobs: build: runs-on: ubuntu-latest - name: Run taoskeeper unit tests + name: Run unit tests + steps: - name: Checkout the repository uses: actions/checkout@v4 - - name: Set up Go - uses: actions/setup-go@v4 + - name: Setup Go env + uses: actions/setup-go@v5 with: go-version: 1.18 @@ -25,7 +26,7 @@ jobs: - name: Install TDengine run: | - cd TDengine + # cd TDengine mkdir debug cd debug cmake .. 
-DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true From 3a11a2d319e0c4abce3c12388e129c36415fa0bd Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 15:37:56 +0800 Subject: [PATCH 056/142] chore(keeper): delete unused files --- tools/keeper/.github/workflows/build.yaml | 59 ------------------- .../.github/workflows/release-pr-title.yaml | 33 ----------- tools/keeper/.github/workflows/release.yaml | 45 -------------- tools/keeper/ci/changelog-generate.sh | 31 ---------- tools/keeper/ci/post-release.sh | 22 ------- tools/keeper/ci/release.sh | 31 ---------- 6 files changed, 221 deletions(-) delete mode 100644 tools/keeper/.github/workflows/build.yaml delete mode 100644 tools/keeper/.github/workflows/release-pr-title.yaml delete mode 100644 tools/keeper/.github/workflows/release.yaml delete mode 100755 tools/keeper/ci/changelog-generate.sh delete mode 100755 tools/keeper/ci/post-release.sh delete mode 100755 tools/keeper/ci/release.sh diff --git a/tools/keeper/.github/workflows/build.yaml b/tools/keeper/.github/workflows/build.yaml deleted file mode 100644 index 3b0db21ccb..0000000000 --- a/tools/keeper/.github/workflows/build.yaml +++ /dev/null @@ -1,59 +0,0 @@ -name: Go - -on: [push] - -jobs: - build: - runs-on: ubuntu-latest - strategy: - matrix: - go: ["1.18"] - name: Go ${{ matrix.go }} - steps: - - name: Build tools - run: | - sudo apt-get update -y - sudo apt-get install -y build-essential cmake libgeos-dev - - - name: checkout - uses: actions/checkout@v2 - - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: ${{ matrix.go }} - - - name: checkout TDengine - uses: actions/checkout@v3 - with: - repository: "taosdata/TDengine" - path: "TDengine" - ref: "main" - - - name: install TDengine - run: | - cd TDengine - mkdir debug - cd debug - cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off - make -j 4 - sudo make install - which taosd - which taosadapter - - - name: start taosd - run: | - cp /etc/taos/taos.cfg ./ - sudo echo "supportVnodes 256" >> taos.cfg - nohup sudo taosd -c taos.cfg & - - - name: start taosadapter - run: nohup sudo taosadapter & - - - name: test - run: go mod tidy && go test -v ./... - - - name: Build - run: | - go mod tidy - go build diff --git a/tools/keeper/.github/workflows/release-pr-title.yaml b/tools/keeper/.github/workflows/release-pr-title.yaml deleted file mode 100644 index b20e1e49d9..0000000000 --- a/tools/keeper/.github/workflows/release-pr-title.yaml +++ /dev/null @@ -1,33 +0,0 @@ -name: "Release PR Check" - -on: - pull_request: - branches: - - develop - - types: - - opened - - edited - - synchronize - - labeled - - unlabeled -jobs: - check: - name: "PR check if release" - if: contains(github.event.pull_request.title, '') - runs-on: ubuntu-latest - steps: - - uses: Slashgear/action-check-pr-title@v3.0.0 - with: - regexp: '.*:?\s*(\d+\.\d+\.\d+)(-\S+)?.*' # Regex the title should match. - - - name: Check version - run: | - version=$(echo "${{ github.event.pull_request.title }}" | grep -o -P ':?\s*(\d+\.\d+\.\d+)(-\S+)?' 
|sed -E 's/:?\s*//') - echo Seems you want to release $version - if git show-ref --tags $version --quiet; then - echo "bug tag exists" - exit 1 - else - echo "tag is valid" - fi diff --git a/tools/keeper/.github/workflows/release.yaml b/tools/keeper/.github/workflows/release.yaml deleted file mode 100644 index 2a156634d4..0000000000 --- a/tools/keeper/.github/workflows/release.yaml +++ /dev/null @@ -1,45 +0,0 @@ -name: Release - -on: - pull_request: - branches: - - develop - types: - - closed - -jobs: - release: - if: github.event.pull_request.merged == true && contains(github.event.pull_request.title, '') - runs-on: ubuntu-20.04 - - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - ref: develop - fetch-depth: 0 - - - name: Extract version - id: extract-version - run: | - version=$(echo "${{ github.event.pull_request.title }}" | grep -o -P ':?\s*(\d+\.\d+\.\d+)(-\S+)?' |sed -E 's/:?\s*//') - echo $version - echo ::set-output name=version::$version - - - name: Version bump - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - ./ci/release.sh ${{ steps.extract-version.outputs.version }} - - - name: Release - uses: softprops/action-gh-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: v${{ steps.extract-version.outputs.version }} - body_path: CHANGELOG.tmp - - - name: Post-release - run: | - ./ci/post-release.sh ${{ steps.extract-version.outputs.version }} diff --git a/tools/keeper/ci/changelog-generate.sh b/tools/keeper/ci/changelog-generate.sh deleted file mode 100755 index 516ebc5272..0000000000 --- a/tools/keeper/ci/changelog-generate.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -last=$(git describe --tags --abbrev=0 2>/dev/null) - -if [ "$last" = "" ]; then - git log --pretty=format:'%s' | sort -k2n | uniq >./releaseNotes.tmp -else - git log --pretty=format:'%s' $last..HEAD | sort -k2n | uniq >./releaseNotes.tmp -fi - -function part() { - name=$1 - pattern=$2 - changes=$(grep -P '\[\w+-\d+\]\s*<('$pattern')>:' ./releaseNotes.tmp | sed -E 's/ *<('$pattern')>//' | sed 's/[ci skip]\s*//' | awk -F: '{print "- " $1 ": " $2}' | sort | uniq) - lines=$(printf "\\$changes\n" | wc -l) - # echo $name $pattern $lines >&2 - if [ $lines -gt 0 ]; then - echo "### $name" - echo "" - echo "$changes" - echo "" - fi -} - -part "Features" "feature|feat" -part "Bug Fixes" "bugfix|fix" -part "Enhancements" "enhance" -part "Tests" "test" -part "Documents" "docs|doc" - -rm -f ./releaseNotes.tmp diff --git a/tools/keeper/ci/post-release.sh b/tools/keeper/ci/post-release.sh deleted file mode 100755 index 1b5d4201d6..0000000000 --- a/tools/keeper/ci/post-release.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -set -e -ci=$(realpath $(dirname $0)) -v=$1 -if [ "$v" = "" ]; then - echo "$0 " - exit 1 -fi - -newv=$(awk -F. '/[0-9]+\./{$NF+=1;print}' OFS=. 
<<<"$v") -tee version/version.go <" - exit 1 -fi - -tee version/version.go <CHANGELOG.md2 -printf "## v$newv - $(date +%F)\n\n" >>CHANGELOG.md2 -$ci/changelog-generate.sh >CHANGELOG.tmp -cat CHANGELOG.tmp >>CHANGELOG.md2 -sed "1,7d" CHANGELOG.md >>CHANGELOG.md2 -mv CHANGELOG.md2 CHANGELOG.md - -git config user.name github-actions -git config user.email github-actions@github.com -git add version/version.go CHANGELOG.md -git commit -m "release: v$newv" -git push - -git tag v$newv -git push origin v$newv:$newv --force From 2bb13dd7dd3cf52ac332bcaed6a0d7ca9072b1b5 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 16:31:47 +0800 Subject: [PATCH 057/142] chore(.github): modify taoskeeper github action workflow --- .github/workflows/taoskeeper-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml index a6e41bc5f0..edfcfe7177 100644 --- a/.github/workflows/taoskeeper-ci.yml +++ b/.github/workflows/taoskeeper-ci.yml @@ -14,7 +14,7 @@ jobs: - name: Checkout the repository uses: actions/checkout@v4 - - name: Setup Go env + - name: Set up Go uses: actions/setup-go@v5 with: go-version: 1.18 @@ -49,6 +49,7 @@ jobs: run: | go mod tidy go test -v ./... + working-directory: tools/keeper - name: Clean up if: always() From cfd0497465637a3fb2cded3774cdc4e879bf90b1 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 16:42:57 +0800 Subject: [PATCH 058/142] test(keeper/api): trigger taoskeeper ci --- tools/keeper/api/adapter2_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go index e6fd263c43..5456dfeecf 100644 --- a/tools/keeper/api/adapter2_test.go +++ b/tools/keeper/api/adapter2_test.go @@ -14,6 +14,7 @@ import ( ) func TestAdapter2(t *testing.T) { + // c := &config.Config{ InstanceID: 64, Port: 6043, From 9a9038faa06933f6f9db2db93a66af877df68414 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 18:03:04 +0800 Subject: [PATCH 059/142] chore(.github): modify taoskeeper github action workflow --- .github/workflows/taoskeeper-ci.yml | 43 ++++++++++------------------- 1 file changed, 14 insertions(+), 29 deletions(-) diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml index edfcfe7177..538d1adbd3 100644 --- a/.github/workflows/taoskeeper-ci.yml +++ b/.github/workflows/taoskeeper-ci.yml @@ -3,7 +3,7 @@ name: taoskeeper CI on: push: paths: - - "tools/keeper/**" + - tools/keeper/** jobs: build: @@ -19,40 +19,25 @@ jobs: with: go-version: 1.18 - - name: Install system dependencies + - name: Start TDengine run: | - sudo apt update -y - sudo apt install -y build-essential cmake libgeos-dev + docker pull tdengine/tdengine:latest + docker run --name tdengine -d -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine - - name: Install TDengine - run: | - # cd TDengine - mkdir debug - cd debug - cmake .. 
-DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true - make -j 4 - sudo make install - which taosd - which taosadapter - which taoskeeper - - - name: Start taosd - run: | - cp /etc/taos/taos.cfg ./ - sudo echo "supportVnodes 256" >> taos.cfg - nohup sudo taosd -c taos.cfg & - - - name: Start taosadapter - run: nohup sudo taosadapter & - - - name: Run tests + - name: Build taoskeeper + working-directory: tools/keeper run: | go mod tidy - go test -v ./... + go build -v ./... + + - name: Run tests with coverage working-directory: tools/keeper + run: | + go test -v -coverpkg=./... -coverprofile=coverage.out ./... + go tool cover -func=coverage.out - name: Clean up if: always() run: | - sudo pkill taosd - sudo pkill taosadapter + docker stop tdengine + docker rm tdengine From 5ec4b9d35abd1fd89c8a1156876dc38740a1d22e Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 18:03:17 +0800 Subject: [PATCH 060/142] test(keeper/api): trigger taoskeeper ci --- tools/keeper/api/adapter2_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go index 5456dfeecf..e6fd263c43 100644 --- a/tools/keeper/api/adapter2_test.go +++ b/tools/keeper/api/adapter2_test.go @@ -14,7 +14,6 @@ import ( ) func TestAdapter2(t *testing.T) { - // c := &config.Config{ InstanceID: 64, Port: 6043, From 534a6ef1c5243ad64e33da4aa19fb620b17e9fa4 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 18:05:30 +0800 Subject: [PATCH 061/142] chore(.github): modify taoskeeper github action workflow --- .github/workflows/taoskeeper-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml index 538d1adbd3..9ed23be1fa 100644 --- a/.github/workflows/taoskeeper-ci.yml +++ b/.github/workflows/taoskeeper-ci.yml @@ -33,7 +33,7 @@ jobs: - name: Run tests with coverage working-directory: tools/keeper run: | - go test -v -coverpkg=./... -coverprofile=coverage.out ./... + sudo go test -v -coverpkg=./... -coverprofile=coverage.out ./... 
go tool cover -func=coverage.out - name: Clean up From 9aef8f1bdb2a62431bdd3e45585fe26c6a285f81 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 18:05:47 +0800 Subject: [PATCH 062/142] test(keeper/api): trigger taoskeeper ci --- tools/keeper/api/adapter2_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go index e6fd263c43..5456dfeecf 100644 --- a/tools/keeper/api/adapter2_test.go +++ b/tools/keeper/api/adapter2_test.go @@ -14,6 +14,7 @@ import ( ) func TestAdapter2(t *testing.T) { + // c := &config.Config{ InstanceID: 64, Port: 6043, From c040bccf890af208af7a6f767eca7b6680799d30 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 18:07:45 +0800 Subject: [PATCH 063/142] test(keeper/api): trigger taoskeeper ci --- tools/keeper/api/adapter2_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go index 5456dfeecf..e6fd263c43 100644 --- a/tools/keeper/api/adapter2_test.go +++ b/tools/keeper/api/adapter2_test.go @@ -14,7 +14,6 @@ import ( ) func TestAdapter2(t *testing.T) { - // c := &config.Config{ InstanceID: 64, Port: 6043, From 9cb415cb4d46efcae8bfdb4990d59476b4230b30 Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 24 Oct 2024 10:08:11 +0000 Subject: [PATCH 064/142] fix/TD-32621-add-log --- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 5fabd4cdde..215a057618 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -203,6 +203,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { SVnodeObj **ppVnodes = NULL; char file[PATH_MAX] = {0}; char realfile[PATH_MAX] = {0}; + int32_t lino = 0; int32_t nBytes = snprintf(file, sizeof(file), "%s%svnodes_tmp.json", pMgmt->path, TD_DIRSEP); if (nBytes <= 0 || nBytes >= sizeof(file)) { @@ -215,8 +216,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { } int32_t numOfVnodes = 0; - code = vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes); - if (code) goto _OVER; + TAOS_CHECK_GOTO(vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes), &lino, _OVER); // terrno = TSDB_CODE_OUT_OF_MEMORY; pJson = tjsonCreateObject(); @@ -224,36 +224,41 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { code = terrno; goto _OVER; } - if ((code = vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes)) != 0) goto _OVER; + TAOS_CHECK_GOTO(vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes), &lino, _OVER); buffer = tjsonToString(pJson); if (buffer == NULL) { code = TSDB_CODE_INVALID_JSON_FORMAT; + lino = __LINE__; goto _OVER; } pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH); if (pFile == NULL) { code = terrno; + lino = __LINE__; goto _OVER; } int32_t len = strlen(buffer); if (taosWriteFile(pFile, buffer, len) <= 0) { code = terrno; + lino = __LINE__; goto _OVER; } if (taosFsyncFile(pFile) < 0) { code = TAOS_SYSTEM_ERROR(errno); + lino = __LINE__; goto _OVER; } code = taosCloseFile(&pFile); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); + lino = __LINE__; goto _OVER; } - TAOS_CHECK_GOTO(taosRenameFile(file, realfile), NULL, _OVER); + TAOS_CHECK_GOTO(taosRenameFile(file, realfile), &lino, _OVER); dInfo("succeed to write vnodes file:%s, vnodes:%d", realfile, numOfVnodes); @@ -272,7 +277,8 @@ _OVER: } if 
(code != 0) { - dError("failed to write vnodes file:%s since %s, vnodes:%d", realfile, tstrerror(code), numOfVnodes); + dError("failed to write vnodes file:%s at line:%d since %s, vnodes:%d", realfile, lino, tstrerror(code), + numOfVnodes); } return code; } From 4a6e011615d5ff77a128f96aaec03459ee8ae100 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 24 Oct 2024 18:33:37 +0800 Subject: [PATCH 065/142] fix: invalid db options --- source/libs/parser/src/parAstCreater.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index e031ee0fe1..7e6ffd12a2 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1974,14 +1974,13 @@ static SNode* setDatabaseOptionImpl(SAstCreateContext* pCxt, SNode* pOptions, ED case DB_OPTION_S3_COMPACT: pDbOptions->s3Compact = taosStr2Int8(((SToken*)pVal)->z, NULL, 10); break; - case DB_OPTION_KEEP_TIME_OFFSET: { + case DB_OPTION_KEEP_TIME_OFFSET: pDbOptions->keepTimeOffset = taosStr2Int32(((SToken*)pVal)->z, NULL, 10); break; - case DB_OPTION_ENCRYPT_ALGORITHM: - COPY_STRING_FORM_STR_TOKEN(pDbOptions->encryptAlgorithmStr, (SToken*)pVal); - pDbOptions->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO; - break; - } + case DB_OPTION_ENCRYPT_ALGORITHM: + COPY_STRING_FORM_STR_TOKEN(pDbOptions->encryptAlgorithmStr, (SToken*)pVal); + pDbOptions->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO; + break; default: break; } From c148d0676bd6e9323941892c6278922fc77288ff Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 24 Oct 2024 11:07:40 +0000 Subject: [PATCH 066/142] fix/TD-32621-remove-from-hash-when-creating-fail --- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 7e950ef1be..f55cb648e0 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -436,6 +436,21 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { _OVER: if (code != 0) { + int32_t r = 0; + r = taosThreadRwlockWrlock(&pMgmt->lock); + if (r != 0) { + dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(r)); + } + if (r == 0) { + r = taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t)); + if (r != 0) { + dError("vgId:%d, failed to remove vnode since %s", req.vgId, tstrerror(r)); + } + } + r = taosThreadRwlockUnlock(&pMgmt->lock); + if (r != 0) { + dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(r)); + } vnodeClose(pImpl); vnodeDestroy(0, path, pMgmt->pTfs, 0); } else { From 8c7c6945f27e77e2f0ec9d4f56daf26c55f98950 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 19:19:02 +0800 Subject: [PATCH 067/142] chore(.github): modify taoskeeper github action workflow --- .github/workflows/taoskeeper-ci.yml | 33 +++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml index 9ed23be1fa..ea1cfd411f 100644 --- a/.github/workflows/taoskeeper-ci.yml +++ b/.github/workflows/taoskeeper-ci.yml @@ -19,25 +19,40 @@ jobs: with: go-version: 1.18 - - name: Start TDengine + - name: Install system dependencies run: | - docker pull tdengine/tdengine:latest - docker run --name tdengine -d -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine + sudo apt 
update -y + sudo apt install -y build-essential cmake libgeos-dev - - name: Build taoskeeper - working-directory: tools/keeper + - name: Install TDengine run: | - go mod tidy - go build -v ./... + mkdir debug + cd debug + cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true + make -j 4 + sudo make install + which taosd + which taosadapter + which taoskeeper + + - name: Start taosd + run: | + cp /etc/taos/taos.cfg ./ + sudo echo "supportVnodes 256" >> taos.cfg + nohup sudo taosd -c taos.cfg & + + - name: Start taosadapter + run: nohup sudo taosadapter & - name: Run tests with coverage working-directory: tools/keeper run: | + go mod tidy sudo go test -v -coverpkg=./... -coverprofile=coverage.out ./... go tool cover -func=coverage.out - name: Clean up if: always() run: | - docker stop tdengine - docker rm tdengine + sudo pkill taosd + sudo pkill taosadapter From 5fe1217c090ffed645fa4f6674bbb224fcdc9400 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 19:19:27 +0800 Subject: [PATCH 068/142] test(keeper/api): trigger taoskeeper ci --- tools/keeper/api/adapter2_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go index e6fd263c43..5456dfeecf 100644 --- a/tools/keeper/api/adapter2_test.go +++ b/tools/keeper/api/adapter2_test.go @@ -14,6 +14,7 @@ import ( ) func TestAdapter2(t *testing.T) { + // c := &config.Config{ InstanceID: 64, Port: 6043, From 5c1d2e429c768e1fc89e1d09c80b71ce5d75ba4b Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 19:37:41 +0800 Subject: [PATCH 069/142] chore(.github): modify taoskeeper github action workflow --- .github/workflows/taoskeeper-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml index ea1cfd411f..9d177f641c 100644 --- a/.github/workflows/taoskeeper-ci.yml +++ b/.github/workflows/taoskeeper-ci.yml @@ -48,7 +48,7 @@ jobs: working-directory: tools/keeper run: | go mod tidy - sudo go test -v -coverpkg=./... -coverprofile=coverage.out ./... + go test -v -coverpkg=./... -coverprofile=coverage.out ./... 
go tool cover -func=coverage.out - name: Clean up From 1de02f9fe2a7bd0f91cabc00cf83c35df49cc051 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 19:37:55 +0800 Subject: [PATCH 070/142] test(keeper/api): trigger taoskeeper ci --- tools/keeper/api/adapter2_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go index 5456dfeecf..e6fd263c43 100644 --- a/tools/keeper/api/adapter2_test.go +++ b/tools/keeper/api/adapter2_test.go @@ -14,7 +14,6 @@ import ( ) func TestAdapter2(t *testing.T) { - // c := &config.Config{ InstanceID: 64, Port: 6043, From b7c793050973413640aa366b0ab29071c3afd098 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 20:00:13 +0800 Subject: [PATCH 071/142] chore(.github): modify taoskeeper github action workflow --- .github/workflows/taoskeeper-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml index 9d177f641c..87b893666c 100644 --- a/.github/workflows/taoskeeper-ci.yml +++ b/.github/workflows/taoskeeper-ci.yml @@ -54,5 +54,5 @@ jobs: - name: Clean up if: always() run: | - sudo pkill taosd - sudo pkill taosadapter + if pgrep taosd; then sudo pkill taosd; fi + if pgrep taosadapter; then sudo pkill taosadapter; fi From a57ef351375c5c133e7bf4f8ad9b97ef60fc9ee9 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 20:00:29 +0800 Subject: [PATCH 072/142] test(keeper/api): trigger taoskeeper ci --- tools/keeper/api/adapter2_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go index e6fd263c43..5456dfeecf 100644 --- a/tools/keeper/api/adapter2_test.go +++ b/tools/keeper/api/adapter2_test.go @@ -14,6 +14,7 @@ import ( ) func TestAdapter2(t *testing.T) { + // c := &config.Config{ InstanceID: 64, Port: 6043, From 08ec37e2cc2cd7514245ccc2be5fda4759e11d35 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Thu, 24 Oct 2024 20:00:57 +0800 Subject: [PATCH 073/142] test(keeper/api): trigger taoskeeper ci --- tools/keeper/api/adapter2_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go index 5456dfeecf..e6fd263c43 100644 --- a/tools/keeper/api/adapter2_test.go +++ b/tools/keeper/api/adapter2_test.go @@ -14,7 +14,6 @@ import ( ) func TestAdapter2(t *testing.T) { - // c := &config.Config{ InstanceID: 64, Port: 6043, From 9f69124708398a0231f1ef2914c03c4cb5195e26 Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 02:53:48 +0000 Subject: [PATCH 074/142] fix/TD-32622-add-closed-hash --- source/dnode/mgmt/mgmt_vnode/inc/vmInt.h | 2 + source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 50 ++++++++++++++++++- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 58 ++++++++++++++++++++++- source/dnode/mnode/sdb/src/sdbFile.c | 11 +++-- 4 files changed, 113 insertions(+), 8 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 0e1a4bc98e..5bf151fced 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -36,6 +36,7 @@ typedef struct SVnodeMgmt { SSingleWorker mgmtWorker; SSingleWorker mgmtMultiWorker; SHashObj *hash; + SHashObj *closedHash; TdThreadRwlock lock; SVnodesStat state; STfs *pTfs; @@ -111,6 +112,7 @@ int32_t vmProcessArbHeartBeatReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); 
int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes); int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt); int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes); +int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes); // vmWorker.c int32_t vmStartWorker(SVnodeMgmt *pMgmt); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 215a057618..c3f103d45c 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -19,6 +19,54 @@ #define MAX_CONTENT_LEN 2 * 1024 * 1024 +int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) { + (void)taosThreadRwlockRdlock(&pMgmt->lock); + + int32_t num = 0; + int32_t size = taosHashGetSize(pMgmt->hash); + int32_t closedSize = taosHashGetSize(pMgmt->closedHash); + size += closedSize; + SVnodeObj **pVnodes = taosMemoryCalloc(size, sizeof(SVnodeObj *)); + if (pVnodes == NULL) { + (void)taosThreadRwlockUnlock(&pMgmt->lock); + return terrno; + } + + void *pIter = taosHashIterate(pMgmt->hash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + SVnodeObj *pVnode = *ppVnode; + if (pVnode && num < size) { + int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); + // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount); + pVnodes[num++] = (*ppVnode); + pIter = taosHashIterate(pMgmt->hash, pIter); + } else { + taosHashCancelIterate(pMgmt->hash, pIter); + } + } + + pIter = taosHashIterate(pMgmt->closedHash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + SVnodeObj *pVnode = *ppVnode; + if (pVnode && num < size) { + int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); + // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount); + pVnodes[num++] = (*ppVnode); + pIter = taosHashIterate(pMgmt->closedHash, pIter); + } else { + taosHashCancelIterate(pMgmt->closedHash, pIter); + } + } + + (void)taosThreadRwlockUnlock(&pMgmt->lock); + *numOfVnodes = num; + *ppVnodes = pVnodes; + + return 0; +} + int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) { (void)taosThreadRwlockRdlock(&pMgmt->lock); @@ -216,7 +264,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { } int32_t numOfVnodes = 0; - TAOS_CHECK_GOTO(vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes), &lino, _OVER); + TAOS_CHECK_GOTO(vmGetAllVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes), &lino, _OVER); // terrno = TSDB_CODE_OUT_OF_MEMORY; pJson = tjsonCreateObject(); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 20618dbdf3..55d42646d4 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -166,10 +166,27 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { (void)taosThreadRwlockWrlock(&pMgmt->lock); SVnodeObj *pOld = NULL; int32_t r = taosHashGetDup(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); + if (r != 0) { + dError("vgId:%d, failed to get vnode from hash", pVnode->vgId); + } if (pOld) { vmFreeVnodeObj(&pOld); } int32_t code = taosHashPut(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnodeObj *)); + + pOld = NULL; + r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); + if (r != 0) { + dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId); + } + 
if (pOld) { + vmFreeVnodeObj(&pOld); + } + + r = taosHashRemove(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t)); + if (r != 0) { + dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId); + } (void)taosThreadRwlockUnlock(&pMgmt->lock); return code; @@ -185,7 +202,33 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) (void)taosThreadRwlockWrlock(&pMgmt->lock); int32_t r = taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t)); + if (r != 0) { + dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId); + } + + SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj)); + if (pVnode == NULL) { + dError("vgId:%d, failed to alloc vnode since %s", pVnode->vgId, terrstr()); + (void)taosThreadRwlockUnlock(&pMgmt->lock); + return; + } + + *pClosedVnode = *pVnode; + + SVnodeObj *pOld = NULL; + r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); + if (r != 0) { + dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId); + } + if (pOld) { + vmFreeVnodeObj(&pOld); + } + r = taosHashPut(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), &pClosedVnode, sizeof(SVnodeObj *)); + if (r != 0) { + dError("vgId:%d, failed to put vnode to closedHash", pVnode->vgId); + } (void)taosThreadRwlockUnlock(&pMgmt->lock); + vmReleaseVnode(pMgmt, pVnode); if (pVnode->failed) { @@ -362,9 +405,15 @@ static void *vmOpenVnodeInThread(void *param) { static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) { pMgmt->hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); if (pMgmt->hash == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; dError("failed to init vnode hash since %s", terrstr()); - return -1; + return TSDB_CODE_OUT_OF_MEMORY; + } + + pMgmt->closedHash = + taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); + if (pMgmt->hash == NULL) { + dError("failed to init vnode closed hash since %s", terrstr()); + return TSDB_CODE_OUT_OF_MEMORY; } SWrapperCfg *pCfgs = NULL; @@ -537,6 +586,11 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) { pMgmt->hash = NULL; } + if (pMgmt->closedHash != NULL) { + taosHashCleanup(pMgmt->closedHash); + pMgmt->closedHash = NULL; + } + dInfo("total vnodes:%d are all closed", numOfVnodes); } diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 227ff15da9..474b22cca0 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -400,8 +400,8 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { pSdb->commitTerm = pSdb->applyTerm; pSdb->commitConfig = pSdb->applyConfig; memcpy(pSdb->tableVer, tableVer, sizeof(tableVer)); - mInfo("read sdb file:%s success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, pSdb->commitIndex, - pSdb->commitTerm, pSdb->commitConfig); + mInfo("vgId:1, trans:0, read sdb file:%s success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, + pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig); _OVER: if ((ret = taosCloseFile(&pFile)) != 0) { @@ -573,7 +573,8 @@ static int32_t sdbWriteFileImp(SSdb *pSdb, int32_t skip_type) { pSdb->commitIndex = pSdb->applyIndex; pSdb->commitTerm = pSdb->applyTerm; pSdb->commitConfig = pSdb->applyConfig; - mInfo("write sdb file success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s", + mInfo("vgId:1, trans:0, write sdb file success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 + " file:%s", 
pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig, curfile); } @@ -610,8 +611,8 @@ int32_t sdbWriteFile(SSdb *pSdb, int32_t delta) { if (code != 0) { mError("failed to write sdb file since %s", tstrerror(code)); } else { - mInfo("write sdb file success, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64, pSdb->applyIndex, - pSdb->applyTerm, pSdb->applyConfig); + mInfo("vgId:1, trans:0, write sdb file success, apply index:%" PRId64 ", term:%" PRId64 ", config:%" PRId64, + pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig); } (void)taosThreadMutexUnlock(&pSdb->filelock); return code; From 8c826bfc095797d0580ebc710e25be8a6021a0b1 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Fri, 25 Oct 2024 10:58:57 +0800 Subject: [PATCH 075/142] style(cmake): format code --- cmake/cmake.define | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index 4f12b15af0..3fa99a7c93 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -58,18 +58,18 @@ IF(TD_BUILD_HTTP) ADD_DEFINITIONS(-DHTTP_EMBEDDED) ENDIF() -IF ("${BUILD_KEEPER}" STREQUAL "") - SET(TD_BUILD_KEEPER FALSE) -ELSEIF (${BUILD_KEEPER} MATCHES "false") - SET(TD_BUILD_KEEPER FALSE) -ELSEIF (${BUILD_KEEPER} MATCHES "true") - SET(TD_BUILD_KEEPER TRUE) -ELSEIF (${BUILD_KEEPER} MATCHES "internal") - SET(TD_BUILD_KEEPER FALSE) - SET(TD_BUILD_KEEPER_INTERNAL TRUE) -ELSE () - SET(TD_BUILD_KEEPER FALSE) -ENDIF () +IF("${BUILD_KEEPER}" STREQUAL "") + SET(TD_BUILD_KEEPER FALSE) +ELSEIF(${BUILD_KEEPER} MATCHES "false") + SET(TD_BUILD_KEEPER FALSE) +ELSEIF(${BUILD_KEEPER} MATCHES "true") + SET(TD_BUILD_KEEPER TRUE) +ELSEIF(${BUILD_KEEPER} MATCHES "internal") + SET(TD_BUILD_KEEPER FALSE) + SET(TD_BUILD_KEEPER_INTERNAL TRUE) +ELSE() + SET(TD_BUILD_KEEPER FALSE) +ENDIF() IF("${BUILD_TOOLS}" STREQUAL "") IF(TD_LINUX) From 02295c407141183e91035738067077a441c20a15 Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:59:25 +0800 Subject: [PATCH 076/142] docs:Update description of "numOfCommitThread" 01-taosd.md --- docs/zh/14-reference/01-components/01-taosd.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index fbf086bf6b..fdc7e24163 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -163,7 +163,7 @@ charset 的有效值是 UTF-8。 | 参数名称 | 参数说明 | | :----------------: | :---------------------------------------------: | -| numOfCommitThreads | 写入线程的最大数量,取值范围 0-1024,缺省值为 4 | +| numOfCommitThreads | 落盘线程的最大数量,取值范围 0-1024,缺省值为 4 | ### 日志相关 @@ -458,4 +458,4 @@ TDengine 的日志文件主要包括普通日志和慢日志两种类型。 3. 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy.mm.dd 文件里。 4. 慢日志文件不自动删除,不压缩。 5. 
使用和普通日志文件相同的三个参数 logDir, minimalLogDirGB, asyncLog。另外两个参数 numOfLogLines,logKeepDays 不适用于慢日志。 - \ No newline at end of file + From 9d76aa91bc74faab85f827c8b5e5fa143497b0d9 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 25 Oct 2024 10:59:51 +0800 Subject: [PATCH 077/142] change transport log level --- source/libs/transport/inc/transComm.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 3a4f11ac81..d835d12c79 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -278,19 +278,19 @@ bool transAsyncPoolIsEmpty(SAsyncPool* pool); } \ } while (0) -#define ASYNC_CHECK_HANDLE(idMgt, id, exh1) \ - do { \ - if (id > 0) { \ - SExHandle* exh2 = transAcquireExHandle(idMgt, id); \ - if (exh2 == NULL || exh1 != exh2 || (exh2 != NULL && exh2->refId != id)) { \ - tError("handle not match, exh1:%p, exh2:%p, refId:%"PRId64"", exh1, exh2, id); \ - code = TSDB_CODE_INVALID_MSG; \ - goto _return1; \ - } \ - } else { \ - tError("invalid handle to release"); \ - goto _return2; \ - } \ +#define ASYNC_CHECK_HANDLE(idMgt, id, exh1) \ + do { \ + if (id > 0) { \ + SExHandle* exh2 = transAcquireExHandle(idMgt, id); \ + if (exh2 == NULL || exh1 != exh2 || (exh2 != NULL && exh2->refId != id)) { \ + tDebug("handle not match, exh1:%p, exh2:%p, refId:%" PRId64 "", exh1, exh2, id); \ + code = TSDB_CODE_INVALID_MSG; \ + goto _return1; \ + } \ + } else { \ + tDebug("invalid handle to release"); \ + goto _return2; \ + } \ } while (0) int32_t transInitBuffer(SConnBuffer* buf); From 9f111b60ab37f7bee8d3599719db6b3efc815bdb Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 03:50:16 +0000 Subject: [PATCH 078/142] fix/TD-32622-add-closed-hash-fix-case --- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 55d42646d4..0f27e04303 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -586,6 +586,13 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) { pMgmt->hash = NULL; } + void *pIter = taosHashIterate(pMgmt->closedHash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + vmFreeVnodeObj(ppVnode); + pIter = taosHashIterate(pMgmt->closedHash, pIter); + } + if (pMgmt->closedHash != NULL) { taosHashCleanup(pMgmt->closedHash); pMgmt->closedHash = NULL; From 7a6c21814ebce207812d38c2cfb58687cdc8e66a Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 04:50:16 +0000 Subject: [PATCH 079/142] fix/TD-32622-add-closed-hash-fix-case --- source/dnode/mgmt/mgmt_vnode/inc/vmInt.h | 2 +- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 8 ++-- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 50 ++++++++++++--------- 3 files changed, 34 insertions(+), 26 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 5bf151fced..1c08442bef 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -95,7 +95,7 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId); SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict); void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode); int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl); -void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal); +void 
vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed); // vmHandle.c SArray *vmGetMsgHandles(); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 7e950ef1be..04d64a7b33 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -535,7 +535,7 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path)); bool commitAndRemoveWal = vnodeShouldRemoveWal(pVnode->pImpl); - vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal); + vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal, true); int32_t diskPrimary = wrapperCfg.diskPrimary; char path[TSDB_FILENAME_LEN] = {0}; @@ -683,7 +683,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { } dInfo("vgId:%d, close vnode", srcVgId); - vmCloseVnode(pMgmt, pVnode, true); + vmCloseVnode(pMgmt, pVnode, true, true); int32_t diskPrimary = wrapperCfg.diskPrimary; char srcPath[TSDB_FILENAME_LEN] = {0}; @@ -792,7 +792,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path)); bool commitAndRemoveWal = vnodeShouldRemoveWal(pVnode->pImpl); - vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal); + vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal, true); int32_t diskPrimary = wrapperCfg.diskPrimary; char path[TSDB_FILENAME_LEN] = {0}; @@ -860,7 +860,7 @@ int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return code; } - vmCloseVnode(pMgmt, pVnode, false); + vmCloseVnode(pMgmt, pVnode, false, false); if (vmWriteVnodeListToFile(pMgmt) != 0) { dError("vgId:%d, failed to write vnode list since %s", vgId, terrstr()); } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 0f27e04303..20385d4e6b 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -183,6 +183,7 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { vmFreeVnodeObj(&pOld); } + dInfo("vgId:%d, remove from closedHash", pVnode->vgId); r = taosHashRemove(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t)); if (r != 0) { dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId); @@ -192,7 +193,7 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { return code; } -void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) { +void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed) { char path[TSDB_FILENAME_LEN] = {0}; bool atExit = true; @@ -205,27 +206,34 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) if (r != 0) { dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId); } + if (keepClosed) { + SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj)); + (void)memset(pClosedVnode, 0, sizeof(SVnodeObj)); + if (pVnode == NULL) { + dError("vgId:%d, failed to alloc vnode since %s", pVnode->vgId, terrstr()); + (void)taosThreadRwlockUnlock(&pMgmt->lock); + return; + } - SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj)); - if (pVnode == NULL) { - dError("vgId:%d, failed to alloc vnode since %s", pVnode->vgId, terrstr()); - (void)taosThreadRwlockUnlock(&pMgmt->lock); - return; - } + pClosedVnode->vgId = pVnode->vgId; + pClosedVnode->dropped = pVnode->dropped; + pClosedVnode->vgVersion = pVnode->vgVersion; + 
pClosedVnode->diskPrimary = pVnode->diskPrimary; + pClosedVnode->toVgId = pVnode->toVgId; - *pClosedVnode = *pVnode; - - SVnodeObj *pOld = NULL; - r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); - if (r != 0) { - dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId); - } - if (pOld) { - vmFreeVnodeObj(&pOld); - } - r = taosHashPut(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), &pClosedVnode, sizeof(SVnodeObj *)); - if (r != 0) { - dError("vgId:%d, failed to put vnode to closedHash", pVnode->vgId); + SVnodeObj *pOld = NULL; + r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); + if (r != 0) { + dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId); + } + if (pOld) { + vmFreeVnodeObj(&pOld); + } + dInfo("vgId:%d, put vnode to closedHash", pVnode->vgId); + r = taosHashPut(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), &pClosedVnode, sizeof(SVnodeObj *)); + if (r != 0) { + dError("vgId:%d, failed to put vnode to closedHash", pVnode->vgId); + } } (void)taosThreadRwlockUnlock(&pMgmt->lock); @@ -508,7 +516,7 @@ static void *vmCloseVnodeInThread(void *param) { pMgmt->state.openVnodes, pMgmt->state.totalVnodes); tmsgReportStartup("vnode-close", stepDesc); - vmCloseVnode(pMgmt, pVnode, false); + vmCloseVnode(pMgmt, pVnode, false, false); } dInfo("thread:%d, numOfVnodes:%d is closed", pThread->threadIndex, pThread->vnodeNum); From 6b2d338a20df26baed502f79f7b127d1a60dfc78 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Fri, 25 Oct 2024 13:47:09 +0800 Subject: [PATCH 080/142] enh(executor):avoid use dangerous functions --- include/libs/executor/executor.h | 5 +- .../libs/executor/src/anomalywindowoperator.c | 2 +- source/libs/executor/src/exchangeoperator.c | 2 +- source/libs/executor/src/executor.c | 9 +-- source/libs/executor/src/scanoperator.c | 2 +- source/libs/executor/src/sysscanoperator.c | 67 +++++++++++-------- source/libs/executor/test/executorTests.cpp | 4 +- source/libs/executor/test/lhashTests.cpp | 2 +- source/libs/qworker/src/qwUtil.c | 2 +- source/libs/stream/src/streamState.c | 2 +- source/libs/stream/src/tstreamFileState.c | 8 +-- 11 files changed, 60 insertions(+), 45 deletions(-) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index ae26d5f2ae..fa17abdebc 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -151,8 +151,9 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, * @param tversion * @return */ -int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, - int32_t* tversion, int32_t idx, bool* tbGet); +int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, int32_t dbNameBuffLen, char* tableName, + int32_t tbaleNameBuffLen, int32_t* sversion, int32_t* tversion, int32_t idx, + bool* tbGet); /** * The main task execution function, including query on both table and multiple tables, diff --git a/source/libs/executor/src/anomalywindowoperator.c b/source/libs/executor/src/anomalywindowoperator.c index 7f3430b837..d03e527c2b 100644 --- a/source/libs/executor/src/anomalywindowoperator.c +++ b/source/libs/executor/src/anomalywindowoperator.c @@ -86,7 +86,7 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p pOperator->exprSupp.hasWindowOrGroup = true; pInfo->tsSlotId = ((SColumnNode*)pAnomalyNode->window.pTspk)->slotId; - strncpy(pInfo->anomalyOpt, pAnomalyNode->anomalyOpt, 
sizeof(pInfo->anomalyOpt)); + tstrncpy(pInfo->anomalyOpt, pAnomalyNode->anomalyOpt, sizeof(pInfo->anomalyOpt)); if (pAnomalyNode->window.pExprs != NULL) { int32_t numOfScalarExpr = 0; diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index 60442c34ee..042fcf0120 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -320,7 +320,7 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo, const if (!pInfo->pTaskId) { return terrno; } - strncpy(pInfo->pTaskId, id, len); + tstrncpy(pInfo->pTaskId, id, len); for (int32_t i = 0; i < numOfSources; ++i) { SSourceDataInfo dataInfo = {0}; dataInfo.status = EX_SOURCE_DATA_NOT_READY; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 27dd687f40..019b4faed9 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -545,8 +545,9 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI return code; } -int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, - int32_t* tversion, int32_t idx, bool* tbGet) { +int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, int32_t dbNameBuffLen, char* tableName, + int32_t tbaleNameBuffLen, int32_t* sversion, int32_t* tversion, int32_t idx, + bool* tbGet) { *tbGet = false; if (tinfo == NULL || dbName == NULL || tableName == NULL) { @@ -567,12 +568,12 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table *sversion = pSchemaInfo->sw->version; *tversion = pSchemaInfo->tversion; if (pSchemaInfo->dbname) { - strcpy(dbName, pSchemaInfo->dbname); + tstrncpy(dbName, pSchemaInfo->dbname, dbNameBuffLen); } else { dbName[0] = 0; } if (pSchemaInfo->tablename) { - strcpy(tableName, pSchemaInfo->tablename); + tstrncpy(tableName, pSchemaInfo->tablename, tbaleNameBuffLen); } else { tableName[0] = 0; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index bae9926f63..95846087d0 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -6345,7 +6345,7 @@ int32_t fillTableCountScanDataBlock(STableCountScanSupp* pSupp, char* dbName, ch QUERY_CHECK_NULL(colInfoData, code, lino, _end, terrno); if (strlen(stbName) != 0) { char varStbName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - strncpy(varDataVal(varStbName), stbName, TSDB_TABLE_NAME_LEN); + tstrncpy(varDataVal(varStbName), stbName, TSDB_TABLE_NAME_LEN); varDataSetLen(varStbName, strlen(stbName)); code = colDataSetVal(colInfoData, 0, varStbName, false); QUERY_CHECK_CODE(code, lino, _end); diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 051a06ba5c..108cf78204 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -425,7 +425,7 @@ static bool sysTableIsOperatorCondOnOneTable(SNode* pCond, char* condTable) { SValueNode* pValue = (SValueNode*)node->pRight; if (pValue->node.resType.type == TSDB_DATA_TYPE_NCHAR || pValue->node.resType.type == TSDB_DATA_TYPE_VARCHAR) { char* value = nodesGetValueFromNode(pValue); - strncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN); + tstrncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN); return true; } } @@ -914,41 +914,41 @@ _end: } } -int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* 
len) { +int32_t convertTagDataToStr(char* str, int32_t strBuffLen, int type, void* buf, int32_t bufSize, int32_t* len) { int32_t n = 0; switch (type) { case TSDB_DATA_TYPE_NULL: - n = sprintf(str, "null"); + n = tsnprintf(str, strBuffLen, "null"); break; case TSDB_DATA_TYPE_BOOL: - n = sprintf(str, (*(int8_t*)buf) ? "true" : "false"); + n = tsnprintf(str, strBuffLen, (*(int8_t*)buf) ? "true" : "false"); break; case TSDB_DATA_TYPE_TINYINT: - n = sprintf(str, "%d", *(int8_t*)buf); + n = tsnprintf(str, strBuffLen, "%d", *(int8_t*)buf); break; case TSDB_DATA_TYPE_SMALLINT: - n = sprintf(str, "%d", *(int16_t*)buf); + n = tsnprintf(str, strBuffLen, "%d", *(int16_t*)buf); break; case TSDB_DATA_TYPE_INT: - n = sprintf(str, "%d", *(int32_t*)buf); + n = tsnprintf(str, strBuffLen, "%d", *(int32_t*)buf); break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - n = sprintf(str, "%" PRId64, *(int64_t*)buf); + n = tsnprintf(str, strBuffLen, "%" PRId64, *(int64_t*)buf); break; case TSDB_DATA_TYPE_FLOAT: - n = sprintf(str, "%.5f", GET_FLOAT_VAL(buf)); + n = tsnprintf(str, strBuffLen, "%.5f", GET_FLOAT_VAL(buf)); break; case TSDB_DATA_TYPE_DOUBLE: - n = sprintf(str, "%.9f", GET_DOUBLE_VAL(buf)); + n = tsnprintf(str, strBuffLen, "%.9f", GET_DOUBLE_VAL(buf)); break; case TSDB_DATA_TYPE_BINARY: @@ -973,19 +973,19 @@ int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int n = length; break; case TSDB_DATA_TYPE_UTINYINT: - n = sprintf(str, "%u", *(uint8_t*)buf); + n = tsnprintf(str, strBuffLen, "%u", *(uint8_t*)buf); break; case TSDB_DATA_TYPE_USMALLINT: - n = sprintf(str, "%u", *(uint16_t*)buf); + n = tsnprintf(str, strBuffLen, "%u", *(uint16_t*)buf); break; case TSDB_DATA_TYPE_UINT: - n = sprintf(str, "%u", *(uint32_t*)buf); + n = tsnprintf(str, strBuffLen, "%u", *(uint32_t*)buf); break; case TSDB_DATA_TYPE_UBIGINT: - n = sprintf(str, "%" PRIu64, *(uint64_t*)buf); + n = tsnprintf(str, strBuffLen, "%" PRIu64, *(uint64_t*)buf); break; default: @@ -1065,14 +1065,21 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, int8_t tagType = (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); - char tagTypeStr[VARSTR_HEADER_SIZE + 32]; - int tagTypeLen = sprintf(varDataVal(tagTypeStr), "%s", tDataTypes[tagType].name); + int32_t tagStrBufflen = 32; + char tagTypeStr[VARSTR_HEADER_SIZE + tagStrBufflen]; + int tagTypeLen = tsnprintf(varDataVal(tagTypeStr), tagStrBufflen, "%s", tDataTypes[tagType].name); + tagStrBufflen -= tagTypeLen; + if (tagStrBufflen <= 0) { + code = TSDB_CODE_INVALID_PARA; + QUERY_CHECK_CODE(code, lino, _end); + } + if (tagType == TSDB_DATA_TYPE_NCHAR) { - tagTypeLen += sprintf( - varDataVal(tagTypeStr) + tagTypeLen, "(%d)", + tagTypeLen += tsnprintf( + varDataVal(tagTypeStr) + tagTypeLen, tagStrBufflen, "(%d)", (int32_t)(((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } else if (IS_VAR_DATA_TYPE(tagType)) { - tagTypeLen += sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)", + tagTypeLen += tsnprintf(varDataVal(tagTypeStr) + tagTypeLen, tagStrBufflen, "(%d)", (int32_t)((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE)); } varDataSetLen(tagTypeStr, tagTypeLen); @@ -1127,7 +1134,7 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, QUERY_CHECK_NULL(tagVarChar, code, lino, _end, terrno); int32_t len = -1; if (tagLen 
> 0) - convertTagDataToStr(varDataVal(tagVarChar), tagType, tagData, tagLen, &len); + convertTagDataToStr(varDataVal(tagVarChar), bufSize + 1 - VARSTR_HEADER_SIZE, tagType, tagData, tagLen, &len); else len = 0; varDataSetLen(tagVarChar, len); @@ -1197,13 +1204,19 @@ static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, int8_t colType = schemaRow->pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); - char colTypeStr[VARSTR_HEADER_SIZE + 32]; - int colTypeLen = sprintf(varDataVal(colTypeStr), "%s", tDataTypes[colType].name); + int32_t colStrBufflen = 32; + char colTypeStr[VARSTR_HEADER_SIZE + colStrBufflen]; + int colTypeLen = tsnprintf(varDataVal(colTypeStr), colStrBufflen, "%s", tDataTypes[colType].name); + colStrBufflen -= colTypeLen; + if (colStrBufflen <= 0) { + code = TSDB_CODE_INVALID_PARA; + QUERY_CHECK_CODE(code, lino, _end); + } if (colType == TSDB_DATA_TYPE_VARCHAR) { - colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + colTypeLen += tsnprintf(varDataVal(colTypeStr) + colTypeLen, colStrBufflen, "(%d)", (int32_t)(schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE)); } else if (colType == TSDB_DATA_TYPE_NCHAR) { - colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + colTypeLen += tsnprintf(varDataVal(colTypeStr) + colTypeLen, colStrBufflen, "(%d)", (int32_t)((schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } varDataSetLen(colTypeStr, colTypeLen); @@ -2019,7 +2032,7 @@ static EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) { SValueNode* node = (SValueNode*)pNode; char* dbName = nodesGetValueFromNode(node); - strncpy(pContext, varDataVal(dbName), varDataLen(dbName)); + tstrncpy((char*)pContext, varDataVal(dbName), varDataLen(dbName)); *((char*)pContext + varDataLen(dbName)) = 0; return DEAL_RES_END; // stop walk } @@ -2056,11 +2069,11 @@ static int32_t doSysTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) getDBNameFromCondition(pInfo->pCondition, dbName); if (strncasecmp(name, TSDB_INS_TABLE_COMPACTS, TSDB_TABLE_FNAME_LEN) != 0 && strncasecmp(name, TSDB_INS_TABLE_COMPACT_DETAILS, TSDB_TABLE_FNAME_LEN) != 0) { - sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); + tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName); } } else if (strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0) { getDBNameFromCondition(pInfo->pCondition, dbName); - if (dbName[0]) sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); + if (dbName[0]) tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName); (void)sysTableIsCondOnOneTable(pInfo->pCondition, pInfo->req.filterTb); } diff --git a/source/libs/executor/test/executorTests.cpp b/source/libs/executor/test/executorTests.cpp index ff33732b23..87887d2b2f 100644 --- a/source/libs/executor/test/executorTests.cpp +++ b/source/libs/executor/test/executorTests.cpp @@ -115,7 +115,7 @@ SSDataBlock* getDummyBlock(SOperatorInfo* pOperator) { int32_t code = colDataSetVal(pColInfo, i, reinterpret_cast(&v), false); ASSERT(code == 0); - // sprintf(buf, "this is %d row", i); + // tsnprintf(buf, "this is %d row", i); // STR_TO_VARSTR(b1, buf); // // SColumnInfoData* pColInfo2 = static_cast(TARRAY_GET_ELEM(pBlock->pDataBlock, 1)); @@ -179,7 +179,7 @@ SSDataBlock* get2ColsDummyBlock(SOperatorInfo* pOperator) { code = colDataSetVal(pColInfo1, i, reinterpret_cast(&v), false); ASSERT(code == 0); - // 
sprintf(buf, "this is %d row", i); + // tsnprintf(buf, "this is %d row", i); // STR_TO_VARSTR(b1, buf); // // SColumnInfoData* pColInfo2 = static_cast(TARRAY_GET_ELEM(pBlock->pDataBlock, 1)); diff --git a/source/libs/executor/test/lhashTests.cpp b/source/libs/executor/test/lhashTests.cpp index daf59c6058..89e1cd2b07 100644 --- a/source/libs/executor/test/lhashTests.cpp +++ b/source/libs/executor/test/lhashTests.cpp @@ -26,7 +26,7 @@ TEST(testCase, linear_hash_Tests) { taosSeedRand(taosGetTimestampSec()); - strcpy(tsTempDir, "/tmp/"); + tstrncpy((char*)tsTempDir, "/tmp/", sizeof(tsTempDir)); _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index bebb9b288a..ef07a42629 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -533,7 +533,7 @@ int32_t qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) { while (true) { tbGet = false; - code = qGetQueryTableSchemaVersion(pTaskInfo, dbFName, tbName, &tbInfo.sversion, &tbInfo.tversion, i, &tbGet); + code = qGetQueryTableSchemaVersion(pTaskInfo, dbFName, TSDB_DB_FNAME_LEN, tbName, TSDB_TABLE_NAME_LEN, &tbInfo.sversion, &tbInfo.tversion, i, &tbGet); if (TSDB_CODE_SUCCESS != code || !tbGet) { break; } diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index cfe476540c..2791e3cead 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -120,7 +120,7 @@ SStreamState* streamStateOpen(const char* path, void* pTask, int64_t streamId, i SStreamTask* pStreamTask = pTask; pState->streamId = streamId; pState->taskId = taskId; - sprintf(pState->pTdbState->idstr, "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId); + tsnprintf(pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr), "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId); code = streamTaskSetDb(pStreamTask->pMeta, pTask, pState->pTdbState->idstr); QUERY_CHECK_CODE(code, lino, _end); diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 424845e4f2..d4a181f89f 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -777,7 +777,7 @@ _end: int32_t forceRemoveCheckpoint(SStreamFileState* pFileState, int64_t checkpointId) { char keyBuf[128] = {0}; - sprintf(keyBuf, "%s:%" PRId64 "", TASK_KEY, checkpointId); + tsnprintf(keyBuf, sizeof(keyBuf), "%s:%" PRId64 "", TASK_KEY, checkpointId); return streamDefaultDel_rocksdb(pFileState->pFileStore, keyBuf); } @@ -799,14 +799,14 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { } memcpy(buf, val, len); buf[len] = 0; - maxCheckPointId = atol((char*)buf); + maxCheckPointId = taosStr2Int64((char*)buf, NULL, 10); taosMemoryFree(val); } for (int64_t i = maxCheckPointId; i > 0; i--) { char buf[128] = {0}; void* val = 0; int32_t len = 0; - sprintf(buf, "%s:%" PRId64 "", TASK_KEY, i); + tsnprintf(buf, sizeof(buf), "%s:%" PRId64 "", TASK_KEY, i); code = streamDefaultGet_rocksdb(pFileState->pFileStore, buf, &val, &len); if (code != 0) { return TSDB_CODE_FAILED; @@ -816,7 +816,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { taosMemoryFree(val); TSKEY ts; - ts = atol((char*)buf); + ts = taosStr2Int64((char*)buf, NULL, 10); if (ts < mark) { // statekey winkey.ts < mark int32_t tmpRes = forceRemoveCheckpoint(pFileState, i); From 25343b914def17a929799f5bdec93f20c67a6235 Mon Sep 17 00:00:00 2001 
From: dmchen Date: Fri, 25 Oct 2024 05:51:40 +0000 Subject: [PATCH 081/142] fix/TD-32622-add-closed-hash-fix-case --- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 04d64a7b33..7558f6f3de 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -683,7 +683,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { } dInfo("vgId:%d, close vnode", srcVgId); - vmCloseVnode(pMgmt, pVnode, true, true); + vmCloseVnode(pMgmt, pVnode, true, false); int32_t diskPrimary = wrapperCfg.diskPrimary; char srcPath[TSDB_FILENAME_LEN] = {0}; From 127c5f5032f8159838b7630fcbb6f044b46705d6 Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Fri, 25 Oct 2024 15:08:46 +0800 Subject: [PATCH 082/142] docs: Update 01-docker.md --- docs/zh/04-get-started/01-docker.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/04-get-started/01-docker.md b/docs/zh/04-get-started/01-docker.md index cadde10e0c..848a7fd499 100644 --- a/docs/zh/04-get-started/01-docker.md +++ b/docs/zh/04-get-started/01-docker.md @@ -17,7 +17,7 @@ docker pull tdengine/tdengine:latest 或者指定版本的容器镜像: ```shell -docker pull tdengine/tdengine:3.0.1.4 +docker pull tdengine/tdengine:3.3.3.0 ``` 然后只需执行下面的命令: @@ -121,4 +121,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s); ``` -在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 \ No newline at end of file +在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 From a34ce8f773949a8b0986af43706b3cad0678db19 Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 07:39:18 +0000 Subject: [PATCH 083/142] fix/remove-monitor-error-log --- source/libs/monitor/src/monFramework.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/monitor/src/monFramework.c b/source/libs/monitor/src/monFramework.c index a2d03bbd6a..0dbf6e091a 100644 --- a/source/libs/monitor/src/monFramework.c +++ b/source/libs/monitor/src/monFramework.c @@ -183,7 +183,7 @@ void monGenClusterInfoTable(SMonInfo *pMonitor){ } if (taosHashRemove(tsMonitor.metrics, metric_names[i], strlen(metric_names[i])) != 0) { - uError("failed to remove metric %s", metric_names[i]); + uTrace("failed to remove metric %s", metric_names[i]); } } @@ -652,7 +652,7 @@ void monGenMnodeRoleTable(SMonInfo *pMonitor){ } if (taosHashRemove(tsMonitor.metrics, mnodes_role_gauges[i], strlen(mnodes_role_gauges[i])) != 0) { - uError("failed to remove metric %s", mnodes_role_gauges[i]); + uTrace("failed to remove metric %s", mnodes_role_gauges[i]); } } @@ -725,7 +725,7 @@ void monGenVnodeRoleTable(SMonInfo *pMonitor){ } if (taosHashRemove(tsMonitor.metrics, vnodes_role_gauges[i], strlen(vnodes_role_gauges[i])) != 0) { - uError("failed to remove metric %s", vnodes_role_gauges[i]); + uTrace("failed to remove metric %s", vnodes_role_gauges[i]); } } From c6ef1333f6d5149ef41679e741b68886e2afd97c Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 07:48:32 +0000 Subject: [PATCH 084/142] fix/TD-32681-monitor-test-ci-fail --- source/libs/monitor/test/monTest.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/libs/monitor/test/monTest.cpp b/source/libs/monitor/test/monTest.cpp index 2660cff216..a788a5a341 100644 --- 
a/source/libs/monitor/test/monTest.cpp +++ b/source/libs/monitor/test/monTest.cpp @@ -26,7 +26,10 @@ class MonitorTest : public ::testing::Test { monInit(&cfg); } - static void TearDownTestSuite() { monCleanup(); } + static void TearDownTestSuite() { + monCleanup(); + taosMsleep(100); + } public: void SetUp() override {} From e71fb627230d9d9d6feb2aac90713f0517a92111 Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 08:05:10 +0000 Subject: [PATCH 085/142] fix/TD-32621-remove-from-hash-when-creating-fail --- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index f55cb648e0..99d76d5531 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -442,7 +442,8 @@ _OVER: dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(r)); } if (r == 0) { - r = taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t)); + dInfo("vgId:%d, remove from hash", req.vgId); + r = taosHashRemove(pMgmt->hash, &req.vgId, sizeof(int32_t)); if (r != 0) { dError("vgId:%d, failed to remove vnode since %s", req.vgId, tstrerror(r)); } From 077005a9f99f939905c3323d8cb3b1b529ead3a6 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Fri, 25 Oct 2024 16:08:38 +0800 Subject: [PATCH 086/142] fix ci issue --- source/libs/executor/src/sysscanoperator.c | 4 ++-- source/libs/stream/src/streamState.c | 2 +- source/libs/stream/src/tstreamFileState.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 108cf78204..20d290db01 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -2069,11 +2069,11 @@ static int32_t doSysTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) getDBNameFromCondition(pInfo->pCondition, dbName); if (strncasecmp(name, TSDB_INS_TABLE_COMPACTS, TSDB_TABLE_FNAME_LEN) != 0 && strncasecmp(name, TSDB_INS_TABLE_COMPACT_DETAILS, TSDB_TABLE_FNAME_LEN) != 0) { - tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName); + TAOS_UNUSED(tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName)); } } else if (strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0) { getDBNameFromCondition(pInfo->pCondition, dbName); - if (dbName[0]) tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName); + if (dbName[0]) TAOS_UNUSED(tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName)); (void)sysTableIsCondOnOneTable(pInfo->pCondition, pInfo->req.filterTb); } diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 2791e3cead..0e2ff48fa5 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -120,7 +120,7 @@ SStreamState* streamStateOpen(const char* path, void* pTask, int64_t streamId, i SStreamTask* pStreamTask = pTask; pState->streamId = streamId; pState->taskId = taskId; - tsnprintf(pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr), "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId); + TAOS_UNUSED(tsnprintf(pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr), "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId)); code = streamTaskSetDb(pStreamTask->pMeta, pTask, pState->pTdbState->idstr); QUERY_CHECK_CODE(code, lino, 
_end); diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index d4a181f89f..c630010598 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -777,7 +777,7 @@ _end: int32_t forceRemoveCheckpoint(SStreamFileState* pFileState, int64_t checkpointId) { char keyBuf[128] = {0}; - tsnprintf(keyBuf, sizeof(keyBuf), "%s:%" PRId64 "", TASK_KEY, checkpointId); + TAOS_UNUSED(tsnprintf(keyBuf, sizeof(keyBuf), "%s:%" PRId64 "", TASK_KEY, checkpointId)); return streamDefaultDel_rocksdb(pFileState->pFileStore, keyBuf); } @@ -806,7 +806,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { char buf[128] = {0}; void* val = 0; int32_t len = 0; - tsnprintf(buf, sizeof(buf), "%s:%" PRId64 "", TASK_KEY, i); + TAOS_UNUSED(tsnprintf(buf, sizeof(buf), "%s:%" PRId64 "", TASK_KEY, i)); code = streamDefaultGet_rocksdb(pFileState->pFileStore, buf, &val, &len); if (code != 0) { return TSDB_CODE_FAILED; From f5626d45bc3eedd33acb08ec3a30a07b68b4122a Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Fri, 25 Oct 2024 16:13:43 +0800 Subject: [PATCH 087/142] fix issue for windows compile --- source/libs/executor/src/sysscanoperator.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 20d290db01..112c52ba23 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1065,10 +1065,10 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, int8_t tagType = (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); - int32_t tagStrBufflen = 32; - char tagTypeStr[VARSTR_HEADER_SIZE + tagStrBufflen]; - int tagTypeLen = tsnprintf(varDataVal(tagTypeStr), tagStrBufflen, "%s", tDataTypes[tagType].name); - tagStrBufflen -= tagTypeLen; + const int32_t bufflen = 32; + char tagTypeStr[VARSTR_HEADER_SIZE + bufflen]; + int tagTypeLen = tsnprintf(varDataVal(tagTypeStr), bufflen, "%s", tDataTypes[tagType].name); + int32_t tagStrBufflen = bufflen - tagTypeLen; if (tagStrBufflen <= 0) { code = TSDB_CODE_INVALID_PARA; QUERY_CHECK_CODE(code, lino, _end); @@ -1204,10 +1204,10 @@ static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, int8_t colType = schemaRow->pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); - int32_t colStrBufflen = 32; - char colTypeStr[VARSTR_HEADER_SIZE + colStrBufflen]; - int colTypeLen = tsnprintf(varDataVal(colTypeStr), colStrBufflen, "%s", tDataTypes[colType].name); - colStrBufflen -= colTypeLen; + const int32_t bufflen = 32; + char colTypeStr[VARSTR_HEADER_SIZE + bufflen]; + int colTypeLen = tsnprintf(varDataVal(colTypeStr), bufflen, "%s", tDataTypes[colType].name); + int32_t colStrBufflen = bufflen - colTypeLen; if (colStrBufflen <= 0) { code = TSDB_CODE_INVALID_PARA; QUERY_CHECK_CODE(code, lino, _end); From dd913a211dddd5e1582786a1a90c992f9b7f5f82 Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 08:48:54 +0000 Subject: [PATCH 088/142] doc/TD-32681-drop-dnode --- docs/en/14-reference/03-taos-sql/21-node.md | 10 ++++++++++ docs/zh/14-reference/03-taos-sql/21-node.md | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git 
a/docs/en/14-reference/03-taos-sql/21-node.md b/docs/en/14-reference/03-taos-sql/21-node.md index 2ebccb76f7..fd1a9df53e 100644 --- a/docs/en/14-reference/03-taos-sql/21-node.md +++ b/docs/en/14-reference/03-taos-sql/21-node.md @@ -32,6 +32,16 @@ DROP DNODE dnode_id Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted. +```sql +DROP DNODE dnode_id force +``` +Only online node is allowed to be deleted. Drop is executed forcely if the offline node need to be deleted. + +```sql +DROP DNODE dnode_id unsafe +``` +Drop is executed unsafely if the node with single replica is offline, and the data on it is not able to be restored. + ## Modify Dnode Configuration ```sql diff --git a/docs/zh/14-reference/03-taos-sql/21-node.md b/docs/zh/14-reference/03-taos-sql/21-node.md index 967cb51127..0137d0cd79 100644 --- a/docs/zh/14-reference/03-taos-sql/21-node.md +++ b/docs/zh/14-reference/03-taos-sql/21-node.md @@ -32,6 +32,16 @@ DROP DNODE dnode_id 注意删除 dnode 不等于停止相应的进程。实际中推荐先将一个 dnode 删除之后再停止其所对应的进程。 +```sql +DROP DNODE dnode_id force +``` +只有在线节点可以被删除。如果要强制删除离线节点,需要执行强制删除操作。 + +```sql +DROP DNODE dnode_id unsafe +``` +当节点上存在单副本,并且节点处于离线,如果要强制删除该节点,需要执行非安全删除,并且数据不可再恢复。 + ## 修改数据节点配置 ```sql From 417938d2b294eeb9cac553caeb1c6adc4ecdffe2 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Fri, 25 Oct 2024 17:21:28 +0800 Subject: [PATCH 089/142] fix issue --- source/libs/executor/src/sysscanoperator.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 112c52ba23..a997a95686 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -2032,8 +2032,7 @@ static EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) { SValueNode* node = (SValueNode*)pNode; char* dbName = nodesGetValueFromNode(node); - tstrncpy((char*)pContext, varDataVal(dbName), varDataLen(dbName)); - *((char*)pContext + varDataLen(dbName)) = 0; + tstrncpy((char*)pContext, varDataVal(dbName), varDataLen(dbName) + 1); return DEAL_RES_END; // stop walk } default: From c404086a5a1e5cdeae936198aeda0d79d921d2cd Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 09:51:57 +0000 Subject: [PATCH 090/142] doc/TD-32681-drop-dnode-add-option --- docs/en/14-reference/03-taos-sql/21-node.md | 12 +++--------- docs/zh/14-reference/03-taos-sql/21-node.md | 12 +++--------- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/docs/en/14-reference/03-taos-sql/21-node.md b/docs/en/14-reference/03-taos-sql/21-node.md index fd1a9df53e..cdc4bdd020 100644 --- a/docs/en/14-reference/03-taos-sql/21-node.md +++ b/docs/en/14-reference/03-taos-sql/21-node.md @@ -27,20 +27,14 @@ The preceding SQL command shows all dnodes in the cluster with the ID, endpoint, ## Delete a DNODE ```sql -DROP DNODE dnode_id +DROP DNODE dnode_id [force] [unsafe] ``` Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted. -```sql -DROP DNODE dnode_id force -``` -Only online node is allowed to be deleted. Drop is executed forcely if the offline node need to be deleted. +Only online node is allowed to be deleted. Drop is executed with force option if the offline node need to be deleted. -```sql -DROP DNODE dnode_id unsafe -``` -Drop is executed unsafely if the node with single replica is offline, and the data on it is not able to be restored. 
+Drop is executed with unsafe option if the node with single replica is offline, and the data on it is not able to be restored. ## Modify Dnode Configuration diff --git a/docs/zh/14-reference/03-taos-sql/21-node.md b/docs/zh/14-reference/03-taos-sql/21-node.md index 0137d0cd79..e3a672790c 100644 --- a/docs/zh/14-reference/03-taos-sql/21-node.md +++ b/docs/zh/14-reference/03-taos-sql/21-node.md @@ -27,20 +27,14 @@ SHOW DNODES; ## 删除数据节点 ```sql -DROP DNODE dnode_id +DROP DNODE dnode_id [force] [unsafe] ``` 注意删除 dnode 不等于停止相应的进程。实际中推荐先将一个 dnode 删除之后再停止其所对应的进程。 -```sql -DROP DNODE dnode_id force -``` -只有在线节点可以被删除。如果要强制删除离线节点,需要执行强制删除操作。 +只有在线节点可以被删除。如果要强制删除离线节点,需要执行强制删除操作, 即指定force选项。 -```sql -DROP DNODE dnode_id unsafe -``` -当节点上存在单副本,并且节点处于离线,如果要强制删除该节点,需要执行非安全删除,并且数据不可再恢复。 +当节点上存在单副本,并且节点处于离线,如果要强制删除该节点,需要执行非安全删除,即制定unsafe,并且数据不可再恢复。 ## 修改数据节点配置 From d63795fd835859f2a96d4d02cd3364ebd12821ea Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Fri, 25 Oct 2024 17:54:43 +0800 Subject: [PATCH 091/142] update sample code --- .../com/taos/example/ConsumerLoopFull.java | 15 ++--- .../com/taos/example/ConsumerLoopImp.java | 8 ++- .../com/taos/example/WsConsumerLoopFull.java | 15 ++--- .../com/taos/example/WsConsumerLoopImp.java | 8 ++- .../example/highvolume/DataBaseMonitor.java | 3 + .../taos/example/highvolume/SQLWriter.java | 3 + .../src/test/java/com/taos/test/TestAll.java | 57 +++++++++++++++---- 7 files changed, 81 insertions(+), 28 deletions(-) diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java index a399f3aa6a..647855dc48 100644 --- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java @@ -1,8 +1,9 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; +import com.fasterxml.jackson.core.JsonProcessingException; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.tmq.*; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.*; import java.time.Duration; @@ -60,7 +61,7 @@ public class ConsumerLoopFull { // ANCHOR_END: create_consumer } - public static void pollExample(TaosConsumer consumer) throws SQLException { + public static void pollExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: poll_data_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -73,7 +74,7 @@ public class ConsumerLoopFull { for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } } } catch (Exception ex) { @@ -91,7 +92,7 @@ public class ConsumerLoopFull { // ANCHOR_END: poll_data_code_piece } - public static void seekExample(TaosConsumer consumer) throws SQLException { + public static void seekExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: consumer_seek List topics = Collections.singletonList("topic_meters"); try { @@ -99,7 +100,7 @@ public class ConsumerLoopFull { consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); Set assignment = consumer.assignment(); - System.out.println("Now assignment: " + JSON.toJSONString(assignment)); + System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment)); ConsumerRecords records 
= ConsumerRecords.emptyRecord(); // make sure we have got some data @@ -125,7 +126,7 @@ public class ConsumerLoopFull { } - public static void commitExample(TaosConsumer consumer) throws SQLException { + public static void commitExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: commit_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -135,7 +136,7 @@ public class ConsumerLoopFull { for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } if (!records.isEmpty()) { // after processing the data, commit the offset manually diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java index a59bfc282f..378ef8ae6d 100644 --- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java @@ -1,7 +1,7 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.Connection; import java.sql.DriverManager; @@ -31,7 +31,11 @@ public class ConsumerLoopImp { final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() { @Override public void process(ResultBean result) { - System.out.println("data: " + JSON.toJSONString(result)); + try{ + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result)); + } catch (Exception e) { + throw new RuntimeException(e); + } } }; diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java index 6db65f47f2..02db97a5a9 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java @@ -1,8 +1,9 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; +import com.fasterxml.jackson.core.JsonProcessingException; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.tmq.*; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.*; import java.time.Duration; @@ -60,7 +61,7 @@ public class WsConsumerLoopFull { // ANCHOR_END: create_consumer } - public static void pollExample(TaosConsumer consumer) throws SQLException { + public static void pollExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: poll_data_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -73,7 +74,7 @@ public class WsConsumerLoopFull { for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } } } catch (Exception ex) { @@ -91,7 +92,7 @@ public class WsConsumerLoopFull { // ANCHOR_END: poll_data_code_piece } - public static void seekExample(TaosConsumer consumer) throws SQLException { + public static void seekExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: consumer_seek List topics = Collections.singletonList("topic_meters"); try { @@ -99,7 +100,7 @@ public class WsConsumerLoopFull { consumer.subscribe(topics); 
System.out.println("Subscribe topics successfully."); Set assignment = consumer.assignment(); - System.out.println("Now assignment: " + JSON.toJSONString(assignment)); + System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment)); ConsumerRecords records = ConsumerRecords.emptyRecord(); // make sure we have got some data @@ -125,7 +126,7 @@ public class WsConsumerLoopFull { } - public static void commitExample(TaosConsumer consumer) throws SQLException { + public static void commitExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: commit_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -135,7 +136,7 @@ public class WsConsumerLoopFull { for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } if (!records.isEmpty()) { // after processing the data, commit the offset manually diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java index 70e29503f8..77c6a4fd1b 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java @@ -1,7 +1,7 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.Connection; import java.sql.DriverManager; @@ -28,7 +28,11 @@ public abstract class WsConsumerLoopImp { final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() { @Override public void process(ResultBean result) { - System.out.println("data: " + JSON.toJSONString(result)); + try{ + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result)); + } catch (Exception e) { + throw new RuntimeException(e); + } } }; diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java index 8678f65231..fa6ebf0858 100644 --- a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java @@ -13,6 +13,9 @@ public class DataBaseMonitor { public DataBaseMonitor init() throws SQLException { if (conn == null) { String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + if (jdbcURL == null || jdbcURL == ""){ + jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + } conn = DriverManager.getConnection(jdbcURL); stmt = conn.createStatement(); } diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java index dc820f161c..1497992f6b 100644 --- a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java @@ -69,6 +69,9 @@ public class SQLWriter { */ private static Connection getConnection() throws SQLException { String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + if (jdbcURL == null || jdbcURL == ""){ + jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + } return DriverManager.getConnection(jdbcURL); } diff --git 
a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java index e014a3b315..a92ddd116c 100644 --- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java @@ -17,6 +17,37 @@ public class TestAll { stmt.execute("drop database if exists " + dbName); } } + waitTransaction(); + } + + public void dropTopic(String topicName) throws SQLException { + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + try (Connection conn = DriverManager.getConnection(jdbcUrl)) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop topic if exists " + topicName); + } + } + waitTransaction(); + } + + public void waitTransaction() throws SQLException { + + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + try (Connection conn = DriverManager.getConnection(jdbcUrl)) { + try (Statement stmt = conn.createStatement()) { + for (int i = 0; i < 10; i++) { + stmt.execute("show transactions"); + try (ResultSet resultSet = stmt.getResultSet()) { + if (resultSet.next()) { + int count = resultSet.getInt(1); + if (count == 0) { + break; + } + } + } + } + } + } } public void insertData() throws SQLException { @@ -104,14 +135,20 @@ public class TestAll { SubscribeDemo.main(args); } -// @Test -// public void testSubscribeJni() throws SQLException, InterruptedException { -// dropDB("power"); -// ConsumerLoopFull.main(args); -// } -// @Test -// public void testSubscribeWs() throws SQLException, InterruptedException { -// dropDB("power"); -// WsConsumerLoopFull.main(args); -// } + @Test + public void testSubscribeJni() throws SQLException, InterruptedException { + dropTopic("topic_meters"); + dropDB("power"); + ConsumerLoopFull.main(args); + dropTopic("topic_meters"); + dropDB("power"); + } + @Test + public void testSubscribeWs() throws SQLException, InterruptedException { + dropTopic("topic_meters"); + dropDB("power"); + WsConsumerLoopFull.main(args); + dropTopic("topic_meters"); + dropDB("power"); + } } From dde9ae82f2ffa1d24ebd59791cfc81fdb7325af8 Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 10:10:00 +0000 Subject: [PATCH 092/142] fix/TD-32622-add-lock-for-vnodes --- source/dnode/mgmt/mgmt_vnode/inc/vmInt.h | 2 +- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 22 ++++++++++++++++----- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 13 ------------ 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 0e1a4bc98e..b027763c63 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -41,7 +41,7 @@ typedef struct SVnodeMgmt { STfs *pTfs; TdThread thread; bool stop; - TdThreadMutex createLock; + TdThreadMutex fileLock; } SVnodeMgmt; typedef struct { diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 215a057618..80170cfa56 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -233,35 +233,47 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { goto _OVER; } + code = taosThreadMutexLock(&pMgmt->fileLock); + if (code != 0) { + lino = __LINE__; + goto _OVER; + } + pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH); if (pFile == NULL) { code = terrno; lino = __LINE__; - goto _OVER; + goto _OVER1; } int32_t len = strlen(buffer); if 
(taosWriteFile(pFile, buffer, len) <= 0) { code = terrno; lino = __LINE__; - goto _OVER; + goto _OVER1; } if (taosFsyncFile(pFile) < 0) { code = TAOS_SYSTEM_ERROR(errno); lino = __LINE__; - goto _OVER; + goto _OVER1; } code = taosCloseFile(&pFile); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); lino = __LINE__; - goto _OVER; + goto _OVER1; } - TAOS_CHECK_GOTO(taosRenameFile(file, realfile), &lino, _OVER); + TAOS_CHECK_GOTO(taosRenameFile(file, realfile), &lino, _OVER1); dInfo("succeed to write vnodes file:%s, vnodes:%d", realfile, numOfVnodes); +_OVER1: + int32_t ret = taosThreadMutexUnlock(&pMgmt->fileLock); + if (ret != 0) { + dError("failed to unlock since %s", tstrerror(ret)); + } + _OVER: if (pJson != NULL) tjsonDelete(pJson); if (buffer != NULL) taosMemoryFree(buffer); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index f55cb648e0..bd00e99bae 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -415,24 +415,11 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { goto _OVER; } - code = taosThreadMutexLock(&pMgmt->createLock); - if (code != 0) { - dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(code)); - goto _OVER; - } code = vmWriteVnodeListToFile(pMgmt); if (code != 0) { code = terrno != 0 ? terrno : code; - int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock); - if (ret != 0) { - dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret)); - } goto _OVER; } - int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock); - if (ret != 0) { - dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret)); - } _OVER: if (code != 0) { From 83ca164e97882deb0bb7c9816da2c527fe645deb Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 25 Oct 2024 10:13:15 +0000 Subject: [PATCH 093/142] fix/TD-32622-add-lock-for-vnodes-fix-compile --- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 20618dbdf3..b769791ec3 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -545,7 +545,7 @@ static void vmCleanup(SVnodeMgmt *pMgmt) { vmStopWorker(pMgmt); vnodeCleanup(); (void)taosThreadRwlockDestroy(&pMgmt->lock); - (void)taosThreadMutexDestroy(&pMgmt->createLock); + (void)taosThreadMutexDestroy(&pMgmt->fileLock); taosMemoryFree(pMgmt); } @@ -637,7 +637,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { goto _OVER; } - code = taosThreadMutexInit(&pMgmt->createLock, NULL); + code = taosThreadMutexInit(&pMgmt->fileLock, NULL); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); goto _OVER; From 81f08c9d822f5b4062e6515f555c03814deb5a60 Mon Sep 17 00:00:00 2001 From: qevolg <2227465945@qq.com> Date: Sun, 27 Oct 2024 09:22:58 +0800 Subject: [PATCH 094/142] chore(.github): modify taoskeeper github action workflow --- .github/workflows/taoskeeper-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml index 87b893666c..38dfb6d43c 100644 --- a/.github/workflows/taoskeeper-ci.yml +++ b/.github/workflows/taoskeeper-ci.yml @@ -1,4 +1,4 @@ -name: taoskeeper CI +name: TaosKeeper CI on: push: From 86e7371e8732df3e482006c11dac6d6227df8dc6 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Sun, 27 Oct 2024 15:50:57 +0800 Subject: [PATCH 
095/142] fix issue for windows compile --- source/libs/executor/src/sysscanoperator.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 112c52ba23..4b783d1b8d 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1065,10 +1065,10 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, int8_t tagType = (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); - const int32_t bufflen = 32; - char tagTypeStr[VARSTR_HEADER_SIZE + bufflen]; - int tagTypeLen = tsnprintf(varDataVal(tagTypeStr), bufflen, "%s", tDataTypes[tagType].name); - int32_t tagStrBufflen = bufflen - tagTypeLen; + int32_t tagStrBufflen = 32; + char tagTypeStr[VARSTR_HEADER_SIZE + 32]; + int tagTypeLen = tsnprintf(varDataVal(tagTypeStr), tagStrBufflen, "%s", tDataTypes[tagType].name); + tagStrBufflen -= tagTypeLen; if (tagStrBufflen <= 0) { code = TSDB_CODE_INVALID_PARA; QUERY_CHECK_CODE(code, lino, _end); @@ -1204,10 +1204,10 @@ static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, int8_t colType = schemaRow->pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); - const int32_t bufflen = 32; - char colTypeStr[VARSTR_HEADER_SIZE + bufflen]; - int colTypeLen = tsnprintf(varDataVal(colTypeStr), bufflen, "%s", tDataTypes[colType].name); - int32_t colStrBufflen = bufflen - colTypeLen; + int32_t colStrBufflen = 32; + char colTypeStr[VARSTR_HEADER_SIZE + 32]; + int colTypeLen = tsnprintf(varDataVal(colTypeStr), colStrBufflen, "%s", tDataTypes[colType].name); + colStrBufflen -= colTypeLen; if (colStrBufflen <= 0) { code = TSDB_CODE_INVALID_PARA; QUERY_CHECK_CODE(code, lino, _end); From 9b9f06eec3c52ee18b4571997a216599958c47e2 Mon Sep 17 00:00:00 2001 From: dmchen Date: Mon, 28 Oct 2024 01:23:52 +0000 Subject: [PATCH 096/142] fix/TD-32622-add-lock-for-vnodes-fix-compile --- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 80170cfa56..866072bc1a 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -204,6 +204,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { char file[PATH_MAX] = {0}; char realfile[PATH_MAX] = {0}; int32_t lino = 0; + int32_t ret = -1; int32_t nBytes = snprintf(file, sizeof(file), "%s%svnodes_tmp.json", pMgmt->path, TD_DIRSEP); if (nBytes <= 0 || nBytes >= sizeof(file)) { @@ -269,7 +270,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { dInfo("succeed to write vnodes file:%s, vnodes:%d", realfile, numOfVnodes); _OVER1: - int32_t ret = taosThreadMutexUnlock(&pMgmt->fileLock); + ret = taosThreadMutexUnlock(&pMgmt->fileLock); if (ret != 0) { dError("failed to unlock since %s", tstrerror(ret)); } From d34fc78a2265cf507fcec72d81008c3f3faed1c4 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Mon, 28 Oct 2024 11:18:06 +0800 Subject: [PATCH 097/142] fix: (last) eliminate redundant logs caused by incorrect return results --- source/dnode/vnode/src/tsdb/tsdbCache.c | 91 ++++++++++++------------- 1 file changed, 43 insertions(+), 48 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c 
b/source/dnode/vnode/src/tsdb/tsdbCache.c index 5583e464ed..cbb4f9e873 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -723,34 +723,32 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; { SLastCol *pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[0] != NULL) { + code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } + if (NULL != pLastCol) { + rocksdb_writebatch_delete(wb, keys_list[0], klen); + } + taosMemoryFreeClear(pLastCol); } - if (NULL != pLastCol) { - rocksdb_writebatch_delete(wb, keys_list[0], klen); - } - taosMemoryFreeClear(pLastCol); pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[1] != NULL) { + code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } + if (NULL != pLastCol) { + rocksdb_writebatch_delete(wb, keys_list[1], klen); + } + taosMemoryFreeClear(pLastCol); } - if (NULL != pLastCol) { - rocksdb_writebatch_delete(wb, keys_list[1], klen); - } - taosMemoryFreeClear(pLastCol); rocksdb_free(values_list[0]); rocksdb_free(values_list[1]); @@ -1218,14 +1216,13 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray SColVal *pColVal = &updCtx->colVal; SLastCol *pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[i] != NULL) { + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } } /* if (code) { @@ -1692,14 +1689,13 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA continue; } - code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at 
line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[i] != NULL) { + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } } SLastCol *pToFree = pLastCol; SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[j]; @@ -1959,14 +1955,13 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; for (int i = 0; i < numKeys; ++i) { SLastCol *pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[i] != NULL) { + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } } SIdxKey *idxKey = taosArrayGet(remainCols, i); SLastKey *pLastKey = &idxKey->key; From b55694ab19402a7dd435e10541ae0e180344f4da Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Mon, 28 Oct 2024 11:25:00 +0800 Subject: [PATCH 098/142] docs: 'ttlChangeOnWrite' default value --- docs/zh/14-reference/01-components/01-taosd.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index fdc7e24163..d09eb4764d 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -225,14 +225,14 @@ lossyColumns float|double | :--------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | | enableCoreFile | crash 时是否生成 core 文件;0: 不生成,1:生成;默认值 为 1; 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
    2、手动启动,就在 taosd 执行目录下。 | | udf | 是否启动 UDF 服务;0: 不启动,1:启动;默认值 为 0 | -| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变,1:改变 ;默认值 为 | +| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变,1:改变 ;默认值 为 0 | | tmqMaxTopicNum | 订阅最多可建立的 topic 数量; 取值范围 1-10000;缺省值 为20 | | maxTsmaNum | 集群内可创建的TSMA个数;取值范围:0-3;缺省值: 3 | ## taosd 监控指标 -taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。 +taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。 ### taosd\_cluster\_basic 表 @@ -458,4 +458,3 @@ TDengine 的日志文件主要包括普通日志和慢日志两种类型。 3. 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy.mm.dd 文件里。 4. 慢日志文件不自动删除,不压缩。 5. 使用和普通日志文件相同的三个参数 logDir, minimalLogDirGB, asyncLog。另外两个参数 numOfLogLines,logKeepDays 不适用于慢日志。 - From dbd4147e88871d7e10cc09c94b20fa020be68918 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Mon, 28 Oct 2024 11:41:18 +0800 Subject: [PATCH 099/142] docs: format for space --- docs/zh/14-reference/01-components/01-taosd.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index d09eb4764d..d11bbf4fa5 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -223,11 +223,11 @@ lossyColumns float|double | 参数名称 | 参数说明 | | :--------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| enableCoreFile | crash 时是否生成 core 文件;0: 不生成,1:生成;默认值 为 1; 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
    2、手动启动,就在 taosd 执行目录下。 | -| udf | 是否启动 UDF 服务;0: 不启动,1:启动;默认值 为 0 | -| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变,1:改变 ;默认值 为 0 | -| tmqMaxTopicNum | 订阅最多可建立的 topic 数量; 取值范围 1-10000;缺省值 为20 | -| maxTsmaNum | 集群内可创建的TSMA个数;取值范围:0-3;缺省值: 3 | +| enableCoreFile | crash 时是否生成 core 文件;0: 不生成,1:生成;默认值为 1; 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
    2、手动启动,就在 taosd 执行目录下。 | +| udf | 是否启动 UDF 服务;0: 不启动,1:启动;默认值为 0 | +| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变,1:改变;默认值为 0 | +| tmqMaxTopicNum | 订阅最多可建立的 topic 数量; 取值范围 1-10000;缺省值为20 | +| maxTsmaNum | 集群内可创建的TSMA个数;取值范围:0-3;缺省值为 3 | ## taosd 监控指标 From 0100d383eead8c110c8f21b31154ab7d6573c297 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 28 Oct 2024 15:02:03 +0800 Subject: [PATCH 100/142] enh:[TS-5441] cost too long in tmq write meta data by cache meta and vg info --- source/client/src/clientRawBlockWrite.c | 315 ++++++++++++------------ 1 file changed, 159 insertions(+), 156 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 2bd815b460..80403986aa 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -23,12 +23,12 @@ #include "tglobal.h" #include "tmsgtype.h" -#define RAW_NULL_CHECK(c) \ - do { \ - if (c == NULL) { \ - code = terrno; \ - goto end; \ - } \ +#define RAW_NULL_CHECK(c) \ + do { \ + if (c == NULL) { \ + code = terrno; \ + goto end; \ + } \ } while (0) #define RAW_FALSE_CHECK(c) \ @@ -52,7 +52,7 @@ #define TMQ_META_VERSION "1.0" -static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); +static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); } static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t, SColCmprWrapper* pColCmprRow, cJSON** pJson) { @@ -163,7 +163,7 @@ static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sche } RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); - end: +end: *pJson = json; } @@ -197,7 +197,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) { return code; } - end: +end: return code; } static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** pJson) { @@ -338,7 +338,7 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** break; } - end: +end: tFreeSMAltertbReq(&req); *pJson = json; } @@ -455,7 +455,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { cJSON* tvalue = NULL; if (IS_VAR_DATA_TYPE(pTagVal->type)) { - char* buf = NULL; + char* buf = NULL; int64_t bufSize = 0; if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) { bufSize = pTagVal->nData * 2 + 2 + 3; @@ -485,7 +485,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag)); } - end: +end: RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); taosArrayDestroy(pTagVals); } @@ -514,7 +514,7 @@ static void buildCreateCTableJson(SVCreateTbReq* pCreateReq, int32_t nReqs, cJSO } RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "createList", createList)); - end: +end: *pJson = json; } @@ -585,7 +585,7 @@ static void processAutoCreateTable(SMqDataRsp* rsp, char** string) { *string = cJSON_PrintUnformatted(pJson); cJSON_Delete(pJson); - end: +end: uDebug("auto created table return, sql json:%s", *string); for (int i = 0; decoder && pCreateReq && i < rsp->createTableNum; i++) { tDecoderClear(&decoder[i]); @@ -989,7 +989,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " create stable return, msg:%s", LOG_ID_VALUE, tstrerror(code)); destroyRequest(pRequest); tFreeSMCreateStbReq(&pReq); @@ -1023,9 
+1023,9 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { SCatalog* pCatalog = NULL; RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); SRequestConnInfo conn = {.pTrans = pRequest->pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)}; SName pName = {0}; toName(pRequest->pTscObj->acctId, pRequest->pDb, req.name, &pName); STableMeta* pTableMeta = NULL; @@ -1088,7 +1088,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " drop stable return, msg:%s", LOG_ID_VALUE, tstrerror(code)); destroyRequest(pRequest); tDecoderClear(&coder); @@ -1142,9 +1142,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); RAW_NULL_CHECK(pRequest->tableList); @@ -1269,7 +1269,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " create table return, msg:%s", LOG_ID_VALUE, tstrerror(code)); tDeleteSVCreateTbBatchReq(&req); @@ -1328,9 +1328,9 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); RAW_NULL_CHECK(pRequest->tableList); // loop to create table @@ -1395,7 +1395,7 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { } code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " drop table return, msg:%s", LOG_ID_VALUE, tstrerror(code)); taosHashCleanup(pVgroupHashmap); destroyRequest(pRequest); @@ -1433,7 +1433,7 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { } taos_free_result(res); - end: +end: uDebug("connId:0x%" PRIx64 " delete data sql:%s, code:%s", *(int64_t*)taos, sql, tstrerror(code)); tDecoderClear(&coder); return code; @@ -1473,9 +1473,9 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { SCatalog* pCatalog = NULL; RAW_RETURN_CHECK(catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog)); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; SVgroupInfo pInfo = {0}; SName pName = {0}; @@ -1543,7 +1543,7 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { code = 
handleAlterTbExecRes(pRes->res, pCatalog); } } - end: +end: uDebug(LOG_ID_TAG " alter table return, meta:%p, len:%d, msg:%s", LOG_ID_VALUE, meta, metaLen, tstrerror(code)); taosArrayDestroy(pArray); if (pVgData) taosMemoryFreeClear(pVgData->pData); @@ -1608,7 +1608,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " write raw block with field return, msg:%s", LOG_ID_VALUE, tstrerror(code)); taosMemoryFreeClear(pTableMeta); qDestroyQuery(pQuery); @@ -1668,7 +1668,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " write raw block return, msg:%s", LOG_ID_VALUE, tstrerror(code)); taosMemoryFreeClear(pTableMeta); qDestroyQuery(pQuery); @@ -1708,7 +1708,8 @@ static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { goto end; } if (taosHashGet(pHashObj, pCreateReq.name, strlen(pCreateReq.name)) == NULL) { - RAW_RETURN_CHECK(taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), &pCreateReq, sizeof(SVCreateTbReq))); + RAW_RETURN_CHECK( + taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), &pCreateReq, sizeof(SVCreateTbReq))); } else { tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); pCreateReq = (SVCreateTbReq){0}; @@ -1734,34 +1735,34 @@ static SHashObj* writeRawCache = NULL; static int8_t initFlag = 0; static int8_t initedFlag = WRITE_RAW_INIT_START; -typedef struct{ - SHashObj* pVgHash; - SHashObj* pNameHash; - SHashObj* pMetaHash; -}rawCacheInfo; +typedef struct { + SHashObj* pVgHash; + SHashObj* pNameHash; + SHashObj* pMetaHash; +} rawCacheInfo; -typedef struct{ +typedef struct { SVgroupInfo vgInfo; int64_t uid; int64_t suid; -}tbInfo; +} tbInfo; -static void tmqFreeMeta(void *data){ +static void tmqFreeMeta(void* data) { STableMeta* pTableMeta = *(STableMeta**)data; taosMemoryFree(pTableMeta); } -static void freeRawCache(void *data) { +static void freeRawCache(void* data) { rawCacheInfo* pRawCache = (rawCacheInfo*)data; taosHashCleanup(pRawCache->pMetaHash); taosHashCleanup(pRawCache->pNameHash); taosHashCleanup(pRawCache->pVgHash); } -static int32_t initRawCacheHash(){ - if (writeRawCache == NULL){ +static int32_t initRawCacheHash() { + if (writeRawCache == NULL) { writeRawCache = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK); - if (writeRawCache == NULL){ + if (writeRawCache == NULL) { return terrno; } taosHashSetFreeFp(writeRawCache, freeRawCache); @@ -1769,7 +1770,7 @@ static int32_t initRawCacheHash(){ return 0; } -static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW){ +static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW) { char* p = (char*)rawData; // | version | total length | total rows | blankFill | total columns | flag seg| block group id | column schema | each // column length | @@ -1799,16 +1800,15 @@ static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrappe } fields += sizeof(int8_t) + sizeof(int32_t); - if (j == pTableMeta->tableInfo.numOfColumns) - return true; + if (j == pTableMeta->tableInfo.numOfColumns) return true; } return false; } -static int32_t getRawCache(SHashObj **pVgHash, SHashObj **pNameHash, SHashObj **pMetaHash, void *key) { +static int32_t getRawCache(SHashObj** pVgHash, SHashObj** pNameHash, SHashObj** pMetaHash, 
void* key) { int32_t code = 0; - void* cacheInfo = taosHashGet(writeRawCache, &key, POINTER_BYTES); - if (cacheInfo == NULL){ + void* cacheInfo = taosHashGet(writeRawCache, &key, POINTER_BYTES); + if (cacheInfo == NULL) { *pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); RAW_NULL_CHECK(*pVgHash); *pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); @@ -1819,7 +1819,7 @@ static int32_t getRawCache(SHashObj **pVgHash, SHashObj **pNameHash, SHashObj ** rawCacheInfo info = {*pVgHash, *pNameHash, *pMetaHash}; RAW_RETURN_CHECK(taosHashPut(writeRawCache, &key, POINTER_BYTES, &info, sizeof(rawCacheInfo))); } else { - rawCacheInfo *info = (rawCacheInfo *)cacheInfo; + rawCacheInfo* info = (rawCacheInfo*)cacheInfo; *pVgHash = info->pVgHash; *pNameHash = info->pNameHash; *pMetaHash = info->pMetaHash; @@ -1833,7 +1833,7 @@ end: return code; } -static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo *conn){ +static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo* conn) { int32_t code = 0; RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, pRequest)); (*pRequest)->syncQuery = true; @@ -1852,29 +1852,30 @@ end: return code; } -typedef int32_t _raw_decode_func_(SDecoder *pDecoder, SMqDataRsp *pRsp); -static int32_t decodeRawData(SDecoder *decoder, void* data, int32_t dataLen, _raw_decode_func_ func, SMqRspObj* rspObj){ - int8_t dataVersion = *(int8_t*)data; - if (dataVersion >= MQ_DATA_RSP_VERSION) { - data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); - dataLen -= sizeof(int8_t) + sizeof(int32_t); +typedef int32_t _raw_decode_func_(SDecoder* pDecoder, SMqDataRsp* pRsp); +static int32_t decodeRawData(SDecoder* decoder, void* data, int32_t dataLen, _raw_decode_func_ func, + SMqRspObj* rspObj) { + int8_t dataVersion = *(int8_t*)data; + if (dataVersion >= MQ_DATA_RSP_VERSION) { + data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); + dataLen -= sizeof(int8_t) + sizeof(int32_t); } - rspObj->resIter = -1; - tDecoderInit(decoder, data, dataLen); - int32_t code = func(decoder, &rspObj->dataRsp); - if (code != 0) { - SET_ERROR_MSG("decode mq taosx data rsp failed"); + rspObj->resIter = -1; + tDecoderInit(decoder, data, dataLen); + int32_t code = func(decoder, &rspObj->dataRsp); + if (code != 0) { + SET_ERROR_MSG("decode mq taosx data rsp failed"); } - return code; + return code; } -static int32_t processCacheMeta(SHashObj *pVgHash, SHashObj *pNameHash, SHashObj *pMetaHash, SVCreateTbReq* pCreateReqDst, - SCatalog* pCatalog, SRequestConnInfo* conn, SName* pName, - STableMeta** pMeta, SSchemaWrapper* pSW, void* rawData, int32_t retry){ - int32_t code = 0; +static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj* pMetaHash, + SVCreateTbReq* pCreateReqDst, SCatalog* pCatalog, SRequestConnInfo* conn, SName* pName, + STableMeta** pMeta, SSchemaWrapper* pSW, void* rawData, int32_t retry) { + int32_t code = 0; STableMeta* pTableMeta = NULL; - tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); + tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); if (tmpInfo == NULL || retry > 0) { tbInfo info = {0}; @@ -1884,13 +1885,13 @@ static int32_t processCacheMeta(SHashObj *pVgHash, SHashObj *pNameHash, SHashObj } RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta)); info.uid = 
pTableMeta->uid; - if (pTableMeta->tableType == TSDB_CHILD_TABLE){ + if (pTableMeta->tableType == TSDB_CHILD_TABLE) { info.suid = pTableMeta->suid; } else { info.suid = pTableMeta->uid; } code = taosHashPut(pMetaHash, &info.suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); - if (code != 0){ + if (code != 0) { taosMemoryFree(pTableMeta); goto end; } @@ -1902,20 +1903,21 @@ static int32_t processCacheMeta(SHashObj *pVgHash, SHashObj *pNameHash, SHashObj RAW_RETURN_CHECK(taosHashPut(pNameHash, pName->tname, strlen(pName->tname), &info, sizeof(tbInfo))); tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); - RAW_RETURN_CHECK(taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo))); + RAW_RETURN_CHECK( + taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo))); } - if (pTableMeta == NULL || retry > 0){ + if (pTableMeta == NULL || retry > 0) { STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, &tmpInfo->suid, LONG_BYTES); if (pTableMetaTmp == NULL || retry > 0 || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) { RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta)); code = taosHashPut(pMetaHash, &tmpInfo->suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); - if (code != 0){ + if (code != 0) { taosMemoryFree(pTableMeta); goto end; } - }else{ + } else { pTableMeta = *pTableMetaTmp; pTableMeta->uid = tmpInfo->uid; pTableMeta->vgId = tmpInfo->vgInfo.vgId; @@ -1927,25 +1929,25 @@ end: return code; } -static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen){ - int32_t code = TSDB_CODE_SUCCESS; - SQuery* pQuery = NULL; - SMqRspObj rspObj = {0}; - SDecoder decoder = {0}; +static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { + int32_t code = TSDB_CODE_SUCCESS; + SQuery* pQuery = NULL; + SMqRspObj rspObj = {0}; + SDecoder decoder = {0}; - SRequestObj* pRequest = NULL; - SCatalog* pCatalog = NULL; - SRequestConnInfo conn = {0}; + SRequestObj* pRequest = NULL; + SCatalog* pCatalog = NULL; + SRequestConnInfo conn = {0}; RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeMqDataRsp, &rspObj)); - SHashObj *pVgHash = NULL; - SHashObj *pNameHash = NULL; - SHashObj *pMetaHash = NULL; + SHashObj* pVgHash = NULL; + SHashObj* pNameHash = NULL; + SHashObj* pMetaHash = NULL; RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos)); int retry = 0; - while(1){ + while (1) { RAW_RETURN_CHECK(smlInitHandle(&pQuery)); uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); while (++rspObj.resIter < rspObj.dataRsp.blockNum) { @@ -1968,9 +1970,9 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen){ tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); STableMeta* pTableMeta = NULL; - RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, - &pName, &pTableMeta, pSW, rawData, retry)); - char err[ERR_MSG_LEN] = {0}; + RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, &pName, &pTableMeta, pSW, + rawData, retry)); + char err[ERR_MSG_LEN] = {0}; code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); if (code != TSDB_CODE_SUCCESS) { SET_ERROR_MSG("table:%s, err:%s", pName.tname, err); @@ 
-1991,7 +1993,7 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen){ break; } - end: +end: uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code)); tDeleteMqDataRsp(&rspObj.dataRsp); tDecoderClear(&decoder); @@ -2001,15 +2003,15 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen){ } static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { - int32_t code = TSDB_CODE_SUCCESS; - SQuery* pQuery = NULL; - SMqRspObj rspObj = {0}; - SDecoder decoder = {0}; - SHashObj* pCreateTbHash = NULL; + int32_t code = TSDB_CODE_SUCCESS; + SQuery* pQuery = NULL; + SMqRspObj rspObj = {0}; + SDecoder decoder = {0}; + SHashObj* pCreateTbHash = NULL; - SRequestObj* pRequest = NULL; - SCatalog* pCatalog = NULL; - SRequestConnInfo conn = {0}; + SRequestObj* pRequest = NULL; + SCatalog* pCatalog = NULL; + SRequestConnInfo conn = {0}; RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); @@ -2019,12 +2021,12 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) RAW_NULL_CHECK(pCreateTbHash); RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); - SHashObj *pVgHash = NULL; - SHashObj *pNameHash = NULL; - SHashObj *pMetaHash = NULL; + SHashObj* pVgHash = NULL; + SHashObj* pNameHash = NULL; + SHashObj* pMetaHash = NULL; RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos)); int retry = 0; - while(1){ + while (1) { RAW_RETURN_CHECK(smlInitHandle(&pQuery)); uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); while (++rspObj.resIter < rspObj.dataRsp.blockNum) { @@ -2048,11 +2050,12 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) // find schema data info SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, pName.tname, strlen(pName.tname)); - STableMeta* pTableMeta = NULL; - RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, - &pName, &pTableMeta, pSW, rawData, retry)); - char err[ERR_MSG_LEN] = {0}; - code = rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); + STableMeta* pTableMeta = NULL; + RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, &pName, + &pTableMeta, pSW, rawData, retry)); + char err[ERR_MSG_LEN] = {0}; + code = + rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); if (code != TSDB_CODE_SUCCESS) { SET_ERROR_MSG("table:%s, err:%s", pName.tname, err); goto end; @@ -2147,7 +2150,7 @@ static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) { *string = fullStr; return; - end: +end: cJSON_Delete(pJson); tDeleteMqBatchMetaRsp(&rsp); } @@ -2159,18 +2162,18 @@ char* tmq_get_json_meta(TAOS_RES* res) { return NULL; } - char* string = NULL; + char* string = NULL; SMqRspObj* rspObj = (SMqRspObj*)res; if (TD_RES_TMQ_METADATA(res)) { processAutoCreateTable(&rspObj->dataRsp, &string); } else if (TD_RES_TMQ_BATCH_META(res)) { processBatchMetaToJson(&rspObj->batchMetaRsp, &string); } else if (TD_RES_TMQ_META(res)) { - cJSON* pJson = NULL; + cJSON* pJson = NULL; processSimpleMeta(&rspObj->metaRsp, &pJson); string = cJSON_PrintUnformatted(pJson); cJSON_Delete(pJson); - } else{ + } else { uError("tmq_get_json_meta res:%d, invalid type", 
*(int8_t*)res); } @@ -2181,7 +2184,7 @@ char* tmq_get_json_meta(TAOS_RES* res) { void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); } static int32_t getOffSetLen(const SMqDataRsp* pRsp) { - SEncoder coder = {0}; + SEncoder coder = {0}; tEncoderInit(&coder, NULL, 0); if (tEncodeSTqOffsetVal(&coder, &pRsp->reqOffset) < 0) return -1; if (tEncodeSTqOffsetVal(&coder, &pRsp->rspOffset) < 0) return -1; @@ -2191,46 +2194,46 @@ static int32_t getOffSetLen(const SMqDataRsp* pRsp) { } typedef int32_t __encode_func__(SEncoder* pEncoder, const SMqDataRsp* pRsp); -static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) { - int32_t len = 0; - int32_t code = 0; - SEncoder encoder = {0}; - void* buf = NULL; - tEncodeSize(encodeFunc, rspObj, len, code); - if (code < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; +static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) { + int32_t len = 0; + int32_t code = 0; + SEncoder encoder = {0}; + void* buf = NULL; + tEncodeSize(encodeFunc, rspObj, len, code); + if (code < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - len += sizeof(int8_t) + sizeof(int32_t); - buf = taosMemoryCalloc(1, len); - if (buf == NULL) { - code = terrno; - goto FAILED; + len += sizeof(int8_t) + sizeof(int32_t); + buf = taosMemoryCalloc(1, len); + if (buf == NULL) { + code = terrno; + goto FAILED; } - tEncoderInit(&encoder, buf, len); - if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + tEncoderInit(&encoder, buf, len); + if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - int32_t offsetLen = getOffSetLen(rspObj); - if (offsetLen <= 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + int32_t offsetLen = getOffSetLen(rspObj); + if (offsetLen <= 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - if (tEncodeI32(&encoder, offsetLen) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + if (tEncodeI32(&encoder, offsetLen) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - if (encodeFunc(&encoder, rspObj) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + if (encodeFunc(&encoder, rspObj) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - tEncoderClear(&encoder); + tEncoderClear(&encoder); - raw->raw = buf; - raw->raw_len = len; - return code; - FAILED: + raw->raw = buf; + raw->raw_len = len; + return code; +FAILED: tEncoderClear(&encoder); taosMemoryFree(buf); return code; @@ -2247,7 +2250,7 @@ int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) { raw->raw_type = rspObj->metaRsp.resMsgType; uDebug("tmq get raw type meta:%p", raw); } else if (TD_RES_TMQ(res)) { - int32_t code = encodeMqDataRsp(tEncodeMqDataRsp, &rspObj->dataRsp, raw); + int32_t code = encodeMqDataRsp(tEncodeMqDataRsp, &rspObj->dataRsp, raw); if (code != 0) { uError("tmq get raw type error:%d", terrno); return code; @@ -2282,7 +2285,7 @@ void tmq_free_raw(tmq_raw_data raw) { (void)memset(terrMsg, 0, ERR_MSG_LEN); } -static int32_t writeRawInit(){ +static int32_t writeRawInit() { while (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_START) { int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1); if (old == 0) { @@ -2296,7 +2299,7 @@ static int32_t writeRawInit(){ } } - if (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_FAIL){ + if (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_FAIL) { return TSDB_CODE_INTERNAL_ERROR; } return 0; @@ -2321,7 +2324,7 @@ static int32_t writeRawImpl(TAOS* 
taos, void* buf, uint32_t len, uint16_t type) return taosDropTable(taos, buf, len); } else if (type == TDMT_VND_DELETE) { return taosDeleteData(taos, buf, len); - } else if (type == RES_TYPE__TMQ_METADATA){ + } else if (type == RES_TYPE__TMQ_METADATA) { return tmqWriteRawMetaDataImpl(taos, buf, len); } else if (type == RES_TYPE__TMQ) { return tmqWriteRawDataImpl(taos, buf, len); @@ -2344,9 +2347,9 @@ static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen if (taos == NULL || meta == NULL) { return TSDB_CODE_INVALID_PARA; } - SMqBatchMetaRsp rsp = {0}; + SMqBatchMetaRsp rsp = {0}; SDecoder coder = {0}; - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; // decode and process req tDecoderInit(&coder, meta, metaLen); @@ -2374,7 +2377,7 @@ static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen } } - end: +end: tDeleteMqBatchMetaRsp(&rsp); return code; } From b8a9e4d364d40ab3c5c3c99acb382c4b425fcfd8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 28 Oct 2024 15:19:48 +0800 Subject: [PATCH 101/142] opt log to aovid stack overflow --- source/util/src/tlog.c | 64 +++++++++++++++++++++++++++------ source/util/test/CMakeLists.txt | 7 ++++ source/util/test/log.cpp | 46 ++++++++++++++++++++++++ 3 files changed, 106 insertions(+), 11 deletions(-) create mode 100644 source/util/test/log.cpp diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 3ca148a625..6174a9cb3e 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -21,10 +21,12 @@ #include "tjson.h" #include "tutil.h" -#define LOG_MAX_LINE_SIZE (10024) -#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) -#define LOG_MAX_LINE_DUMP_SIZE (1024 * 1024) -#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128) +#define LOG_MAX_LINE_SIZE (10024) +#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) +#define LOG_MAX_STACK_LINE_SIZE (512) +#define LOG_MAX_STACK_LINE_BUFFER_SIZE (LOG_MAX_STACK_LINE_SIZE + 3) +#define LOG_MAX_LINE_DUMP_SIZE (1024 * 1024) +#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128) #define LOG_FILE_DAY_LEN 64 @@ -669,16 +671,40 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b } } -void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) 
{ - if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return; +/* + use taosPrintLogImpl_useStackBuffer to avoid stack overflow - char buffer[LOG_MAX_LINE_BUFFER_SIZE]; +*/ +int8_t taosPrintLogImpl_useStackBuffer(const char *flags, int32_t level, int32_t dflag, const char *format, + va_list args) { + char buffer[LOG_MAX_STACK_LINE_BUFFER_SIZE]; int32_t len = taosBuildLogHead(buffer, flags); - va_list argpointer; - va_start(argpointer, format); - int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_LINE_BUFFER_SIZE - len, format, argpointer); - va_end(argpointer); + int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_STACK_LINE_BUFFER_SIZE - len - 1, format, args); + if (writeLen > LOG_MAX_STACK_LINE_SIZE) { + return 1; + } + + buffer[writeLen++] = '\n'; + buffer[writeLen] = 0; + + taosPrintLogImp(level, dflag, buffer, writeLen); + + if (tsLogFp && level <= DEBUG_INFO) { + buffer[writeLen - 1] = 0; + (*tsLogFp)(taosGetTimestampMs(), level, buffer + len); + } + return 0; +} +int8_t taosPrintLogImpl_useHeapBuffer(const char *flags, int32_t level, int32_t dflag, const char *format, + va_list args) { + char *buffer = taosMemoryCalloc(1, LOG_MAX_LINE_BUFFER_SIZE + 1); + if (buffer == NULL) { + return 1; + } + int32_t len = taosBuildLogHead(buffer, flags); + + int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_LINE_BUFFER_SIZE - len - 1, format, args); if (writeLen > LOG_MAX_LINE_SIZE) writeLen = LOG_MAX_LINE_SIZE; buffer[writeLen++] = '\n'; @@ -690,6 +716,22 @@ void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *f buffer[writeLen - 1] = 0; (*tsLogFp)(taosGetTimestampMs(), level, buffer + len); } + taosMemoryFree(buffer); + return 0; +} +void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) { + if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return; + + va_list argpointer, argpointer_copy; + va_start(argpointer, format); + va_copy(argpointer_copy, argpointer); + + if (taosPrintLogImpl_useStackBuffer(flags, level, dflag, format, argpointer) == 0) { + } else { + TAOS_UNUSED(taosPrintLogImpl_useHeapBuffer(flags, level, dflag, format, argpointer_copy)); + } + va_end(argpointer_copy); + va_end(argpointer); } void taosPrintLongString(const char *flags, int32_t level, int32_t dflag, const char *format, ...) 
{ diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt index 4966a629d8..3732c2af59 100644 --- a/source/util/test/CMakeLists.txt +++ b/source/util/test/CMakeLists.txt @@ -126,6 +126,13 @@ add_test( COMMAND regexTest ) +add_executable(logTest "log.cpp") + target_link_libraries(logTest os util common gtest_main) + add_test( + NAME logTest + COMMAND logTest +) + add_executable(decompressTest "decompressTest.cpp") target_link_libraries(decompressTest os util common gtest_main) add_test( diff --git a/source/util/test/log.cpp b/source/util/test/log.cpp new file mode 100644 index 0000000000..ba32d2d639 --- /dev/null +++ b/source/util/test/log.cpp @@ -0,0 +1,46 @@ +#include +#include +#include +#include +#include +#include + +using namespace std; + + +TEST(log, check_log_refactor) { + const char *logDir = "/tmp"; + const char *defaultLogFileNamePrefix = "taoslog"; + const int32_t maxLogFileNum = 10000; + tsAsyncLog = 0; + // idxDebugFlag = 143; + strcpy(tsLogDir, (char *)logDir); + taosInitLog(tsLogDir, 10, false); + tsAsyncLog = 0; + uDebugFlag = 143; + + std::string str; + str.push_back('a'); + + for (int i = 0; i < 10000; i += 2) { + str.push_back('a'); + uError("write to file %s", str.c_str()); + } + str.clear(); + for (int i = 0; i < 10000; i += 2) { + str.push_back('a'); + uDebug("write to file %s", str.c_str()); + } + + for (int i = 0; i < 10000; i += 2) { + str.push_back('a'); + uInfo("write to file %s", str.c_str()); + } + str.clear(); + + for (int i = 0; i < 10000; i += 2) { + str.push_back('a'); + uTrace("write to file %s", str.c_str()); + } + taosCloseLog(); +} From 64c16fbcfecba2ce9c198347b15891833ee20be6 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Mon, 28 Oct 2024 16:34:31 +0800 Subject: [PATCH 102/142] docs: add example for 'insert into stb file csv_path' --- docs/zh/14-reference/03-taos-sql/03-table.md | 2 +- docs/zh/14-reference/03-taos-sql/05-insert.md | 26 ++++++++++++------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/docs/zh/14-reference/03-taos-sql/03-table.md b/docs/zh/14-reference/03-taos-sql/03-table.md index cad9190bd9..9e4cc66eaf 100644 --- a/docs/zh/14-reference/03-taos-sql/03-table.md +++ b/docs/zh/14-reference/03-taos-sql/03-table.md @@ -87,7 +87,7 @@ CREATE TABLE [IF NOT EXISTS] USING [db_name.]stb_name (field1_name [, field2_nam **参数说明** -1. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。CSV 文件中应仅包含 table name 与 tag 值。如需插入数据,请参考数据写入章节。 +1. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。CSV 文件中应仅包含 table name 与 tag 值。如需插入数据,请参考'数据写入'章节。 2. 为指定的 stb_name 创建子表,该超级表必须已经存在。 3. field_name 列表顺序与 CSV 文件各列内容顺序一致。列表中不允许出现重复项,且必须包含 `tbname`,可包含零个或多个超级表中已定义的标签列。未包含在列表中的标签值将被设置为 NULL。 diff --git a/docs/zh/14-reference/03-taos-sql/05-insert.md b/docs/zh/14-reference/03-taos-sql/05-insert.md index b2c34f4c55..40f8e95006 100644 --- a/docs/zh/14-reference/03-taos-sql/05-insert.md +++ b/docs/zh/14-reference/03-taos-sql/05-insert.md @@ -1,7 +1,7 @@ --- sidebar_label: 数据写入 title: 数据写入 -description: 写入数据的详细语法 +description: 写入数据的详细语法 --- ## 写入语法 @@ -25,9 +25,9 @@ INSERT INTO tb_name [(field1_name, ...)] subquery ### 超级表语法 ```sql INSERT INTO - stb1_name [(field1_name, ...)] + stb1_name [(field1_name, ...)] VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path - [stb2_name [(field1_name, ...)] + [stb2_name [(field1_name, ...)] VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path ...]; ``` @@ -47,7 +47,7 @@ INSERT INTO 2. VALUES 语法表示了要插入的一行或多行数据。 -3. 
FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。 +3. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。如仅需创建子表,请参考'表'章节。 4. `INSERT ... VALUES` 语句和 `INSERT ... FILE` 语句均可以在一条 INSERT 语句中同时向多个表插入数据。 @@ -154,12 +154,20 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; ``` -## 超级表语法 +## 向超级表插入数据并自动创建子表 -自动建表, 表名通过tbname列指定 +自动建表, 表名通过 tbname 列指定 ```sql -INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) - values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32) +INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) + VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33) - ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33) + ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33) +``` +## 通过 CSV 文件向超级表插入数据并自动创建子表 + +根据 csv 文件内容,为 超级表创建子表,并填充相应 column 与 tag + +```sql +INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) + FILE '/tmp/csvfile_21002.csv' ``` From af0a41979e59c3846833b66e1ff9350fe4d04479 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 28 Oct 2024 18:11:39 +0800 Subject: [PATCH 103/142] fix:conflicts from 3.0 --- source/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 56af9a1c47..7ea6b01bf3 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -2000,7 +2000,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"metaDebugFlag", &metaDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, - {"tqClientDebugFlag", &}tqClientDebugFlag, + {"tqClientDebugFlag", &tqClientDebugFlag}, }; static OptionNameAndVar options[] = {{"audit", &tsEnableAudit}, From 6c155d949e43457561ce02147b99dca0ab263ae9 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 28 Oct 2024 23:13:31 +0800 Subject: [PATCH 104/142] fix:[TS-5441] test case error --- tests/system-test/2-query/db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py index cd7c5bd26e..d4e5f89aa3 100644 --- a/tests/system-test/2-query/db.py +++ b/tests/system-test/2-query/db.py @@ -57,7 +57,7 @@ class TDTestCase: tdSql.checkData(0, 2, 0) tdSql.query("show dnode 1 variables like '%debugFlag'") - tdSql.checkRows(24) + tdSql.checkRows(25) tdSql.query("show dnode 1 variables like '____debugFlag'") tdSql.checkRows(2) From 01819f202ede0331e2749467c8dd8bb0696a3486 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 29 Oct 2024 10:34:45 +0800 Subject: [PATCH 105/142] opt log --- source/util/src/tlog.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 6174a9cb3e..d2c8d090e5 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -675,8 +675,8 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b use taosPrintLogImpl_useStackBuffer to avoid 
stack overflow */ -int8_t taosPrintLogImpl_useStackBuffer(const char *flags, int32_t level, int32_t dflag, const char *format, - va_list args) { +static int8_t taosPrintLogImplUseStackBuffer(const char *flags, int32_t level, int32_t dflag, const char *format, + va_list args) { char buffer[LOG_MAX_STACK_LINE_BUFFER_SIZE]; int32_t len = taosBuildLogHead(buffer, flags); @@ -696,8 +696,8 @@ int8_t taosPrintLogImpl_useStackBuffer(const char *flags, int32_t level, int32_t } return 0; } -int8_t taosPrintLogImpl_useHeapBuffer(const char *flags, int32_t level, int32_t dflag, const char *format, - va_list args) { +static int8_t taosPrintLogImplUseHeapBuffer(const char *flags, int32_t level, int32_t dflag, const char *format, + va_list args) { char *buffer = taosMemoryCalloc(1, LOG_MAX_LINE_BUFFER_SIZE + 1); if (buffer == NULL) { return 1; @@ -726,9 +726,9 @@ void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *f va_start(argpointer, format); va_copy(argpointer_copy, argpointer); - if (taosPrintLogImpl_useStackBuffer(flags, level, dflag, format, argpointer) == 0) { + if (taosPrintLogImplUseStackBuffer(flags, level, dflag, format, argpointer) == 0) { } else { - TAOS_UNUSED(taosPrintLogImpl_useHeapBuffer(flags, level, dflag, format, argpointer_copy)); + TAOS_UNUSED(taosPrintLogImplUseHeapBuffer(flags, level, dflag, format, argpointer_copy)); } va_end(argpointer_copy); va_end(argpointer); From fc2e62a18cced179af96d5a45afd5267327682d9 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Tue, 29 Oct 2024 14:14:26 +0800 Subject: [PATCH 106/142] use len of context buffer --- source/libs/executor/src/sysscanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 057913e04e..8aad415f70 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -2032,7 +2032,7 @@ static EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) { SValueNode* node = (SValueNode*)pNode; char* dbName = nodesGetValueFromNode(node); - tstrncpy((char*)pContext, varDataVal(dbName), varDataLen(dbName) + 1); + tstrncpy((char*)pContext, varDataVal(dbName), TSDB_DB_NAME_LEN); return DEAL_RES_END; // stop walk } default: From 5261c87620ff5f48d2ee2605211f4f07c47df4fd Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 29 Oct 2024 14:32:40 +0800 Subject: [PATCH 107/142] tetst:add testecase for clear consumer with session and poll timout --- tests/army/tmq/drop_lost_comsumers.py | 162 ++++++++++++++------------ tests/army/tmq/per_consumer.py | 33 +++--- 2 files changed, 106 insertions(+), 89 deletions(-) diff --git a/tests/army/tmq/drop_lost_comsumers.py b/tests/army/tmq/drop_lost_comsumers.py index b88aae8c03..d2ec796516 100644 --- a/tests/army/tmq/drop_lost_comsumers.py +++ b/tests/army/tmq/drop_lost_comsumers.py @@ -20,9 +20,17 @@ from taos.tmq import Consumer from frame.common import * class TaosConsumer: + #TODo: add to tq.py and remove from here def __init__(self): - pass - + self.sub_once = True + self.once_consumer_rows = 0 + self.sub_log = False + self.safe_counter = ThreadSafeCounter() + + def log_info(self, message): + if self.sub_log: + tdLog.info(message) + def sub_consumer(self ,consumer ,group_id ,topic_name ): group_id = int(group_id) if group_id < 100 : @@ -33,9 +41,9 @@ class TaosConsumer: nrows = 0 while True: start = datetime.now() - print(f"time:{start},consumer:{group_id}, start to consume") + 
tdLog.info(f"time:{start},consumer:{group_id}, start to consume") message = consumer.poll(timeout=10.0) - + if message: id = message.offset() topic = message.topic() @@ -48,14 +56,11 @@ class TaosConsumer: values = block.fetchall end = datetime.now() elapsed_time = end -start - print(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") consumer.commit() - print(f"consumer:{group_id},consumer_nrows:{nrows}") + tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") # consumer.unsubscribe() # consumer.close() - # break - # if nrows >= 1000000: - # break def set_conf(self,td_connect_ip="localhost",group_id=1,client_id="test_consumer_py",enable_auto_commit="false",auto_commit_interval_ms="1000",auto_offset_reset="earliest",msg_with_table_name="true",session_timeout_ms=10000,max_poll_interval_ms=180000,experimental_snapshot_enable="false"): conf = { @@ -74,7 +79,7 @@ class TaosConsumer: } return conf - def sub_consumer_once(self,consumer, group_id, topic_name, counter, stop_event): + def sub_consumer_once(self, consumer, group_id, topic_name, stop_event): group_id = int(group_id) if group_id < 100 : consumer.subscribe([topic_name]) @@ -83,11 +88,12 @@ class TaosConsumer: while not stop_event.is_set(): start = datetime.now() - tdLog.info(f"time:{start},consumer:{group_id}, start to consume") - #start = datetime.now() - #print(f"time:{start},consumer:{group_id}, start to consume") - tdLog.info(f"consumer_nrows:{consumer_nrows}") - message = consumer.poll(timeout=10.0) + self.log_info(f"time:{start},consumer:{group_id}, start to consume,consumer_nrows:{consumer_nrows}") + if consumer_nrows < self.once_consumer_rows: + message = consumer.poll(timeout=1.0) + elif consumer_nrows >= self.once_consumer_rows: + pass + # tdLog.info("stop consumer when consumer all rows") if message: id = message.offset() @@ -96,43 +102,35 @@ class TaosConsumer: for block in message: addrows = block.nrows() nrows += block.nrows() - counter.rows(block.nrows()) + self.safe_counter.rows(block.nrows()) ncols = block.ncols() values = block.fetchall end = datetime.now() elapsed_time = end -start - # tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") - consumer.commit() - # tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") - consumer_nrows = nrows - # consumer.unsubscribe() - # consumer.close() - # break - print("Consumer subscription thread is stopping.") - def taosc_consumer(self, conf, topic_name, counter,stop_event): + self.log_info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + self.log_info(f"consumer:{group_id},consumer_nrows:{nrows},counter.counter:{self.safe_counter.counter}") + # consumer.commit() + consumer_nrows = nrows + # consumer.unsubscribe() + # consumer.close() + # break + tdLog.info("Consumer subscription thread is stopping.") + + def taosc_consumer(self, conf: list, topic_name: str, stop_event): try: - print(conf) + tdLog.info(conf) from taos.tmq import Consumer - print("3333") + tdLog.info("start to config consumer") consumer = Consumer(conf) - print("456") + tdLog.info("start to subscribe") group_id = int(conf["group.id"]) 
tdLog.info(f"{consumer},{group_id}") - except Exception as e: - tdLog.exit(f"{e}") - #counsmer sub: - # while True: - # try: - # self.sub_consumer_once(consumer,group_id) - # except Exception as e: - # print(str(e)) - # time.sleep(1) - # break - # only consumer once - try: - self.sub_consumer_once(consumer, group_id, topic_name, counter, stop_event) - + if self.sub_once: + self.sub_consumer_once(consumer, group_id, topic_name, stop_event) + else: + self.sub_consumer(consumer, group_id, topic_name) + # only consumer once except Exception as e: tdLog.exit(f"{e}") @@ -162,6 +160,17 @@ class TDTestCase: tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) self.consumer_instance = TaosConsumer() + #db parameter + self.table_number = 1000 + self.rows_per_table = 1000 + #consumer parameter + self.consumer_groups_num = 2 + self.session_timeout_ms= 180000 + self.max_poll_interval_ms= 180000 + #case consumer parameter + self.consumer_rows_per_thread = self.table_number * self.rows_per_table + self.consumer_all_rows = self.consumer_rows_per_thread * self.consumer_groups_num + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def caseDescription(self): ''' @@ -172,20 +181,22 @@ class TDTestCase: ''' return - def check_consumer(self,count,rows): + def check_consumer(self, count, rows, stop_event=None): time.sleep(count) - print(count) try: + tdLog.info(f"wait timeout count:{count} and check consumer status whether is closed") for ct in range(5): tdSql.query(f'show consumers') anser_rows=tdSql.getRows() - if tdSql.checkRows(rows): + if anser_rows == rows: break else: time.sleep(1) - tdLog.info(f"wait for {count} seconds to check that consumers number is {rows}") + tdLog.info(f"wait for {count} seconds to check that consumers number is {anser_rows}") if anser_rows != rows: - tdLog.exit(f"consumer number is not {rows}") + if stop_event: + stop_event.set() + tdLog.exit(f"consumer number is {anser_rows } but not expected {rows}") except Exception as e: tdLog.exit(f"{e},check consumer error") @@ -206,58 +217,63 @@ class TDTestCase: os.system("rm -rf consumer.log") - def drop_max_poll_timeout_consmuers(self, consumer_groups_num, consumer_rows, topic_name, timeout): + def drop_max_poll_timeout_consmuers(self, topic_name, timeout): tdSql.execute(f'drop topic if exists {topic_name};') tdSql.execute(f'use db_sub') tdSql.execute(f'create topic {topic_name} as select * from db_sub.meters;') threads = [] - counter = ThreadSafeCounter() + self.safe_counter = ThreadSafeCounter() + self.consumer_instance.safe_counter = self.safe_counter stop_event = threading.Event() - for id in range(consumer_groups_num): + self.consumer_instance.once_consumer_rows = self.consumer_rows_per_thread + tdLog.info(f"consumer_rows:{self.consumer_instance.once_consumer_rows}") + self.consumer_instance.sub_once = True + for id in range(self.consumer_groups_num): conf = self.consumer_instance.set_conf(group_id=id, session_timeout_ms=self.session_timeout_ms, max_poll_interval_ms=self.max_poll_interval_ms) - threads.append(threading.Thread(target=self.consumer_instance.taosc_consumer, args=(conf, topic_name, counter, stop_event))) + threads.append(threading.Thread(target=self.consumer_instance.taosc_consumer, args=(conf, topic_name, stop_event))) for tr in threads: tr.start() - consumer_all_rows = consumer_rows * consumer_groups_num while True: - if counter.get() < consumer_all_rows: + if self.safe_counter.get() < self.consumer_all_rows: time.sleep(5) - 
print(f"consumer_all_rows:{consumer_all_rows},counter.get():{counter.get()}") - elif counter.get() >= consumer_all_rows: - self.check_consumer(timeout+20, 0) + tdLog.info(f"consumer_all_rows:{self.consumer_all_rows},counter.get():{self.safe_counter.get()}") + elif self.safe_counter.get() >= self.consumer_all_rows: + # adding 5s is for heartbeat check + self.check_consumer(timeout+5, 0, stop_event) stop_event.set() tr.join() break - time.sleep(2) + time.sleep(1) tdSql.execute(f'drop topic if exists {topic_name};') - def case_session_12s(self): + def case_session_timeout(self): + tdLog.info("start to test session_timeout_ms=12s") #test session_timeout_ms=12s - session_timeout_ms=12000 - max_poll_interval_ms=180000 + self.session_timeout_ms=12000 + self.max_poll_interval_ms=180000 topic_name = "select_d1" - self.drop_session_timeout_consmuers(consumer_groups_num=1, session_timeout_ms=session_timeout_ms, max_poll_interval_ms=max_poll_interval_ms, topic_name=topic_name , timeout=int(session_timeout_ms/1000)) - + self.drop_session_timeout_consmuers(consumer_groups_num=1, session_timeout_ms=self.session_timeout_ms, max_poll_interval_ms=self.max_poll_interval_ms, topic_name=topic_name , timeout=int(self.session_timeout_ms/1000)) + tdLog.info("stop to test session_timeout_ms=12s and done ") - def case_max_poll_12s(self,consumer_rows): - #test max_poll_interval_ms=12s + def case_max_poll_timeout(self): + tdLog.info("start to test max_poll_interval_ms=20s") + #test max_poll_interval_ms=20s self.session_timeout_ms=180000 - self.max_poll_interval_ms=12000 + self.max_poll_interval_ms=20000 topic_name = "select_d1" - self.drop_max_poll_timeout_consmuers(consumer_groups_num=1, topic_name=topic_name, consumer_rows=consumer_rows, timeout=int(self.max_poll_interval_ms/1000)) + self.drop_max_poll_timeout_consmuers(topic_name=topic_name, timeout=int(self.max_poll_interval_ms/1000)) + tdLog.info("stop to test max_poll_interval_ms=20s and done ") def run(self): - table_number = 1000 - rows_per_table = 1000 vgroups = 4 - etool.benchMark(command=f"-d db_sub -t {table_number} -n {rows_per_table} -v {vgroups} -y") - consumer_rows = table_number * rows_per_table # 消费的目标行数 - # self.case_session_12s() - self.case_max_poll_12s(consumer_rows) - remaining_threads = threading.Lock() + etool.benchMark(command=f"-d db_sub -t {self.table_number} -n {self.rows_per_table} -v {vgroups} -a {self.replicaVar} -y") + # test case start here + + self.case_session_timeout() + self.case_max_poll_timeout() def stop(self): tdSql.close() diff --git a/tests/army/tmq/per_consumer.py b/tests/army/tmq/per_consumer.py index 67c82d5d3e..810d7d44f8 100644 --- a/tests/army/tmq/per_consumer.py +++ b/tests/army/tmq/per_consumer.py @@ -2,6 +2,7 @@ import os import taos import time from datetime import datetime +from frame.log import * import subprocess from multiprocessing import Process import threading @@ -11,7 +12,7 @@ import click try: conn = taos.connect() except Exception as e: - print(str(e)) + tdLog.info(str(e)) @click.command() @click.option('-c', '--consumer-groups-num', "consumer_group_num", default=1, help='Number of consumer group.') @@ -20,10 +21,10 @@ except Exception as e: def test_timeout_sub(consumer_group_num,session_timeout_ms,max_poll_interval_ms): threads = [] - print(consumer_group_num,session_timeout_ms,max_poll_interval_ms) + tdLog.info(consumer_group_num,session_timeout_ms,max_poll_interval_ms) for id in range(consumer_group_num): conf = 
set_conf(group_id=id,session_timeout_ms=session_timeout_ms,max_poll_interval_ms=max_poll_interval_ms) - print(conf) + tdLog.info(conf) threads.append(threading.Thread(target=taosc_consumer, args=(conf,))) for tr in threads: tr.start() @@ -36,13 +37,13 @@ def sub_consumer(consumer,group_id): try: consumer.subscribe(["select_d1"]) except Exception as e: - print(f"subscribe error") + tdLog.info(f"subscribe error") exit(1) nrows = 0 while True: start = datetime.now() - print(f"time:{start},consumer:{group_id}, start to consume") + tdLog.info(f"time:{start},consumer:{group_id}, start to consume") message = consumer.poll(timeout=10.0) if message: @@ -57,9 +58,9 @@ def sub_consumer(consumer,group_id): values = block.fetchall end = datetime.now() elapsed_time = end -start - print(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") consumer.commit() - print(f"consumer:{group_id},consumer_nrows:{nrows}") + tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") # consumer.unsubscribe() # consumer.close() # break @@ -73,14 +74,14 @@ def sub_consumer_once(consumer,group_id): consumer_nrows = 0 while True: start = datetime.now() - print(f"time:{start},consumer:{group_id}, start to consume") + tdLog.info(f"time:{start},consumer:{group_id}, start to consume") #start = datetime.now() - #print(f"time:{start},consumer:{group_id}, start to consume") - print(f"consumer_nrows:{consumer_nrows}") + #tdLog.info(f"time:{start},consumer:{group_id}, start to consume") + tdLog.info(f"consumer_nrows:{consumer_nrows}") if consumer_nrows < 1000000: message = consumer.poll(timeout=10.0) else: - print(" stop consumer when consumer all rows") + tdLog.info(" stop consumer when consumer all rows") if message: id = message.offset() @@ -94,9 +95,9 @@ def sub_consumer_once(consumer,group_id): values = block.fetchall end = datetime.now() elapsed_time = end -start - # print(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + # tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") consumer.commit() - # print(f"consumer:{group_id},consumer_nrows:{nrows}") + # tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") consumer_nrows = nrows # consumer.unsubscribe() # consumer.close() @@ -122,20 +123,20 @@ def set_conf(td_connect_ip="localhost",group_id=1,client_id="test_consumer_py",e def taosc_consumer(conf): consumer = Consumer(conf) group_id = int(conf["group.id"]) - print(f"{consumer},{group_id}") + tdLog.info(f"{consumer},{group_id}") #counsmer sub: # while True: # try: # self.sub_consumer_once(consumer,group_id) # except Exception as e: - # print(str(e)) + # tdLog.info(str(e)) # time.sleep(1) # break # only consumer once try: sub_consumer_once(consumer,group_id) except Exception as e: - print(str(e)) + tdLog.info(str(e)) #consumer.close() From d99c9159082d2047cf10d008420c50749735aff5 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 29 Oct 2024 14:38:33 +0800 Subject: [PATCH 108/142] test:use the black tool to format the code style --- tests/army/tmq/drop_lost_comsumers.py | 211 ++++++++++++++++---------- tests/army/tmq/per_consumer.py | 123 +++++++++------ 2 
files changed, 211 insertions(+), 123 deletions(-) diff --git a/tests/army/tmq/drop_lost_comsumers.py b/tests/army/tmq/drop_lost_comsumers.py index d2ec796516..f455931089 100644 --- a/tests/army/tmq/drop_lost_comsumers.py +++ b/tests/army/tmq/drop_lost_comsumers.py @@ -1,4 +1,3 @@ - import taos import sys import time @@ -19,8 +18,9 @@ from datetime import datetime from taos.tmq import Consumer from frame.common import * + class TaosConsumer: - #TODo: add to tq.py and remove from here + # TODo: add to tq.py and remove from here def __init__(self): self.sub_once = True self.once_consumer_rows = 0 @@ -31,43 +31,57 @@ class TaosConsumer: if self.sub_log: tdLog.info(message) - def sub_consumer(self ,consumer ,group_id ,topic_name ): + def sub_consumer(self, consumer, group_id, topic_name): group_id = int(group_id) - if group_id < 100 : + if group_id < 100: try: consumer.subscribe([topic_name]) except TmqError: - tdLog.exit(f"subscribe error") + tdLog.exit(f"subscribe error") nrows = 0 while True: start = datetime.now() tdLog.info(f"time:{start},consumer:{group_id}, start to consume") message = consumer.poll(timeout=10.0) - + if message: id = message.offset() topic = message.topic() database = message.database() - + for block in message: addrows = block.nrows() nrows += block.nrows() ncols = block.ncols() values = block.fetchall end = datetime.now() - elapsed_time = end -start - tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + elapsed_time = end - start + tdLog.info( + f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}" + ) consumer.commit() tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") - # consumer.unsubscribe() - # consumer.close() + # consumer.unsubscribe() + # consumer.close() - def set_conf(self,td_connect_ip="localhost",group_id=1,client_id="test_consumer_py",enable_auto_commit="false",auto_commit_interval_ms="1000",auto_offset_reset="earliest",msg_with_table_name="true",session_timeout_ms=10000,max_poll_interval_ms=180000,experimental_snapshot_enable="false"): + def set_conf( + self, + td_connect_ip="localhost", + group_id=1, + client_id="test_consumer_py", + enable_auto_commit="false", + auto_commit_interval_ms="1000", + auto_offset_reset="earliest", + msg_with_table_name="true", + session_timeout_ms=10000, + max_poll_interval_ms=180000, + experimental_snapshot_enable="false", + ): conf = { # auth options # consume options "td.connect.ip": f"{td_connect_ip}", - "group.id": f"{group_id}", + "group.id": f"{group_id}", "client.id": f"{client_id}", "enable.auto.commit": f"{enable_auto_commit}", "auto.commit.interval.ms": f"{auto_commit_interval_ms}", @@ -75,25 +89,28 @@ class TaosConsumer: "msg.with.table.name": f"{msg_with_table_name}", "session.timeout.ms": f"{session_timeout_ms}", "max.poll.interval.ms": f"{max_poll_interval_ms}", - "experimental.snapshot.enable" :f"{experimental_snapshot_enable}", + "experimental.snapshot.enable": f"{experimental_snapshot_enable}", } return conf - + def sub_consumer_once(self, consumer, group_id, topic_name, stop_event): group_id = int(group_id) - if group_id < 100 : + if group_id < 100: consumer.subscribe([topic_name]) nrows = 0 consumer_nrows = 0 - + while not stop_event.is_set(): start = datetime.now() - self.log_info(f"time:{start},consumer:{group_id}, start to consume,consumer_nrows:{consumer_nrows}") + self.log_info( + 
f"time:{start},consumer:{group_id}, start to consume,consumer_nrows:{consumer_nrows}" + ) if consumer_nrows < self.once_consumer_rows: - message = consumer.poll(timeout=1.0) + message = consumer.poll(timeout=1.0) elif consumer_nrows >= self.once_consumer_rows: + # when break the loop, the consumer will be closed, so we need to continue to keep consumer alive util the stop_event is set pass - # tdLog.info("stop consumer when consumer all rows") + # tdLog.info("stop consumer when consumer all rows") if message: id = message.offset() @@ -106,37 +123,42 @@ class TaosConsumer: ncols = block.ncols() values = block.fetchall end = datetime.now() - elapsed_time = end -start + elapsed_time = end - start - self.log_info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") - self.log_info(f"consumer:{group_id},consumer_nrows:{nrows},counter.counter:{self.safe_counter.counter}") + self.log_info( + f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}" + ) + self.log_info( + f"consumer:{group_id},consumer_nrows:{nrows},counter.counter:{self.safe_counter.counter}" + ) # consumer.commit() consumer_nrows = nrows # consumer.unsubscribe() # consumer.close() - # break + # break tdLog.info("Consumer subscription thread is stopping.") def taosc_consumer(self, conf: list, topic_name: str, stop_event): try: tdLog.info(conf) from taos.tmq import Consumer + tdLog.info("start to config consumer") consumer = Consumer(conf) tdLog.info("start to subscribe") group_id = int(conf["group.id"]) tdLog.info(f"{consumer},{group_id}") if self.sub_once: - self.sub_consumer_once(consumer, group_id, topic_name, stop_event) + self.sub_consumer_once(consumer, group_id, topic_name, stop_event) else: self.sub_consumer(consumer, group_id, topic_name) # only consumer once except Exception as e: tdLog.exit(f"{e}") - - #consumer.close() - + # consumer.close() + + class ThreadSafeCounter: def __init__(self): self.counter = 0 @@ -154,45 +176,51 @@ class ThreadSafeCounter: class TDTestCase: # updatecfgDict = {'debugFlag': 135, 'asynclog': 0} - def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) self.consumer_instance = TaosConsumer() - #db parameter + # db parameter self.table_number = 1000 self.rows_per_table = 1000 - #consumer parameter + # consumer parameter self.consumer_groups_num = 2 - self.session_timeout_ms= 180000 - self.max_poll_interval_ms= 180000 - #case consumer parameter + self.session_timeout_ms = 180000 + self.max_poll_interval_ms = 180000 + # case consumer parameter self.consumer_rows_per_thread = self.table_number * self.rows_per_table - self.consumer_all_rows = self.consumer_rows_per_thread * self.consumer_groups_num + self.consumer_all_rows = ( + self.consumer_rows_per_thread * self.consumer_groups_num + ) + + # tdSql.init(conn.cursor(), logSql) # output sql.txt file - #tdSql.init(conn.cursor(), logSql) # output sql.txt file def caseDescription(self): - ''' - drop_lost_consmuers: + """ + drop_lost_consmuers: 1. verifying that the boundary and valid values of session_timeout_ms are in effect 2. verifying that the boundary and valid values of max_poll_interval_ms are in effect 3. 
verifying that consumer will be closed when the session_timeout_ms and max_poll_interval_ms is expired - ''' + """ return - + def check_consumer(self, count, rows, stop_event=None): time.sleep(count) try: - tdLog.info(f"wait timeout count:{count} and check consumer status whether is closed") + tdLog.info( + f"wait timeout count:{count} and check consumer status whether is closed" + ) for ct in range(5): - tdSql.query(f'show consumers') - anser_rows=tdSql.getRows() + tdSql.query(f"show consumers") + anser_rows = tdSql.getRows() if anser_rows == rows: break else: time.sleep(1) - tdLog.info(f"wait for {count} seconds to check that consumers number is {anser_rows}") + tdLog.info( + f"wait for {count} seconds to check that consumers number is {anser_rows}" + ) if anser_rows != rows: if stop_event: stop_event.set() @@ -200,27 +228,37 @@ class TDTestCase: except Exception as e: tdLog.exit(f"{e},check consumer error") - def drop_session_timeout_consmuers(self, consumer_groups_num, session_timeout_ms, max_poll_interval_ms, topic_name, timeout): - tdSql.execute(f'drop topic if exists {topic_name};') - tdSql.execute(f'use db_sub') - tdSql.execute(f'create topic {topic_name} as select * from db_sub.meters;') + def drop_session_timeout_consmuers( + self, + consumer_groups_num, + session_timeout_ms, + max_poll_interval_ms, + topic_name, + timeout, + ): + tdSql.execute(f"drop topic if exists {topic_name};") + tdSql.execute(f"use db_sub") + tdSql.execute(f"create topic {topic_name} as select * from db_sub.meters;") # start consumer and config some parameters - os.system(f"nohup python3 ./tmq/per_consumer.py -c {consumer_groups_num} -s {session_timeout_ms} -p {max_poll_interval_ms} > consumer.log &") + os.system( + f"nohup python3 ./tmq/per_consumer.py -c {consumer_groups_num} -s {session_timeout_ms} -p {max_poll_interval_ms} > consumer.log &" + ) # wait 4s for consuming data time.sleep(4) # kill consumer to simulate session_timeout_ms tdLog.info("kill per_consumer.py") - tdCom.kill_signal_process(signal=9,processor_name="python3\s*./tmq/per_consumer.py") - self.check_consumer(timeout,0) - tdSql.execute(f'drop topic if exists {topic_name};') + tdCom.kill_signal_process( + signal=9, processor_name="python3\s*./tmq/per_consumer.py" + ) + self.check_consumer(timeout, 0) + tdSql.execute(f"drop topic if exists {topic_name};") os.system("rm -rf consumer.log") - - + def drop_max_poll_timeout_consmuers(self, topic_name, timeout): - tdSql.execute(f'drop topic if exists {topic_name};') - tdSql.execute(f'use db_sub') - tdSql.execute(f'create topic {topic_name} as select * from db_sub.meters;') + tdSql.execute(f"drop topic if exists {topic_name};") + tdSql.execute(f"use db_sub") + tdSql.execute(f"create topic {topic_name} as select * from db_sub.meters;") threads = [] self.safe_counter = ThreadSafeCounter() @@ -230,48 +268,68 @@ class TDTestCase: tdLog.info(f"consumer_rows:{self.consumer_instance.once_consumer_rows}") self.consumer_instance.sub_once = True for id in range(self.consumer_groups_num): - conf = self.consumer_instance.set_conf(group_id=id, session_timeout_ms=self.session_timeout_ms, max_poll_interval_ms=self.max_poll_interval_ms) - threads.append(threading.Thread(target=self.consumer_instance.taosc_consumer, args=(conf, topic_name, stop_event))) + conf = self.consumer_instance.set_conf( + group_id=id, + session_timeout_ms=self.session_timeout_ms, + max_poll_interval_ms=self.max_poll_interval_ms, + ) + threads.append( + threading.Thread( + target=self.consumer_instance.taosc_consumer, + args=(conf, 
topic_name, stop_event), + ) + ) for tr in threads: tr.start() - + while True: if self.safe_counter.get() < self.consumer_all_rows: time.sleep(5) - tdLog.info(f"consumer_all_rows:{self.consumer_all_rows},counter.get():{self.safe_counter.get()}") + tdLog.info( + f"consumer_all_rows:{self.consumer_all_rows},counter.get():{self.safe_counter.get()}" + ) elif self.safe_counter.get() >= self.consumer_all_rows: - # adding 5s is for heartbeat check - self.check_consumer(timeout+5, 0, stop_event) + # adding 5s is for heartbeat check + self.check_consumer(timeout + 5, 0, stop_event) stop_event.set() tr.join() break time.sleep(1) - tdSql.execute(f'drop topic if exists {topic_name};') - + tdSql.execute(f"drop topic if exists {topic_name};") + def case_session_timeout(self): tdLog.info("start to test session_timeout_ms=12s") - #test session_timeout_ms=12s - self.session_timeout_ms=12000 - self.max_poll_interval_ms=180000 + # test session_timeout_ms=12s + self.session_timeout_ms = 12000 + self.max_poll_interval_ms = 180000 topic_name = "select_d1" - self.drop_session_timeout_consmuers(consumer_groups_num=1, session_timeout_ms=self.session_timeout_ms, max_poll_interval_ms=self.max_poll_interval_ms, topic_name=topic_name , timeout=int(self.session_timeout_ms/1000)) + self.drop_session_timeout_consmuers( + consumer_groups_num=1, + session_timeout_ms=self.session_timeout_ms, + max_poll_interval_ms=self.max_poll_interval_ms, + topic_name=topic_name, + timeout=int(self.session_timeout_ms / 1000), + ) tdLog.info("stop to test session_timeout_ms=12s and done ") def case_max_poll_timeout(self): tdLog.info("start to test max_poll_interval_ms=20s") - #test max_poll_interval_ms=20s - self.session_timeout_ms=180000 - self.max_poll_interval_ms=20000 + # test max_poll_interval_ms=20s + self.session_timeout_ms = 180000 + self.max_poll_interval_ms = 20000 topic_name = "select_d1" - self.drop_max_poll_timeout_consmuers(topic_name=topic_name, timeout=int(self.max_poll_interval_ms/1000)) + self.drop_max_poll_timeout_consmuers( + topic_name=topic_name, timeout=int(self.max_poll_interval_ms / 1000) + ) tdLog.info("stop to test max_poll_interval_ms=20s and done ") - def run(self): vgroups = 4 - etool.benchMark(command=f"-d db_sub -t {self.table_number} -n {self.rows_per_table} -v {vgroups} -a {self.replicaVar} -y") + etool.benchMark( + command=f"-d db_sub -t {self.table_number} -n {self.rows_per_table} -v {vgroups} -a {self.replicaVar} -y" + ) # test case start here - + self.case_session_timeout() self.case_max_poll_timeout() @@ -279,5 +337,6 @@ class TDTestCase: tdSql.close() tdLog.success(f"{__file__} successfully executed") + tdCases.addLinux(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/tmq/per_consumer.py b/tests/army/tmq/per_consumer.py index 810d7d44f8..418c88dead 100644 --- a/tests/army/tmq/per_consumer.py +++ b/tests/army/tmq/per_consumer.py @@ -9,21 +9,46 @@ import threading from taos.tmq import Consumer import click +# TDDO +# 1. 
using tmq common class to replace the function, file drop_lost_consumers.py has the same function + try: conn = taos.connect() except Exception as e: tdLog.info(str(e)) - -@click.command() -@click.option('-c', '--consumer-groups-num', "consumer_group_num", default=1, help='Number of consumer group.') -@click.option('-s', '--session-timeout-ms', "session_timeout_ms", default=60000, help='session timeout:ms') -@click.option('-p', '--max-poll-interval-ms',"max_poll_interval_ms", default=180000, help='max poll interval timeout:ms') -def test_timeout_sub(consumer_group_num,session_timeout_ms,max_poll_interval_ms): + +@click.command() +@click.option( + "-c", + "--consumer-groups-num", + "consumer_group_num", + default=1, + help="Number of consumer group.", +) +@click.option( + "-s", + "--session-timeout-ms", + "session_timeout_ms", + default=60000, + help="session timeout:ms", +) +@click.option( + "-p", + "--max-poll-interval-ms", + "max_poll_interval_ms", + default=180000, + help="max poll interval timeout:ms", +) +def test_timeout_sub(consumer_group_num, session_timeout_ms, max_poll_interval_ms): threads = [] - tdLog.info(consumer_group_num,session_timeout_ms,max_poll_interval_ms) + tdLog.info(consumer_group_num, session_timeout_ms, max_poll_interval_ms) for id in range(consumer_group_num): - conf = set_conf(group_id=id,session_timeout_ms=session_timeout_ms,max_poll_interval_ms=max_poll_interval_ms) + conf = set_conf( + group_id=id, + session_timeout_ms=session_timeout_ms, + max_poll_interval_ms=max_poll_interval_ms, + ) tdLog.info(conf) threads.append(threading.Thread(target=taosc_consumer, args=(conf,))) for tr in threads: @@ -31,52 +56,54 @@ def test_timeout_sub(consumer_group_num,session_timeout_ms,max_poll_interval_ms) for tr in threads: tr.join() -def sub_consumer(consumer,group_id): + +def sub_consumer(consumer, group_id): group_id = int(group_id) - if group_id < 100 : + if group_id < 100: try: consumer.subscribe(["select_d1"]) except Exception as e: - tdLog.info(f"subscribe error") - exit(1) + tdLog.info(f"subscribe error") + exit(1) nrows = 0 while True: start = datetime.now() tdLog.info(f"time:{start},consumer:{group_id}, start to consume") message = consumer.poll(timeout=10.0) - + if message: id = message.offset() topic = message.topic() database = message.database() - + for block in message: addrows = block.nrows() nrows += block.nrows() ncols = block.ncols() values = block.fetchall end = datetime.now() - elapsed_time = end -start - tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + elapsed_time = end - start + tdLog.info( + f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}" + ) consumer.commit() tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") - # consumer.unsubscribe() - # consumer.close() - # break - # if nrows >= 1000000: - # break -def sub_consumer_once(consumer,group_id): + # consumer.unsubscribe() + # consumer.close() + + +def sub_consumer_once(consumer, group_id): group_id = int(group_id) - if group_id < 100 : + if group_id < 100: consumer.subscribe(["select_d1"]) nrows = 0 consumer_nrows = 0 while True: start = datetime.now() tdLog.info(f"time:{start},consumer:{group_id}, start to consume") - #start = datetime.now() - #tdLog.info(f"time:{start},consumer:{group_id}, start to consume") + # start = datetime.now() + # 
tdLog.info(f"time:{start},consumer:{group_id}, start to consume") tdLog.info(f"consumer_nrows:{consumer_nrows}") if consumer_nrows < 1000000: message = consumer.poll(timeout=10.0) @@ -87,28 +114,40 @@ def sub_consumer_once(consumer,group_id): id = message.offset() topic = message.topic() database = message.database() - + for block in message: addrows = block.nrows() nrows += block.nrows() ncols = block.ncols() values = block.fetchall end = datetime.now() - elapsed_time = end -start + elapsed_time = end - start # tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") consumer.commit() # tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") consumer_nrows = nrows - # consumer.unsubscribe() - # consumer.close() - # break + # consumer.unsubscribe() + # consumer.close() + # break -def set_conf(td_connect_ip="localhost",group_id=1,client_id="test_consumer_py",enable_auto_commit="false",auto_commit_interval_ms="1000",auto_offset_reset="earliest",msg_with_table_name="true",session_timeout_ms=10000,max_poll_interval_ms=20000,experimental_snapshot_enable="false"): + +def set_conf( + td_connect_ip="localhost", + group_id=1, + client_id="test_consumer_py", + enable_auto_commit="false", + auto_commit_interval_ms="1000", + auto_offset_reset="earliest", + msg_with_table_name="true", + session_timeout_ms=10000, + max_poll_interval_ms=20000, + experimental_snapshot_enable="false", +): conf = { # auth options # consume options "td.connect.ip": f"{td_connect_ip}", - "group.id": f"{group_id}", + "group.id": f"{group_id}", "client.id": f"{client_id}", "enable.auto.commit": f"{enable_auto_commit}", "auto.commit.interval.ms": f"{auto_commit_interval_ms}", @@ -116,30 +155,20 @@ def set_conf(td_connect_ip="localhost",group_id=1,client_id="test_consumer_py",e "msg.with.table.name": f"{msg_with_table_name}", "session.timeout.ms": f"{session_timeout_ms}", "max.poll.interval.ms": f"{max_poll_interval_ms}", - "experimental.snapshot.enable" :f"{experimental_snapshot_enable}", + "experimental.snapshot.enable": f"{experimental_snapshot_enable}", } return conf + def taosc_consumer(conf): consumer = Consumer(conf) group_id = int(conf["group.id"]) tdLog.info(f"{consumer},{group_id}") - #counsmer sub: - # while True: - # try: - # self.sub_consumer_once(consumer,group_id) - # except Exception as e: - # tdLog.info(str(e)) - # time.sleep(1) - # break - # only consumer once try: - sub_consumer_once(consumer,group_id) + sub_consumer_once(consumer, group_id) except Exception as e: tdLog.info(str(e)) - - #consumer.close() -if __name__ == '__main__': - test_timeout_sub() \ No newline at end of file +if __name__ == "__main__": + test_timeout_sub() From 923f26b1eb817bd1104f3f664e6a261b0843c16c Mon Sep 17 00:00:00 2001 From: Yubesitie <151515717+Yubesitie@users.noreply.github.com> Date: Tue, 29 Oct 2024 15:13:12 +0800 Subject: [PATCH 109/142] Update 07-explorer.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 拼写错误 --- docs/zh/14-reference/01-components/07-explorer.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/01-components/07-explorer.md b/docs/zh/14-reference/01-components/07-explorer.md index 499fb3697c..eab4aef15b 100644 --- a/docs/zh/14-reference/01-components/07-explorer.md +++ b/docs/zh/14-reference/01-components/07-explorer.md @@ -8,7 +8,7 @@ taosExplorer 是一个为用户提供 TDengine 实例的可视化管理交互工 ## 安装 -taosEexplorer 无需单独安装,从 TDengine 3.3.0.0 
版本开始,它随着 TDengine 安装包一起发布,安装完成后,就可以看到 `taos-explorer` 服务。如果按照 GitHub 里步骤自己编译 TDengine 源代码生成的安装包不包含 taosExplorer。 +taosExplorer 无需单独安装,从 TDengine 3.3.0.0 版本开始,它随着 TDengine 安装包一起发布,安装完成后,就可以看到 `taos-explorer` 服务。如果按照 GitHub 里步骤自己编译 TDengine 源代码生成的安装包不包含 taosExplorer。 ## 配置 From 3ebdaf1d82f8ed8c4e3f74903fd4fcb8a5ca0aff Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 29 Oct 2024 16:21:44 +0800 Subject: [PATCH 110/142] test: use pylint to formate code --- tests/army/tmq/drop_lost_comsumers.py | 166 ++++++++++++-------------- 1 file changed, 78 insertions(+), 88 deletions(-) diff --git a/tests/army/tmq/drop_lost_comsumers.py b/tests/army/tmq/drop_lost_comsumers.py index f455931089..cef67ddfcc 100644 --- a/tests/army/tmq/drop_lost_comsumers.py +++ b/tests/army/tmq/drop_lost_comsumers.py @@ -1,26 +1,20 @@ -import taos -import sys import time -import socket import os import threading -import multiprocessing -from multiprocessing import Process, Queue - -from frame.log import * -from frame.cases import * -from frame.sql import * -from frame.caseBase import * -from frame import * -from taos.tmq import * -from frame import etool -from datetime import datetime +import datetime from taos.tmq import Consumer -from frame.common import * +from taos.error import TmqError + +from frame.log import tdLog +from frame.cases import tdCases +from frame.sql import tdSql +from frame.caseBase import * +from frame import etool +from frame.common import tdCom class TaosConsumer: - # TODo: add to tq.py and remove from here + # TODO: Move this class to tq.py and remove it from here def __init__(self): self.sub_once = True self.once_consumer_rows = 0 @@ -40,24 +34,26 @@ class TaosConsumer: tdLog.exit(f"subscribe error") nrows = 0 while True: - start = datetime.now() + start = datetime.datetime.now() tdLog.info(f"time:{start},consumer:{group_id}, start to consume") message = consumer.poll(timeout=10.0) if message: - id = message.offset() - topic = message.topic() - database = message.database() + message_offset = message.offset() + # topic = message.topic() + # database = message.database() for block in message: addrows = block.nrows() nrows += block.nrows() ncols = block.ncols() - values = block.fetchall - end = datetime.now() + # values = block.fetchall + end = datetime.datetime.now() elapsed_time = end - start tdLog.info( - f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}" + f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time}," + f"consumer_nrows:{nrows},consumer_addrows:{addrows}," + f"consumer_ncols:{ncols},offset:{id}" ) consumer.commit() tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") @@ -101,10 +97,11 @@ class TaosConsumer: consumer_nrows = 0 while not stop_event.is_set(): - start = datetime.now() + start = datetime.datetime.now() self.log_info( f"time:{start},consumer:{group_id}, start to consume,consumer_nrows:{consumer_nrows}" ) + message = None if consumer_nrows < self.once_consumer_rows: message = consumer.poll(timeout=1.0) elif consumer_nrows >= self.once_consumer_rows: @@ -113,36 +110,32 @@ class TaosConsumer: # tdLog.info("stop consumer when consumer all rows") if message: - id = message.offset() - topic = message.topic() - database = message.database() + message_offset = message.offset() + # topic = message.topic() + # database = message.database() for block in message: addrows = block.nrows() nrows += block.nrows() self.safe_counter.rows(block.nrows()) ncols = block.ncols() - values 
= block.fetchall - end = datetime.now() + # values = block.fetchall + end = datetime.datetime.now() elapsed_time = end - start self.log_info( - f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}" + f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{message_offset}" ) self.log_info( f"consumer:{group_id},consumer_nrows:{nrows},counter.counter:{self.safe_counter.counter}" ) # consumer.commit() consumer_nrows = nrows - # consumer.unsubscribe() - # consumer.close() - # break + tdLog.info("Consumer subscription thread is stopping.") - def taosc_consumer(self, conf: list, topic_name: str, stop_event): + def taosc_consumer(self, conf: list, topic_name: str, stop_event: threading.Event): try: tdLog.info(conf) - from taos.tmq import Consumer - tdLog.info("start to config consumer") consumer = Consumer(conf) tdLog.info("start to subscribe") @@ -175,12 +168,7 @@ class ThreadSafeCounter: class TDTestCase: # updatecfgDict = {'debugFlag': 135, 'asynclog': 0} - - def init(self, conn, logSql, replicaVar=1): - self.replicaVar = int(replicaVar) - tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor()) - self.consumer_instance = TaosConsumer() + def __init__(self): # db parameter self.table_number = 1000 self.rows_per_table = 1000 @@ -193,7 +181,12 @@ class TDTestCase: self.consumer_all_rows = ( self.consumer_rows_per_thread * self.consumer_groups_num ) - + self.topic_name = "select_d1" + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), logSql) + self.consumer_instance = TaosConsumer() # tdSql.init(conn.cursor(), logSql) # output sql.txt file def caseDescription(self): @@ -211,16 +204,15 @@ class TDTestCase: tdLog.info( f"wait timeout count:{count} and check consumer status whether is closed" ) - for ct in range(5): - tdSql.query(f"show consumers") + for _ in range(5): + tdSql.query("show consumers") anser_rows = tdSql.getRows() if anser_rows == rows: break - else: - time.sleep(1) - tdLog.info( - f"wait for {count} seconds to check that consumers number is {anser_rows}" - ) + time.sleep(1) + tdLog.info( + f"wait for {count} seconds to check that consumers number is {anser_rows}" + ) if anser_rows != rows: if stop_event: stop_event.set() @@ -228,37 +220,30 @@ class TDTestCase: except Exception as e: tdLog.exit(f"{e},check consumer error") - def drop_session_timeout_consmuers( - self, - consumer_groups_num, - session_timeout_ms, - max_poll_interval_ms, - topic_name, - timeout, - ): - tdSql.execute(f"drop topic if exists {topic_name};") - tdSql.execute(f"use db_sub") - tdSql.execute(f"create topic {topic_name} as select * from db_sub.meters;") + def drop_session_timeout_consmuers(self): + tdSql.execute(f"drop topic if exists {self.topic_name};") + tdSql.execute("use db_sub") + tdSql.execute(f"create topic {self.topic_name} as select * from db_sub.meters;") # start consumer and config some parameters os.system( - f"nohup python3 ./tmq/per_consumer.py -c {consumer_groups_num} -s {session_timeout_ms} -p {max_poll_interval_ms} > consumer.log &" + f"nohup python3 ./tmq/per_consumer.py -c {self.consumer_groups_num} -s {self.session_timeout_ms} -p {self.max_poll_interval_ms} > consumer.log &" ) # wait 4s for consuming data time.sleep(4) # kill consumer to simulate session_timeout_ms tdLog.info("kill 
per_consumer.py") tdCom.kill_signal_process( - signal=9, processor_name="python3\s*./tmq/per_consumer.py" + signal=9, processor_name=r"python3\s*./tmq/per_consumer.py" ) - self.check_consumer(timeout, 0) - tdSql.execute(f"drop topic if exists {topic_name};") + self.check_consumer(int(self.session_timeout_ms / 1000), 0) + tdSql.execute(f"drop topic if exists {self.topic_name};") os.system("rm -rf consumer.log") - def drop_max_poll_timeout_consmuers(self, topic_name, timeout): - tdSql.execute(f"drop topic if exists {topic_name};") - tdSql.execute(f"use db_sub") - tdSql.execute(f"create topic {topic_name} as select * from db_sub.meters;") + def drop_max_poll_timeout_consmuers(self): + tdSql.execute(f"drop topic if exists {self.topic_name};") + tdSql.execute("use db_sub") + tdSql.execute(f"create topic {self.topic_name} as select * from db_sub.meters;") threads = [] self.safe_counter = ThreadSafeCounter() @@ -267,16 +252,16 @@ class TDTestCase: self.consumer_instance.once_consumer_rows = self.consumer_rows_per_thread tdLog.info(f"consumer_rows:{self.consumer_instance.once_consumer_rows}") self.consumer_instance.sub_once = True - for id in range(self.consumer_groups_num): + for group_id in range(self.consumer_groups_num): conf = self.consumer_instance.set_conf( - group_id=id, + group_id=group_id, session_timeout_ms=self.session_timeout_ms, max_poll_interval_ms=self.max_poll_interval_ms, ) threads.append( threading.Thread( target=self.consumer_instance.taosc_consumer, - args=(conf, topic_name, stop_event), + args=(conf, self.topic_name, stop_event), ) ) for tr in threads: @@ -290,50 +275,55 @@ class TDTestCase: ) elif self.safe_counter.get() >= self.consumer_all_rows: # adding 5s is for heartbeat check - self.check_consumer(timeout + 5, 0, stop_event) + self.check_consumer(int(self.max_poll_interval_ms / 1000 ) + 5, 0, stop_event) stop_event.set() - tr.join() break + for tr in threads: + tr.join() time.sleep(1) - tdSql.execute(f"drop topic if exists {topic_name};") + tdSql.execute(f"drop topic if exists {self.topic_name};") def case_session_timeout(self): + """ + TEST CASE: verifying that the boundary and valid values of session_timeout_ms are in effect + """ + tdLog.info("start to test session_timeout_ms=12s") # test session_timeout_ms=12s self.session_timeout_ms = 12000 self.max_poll_interval_ms = 180000 - topic_name = "select_d1" - self.drop_session_timeout_consmuers( - consumer_groups_num=1, - session_timeout_ms=self.session_timeout_ms, - max_poll_interval_ms=self.max_poll_interval_ms, - topic_name=topic_name, - timeout=int(self.session_timeout_ms / 1000), - ) + # self.set_session_timeout = int(self.session_timeout_ms / 1000) + self.drop_session_timeout_consmuers() tdLog.info("stop to test session_timeout_ms=12s and done ") def case_max_poll_timeout(self): + """ + TEST CASE: verifying that the boundary and valid values of max_poll_interval_ms are in effect + """ tdLog.info("start to test max_poll_interval_ms=20s") # test max_poll_interval_ms=20s self.session_timeout_ms = 180000 self.max_poll_interval_ms = 20000 - topic_name = "select_d1" - self.drop_max_poll_timeout_consmuers( - topic_name=topic_name, timeout=int(self.max_poll_interval_ms / 1000) - ) + self.drop_max_poll_timeout_consmuers() tdLog.info("stop to test max_poll_interval_ms=20s and done ") def run(self): + """ + Run the test cases for session timeout and max poll timeout. 
+ """ vgroups = 4 etool.benchMark( command=f"-d db_sub -t {self.table_number} -n {self.rows_per_table} -v {vgroups} -a {self.replicaVar} -y" ) # test case start here - + self.topic_name = "select_d1" self.case_session_timeout() self.case_max_poll_timeout() def stop(self): + """ + Closes the taos connection and logs the success message. + """ tdSql.close() tdLog.success(f"{__file__} successfully executed") From ff2f2f2b5ff8e0a47990369496a6ca8c8af88c2a Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 29 Oct 2024 04:11:11 +0000 Subject: [PATCH 111/142] fix/TD-32703-add-log --- source/libs/sync/src/syncAppendEntries.c | 15 ++++++++---- source/libs/sync/src/syncAppendEntriesReply.c | 6 +++-- source/libs/sync/src/syncPipeline.c | 24 ++++++++++++++++--- source/libs/sync/src/syncUtil.c | 4 ++-- 4 files changed, 38 insertions(+), 11 deletions(-) diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index e3f94c1c9a..0345880874 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -95,6 +95,8 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { bool accepted = false; SSyncRaftEntry* pEntry = NULL; bool resetElect = false; + const STraceId* trace = &pRpcMsg->info.traceId; + char tbuf[40] = {0}; // if already drop replica, do not process if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) { @@ -150,10 +152,10 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { goto _IGNORE; } - sTrace("vgId:%d, recv append entries msg. index:%" PRId64 ", term:%" PRId64 ", preLogIndex:%" PRId64 - ", prevLogTerm:%" PRId64 " commitIndex:%" PRId64 " entryterm:%" PRId64, - pMsg->vgId, pMsg->prevLogIndex + 1, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, - pEntry->term); + sGTrace("vgId:%d, recv append entries msg. index:%" PRId64 ", term:%" PRId64 ", preLogIndex:%" PRId64 + ", prevLogTerm:%" PRId64 " commitIndex:%" PRId64 " entryterm:%" PRId64, + pMsg->vgId, pMsg->prevLogIndex + 1, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, + pEntry->term); if (ths->fsmState == SYNC_FSM_STATE_INCOMPLETE) { pReply->fsmState = ths->fsmState; @@ -179,6 +181,11 @@ _SEND_RESPONSE: sTrace("vgId:%d, update commit return index %" PRId64 "", ths->vgId, returnIndex); } + TRACE_SET_MSGID(&(rpcRsp.info.traceId), tGenIdPI64()); + trace = &(rpcRsp.info.traceId); + sGTrace("vgId:%d, send append reply matchIndex:%" PRId64 " term:%" PRId64 " lastSendIndex:%" PRId64 + " to dest: 0x%016" PRIx64, + ths->vgId, pReply->matchIndex, pReply->term, pReply->lastSendIndex, pReply->destId.addr); // ack, i.e. send response TAOS_CHECK_RETURN(syncNodeSendMsgById(&pReply->destId, ths, &rpcRsp)); diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 005cf4337d..a7f36be9e9 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -43,6 +43,8 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { int32_t code = 0; SyncAppendEntriesReply* pMsg = (SyncAppendEntriesReply*)pRpcMsg->pCont; int32_t ret = 0; + const STraceId* trace = &pRpcMsg->info.traceId; + char tbuf[40] = {0}; // if already drop replica, do not process if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) { @@ -63,8 +65,8 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { return TSDB_CODE_SYN_WRONG_TERM; } - sTrace("vgId:%d, received append entries reply. 
srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "", - pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex); + sGTrace("vgId:%d, received append entries reply. srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "", + pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex); if (pMsg->success) { SyncIndex oldMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId)); diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 9f6acf6d83..efb71b5714 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -1026,6 +1026,14 @@ int32_t syncLogReplRecover(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEn int32_t code = 0; if (pMgr->restored != false) return TSDB_CODE_SYN_INTERNAL_ERROR; + sTrace("vgId:%d, begin to recover sync log repl. peer: dnode:%d (%" PRIx64 "), repl-mgr:[%" PRId64 ", %" PRId64 + ", %" PRId64 ") restore:%d, buffer: [%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 + "), msg: {lastSendIndex:%" PRId64 ", matchIndex:%" PRId64 ", fsmState:%d, success:%d, lastMatchTerm:%" PRId64 + "}", + pNode->vgId, DID(&destId), destId.addr, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pMgr->restored, + pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, pMsg->lastSendIndex, pMsg->matchIndex, + pMsg->fsmState, pMsg->success, pMsg->lastMatchTerm); + if (pMgr->endIndex == 0) { if (pMgr->startIndex != 0) return TSDB_CODE_SYN_INTERNAL_ERROR; if (pMgr->matchIndex != 0) return TSDB_CODE_SYN_INTERNAL_ERROR; @@ -1171,6 +1179,11 @@ int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex inde int64_t nowMs = taosGetMonoTimestampMs(); int32_t code = 0; + sTrace("vgId:%d, begin to probe peer:%" PRIx64 " with msg of index:%" PRId64 ". repl-mgr:[%" PRId64 ", %" PRId64 + ", %" PRId64 "), restored:%d", + pNode->vgId, pNode->replicasId[pMgr->peerId].addr, index, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, + pMgr->restored); + if (pMgr->endIndex > pMgr->startIndex && nowMs < pMgr->states[pMgr->startIndex % pMgr->size].timeMs + retryMaxWaitMs) { return 0; @@ -1206,6 +1219,10 @@ int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex inde int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { if (!pMgr->restored) return TSDB_CODE_SYN_INTERNAL_ERROR; + sTrace("vgId:%d, begin to attempt replicate log entries from end to match. 
repl-mgr:[%" PRId64 ", %" PRId64 + ", %" PRId64 "), restore:%d", + pNode->vgId, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pMgr->restored); + SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; int32_t batchSize = TMAX(1, pMgr->size >> (4 + pMgr->retryBackoff)); int32_t code = 0; @@ -1527,11 +1544,12 @@ int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex ind goto _err; } + TRACE_SET_MSGID(&(msgOut.info.traceId), tGenIdPI64()); + STraceId* trace = &(msgOut.info.traceId); + sGTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64, + pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr); TAOS_CHECK_GOTO(syncNodeSendAppendEntries(pNode, pDestId, &msgOut), &lino, _err); - sTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64, - pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr); - if (!inBuf) { syncEntryDestroy(pEntry); pEntry = NULL; diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index 212a75c2ae..efb258d952 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -152,8 +152,8 @@ static void syncLogReplStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLe for (int32_t i = 0; i < pSyncNode->replicaNum; i++) { SSyncLogReplMgr* pMgr = pSyncNode->logReplMgrs[i]; if (pMgr == NULL) break; - len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 " %" PRId64 ", %" PRId64 "]", i, pMgr->restored, - pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex); + len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 ", %" PRId64 ", %" PRId64 "]", i, pMgr->restored, + pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex); if (i + 1 < pSyncNode->replicaNum) { len += tsnprintf(buf + len, bufLen - len, "%s", ", "); } From 23249271fb56abdbe892b11da2ade6549ff51db3 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 29 Oct 2024 06:38:36 +0000 Subject: [PATCH 112/142] fix/TD-32703-add-log --- source/libs/transport/src/transCli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index c3e214b5e3..5ade768d0c 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -3090,7 +3090,7 @@ int32_t transReleaseCliHandle(void* handle) { static int32_t transInitMsg(void* pInstRef, const SEpSet* pEpSet, STransMsg* pReq, STransCtx* ctx, SCliReq** pCliMsg) { int32_t code = 0; - TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); + if (pReq->info.traceId.msgId == 0) TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); SCliReq* pCliReq = NULL; SReqCtx* pCtx = taosMemoryCalloc(1, sizeof(SReqCtx)); From e9feec94b3e5067a9421d2a0014e5818119bdb4b Mon Sep 17 00:00:00 2001 From: Pengrongkun Date: Sun, 27 Oct 2024 11:04:39 +0800 Subject: [PATCH 113/142] TD-32120:add api taos_stmt2_get_all_fields --- include/client/taos.h | 16 ++++--- source/client/src/clientMain.c | 10 ++++ source/client/src/clientStmt2.c | 45 ++++++++++++++++++ source/libs/parser/src/parInsertSql.c | 27 +++++++++-- source/libs/parser/src/parInsertStmt.c | 7 +-- tests/script/api/makefile | 2 + tests/script/api/stmt2-get-fields.c | 64 ++++++++++++++++++++++++++ 7 files changed, 156 insertions(+), 15 deletions(-) create mode 100644 tests/script/api/stmt2-get-fields.c diff --git a/include/client/taos.h b/include/client/taos.h index 80dbe27c47..00fec66a71 100644 --- a/include/client/taos.h +++ 
b/include/client/taos.h @@ -81,6 +81,13 @@ typedef enum { TSDB_SML_TIMESTAMP_NANO_SECONDS, } TSDB_SML_TIMESTAMP_TYPE; +typedef enum TAOS_FIELD_T { + TAOS_FIELD_COL = 1, + TAOS_FIELD_TAG, + TAOS_FIELD_QUERY, + TAOS_FIELD_TBNAME, +} TAOS_FIELD_T; + typedef struct taosField { char name[65]; int8_t type; @@ -93,6 +100,7 @@ typedef struct TAOS_FIELD_E { uint8_t precision; uint8_t scale; int32_t bytes; + TAOS_FIELD_T field_type; } TAOS_FIELD_E; #ifdef WINDOWS @@ -195,13 +203,6 @@ DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt); typedef void TAOS_STMT2; -typedef enum TAOS_FIELD_T { - TAOS_FIELD_COL = 1, - TAOS_FIELD_TAG, - TAOS_FIELD_QUERY, - TAOS_FIELD_TBNAME, -} TAOS_FIELD_T; - typedef struct TAOS_STMT2_OPTION { int64_t reqid; bool singleStbInsert; @@ -232,6 +233,7 @@ DLL_EXPORT int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows); DLL_EXPORT int taos_stmt2_close(TAOS_STMT2 *stmt); DLL_EXPORT int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert); DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields); +DLL_EXPORT int taos_stmt2_get_all_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_E **fields); DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields); DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt); DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 64631fd754..43f8c9e1f9 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -2103,6 +2103,16 @@ int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, } } +int taos_stmt2_get_all_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_E **fields) { + if (stmt == NULL || NULL == count) { + tscError("NULL parameter for %s", __FUNCTION__); + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + + return stmtGetColFields2(stmt, count, fields); +} + void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) { (void)stmt; if (!fields) return; diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 2f046b61d6..2d76d91e38 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -1068,6 +1068,48 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E return TSDB_CODE_SUCCESS; } + +static int stmtFetchFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields) { + SBoundColInfo* tags = (SBoundColInfo*)pStmt->bInfo.boundTags; + STableMeta* meta = ((SVnodeModifyOpStmt*)(pStmt->sql.pQuery->pRoot))->pTableMeta; + if (tags == NULL || meta == NULL || (meta->schema == NULL && tags->numOfBound != 0)) { + return TSDB_CODE_INVALID_PARA; + } + + if (fields) { + *fields = taosMemoryCalloc(tags->numOfBound, sizeof(TAOS_FIELD_E)); + if (NULL == *fields) { + return terrno; + } + + SSchema* pSchema = meta->schema; + int32_t tbnameIdx = meta->tableInfo.numOfTags + meta->tableInfo.numOfColumns; + for (int32_t i = 0; i < tags->numOfBound; ++i) { + int16_t idx = tags->pColIndex[i]; + if (idx == tbnameIdx) { + (*fields)[i].field_type = TAOS_FIELD_TBNAME; + strcpy((*fields)[i].name, "tbname"); + continue; + } else if (idx < meta->tableInfo.numOfColumns) { + (*fields)[i].field_type = TAOS_FIELD_COL; + } else { + (*fields)[i].field_type = TAOS_FIELD_TAG; + } + SSchema schema = pSchema[tags->pColIndex[i]]; + if (TSDB_DATA_TYPE_TIMESTAMP == schema.type) { + (*fields)[i].precision = meta->tableInfo.precision; + } + + 
tstrncpy((*fields)[i].name, schema.name, sizeof((*fields)[i].name)); + (*fields)[i].type = schema.type; + (*fields)[i].bytes = schema.bytes; + } + } + + *fieldNum = tags->numOfBound; + + return TSDB_CODE_SUCCESS; +} /* SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) { while (true) { @@ -1847,6 +1889,9 @@ int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { _return: pStmt->errCode = preCode; + if (code == TSDB_CODE_TSC_INVALID_OPERATION) { + return stmtFetchFields2(stmt, nums, fields); + } return code; } diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 1c26a7c70e..77617ad3b3 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -780,6 +780,9 @@ static int32_t buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* p } int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf, int8_t type) { + if (pToken->type == TK_NK_QUESTION) { + return buildInvalidOperationMsg(pMsgBuf, "insert into super table syntax is not supported for stmt"); + } if ((pToken->type != TK_NOW && pToken->type != TK_TODAY && pToken->type != TK_NK_INTEGER && pToken->type != TK_NK_STRING && pToken->type != TK_NK_FLOAT && pToken->type != TK_NK_BOOL && pToken->type != TK_NULL && pToken->type != TK_NK_HEX && pToken->type != TK_NK_OCT && pToken->type != TK_NK_BIN && @@ -2422,9 +2425,6 @@ static int32_t parseInsertStbClauseBottom(SInsertParseContext* pCxt, SVnodeModif // 1. [(tag1_name, ...)] ... // 2. VALUES ... | FILE ... static int32_t parseInsertTableClauseBottom(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) { - if (pStmt->stbSyntax && TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { - return buildInvalidOperationMsg(&pCxt->msg, "insert into super table syntax is not supported for stmt"); - } if (!pStmt->stbSyntax) { STableDataCxt* pTableCxt = NULL; int32_t code = parseSchemaClauseBottom(pCxt, pStmt, &pTableCxt); @@ -3066,8 +3066,9 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal .isStmtBind = pCxt->isStmtBind}; int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); + SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)((*pQuery)->pRoot); if (TSDB_CODE_SUCCESS == code) { - code = parseInsertSqlImpl(&context, (SVnodeModifyOpStmt*)(*pQuery)->pRoot); + code = parseInsertSqlImpl(&context, pStmt); } if (TSDB_CODE_SUCCESS == code) { code = setNextStageInfo(&context, *pQuery, pCatalogReq); @@ -3076,8 +3077,24 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal QUERY_EXEC_STAGE_SCHEDULE == (*pQuery)->execStage) { code = setRefreshMeta(*pQuery); } - insDestroyBoundColInfo(&context.tags); + if (pStmt->stbSyntax && TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT) && + code == TSDB_CODE_TSC_INVALID_OPERATION) { + context.tags.numOfBound = pStmt->pStbRowsCxt->boundColsInfo.numOfBound; + context.tags.numOfCols = pStmt->pStbRowsCxt->boundColsInfo.numOfCols; + context.tags. 
hasBoundCols= pStmt->pStbRowsCxt->boundColsInfo.hasBoundCols; + context.tags.pColIndex = taosMemoryMalloc(sizeof(int16_t) * context.tags.numOfBound); + memcpy(context.tags.pColIndex, pStmt->pStbRowsCxt->boundColsInfo.pColIndex, + sizeof(int16_t) * pStmt->pStbRowsCxt->boundColsInfo.numOfBound); + code = setStmtInfo(&context, pStmt); + if (TSDB_CODE_SUCCESS == code) { + insDestroyBoundColInfo(&context.tags); + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + + insDestroyBoundColInfo(&context.tags); + // if no data to insert, set emptyMode to avoid request server if (!context.needRequest) { (*pQuery)->execMode = QUERY_EXEC_MODE_EMPTY_RESULT; diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index ee61611bf2..f18b7b817b 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -885,7 +885,7 @@ _return: } int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSchema, int32_t* fieldNum, - TAOS_FIELD_E** fields, uint8_t timePrec) { + TAOS_FIELD_E** fields, uint8_t timePrec, TAOS_FIELD_T fieldType) { if (fields) { *fields = taosMemoryCalloc(numOfBound, sizeof(TAOS_FIELD_E)); if (NULL == *fields) { @@ -900,6 +900,7 @@ int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSc for (int32_t i = 0; i < numOfBound; ++i) { schema = &pSchema[boundColumns[i]]; strcpy((*fields)[i].name, schema->name); + (*fields)[i].field_type = schema->type; (*fields)[i].type = schema->type; (*fields)[i].bytes = schema->bytes; } @@ -929,7 +930,7 @@ int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TA return TSDB_CODE_SUCCESS; } - CHECK_CODE(buildBoundFields(tags->numOfBound, tags->pColIndex, pSchema, fieldNum, fields, 0)); + CHECK_CODE(buildBoundFields(tags->numOfBound, tags->pColIndex, pSchema, fieldNum, fields, 0, TAOS_FIELD_TAG)); return TSDB_CODE_SUCCESS; } @@ -947,7 +948,7 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel } CHECK_CODE(buildBoundFields(pDataBlock->boundColsInfo.numOfBound, pDataBlock->boundColsInfo.pColIndex, pSchema, - fieldNum, fields, pDataBlock->pMeta->tableInfo.precision)); + fieldNum, fields, pDataBlock->pMeta->tableInfo.precision, TAOS_FIELD_COL)); return TSDB_CODE_SUCCESS; } diff --git a/tests/script/api/makefile b/tests/script/api/makefile index d8a4e19218..9c2bb6be3d 100644 --- a/tests/script/api/makefile +++ b/tests/script/api/makefile @@ -25,6 +25,7 @@ exe: gcc $(CFLAGS) ./stmt.c -o $(ROOT)stmt $(LFLAGS) gcc $(CFLAGS) ./stmt2.c -o $(ROOT)stmt2 $(LFLAGS) gcc $(CFLAGS) ./stmt2-example.c -o $(ROOT)stmt2-example $(LFLAGS) + gcc $(CFLAGS) ./stmt2-get-fields.c -o $(ROOT)stmt2-get-fields $(LFLAGS) gcc $(CFLAGS) ./stmt2-nohole.c -o $(ROOT)stmt2-nohole $(LFLAGS) gcc $(CFLAGS) ./stmt-crash.c -o $(ROOT)stmt-crash $(LFLAGS) @@ -42,5 +43,6 @@ clean: rm $(ROOT)stmt rm $(ROOT)stmt2 rm $(ROOT)stmt2-example + rm $(ROOT)stmt2-get-fields rm $(ROOT)stmt2-nohole rm $(ROOT)stmt-crash diff --git a/tests/script/api/stmt2-get-fields.c b/tests/script/api/stmt2-get-fields.c new file mode 100644 index 0000000000..60bee16873 --- /dev/null +++ b/tests/script/api/stmt2-get-fields.c @@ -0,0 +1,64 @@ +// TAOS standard API example. 
The same syntax as MySQL, but only a subet +// to compile: gcc -o stmt2-get-fields stmt2-get-fields.c -ltaos + +#include +#include +#include +#include "taos.h" + +void do_query(TAOS *taos, const char *sql) { + TAOS_RES *result = taos_query(taos, sql); + int code = taos_errno(result); + if (code) { + printf("failed to query: %s, reason:%s\n", sql, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); +} + +void do_stmt(TAOS *taos) { + do_query(taos, "drop database if exists db"); + do_query(taos, "create database db"); + do_query(taos, + "create table db.stb (ts timestamp, b binary(10)) tags(t1 " + "int, t2 binary(10))"); + + TAOS_STMT2_OPTION option = {0}; + TAOS_STMT2 *stmt = taos_stmt2_init(taos, &option); + const char *sql = "insert into db.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; + + int code = taos_stmt2_prepare(stmt, sql, 0); + if (code != 0) { + printf("failed to execute taos_stmt2_prepare. error:%s\n", taos_stmt2_error(stmt)); + taos_stmt2_close(stmt); + return; + } + + int fieldNum = 0; + TAOS_FIELD_E *pFields = NULL; + code = taos_stmt2_get_all_fields(stmt, &fieldNum, &pFields); + if (code != 0) { + printf("failed get col,ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_stmt2_error(stmt)); + } else { + printf("col nums:%d\n", fieldNum); + for(int i = 0; i < fieldNum; i++) { + printf("field[%d]: %s,type:%d\n", i, pFields[i].name,pFields[i].field_type); + } + } + + + taos_stmt2_close(stmt); +} + +int main() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", "", 0); + if (!taos) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + do_stmt(taos); + taos_close(taos); + taos_cleanup(); +} \ No newline at end of file From 7e89000d4e10d76c4200bda0cbb445e20e24fb68 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 29 Oct 2024 18:02:14 +0800 Subject: [PATCH 114/142] test: add the frequency of printing consumer logs --- tests/army/tmq/drop_lost_comsumers.py | 55 +++++++++++++++------------ tests/army/tmq/per_consumer.py | 32 ++++++++++------ 2 files changed, 50 insertions(+), 37 deletions(-) diff --git a/tests/army/tmq/drop_lost_comsumers.py b/tests/army/tmq/drop_lost_comsumers.py index cef67ddfcc..a5e8140c4a 100644 --- a/tests/army/tmq/drop_lost_comsumers.py +++ b/tests/army/tmq/drop_lost_comsumers.py @@ -24,7 +24,8 @@ class TaosConsumer: def log_info(self, message): if self.sub_log: tdLog.info(message) - + + #TODO merge sub_consumer and sub_consumer_once def sub_consumer(self, consumer, group_id, topic_name): group_id = int(group_id) if group_id < 100: @@ -95,20 +96,23 @@ class TaosConsumer: consumer.subscribe([topic_name]) nrows = 0 consumer_nrows = 0 - + count = 0 while not stop_event.is_set(): start = datetime.datetime.now() - self.log_info( - f"time:{start},consumer:{group_id}, start to consume,consumer_nrows:{consumer_nrows}" - ) + # self.log_info( + # f"time:{start},consumer:{group_id}, start to consume,consumer_nrows:{consumer_nrows}" + # ) message = None if consumer_nrows < self.once_consumer_rows: message = consumer.poll(timeout=1.0) elif consumer_nrows >= self.once_consumer_rows: - # when break the loop, the consumer will be closed, so we need to continue to keep consumer alive util the stop_event is set - pass - # tdLog.info("stop consumer when consumer all rows") - + if count == 0: + # when break the loop, the consumer will be closed, so we need to continue to keep consumer alive util the stop_event is set + tdLog.info("stop consumer when consumer all rows") + count += 1 + # 
tdLog.info("stop consumer when consumer all rows") + else: + continue if message: message_offset = message.offset() # topic = message.topic() @@ -122,12 +126,13 @@ class TaosConsumer: end = datetime.datetime.now() elapsed_time = end - start - self.log_info( - f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{message_offset}" - ) + # self.log_info( + # f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{message_offset}" + # ) self.log_info( - f"consumer:{group_id},consumer_nrows:{nrows},counter.counter:{self.safe_counter.counter}" + f"consumer:{group_id},consumer_nrows:{nrows},counter.counter:{self.safe_counter.counter},counter.get():{self.safe_counter.get()}" ) + # consumer.commit() consumer_nrows = nrows @@ -204,7 +209,7 @@ class TDTestCase: tdLog.info( f"wait timeout count:{count} and check consumer status whether is closed" ) - for _ in range(5): + for _ in range(2): tdSql.query("show consumers") anser_rows = tdSql.getRows() if anser_rows == rows: @@ -227,10 +232,10 @@ class TDTestCase: # start consumer and config some parameters os.system( - f"nohup python3 ./tmq/per_consumer.py -c {self.consumer_groups_num} -s {self.session_timeout_ms} -p {self.max_poll_interval_ms} > consumer.log &" + f"nohup python3 ./tmq/per_consumer.py -c {self.consumer_groups_num} -s {self.session_timeout_ms} -p {self.max_poll_interval_ms} -t {self.topic_name} > consumer.log &" ) - # wait 4s for consuming data - time.sleep(4) + # wait 5s for consuming data + time.sleep(5) # kill consumer to simulate session_timeout_ms tdLog.info("kill per_consumer.py") tdCom.kill_signal_process( @@ -268,18 +273,18 @@ class TDTestCase: tr.start() while True: - if self.safe_counter.get() < self.consumer_all_rows: - time.sleep(5) + if self.safe_counter.counter < self.consumer_all_rows: + # control print log frequency + time.sleep(1) tdLog.info( - f"consumer_all_rows:{self.consumer_all_rows},counter.get():{self.safe_counter.get()}" + f"consumer_all_rows:{self.consumer_all_rows},counter.get():{self.safe_counter.counter}" ) - elif self.safe_counter.get() >= self.consumer_all_rows: + elif self.safe_counter.counter == self.consumer_all_rows: # adding 5s is for heartbeat check self.check_consumer(int(self.max_poll_interval_ms / 1000 ) + 5, 0, stop_event) stop_event.set() break - for tr in threads: - tr.join() + time.sleep(1) tdSql.execute(f"drop topic if exists {self.topic_name};") @@ -302,7 +307,7 @@ class TDTestCase: """ tdLog.info("start to test max_poll_interval_ms=20s") # test max_poll_interval_ms=20s - self.session_timeout_ms = 180000 + self.session_timeout_ms = 300000 self.max_poll_interval_ms = 20000 self.drop_max_poll_timeout_consmuers() tdLog.info("stop to test max_poll_interval_ms=20s and done ") @@ -317,7 +322,7 @@ class TDTestCase: ) # test case start here self.topic_name = "select_d1" - self.case_session_timeout() + # self.case_session_timeout() self.case_max_poll_timeout() def stop(self): diff --git a/tests/army/tmq/per_consumer.py b/tests/army/tmq/per_consumer.py index 418c88dead..b8f409d710 100644 --- a/tests/army/tmq/per_consumer.py +++ b/tests/army/tmq/per_consumer.py @@ -1,8 +1,9 @@ import os import taos -import time +import sys from datetime import datetime -from frame.log import * +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +from frame.log import tdLog import subprocess from multiprocessing import 
Process import threading @@ -40,28 +41,35 @@ except Exception as e: default=180000, help="max poll interval timeout:ms", ) -def test_timeout_sub(consumer_group_num, session_timeout_ms, max_poll_interval_ms): +@click.option( + "-t", + "--topic-name", + "topic_name", + default="select_d1", + help="topic name", +) +def test_timeout_sub(consumer_group_num, session_timeout_ms, max_poll_interval_ms, topic_name): threads = [] - tdLog.info(consumer_group_num, session_timeout_ms, max_poll_interval_ms) + tdLog.info(f"consumer_group_num:{consumer_group_num}, session_timeout_ms:{session_timeout_ms}, max_poll_interval_ms:{max_poll_interval_ms}") for id in range(consumer_group_num): conf = set_conf( group_id=id, session_timeout_ms=session_timeout_ms, max_poll_interval_ms=max_poll_interval_ms, ) - tdLog.info(conf) - threads.append(threading.Thread(target=taosc_consumer, args=(conf,))) + tdLog.info(f"conf:{conf}") + threads.append(threading.Thread(target=taosc_consumer, args=(conf,topic_name))) for tr in threads: tr.start() for tr in threads: tr.join() -def sub_consumer(consumer, group_id): +def sub_consumer(consumer, group_id, topic_name): group_id = int(group_id) if group_id < 100: try: - consumer.subscribe(["select_d1"]) + consumer.subscribe([topic_name]) except Exception as e: tdLog.info(f"subscribe error") exit(1) @@ -93,10 +101,10 @@ def sub_consumer(consumer, group_id): # consumer.close() -def sub_consumer_once(consumer, group_id): +def sub_consumer_once(consumer, group_id, topic_name): group_id = int(group_id) if group_id < 100: - consumer.subscribe(["select_d1"]) + consumer.subscribe([topic_name]) nrows = 0 consumer_nrows = 0 while True: @@ -160,12 +168,12 @@ def set_conf( return conf -def taosc_consumer(conf): +def taosc_consumer(conf,topic_name): consumer = Consumer(conf) group_id = int(conf["group.id"]) tdLog.info(f"{consumer},{group_id}") try: - sub_consumer_once(consumer, group_id) + sub_consumer_once(consumer, group_id, topic_name) except Exception as e: tdLog.info(str(e)) From 419c2e2974dc1cb6cb1d6e12675833d6d2b625d1 Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Tue, 29 Oct 2024 11:13:50 +0800 Subject: [PATCH 115/142] fix:[TS-5567] fix bug when partition/group by const value's alias name. --- source/libs/parser/src/parTranslater.c | 114 +++++++++++-------- tests/system-test/2-query/group_partition.py | 19 +++- 2 files changed, 83 insertions(+), 50 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 636be7c5cc..7ce8d87b18 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1603,25 +1603,6 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p } } if (*pFound) { - if (QUERY_NODE_FUNCTION == nodeType(pFoundNode) && (SQL_CLAUSE_GROUP_BY == pCxt->currClause || SQL_CLAUSE_PARTITION_BY == pCxt->currClause)) { - pCxt->errCode = getFuncInfo(pCxt, (SFunctionNode*)pFoundNode); - if (TSDB_CODE_SUCCESS == pCxt->errCode) { - if (fmIsVectorFunc(((SFunctionNode*)pFoundNode)->funcId)) { - pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION, (*pCol)->colName); - return DEAL_RES_ERROR; - } else if (fmIsPseudoColumnFunc(((SFunctionNode*)pFoundNode)->funcId)) { - if ('\0' != (*pCol)->tableAlias[0]) { - return translateColumnWithPrefix(pCxt, pCol); - } else { - return translateColumnWithoutPrefix(pCxt, pCol); - } - } else { - /* Do nothing and replace old node with found node. 
*/ - } - } else { - return DEAL_RES_ERROR; - } - } SNode* pNew = NULL; int32_t code = nodesCloneNode(pFoundNode, &pNew); if (NULL == pNew) { @@ -1630,14 +1611,6 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p } nodesDestroyNode(*(SNode**)pCol); *(SNode**)pCol = (SNode*)pNew; - if (QUERY_NODE_COLUMN == nodeType(pFoundNode)) { - pCxt->errCode = TSDB_CODE_SUCCESS; - if ('\0' != (*pCol)->tableAlias[0]) { - return translateColumnWithPrefix(pCxt, pCol); - } else { - return translateColumnWithoutPrefix(pCxt, pCol); - } - } } return DEAL_RES_CONTINUE; } @@ -1882,6 +1855,39 @@ static bool clauseSupportAlias(ESqlClause clause) { SQL_CLAUSE_ORDER_BY == clause; } +static EDealRes translateColumnInGroupByClause(STranslateContext* pCxt, SColumnNode** pCol, bool *translateAsAlias) { + *translateAsAlias = false; + // count(*)/first(*)/last(*) and so on + if (0 == strcmp((*pCol)->colName, "*")) { + return DEAL_RES_CONTINUE; + } + + if (pCxt->pParseCxt->biMode) { + SNode** ppNode = (SNode**)pCol; + bool ret; + pCxt->errCode = biRewriteToTbnameFunc(pCxt, ppNode, &ret); + if (TSDB_CODE_SUCCESS != pCxt->errCode) return DEAL_RES_ERROR; + if (ret) { + return translateFunction(pCxt, (SFunctionNode**)ppNode); + } + } + + EDealRes res = DEAL_RES_CONTINUE; + if ('\0' != (*pCol)->tableAlias[0]) { + res = translateColumnWithPrefix(pCxt, pCol); + } else { + bool found = false; + res = translateColumnWithoutPrefix(pCxt, pCol); + if (!(*pCol)->node.asParam && + res != DEAL_RES_CONTINUE && + res != DEAL_RES_END && pCxt->errCode != TSDB_CODE_PAR_AMBIGUOUS_COLUMN) { + res = translateColumnUseAlias(pCxt, pCol, &found); + *translateAsAlias = true; + } + } + return res; +} + static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { if (NULL == pCxt->pCurrStmt || (isSelectStmt(pCxt->pCurrStmt) && NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable)) { @@ -5472,12 +5478,13 @@ typedef struct SReplaceGroupByAliasCxt { SNodeList* pProjectionList; } SReplaceGroupByAliasCxt; -static EDealRes replaceGroupByAliasImpl(SNode** pNode, void* pContext) { +static EDealRes translateGroupPartitionByImpl(SNode** pNode, void* pContext) { SReplaceGroupByAliasCxt* pCxt = pContext; SNodeList* pProjectionList = pCxt->pProjectionList; SNode* pProject = NULL; + int32_t code = TSDB_CODE_SUCCESS; + STranslateContext* pTransCxt = pCxt->pTranslateCxt; if (QUERY_NODE_VALUE == nodeType(*pNode)) { - STranslateContext* pTransCxt = pCxt->pTranslateCxt; SValueNode* pVal = (SValueNode*) *pNode; if (DEAL_RES_ERROR == translateValue(pTransCxt, pVal)) { return DEAL_RES_CONTINUE; @@ -5488,43 +5495,58 @@ static EDealRes replaceGroupByAliasImpl(SNode** pNode, void* pContext) { int32_t pos = getPositionValue(pVal); if (0 < pos && pos <= LIST_LENGTH(pProjectionList)) { SNode* pNew = NULL; - int32_t code = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1), (SNode**)&pNew); + code = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1), (SNode**)&pNew); if (TSDB_CODE_SUCCESS != code) { pCxt->pTranslateCxt->errCode = code; return DEAL_RES_ERROR; } nodesDestroyNode(*pNode); *pNode = pNew; - return DEAL_RES_CONTINUE; - } else { + } + code = translateExpr(pTransCxt, pNode); + if (TSDB_CODE_SUCCESS != code) { + pTransCxt->errCode = code; + return DEAL_RES_ERROR; + } + return DEAL_RES_CONTINUE; + } else if (QUERY_NODE_COLUMN == nodeType(*pNode)) { + bool asAlias = false; + EDealRes res = translateColumnInGroupByClause(pTransCxt, (SColumnNode**)pNode, &asAlias); + if (DEAL_RES_ERROR == res) { + return 
DEAL_RES_ERROR; + } + pTransCxt->errCode = TSDB_CODE_SUCCESS; + if (nodeType(*pNode) == QUERY_NODE_COLUMN && !asAlias) { return DEAL_RES_CONTINUE; } - } else if (QUERY_NODE_COLUMN == nodeType(*pNode)) { - STranslateContext* pTransCxt = pCxt->pTranslateCxt; - return translateColumn(pTransCxt, (SColumnNode**)pNode); + code = translateExpr(pTransCxt, pNode); + if (TSDB_CODE_SUCCESS != code) { + pTransCxt->errCode = code; + return DEAL_RES_ERROR; + } + return DEAL_RES_CONTINUE; } - - return DEAL_RES_CONTINUE; + return doTranslateExpr(pNode, pTransCxt); } -static int32_t replaceGroupByAlias(STranslateContext* pCxt, SSelectStmt* pSelect) { +static int32_t translateGroupByList(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL == pSelect->pGroupByList) { return TSDB_CODE_SUCCESS; } SReplaceGroupByAliasCxt cxt = { .pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; - nodesRewriteExprsPostOrder(pSelect->pGroupByList, replaceGroupByAliasImpl, &cxt); + nodesRewriteExprsPostOrder(pSelect->pGroupByList, translateGroupPartitionByImpl, &cxt); return pCxt->errCode; } -static int32_t replacePartitionByAlias(STranslateContext* pCxt, SSelectStmt* pSelect) { +static int32_t translatePartitionByList(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL == pSelect->pPartitionByList) { return TSDB_CODE_SUCCESS; } SReplaceGroupByAliasCxt cxt = { .pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; - nodesRewriteExprsPostOrder(pSelect->pPartitionByList, replaceGroupByAliasImpl, &cxt); + nodesRewriteExprsPostOrder(pSelect->pPartitionByList, translateGroupPartitionByImpl, &cxt); return pCxt->errCode; } @@ -5588,11 +5610,8 @@ static int32_t translateGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) { NODES_DESTORY_LIST(pSelect->pGroupByList); return TSDB_CODE_SUCCESS; } - code = replaceGroupByAlias(pCxt, pSelect); - } - if (TSDB_CODE_SUCCESS == code) { pSelect->timeLineResMode = TIME_LINE_NONE; - code = translateExprList(pCxt, pSelect->pGroupByList); + code = translateGroupByList(pCxt, pSelect); } return code; } @@ -6287,10 +6306,7 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelec (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - code = replacePartitionByAlias(pCxt, pSelect); - if (TSDB_CODE_SUCCESS == code) { - code = translateExprList(pCxt, pSelect->pPartitionByList); - } + code = translatePartitionByList(pCxt, pSelect); } if (TSDB_CODE_SUCCESS == code) { code = translateExprList(pCxt, pSelect->pTags); diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index 384df02e8d..7ee528841c 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -420,7 +420,23 @@ class TDTestCase: tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 where t2 = 1") tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 interval(1d)") - + def test_TS5567(self): + tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t group by const_col") + tdSql.checkRows(50) + tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t partition by const_col") + tdSql.checkRows(50) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by const_col") + tdSql.checkRows(10) + 
tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by const_col") + tdSql.checkRows(10) + tdSql.query(f"select const_col as c_c from (select 1 as const_col from {self.dbname}.{self.stable}) t group by c_c") + tdSql.checkRows(50) + tdSql.query(f"select const_col as c_c from (select 1 as const_col from {self.dbname}.{self.stable}) t partition by c_c") + tdSql.checkRows(50) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by 1") + tdSql.checkRows(10) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by 1") + tdSql.checkRows(10) def run(self): tdSql.prepare() self.prepare_db() @@ -453,6 +469,7 @@ class TDTestCase: self.test_window(nonempty_tb_num) self.test_event_window(nonempty_tb_num) + self.test_TS5567() ## test old version before changed # self.test_groupby('group', 0, 0) From a603c16f6c2b80a6217afa3b1af2a9d78750bd09 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 29 Oct 2024 19:21:59 +0800 Subject: [PATCH 116/142] set case tmqVnodeTransform-stb-removewal.py's sync debug flag to 143 --- tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py b/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py index 40879d5c66..938dcfcc9e 100644 --- a/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py +++ b/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py @@ -17,6 +17,8 @@ sys.path.append("./7-tmq") from tmqCommon import * class TDTestCase: + + updatecfgDict = {'sDebugFlag':143} def __init__(self): self.vgroups = 1 self.ctbNum = 10 From 439032092a507411d7a42017b398dee423375aca Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Oct 2024 21:38:05 +0800 Subject: [PATCH 117/142] doc: minor changes --- docs/zh/04-get-started/01-docker.md | 2 +- docs/zh/04-get-started/03-package.md | 2 +- docs/zh/04-get-started/_07-use.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/04-get-started/01-docker.md b/docs/zh/04-get-started/01-docker.md index 848a7fd499..4bd9322595 100644 --- a/docs/zh/04-get-started/01-docker.md +++ b/docs/zh/04-get-started/01-docker.md @@ -121,4 +121,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s); ``` -在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 +在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。 diff --git a/docs/zh/04-get-started/03-package.md b/docs/zh/04-get-started/03-package.md index 2a1f594b4f..dcc5c58d17 100644 --- a/docs/zh/04-get-started/03-package.md +++ b/docs/zh/04-get-started/03-package.md @@ -317,4 +317,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s); ``` -在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 \ No newline at end of file +在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。 \ No newline at end of file diff --git a/docs/zh/04-get-started/_07-use.md b/docs/zh/04-get-started/_07-use.md index d206ed4102..8c976e9b55 100644 --- a/docs/zh/04-get-started/_07-use.md +++ b/docs/zh/04-get-started/_07-use.md @@ -54,4 +54,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM 
test.d1001 INTERVAL(10s); ``` -在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 \ No newline at end of file +在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。 \ No newline at end of file From 9aeb74e6d9b0d90fdf247860e635643a81f5f2ec Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Wed, 30 Oct 2024 13:54:55 +0800 Subject: [PATCH 118/142] improve doc --- docs/zh/08-operation/02-planning.md | 18 +++++++++--------- docs/zh/14-reference/07-supported.md | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/zh/08-operation/02-planning.md b/docs/zh/08-operation/02-planning.md index 66da1df8bf..396fd0d3d5 100644 --- a/docs/zh/08-operation/02-planning.md +++ b/docs/zh/08-operation/02-planning.md @@ -53,7 +53,7 @@ M = (T × S × 3 + (N / 4096) + 100) 与 WebSocket 连接方式相比,RESTful 连接方式在内存占用上更大,除了缓冲区所需的内存以外,还需要考虑每个连接响应结果的内存开销。这种内存开销与响应结果的JSON 数据大小密切相关,特别是在查询数据量很大时,会占用大量内存。 -由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议打开batchfetch=true 选项,以启用 WebSocket 连接方式,实现流式结果集返回,从而避免内存溢出的风险 +由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议使用 WebSocket 连接方式,实现流式结果集返回,从而避免内存溢出的风险 **注意** - 建议采用 RESTful/WebSocket 连接方式来访问 TDengine 集群,而不采用taosc 原生连接方式。 @@ -146,11 +146,11 @@ TDengine 的多级存储功能在使用上还具备以下优点。 下表列出了 TDengine 的一些接口或组件的常用端口,这些端口均可以通过配置文件中的参数进行修改。 -|接口或组件 | 端口 | -|:---------------------------:|:---------:| -|原生接口(taosc) | 6030 | -|RESTful 接口 | 6041 | -|WebSocket 接口 |6041 | -|taosKeeper | 6043 | -|taosX | 6050, 6055 | -|taosExplorer | 6060 | \ No newline at end of file +| 接口或组件 | 端口 | +| :---------------: | :--------: | +| 原生接口(taosc) | 6030 | +| RESTful 接口 | 6041 | +| WebSocket 接口 | 6041 | +| taosKeeper | 6043 | +| taosX | 6050, 6055 | +| taosExplorer | 6060 | diff --git a/docs/zh/14-reference/07-supported.md b/docs/zh/14-reference/07-supported.md index 10ca237653..b243b14383 100644 --- a/docs/zh/14-reference/07-supported.md +++ b/docs/zh/14-reference/07-supported.md @@ -26,10 +26,10 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" | ----------- | ------------- | ------------- | --------- | ------------- | --------- | | **OS** | **Linux** | **Win64** | **Linux** | **macOS** | **macOS** | | **C/C++** | ● | ● | ● | ● | ● | -| **JDBC** | ● | ● | ● | ○ | ○ | +| **JDBC** | ● | ● | ● | ● | ● | | **Python** | ● | ● | ● | ● | ● | | **Go** | ● | ● | ● | ● | ● | -| **NodeJs** | ● | ● | ● | ○ | ○ | +| **NodeJs** | ● | ● | ● | ● | ● | | **C#** | ● | ● | ○ | ○ | ○ | | **Rust** | ● | ● | ○ | ● | ● | | **RESTful** | ● | ● | ● | ● | ● | From 19f85afc11314d07e18e63c7754c3616d3b6d8b6 Mon Sep 17 00:00:00 2001 From: dengbin40 <46433701+dengbin40@users.noreply.github.com> Date: Sun, 29 Sep 2024 09:58:18 +0800 Subject: [PATCH 119/142] =?UTF-8?q?fix:=20=E5=A2=9E=E5=8A=A0pod=E8=8A=82?= =?UTF-8?q?=E7=82=B9=E7=9A=84=E5=88=86=E5=B8=83=E5=BC=8F=E9=83=A8=E7=BD=B2?= =?UTF-8?q?=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/zh/08-operation/03-deployment.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/docs/zh/08-operation/03-deployment.md b/docs/zh/08-operation/03-deployment.md index 2e0c2a7989..e549e8613d 100644 --- a/docs/zh/08-operation/03-deployment.md +++ b/docs/zh/08-operation/03-deployment.md @@ -368,6 +368,18 @@ spec: labels: app: "tdengine" spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - tdengine + 
topologyKey: kubernetes.io/hostname containers: - name: "tdengine" image: "tdengine/tdengine:3.2.3.0" @@ -837,4 +849,4 @@ Helm 管理下,清理操作也变得简单: helm uninstall tdengine ``` -但 Helm 也不会自动移除 PVC,需要手动获取 PVC 然后删除掉。 \ No newline at end of file +但 Helm 也不会自动移除 PVC,需要手动获取 PVC 然后删除掉。 From aa3c79100bb55ad5fd296f88de21c4bc978e4b51 Mon Sep 17 00:00:00 2001 From: Pengrongkun Date: Wed, 30 Oct 2024 14:24:10 +0800 Subject: [PATCH 120/142] fix some format --- include/client/taos.h | 32 ++++++------- source/client/src/clientMain.c | 11 ++--- source/client/src/clientStmt2.c | 16 +++---- source/libs/parser/src/parInsertSql.c | 63 ++++++++++++++------------ source/libs/parser/src/parInsertStmt.c | 8 ++-- 5 files changed, 66 insertions(+), 64 deletions(-) diff --git a/include/client/taos.h b/include/client/taos.h index 00fec66a71..6797dfee5f 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -95,11 +95,11 @@ typedef struct taosField { } TAOS_FIELD; typedef struct TAOS_FIELD_E { - char name[65]; - int8_t type; - uint8_t precision; - uint8_t scale; - int32_t bytes; + char name[65]; + int8_t type; + uint8_t precision; + uint8_t scale; + int32_t bytes; TAOS_FIELD_T field_type; } TAOS_FIELD_E; @@ -253,17 +253,17 @@ DLL_EXPORT int64_t taos_affected_rows64(TAOS_RES *res); DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -DLL_EXPORT void taos_stop_query(TAOS_RES *res); -DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); -DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows); -DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); -DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); -DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); -DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData); -DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex); -DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); -DLL_EXPORT void taos_reset_current_db(TAOS *taos); +DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); +DLL_EXPORT void taos_stop_query(TAOS_RES *res); +DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); +DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows); +DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); +DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); +DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); +DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData); +DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex); +DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); +DLL_EXPORT void taos_reset_current_db(TAOS *taos); DLL_EXPORT int *taos_fetch_lengths(TAOS_RES *res); DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 43f8c9e1f9..54da1c013c 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -84,7 +84,7 @@ void taos_cleanup(void) { taosCloseRef(id); nodesDestroyAllocatorSet(); -// cleanupAppInfo(); + // cleanupAppInfo(); 
rpcCleanup(); tscDebug("rpc cleanup"); @@ -388,7 +388,6 @@ void taos_free_result(TAOS_RES *res) { tDeleteMqBatchMetaRsp(&pRsp->batchMetaRsp); } taosMemoryFree(pRsp); - } void taos_kill_query(TAOS *taos) { @@ -484,7 +483,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { return taos_print_row_with_size(str, INT32_MAX, row, fields, num_fields); } -int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields){ +int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int32_t len = 0; for (int i = 0; i < num_fields; ++i) { if (i > 0 && len < size - 1) { @@ -589,7 +588,7 @@ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD break; } } - if (len < size){ + if (len < size) { str[len] = 0; } @@ -2082,7 +2081,7 @@ int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert) { } int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields) { - if (stmt == NULL || NULL == count) { + if (stmt == NULL || count == NULL) { tscError("NULL parameter for %s", __FUNCTION__); terrno = TSDB_CODE_INVALID_PARA; return terrno; @@ -2104,7 +2103,7 @@ int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, } int taos_stmt2_get_all_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_E **fields) { - if (stmt == NULL || NULL == count) { + if (stmt == NULL || count == NULL) { tscError("NULL parameter for %s", __FUNCTION__); terrno = TSDB_CODE_INVALID_PARA; return terrno; diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 2d76d91e38..5d04006f06 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -1076,33 +1076,33 @@ static int stmtFetchFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E** return TSDB_CODE_INVALID_PARA; } - if (fields) { + if (fields != NULL) { *fields = taosMemoryCalloc(tags->numOfBound, sizeof(TAOS_FIELD_E)); if (NULL == *fields) { return terrno; } SSchema* pSchema = meta->schema; - int32_t tbnameIdx = meta->tableInfo.numOfTags + meta->tableInfo.numOfColumns; + int32_t tbnameIdx = meta->tableInfo.numOfTags + meta->tableInfo.numOfColumns; for (int32_t i = 0; i < tags->numOfBound; ++i) { int16_t idx = tags->pColIndex[i]; if (idx == tbnameIdx) { (*fields)[i].field_type = TAOS_FIELD_TBNAME; - strcpy((*fields)[i].name, "tbname"); + tstrncpy((*fields)[i].name, "tbname", sizeof((*fields)[i].name)); continue; } else if (idx < meta->tableInfo.numOfColumns) { (*fields)[i].field_type = TAOS_FIELD_COL; } else { (*fields)[i].field_type = TAOS_FIELD_TAG; } - SSchema schema = pSchema[tags->pColIndex[i]]; - if (TSDB_DATA_TYPE_TIMESTAMP == schema.type) { + + if (TSDB_DATA_TYPE_TIMESTAMP == pSchema[tags->pColIndex[i]].type) { (*fields)[i].precision = meta->tableInfo.precision; } - tstrncpy((*fields)[i].name, schema.name, sizeof((*fields)[i].name)); - (*fields)[i].type = schema.type; - (*fields)[i].bytes = schema.bytes; + tstrncpy((*fields)[i].name, pSchema[tags->pColIndex[i]].name, sizeof((*fields)[i].name)); + (*fields)[i].type = pSchema[tags->pColIndex[i]].type; + (*fields)[i].bytes = pSchema[tags->pColIndex[i]].bytes; } } diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 77617ad3b3..f4ef099515 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -30,7 +30,7 @@ typedef struct SInsertParseContext { bool 
forceUpdate; bool needTableTagVal; bool needRequest; // whether or not request server - bool isStmtBind; // whether is stmt bind + bool isStmtBind; // whether is stmt bind } SInsertParseContext; typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param); @@ -757,7 +757,7 @@ int32_t parseTagValue(SMsgBuf* pMsgBuf, const char** pSql, uint8_t precision, SS STagVal val = {0}; int32_t code = parseTagToken(pSql, pToken, pTagSchema, precision, &val, pMsgBuf); if (TSDB_CODE_SUCCESS == code) { - if (NULL == taosArrayPush(pTagVals, &val)){ + if (NULL == taosArrayPush(pTagVals, &val)) { code = terrno; } } @@ -775,8 +775,8 @@ static int32_t buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* p return terrno; } return insBuildCreateTbReq(pStmt->pCreateTblReq, pStmt->targetTableName.tname, pTag, pStmt->pTableMeta->suid, - pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags, - TSDB_DEFAULT_TABLE_TTL); + pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags, + TSDB_DEFAULT_TABLE_TTL); } int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf, int8_t type) { @@ -813,7 +813,7 @@ typedef struct SRewriteTagCondCxt { static int32_t rewriteTagCondColumnImpl(STagVal* pVal, SNode** pNode) { SValueNode* pValue = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); if (NULL == pValue) { return code; } @@ -1044,7 +1044,7 @@ static int32_t storeChildTableMeta(SInsertParseContext* pCxt, SVnodeModifyOpStmt return TSDB_CODE_OUT_OF_MEMORY; } - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName); if (TSDB_CODE_SUCCESS != code) { taosMemoryFree(pBackup); @@ -1239,7 +1239,7 @@ static int32_t getTargetTableMetaAndVgroup(SInsertParseContext* pCxt, SVnodeModi } static int32_t collectUseTable(const SName* pName, SHashObj* pTable) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1385,7 +1385,7 @@ static int32_t getTableDataCxt(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS pStmt->pTableMeta, &pStmt->pCreateTblReq, pTableCxt, false, false); } - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1926,8 +1926,8 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod } if (code == TSDB_CODE_SUCCESS) { code = insBuildCreateTbReq(pStbRowsCxt->pCreateCtbReq, pStbRowsCxt->ctbName.tname, pStbRowsCxt->pTag, - pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames, - getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL); + pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames, + getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL); pStbRowsCxt->pTag = NULL; } @@ -1936,9 +1936,9 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod code = tNameExtractFullName(&pStbRowsCxt->ctbName, ctbFName); SVgroupInfo vg; SRequestConnInfo conn = {.pTrans = pCxt->pComCxt->pTransporter, - .requestId = pCxt->pComCxt->requestId, - .requestObjRefId = pCxt->pComCxt->requestRid, - .mgmtEps = pCxt->pComCxt->mgmtEpSet}; + .requestId = pCxt->pComCxt->requestId, + 
.requestObjRefId = pCxt->pComCxt->requestRid, + .mgmtEps = pCxt->pComCxt->mgmtEpSet}; if (TSDB_CODE_SUCCESS == code) { code = catalogGetTableHashVgroup(pCxt->pComCxt->pCatalog, &conn, &pStbRowsCxt->ctbName, &vg); } @@ -2179,8 +2179,8 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt if (code == TSDB_CODE_SUCCESS) { SStbRowsDataContext* pStbRowsCxt = rowsDataCxt.pStbRowsCxt; void* pData = pTableDataCxt; - code = taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), &pData, - POINTER_BYTES); + code = taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), + &pData, POINTER_BYTES); if (TSDB_CODE_SUCCESS != code) { break; } @@ -2252,7 +2252,7 @@ static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpSt if (!pStmt->stbSyntax && numOfRows > 0) { void* pData = rowsDataCxt.pTableDataCxt; code = taosHashPut(pStmt->pTableCxtHashObj, &pStmt->pTableMeta->uid, sizeof(pStmt->pTableMeta->uid), &pData, - POINTER_BYTES); + POINTER_BYTES); } return code; @@ -2366,8 +2366,7 @@ static int32_t constructStbRowsDataContext(SVnodeModifyOpStmt* pStmt, SStbRowsDa if (TSDB_CODE_SUCCESS == code) { // col values and bound cols info of STableDataContext is not used pStbRowsCxt->aColVals = taosArrayInit(getNumOfColumns(pStbRowsCxt->pStbMeta), sizeof(SColVal)); - if (!pStbRowsCxt->aColVals) - code = terrno; + if (!pStbRowsCxt->aColVals) code = terrno; } if (TSDB_CODE_SUCCESS == code) { code = insInitColValues(pStbRowsCxt->pStbMeta, pStbRowsCxt->aColVals); @@ -2511,9 +2510,9 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif } // db.? situation,ensure that the only thing following the '.' mark is '?' - char *tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true); + char* tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true); if ((tbNameAfterDbName != NULL) && (*(tbNameAfterDbName + 1) == '?')) { - char *tbName = NULL; + char* tbName = NULL; if (NULL == pCxt->pComCxt->pStmtCb) { return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z); } @@ -2528,7 +2527,8 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif if (pCxt->isStmtBind) { if (TK_NK_ID == pTbName->type || (tbNameAfterDbName != NULL && *(tbNameAfterDbName + 1) != '?')) { // In SQL statements, the table name has already been specified. 
- parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param", pCxt->pComCxt->requestId); + parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param", + pCxt->pComCxt->requestId); } } @@ -2614,7 +2614,7 @@ static void destroySubTableHashElem(void* p) { taosMemoryFree(*(STableMeta**)p); static int32_t createVnodeModifOpStmt(SInsertParseContext* pCxt, bool reentry, SNode** pOutput) { SVnodeModifyOpStmt* pStmt = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt); + int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt); if (NULL == pStmt) { return code; } @@ -2729,7 +2729,7 @@ static int32_t buildTagNameFromMeta(STableMeta* pMeta, SArray** pTagName) { return terrno; } SSchema* pSchema = getTableTagSchema(pMeta); - int32_t code = 0; + int32_t code = 0; for (int32_t i = 0; i < pMeta->tableInfo.numOfTags; ++i) { if (NULL == taosArrayPush(*pTagName, pSchema[i].name)) { code = terrno; @@ -2834,7 +2834,7 @@ static int32_t resetVnodeModifOpStmt(SInsertParseContext* pCxt, SQuery* pQuery) } if (NULL == pStmt->pTableBlockHashObj) { pStmt->pTableBlockHashObj = - taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); } if (NULL == pStmt->pVgroupsHashObj || NULL == pStmt->pTableBlockHashObj) { code = TSDB_CODE_OUT_OF_MEMORY; @@ -2866,7 +2866,7 @@ static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogR static int32_t setRefreshMeta(SQuery* pQuery) { SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot; - int32_t code = 0; + int32_t code = 0; if (taosHashGetSize(pStmt->pTableNameHashObj) > 0) { taosArrayDestroy(pQuery->pTableList); @@ -3065,7 +3065,7 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal .forceUpdate = (NULL != pCatalogReq ? pCatalogReq->forceUpdate : false), .isStmtBind = pCxt->isStmtBind}; - int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); + int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)((*pQuery)->pRoot); if (TSDB_CODE_SUCCESS == code) { code = parseInsertSqlImpl(&context, pStmt); @@ -3082,10 +3082,14 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal code == TSDB_CODE_TSC_INVALID_OPERATION) { context.tags.numOfBound = pStmt->pStbRowsCxt->boundColsInfo.numOfBound; context.tags.numOfCols = pStmt->pStbRowsCxt->boundColsInfo.numOfCols; - context.tags. 
hasBoundCols= pStmt->pStbRowsCxt->boundColsInfo.hasBoundCols; + context.tags.hasBoundCols = pStmt->pStbRowsCxt->boundColsInfo.hasBoundCols; context.tags.pColIndex = taosMemoryMalloc(sizeof(int16_t) * context.tags.numOfBound); - memcpy(context.tags.pColIndex, pStmt->pStbRowsCxt->boundColsInfo.pColIndex, - sizeof(int16_t) * pStmt->pStbRowsCxt->boundColsInfo.numOfBound); + if (NULL == context.tags.pColIndex) { + return terrno; + } + + (void)memcpy(context.tags.pColIndex, pStmt->pStbRowsCxt->boundColsInfo.pColIndex, + sizeof(int16_t) * pStmt->pStbRowsCxt->boundColsInfo.numOfBound); code = setStmtInfo(&context, pStmt); if (TSDB_CODE_SUCCESS == code) { insDestroyBoundColInfo(&context.tags); @@ -3094,7 +3098,6 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal } insDestroyBoundColInfo(&context.tags); - // if no data to insert, set emptyMode to avoid request server if (!context.needRequest) { (*pQuery)->execMode = QUERY_EXEC_MODE_EMPTY_RESULT; diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index f18b7b817b..8c17689ea7 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -242,7 +242,7 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch } code = insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName, - pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); + pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); pTag = NULL; end: @@ -594,7 +594,7 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c } code = insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName, - pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); + pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); pTag = NULL; end: @@ -886,7 +886,7 @@ _return: int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_E** fields, uint8_t timePrec, TAOS_FIELD_T fieldType) { - if (fields) { + if (fields != NULL) { *fields = taosMemoryCalloc(numOfBound, sizeof(TAOS_FIELD_E)); if (NULL == *fields) { return terrno; @@ -940,7 +940,7 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta); if (pDataBlock->boundColsInfo.numOfBound <= 0) { *fieldNum = 0; - if (fields) { + if (fields != NULL) { *fields = NULL; } From 25ea22e0b17268099de9dcd0b174d59ebbdaa30e Mon Sep 17 00:00:00 2001 From: haoranchen Date: Wed, 30 Oct 2024 14:24:59 +0800 Subject: [PATCH 121/142] Update tests/army/frame/common.py Co-authored-by: WANG Xu --- tests/army/frame/common.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index bad86c828f..a82bf4c94f 100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -807,8 +807,9 @@ class TDCom: if (platform.system().lower() == 'windows'): os.system(f"TASKKILL /F /IM {processor_name}.exe") else: - tdLog.debug(f"unset LD_PRELOAD; sudo pkill -f -{signal} '{processor_name}' ") - os.system(f"unset LD_PRELOAD; sudo pkill -f -{signal} '{processor_name}' ") + command = f"unset LD_PRELOAD; sudo pkill -f -{signal} '{processor_name}'" + tdLog.debug(f"command: {command}") + os.system(command) def gen_tag_col_str(self, gen_type, data_type, count): From ef50e5d407dd3ae39929cd5761e9e6646b35e06b Mon Sep 17 00:00:00 
2001 From: Hongze Cheng Date: Wed, 30 Oct 2024 15:21:11 +0800 Subject: [PATCH 122/142] enh: optimize the error message for the case of the drop column --- include/util/taoserror.h | 1 + source/dnode/mnode/impl/src/mndStb.c | 8 ++++---- source/util/src/terror.c | 1 + 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index a53923b904..dd9b90d390 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -392,6 +392,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_MND_INVALID_SCHEMA_VER TAOS_DEF_ERROR_CODE(0, 0x03C6) #define TSDB_CODE_MND_STABLE_UID_NOT_MATCH TAOS_DEF_ERROR_CODE(0, 0x03C7) #define TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA TAOS_DEF_ERROR_CODE(0, 0x03C8) +#define TSDB_CODE_MND_ONLY_TS_SCHEMA_NOT_ALLOW TAOS_DEF_ERROR_CODE(0, 0x03C9) // mnode-trans #define TSDB_CODE_MND_TRANS_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03D0) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 2500d1ef40..2986bdc117 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -1928,7 +1928,7 @@ static int32_t mndDropSuperTableColumn(SMnode *pMnode, const SStbObj *pOld, SStb } if (pOld->numOfColumns == 2) { - code = TSDB_CODE_MND_INVALID_STB_ALTER_OPTION; + code = TSDB_CODE_MND_ONLY_TS_SCHEMA_NOT_ALLOW; TAOS_RETURN(code); } @@ -4295,9 +4295,9 @@ static int32_t mndDropTbAdd(SMnode *pMnode, SHashObj *pVgHashMap, const SVgroupI return 0; } -int vgInfoCmp(const void* lp, const void* rp) { - SVgroupInfo* pLeft = (SVgroupInfo*)lp; - SVgroupInfo* pRight = (SVgroupInfo*)rp; +int vgInfoCmp(const void *lp, const void *rp) { + SVgroupInfo *pLeft = (SVgroupInfo *)lp; + SVgroupInfo *pRight = (SVgroupInfo *)rp; if (pLeft->hashBegin < pRight->hashBegin) { return -1; } else if (pLeft->hashBegin > pRight->hashBegin) { diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 52a3be120d..0a9eab43f8 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -307,6 +307,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_STABLE_UID_NOT_MATCH, "Invalid stable uid") TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA, "Field used by tsma") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_IN_CREATING, "Dnode in creating status") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_IN_DROPPING, "Dnode in dropping status") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_ONLY_TS_SCHEMA_NOT_ALLOW, "Schema with only ts field is not allowed") // mnode-trans TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_ALREADY_EXIST, "Transaction already exists") From 4b21fd866df6ea3a95659a9e724cac5ce747218e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 30 Oct 2024 15:21:32 +0800 Subject: [PATCH 123/142] doc: update os support info --- docs/zh/14-reference/07-supported.md | 47 ++++++++++++++++++---------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/docs/zh/14-reference/07-supported.md b/docs/zh/14-reference/07-supported.md index 10ca237653..828c8717e2 100644 --- a/docs/zh/14-reference/07-supported.md +++ b/docs/zh/14-reference/07-supported.md @@ -6,12 +6,25 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" ## TDengine 服务端支持的平台列表 -| | **Windows server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 以上** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **macOS** | -| ------------ | ---------------------------- | ----------------- | ---------------- | ------------------ | ------------ | ----------------- | ---------------- | --------- | -| X64 | ●/E | ●/E | ● | ● | ●/E | ●/E | ●/E | ● | -| 树莓派 
ARM64 | | | ● | | | | | | -| 华为云 ARM64 | | | | ● | | | | | -| M1 | | | | | | | | ● | +| | **版本** | **X64 64bit** | **ARM64** | **M1** | +| ----------------------|----------------| ------------- | --------- | ------ | +| **CentOS** | **7.9 以上** | ● | ● | | +| **Ubuntu** | **18 以上** | ● | ● | | +| **RedHat** | **RHEL 6 以上** | ● | ● | | +| **Debian** | **6.0 以上** | ● | ● | | +| **FreeBSD** | **12 以上** | ● | ● | | +| **OpenSUSE** | **全部版本** | ● | ● | | +| **SUSE Linux** | **11 以上** | ● | ● | | +| **Fedora** | **21 以上** | ● | ● | | +| **Windows Server** | **2016/2019** | ●/E | | | +| **Windows** | **10/11** | ●/E | | | +| **银河麒麟** | **V10 以上** | ●/E | | | +| **中标麒麟** | **V7.0 以上** | ●/E | | | +| **通信 UOS** | **V20 以上** | ●/E | | | +| **凝思磐石** | **V6.0 以上** | ●/E | | | +| **华为欧拉 openEuler** | **V20.03 以上** | ●/E | | | +| **龙蜥 Anolis OS** | **V8.6 以上** | ●/E | | | +| **macOS** | **11.0 以上** | | | ● | 注:1) ● 表示经过官方测试验证, ○ 表示非官方测试验证,E 表示仅企业版支持。 2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。 @@ -22,16 +35,16 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" 对照矩阵如下: -| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** | **X64 64bit** | **ARM64** | -| ----------- | ------------- | ------------- | --------- | ------------- | --------- | -| **OS** | **Linux** | **Win64** | **Linux** | **macOS** | **macOS** | -| **C/C++** | ● | ● | ● | ● | ● | -| **JDBC** | ● | ● | ● | ○ | ○ | -| **Python** | ● | ● | ● | ● | ● | -| **Go** | ● | ● | ● | ● | ● | -| **NodeJs** | ● | ● | ● | ○ | ○ | -| **C#** | ● | ● | ○ | ○ | ○ | -| **Rust** | ● | ● | ○ | ● | ● | -| **RESTful** | ● | ● | ● | ● | ● | +| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **ARM64** | **ARM64** | +| ----------- | ------------- | ------------- | ------------- | --------- | --------- | +| **OS** | **Linux** | **Win64** | **macOS** | **Linux** | **macOS** | +| **C/C++** | ● | ● | ● | ● | ● | +| **JDBC** | ● | ● | ○ | ● | ○ | +| **Python** | ● | ● | ● | ● | ● | +| **Go** | ● | ● | ● | ● | ● | +| **NodeJs** | ● | ● | ○ | ● | ○ | +| **C#** | ● | ● | ○ | ○ | ○ | +| **Rust** | ● | ● | ● | ○ | ● | +| **RESTful** | ● | ● | ● | ● | ● | 注:● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。 From 83642604034258298de85652cc30f8478a7e8a21 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 30 Oct 2024 15:30:32 +0800 Subject: [PATCH 124/142] enh: change the error code --- include/util/taoserror.h | 1 - source/dnode/mnode/impl/src/mndStb.c | 2 +- source/util/src/terror.c | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index dd9b90d390..a53923b904 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -392,7 +392,6 @@ int32_t taosGetErrSize(); #define TSDB_CODE_MND_INVALID_SCHEMA_VER TAOS_DEF_ERROR_CODE(0, 0x03C6) #define TSDB_CODE_MND_STABLE_UID_NOT_MATCH TAOS_DEF_ERROR_CODE(0, 0x03C7) #define TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA TAOS_DEF_ERROR_CODE(0, 0x03C8) -#define TSDB_CODE_MND_ONLY_TS_SCHEMA_NOT_ALLOW TAOS_DEF_ERROR_CODE(0, 0x03C9) // mnode-trans #define TSDB_CODE_MND_TRANS_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03D0) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 2986bdc117..3725d3a3fc 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -1928,7 +1928,7 @@ static int32_t mndDropSuperTableColumn(SMnode *pMnode, const SStbObj *pOld, SStb } if (pOld->numOfColumns == 2) { - code = 
TSDB_CODE_MND_ONLY_TS_SCHEMA_NOT_ALLOW; + code = TSDB_CODE_PAR_INVALID_DROP_COL; TAOS_RETURN(code); } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 0a9eab43f8..52a3be120d 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -307,7 +307,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_STABLE_UID_NOT_MATCH, "Invalid stable uid") TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA, "Field used by tsma") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_IN_CREATING, "Dnode in creating status") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_IN_DROPPING, "Dnode in dropping status") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_ONLY_TS_SCHEMA_NOT_ALLOW, "Schema with only ts field is not allowed") // mnode-trans TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_ALREADY_EXIST, "Transaction already exists") From 935d118f42c295f0e3d7a5ea81b55151b37232fd Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 30 Oct 2024 07:29:13 +0000 Subject: [PATCH 125/142] fix/TD-32766-check-ref-before-free-main --- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 6 +++--- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 16 ++++++++++++---- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index d07fe9c5ac..7566b69c02 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -38,7 +38,7 @@ int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnod SVnodeObj *pVnode = *ppVnode; if (pVnode && num < size) { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount); + dTrace("vgId:%d,acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); pVnodes[num++] = (*ppVnode); pIter = taosHashIterate(pMgmt->hash, pIter); } else { @@ -52,7 +52,7 @@ int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnod SVnodeObj *pVnode = *ppVnode; if (pVnode && num < size) { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount); + dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); pVnodes[num++] = (*ppVnode); pIter = taosHashIterate(pMgmt->closedHash, pIter); } else { @@ -84,7 +84,7 @@ int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeOb SVnodeObj *pVnode = *ppVnode; if (pVnode && num < size) { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount); + dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); pVnodes[num++] = (*ppVnode); pIter = taosHashIterate(pMgmt->hash, pIter); } else { diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index a285043382..0913d7539b 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -103,7 +103,7 @@ SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict) { pVnode = NULL; } else { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - // dTrace("vgId:%d, acquire vnode, ref:%d", pVnode->vgId, refCount); + dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); } (void)taosThreadRwlockUnlock(&pMgmt->lock); @@ -115,16 +115,24 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) { return vmAcquireVno void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj 
*pVnode) { if (pVnode == NULL) return; - (void)taosThreadRwlockRdlock(&pMgmt->lock); + //(void)taosThreadRwlockRdlock(&pMgmt->lock); int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1); - // dTrace("vgId:%d, release vnode, ref:%d", pVnode->vgId, refCount); - (void)taosThreadRwlockUnlock(&pMgmt->lock); + dTrace("vgId:%d, release vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); + //(void)taosThreadRwlockUnlock(&pMgmt->lock); } static void vmFreeVnodeObj(SVnodeObj **ppVnode) { if (!ppVnode || !(*ppVnode)) return; SVnodeObj *pVnode = *ppVnode; + + int32_t refCount = 1; + while (refCount > 0) { + dWarn("vgId:%d, vnode is refenced, retry to free in 200ms, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); + taosMsleep(200); + refCount = atomic_load_32(&pVnode->refCount); + } + taosMemoryFree(pVnode->path); taosMemoryFree(pVnode); ppVnode[0] = NULL; From 65f11a24633d7457e41aec32c6a81be5e79b0c09 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 30 Oct 2024 15:38:29 +0800 Subject: [PATCH 126/142] doc: minor changes --- docs/zh/14-reference/07-supported.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/14-reference/07-supported.md b/docs/zh/14-reference/07-supported.md index 828c8717e2..96d12311be 100644 --- a/docs/zh/14-reference/07-supported.md +++ b/docs/zh/14-reference/07-supported.md @@ -18,10 +18,10 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" | **Fedora** | **21 以上** | ● | ● | | | **Windows Server** | **2016/2019** | ●/E | | | | **Windows** | **10/11** | ●/E | | | -| **银河麒麟** | **V10 以上** | ●/E | | | -| **中标麒麟** | **V7.0 以上** | ●/E | | | +| **银河麒麟** | **V10 以上** | ●/E | ●/E | | +| **中标麒麟** | **V7.0 以上** | ●/E | ●/E | | | **通信 UOS** | **V20 以上** | ●/E | | | -| **凝思磐石** | **V6.0 以上** | ●/E | | | +| **凝思磐石** | **V8.0 以上** | ●/E | | | | **华为欧拉 openEuler** | **V20.03 以上** | ●/E | | | | **龙蜥 Anolis OS** | **V8.6 以上** | ●/E | | | | **macOS** | **11.0 以上** | | | ● | From eed49788c0cfac34a3acc431d209732747937993 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 30 Oct 2024 15:50:39 +0800 Subject: [PATCH 127/142] doc: update index --- docs/zh/01-index.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md index 32ea117fbb..1c9ad0b2e1 100644 --- a/docs/zh/01-index.md +++ b/docs/zh/01-index.md @@ -6,7 +6,7 @@ slug: / TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库Time Series Database, TSDB), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept) -TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[数据模型](./basic/model)一章。 +TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论你在工作中是什么角色,请您仔细阅读[数据模型](./basic/model)一章。 如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解的话,请看[连接器](./reference/connector)一章。 @@ -16,6 +16,8 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移 如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。 
+如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinterna)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。 + 最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。 Together, we make a difference! From 1cdad6572410aa034f909e85589b5189cca3a5e6 Mon Sep 17 00:00:00 2001 From: Chait Diwadkar <94201190+cdiwadkar16@users.noreply.github.com> Date: Fri, 6 Sep 2024 14:01:09 -0700 Subject: [PATCH 128/142] docs/Update 01-docker.md Corrected spelling mistake and have a statement on how to login to taos-explorer using root/taosdata. --- docs/en/04-get-started/01-docker.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/04-get-started/01-docker.md b/docs/en/04-get-started/01-docker.md index 882e2ef194..f361e5a10f 100644 --- a/docs/en/04-get-started/01-docker.md +++ b/docs/en/04-get-started/01-docker.md @@ -75,9 +75,9 @@ taos> ## TDegnine Graphic User Interface -From TDengine 3.3.0.0, there is a new componenet called `taos-explorer` added in the TDengine docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system. There are also some features only available in TDengine Enterprise Edition, please contact TDengine sales team in case you need these features. +From TDengine 3.3.0.0, there is a new component called `taos-explorer` added in the TDengine docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system. There are also some features only available in TDengine Enterprise Edition, please contact TDengine sales team in case you need these features. -To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com, and the port used on host is 6060, you need to access `http://abc.com:6060`. taos-explorer uses port 6060 by default in the container. When you use it the first time, you need to register with your enterprise email, then can logon using your user name and password in the TDengine database management system. +To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com, and the port used on host is 6060, you need to access `http://abc.com:6060`. taos-explorer uses port 6060 by default in the container. The default username and password to log in to the TDengine Database Management System is "root/taosdata". 
## Test data insert performance From 016c4fa9d7009b00e5231cb9c55da2d53e28e62d Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 30 Oct 2024 16:14:14 +0800 Subject: [PATCH 129/142] doc: update minor errors --- docs/zh/14-reference/07-supported.md | 38 ++++++++++++++-------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/zh/14-reference/07-supported.md b/docs/zh/14-reference/07-supported.md index 96d12311be..a062257143 100644 --- a/docs/zh/14-reference/07-supported.md +++ b/docs/zh/14-reference/07-supported.md @@ -6,25 +6,25 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" ## TDengine 服务端支持的平台列表 -| | **版本** | **X64 64bit** | **ARM64** | **M1** | -| ----------------------|----------------| ------------- | --------- | ------ | -| **CentOS** | **7.9 以上** | ● | ● | | -| **Ubuntu** | **18 以上** | ● | ● | | -| **RedHat** | **RHEL 6 以上** | ● | ● | | -| **Debian** | **6.0 以上** | ● | ● | | -| **FreeBSD** | **12 以上** | ● | ● | | -| **OpenSUSE** | **全部版本** | ● | ● | | -| **SUSE Linux** | **11 以上** | ● | ● | | -| **Fedora** | **21 以上** | ● | ● | | -| **Windows Server** | **2016/2019** | ●/E | | | -| **Windows** | **10/11** | ●/E | | | -| **银河麒麟** | **V10 以上** | ●/E | ●/E | | -| **中标麒麟** | **V7.0 以上** | ●/E | ●/E | | -| **通信 UOS** | **V20 以上** | ●/E | | | -| **凝思磐石** | **V8.0 以上** | ●/E | | | -| **华为欧拉 openEuler** | **V20.03 以上** | ●/E | | | -| **龙蜥 Anolis OS** | **V8.6 以上** | ●/E | | | -| **macOS** | **11.0 以上** | | | ● | +| | **版本** | **X64 64bit** | **ARM64** | +| ----------------------|----------------| ------------- | --------- | +| **CentOS** | **7.9 以上** | ● | ● | +| **Ubuntu** | **18 以上** | ● | ● | +| **RedHat** | **RHEL 7 以上** | ● | ● | +| **Debian** | **6.0 以上** | ● | ● | +| **FreeBSD** | **12 以上** | ● | ● | +| **OpenSUSE** | **全部版本** | ● | ● | +| **SUSE Linux** | **11 以上** | ● | ● | +| **Fedora** | **21 以上** | ● | ● | +| **Windows Server** | **2016 以上** | ●/E | | +| **Windows** | **10/11** | ●/E | | +| **银河麒麟** | **V10 以上** | ●/E | ●/E | +| **中标麒麟** | **V7.0 以上** | ●/E | ●/E | +| **统信 UOS** | **V20 以上** | ●/E | | +| **凝思磐石** | **V8.0 以上** | ●/E | | +| **华为欧拉 openEuler** | **V20.03 以上** | ●/E | | +| **龙蜥 Anolis OS** | **V8.6 以上** | ●/E | | +| **macOS** | **11.0 以上** | | ● | 注:1) ● 表示经过官方测试验证, ○ 表示非官方测试验证,E 表示仅企业版支持。 2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。 From 6aba3e9cac2a2de018161c82376e78a2bceea996 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 30 Oct 2024 16:17:53 +0800 Subject: [PATCH 130/142] doc: minor changes --- docs/zh/14-reference/07-supported.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/07-supported.md b/docs/zh/14-reference/07-supported.md index a062257143..f98f96755a 100644 --- a/docs/zh/14-reference/07-supported.md +++ b/docs/zh/14-reference/07-supported.md @@ -43,7 +43,7 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" | **Python** | ● | ● | ● | ● | ● | | **Go** | ● | ● | ● | ● | ● | | **NodeJs** | ● | ● | ○ | ● | ○ | -| **C#** | ● | ● | ○ | ○ | ○ | +| **C#** | ● | ● | ○ | ● | ○ | | **Rust** | ● | ● | ● | ○ | ● | | **RESTful** | ● | ● | ● | ● | ● | From 25064ce77cbdea4d95cffc1cb64d26fdb0600d4d Mon Sep 17 00:00:00 2001 From: RuiLiang51666 <130521128+RuiLiang51666@users.noreply.github.com> Date: Thu, 12 Sep 2024 15:35:19 +0800 Subject: [PATCH 131/142] Update 01-index.md add the missing period. 
--- docs/zh/01-index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md index 1c9ad0b2e1..1fda72024c 100644 --- a/docs/zh/01-index.md +++ b/docs/zh/01-index.md @@ -4,7 +4,7 @@ sidebar_label: 文档首页 slug: / --- -TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库Time Series Database, TSDB), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept) +TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库Time Series Database, TSDB), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept)。 TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论你在工作中是什么角色,请您仔细阅读[数据模型](./basic/model)一章。 From fbb8585593968ba5194ba699613b0ee361628490 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 30 Oct 2024 09:00:42 +0000 Subject: [PATCH 132/142] fix/TD-32766-check-ref-before-free-check-first --- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 0913d7539b..682c179270 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -126,7 +126,7 @@ static void vmFreeVnodeObj(SVnodeObj **ppVnode) { SVnodeObj *pVnode = *ppVnode; - int32_t refCount = 1; + int32_t refCount = atomic_load_32(&pVnode->refCount); while (refCount > 0) { dWarn("vgId:%d, vnode is refenced, retry to free in 200ms, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); taosMsleep(200); From 296eb9a14665247838a9166a43651507924a1d50 Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Wed, 30 Oct 2024 17:11:20 +0800 Subject: [PATCH 133/142] =?UTF-8?q?docs=EF=BC=9AUpdate=20get-started=20des?= =?UTF-8?q?cription?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/zh/04-get-started/03-package.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/zh/04-get-started/03-package.md b/docs/zh/04-get-started/03-package.md index 2a1f594b4f..cef3cb6943 100644 --- a/docs/zh/04-get-started/03-package.md +++ b/docs/zh/04-get-started/03-package.md @@ -14,7 +14,9 @@ TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc) 为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。 -在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTools 包获得。TDengine 也提供 Windows x64 平台和 macOS x64/m1 平台的安装包。 +在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统,用户可以根据自己的运行环境自行选择。同时我们也提供了 tar.gz 格式安装包,以及 
`apt-get` 工具从线上进行安装。 + +此外,TDengine 也提供 macOS x64/m1 平台的 pkg 安装包。 ## 运行环境要求 在linux系统中,运行环境最低要求如下: @@ -317,4 +319,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s); ``` -在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 \ No newline at end of file +在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 From ae369cdeb873d25f0841ffa8e78649210040db5a Mon Sep 17 00:00:00 2001 From: beliefstar Date: Tue, 13 Aug 2024 16:50:41 +0800 Subject: [PATCH 134/142] Update 02-cache.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit wal_level设置为2时,每次写入请求都会执行fsync,不需要控制执行频率,只有当wal_level设置为1时,才需要控制fsync的执行频率。 --- docs/zh/06-advanced/02-cache.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/06-advanced/02-cache.md b/docs/zh/06-advanced/02-cache.md index ca1da30dbf..b83509c2dd 100644 --- a/docs/zh/06-advanced/02-cache.md +++ b/docs/zh/06-advanced/02-cache.md @@ -54,7 +54,7 @@ TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL 数据库参数 wal_level 和 wal_fsync_period 共同决定了 WAL 的保存行为。。 - wal_level:此参数控制 WAL 的保存级别。级别 1 表示仅将数据写入 WAL,但不立即执行 fsync 函数;级别 2 则表示在写入 WAL 的同时执行 fsync 函数。默认情况下,wal_level 设为 1。虽然执行 fsync 函数可以提高数据的持久性,但相应地也会降低写入性能。 -- wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。 +- wal_fsync_period:当 wal_level 设置为 1 时,这个参数控制执行 fsync 的频率。设置为 0 表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。 ```sql CREATE DATABASE POWER WAL_LEVEL 1 WAL_FSYNC_PERIOD 3000; @@ -119,4 +119,4 @@ taos> select last_row(ts,current) from meters; Query OK, 1 row(s) in set (0.046682s) ``` -可以看到查询的时延从 353/344ms 缩短到了 44ms,提升约 8 倍。 \ No newline at end of file +可以看到查询的时延从 353/344ms 缩短到了 44ms,提升约 8 倍。 From 68be4a7c3baab3969dd63bd1d0e414b6a2556880 Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Wed, 18 Sep 2024 16:13:04 +0800 Subject: [PATCH 135/142] Update 14-stream.md --- docs/zh/14-reference/03-taos-sql/14-stream.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/14-reference/03-taos-sql/14-stream.md b/docs/zh/14-reference/03-taos-sql/14-stream.md index cd5c76a4ad..6baad76e23 100644 --- a/docs/zh/14-reference/03-taos-sql/14-stream.md +++ b/docs/zh/14-reference/03-taos-sql/14-stream.md @@ -212,11 +212,11 @@ TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项 ```sql [field1_name,...] ``` -用来指定stb_name的列与subquery输出结果的对应关系。如果stb_name的列与subquery输出结果的位置、数量全部匹配,则不需要显示指定对应关系。如果stb_name的列与subquery输出结果的数据类型不匹配,会把subquery输出结果的类型转换成对应的stb_name的列的类型。 +在本页文档顶部的 [field1_name,...] 是用来指定 stb_name 的列与 subquery 输出结果的对应关系的。如果 stb_name 的列与 subquery 输出结果的位置、数量全部匹配,则不需要显示指定对应关系。如果 stb_name 的列与 subquery 输出结果的数据类型不匹配,会把 subquery 输出结果的类型转换成对应的 stb_name 的列的类型。 对于已经存在的超级表,检查列的schema信息 -1. 检查列的schema信息是否匹配,对于不匹配的,则自动进行类型转换,当前只有数据长度大于4096byte时才报错,其余场景都能进行类型转换。 -2. 检查列的个数是否相同,如果不同,需要显示的指定超级表与subquery的列的对应关系,否则报错;如果相同,可以指定对应关系,也可以不指定,不指定则按位置顺序对应。 +1. 检查列的 schema 信息是否匹配,对于不匹配的,则自动进行类型转换,当前只有数据长度大于 4096byte 时才报错,其余场景都能进行类型转换。 +2. 
检查列的个数是否相同,如果不同,需要显示的指定超级表与 subquery 的列的对应关系,否则报错;如果相同,可以指定对应关系,也可以不指定,不指定则按位置顺序对应。 ## 自定义TAG From 411bcd04e1b8b43bb23a9d0340d3407ddf8c9dde Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Fri, 21 Jun 2024 16:22:30 +0800 Subject: [PATCH 136/142] docs:Update README-CN.md --- README-CN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-CN.md b/README-CN.md index 06ac087859..1f785eb458 100644 --- a/README-CN.md +++ b/README-CN.md @@ -348,7 +348,7 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java # 成为社区贡献者 -点击 [这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。 +点击 [这里](https://www.taosdata.com/contributor),了解如何成为 TDengine 的贡献者。 # 加入技术交流群 From 3895a7707af617ee8acf9689ce51ca7bb4c00b6b Mon Sep 17 00:00:00 2001 From: Pengrongkun Date: Wed, 30 Oct 2024 16:51:59 +0800 Subject: [PATCH 137/142] convert TAOS_FIELD_E to TAOS_FIELD_ALL, to prevent modifications to the original API --- include/client/taos.h | 13 ++++++-- source/client/inc/clientStmt2.h | 1 + source/client/src/clientMain.c | 10 ++++-- source/client/src/clientStmt2.c | 45 ++++++++++++++++++++++++-- source/libs/parser/src/parInsertStmt.c | 7 ++-- tests/script/api/stmt2-get-fields.c | 13 ++++---- 6 files changed, 72 insertions(+), 17 deletions(-) diff --git a/include/client/taos.h b/include/client/taos.h index 6797dfee5f..266e0e192d 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -95,13 +95,21 @@ typedef struct taosField { } TAOS_FIELD; typedef struct TAOS_FIELD_E { + char name[65]; + int8_t type; + uint8_t precision; + uint8_t scale; + int32_t bytes; +} TAOS_FIELD_E; + +typedef struct TAOS_FIELD_ALL { char name[65]; int8_t type; uint8_t precision; uint8_t scale; int32_t bytes; TAOS_FIELD_T field_type; -} TAOS_FIELD_E; +} TAOS_FIELD_ALL; #ifdef WINDOWS #define DLL_EXPORT __declspec(dllexport) @@ -233,8 +241,9 @@ DLL_EXPORT int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows); DLL_EXPORT int taos_stmt2_close(TAOS_STMT2 *stmt); DLL_EXPORT int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert); DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields); -DLL_EXPORT int taos_stmt2_get_all_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_E **fields); +DLL_EXPORT int taos_stmt2_get_all_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_ALL **fields); DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields); +DLL_EXPORT void taos_stmt2_free_all_fields(TAOS_STMT2 *stmt, TAOS_FIELD_ALL *fields); DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt); DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt); diff --git a/source/client/inc/clientStmt2.h b/source/client/inc/clientStmt2.h index 4e9a09c082..e54057e72f 100644 --- a/source/client/inc/clientStmt2.h +++ b/source/client/inc/clientStmt2.h @@ -222,6 +222,7 @@ int stmtSetTbTags2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *tags); int stmtBindBatch2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *bind, int32_t colIdx); int stmtGetTagFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); int stmtGetColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); +int stmtGetColFieldsALL2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_ALL **fields); int stmtGetParamNum2(TAOS_STMT2 *stmt, int *nums); int stmtGetParamTbName(TAOS_STMT2 *stmt, int *nums); int stmtIsInsert2(TAOS_STMT2 *stmt, int *insert); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 54da1c013c..f2fc2b8bdc 100644 --- a/source/client/src/clientMain.c +++ 
b/source/client/src/clientMain.c @@ -2102,14 +2102,14 @@ int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, } } -int taos_stmt2_get_all_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_E **fields) { +int taos_stmt2_get_all_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_ALL **fields) { if (stmt == NULL || count == NULL) { tscError("NULL parameter for %s", __FUNCTION__); terrno = TSDB_CODE_INVALID_PARA; return terrno; } - return stmtGetColFields2(stmt, count, fields); + return stmtGetColFieldsALL2(stmt, count, fields); } void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) { @@ -2118,6 +2118,12 @@ void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) { taosMemoryFree(fields); } +DLL_EXPORT void taos_stmt2_free_all_fields(TAOS_STMT2 *stmt, TAOS_FIELD_ALL *fields) { + (void)stmt; + if (!fields) return; + taosMemoryFree(fields); +} + TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt) { if (stmt == NULL) { tscError("NULL parameter for %s", __FUNCTION__); diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 5d04006f06..67de878ee5 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -1069,7 +1069,7 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E return TSDB_CODE_SUCCESS; } -static int stmtFetchFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields) { +static int stmtFetchFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_ALL** fields) { SBoundColInfo* tags = (SBoundColInfo*)pStmt->bInfo.boundTags; STableMeta* meta = ((SVnodeModifyOpStmt*)(pStmt->sql.pQuery->pRoot))->pTableMeta; if (tags == NULL || meta == NULL || (meta->schema == NULL && tags->numOfBound != 0)) { @@ -1077,7 +1077,7 @@ static int stmtFetchFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E** } if (fields != NULL) { - *fields = taosMemoryCalloc(tags->numOfBound, sizeof(TAOS_FIELD_E)); + *fields = taosMemoryCalloc(tags->numOfBound, sizeof(TAOS_FIELD_ALL)); if (NULL == *fields) { return terrno; } @@ -1886,6 +1886,47 @@ int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { STMT_ERRI_JRET(stmtFetchColFields2(stmt, nums, fields)); +_return: + + pStmt->errCode = preCode; + + return code; +} + +int stmtGetColFieldsALL2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_ALL** fields) { + int32_t code = 0; + STscStmt2* pStmt = (STscStmt2*)stmt; + int32_t preCode = pStmt->errCode; + + STMT_DLOG_E("start to get col fields"); + + if (pStmt->errCode != TSDB_CODE_SUCCESS) { + return pStmt->errCode; + } + + if (STMT_TYPE_QUERY == pStmt->sql.type) { + STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR); + } + + STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + + if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && + STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { + pStmt->bInfo.needParse = false; + } + + if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) { + taos_free_result(pStmt->exec.pRequest); + pStmt->exec.pRequest = NULL; + STMT_ERR_RET(stmtCreateRequest(pStmt)); + } + + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); + + if (pStmt->bInfo.needParse) { + STMT_ERRI_JRET(stmtParseSql(pStmt)); + } + _return: pStmt->errCode = preCode; diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index 8c17689ea7..1355d9e09f 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -885,7 +885,7 @@ _return: } int32_t 
buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSchema, int32_t* fieldNum, - TAOS_FIELD_E** fields, uint8_t timePrec, TAOS_FIELD_T fieldType) { + TAOS_FIELD_E** fields, uint8_t timePrec) { if (fields != NULL) { *fields = taosMemoryCalloc(numOfBound, sizeof(TAOS_FIELD_E)); if (NULL == *fields) { @@ -900,7 +900,6 @@ int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSc for (int32_t i = 0; i < numOfBound; ++i) { schema = &pSchema[boundColumns[i]]; strcpy((*fields)[i].name, schema->name); - (*fields)[i].field_type = schema->type; (*fields)[i].type = schema->type; (*fields)[i].bytes = schema->bytes; } @@ -930,7 +929,7 @@ int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TA return TSDB_CODE_SUCCESS; } - CHECK_CODE(buildBoundFields(tags->numOfBound, tags->pColIndex, pSchema, fieldNum, fields, 0, TAOS_FIELD_TAG)); + CHECK_CODE(buildBoundFields(tags->numOfBound, tags->pColIndex, pSchema, fieldNum, fields, 0)); return TSDB_CODE_SUCCESS; } @@ -948,7 +947,7 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel } CHECK_CODE(buildBoundFields(pDataBlock->boundColsInfo.numOfBound, pDataBlock->boundColsInfo.pColIndex, pSchema, - fieldNum, fields, pDataBlock->pMeta->tableInfo.precision, TAOS_FIELD_COL)); + fieldNum, fields, pDataBlock->pMeta->tableInfo.precision)); return TSDB_CODE_SUCCESS; } diff --git a/tests/script/api/stmt2-get-fields.c b/tests/script/api/stmt2-get-fields.c index 60bee16873..23b91b56c9 100644 --- a/tests/script/api/stmt2-get-fields.c +++ b/tests/script/api/stmt2-get-fields.c @@ -25,8 +25,8 @@ void do_stmt(TAOS *taos) { "int, t2 binary(10))"); TAOS_STMT2_OPTION option = {0}; - TAOS_STMT2 *stmt = taos_stmt2_init(taos, &option); - const char *sql = "insert into db.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; + TAOS_STMT2 *stmt = taos_stmt2_init(taos, &option); + const char *sql = "insert into db.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; int code = taos_stmt2_prepare(stmt, sql, 0); if (code != 0) { @@ -35,19 +35,18 @@ void do_stmt(TAOS *taos) { return; } - int fieldNum = 0; - TAOS_FIELD_E *pFields = NULL; + int fieldNum = 0; + TAOS_FIELD_ALL *pFields = NULL; code = taos_stmt2_get_all_fields(stmt, &fieldNum, &pFields); if (code != 0) { printf("failed get col,ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_stmt2_error(stmt)); } else { printf("col nums:%d\n", fieldNum); - for(int i = 0; i < fieldNum; i++) { - printf("field[%d]: %s,type:%d\n", i, pFields[i].name,pFields[i].field_type); + for (int i = 0; i < fieldNum; i++) { + printf("field[%d]: %s,type:%d\n", i, pFields[i].name, pFields[i].field_type); } } - taos_stmt2_close(stmt); } From 7d05ccbfa03ea43055e507cc01fe4831e9566b7c Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Tue, 29 Oct 2024 11:13:50 +0800 Subject: [PATCH 138/142] fix:[TS-5567] fix bug when partition/group by const value's alias name. 
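
A minimal illustration of the statement shape this fix concerns — grouping or partitioning by the alias of a constant projection. The table `meters`, column `current`, and alias `flag` below are placeholders for this note, not names taken from the patch:

```sql
-- group by the alias of a constant value in the select list
SELECT 1 AS flag, COUNT(*) FROM meters GROUP BY flag;
-- the same shape using PARTITION BY
SELECT 1 AS flag, AVG(current) FROM meters PARTITION BY flag;
```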
--- source/libs/parser/src/parTranslater.c | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 7b1896d6a9..1faa010663 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1607,26 +1607,6 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p } } if (*pFound) { - if (QUERY_NODE_FUNCTION == nodeType(pFoundNode) && - (SQL_CLAUSE_GROUP_BY == pCxt->currClause || SQL_CLAUSE_PARTITION_BY == pCxt->currClause)) { - pCxt->errCode = getFuncInfo(pCxt, (SFunctionNode*)pFoundNode); - if (TSDB_CODE_SUCCESS == pCxt->errCode) { - if (fmIsVectorFunc(((SFunctionNode*)pFoundNode)->funcId)) { - pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION, (*pCol)->colName); - return DEAL_RES_ERROR; - } else if (fmIsPseudoColumnFunc(((SFunctionNode*)pFoundNode)->funcId)) { - if ('\0' != (*pCol)->tableAlias[0]) { - return translateColumnWithPrefix(pCxt, pCol); - } else { - return translateColumnWithoutPrefix(pCxt, pCol); - } - } else { - /* Do nothing and replace old node with found node. */ - } - } else { - return DEAL_RES_ERROR; - } - } SNode* pNew = NULL; int32_t code = nodesCloneNode(pFoundNode, &pNew); if (NULL == pNew) { @@ -5496,8 +5476,7 @@ static EDealRes translateGroupPartitionByImpl(SNode** pNode, void* pContext) { int32_t code = TSDB_CODE_SUCCESS; STranslateContext* pTransCxt = pCxt->pTranslateCxt; if (QUERY_NODE_VALUE == nodeType(*pNode)) { - STranslateContext* pTransCxt = pCxt->pTranslateCxt; - SValueNode* pVal = (SValueNode*)*pNode; + SValueNode* pVal = (SValueNode*) *pNode; if (DEAL_RES_ERROR == translateValue(pTransCxt, pVal)) { return DEAL_RES_CONTINUE; } @@ -5506,7 +5485,7 @@ static EDealRes translateGroupPartitionByImpl(SNode** pNode, void* pContext) { } int32_t pos = getPositionValue(pVal); if (0 < pos && pos <= LIST_LENGTH(pProjectionList)) { - SNode* pNew = NULL; + SNode* pNew = NULL; code = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1), (SNode**)&pNew); if (TSDB_CODE_SUCCESS != code) { pCxt->pTranslateCxt->errCode = code; From ba9c796c9dbb2144ce84fb3eb15c66d7182e44b5 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 30 Oct 2024 11:33:00 +0000 Subject: [PATCH 139/142] fix/TD-32703-add-wal-log --- source/libs/wal/src/walMeta.c | 6 ++++-- tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index d9666eb02f..3c396f0599 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -1058,6 +1058,8 @@ int32_t walSaveMeta(SWal* pWal) { TAOS_CHECK_GOTO(TAOS_SYSTEM_ERROR(errno), &lino, _err); } + wInfo("vgId:%d, save meta file: %s, firstVer:%" PRId64 ", lastVer:%" PRId64, pWal->cfg.vgId, tmpFnameStr, + pWal->vers.firstVer, pWal->vers.lastVer); // rename it n = walBuildMetaName(pWal, metaVer + 1, fnameStr); @@ -1155,8 +1157,8 @@ int32_t walLoadMeta(SWal* pWal) { (void)taosCloseFile(&pFile); taosMemoryFree(buf); - wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%d", pWal->cfg.vgId, fnameStr, - (int32_t)taosArrayGetSize(pWal->fileInfoSet)); + wInfo("vgId:%d, load meta file: %s, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileInfoSet size:%d", pWal->cfg.vgId, + fnameStr, pWal->vers.firstVer, pWal->vers.lastVer, (int32_t)taosArrayGetSize(pWal->fileInfoSet)); printFileSet(pWal->fileInfoSet); 
TAOS_RETURN(code); diff --git a/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py b/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py index 938dcfcc9e..e0632686d4 100644 --- a/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py +++ b/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py @@ -18,7 +18,7 @@ from tmqCommon import * class TDTestCase: - updatecfgDict = {'sDebugFlag':143} + updatecfgDict = {'sDebugFlag':143, 'wDebugFlag':143} def __init__(self): self.vgroups = 1 self.ctbNum = 10 From af03868864db5aec6a2cf831f3fd90159cf91d02 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 30 Oct 2024 19:42:09 +0800 Subject: [PATCH 140/142] fix unit test --- source/dnode/mnode/impl/test/stb/stb.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/test/stb/stb.cpp b/source/dnode/mnode/impl/test/stb/stb.cpp index aa12c107a1..e92231907f 100644 --- a/source/dnode/mnode/impl/test/stb/stb.cpp +++ b/source/dnode/mnode/impl/test/stb/stb.cpp @@ -782,7 +782,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) { { void* pReq = BuildAlterStbDropColumnReq(stbname, "col1", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); - ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_ALTER_OPTION); + ASSERT_EQ(pRsp->code, TSDB_CODE_PAR_INVALID_DROP_COL); rpcFreeCont(pRsp->pCont); } From d5f6f9aacb0035a18ea33a44ac047493f8521549 Mon Sep 17 00:00:00 2001 From: yingzhao Date: Wed, 30 Oct 2024 19:53:00 +0800 Subject: [PATCH 141/142] fix(dosc): br compile eror --- docs/zh/06-advanced/05-data-in/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/05-data-in/index.md b/docs/zh/06-advanced/05-data-in/index.md index 50c66cc377..0dfa04db56 100644 --- a/docs/zh/06-advanced/05-data-in/index.md +++ b/docs/zh/06-advanced/05-data-in/index.md @@ -22,7 +22,7 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx | Aveva Historian | AVEVA Historian 2020 RS SP1 | 工业大数据分析软件,前身为 Wonderware Historian,专为工业环境设计,用于存储、管理和分析来自各种工业设备、传感器的实时和历史数据 | | OPC DA | Matrikon OPC version: 1.7.2.7433 | OPC 是 Open Platform Communications 的缩写,是一种开放式、标准化的通信协议,用于不同厂商的自动化设备之间进行数据交换。它最初由微软公司开发,旨在解决工业控制领域中不同设备之间互操作性差的问题;OPC 协议最初于 1996 年发布,当时称为 OPC DA (Data Access),主要用于实时数据采集和控制。 | | OPC UA | KeepWare KEPServerEx 6.5 | 2006 年,OPC 基金会发布了 OPC UA (Unified Architecture) 标准,它是一种基于服务的面向对象的协议,具有更高的灵活性和可扩展性,已成为 OPC 协议的主流版本 | -| MQTT | emqx: 3.0.0 到 5.7.1
    hivemq: 4.0.0 到 4.31.0
    mosquitto: 1.4.4 到 2.0.18 | Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。 | +| MQTT | emqx: 3.0.0 到 5.7.1
    hivemq: 4.0.0 到 4.31.0
    mosquitto: 1.4.4 到 2.0.18 | Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。 | | Kafka | 2.11 ~ 3.8.0 | 由 Apache 软件基金会开发的一个开源流处理平台,主要用于处理实时数据,并提供一个统一、高通量、低延迟的消息系统。它具备高速度、可伸缩性、持久性和分布式设计等特点,使得它能够在每秒处理数十万次的读写操作,支持上千个客户端,同时保持数据的可靠性和可用性。 | | InfluxDB | 1.7、1.8、2.0-2.7 | InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量时间序列数据进行了优化。| | OpenTSDB | 2.4.1 | 基于 HBase 的分布式、可伸缩的时序数据库。它主要用于存储、索引和提供从大规模集群(包括网络设备、操作系统、应用程序等)中收集的指标数据,使这些数据更易于访问和图形化展示。 | From e260eb7a07d6e6176b9052ed22a27d8f067aa305 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 31 Oct 2024 13:35:42 +0800 Subject: [PATCH 142/142] Revert "enh(stmt2):add api taos_stmt2_get_all_fields" --- include/client/taos.h | 47 ++++++-------- source/client/inc/clientStmt2.h | 1 - source/client/src/clientMain.c | 25 ++------ source/client/src/clientStmt2.c | 86 -------------------------- source/libs/parser/src/parInsertSql.c | 82 ++++++++++-------------- source/libs/parser/src/parInsertStmt.c | 8 +-- tests/script/api/makefile | 2 - tests/script/api/stmt2-get-fields.c | 63 ------------------- 8 files changed, 58 insertions(+), 256 deletions(-) delete mode 100644 tests/script/api/stmt2-get-fields.c diff --git a/include/client/taos.h b/include/client/taos.h index 266e0e192d..80dbe27c47 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -81,13 +81,6 @@ typedef enum { TSDB_SML_TIMESTAMP_NANO_SECONDS, } TSDB_SML_TIMESTAMP_TYPE; -typedef enum TAOS_FIELD_T { - TAOS_FIELD_COL = 1, - TAOS_FIELD_TAG, - TAOS_FIELD_QUERY, - TAOS_FIELD_TBNAME, -} TAOS_FIELD_T; - typedef struct taosField { char name[65]; int8_t type; @@ -102,15 +95,6 @@ typedef struct TAOS_FIELD_E { int32_t bytes; } TAOS_FIELD_E; -typedef struct TAOS_FIELD_ALL { - char name[65]; - int8_t type; - uint8_t precision; - uint8_t scale; - int32_t bytes; - TAOS_FIELD_T field_type; -} TAOS_FIELD_ALL; - #ifdef WINDOWS #define DLL_EXPORT __declspec(dllexport) #else @@ -211,6 +195,13 @@ DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt); typedef void TAOS_STMT2; +typedef enum TAOS_FIELD_T { + TAOS_FIELD_COL = 1, + TAOS_FIELD_TAG, + TAOS_FIELD_QUERY, + TAOS_FIELD_TBNAME, +} TAOS_FIELD_T; + typedef struct TAOS_STMT2_OPTION { int64_t reqid; bool singleStbInsert; @@ -241,9 +232,7 @@ DLL_EXPORT int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows); DLL_EXPORT int taos_stmt2_close(TAOS_STMT2 *stmt); DLL_EXPORT int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert); DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields); -DLL_EXPORT int taos_stmt2_get_all_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_ALL **fields); DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields); -DLL_EXPORT void taos_stmt2_free_all_fields(TAOS_STMT2 *stmt, TAOS_FIELD_ALL *fields); DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt); DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt); @@ -262,17 +251,17 @@ DLL_EXPORT int64_t taos_affected_rows64(TAOS_RES *res); DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -DLL_EXPORT void taos_stop_query(TAOS_RES *res); -DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); -DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool 
result[], int *rows); -DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); -DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); -DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); -DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData); -DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex); -DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); -DLL_EXPORT void taos_reset_current_db(TAOS *taos); +DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); +DLL_EXPORT void taos_stop_query(TAOS_RES *res); +DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); +DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows); +DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); +DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); +DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); +DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData); +DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex); +DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); +DLL_EXPORT void taos_reset_current_db(TAOS *taos); DLL_EXPORT int *taos_fetch_lengths(TAOS_RES *res); DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res); diff --git a/source/client/inc/clientStmt2.h b/source/client/inc/clientStmt2.h index e54057e72f..4e9a09c082 100644 --- a/source/client/inc/clientStmt2.h +++ b/source/client/inc/clientStmt2.h @@ -222,7 +222,6 @@ int stmtSetTbTags2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *tags); int stmtBindBatch2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *bind, int32_t colIdx); int stmtGetTagFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); int stmtGetColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); -int stmtGetColFieldsALL2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_ALL **fields); int stmtGetParamNum2(TAOS_STMT2 *stmt, int *nums); int stmtGetParamTbName(TAOS_STMT2 *stmt, int *nums); int stmtIsInsert2(TAOS_STMT2 *stmt, int *insert); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index f2fc2b8bdc..64631fd754 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -84,7 +84,7 @@ void taos_cleanup(void) { taosCloseRef(id); nodesDestroyAllocatorSet(); - // cleanupAppInfo(); +// cleanupAppInfo(); rpcCleanup(); tscDebug("rpc cleanup"); @@ -388,6 +388,7 @@ void taos_free_result(TAOS_RES *res) { tDeleteMqBatchMetaRsp(&pRsp->batchMetaRsp); } taosMemoryFree(pRsp); + } void taos_kill_query(TAOS *taos) { @@ -483,7 +484,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { return taos_print_row_with_size(str, INT32_MAX, row, fields, num_fields); } -int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { +int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields){ int32_t len = 0; for (int i = 0; i < num_fields; ++i) { if (i > 0 && len < size - 1) { @@ -588,7 +589,7 @@ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD break; } } - if (len < size) { + if (len < size){ str[len] = 0; } @@ -2081,7 +2082,7 @@ int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert) { } int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E 
**fields) { - if (stmt == NULL || count == NULL) { + if (stmt == NULL || NULL == count) { tscError("NULL parameter for %s", __FUNCTION__); terrno = TSDB_CODE_INVALID_PARA; return terrno; @@ -2102,28 +2103,12 @@ int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, } } -int taos_stmt2_get_all_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_ALL **fields) { - if (stmt == NULL || count == NULL) { - tscError("NULL parameter for %s", __FUNCTION__); - terrno = TSDB_CODE_INVALID_PARA; - return terrno; - } - - return stmtGetColFieldsALL2(stmt, count, fields); -} - void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) { (void)stmt; if (!fields) return; taosMemoryFree(fields); } -DLL_EXPORT void taos_stmt2_free_all_fields(TAOS_STMT2 *stmt, TAOS_FIELD_ALL *fields) { - (void)stmt; - if (!fields) return; - taosMemoryFree(fields); -} - TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt) { if (stmt == NULL) { tscError("NULL parameter for %s", __FUNCTION__); diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 67de878ee5..2f046b61d6 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -1068,48 +1068,6 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E return TSDB_CODE_SUCCESS; } - -static int stmtFetchFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_ALL** fields) { - SBoundColInfo* tags = (SBoundColInfo*)pStmt->bInfo.boundTags; - STableMeta* meta = ((SVnodeModifyOpStmt*)(pStmt->sql.pQuery->pRoot))->pTableMeta; - if (tags == NULL || meta == NULL || (meta->schema == NULL && tags->numOfBound != 0)) { - return TSDB_CODE_INVALID_PARA; - } - - if (fields != NULL) { - *fields = taosMemoryCalloc(tags->numOfBound, sizeof(TAOS_FIELD_ALL)); - if (NULL == *fields) { - return terrno; - } - - SSchema* pSchema = meta->schema; - int32_t tbnameIdx = meta->tableInfo.numOfTags + meta->tableInfo.numOfColumns; - for (int32_t i = 0; i < tags->numOfBound; ++i) { - int16_t idx = tags->pColIndex[i]; - if (idx == tbnameIdx) { - (*fields)[i].field_type = TAOS_FIELD_TBNAME; - tstrncpy((*fields)[i].name, "tbname", sizeof((*fields)[i].name)); - continue; - } else if (idx < meta->tableInfo.numOfColumns) { - (*fields)[i].field_type = TAOS_FIELD_COL; - } else { - (*fields)[i].field_type = TAOS_FIELD_TAG; - } - - if (TSDB_DATA_TYPE_TIMESTAMP == pSchema[tags->pColIndex[i]].type) { - (*fields)[i].precision = meta->tableInfo.precision; - } - - tstrncpy((*fields)[i].name, pSchema[tags->pColIndex[i]].name, sizeof((*fields)[i].name)); - (*fields)[i].type = pSchema[tags->pColIndex[i]].type; - (*fields)[i].bytes = pSchema[tags->pColIndex[i]].bytes; - } - } - - *fieldNum = tags->numOfBound; - - return TSDB_CODE_SUCCESS; -} /* SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) { while (true) { @@ -1893,50 +1851,6 @@ _return: return code; } -int stmtGetColFieldsALL2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_ALL** fields) { - int32_t code = 0; - STscStmt2* pStmt = (STscStmt2*)stmt; - int32_t preCode = pStmt->errCode; - - STMT_DLOG_E("start to get col fields"); - - if (pStmt->errCode != TSDB_CODE_SUCCESS) { - return pStmt->errCode; - } - - if (STMT_TYPE_QUERY == pStmt->sql.type) { - STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR); - } - - STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); - - if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && - STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { - pStmt->bInfo.needParse = false; - } - - if (pStmt->exec.pRequest && STMT_TYPE_QUERY == 
pStmt->sql.type && pStmt->sql.runTimes) { - taos_free_result(pStmt->exec.pRequest); - pStmt->exec.pRequest = NULL; - STMT_ERR_RET(stmtCreateRequest(pStmt)); - } - - STMT_ERRI_JRET(stmtCreateRequest(pStmt)); - - if (pStmt->bInfo.needParse) { - STMT_ERRI_JRET(stmtParseSql(pStmt)); - } - -_return: - - pStmt->errCode = preCode; - if (code == TSDB_CODE_TSC_INVALID_OPERATION) { - return stmtFetchFields2(stmt, nums, fields); - } - - return code; -} - int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { STscStmt2* pStmt = (STscStmt2*)stmt; diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index f4ef099515..1c26a7c70e 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -30,7 +30,7 @@ typedef struct SInsertParseContext { bool forceUpdate; bool needTableTagVal; bool needRequest; // whether or not request server - bool isStmtBind; // whether is stmt bind + bool isStmtBind; // whether is stmt bind } SInsertParseContext; typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param); @@ -757,7 +757,7 @@ int32_t parseTagValue(SMsgBuf* pMsgBuf, const char** pSql, uint8_t precision, SS STagVal val = {0}; int32_t code = parseTagToken(pSql, pToken, pTagSchema, precision, &val, pMsgBuf); if (TSDB_CODE_SUCCESS == code) { - if (NULL == taosArrayPush(pTagVals, &val)) { + if (NULL == taosArrayPush(pTagVals, &val)){ code = terrno; } } @@ -775,14 +775,11 @@ static int32_t buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* p return terrno; } return insBuildCreateTbReq(pStmt->pCreateTblReq, pStmt->targetTableName.tname, pTag, pStmt->pTableMeta->suid, - pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags, - TSDB_DEFAULT_TABLE_TTL); + pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags, + TSDB_DEFAULT_TABLE_TTL); } int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf, int8_t type) { - if (pToken->type == TK_NK_QUESTION) { - return buildInvalidOperationMsg(pMsgBuf, "insert into super table syntax is not supported for stmt"); - } if ((pToken->type != TK_NOW && pToken->type != TK_TODAY && pToken->type != TK_NK_INTEGER && pToken->type != TK_NK_STRING && pToken->type != TK_NK_FLOAT && pToken->type != TK_NK_BOOL && pToken->type != TK_NULL && pToken->type != TK_NK_HEX && pToken->type != TK_NK_OCT && pToken->type != TK_NK_BIN && @@ -813,7 +810,7 @@ typedef struct SRewriteTagCondCxt { static int32_t rewriteTagCondColumnImpl(STagVal* pVal, SNode** pNode) { SValueNode* pValue = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); if (NULL == pValue) { return code; } @@ -1044,7 +1041,7 @@ static int32_t storeChildTableMeta(SInsertParseContext* pCxt, SVnodeModifyOpStmt return TSDB_CODE_OUT_OF_MEMORY; } - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName); if (TSDB_CODE_SUCCESS != code) { taosMemoryFree(pBackup); @@ -1239,7 +1236,7 @@ static int32_t getTargetTableMetaAndVgroup(SInsertParseContext* pCxt, SVnodeModi } static int32_t collectUseTable(const SName* pName, SHashObj* pTable) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1385,7 +1382,7 @@ static int32_t getTableDataCxt(SInsertParseContext* pCxt, 
SVnodeModifyOpStmt* pS pStmt->pTableMeta, &pStmt->pCreateTblReq, pTableCxt, false, false); } - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1926,8 +1923,8 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod } if (code == TSDB_CODE_SUCCESS) { code = insBuildCreateTbReq(pStbRowsCxt->pCreateCtbReq, pStbRowsCxt->ctbName.tname, pStbRowsCxt->pTag, - pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames, - getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL); + pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames, + getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL); pStbRowsCxt->pTag = NULL; } @@ -1936,9 +1933,9 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod code = tNameExtractFullName(&pStbRowsCxt->ctbName, ctbFName); SVgroupInfo vg; SRequestConnInfo conn = {.pTrans = pCxt->pComCxt->pTransporter, - .requestId = pCxt->pComCxt->requestId, - .requestObjRefId = pCxt->pComCxt->requestRid, - .mgmtEps = pCxt->pComCxt->mgmtEpSet}; + .requestId = pCxt->pComCxt->requestId, + .requestObjRefId = pCxt->pComCxt->requestRid, + .mgmtEps = pCxt->pComCxt->mgmtEpSet}; if (TSDB_CODE_SUCCESS == code) { code = catalogGetTableHashVgroup(pCxt->pComCxt->pCatalog, &conn, &pStbRowsCxt->ctbName, &vg); } @@ -2179,8 +2176,8 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt if (code == TSDB_CODE_SUCCESS) { SStbRowsDataContext* pStbRowsCxt = rowsDataCxt.pStbRowsCxt; void* pData = pTableDataCxt; - code = taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), - &pData, POINTER_BYTES); + code = taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), &pData, + POINTER_BYTES); if (TSDB_CODE_SUCCESS != code) { break; } @@ -2252,7 +2249,7 @@ static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpSt if (!pStmt->stbSyntax && numOfRows > 0) { void* pData = rowsDataCxt.pTableDataCxt; code = taosHashPut(pStmt->pTableCxtHashObj, &pStmt->pTableMeta->uid, sizeof(pStmt->pTableMeta->uid), &pData, - POINTER_BYTES); + POINTER_BYTES); } return code; @@ -2366,7 +2363,8 @@ static int32_t constructStbRowsDataContext(SVnodeModifyOpStmt* pStmt, SStbRowsDa if (TSDB_CODE_SUCCESS == code) { // col values and bound cols info of STableDataContext is not used pStbRowsCxt->aColVals = taosArrayInit(getNumOfColumns(pStbRowsCxt->pStbMeta), sizeof(SColVal)); - if (!pStbRowsCxt->aColVals) code = terrno; + if (!pStbRowsCxt->aColVals) + code = terrno; } if (TSDB_CODE_SUCCESS == code) { code = insInitColValues(pStbRowsCxt->pStbMeta, pStbRowsCxt->aColVals); @@ -2424,6 +2422,9 @@ static int32_t parseInsertStbClauseBottom(SInsertParseContext* pCxt, SVnodeModif // 1. [(tag1_name, ...)] ... // 2. VALUES ... | FILE ... 
static int32_t parseInsertTableClauseBottom(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) { + if (pStmt->stbSyntax && TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { + return buildInvalidOperationMsg(&pCxt->msg, "insert into super table syntax is not supported for stmt"); + } if (!pStmt->stbSyntax) { STableDataCxt* pTableCxt = NULL; int32_t code = parseSchemaClauseBottom(pCxt, pStmt, &pTableCxt); @@ -2510,9 +2511,9 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif } // db.? situation,ensure that the only thing following the '.' mark is '?' - char* tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true); + char *tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true); if ((tbNameAfterDbName != NULL) && (*(tbNameAfterDbName + 1) == '?')) { - char* tbName = NULL; + char *tbName = NULL; if (NULL == pCxt->pComCxt->pStmtCb) { return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z); } @@ -2527,8 +2528,7 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif if (pCxt->isStmtBind) { if (TK_NK_ID == pTbName->type || (tbNameAfterDbName != NULL && *(tbNameAfterDbName + 1) != '?')) { // In SQL statements, the table name has already been specified. - parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param", - pCxt->pComCxt->requestId); + parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param", pCxt->pComCxt->requestId); } } @@ -2614,7 +2614,7 @@ static void destroySubTableHashElem(void* p) { taosMemoryFree(*(STableMeta**)p); static int32_t createVnodeModifOpStmt(SInsertParseContext* pCxt, bool reentry, SNode** pOutput) { SVnodeModifyOpStmt* pStmt = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt); + int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt); if (NULL == pStmt) { return code; } @@ -2729,7 +2729,7 @@ static int32_t buildTagNameFromMeta(STableMeta* pMeta, SArray** pTagName) { return terrno; } SSchema* pSchema = getTableTagSchema(pMeta); - int32_t code = 0; + int32_t code = 0; for (int32_t i = 0; i < pMeta->tableInfo.numOfTags; ++i) { if (NULL == taosArrayPush(*pTagName, pSchema[i].name)) { code = terrno; @@ -2834,7 +2834,7 @@ static int32_t resetVnodeModifOpStmt(SInsertParseContext* pCxt, SQuery* pQuery) } if (NULL == pStmt->pTableBlockHashObj) { pStmt->pTableBlockHashObj = - taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); } if (NULL == pStmt->pVgroupsHashObj || NULL == pStmt->pTableBlockHashObj) { code = TSDB_CODE_OUT_OF_MEMORY; @@ -2866,7 +2866,7 @@ static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogR static int32_t setRefreshMeta(SQuery* pQuery) { SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot; - int32_t code = 0; + int32_t code = 0; if (taosHashGetSize(pStmt->pTableNameHashObj) > 0) { taosArrayDestroy(pQuery->pTableList); @@ -3065,10 +3065,9 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal .forceUpdate = (NULL != pCatalogReq ? 
pCatalogReq->forceUpdate : false), .isStmtBind = pCxt->isStmtBind}; - int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); - SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)((*pQuery)->pRoot); + int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); if (TSDB_CODE_SUCCESS == code) { - code = parseInsertSqlImpl(&context, pStmt); + code = parseInsertSqlImpl(&context, (SVnodeModifyOpStmt*)(*pQuery)->pRoot); } if (TSDB_CODE_SUCCESS == code) { code = setNextStageInfo(&context, *pQuery, pCatalogReq); @@ -3077,27 +3076,8 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal QUERY_EXEC_STAGE_SCHEDULE == (*pQuery)->execStage) { code = setRefreshMeta(*pQuery); } - - if (pStmt->stbSyntax && TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT) && - code == TSDB_CODE_TSC_INVALID_OPERATION) { - context.tags.numOfBound = pStmt->pStbRowsCxt->boundColsInfo.numOfBound; - context.tags.numOfCols = pStmt->pStbRowsCxt->boundColsInfo.numOfCols; - context.tags.hasBoundCols = pStmt->pStbRowsCxt->boundColsInfo.hasBoundCols; - context.tags.pColIndex = taosMemoryMalloc(sizeof(int16_t) * context.tags.numOfBound); - if (NULL == context.tags.pColIndex) { - return terrno; - } - - (void)memcpy(context.tags.pColIndex, pStmt->pStbRowsCxt->boundColsInfo.pColIndex, - sizeof(int16_t) * pStmt->pStbRowsCxt->boundColsInfo.numOfBound); - code = setStmtInfo(&context, pStmt); - if (TSDB_CODE_SUCCESS == code) { - insDestroyBoundColInfo(&context.tags); - return TSDB_CODE_TSC_INVALID_OPERATION; - } - } - insDestroyBoundColInfo(&context.tags); + // if no data to insert, set emptyMode to avoid request server if (!context.needRequest) { (*pQuery)->execMode = QUERY_EXEC_MODE_EMPTY_RESULT; diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index 1355d9e09f..ee61611bf2 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -242,7 +242,7 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch } code = insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName, - pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); + pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); pTag = NULL; end: @@ -594,7 +594,7 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c } code = insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName, - pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); + pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); pTag = NULL; end: @@ -886,7 +886,7 @@ _return: int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_E** fields, uint8_t timePrec) { - if (fields != NULL) { + if (fields) { *fields = taosMemoryCalloc(numOfBound, sizeof(TAOS_FIELD_E)); if (NULL == *fields) { return terrno; @@ -939,7 +939,7 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta); if (pDataBlock->boundColsInfo.numOfBound <= 0) { *fieldNum = 0; - if (fields != NULL) { + if (fields) { *fields = NULL; } diff --git a/tests/script/api/makefile b/tests/script/api/makefile index 9c2bb6be3d..d8a4e19218 100644 --- a/tests/script/api/makefile +++ b/tests/script/api/makefile @@ -25,7 +25,6 @@ exe: gcc $(CFLAGS) ./stmt.c -o $(ROOT)stmt $(LFLAGS) gcc $(CFLAGS) ./stmt2.c -o 
$(ROOT)stmt2 $(LFLAGS) gcc $(CFLAGS) ./stmt2-example.c -o $(ROOT)stmt2-example $(LFLAGS) - gcc $(CFLAGS) ./stmt2-get-fields.c -o $(ROOT)stmt2-get-fields $(LFLAGS) gcc $(CFLAGS) ./stmt2-nohole.c -o $(ROOT)stmt2-nohole $(LFLAGS) gcc $(CFLAGS) ./stmt-crash.c -o $(ROOT)stmt-crash $(LFLAGS) @@ -43,6 +42,5 @@ clean: rm $(ROOT)stmt rm $(ROOT)stmt2 rm $(ROOT)stmt2-example - rm $(ROOT)stmt2-get-fields rm $(ROOT)stmt2-nohole rm $(ROOT)stmt-crash diff --git a/tests/script/api/stmt2-get-fields.c b/tests/script/api/stmt2-get-fields.c deleted file mode 100644 index 23b91b56c9..0000000000 --- a/tests/script/api/stmt2-get-fields.c +++ /dev/null @@ -1,63 +0,0 @@ -// TAOS standard API example. The same syntax as MySQL, but only a subet -// to compile: gcc -o stmt2-get-fields stmt2-get-fields.c -ltaos - -#include -#include -#include -#include "taos.h" - -void do_query(TAOS *taos, const char *sql) { - TAOS_RES *result = taos_query(taos, sql); - int code = taos_errno(result); - if (code) { - printf("failed to query: %s, reason:%s\n", sql, taos_errstr(result)); - taos_free_result(result); - return; - } - taos_free_result(result); -} - -void do_stmt(TAOS *taos) { - do_query(taos, "drop database if exists db"); - do_query(taos, "create database db"); - do_query(taos, - "create table db.stb (ts timestamp, b binary(10)) tags(t1 " - "int, t2 binary(10))"); - - TAOS_STMT2_OPTION option = {0}; - TAOS_STMT2 *stmt = taos_stmt2_init(taos, &option); - const char *sql = "insert into db.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; - - int code = taos_stmt2_prepare(stmt, sql, 0); - if (code != 0) { - printf("failed to execute taos_stmt2_prepare. error:%s\n", taos_stmt2_error(stmt)); - taos_stmt2_close(stmt); - return; - } - - int fieldNum = 0; - TAOS_FIELD_ALL *pFields = NULL; - code = taos_stmt2_get_all_fields(stmt, &fieldNum, &pFields); - if (code != 0) { - printf("failed get col,ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_stmt2_error(stmt)); - } else { - printf("col nums:%d\n", fieldNum); - for (int i = 0; i < fieldNum; i++) { - printf("field[%d]: %s,type:%d\n", i, pFields[i].name, pFields[i].field_type); - } - } - - taos_stmt2_close(stmt); -} - -int main() { - TAOS *taos = taos_connect("localhost", "root", "taosdata", "", 0); - if (!taos) { - printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); - exit(1); - } - - do_stmt(taos); - taos_close(taos); - taos_cleanup(); -} \ No newline at end of file
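With [PATCH 142/142] reverting taos_stmt2_get_all_fields / TAOS_FIELD_ALL and deleting the stmt2-get-fields.c test, bound-parameter metadata is still reachable through the retained taos_stmt2_get_fields call, which takes an explicit TAOS_FIELD_T kind (TAOS_FIELD_TAG, TAOS_FIELD_COL, TAOS_FIELD_TBNAME). The standalone C sketch below is illustrative only: it adapts the deleted test to the per-kind API, assumes a local taosd reachable with the default root/taosdata account, names a hypothetical subtable db.d0, and uses the using-clause form because the restored parser rejects direct super-table insert syntax for statement binding. Exact prepare/bind ordering requirements may differ across versions.

/* Sketch: query bound tag and column metadata via taos_stmt2_get_fields. */
#include <stdio.h>
#include "taos.h"

static void do_query(TAOS *taos, const char *sql) {
  TAOS_RES *result = taos_query(taos, sql);
  if (taos_errno(result) != 0) {
    printf("failed to query: %s, reason:%s\n", sql, taos_errstr(result));
  }
  taos_free_result(result);
}

static void print_fields(const char *kind, int count, TAOS_FIELD_E *fields) {
  printf("%s field num:%d\n", kind, count);
  for (int i = 0; i < count; i++) {
    printf("  field[%d]: %s, type:%d, bytes:%d\n", i, fields[i].name, fields[i].type, fields[i].bytes);
  }
}

int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "", 0);
  if (taos == NULL) {
    printf("failed to connect to server, reason:%s\n", taos_errstr(NULL));
    return 1;
  }

  do_query(taos, "drop database if exists db");
  do_query(taos, "create database db");
  do_query(taos, "create table db.stb (ts timestamp, b binary(10)) tags(t1 int, t2 binary(10))");

  TAOS_STMT2_OPTION option = {0};
  TAOS_STMT2       *stmt = taos_stmt2_init(taos, &option);
  /* db.d0 is a hypothetical subtable; the using-clause keeps the statement off
   * the super-table insert path, which the restored parser rejects for stmt. */
  const char *sql = "insert into db.d0 using db.stb tags(?,?) values(?,?)";
  if (taos_stmt2_prepare(stmt, sql, 0) != 0) {
    printf("failed to prepare, reason:%s\n", taos_stmt2_error(stmt));
  } else {
    int           count = 0;
    TAOS_FIELD_E *fields = NULL;

    /* Bound tags (t1, t2). */
    int code = taos_stmt2_get_fields(stmt, TAOS_FIELD_TAG, &count, &fields);
    if (code != 0) {
      printf("failed to get tag fields, code:0x%x, reason:%s\n", code, taos_stmt2_error(stmt));
    } else {
      print_fields("tag", count, fields);
      taos_stmt2_free_fields(stmt, fields);
    }

    /* Bound data columns (ts, b). */
    code = taos_stmt2_get_fields(stmt, TAOS_FIELD_COL, &count, &fields);
    if (code != 0) {
      printf("failed to get col fields, code:0x%x, reason:%s\n", code, taos_stmt2_error(stmt));
    } else {
      print_fields("col", count, fields);
      taos_stmt2_free_fields(stmt, fields);
    }
  }

  taos_stmt2_close(stmt);
  taos_close(taos);
  taos_cleanup();
  return 0;
}

Binding and execution would then proceed through taos_stmt2_bind_param and taos_stmt2_exec as declared in taos.h.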