Merge branch '3.0' into enh/top_bot_split

commit ca22215718
@@ -38,6 +38,16 @@ export TDENGINE_API=http://tdengine.local:6041
# user + password
export TDENGINE_USER=user
export TDENGINE_PASSWORD=password

# Other environment variables:
# - Whether to install the data source; defaults to true (install)
export TDENGINE_DS_ENABLED=false
# - Data source name; defaults to TDengine
export TDENGINE_DS_NAME=TDengine
# - ID of the organization the data source belongs to; defaults to 1
export GF_ORG_ID=1
# - Whether the data source can be edited from the admin panel; defaults to 0 (not editable)
export TDENGINE_EDITABLE=1
```

Run the install script:

@@ -48,6 +58,8 @@ bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"

The script installs the Grafana plugin and configures the data source automatically. Restart the Grafana service after installation for the configuration to take effect.

Save the script and run `./install.sh --help` to see the detailed help documentation.

</TabItem>
<TabItem value="manual" label="Manual installation and configuration">

@@ -40,6 +40,16 @@ export TDENGINE_API=http://tdengine.local:6041
# user + password
export TDENGINE_USER=user
export TDENGINE_PASSWORD=password

# Other useful variables
# - Whether to install the TDengine data source; default is true
export TDENGINE_DS_ENABLED=false
# - Data source name to be created; default is TDengine
export TDENGINE_DS_NAME=TDengine
# - Data source organization id; default is 1
export GF_ORG_ID=1
# - Whether the data source is editable in the admin UI; default is 0 (false)
export TDENGINE_EDITABLE=1
```

Run `install.sh`:

@@ -48,7 +58,7 @@ Run `install.sh`:
bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
```

With this script, the TDengine data source plugin will be installed and the Grafana data source will be created automatically with Grafana provisioning configurations.
With this script, the TDengine data source plugin will be installed and the Grafana data source will be created automatically with Grafana provisioning configurations. Save the script and run `./install.sh --help` for the full usage of the script.

Then restart the Grafana service and open Grafana in a web browser, usually at <http://localhost:3000>.

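As a quick sanity check before running `install.sh`, the REST endpoint named by `TDENGINE_API` can be probed directly. Below is a minimal sketch using libcurl; the endpoint, user, and password are the placeholder values from the docs above, and `/rest/sql` is TDengine's REST query path:

```c
#include <curl/curl.h>
#include <stdio.h>

/* Probe the TDengine REST endpoint with the credentials that the
 * install script will later write into the Grafana data source.
 * Values below are the placeholders from the documentation above. */
int main(void) {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if (curl == NULL) return 1;

  curl_easy_setopt(curl, CURLOPT_URL, "http://tdengine.local:6041/rest/sql");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "user:password");
  /* Setting a request body makes this a POST, which is what /rest/sql expects. */
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "show databases");

  CURLcode rc = curl_easy_perform(curl);
  if (rc != CURLE_OK) {
    fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));
  }

  curl_easy_cleanup(curl);
  curl_global_cleanup();
  return rc == CURLE_OK ? 0 : 1;
}
```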
@@ -37,14 +37,6 @@ enum {
  TMQ_MSG_TYPE__EP_RSP,
};

enum {
  STREAM_TRIGGER__AT_ONCE = 1,
  STREAM_TRIGGER__WINDOW_CLOSE,
  STREAM_TRIGGER__BY_COUNT,
  STREAM_TRIGGER__BY_BATCH_COUNT,
  STREAM_TRIGGER__BY_EVENT_TIME,
};

typedef enum EStreamType {
  STREAM_NORMAL = 1,
  STREAM_INVERT,

@@ -0,0 +1,71 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _TD_UTIL_LRUCACHE_H_
#define _TD_UTIL_LRUCACHE_H_

#include "thash.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct SLRUCache SLRUCache;

typedef void (*_taos_lru_deleter_t)(const void *key, size_t keyLen, void *value);

typedef struct LRUHandle LRUHandle;

typedef enum {
  TAOS_LRU_PRIORITY_HIGH,
  TAOS_LRU_PRIORITY_LOW
} LRUPriority;

typedef enum {
  TAOS_LRU_STATUS_OK,
  TAOS_LRU_STATUS_FAIL,
  TAOS_LRU_STATUS_INCOMPLETE,
  TAOS_LRU_STATUS_OK_OVERWRITTEN
} LRUStatus;

SLRUCache *taosLRUCacheInit(size_t capacity, int numShardBits, double highPriPoolRatio);
void taosLRUCacheCleanup(SLRUCache *cache);

LRUStatus taosLRUCacheInsert(SLRUCache *cache, const void *key, size_t keyLen, void *value, size_t charge,
                             _taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority);
LRUHandle *taosLRUCacheLookup(SLRUCache *cache, const void *key, size_t keyLen);
void taosLRUCacheErase(SLRUCache *cache, const void *key, size_t keyLen);

void taosLRUCacheEraseUnrefEntries(SLRUCache *cache);

bool taosLRUCacheRef(SLRUCache *cache, LRUHandle *handle);
bool taosLRUCacheRelease(SLRUCache *cache, LRUHandle *handle, bool eraseIfLastRef);

void *taosLRUCacheValue(SLRUCache *cache, LRUHandle *handle);

size_t taosLRUCacheGetUsage(SLRUCache *cache);
size_t taosLRUCacheGetPinnedUsage(SLRUCache *cache);

void taosLRUCacheSetCapacity(SLRUCache *cache, size_t capacity);
size_t taosLRUCacheGetCapacity(SLRUCache *cache);

void taosLRUCacheSetStrictCapacity(SLRUCache *cache, bool strict);
bool taosLRUCacheIsStrictCapacity(SLRUCache *cache);

#ifdef __cplusplus
}
#endif

#endif  // _TD_UTIL_LRUCACHE_H_

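The header above only declares the interface; here is a minimal usage sketch built from those declarations. The deleter, the string key, and the capacity figures are illustrative, not taken from the commit:

```c
#include <stdio.h>
#include <string.h>
// #include "tlrucache.h"  // the header introduced above

static void demoDeleter(const void *key, size_t keyLen, void *value) {
  // Called when an entry is evicted or erased; free owned resources here.
  (void)key; (void)keyLen; (void)value;
}

void lruDemo(void) {
  // 1 MB capacity, 4 shard bits (16 shards), 50% high-priority pool.
  SLRUCache *cache = taosLRUCacheInit(1024 * 1024, 4, 0.5);

  static int payload = 42;
  const char *key = "vnode1:block7";
  // "charge" approximates the entry's memory footprint for eviction accounting.
  LRUStatus st = taosLRUCacheInsert(cache, key, strlen(key), &payload, sizeof(payload),
                                    demoDeleter, NULL, TAOS_LRU_PRIORITY_HIGH);
  if (st != TAOS_LRU_STATUS_OK && st != TAOS_LRU_STATUS_OK_OVERWRITTEN) return;

  LRUHandle *h = taosLRUCacheLookup(cache, key, strlen(key));
  if (h != NULL) {
    int *v = (int *)taosLRUCacheValue(cache, h);
    printf("cached value: %d\n", *v);
    taosLRUCacheRelease(cache, h, false);  // drop the reference, keep the entry
  }

  taosLRUCacheCleanup(cache);
}
```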
@@ -4,11 +4,16 @@
# is required to use systemd to manage services at boot

set -e
# set -x
#set -x

# -----------------------Variables definition---------------------
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
len=$(echo ${#OS})
len=$((len-2))
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
echo -ne $retval

if [[ "$OSTYPE" == "darwin"* ]]; then
  echo -e "Darwin"
else
  OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
  len=$(echo ${#OS})
  len=$((len-2))
  retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
  echo -ne $retval
fi

@@ -167,7 +167,7 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c

  taosThreadMutexInit(&pObj->mutex, NULL);
  pObj->id = taosAddRef(clientConnRefPool, pObj);
  pObj->schemalessType = 0;
  pObj->schemalessType = 1;

  tscDebug("connObj created, 0x%" PRIx64, pObj->id);
  return pObj;

@@ -306,19 +306,10 @@ static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) {
      const char* errStr = taos_errstr(res);
      if (code != TSDB_CODE_SUCCESS) {
        uError("SML:0x%"PRIx64" apply schema action. error: %s", info->id, errStr);
        taosMsleep(100);
      }
      taos_free_result(res);

      // if (code == TSDB_CODE_MND_FIELD_ALREADY_EXIST || code == TSDB_CODE_MND_TAG_ALREADY_EXIST || tscDupColNames) {
      if (code == TSDB_CODE_MND_TAG_ALREADY_EXIST) {
        TAOS_RES* res2 = taos_query(info->taos, "RESET QUERY CACHE");
        code = taos_errno(res2);
        if (code != TSDB_CODE_SUCCESS) {
          uError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
        }
        taos_free_result(res2);
        taosMsleep(500);
      }
      break;
    }
    case SCHEMA_ACTION_ADD_TAG: {

@@ -330,19 +321,10 @@ static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) {
      const char* errStr = taos_errstr(res);
      if (code != TSDB_CODE_SUCCESS) {
        uError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
        taosMsleep(100);
      }
      taos_free_result(res);

      // if (code ==TSDB_CODE_MND_TAG_ALREADY_EXIST || code == TSDB_CODE_MND_FIELD_ALREAY_EXIST || tscDupColNames) {
      if (code ==TSDB_CODE_MND_TAG_ALREADY_EXIST) {
        TAOS_RES* res2 = taos_query(info->taos, "RESET QUERY CACHE");
        code = taos_errno(res2);
        if (code != TSDB_CODE_SUCCESS) {
          uError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
        }
        taos_free_result(res2);
        taosMsleep(500);
      }
      break;
    }
    case SCHEMA_ACTION_CHANGE_COLUMN_SIZE: {

@@ -353,19 +335,10 @@ static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) {
      code = taos_errno(res);
      if (code != TSDB_CODE_SUCCESS) {
        uError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
        taosMsleep(100);
      }
      taos_free_result(res);

      // if (code == TSDB_CODE_MND_INVALID_COLUMN_LENGTH || code == TSDB_CODE_TSC_INVALID_COLUMN_LENGTH) {
      if (code == TSDB_CODE_TSC_INVALID_COLUMN_LENGTH) {
        TAOS_RES* res2 = taos_query(info->taos, "RESET QUERY CACHE");
        code = taos_errno(res2);
        if (code != TSDB_CODE_SUCCESS) {
          uError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
        }
        taos_free_result(res2);
        taosMsleep(500);
      }
      break;
    }
    case SCHEMA_ACTION_CHANGE_TAG_SIZE: {

@@ -376,19 +349,10 @@ static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) {
      code = taos_errno(res);
      if (code != TSDB_CODE_SUCCESS) {
        uError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
        taosMsleep(100);
      }
      taos_free_result(res);

      // if (code == TSDB_CODE_MND_INVALID_TAG_LENGTH || code == TSDB_CODE_TSC_INVALID_TAG_LENGTH) {
      if (code == TSDB_CODE_TSC_INVALID_TAG_LENGTH) {
        TAOS_RES* res2 = taos_query(info->taos, "RESET QUERY CACHE");
        code = taos_errno(res2);
        if (code != TSDB_CODE_SUCCESS) {
          uError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
        }
        taos_free_result(res2);
        taosMsleep(500);
      }
      break;
    }
    case SCHEMA_ACTION_CREATE_STABLE: {

@@ -428,18 +392,10 @@ static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) {
      code = taos_errno(res);
      if (code != TSDB_CODE_SUCCESS) {
        uError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
        taosMsleep(100);
      }
      taos_free_result(res);

      if (code == TSDB_CODE_MND_STB_ALREADY_EXIST) {
        TAOS_RES* res2 = taos_query(info->taos, "RESET QUERY CACHE");
        code = taos_errno(res2);
        if (code != TSDB_CODE_SUCCESS) {
          uError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
        }
        taos_free_result(res2);
        taosMsleep(500);
      }
      break;
    }

@@ -473,6 +429,21 @@ static int32_t smlProcessSchemaAction(SSmlHandle* info, SSchema* schemaField, SH
  return TSDB_CODE_SUCCESS;
}

static int32_t smlCheckMeta(SSchema* schema, int32_t length, SArray* cols) {
  SHashObj* hashTmp = taosHashInit(length, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
  for (uint16_t i = 0; i < length; i++) {
    taosHashPut(hashTmp, schema[i].name, strlen(schema[i].name), &i, SHORT_BYTES);
  }

  for (int32_t i = 0; i < taosArrayGetSize(cols); i++) {
    SSmlKv* kv = (SSmlKv*)taosArrayGetP(cols, i);
    if (taosHashGet(hashTmp, kv->key, kv->keyLen) == NULL) {
      return -1;
    }
  }
  return 0;
}

static int32_t smlModifyDBSchemas(SSmlHandle* info) {
  int32_t code = 0;
  SEpSet ep = getEpSet_s(&info->taos->pAppInfo->mgmtEp);

@@ -483,6 +454,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) {
  while (tableMetaSml) {
    SSmlSTableMeta* sTableData = *tableMetaSml;
    STableMeta* pTableMeta = NULL;
    bool needCheckMeta = false;  // for multi thread

    size_t superTableLen = 0;
    void* superTable = taosHashGetKey(tableMetaSml, &superTableLen);

@@ -533,6 +505,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) {
      if (code != TSDB_CODE_SUCCESS) {
        goto end;
      }
      needCheckMeta = true;
    } else {
      uError("SML:0x%"PRIx64" load table meta error: %s", info->id, tstrerror(code));
      goto end;

@@ -544,6 +517,20 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) {
      uError("SML:0x%"PRIx64" catalogGetSTableMeta failed. super table name %s", info->id, (char*)superTable);
      goto end;
    }

    if (needCheckMeta) {
      code = smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags, sTableData->tags);
      if (code != TSDB_CODE_SUCCESS) {
        uError("SML:0x%"PRIx64" check tag failed. super table name %s", info->id, (char*)superTable);
        goto end;
      }
      code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols);
      if (code != TSDB_CODE_SUCCESS) {
        uError("SML:0x%"PRIx64" check cols failed. super table name %s", info->id, (char*)superTable);
        goto end;
      }
    }

    sTableData->tableMeta = pTableMeta;

    tableMetaSml = (SSmlSTableMeta**)taosHashIterate(info->superTables, tableMetaSml);

@@ -2368,6 +2355,7 @@ static void smlInsertCallback(void* param, void* res, int32_t code) {
  SRequestObj* pRequest = (SRequestObj*)res;
  SSmlHandle* info = (SSmlHandle*)param;

  uDebug("SML:0x%"PRIx64" result. code:%d, msg:%s", info->id, pRequest->code, pRequest->msgBuf);
  // lock
  if (code != TSDB_CODE_SUCCESS) {
    taosThreadSpinLock(&info->params->lock);

@@ -2496,8 +2484,9 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr
end:
  taosThreadSpinDestroy(&params.lock);
  tsem_destroy(&params.sem);
  ((STscObj *)taos)->schemalessType = 0;
  uDebug("result:%s", request->msgBuf);
  // ((STscObj *)taos)->schemalessType = 0;
  ((STscObj *)taos)->schemalessType = 1;
  uDebug("resultend:%s", request->msgBuf);
  return (TAOS_RES*)request;
}

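The `smlCheckMeta` helper introduced above is a set-membership check: the schema's column names go into a hash table once, then each incoming key is probed. A self-contained sketch of the same pattern in plain C (a linear probe over a name array stands in for `taosHashInit`/`taosHashGet`; the real code is O(1) per key):

```c
#include <stdbool.h>
#include <string.h>

// Return true when the key appears in the known-name table.
static bool namesContain(const char **names, int n, const char *key, size_t keyLen) {
  for (int i = 0; i < n; i++) {
    if (strlen(names[i]) == keyLen && memcmp(names[i], key, keyLen) == 0) return true;
  }
  return false;
}

// Returns 0 when every incoming key already exists in the schema,
// -1 on the first unknown key, mirroring smlCheckMeta's convention.
int checkKeysAgainstSchema(const char **schemaNames, int nSchema,
                           const char **keys, const size_t *keyLens, int nKeys) {
  for (int i = 0; i < nKeys; i++) {
    if (!namesContain(schemaNames, nSchema, keys[i], keyLens[i])) return -1;
  }
  return 0;
}
```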
@@ -408,7 +408,7 @@ int32_t tmqCommitInner(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int8_
  pParam->userParam = userParam;
  if (!async) tsem_init(&pParam->rspSem, 0, 0);

  sendInfo = taosMemoryMalloc(sizeof(SMsgSendInfo));
  sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
  if (sendInfo == NULL) goto END;
  sendInfo->msgInfo = (SDataBuf){
      .pData = buf,

@@ -704,7 +704,7 @@ tmq_resp_err_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
  void* abuf = buf;
  tSerializeSCMSubscribeReq(&abuf, &req);

  SMsgSendInfo* sendInfo = taosMemoryMalloc(sizeof(SMsgSendInfo));
  SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
  if (sendInfo == NULL) goto FAIL;

  SMqSubscribeCbParam param = {

@@ -1008,7 +1008,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
  pParam->async = async;
  tsem_init(&pParam->rspSem, 0, 0);

  SMsgSendInfo* sendInfo = taosMemoryMalloc(sizeof(SMsgSendInfo));
  SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
  if (sendInfo == NULL) {
    tsem_destroy(&pParam->rspSem);
    taosMemoryFree(pParam);

@@ -1162,7 +1162,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
  pParam->vgId = pVg->vgId;
  pParam->epoch = tmq->epoch;

  SMsgSendInfo* sendInfo = taosMemoryMalloc(sizeof(SMsgSendInfo));
  SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
  if (sendInfo == NULL) {
    taosMemoryFree(pReq);
    taosMemoryFree(pParam);

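Each hunk above swaps `taosMemoryMalloc` for `taosMemoryCalloc` when allocating `SMsgSendInfo`. The point is zero initialization: any field the caller does not set explicitly (callbacks, pointers) starts as NULL instead of indeterminate bytes. A small illustration with the standard allocators; the struct here is hypothetical, standing in for `SMsgSendInfo`:

```c
#include <stdlib.h>

// Hypothetical message descriptor standing in for SMsgSendInfo.
typedef struct {
  void (*fp)(void *param);  // completion callback
  void *param;
  int   msgType;
} MsgInfo;

MsgInfo *newMsgInfo(int msgType) {
  // calloc zeroes the block, so fp and param are guaranteed NULL even
  // though only msgType is assigned below. With malloc, reading fp
  // before setting it would be undefined behavior.
  MsgInfo *info = calloc(1, sizeof(MsgInfo));
  if (info == NULL) return NULL;
  info->msgType = msgType;
  return info;
}
```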
@@ -41,7 +41,7 @@ TARGET_INCLUDE_DIRECTORIES(
  PRIVATE "${TD_SOURCE_DIR}/source/client/inc"
)

#add_test(
# NAME smlTest
# COMMAND smlTest
#)
add_test(
  NAME smlTest
  COMMAND smlTest
)

@@ -499,6 +499,7 @@ TEST(testCase, smlGetTimestampLen_Test) {
  ASSERT_EQ(len, 3);
}

/*
TEST(testCase, smlProcess_influx_Test) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  ASSERT_NE(taos, nullptr);

@@ -1259,4 +1260,4 @@ TEST(testCase, sml_16368_Test) {
  pRes = taos_schemaless_insert(taos, (char**)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_MICRO_SECONDS);
  ASSERT_EQ(taos_errno(pRes), 0);
  taos_free_result(pRes);
}
}*/

@@ -91,7 +91,7 @@ static const SSysDbTableSchema userDBSchema[] = {
    {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "single_stable_model", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
    {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "schemaless", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
    // {.name = "schemaless", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
    {.name = "retension", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},

    // {.name = "update", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, // disable update

@@ -175,7 +175,7 @@ static const SSysDbTableSchema userUsersSchema[] = {
};

static const SSysDbTableSchema grantsSchema[] = {
    {.name = "version", .bytes = 8 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "expire time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "storage(GB)", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},

@@ -76,22 +76,22 @@ void deltaToUtcInitOnce() {

static int64_t parseFraction(char* str, char** end, int32_t timePrec);
static int32_t parseTimeWithTz(const char* timestr, int64_t* time, int32_t timePrec, char delim);
static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec);
static int32_t parseLocaltimeDst(char* timestr, int64_t* time, int32_t timePrec);
static int32_t parseLocaltime(char* timestr, int32_t len, int64_t* utime, int32_t timePrec);
static int32_t parseLocaltimeDst(char* timestr, int32_t len, int64_t* utime, int32_t timePrec);
static char* forwardToTimeStringEnd(char* str);
static bool checkTzPresent(const char* str, int32_t len);

static int32_t (*parseLocaltimeFp[])(char* timestr, int64_t* time, int32_t timePrec) = {parseLocaltime,
static int32_t (*parseLocaltimeFp[])(char* timestr, int32_t len, int64_t* utime, int32_t timePrec) = {parseLocaltime,
                                                                                        parseLocaltimeDst};

int32_t taosParseTime(const char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) {
int32_t taosParseTime(const char* timestr, int64_t* utime, int32_t len, int32_t timePrec, int8_t day_light) {
  /* parse datatime string in with tz */
  if (strnchr(timestr, 'T', len, false) != NULL) {
    return parseTimeWithTz(timestr, time, timePrec, 'T');
    return parseTimeWithTz(timestr, utime, timePrec, 'T');
  } else if (checkTzPresent(timestr, len)) {
    return parseTimeWithTz(timestr, time, timePrec, 0);
    return parseTimeWithTz(timestr, utime, timePrec, 0);
  } else {
    return (*parseLocaltimeFp[day_light])((char*)timestr, time, timePrec);
    return (*parseLocaltimeFp[day_light])((char*)timestr, len, utime, timePrec);
  }
}

@@ -309,12 +309,36 @@ int32_t parseTimeWithTz(const char* timestr, int64_t* time, int32_t timePrec, ch
  return 0;
}

int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
static FORCE_INLINE bool validateTm(struct tm* pTm) {
  if (pTm == NULL) {
    return false;
  }

  int32_t dayOfMonth[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};

  int32_t leapYearMonthDay = 29;
  int32_t year = pTm->tm_year + 1900;
  bool isLeapYear = ((year % 100) == 0) ? ((year % 400) == 0) : ((year % 4) == 0);

  if (isLeapYear && (pTm->tm_mon == 1)) {
    if (pTm->tm_mday > leapYearMonthDay) {
      return false;
    }
  } else {
    if (pTm->tm_mday > dayOfMonth[pTm->tm_mon]) {
      return false;
    }
  }

  return true;
}

int32_t parseLocaltime(char* timestr, int32_t len, int64_t* time, int32_t timePrec) {
  *time = 0;
  struct tm tm = {0};

  char* str = taosStrpTime(timestr, "%Y-%m-%d %H:%M:%S", &tm);
  if (str == NULL) {
  if (str == NULL || (((str - timestr) < len) && (*str != '.')) || !validateTm(&tm)) {
    return -1;
  }

@@ -343,13 +367,13 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
  return 0;
}

int32_t parseLocaltimeDst(char* timestr, int64_t* time, int32_t timePrec) {
int32_t parseLocaltimeDst(char* timestr, int32_t len, int64_t* time, int32_t timePrec) {
  *time = 0;
  struct tm tm = {0};
  tm.tm_isdst = -1;

  char* str = taosStrpTime(timestr, "%Y-%m-%d %H:%M:%S", &tm);
  if (str == NULL) {
  if (str == NULL || (((str - timestr) < len) && (*str != '.')) || !validateTm(&tm)) {
    return -1;
  }

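The leap-year test in `validateTm` reads: a year divisible by 100 must also be divisible by 400; otherwise divisibility by 4 suffices. So 2000 and 2020 accept Feb 29 while 1900 rejects it. A short check of that rule in isolation:

```c
#include <stdbool.h>
#include <stdio.h>

// Same expression as in validateTm above.
static bool isLeapYear(int year) {
  return ((year % 100) == 0) ? ((year % 400) == 0) : ((year % 4) == 0);
}

int main(void) {
  int years[] = {1900, 2000, 2020, 2021};
  for (int i = 0; i < 4; i++) {
    // 1900: divisible by 100 but not 400 -> not leap, Feb 29 invalid.
    // 2000: divisible by 400 -> leap. 2020: divisible by 4 -> leap.
    printf("%d: Feb has %d days\n", years[i], isLeapYear(years[i]) ? 29 : 28);
  }
  return 0;
}
```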
@@ -20,6 +20,8 @@
extern "C" {
#endif

#include "mndInt.h"

typedef enum {
  TSDB_GRANT_ALL,
  TSDB_GRANT_TIME,

@@ -1503,8 +1503,8 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
  colDataAppend(pColInfo, rows, (const char *)statusB, false);

  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
  colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.schemaless, false);
  // pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
  // colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.schemaless, false);

  char *p = buildRetension(pDb->cfg.pRetensions);

@@ -14,19 +14,115 @@
 */

#define _DEFAULT_SOURCE
#include "os.h"
#include "taoserror.h"
#include "mndGrant.h"
#include "mndInt.h"
#include "mndShow.h"

#ifndef _GRANT
static int32_t mndRetrieveGrant(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock* pBlock, int32_t rows) { return TSDB_CODE_OPS_NOT_SUPPORT; }

static int32_t mndRetrieveGrant(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
  int32_t numOfRows = 0;
  char   *pWrite;
  int32_t cols = 0;
  char    tmp[32];
  char    tmp1[32];

  if (pShow->numOfRows < 1) {
    cols = 0;
    SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    const char      *src = "community";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "false";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    cols++;
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
    src = "unlimited";
    STR_WITH_SIZE_TO_VARSTR(tmp, src, strlen(src));
    colDataAppend(pColInfo, numOfRows, tmp, false);

    numOfRows++;
  }

  pShow->numOfRows += numOfRows;
  return numOfRows;
}

int32_t mndInitGrant(SMnode *pMnode) {
  mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_GRANTS, mndRetrieveGrant);
  return TSDB_CODE_SUCCESS;
  return 0;
}

void mndCleanupGrant() {}
void grantParseParameter() { mError("can't parsed parameter k"); }
int32_t grantCheck(EGrantType grant) { return TSDB_CODE_SUCCESS; }

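All fourteen columns in `mndRetrieveGrant` above follow the same pattern: pick the column, wrap a constant string as a varstr, append. The repetition could be collapsed into a table-driven loop. A hedged sketch of that refactor, as a fragment of the function body above (the column values are the constants the code already uses, and `taosArrayGet`, `STR_WITH_SIZE_TO_VARSTR`, and `colDataAppend` are the same helpers it already calls):

```c
// Sketch only: replaces the fourteen hand-unrolled blocks inside
// mndRetrieveGrant with one loop over a value table.
static const char *kGrantValues[] = {
    "community", "unlimited", "false", "unlimited", "unlimited",
    "unlimited", "unlimited", "unlimited", "unlimited", "unlimited",
    "unlimited", "unlimited", "unlimited", "unlimited",
};

for (int32_t col = 0; col < (int32_t)(sizeof(kGrantValues) / sizeof(kGrantValues[0])); col++) {
  SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, col);
  STR_WITH_SIZE_TO_VARSTR(tmp, kGrantValues[col], strlen(kGrantValues[col]));
  colDataAppend(pColInfo, numOfRows, tmp, false);
}
```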
@@ -397,17 +397,17 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) {
    goto _OVER;
  }

  if (sdbGetSize(pMnode->pSdb, SDB_MNODE) >= 3) {
    terrno = TSDB_CODE_MND_TOO_MANY_MNODES;
    goto _OVER;
  }

  pDnode = mndAcquireDnode(pMnode, createReq.dnodeId);
  if (pDnode == NULL) {
    terrno = TSDB_CODE_MND_DNODE_NOT_EXIST;
    goto _OVER;
  }

  if (sdbGetSize(pMnode->pSdb, SDB_MNODE) >= 3) {
    terrno = TSDB_CODE_MND_TOO_MANY_MNODES;
    goto _OVER;
  }

  if (!mndIsDnodeOnline(pDnode, taosGetTimestampMs())) {
    terrno = TSDB_CODE_NODE_OFFLINE;
    goto _OVER;

@@ -597,6 +597,11 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) {
    goto _OVER;
  }

  if (!mndIsDnodeOnline(pObj->pDnode, taosGetTimestampMs())) {
    terrno = TSDB_CODE_NODE_OFFLINE;
    goto _OVER;
  }

  pUser = mndAcquireUser(pMnode, pReq->conn.user);
  if (pUser == NULL) {
    terrno = TSDB_CODE_MND_NO_USER_FROM_CONN;

@@ -353,7 +353,13 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {

  bool hasExtraSink = false;
  bool externalTargetDB = strcmp(pStream->sourceDb, pStream->targetDb) != 0;
  if (totLevel == 2 || externalTargetDB) {
  SDbObj* pDbObj = mndAcquireDb(pMnode, pStream->targetDb);
  ASSERT(pDbObj != NULL);
  sdbRelease(pSdb, pDbObj);

  bool multiTarget = pDbObj->cfg.numOfVgroups > 1;

  if (totLevel == 2 || externalTargetDB || multiTarget) {
    SArray* taskOneLevel = taosArrayInit(0, sizeof(void*));
    taosArrayPush(pStream->tasks, &taskOneLevel);
    // add extra sink

@@ -240,7 +240,7 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
  if (pFile == NULL) {
    taosMemoryFree(pRaw);
    terrno = TAOS_SYSTEM_ERROR(errno);
    mError("failed to read sdb file:%s since %s", file, terrstr());
    mDebug("failed to read sdb file:%s since %s", file, terrstr());
    return 0;
  }

@@ -31,7 +31,7 @@ void metaReaderClear(SMetaReader *pReader) {
}

int metaGetTableEntryByVersion(SMetaReader *pReader, int64_t version, tb_uid_t uid) {
  SMeta *pMeta = pReader->pMeta;
  SMeta * pMeta = pReader->pMeta;
  STbDbKey tbDbKey = {.version = version, .uid = uid};

  // query table.db

@@ -54,7 +54,7 @@ _err:
}

int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
  SMeta *pMeta = pReader->pMeta;
  SMeta * pMeta = pReader->pMeta;
  int64_t version;

  // query uid.idx

@@ -68,7 +68,7 @@ int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
}

int metaGetTableEntryByName(SMetaReader *pReader, const char *name) {
  SMeta *pMeta = pReader->pMeta;
  SMeta * pMeta = pReader->pMeta;
  tb_uid_t uid;

  // query name.idx

@@ -82,7 +82,7 @@ int metaGetTableEntryByName(SMetaReader *pReader, const char *name) {
}

tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) {
  void *pData = NULL;
  void * pData = NULL;
  int nData = 0;
  tb_uid_t uid = 0;

@@ -138,7 +138,7 @@ void metaCloseTbCursor(SMTbCursor *pTbCur) {

int metaTbCursorNext(SMTbCursor *pTbCur) {
  int ret;
  void *pBuf;
  void * pBuf;
  STbCfg tbCfg;

  for (;;) {

@@ -159,7 +159,7 @@ int metaTbCursorNext(SMTbCursor *pTbCur) {
}

SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, bool isinline) {
  void *pData = NULL;
  void * pData = NULL;
  int nData = 0;
  int64_t version;
  SSchemaWrapper schema = {0};

@@ -219,11 +219,11 @@ _err:
}

struct SMCtbCursor {
  SMeta *pMeta;
  TBC *pCur;
  SMeta * pMeta;
  TBC * pCur;
  tb_uid_t suid;
  void *pKey;
  void *pVal;
  void * pKey;
  void * pVal;
  int kLen;
  int vLen;
};

@@ -294,10 +294,10 @@ tb_uid_t metaCtbCursorNext(SMCtbCursor *pCtbCur) {

STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) {
  // SMetaReader mr = {0};
  STSchema *pTSchema = NULL;
  STSchema * pTSchema = NULL;
  SSchemaWrapper *pSW = NULL;
  STSchemaBuilder sb = {0};
  SSchema *pSchema;
  SSchema * pSchema;

  pSW = metaGetTableSchema(pMeta, uid, sver, 0);
  if (!pSW) return NULL;

@@ -323,11 +323,11 @@ int metaGetTbNum(SMeta *pMeta) {
}

typedef struct {
  SMeta *pMeta;
  TBC *pCur;
  SMeta * pMeta;
  TBC * pCur;
  tb_uid_t uid;
  void *pKey;
  void *pVal;
  void * pKey;
  void * pVal;
  int kLen;
  int vLen;
} SMSmaCursor;

@@ -399,7 +399,7 @@ tb_uid_t metaSmaCursorNext(SMSmaCursor *pSmaCur) {

STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) {
  STSmaWrapper *pSW = NULL;
  SArray *pSmaIds = NULL;
  SArray * pSmaIds = NULL;

  if (!(pSmaIds = metaGetSmaIdsByTable(pMeta, uid))) {
    return NULL;

@@ -423,7 +423,7 @@ STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) {
  metaReaderInit(&mr, pMeta, 0);
  int64_t smaId;
  int smaIdx = 0;
  STSma *pTSma = NULL;
  STSma * pTSma = NULL;
  for (int i = 0; i < pSW->number; ++i) {
    smaId = *(tb_uid_t *)taosArrayGet(pSmaIds, i);
    if (metaGetTableEntryByUid(&mr, smaId) < 0) {

@@ -471,7 +471,7 @@ _err:
}

STSma *metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid) {
  STSma *pTSma = NULL;
  STSma * pTSma = NULL;
  SMetaReader mr = {0};
  metaReaderInit(&mr, pMeta, 0);
  if (metaGetTableEntryByUid(&mr, indexUid) < 0) {

@@ -493,7 +493,7 @@ STSma *metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid) {
}

SArray *metaGetSmaIdsByTable(SMeta *pMeta, tb_uid_t uid) {
  SArray *pUids = NULL;
  SArray * pUids = NULL;
  SSmaIdxKey *pSmaIdxKey = NULL;

  SMSmaCursor *pCur = metaOpenSmaCursor(pMeta, uid);

@@ -531,7 +531,7 @@ SArray *metaGetSmaIdsByTable(SMeta *pMeta, tb_uid_t uid) {
}

SArray *metaGetSmaTbUids(SMeta *pMeta) {
  SArray *pUids = NULL;
  SArray * pUids = NULL;
  SSmaIdxKey *pSmaIdxKey = NULL;
  tb_uid_t lastUid = 0;

@@ -593,19 +593,21 @@ const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) {
}

typedef struct {
  SMeta *pMeta;
  TBC *pCur;
  SMeta * pMeta;
  TBC * pCur;
  tb_uid_t suid;
  int16_t cid;
  int16_t type;
  void *pKey;
  void *pVal;
  void * pKey;
  void * pVal;
  int32_t kLen;
  int32_t vLen;
} SIdxCursor;

int32_t metaFilteTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
  SIdxCursor *pCursor = NULL;
  char *      buf = NULL;
  int32_t     maxSize = 0;

  int32_t ret = 0, valid = 0;
  pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));

@@ -623,17 +625,34 @@ int32_t metaFilteTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
  int32_t nKey = 0;

  int32_t nTagData = 0;
  void *tagData = NULL;
  void * tagData = NULL;

  if (param->val == NULL) {
    metaError("vgId:%d failed to filter NULL data", TD_VID(pMeta->pVnode));
    return -1;
  } else {
    if (IS_VAR_DATA_TYPE(param->type)) {
      tagData = varDataVal(param->val);
      nTagData = varDataLen(param->val);

      if (param->type == TSDB_DATA_TYPE_NCHAR) {
        maxSize = 4 * nTagData + 1;
        buf = taosMemoryCalloc(1, maxSize);
        if (false == taosMbsToUcs4(tagData, nTagData, (TdUcs4 *)buf, maxSize, &maxSize)) {
          goto END;
        }

        tagData = buf;
        nTagData = maxSize;
      }
    } else {
      tagData = param->val;
      nTagData = tDataTypes[param->type].bytes;
    }
  }
  ret = metaCreateTagIdxKey(pCursor->suid, pCursor->cid, tagData, nTagData, pCursor->type,
                            param->reverse ? INT64_MAX : INT64_MIN, &pKey, &nKey);

  if (ret != 0) {
    goto END;
  }

@@ -642,7 +661,7 @@ int32_t metaFilteTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
    goto END;
  }

  void *entryKey = NULL, *entryVal = NULL;
  void * entryKey = NULL, *entryVal = NULL;
  int32_t nEntryKey, nEntryVal;
  while (1) {
    valid = tdbTbcGet(pCursor->pCur, (const void **)&entryKey, &nEntryKey, (const void **)&entryVal, &nEntryVal);

@@ -676,6 +695,7 @@ int32_t metaFilteTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
END:
  if (pCursor->pMeta) metaULock(pCursor->pMeta);
  if (pCursor->pCur) tdbTbcClose(pCursor->pCur);
  taosMemoryFree(buf);

  taosMemoryFree(pCursor);

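In `metaFilteTableIds` above, an NCHAR filter value is converted to UCS-4 before the tag-index key is built, with `maxSize = 4 * nTagData + 1` reserving four bytes per input character. Below is the same sizing idea with the portable C conversion (`mbstowcs` stands in for `taosMbsToUcs4`, which this sketch does not assume access to):

```c
#include <stdlib.h>
#include <wchar.h>

// Convert a NUL-terminated multibyte tag value to a wide (UCS-4 on
// Linux) buffer. Mirrors the sizing in metaFilteTableIds: the output
// can need up to one wide char per input byte, so allocate
// nTagData + 1 wide-char slots. Assumes the process locale has been
// set (e.g. setlocale(LC_ALL, "")) so mbstowcs understands the input.
wchar_t *tagToWide(const char *val, size_t nTagData, size_t *outByteLen) {
  wchar_t *buf = calloc(nTagData + 1, sizeof(wchar_t));
  if (buf == NULL) return NULL;

  size_t n = mbstowcs(buf, val, nTagData + 1);
  if (n == (size_t)-1) {  // invalid multibyte sequence
    free(buf);
    return NULL;
  }
  *outByteLen = n * sizeof(wchar_t);  // byte length, as the index key wants
  return buf;
}
```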
@@ -136,6 +136,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
            pReq->subKey);
    return -1;
  }

  if (pHandle->consumerId != consumerId) {
    tqError("tmq poll: consumer handle mismatch for consumer %ld in vg %d, subkey %s, handle consumer id %ld",
            consumerId, pTq->pVnode->config.vgId, pReq->subKey, pHandle->consumerId);

@@ -145,10 +145,8 @@ typedef struct SElapsedInfo {
typedef struct SHistoFuncBin {
  double lower;
  double upper;
  union {
    int64_t count;
    double  percentage;
  };
} SHistoFuncBin;

typedef struct SHistoFuncInfo {

@@ -3190,7 +3188,7 @@ int32_t spreadCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {

  SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
  SSpreadInfo*         pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
  spreadTransferInfo(pDBuf, pSBuf);
  spreadTransferInfo(pSBuf, pDBuf);
  pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
  return TSDB_CODE_SUCCESS;
}

@@ -3361,7 +3359,7 @@ int32_t elapsedCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
  SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
  SElapsedInfo*        pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);

  elapsedTransferInfo(pDBuf, pSBuf);
  elapsedTransferInfo(pSBuf, pDBuf);
  pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
  return TSDB_CODE_SUCCESS;
}

@@ -3668,7 +3666,7 @@ int32_t histogramCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
  SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
  SHistoFuncInfo*      pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);

  histogramTransferInfo(pDBuf, pSBuf);
  histogramTransferInfo(pSBuf, pDBuf);
  pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
  return TSDB_CODE_SUCCESS;
}

@@ -3864,7 +3862,7 @@ int32_t hllCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
  SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
  SHLLInfo*            pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);

  hllTransferInfo(pDBuf, pSBuf);
  hllTransferInfo(pSBuf, pDBuf);
  pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
  return TSDB_CODE_SUCCESS;
}

@@ -4272,6 +4270,8 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
  SColumnInfoData* pInputCol = pInput->pData[0];
  SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;

  int32_t alreadySampled = pInfo->numSampled;

  int32_t startOffset = pCtx->offset;
  for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
    if (colDataIsNull_s(pInputCol, i)) {

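The four `*Combine` hunks above all make the same one-line fix: the transfer helper is now called as `transferInfo(pSBuf, pDBuf)`, so the *source* buffer is merged into the *destination* rather than the other way around. The shape of the bug in miniature, using a hypothetical spread struct:

```c
#include <stdio.h>

typedef struct { double min; double max; } SpreadInfo;

// Merge src into dst -- the direction the fixed code uses.
static void spreadTransfer(const SpreadInfo *src, SpreadInfo *dst) {
  if (src->min < dst->min) dst->min = src->min;
  if (src->max > dst->max) dst->max = src->max;
}

int main(void) {
  SpreadInfo dest = {5.0, 10.0};
  SpreadInfo source = {1.0, 20.0};

  // Correct call: source merged into dest; dest ends up {1, 20}.
  // The pre-fix code effectively did spreadTransfer(&dest, &source),
  // mutating the source buffer and leaving the destination unchanged.
  spreadTransfer(&source, &dest);
  printf("min=%g max=%g\n", dest.min, dest.max);
  return 0;
}
```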
@@ -141,7 +141,7 @@ static int32_t sifGetValueFromNode(SNode *node, char **value) {
  if (*pData == TSDB_DATA_TYPE_NULL) {
    dataLen = 0;
  } else if (*pData == TSDB_DATA_TYPE_NCHAR) {
    dataLen = varDataTLen(pData + CHAR_BYTES);
    dataLen = varDataTLen(pData);
  } else if (*pData == TSDB_DATA_TYPE_DOUBLE) {
    dataLen = LONG_BYTES;
  } else if (*pData == TSDB_DATA_TYPE_BOOL) {

@@ -180,15 +180,17 @@ static int32_t sifInitJsonParam(SNode *node, SIFParam *param, SIFCtx *ctx) {
  // memcpy(param->colName, l->colName, sizeof(l->colName));
}
static int32_t sifInitParam(SNode *node, SIFParam *param, SIFCtx *ctx) {
  param->status = SFLT_COARSE_INDEX;
  switch (nodeType(node)) {
    case QUERY_NODE_VALUE: {
      SValueNode *vn = (SValueNode *)node;
      if (vn->typeData == TSDB_DATA_TYPE_NULL && (vn->literal == NULL || strlen(vn->literal) == 0)) {
        param->status = SFLT_NOT_INDEX;
        return 0;
      }
      SIF_ERR_RET(sifGetValueFromNode(node, &param->condValue));
      param->colId = -1;
      param->colValType = (uint8_t)(vn->node.resType.type);
      if (vn->literal == NULL || strlen(vn->literal) == 0) {
        return TSDB_CODE_QRY_INVALID_INPUT;
      }
      memcpy(param->colName, vn->literal, strlen(vn->literal));
      break;
    }

@@ -517,9 +519,17 @@ static int32_t sifExecOper(SOperatorNode *node, SIFCtx *ctx, SIFParam *output) {
  // ugly code, refactor later
  output->arg = ctx->arg;
  sif_func_t operFn = sifNullFunc;
  code = sifGetOperFn(node->opType, &operFn, &output->status);

  if (!ctx->noExec) {
    code = operFn(&params[0], nParam > 1 ? &params[1] : NULL, output);
    SIF_ERR_RET(sifGetOperFn(node->opType, &operFn, &output->status));
    SIF_ERR_RET(operFn(&params[0], nParam > 1 ? &params[1] : NULL, output));
  } else {
    // ugly code, refactor later
    if (nParam > 1 && params[1].status == SFLT_NOT_INDEX) {
      output->status = SFLT_NOT_INDEX;
      return code;
    }
    SIF_ERR_RET(sifGetOperFn(node->opType, &operFn, &output->status));
  }

  taosMemoryFree(params);

@@ -595,7 +605,7 @@ static EDealRes sifWalkLogic(SNode *pNode, void *context) {
}
static EDealRes sifWalkOper(SNode *pNode, void *context) {
  SOperatorNode *node = (SOperatorNode *)pNode;
  SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))};
  SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t)), .status = SFLT_COARSE_INDEX};

  SIFCtx *ctx = context;
  ctx->code = sifExecOper(node, ctx, &output);

@@ -801,7 +801,8 @@ SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOpti
      ((SDatabaseOptions*)pOptions)->pRetentions = pVal;
      break;
    case DB_OPTION_SCHEMALESS:
      ((SDatabaseOptions*)pOptions)->schemaless = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
      // ((SDatabaseOptions*)pOptions)->schemaless = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
      ((SDatabaseOptions*)pOptions)->schemaless = 1;
      break;
    default:
      break;

@@ -1297,11 +1297,12 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) {
}

static int32_t checkSchemalessDb(SInsertParseContext* pCxt, char* pDbName) {
  SDbCfgInfo pInfo = {0};
  char fullName[TSDB_TABLE_FNAME_LEN];
  snprintf(fullName, sizeof(fullName), "%d.%s", pCxt->pComCxt->acctId, pDbName);
  CHECK_CODE(getDBCfg(pCxt, fullName, &pInfo));
  return pInfo.schemaless ? TSDB_CODE_SML_INVALID_DB_CONF : TSDB_CODE_SUCCESS;
  // SDbCfgInfo pInfo = {0};
  // char fullName[TSDB_TABLE_FNAME_LEN];
  // snprintf(fullName, sizeof(fullName), "%d.%s", pCxt->pComCxt->acctId, pDbName);
  // CHECK_CODE(getDBCfg(pCxt, fullName, &pInfo));
  // return pInfo.schemaless ? TSDB_CODE_SML_INVALID_DB_CONF : TSDB_CODE_SUCCESS;
  return TSDB_CODE_SUCCESS;
}

// tb_name

@@ -2119,9 +2120,11 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS
      isOrdered = false;
    }
    if (index < 0) {
      uError("smlBoundColumnData. index:%d", index);
      return TSDB_CODE_SML_INVALID_DATA;
    }
    if (pColList->cols[index].valStat == VAL_STAT_HAS) {
      uError("smlBoundColumnData. already set. index:%d", index);
      return TSDB_CODE_SML_INVALID_DATA;
    }
    lastColIdx = index;

@@ -2762,15 +2762,16 @@ static int32_t checkTableSchema(STranslateContext* pCxt, SCreateTableStmt* pStmt
}

static int32_t checkSchemalessDb(STranslateContext* pCxt, const char* pDbName) {
  if (0 != pCxt->pParseCxt->schemalessType) {
  // if (0 != pCxt->pParseCxt->schemalessType) {
  //   return TSDB_CODE_SUCCESS;
  // }
  // SDbCfgInfo info = {0};
  // int32_t code = getDBCfg(pCxt, pDbName, &info);
  // if (TSDB_CODE_SUCCESS == code) {
  //   code = info.schemaless ? TSDB_CODE_SML_INVALID_DB_CONF : TSDB_CODE_SUCCESS;
  // }
  // return code;
  return TSDB_CODE_SUCCESS;
  }
  SDbCfgInfo info = {0};
  int32_t code = getDBCfg(pCxt, pDbName, &info);
  if (TSDB_CODE_SUCCESS == code) {
    code = info.schemaless ? TSDB_CODE_SML_INVALID_DB_CONF : TSDB_CODE_SUCCESS;
  }
  return code;
}

static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {

@@ -5048,6 +5049,9 @@ static int32_t buildModifyVnodeArray(STranslateContext* pCxt, SAlterTableStmt* p
static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) {
  SAlterTableStmt* pStmt = (SAlterTableStmt*)pQuery->pRoot;
  int32_t code = checkSchemalessDb(pCxt, pStmt->dbName);
  if (TSDB_CODE_SUCCESS != code) {
    return code;
  }
  STableMeta* pTableMeta = NULL;
  code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
  if (TSDB_CODE_SUCCESS != code) {

@@ -110,9 +110,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDisp
    return 0;
  }
  // continue dispatch
  if (pTask->dispatchType != TASK_DISPATCH__NONE) {
    streamDispatch(pTask, pMsgCb);
  }
  return 0;
}

@@ -182,6 +182,7 @@ FAIL:
}

int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb) {
  ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE);
#if 1
  int8_t old =
      atomic_val_compare_exchange_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL, TASK_OUTPUT_STATUS__WAIT);

@@ -713,7 +713,7 @@ static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) {
  // delete confict entries
  code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin);
  ASSERT(code == 0);
  sInfo("sync event vgId:%d log truncate, from %ld to %ld", ths->vgId, delBegin, delEnd);
  sDebug("vgId:%d sync event log truncate, from %ld to %ld", ths->vgId, delBegin, delEnd);
  logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore);

  return code;

@@ -994,7 +994,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
    SyncIndex commitEnd = snapshot.lastApplyIndex;
    ths->commitIndex = snapshot.lastApplyIndex;

    sInfo("sync event vgId:%d commit by snapshot from index:%ld to index:%ld, %s", ths->vgId, commitBegin,
    sDebug("vgId:%d sync event commit by snapshot from index:%ld to index:%ld, %s", ths->vgId, commitBegin,
          commitEnd, syncUtilState2String(ths->state));
  }

@@ -189,16 +189,16 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries

    if (gRaftDetailLog) {
      char* s = snapshotSender2Str(pSender);
      sInfo(
          "sync event vgId:%d snapshot send to %s:%d start sender first time, lastApplyIndex:%ld lastApplyTerm:%lu "
      sDebug(
          "vgId:%d sync event snapshot send to %s:%d start sender first time, lastApplyIndex:%ld lastApplyTerm:%lu "
          "lastConfigIndex:%ld"
          "sender:%s",
          ths->vgId, host, port, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm,
          pSender->snapshot.lastConfigIndex, s);
      taosMemoryFree(s);
    } else {
      sInfo(
          "sync event vgId:%d snapshot send to %s:%d start sender first time, lastApplyIndex:%ld "
      sDebug(
          "vgId:%d sync event snapshot send to %s:%d start sender first time, lastApplyIndex:%ld "
          "lastApplyTerm:%lu lastConfigIndex:%ld",
          ths->vgId, host, port, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm,
          pSender->snapshot.lastConfigIndex);

@@ -56,7 +56,7 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
      SyncIndex commitEnd = snapshot.lastApplyIndex;
      pSyncNode->commitIndex = snapshot.lastApplyIndex;

      sInfo("sync event vgId:%d commit by snapshot from index:%ld to index:%ld, %s", pSyncNode->vgId,
      sDebug("vgId:%d sync event commit by snapshot from index:%ld to index:%ld, %s", pSyncNode->vgId,
            pSyncNode->commitIndex, snapshot.lastApplyIndex, syncUtilState2String(pSyncNode->state));
    }

@@ -470,7 +470,7 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) {
    return TAOS_SYNC_PROPOSE_OTHER_ERROR;
  }
  assert(rid == pSyncNode->rid);
  sTrace("sync event vgId:%d propose msgType:%s", pSyncNode->vgId, TMSG_INFO(pMsg->msgType));
  sDebug("vgId:%d sync event propose msgType:%s", pSyncNode->vgId, TMSG_INFO(pMsg->msgType));

  if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
    SRespStub stub;

@@ -501,7 +501,7 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) {
SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
  SSyncInfo* pSyncInfo = (SSyncInfo*)pOldSyncInfo;

  sInfo("sync event vgId:%d sync open", pSyncInfo->vgId);
  sDebug("vgId:%d sync event sync open", pSyncInfo->vgId);

  SSyncNode* pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode));
  assert(pSyncNode != NULL);

@@ -761,7 +761,7 @@ void syncNodeStartStandBy(SSyncNode* pSyncNode) {
}

void syncNodeClose(SSyncNode* pSyncNode) {
  sInfo("sync event vgId:%d sync close", pSyncNode->vgId);
  sDebug("vgId:%d sync event sync close", pSyncNode->vgId);

  int32_t ret;
  assert(pSyncNode != NULL);

@@ -1240,7 +1240,8 @@ void syncNodeUpdateTerm(SSyncNode* pSyncNode, SyncTerm term) {
}

void syncNodeBecomeFollower(SSyncNode* pSyncNode, const char* debugStr) {
  sInfo("sync event vgId:%d become follower, isStandBy:%d, %s", pSyncNode->vgId, pSyncNode->pRaftCfg->isStandBy, debugStr);
  sDebug("vgId:%d sync event become follower, isStandBy:%d, %s", pSyncNode->vgId, pSyncNode->pRaftCfg->isStandBy,
         debugStr);

  // maybe clear leader cache
  if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {

@@ -1274,7 +1275,8 @@ void syncNodeBecomeFollower(SSyncNode* pSyncNode, const char* debugStr) {
  // /\ UNCHANGED <<messages, currentTerm, votedFor, candidateVars, logVars>>
  //
void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) {
  sInfo("sync event vgId:%d become leader, isStandBy:%d, %s", pSyncNode->vgId, pSyncNode->pRaftCfg->isStandBy, debugStr);
  sDebug("vgId:%d sync event become leader, isStandBy:%d, %s", pSyncNode->vgId, pSyncNode->pRaftCfg->isStandBy,
         debugStr);

  // state change
  pSyncNode->state = TAOS_SYNC_STATE_LEADER;

@@ -1882,7 +1884,7 @@ static int32_t syncNodeConfigChange(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftE
int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, uint64_t flag) {
  int32_t    code = 0;
  ESyncState state = flag;
  sInfo("sync event vgId:%d commit by wal from index:%" PRId64 " to index:%" PRId64 ", %s", ths->vgId, beginIndex,
  sDebug("vgId:%d sync event commit by wal from index:%" PRId64 " to index:%" PRId64 ", %s", ths->vgId, beginIndex,
        endIndex, syncUtilState2String(state));

  // execute fsm

@@ -1931,7 +1933,7 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex,
          ths->pFsm->FpRestoreFinishCb(ths->pFsm);
        }
        ths->restoreFinish = true;
        sInfo("sync event vgId:%d restore finish", ths->vgId);
        sDebug("vgId:%d sync event restore finish", ths->vgId);
      }
    }

@@ -162,7 +162,7 @@ static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntr

  walFsync(pWal, true);

  sTrace("sync event vgId:%d write index:%ld, %s, isStandBy:%d, msgType:%s, originalRpcType:%s", pData->pSyncNode->vgId,
  sDebug("vgId:%d sync event write index:%ld, %s, isStandBy:%d, msgType:%s, originalRpcType:%s", pData->pSyncNode->vgId,
         pEntry->index, syncUtilState2String(pData->pSyncNode->state), pData->pSyncNode->pRaftCfg->isStandBy,
         TMSG_INFO(pEntry->msgType), TMSG_INFO(pEntry->originalRpcType));

@@ -320,7 +320,7 @@ int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) {

  walFsync(pWal, true);

  sTrace("sync event old write wal: %ld", pEntry->index);
  sDebug("sync event old write wal: %ld", pEntry->index);
  return code;
}

@@ -140,16 +140,16 @@ void snapshotSenderStart(SSyncSnapshotSender *pSender) {

  if (gRaftDetailLog) {
    char *msgStr = syncSnapshotSend2Str(pMsg);
    sTrace(
        "sync event vgId:%d snapshot send to %s:%d begin seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
    sDebug(
        "vgId:%d sync event snapshot send to %s:%d begin seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
        "lastConfigIndex:%ld send "
        "msg:%s",
        pSender->pSyncNode->vgId, host, port, pSender->seq, pSender->ack, pSender->snapshot.lastApplyIndex,
        pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, msgStr);
    taosMemoryFree(msgStr);
  } else {
    sTrace(
        "sync event vgId:%d snapshot send to %s:%d begin seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
    sDebug(
        "vgId:%d sync event snapshot send to %s:%d begin seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
        "lastConfigIndex:%ld",
        pSender->pSyncNode->vgId, host, port, pSender->seq, pSender->ack, pSender->snapshot.lastApplyIndex,
        pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex);

@@ -278,23 +278,23 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) {
  if (pSender->seq == SYNC_SNAPSHOT_SEQ_END) {
    if (gRaftDetailLog) {
      char *msgStr = syncSnapshotSend2Str(pMsg);
      sTrace(
          "sync event vgId:%d snapshot send to %s:%d finish seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
      sDebug(
          "vgId:%d sync event snapshot send to %s:%d finish seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
          "lastConfigIndex:%ld send "
          "msg:%s",
          pSender->pSyncNode->vgId, host, port, pSender->seq, pSender->ack, pSender->snapshot.lastApplyIndex,
          pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, msgStr);
      taosMemoryFree(msgStr);
    } else {
      sTrace(
          "sync event vgId:%d snapshot send to %s:%d finish seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
      sDebug(
          "vgId:%d sync event snapshot send to %s:%d finish seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
          "lastConfigIndex:%ld",
          pSender->pSyncNode->vgId, host, port, pSender->seq, pSender->ack, pSender->snapshot.lastApplyIndex,
          pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex);
    }
  } else {
    sTrace(
        "sync event vgId:%d snapshot send to %s:%d sending seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
    sDebug(
        "vgId:%d sync event snapshot send to %s:%d sending seq:%d ack:%d lastApplyIndex:%ld lastApplyTerm:%lu "
        "lastConfigIndex:%ld",
        pSender->pSyncNode->vgId, host, port, pSender->seq, pSender->ack, pSender->snapshot.lastApplyIndex,
        pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex);

@@ -328,11 +328,11 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {

  if (gRaftDetailLog) {
    char *msgStr = syncSnapshotSend2Str(pMsg);
    sTrace("sync event vgId:%d snapshot send to %s:%d resend seq:%d ack:%d send msg:%s", pSender->pSyncNode->vgId,
    sDebug("vgId:%d sync event snapshot send to %s:%d resend seq:%d ack:%d send msg:%s", pSender->pSyncNode->vgId,
           host, port, pSender->seq, pSender->ack, msgStr);
    taosMemoryFree(msgStr);
  } else {
    sTrace("sync event vgId:%d snapshot send to %s:%d resend seq:%d ack:%d", pSender->pSyncNode->vgId, host, port,
    sDebug("vgId:%d sync event snapshot send to %s:%d resend seq:%d ack:%d", pSender->pSyncNode->vgId, host, port,
           pSender->seq, pSender->ack);
  }

@@ -565,12 +565,17 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {

      if (gRaftDetailLog) {
        char *msgStr = syncSnapshotSend2Str(pMsg);
        sTrace("sync event vgId:%d snapshot recv from %s:%d begin ack:%d, lastIndex:%ld, lastTerm:%lu, recv msg:%s",
               pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm, msgStr);
        sDebug(
            "vgId:%d sync event snapshot recv from %s:%d begin ack:%d, lastIndex:%ld, lastTerm:%lu, "
            "lastConfigIndex:%ld, recv msg:%s",
            pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm, pMsg->lastConfigIndex,
            msgStr);
        taosMemoryFree(msgStr);
      } else {
        sTrace("sync event vgId:%d snapshot recv from %s:%d begin ack:%d, lastIndex:%ld, lastTerm:%lu",
               pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm);
        sDebug(
            "vgId:%d sync event snapshot recv from %s:%d begin ack:%d, lastIndex:%ld, lastTerm:%lu, "
            "lastConfigIndex:%ld",
            pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm, pMsg->lastConfigIndex);
      }

    } else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_END) {

@@ -597,12 +602,12 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {

      bool isDrop;
      if (IamInNew) {
        sTrace("sync event vgId:%d update config by snapshot, lastIndex:%ld, lastTerm:%lu, lastConfigIndex:%ld ",
        sDebug("vgId:%d sync event update config by snapshot, lastIndex:%ld, lastTerm:%lu, lastConfigIndex:%ld ",
               pSyncNode->vgId, pMsg->lastIndex, pMsg->lastTerm, pMsg->lastConfigIndex);
        syncNodeUpdateConfig(pSyncNode, &newSyncCfg, pMsg->lastConfigIndex, &isDrop);
      } else {
        sTrace(
            "sync event vgId:%d do not update config by snapshot, I am not in newCfg, lastIndex:%ld, lastTerm:%lu, "
        sDebug(
            "vgId:%d sync event do not update config by snapshot, I am not in newCfg, lastIndex:%ld, lastTerm:%lu, "
            "lastConfigIndex:%ld ",
            pSyncNode->vgId, pMsg->lastIndex, pMsg->lastTerm, pMsg->lastConfigIndex);
      }

@@ -626,19 +631,20 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {

      if (gRaftDetailLog) {
        char *logSimpleStr = logStoreSimple2Str(pSyncNode->pLogStore);
        sInfo(
            "sync event vgId:%d snapshot recv from %s:%d finish, update log begin index:%ld, "
        sDebug(
            "vgId:%d sync event snapshot recv from %s:%d finish, update log begin index:%ld, "
            "snapshot.lastApplyIndex:%ld, "
            "snapshot.lastApplyTerm:%lu, raft log:%s",
            "snapshot.lastApplyTerm:%lu, snapshot.lastConfigIndex:%ld, raft log:%s",
            pSyncNode->vgId, host, port, pMsg->lastIndex + 1, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
            logSimpleStr);
            snapshot.lastConfigIndex, logSimpleStr);
        taosMemoryFree(logSimpleStr);
      } else {
        sInfo(
            "sync event vgId:%d snapshot recv from %s:%d finish, update log begin index:%ld, "
        sDebug(
            "vgId:%d sync event snapshot recv from %s:%d finish, update log begin index:%ld, "
            "snapshot.lastApplyIndex:%ld, "
            "snapshot.lastApplyTerm:%lu",
            pSyncNode->vgId, host, port, pMsg->lastIndex + 1, snapshot.lastApplyIndex, snapshot.lastApplyTerm);
            "snapshot.lastApplyTerm:%lu, snapshot.lastConfigIndex:%ld",
            pSyncNode->vgId, host, port, pMsg->lastIndex + 1, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
            snapshot.lastConfigIndex);
      }

      pReceiver->pWriter = NULL;

@@ -648,12 +654,18 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {

      if (gRaftDetailLog) {
        char *msgStr = syncSnapshotSend2Str(pMsg);
        sTrace("sync event vgId:%d snapshot recv from %s:%d end ack:%d, lastIndex:%ld, lastTerm:%lu, recv msg:%s",
               pReceiver->pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm, msgStr);
        sDebug(
            "vgId:%d sync event snapshot recv from %s:%d end ack:%d, lastIndex:%ld, lastTerm:%lu, "
            "lastConfigIndex:%ld, recv msg:%s",
|
||||
pReceiver->pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm,
|
||||
pMsg->lastConfigIndex, msgStr);
|
||||
taosMemoryFree(msgStr);
|
||||
} else {
|
||||
sTrace("sync event vgId:%d snapshot recv from %s:%d end ack:%d, lastIndex:%ld, lastTerm:%lu",
|
||||
pReceiver->pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm);
|
||||
sDebug(
|
||||
"vgId:%d sync event snapshot recv from %s:%d end ack:%d, lastIndex:%ld, lastTerm:%lu, "
|
||||
"lastConfigIndex:%ld",
|
||||
pReceiver->pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm,
|
||||
pMsg->lastConfigIndex);
|
||||
}
|
||||
|
||||
} else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_FORCE_CLOSE) {
|
||||
|
@ -667,14 +679,19 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
|
|||
|
||||
if (gRaftDetailLog) {
|
||||
char *msgStr = syncSnapshotSend2Str(pMsg);
|
||||
sTrace(
|
||||
"sync event vgId:%d snapshot recv from %s:%d force close ack:%d, lastIndex:%ld, lastTerm:%lu, recv "
|
||||
sDebug(
|
||||
"vgId:%d sync event snapshot recv from %s:%d force close ack:%d, lastIndex:%ld, lastTerm:%lu, "
|
||||
"lastConfigIndex:%ld, recv "
|
||||
"msg:%s",
|
||||
pReceiver->pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm, msgStr);
|
||||
pReceiver->pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm,
|
||||
pMsg->lastConfigIndex, msgStr);
|
||||
taosMemoryFree(msgStr);
|
||||
} else {
|
||||
sTrace("sync event vgId:%d snapshot recv from %s:%d force close ack:%d, lastIndex:%ld, lastTerm:%lu",
|
||||
pReceiver->pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm);
|
||||
sDebug(
|
||||
"vgId:%d sync event snapshot recv from %s:%d force close ack:%d, lastIndex:%ld, lastTerm:%lu, "
|
||||
"lastConfigIndex:%ld",
|
||||
pReceiver->pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm,
|
||||
pMsg->lastConfigIndex);
|
||||
}
|
||||
|
||||
} else if (pMsg->seq > SYNC_SNAPSHOT_SEQ_BEGIN && pMsg->seq < SYNC_SNAPSHOT_SEQ_END) {
|
||||
|
@ -693,13 +710,17 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
|
|||
|
||||
if (gRaftDetailLog) {
|
||||
char *msgStr = syncSnapshotSend2Str(pMsg);
|
||||
sTrace(
|
||||
"sync event vgId:%d snapshot recv from %s:%d receiving ack:%d, lastIndex:%ld, lastTerm:%lu, recv msg:%s",
|
||||
pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm, msgStr);
|
||||
sDebug(
|
||||
"vgId:%d sync event snapshot recv from %s:%d receiving ack:%d, lastIndex:%ld, lastTerm:%lu, "
|
||||
"lastConfigIndex:%ld, recv msg:%s",
|
||||
pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm, pMsg->lastConfigIndex,
|
||||
msgStr);
|
||||
taosMemoryFree(msgStr);
|
||||
} else {
|
||||
sTrace("sync event vgId:%d snapshot recv from %s:%d receiving ack:%d, lastIndex:%ld, lastTerm:%lu",
|
||||
pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm);
|
||||
sDebug(
|
||||
"vgId:%d sync event snapshot recv from %s:%d receiving ack:%d, lastIndex:%ld, lastTerm:%lu, "
|
||||
"lastConfigIndex:%ld",
|
||||
pSyncNode->vgId, host, port, pReceiver->ack, pMsg->lastIndex, pMsg->lastTerm, pMsg->lastConfigIndex);
|
||||
}
|
||||
|
||||
} else {
|
||||
|
|
|
@ -0,0 +1,794 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define _DEFAULT_SOURCE
#include "tlrucache.h"
#include "os.h"
#include "tdef.h"
#include "taoserror.h"
#include "tlog.h"
#include "tarray.h"

typedef struct SLRUEntry SLRUEntry;
typedef struct SLRUEntryTable SLRUEntryTable;
typedef struct SLRUCacheShard SLRUCacheShard;
typedef struct SShardedCache SShardedCache;

enum {
  TAOS_LRU_IN_CACHE = (1 << 0),  // Whether this entry is referenced by the hash table.

  TAOS_LRU_IS_HIGH_PRI = (1 << 1),  // Whether this entry is high priority entry.

  TAOS_LRU_IN_HIGH_PRI_POOL = (1 << 2),  // Whether this entry is in high-pri pool.

  TAOS_LRU_HAS_HIT = (1 << 3),  // Whether this entry has had any lookups (hits).
};

struct SLRUEntry {
  void               *value;
  _taos_lru_deleter_t deleter;
  SLRUEntry          *nextHash;
  SLRUEntry          *next;
  SLRUEntry          *prev;
  size_t              totalCharge;
  size_t              keyLength;
  uint32_t            hash;
  uint32_t            refs;
  uint8_t             flags;
  char                keyData[1];
};

#define TAOS_LRU_ENTRY_IN_CACHE(h)     ((h)->flags & TAOS_LRU_IN_CACHE)
#define TAOS_LRU_ENTRY_IN_HIGH_POOL(h) ((h)->flags & TAOS_LRU_IN_HIGH_PRI_POOL)
#define TAOS_LRU_ENTRY_IS_HIGH_PRI(h)  ((h)->flags & TAOS_LRU_IS_HIGH_PRI)
#define TAOS_LRU_ENTRY_HAS_HIT(h)      ((h)->flags & TAOS_LRU_HAS_HIT)

#define TAOS_LRU_ENTRY_SET_IN_CACHE(h, inCache) \
  do {                                          \
    if (inCache) {                              \
      (h)->flags |= TAOS_LRU_IN_CACHE;          \
    } else {                                    \
      (h)->flags &= ~TAOS_LRU_IN_CACHE;         \
    }                                           \
  } while (0)
#define TAOS_LRU_ENTRY_SET_IN_HIGH_POOL(h, inHigh) \
  do {                                             \
    if (inHigh) {                                  \
      (h)->flags |= TAOS_LRU_IN_HIGH_PRI_POOL;     \
    } else {                                       \
      (h)->flags &= ~TAOS_LRU_IN_HIGH_PRI_POOL;    \
    }                                              \
  } while (0)
#define TAOS_LRU_ENTRY_SET_PRIORITY(h, priority) \
  do {                                           \
    if (priority == TAOS_LRU_PRIORITY_HIGH) {    \
      (h)->flags |= TAOS_LRU_IS_HIGH_PRI;        \
    } else {                                     \
      (h)->flags &= ~TAOS_LRU_IS_HIGH_PRI;       \
    }                                            \
  } while (0)
#define TAOS_LRU_ENTRY_SET_HIT(h) ((h)->flags |= TAOS_LRU_HAS_HIT)

#define TAOS_LRU_ENTRY_HAS_REFS(h) ((h)->refs > 0)
#define TAOS_LRU_ENTRY_REF(h)      (++(h)->refs)

static bool taosLRUEntryUnref(SLRUEntry *entry) {
  assert(entry->refs > 0);
  --entry->refs;
  return entry->refs == 0;
}

static void taosLRUEntryFree(SLRUEntry *entry) {
  assert(entry->refs == 0);

  if (entry->deleter) {
    (*entry->deleter)(entry->keyData, entry->keyLength, entry->value);
  }

  taosMemoryFree(entry);
}

typedef void (*_taos_lru_table_func_t)(SLRUEntry *entry);

struct SLRUEntryTable {
  int lengthBits;
  SLRUEntry **list;
  uint32_t elems;
  int maxLengthBits;
};

static int taosLRUEntryTableInit(SLRUEntryTable *table, int maxUpperHashBits) {
  table->lengthBits = 4;
  table->list = taosMemoryCalloc(1 << table->lengthBits, sizeof(SLRUEntry *));
  if (!table->list) {
    return -1;
  }

  table->elems = 0;
  table->maxLengthBits = maxUpperHashBits;

  return 0;
}

static void taosLRUEntryTableApply(SLRUEntryTable *table, _taos_lru_table_func_t func, uint32_t begin, uint32_t end) {
  for (uint32_t i = begin; i < end; ++i) {
    SLRUEntry *h = table->list[i];
    while (h) {
      SLRUEntry *n = h->nextHash;
      assert(TAOS_LRU_ENTRY_IN_CACHE(h));
      func(h);
      h = n;
    }
  }
}

static void taosLRUEntryTableFree(SLRUEntry *entry) {
  if (!TAOS_LRU_ENTRY_HAS_REFS(entry)) {
    taosLRUEntryFree(entry);
  }
}

static void taosLRUEntryTableCleanup(SLRUEntryTable *table) {
  taosLRUEntryTableApply(table, taosLRUEntryTableFree, 0, 1 << table->lengthBits);

  taosMemoryFree(table->list);
}

// Return the slot pointing at the matching entry, or the empty slot at the end of the chain.
static SLRUEntry **taosLRUEntryTableFindPtr(SLRUEntryTable *table, const void *key, size_t keyLen, uint32_t hash) {
  SLRUEntry **entry = &table->list[hash >> (32 - table->lengthBits)];
  while (*entry && ((*entry)->hash != hash || memcmp(key, (*entry)->keyData, keyLen) != 0)) {
    entry = &(*entry)->nextHash;
  }

  return entry;
}

// Double the bucket array and rehash every entry into the new buckets.
static void taosLRUEntryTableResize(SLRUEntryTable *table) {
  int lengthBits = table->lengthBits;
  if (lengthBits >= table->maxLengthBits) {
    return;
  }

  if (lengthBits >= 31) {
    return;
  }

  uint32_t oldLength = 1 << lengthBits;
  int newLengthBits = lengthBits + 1;
  SLRUEntry **newList = taosMemoryCalloc(1 << newLengthBits, sizeof(SLRUEntry *));
  if (!newList) {
    return;
  }
  uint32_t count = 0;
  for (uint32_t i = 0; i < oldLength; ++i) {
    SLRUEntry *entry = table->list[i];
    while (entry) {
      SLRUEntry *next = entry->nextHash;
      uint32_t hash = entry->hash;
      SLRUEntry **ptr = &newList[hash >> (32 - newLengthBits)];
      entry->nextHash = *ptr;
      *ptr = entry;
      entry = next;
      ++count;
    }
  }
  assert(table->elems == count);

  taosMemoryFree(table->list);
  table->list = newList;
  table->lengthBits = newLengthBits;
}

static SLRUEntry *taosLRUEntryTableLookup(SLRUEntryTable *table, const void *key, size_t keyLen, uint32_t hash) {
  return *taosLRUEntryTableFindPtr(table, key, keyLen, hash);
}

static SLRUEntry *taosLRUEntryTableInsert(SLRUEntryTable *table, SLRUEntry *entry) {
  SLRUEntry **ptr = taosLRUEntryTableFindPtr(table, entry->keyData, entry->keyLength, entry->hash);
  SLRUEntry *old = *ptr;
  entry->nextHash = (old == NULL) ? NULL : old->nextHash;
  *ptr = entry;
  if (old == NULL) {
    ++table->elems;
    if ((table->elems >> table->lengthBits) > 0) {
      taosLRUEntryTableResize(table);
    }
  }

  return old;
}

static SLRUEntry *taosLRUEntryTableRemove(SLRUEntryTable *table, const void *key, size_t keyLen, uint32_t hash) {
  SLRUEntry **entry = taosLRUEntryTableFindPtr(table, key, keyLen, hash);
  SLRUEntry *result = *entry;
  if (result) {
    *entry = result->nextHash;
    --table->elems;
  }

  return result;
}

struct SLRUCacheShard {
  size_t capacity;
  size_t highPriPoolUsage;
  bool strictCapacity;
  double highPriPoolRatio;
  double highPriPoolCapacity;
  SLRUEntry lru;
  SLRUEntry *lruLowPri;
  SLRUEntryTable table;
  size_t usage;     // Memory size for entries residing in the cache.
  size_t lruUsage;  // Memory size for entries residing only in the LRU list.
  TdThreadMutex mutex;
};

#define TAOS_LRU_CACHE_SHARD_HASH32(key, len) (MurmurHash3_32((key), (len)))

static void taosLRUCacheShardMaintainPoolSize(SLRUCacheShard *shard) {
  while (shard->highPriPoolUsage > shard->highPriPoolCapacity) {
    shard->lruLowPri = shard->lruLowPri->next;
    assert(shard->lruLowPri != &shard->lru);
    TAOS_LRU_ENTRY_SET_IN_HIGH_POOL(shard->lruLowPri, false);

    assert(shard->highPriPoolUsage >= shard->lruLowPri->totalCharge);
    shard->highPriPoolUsage -= shard->lruLowPri->totalCharge;
  }
}

// High-priority or previously hit entries go to the MRU end of the circular list;
// everything else joins the low-priority pool right after lruLowPri.
static void taosLRUCacheShardLRUInsert(SLRUCacheShard *shard, SLRUEntry *e) {
  assert(e->next == NULL);
  assert(e->prev == NULL);

  if (shard->highPriPoolRatio > 0 && (TAOS_LRU_ENTRY_IS_HIGH_PRI(e) || TAOS_LRU_ENTRY_HAS_HIT(e))) {
    e->next = &shard->lru;
    e->prev = shard->lru.prev;

    e->prev->next = e;
    e->next->prev = e;

    TAOS_LRU_ENTRY_SET_IN_HIGH_POOL(e, true);
    shard->highPriPoolUsage += e->totalCharge;
    taosLRUCacheShardMaintainPoolSize(shard);
  } else {
    e->next = shard->lruLowPri->next;
    e->prev = shard->lruLowPri;

    e->prev->next = e;
    e->next->prev = e;

    TAOS_LRU_ENTRY_SET_IN_HIGH_POOL(e, false);
    shard->lruLowPri = e;
  }

  shard->lruUsage += e->totalCharge;
}

static void taosLRUCacheShardLRURemove(SLRUCacheShard *shard, SLRUEntry *e) {
  assert(e->next);
  assert(e->prev);

  if (shard->lruLowPri == e) {
    shard->lruLowPri = e->prev;
  }
  e->next->prev = e->prev;
  e->prev->next = e->next;
  e->prev = e->next = NULL;

  assert(shard->lruUsage >= e->totalCharge);
  shard->lruUsage -= e->totalCharge;
  if (TAOS_LRU_ENTRY_IN_HIGH_POOL(e)) {
    assert(shard->highPriPoolUsage >= e->totalCharge);
    shard->highPriPoolUsage -= e->totalCharge;
  }
}

static void taosLRUCacheShardEvictLRU(SLRUCacheShard *shard, size_t charge, SArray *deleted) {
  while (shard->usage + charge > shard->capacity && shard->lru.next != &shard->lru) {
    SLRUEntry *old = shard->lru.next;
    assert(TAOS_LRU_ENTRY_IN_CACHE(old) && !TAOS_LRU_ENTRY_HAS_REFS(old));

    taosLRUCacheShardLRURemove(shard, old);
    taosLRUEntryTableRemove(&shard->table, old->keyData, old->keyLength, old->hash);

    TAOS_LRU_ENTRY_SET_IN_CACHE(old, false);
    assert(shard->usage >= old->totalCharge);
    shard->usage -= old->totalCharge;

    taosArrayPush(deleted, &old);
  }
}

static void taosLRUCacheShardSetCapacity(SLRUCacheShard *shard, size_t capacity) {
  SArray *lastReferenceList = taosArrayInit(16, POINTER_BYTES);

  taosThreadMutexLock(&shard->mutex);

  shard->capacity = capacity;
  shard->highPriPoolCapacity = capacity * shard->highPriPoolRatio;
  taosLRUCacheShardEvictLRU(shard, 0, lastReferenceList);

  taosThreadMutexUnlock(&shard->mutex);

  for (int i = 0; i < taosArrayGetSize(lastReferenceList); ++i) {
    SLRUEntry *entry = taosArrayGetP(lastReferenceList, i);
    taosLRUEntryFree(entry);
  }
  taosArrayDestroy(lastReferenceList);
}

static int taosLRUCacheShardInit(SLRUCacheShard *shard, size_t capacity, bool strict, double highPriPoolRatio,
                                 int maxUpperHashBits) {
  if (taosLRUEntryTableInit(&shard->table, maxUpperHashBits) < 0) {
    return -1;
  }

  taosThreadMutexInit(&shard->mutex, NULL);

  shard->capacity = 0;
  shard->highPriPoolUsage = 0;
  shard->strictCapacity = strict;
  shard->highPriPoolRatio = highPriPoolRatio;
  shard->highPriPoolCapacity = 0;

  shard->usage = 0;
  shard->lruUsage = 0;

  shard->lru.next = &shard->lru;
  shard->lru.prev = &shard->lru;
  shard->lruLowPri = &shard->lru;

  taosLRUCacheShardSetCapacity(shard, capacity);

  return 0;
}

static void taosLRUCacheShardCleanup(SLRUCacheShard *shard) {
  taosThreadMutexDestroy(&shard->mutex);

  taosLRUEntryTableCleanup(&shard->table);
}

static LRUStatus taosLRUCacheShardInsertEntry(SLRUCacheShard *shard, SLRUEntry *e, LRUHandle **handle,
                                              bool freeOnFail) {
  LRUStatus status = TAOS_LRU_STATUS_OK;
  SArray *lastReferenceList = taosArrayInit(16, POINTER_BYTES);

  taosThreadMutexLock(&shard->mutex);

  taosLRUCacheShardEvictLRU(shard, e->totalCharge, lastReferenceList);

  if (shard->usage + e->totalCharge > shard->capacity && (shard->strictCapacity || handle == NULL)) {
    TAOS_LRU_ENTRY_SET_IN_CACHE(e, false);
    if (handle == NULL) {
      taosArrayPush(lastReferenceList, &e);
    } else {
      if (freeOnFail) {
        taosMemoryFree(e);

        *handle = NULL;
      }

      status = TAOS_LRU_STATUS_INCOMPLETE;
    }
  } else {
    SLRUEntry *old = taosLRUEntryTableInsert(&shard->table, e);
    shard->usage += e->totalCharge;
    if (old != NULL) {
      status = TAOS_LRU_STATUS_OK_OVERWRITTEN;

      assert(TAOS_LRU_ENTRY_IN_CACHE(old));
      TAOS_LRU_ENTRY_SET_IN_CACHE(old, false);
      if (!TAOS_LRU_ENTRY_HAS_REFS(old)) {
        taosLRUCacheShardLRURemove(shard, old);
        assert(shard->usage >= old->totalCharge);
        shard->usage -= old->totalCharge;

        taosArrayPush(lastReferenceList, &old);
      }
    }
    if (handle == NULL) {
      taosLRUCacheShardLRUInsert(shard, e);
    } else {
      if (!TAOS_LRU_ENTRY_HAS_REFS(e)) {
        TAOS_LRU_ENTRY_REF(e);
      }

      *handle = (LRUHandle *)e;
    }
  }

  taosThreadMutexUnlock(&shard->mutex);

  for (int i = 0; i < taosArrayGetSize(lastReferenceList); ++i) {
    SLRUEntry *entry = taosArrayGetP(lastReferenceList, i);

    taosLRUEntryFree(entry);
  }
  taosArrayDestroy(lastReferenceList);

  return status;
}

static LRUStatus taosLRUCacheShardInsert(SLRUCacheShard *shard, const void *key, size_t keyLen, uint32_t hash,
                                         void *value, size_t charge, _taos_lru_deleter_t deleter, LRUHandle **handle,
                                         LRUPriority priority) {
  SLRUEntry *e = taosMemoryCalloc(1, sizeof(SLRUEntry) - 1 + keyLen);
  if (!e) {
    return TAOS_LRU_STATUS_FAIL;
  }

  e->value = value;
  e->flags = 0;
  e->deleter = deleter;
  e->keyLength = keyLen;
  e->hash = hash;
  e->refs = 0;
  e->next = e->prev = NULL;
  TAOS_LRU_ENTRY_SET_IN_CACHE(e, true);

  TAOS_LRU_ENTRY_SET_PRIORITY(e, priority);
  memcpy(e->keyData, key, keyLen);
  // TODO: e->CalcTotalCharge(charge, metadataChargePolicy);
  e->totalCharge = charge;

  return taosLRUCacheShardInsertEntry(shard, e, handle, true);
}

static LRUHandle *taosLRUCacheShardLookup(SLRUCacheShard *shard, const void *key, size_t keyLen, uint32_t hash) {
  SLRUEntry *e = NULL;

  taosThreadMutexLock(&shard->mutex);
  e = taosLRUEntryTableLookup(&shard->table, key, keyLen, hash);
  if (e != NULL) {
    assert(TAOS_LRU_ENTRY_IN_CACHE(e));
    if (!TAOS_LRU_ENTRY_HAS_REFS(e)) {
      taosLRUCacheShardLRURemove(shard, e);
    }
    TAOS_LRU_ENTRY_REF(e);
    TAOS_LRU_ENTRY_SET_HIT(e);
  }

  taosThreadMutexUnlock(&shard->mutex);

  return (LRUHandle *)e;
}

static void taosLRUCacheShardErase(SLRUCacheShard *shard, const void *key, size_t keyLen, uint32_t hash) {
  bool lastReference = false;
  taosThreadMutexLock(&shard->mutex);

  SLRUEntry *e = taosLRUEntryTableRemove(&shard->table, key, keyLen, hash);
  if (e != NULL) {
    assert(TAOS_LRU_ENTRY_IN_CACHE(e));
    TAOS_LRU_ENTRY_SET_IN_CACHE(e, false);
    if (!TAOS_LRU_ENTRY_HAS_REFS(e)) {
      taosLRUCacheShardLRURemove(shard, e);

      assert(shard->usage >= e->totalCharge);
      shard->usage -= e->totalCharge;
      lastReference = true;
    }
  }

  taosThreadMutexUnlock(&shard->mutex);

  if (lastReference) {
    taosLRUEntryFree(e);
  }
}

static void taosLRUCacheShardEraseUnrefEntries(SLRUCacheShard *shard) {
  SArray *lastReferenceList = taosArrayInit(16, POINTER_BYTES);

  taosThreadMutexLock(&shard->mutex);

  while (shard->lru.next != &shard->lru) {
    SLRUEntry *old = shard->lru.next;
    assert(TAOS_LRU_ENTRY_IN_CACHE(old) && !TAOS_LRU_ENTRY_HAS_REFS(old));
    taosLRUCacheShardLRURemove(shard, old);
    taosLRUEntryTableRemove(&shard->table, old->keyData, old->keyLength, old->hash);
    TAOS_LRU_ENTRY_SET_IN_CACHE(old, false);
    assert(shard->usage >= old->totalCharge);
    shard->usage -= old->totalCharge;

    taosArrayPush(lastReferenceList, &old);
  }

  taosThreadMutexUnlock(&shard->mutex);

  for (int i = 0; i < taosArrayGetSize(lastReferenceList); ++i) {
    SLRUEntry *entry = taosArrayGetP(lastReferenceList, i);

    taosLRUEntryFree(entry);
  }

  taosArrayDestroy(lastReferenceList);
}

static bool taosLRUCacheShardRef(SLRUCacheShard *shard, LRUHandle *handle) {
  SLRUEntry *e = (SLRUEntry *)handle;
  taosThreadMutexLock(&shard->mutex);

  assert(TAOS_LRU_ENTRY_HAS_REFS(e));
  TAOS_LRU_ENTRY_REF(e);

  taosThreadMutexUnlock(&shard->mutex);

  return true;
}

static bool taosLRUCacheShardRelease(SLRUCacheShard *shard, LRUHandle *handle, bool eraseIfLastRef) {
  if (handle == NULL) {
    return false;
  }

  SLRUEntry *e = (SLRUEntry *)handle;
  bool lastReference = false;

  taosThreadMutexLock(&shard->mutex);

  lastReference = taosLRUEntryUnref(e);
  if (lastReference && TAOS_LRU_ENTRY_IN_CACHE(e)) {
    if (shard->usage > shard->capacity || eraseIfLastRef) {
      assert(shard->lru.next == &shard->lru || eraseIfLastRef);

      taosLRUEntryTableRemove(&shard->table, e->keyData, e->keyLength, e->hash);
      TAOS_LRU_ENTRY_SET_IN_CACHE(e, false);
    } else {
      taosLRUCacheShardLRUInsert(shard, e);

      lastReference = false;
    }
  }

  if (lastReference && e->value) {
    assert(shard->usage >= e->totalCharge);
    shard->usage -= e->totalCharge;
  }

  taosThreadMutexUnlock(&shard->mutex);

  if (lastReference) {
    taosLRUEntryFree(e);
  }

  return lastReference;
}

static size_t taosLRUCacheShardGetUsage(SLRUCacheShard *shard) {
  size_t usage = 0;

  taosThreadMutexLock(&shard->mutex);
  usage = shard->usage;
  taosThreadMutexUnlock(&shard->mutex);

  return usage;
}

static size_t taosLRUCacheShardGetPinnedUsage(SLRUCacheShard *shard) {
  size_t usage = 0;

  taosThreadMutexLock(&shard->mutex);

  assert(shard->usage >= shard->lruUsage);
  usage = shard->usage - shard->lruUsage;

  taosThreadMutexUnlock(&shard->mutex);

  return usage;
}

static void taosLRUCacheShardSetStrictCapacity(SLRUCacheShard *shard, bool strict) {
  taosThreadMutexLock(&shard->mutex);

  shard->strictCapacity = strict;

  taosThreadMutexUnlock(&shard->mutex);
}

struct SShardedCache {
  uint32_t shardMask;
  TdThreadMutex capacityMutex;
  size_t capacity;
  bool strictCapacity;
  uint64_t lastId;  // atomic var for last id
};

struct SLRUCache {
  SShardedCache shardedCache;
  SLRUCacheShard *shards;
  int numShards;
};

// Aim for at least 512 KB per shard, capped at 2^6 shards.
static int getDefaultCacheShardBits(size_t capacity) {
  int numShardBits = 0;
  size_t minShardSize = 512 * 1024;
  size_t numShards = capacity / minShardSize;
  while (numShards >>= 1) {
    if (++numShardBits >= 6) {
      return numShardBits;
    }
  }

  return numShardBits;
}

SLRUCache *taosLRUCacheInit(size_t capacity, int numShardBits, double highPriPoolRatio) {
  if (numShardBits >= 20) {
    return NULL;
  }
  if (highPriPoolRatio < 0.0 || highPriPoolRatio > 1.0) {
    return NULL;
  }
  SLRUCache *cache = taosMemoryCalloc(1, sizeof(SLRUCache));
  if (!cache) {
    return NULL;
  }

  if (numShardBits < 0) {
    numShardBits = getDefaultCacheShardBits(capacity);
  }

  int numShards = 1 << numShardBits;
  cache->shards = taosMemoryCalloc(numShards, sizeof(SLRUCacheShard));
  if (!cache->shards) {
    taosMemoryFree(cache);

    return NULL;
  }

  bool strictCapacity = true;
  size_t perShard = (capacity + (numShards - 1)) / numShards;
  for (int i = 0; i < numShards; ++i) {
    taosLRUCacheShardInit(&cache->shards[i], perShard, strictCapacity, highPriPoolRatio, 32 - numShardBits);
  }

  cache->numShards = numShards;

  cache->shardedCache.shardMask = (1 << numShardBits) - 1;
  cache->shardedCache.strictCapacity = strictCapacity;
  cache->shardedCache.capacity = capacity;
  cache->shardedCache.lastId = 1;

  taosThreadMutexInit(&cache->shardedCache.capacityMutex, NULL);

  return cache;
}

void taosLRUCacheCleanup(SLRUCache *cache) {
  if (cache) {
    if (cache->shards) {
      int numShards = cache->numShards;
      assert(numShards > 0);
      for (int i = 0; i < numShards; ++i) {
        taosLRUCacheShardCleanup(&cache->shards[i]);
      }
      taosMemoryFree(cache->shards);
      cache->shards = NULL;
    }

    taosThreadMutexDestroy(&cache->shardedCache.capacityMutex);

    taosMemoryFree(cache);
  }
}

LRUStatus taosLRUCacheInsert(SLRUCache *cache, const void *key, size_t keyLen, void *value, size_t charge,
                             _taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority) {
  uint32_t hash = TAOS_LRU_CACHE_SHARD_HASH32(key, keyLen);
  uint32_t shardIndex = hash & cache->shardedCache.shardMask;

  return taosLRUCacheShardInsert(&cache->shards[shardIndex], key, keyLen, hash, value, charge, deleter, handle,
                                 priority);
}

LRUHandle *taosLRUCacheLookup(SLRUCache *cache, const void *key, size_t keyLen) {
  uint32_t hash = TAOS_LRU_CACHE_SHARD_HASH32(key, keyLen);
  uint32_t shardIndex = hash & cache->shardedCache.shardMask;

  return taosLRUCacheShardLookup(&cache->shards[shardIndex], key, keyLen, hash);
}

void taosLRUCacheErase(SLRUCache *cache, const void *key, size_t keyLen) {
  uint32_t hash = TAOS_LRU_CACHE_SHARD_HASH32(key, keyLen);
  uint32_t shardIndex = hash & cache->shardedCache.shardMask;

  return taosLRUCacheShardErase(&cache->shards[shardIndex], key, keyLen, hash);
}

void taosLRUCacheEraseUnrefEntries(SLRUCache *cache) {
  int numShards = cache->numShards;
  for (int i = 0; i < numShards; ++i) {
    taosLRUCacheShardEraseUnrefEntries(&cache->shards[i]);
  }
}

bool taosLRUCacheRef(SLRUCache *cache, LRUHandle *handle) {
  if (handle == NULL) {
    return false;
  }

  uint32_t hash = ((SLRUEntry *)handle)->hash;
  uint32_t shardIndex = hash & cache->shardedCache.shardMask;

  return taosLRUCacheShardRef(&cache->shards[shardIndex], handle);
}

bool taosLRUCacheRelease(SLRUCache *cache, LRUHandle *handle, bool eraseIfLastRef) {
  if (handle == NULL) {
    return false;
  }

  uint32_t hash = ((SLRUEntry *)handle)->hash;
  uint32_t shardIndex = hash & cache->shardedCache.shardMask;

  return taosLRUCacheShardRelease(&cache->shards[shardIndex], handle, eraseIfLastRef);
}

void *taosLRUCacheValue(SLRUCache *cache, LRUHandle *handle) { return ((SLRUEntry *)handle)->value; }

size_t taosLRUCacheGetUsage(SLRUCache *cache) {
  size_t usage = 0;

  for (int i = 0; i < cache->numShards; ++i) {
    usage += taosLRUCacheShardGetUsage(&cache->shards[i]);
  }

  return usage;
}

size_t taosLRUCacheGetPinnedUsage(SLRUCache *cache) {
  size_t usage = 0;

  for (int i = 0; i < cache->numShards; ++i) {
    usage += taosLRUCacheShardGetPinnedUsage(&cache->shards[i]);
  }

  return usage;
}

void taosLRUCacheSetCapacity(SLRUCache *cache, size_t capacity) {
  uint32_t numShards = cache->numShards;
  size_t perShard = (capacity + (numShards - 1)) / numShards;

  taosThreadMutexLock(&cache->shardedCache.capacityMutex);

  for (int i = 0; i < numShards; ++i) {
    taosLRUCacheShardSetCapacity(&cache->shards[i], perShard);
  }

  cache->shardedCache.capacity = capacity;

  taosThreadMutexUnlock(&cache->shardedCache.capacityMutex);
}

size_t taosLRUCacheGetCapacity(SLRUCache *cache) {
  size_t capacity = 0;

  taosThreadMutexLock(&cache->shardedCache.capacityMutex);

  capacity = cache->shardedCache.capacity;

  taosThreadMutexUnlock(&cache->shardedCache.capacityMutex);

  return capacity;
}

void taosLRUCacheSetStrictCapacity(SLRUCache *cache, bool strict) {
  uint32_t numShards = cache->numShards;

  taosThreadMutexLock(&cache->shardedCache.capacityMutex);

  for (int i = 0; i < numShards; ++i) {
    taosLRUCacheShardSetStrictCapacity(&cache->shards[i], strict);
  }

  cache->shardedCache.strictCapacity = strict;

  taosThreadMutexUnlock(&cache->shardedCache.capacityMutex);
}

bool taosLRUCacheIsStrictCapacity(SLRUCache *cache) {
  bool strict = false;

  taosThreadMutexLock(&cache->shardedCache.capacityMutex);

  strict = cache->shardedCache.strictCapacity;

  taosThreadMutexUnlock(&cache->shardedCache.capacityMutex);

  return strict;
}
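For reference, a minimal usage sketch of the cache above; it is not part of the commit. It assumes tlrucache.h declares the symbols defined in this file, that the _taos_lru_deleter_t typedef is void (*)(const void *key, size_t keyLen, void *value), and that a TAOS_LRU_PRIORITY_LOW value exists alongside the TAOS_LRU_PRIORITY_HIGH referenced by TAOS_LRU_ENTRY_SET_PRIORITY. Passing handle == NULL to taosLRUCacheInsert leaves the entry unpinned in the LRU list; passing a handle pins it until released.

#include "tlrucache.h"

// Hypothetical deleter: called once the last reference to an entry is dropped.
static void demoDeleter(const void *key, size_t keyLen, void *value) {
  (void)key;
  (void)keyLen;
  taosMemoryFree(value);
}

static void demoLRUCacheUsage() {
  // 1 MB cache; a negative numShardBits selects the default shard count,
  // and half the capacity is reserved for the high-priority pool.
  SLRUCache *cache = taosLRUCacheInit(1024 * 1024, -1, 0.5);

  int *value = taosMemoryMalloc(sizeof(int));
  *value = 42;
  // handle == NULL: the entry is owned by the LRU list, not pinned by the caller.
  taosLRUCacheInsert(cache, "key", 3, value, sizeof(int), demoDeleter, NULL, TAOS_LRU_PRIORITY_LOW);

  LRUHandle *handle = taosLRUCacheLookup(cache, "key", 3);
  if (handle) {
    int *cached = taosLRUCacheValue(cache, handle);
    (void)cached;
    taosLRUCacheRelease(cache, handle, false);  // drop our reference, keep the entry cached
  }

  taosLRUCacheCleanup(cache);
}
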
@ -287,6 +287,88 @@ class TDDnode:
            return ""
        return paths[0]

    def starttaosd(self):
        binPath = self.getPath()

        if (binPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found: %s" % binPath)

        if self.deployed == 0:
            tdLog.exit("dnode:%d is not deployed" % (self.index))

        if self.valgrind == 0:
            if platform.system().lower() == 'windows':
                cmd = "mintty -h never -w hide %s -c %s" % (
                    binPath, self.cfgDir)
            else:
                cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
                    binPath, self.cfgDir)
        else:
            valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir

            if platform.system().lower() == 'windows':
                cmd = "mintty -h never -w hide %s %s -c %s" % (
                    valgrindCmdline, binPath, self.cfgDir)
            else:
                cmd = "nohup %s %s -c %s 2>&1 & " % (
                    valgrindCmdline, binPath, self.cfgDir)

        print(cmd)

        if (not self.remoteIP == ""):
            self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))
            self.running = 1
        else:
            if os.system(cmd) != 0:
                tdLog.exit(cmd)
            self.running = 1
            print("dnode:%d is running with %s " % (self.index, cmd))
            tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
            if self.valgrind == 0:
                time.sleep(0.1)
                key1 = 'from offline to online'
                bkey1 = bytes(key1, encoding="utf8")
                key2 = 'TDengine initialized successfully'
                bkey2 = bytes(key2, encoding="utf8")
                logFile = self.logDir + "/taosdlog.0"
                i = 0
                while not os.path.exists(logFile):
                    sleep(0.1)
                    i += 1
                    if i > 10:
                        break
                tailCmdStr = 'tail -f '
                if platform.system().lower() == 'windows':
                    tailCmdStr = 'tail -n +0 -f '
                popen = subprocess.Popen(
                    tailCmdStr + logFile,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    shell=True)
                pid = popen.pid
                # print('Popen.pid:' + str(pid))
                timeout = time.time() + 60 * 2
                # while True:
                #     line = popen.stdout.readline().strip()
                #     print(line)
                #     if bkey1 in line:
                #         popen.kill()
                #         break
                #     elif bkey2 in line:
                #         popen.kill()
                #         break
                #     if time.time() > timeout:
                #         print(time.time(),timeout)
                #         tdLog.exit('wait too long for taosd start')
                tdLog.debug("the dnode:%d has been started." % (self.index))
            else:
                tdLog.debug(
                    "wait 10 seconds for the dnode:%d to start." %
                    (self.index))
                time.sleep(10)

    def start(self):
        binPath = self.getPath()

@ -423,6 +505,35 @@ class TDDnode:
            self.running = 0
            tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))

    def stoptaosd(self):
        if (not self.remoteIP == ""):
            self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
            tdLog.info("stop dnode%d"%self.index)
            return
        if self.valgrind == 0:
            toBeKilled = "taosd"
        else:
            toBeKilled = "valgrind.bin"

        if self.running != 0:
            psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}'" % (toBeKilled,self.index)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

            while(processID):
                killCmd = "kill -INT %s > /dev/null 2>&1" % processID
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(
                    psCmd, shell=True).decode("utf-8")
            if self.valgrind:
                time.sleep(2)

            self.running = 0
            tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))

    def forcestop(self):
        if (not self.remoteIP == ""):
            self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].forcestop()"%(self.index-1,self.index-1))

@ -535,6 +646,15 @@ class TDDnodes:
        self.check(index)
        self.dnodes[index - 1].cfg(option, value)

    def starttaosd(self, index):
        self.check(index)
        self.dnodes[index - 1].starttaosd()

    def stoptaosd(self, index):
        self.check(index)
        self.dnodes[index - 1].stoptaosd()

    def start(self, index):
        self.check(index)
        self.dnodes[index - 1].start()
@ -22,7 +22,7 @@

# ---- dnode
./test.sh -f tsim/dnode/create_dnode.sim
./test.sh -f tsim/dnode/drop_dnode_mnode.sim
./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim

# ---- insert
./test.sh -f tsim/insert/basic0.sim

@ -60,6 +60,7 @@
./test.sh -f tsim/mnode/basic2.sim
./test.sh -f tsim/mnode/basic3.sim
./test.sh -f tsim/mnode/basic4.sim
./test.sh -f tsim/mnode/basic5.sim

# ---- show
./test.sh -f tsim/show/basic.sim
@ -182,16 +182,5 @@ if $rows != 15 then
  return -1
endi

print =============== drop dnode
#sql drop dnode 2;
#sql show dnodes;
#if $rows != 1 then
#  return -1
#endi

#if $data00 != 1 then
#  return -1
#endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
@ -0,0 +1,98 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sql connect

print =============== step1 create dnode2
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300

$x = 0
step1:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    print ====> dnode not online!
    return -1
  endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 3 then
  return -1
endi
if $data(1)[4] != ready then
  goto step1
endi
if $data(2)[4] != ready then
  goto step1
endi
if $data(3)[4] != offline then
  goto step1
endi

print =============== step2 drop dnode 3
sql_error drop dnode 1
sql drop dnode 3

sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 2 then
  return -1
endi
if $data(1)[4] != ready then
  goto step1
endi
if $data(2)[4] != ready then
  goto step1
endi

print =============== step3: create mnode on dnode 2
sql create mnode on dnode 2
$x = 0
step3:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    return -1
  endi
sql show mnodes -x step3
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
if $data(1)[2] != leader then
  goto step3
endi
if $data(2)[2] != follower then
  goto step3
endi

print =============== step4: drop dnode 2
sql drop dnode 2

print show dnodes;
sql show dnodes;
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
if $rows != 1 then
  return -1
endi
if $data00 != 1 then
  return -1
endi

print show mnodes;
sql show mnodes
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
if $rows != 1 then
  return -1
endi
if $data(1)[2] != leader then
  return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
@ -1,52 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sql connect

print =============== step1 create dnode2
sql create dnode $hostname port 7200

$x = 0
step1:
  $ = $x + 1
  sleep 1000
  if $x == 10 then
    print ====> dnode not ready!
    return -1
  endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 2 then
  return -1
endi
if $data(1)[4] != ready then
  goto step1
endi
if $data(2)[4] != ready then
  goto step1
endi

sql create dnode $hostname port 7300
sql drop dnode 3
sql_error drop dnode 1

print =============== step2: create mnode
sql create mnode on dnode 2

print =============== step3: drop dnode 3
sql drop dnode 2
sql show dnodes;
if $rows != 1 then
  return -1
endi

if $data00 != 1 then
  return -1
endi

return
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
@ -117,7 +117,7 @@ if $data(3)[3] != ready then
endi

print =============== step5: drop mnode 3 and stop dnode3
system sh/exec.sh -n dnode3 -s stop
system sh/exec.sh -n dnode3 -s stop -x SIGKILL
sql_error drop mnode on dnode 3

$x = 0
@ -0,0 +1,349 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/exec.sh -n dnode1 -s start
sql connect

print =============== step1: create dnodes
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
sql create dnode $hostname port 7400

$x = 0
step1:
  $x = $x + 1
  sleep 1000
  if $x == 5 then
    return -1
  endi
sql show dnodes
if $data(1)[4] != ready then
  goto step1
endi

print =============== step2: create dnodes - with error
sql_error create mnode on dnode 1;
sql_error create mnode on dnode 2;
sql_error create mnode on dnode 3;
sql_error create mnode on dnode 4;
sql_error create mnode on dnode 5;
sql_error create mnode on dnode 6;

print =============== step3: create mnode 2 and 3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
$x = 0
step3:
  $x = $x + 1
  sleep 1000
  if $x == 5 then
    return -1
  endi
sql show dnodes
if $data(2)[4] != ready then
  goto step3
endi
if $data(3)[4] != ready then
  goto step3
endi
if $data(4)[4] != ready then
  goto step3
endi

sql create mnode on dnode 2
sql create mnode on dnode 3

$x = 0
step31:
  $x = $x + 1
  sleep 1000
  if $x == 50 then
    return -1
  endi
sql show mnodes
if $data(1)[2] != leader then
  goto step31
endi
if $data(2)[2] != follower then
  goto step31
endi
if $data(3)[2] != follower then
  goto step31
endi

print =============== step4: create dnodes - with error
sql_error create mnode on dnode 1
sql_error create mnode on dnode 2;
sql_error create mnode on dnode 3;
sql_error create mnode on dnode 4;
sql_error create mnode on dnode 5;
sql_error create mnode on dnode 6;

print =============== step5: drop mnodes - with error
sql_error drop mnode on dnode 1
sql_error drop mnode on dnode 4
sql_error drop mnode on dnode 5
sql_error drop mnode on dnode 6

system sh/exec.sh -n dnode2 -s stop -x SIGKILL
$x = 0
step5:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    return -1
  endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
  goto step5
endi
if $data(2)[4] != offline then
  goto step5
endi
if $data(3)[4] != ready then
  goto step5
endi
if $data(4)[4] != ready then
  goto step5
endi

sql_error drop mnode on dnode 2

system sh/exec.sh -n dnode2 -s start
$x = 0
step51:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    return -1
  endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
  goto step51
endi
if $data(2)[4] != ready then
  goto step51
endi
if $data(3)[4] != ready then
  goto step51
endi
if $data(4)[4] != ready then
  goto step51
endi

print =============== step6: stop mnode1
system sh/exec.sh -n dnode1 -s stop -x SIGKILL
sql_error drop mnode on dnode 1

$x = 0
step61:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    return -1
  endi
sql show mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(2)[2] == leader then
  $leaderNum = 1
endi
if $data(3)[2] == leader then
  $leaderNum = 1
endi
if $leaderNum != 1 then
  goto step61
endi

print =============== step7: start mnode1 and wait it online
system sh/exec.sh -n dnode1 -s start

$x = 0
step71:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    return -1
  endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
  goto step71
endi
if $data(2)[4] != ready then
  goto step71
endi
if $data(3)[4] != ready then
  goto step71
endi
if $data(4)[4] != ready then
  goto step71
endi

print =============== step8: stop mnode1 and drop it
system sh/exec.sh -n dnode1 -s stop -x SIGKILL
sql_error drop mnode on dnode 1

$x = 0
step81:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    return -1
  endi
sql show mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
  $leaderNum = 1
endi
if $data(2)[2] == leader then
  $leaderNum = 1
endi
if $data(3)[2] == leader then
  $leaderNum = 1
endi
if $leaderNum != 1 then
  goto step81
endi
if $data(1)[3] != dropping then
  goto step81
endi

print =============== step9: start mnode1 and wait it dropped
system sh/exec.sh -n dnode1 -s start

$x = 0
step91:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    return -1
  endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
  goto step91
endi
if $data(2)[4] != ready then
  goto step91
endi
if $data(3)[4] != ready then
  goto step91
endi
if $data(4)[4] != ready then
  goto step91
endi

$x = 0
step92:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    return -1
  endi
sql show mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
  $leaderNum = 1
endi
if $data(2)[2] == leader then
  $leaderNum = 1
endi
if $data(3)[2] == leader then
  $leaderNum = 1
endi
if $leaderNum != 1 then
  goto step92
endi
if $rows != 2 then
  goto step92
endi

print =============== stepa: create mnode1 again
sql create mnode on dnode 1
$x = 0
stepa:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    return -1
  endi
sql show mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
  $leaderNum = 1
endi
if $data(2)[2] == leader then
  $leaderNum = 1
endi
if $data(3)[2] == leader then
  $leaderNum = 1
endi
if $leaderNum == 0 then
  goto stepa
endi
if $leaderNum != 1 then
  return -1
endi

$x = 0
stepb:
  $x = $x + 1
  sleep 1000
  if $x == 10 then
    print ====> dnode not ready!
    return -1
  endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $rows != 4 then
  return -1
endi
if $data(1)[4] != ready then
  goto stepb
endi
if $data(2)[4] != ready then
  goto stepb
endi
if $data(3)[4] != ready then
  goto stepb
endi
if $data(4)[4] != ready then
  goto stepb
endi

system sh/exec.sh -n dnode1 -s stop
system sh/exec.sh -n dnode2 -s stop
system sh/exec.sh -n dnode3 -s stop
system sh/exec.sh -n dnode4 -s stop
@ -231,8 +231,10 @@ sql use test3;
sql create table t1(ts timestamp, a int, b int , c int, d double);
sql create stream streams3 trigger at_once watermark 1d into streamt3 as select _wstartts, min(b), a,c from t1 session(ts,10s);
sql create stream streams4 trigger at_once watermark 1d into streamt4 as select _wstartts, max(b), a,c from t1 session(ts,10s);
sql create stream streams5 trigger at_once watermark 1d into streamt5 as select _wstartts, max(b), a,c from t1 session(ts,10s);
sql create stream streams6 trigger at_once watermark 1d into streamt6 as select _wstartts, max(b), a,c from t1 session(ts,10s);
sql create stream streams5 trigger at_once watermark 1d into streamt5 as select _wstartts, top(b,3), a,c from t1 session(ts,10s);
sql create stream streams6 trigger at_once watermark 1d into streamt6 as select _wstartts, bottom(b,3), a,c from t1 session(ts,10s);
sql create stream streams7 trigger at_once watermark 1d into streamt7 as select _wstartts, spread(a), elapsed(ts), hyperloglog(a) from t1 session(ts,10s);
sql create stream streams8 trigger at_once watermark 1d into streamt8 as select _wstartts, histogram(a,"user_input", "[1,3,5,7]", 1), histogram(a,"user_input", "[1,3,5,7]", 0) from t1 session(ts,10s);
sql insert into t1 values(1648791213001,1,1,1,1.0);
sql insert into t1 values(1648791213002,2,3,2,3.4);
sql insert into t1 values(1648791213003,4,9,3,4.8);

@ -279,4 +281,16 @@ if $rows == 0 then
  goto loop3
endi

sql select * from streamt7;
if $rows == 0 then
  print ======$rows
  goto loop3
endi

sql select * from streamt8;
if $rows == 0 then
  print ======$rows
  goto loop3
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT
@ -274,27 +274,9 @@ class TDTestCase:
|
|||
|
||||
return
|
||||
|
||||
# test case2 base:insert data
|
||||
def test_case2(self):
|
||||
|
||||
tdLog.debug("-----muti-thread insert data test------- ")
|
||||
# drop database
|
||||
tdSql.execute("drop database if exists db1")
|
||||
tdSql.execute("drop database if exists db4")
|
||||
tdSql.execute("drop database if exists db6")
|
||||
tdSql.execute("drop database if exists db8")
|
||||
tdSql.execute("drop database if exists db12")
|
||||
tdSql.execute("drop database if exists db16")
|
||||
|
||||
#create database and tables;
|
||||
|
||||
tdSql.execute("create database db1 vgroups 1")
|
||||
self.create_tables("db1", "stb1", 1*100)
|
||||
self.insert_data("db1", "stb1", self.ts, 1*50,1*10000)
|
||||
return
|
||||
|
||||
def test_case3(self):
|
||||
self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 1, 1*10)
|
||||
self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 8, 1*10)
|
||||
# self.taosBenchCreate("test209","no","db2", "stb2", 1, 8, 1*10000)
|
||||
|
||||
# self.taosBenchCreate("chenhaoran02","no","db1", "stb1", 1, 8, 1*10000)
|
||||
|
@ -349,22 +331,18 @@ class TDTestCase:
|
|||
# run case
|
||||
def run(self):
|
||||
|
||||
# # create database and tables。
|
||||
# self.test_case1()
|
||||
# tdLog.debug(" LIMIT test_case1 ............ [OK]")
|
||||
|
||||
# # taosBenchmark : create database and table
|
||||
# self.test_case2()
|
||||
# tdLog.debug(" LIMIT test_case2 ............ [OK]")
|
||||
# create database and tables。
|
||||
self.test_case1()
|
||||
tdLog.debug(" LIMIT test_case1 ............ [OK]")
|
||||
|
||||
# taosBenchmark:create database/table and insert data
|
||||
self.test_case3()
|
||||
tdLog.debug(" LIMIT test_case3 ............ [OK]")
|
||||
|
||||
|
||||
# # test qnode
|
||||
# self.test_case4()
|
||||
# tdLog.debug(" LIMIT test_case3 ............ [OK]")
|
||||
# test qnode
|
||||
self.test_case4()
|
||||
tdLog.debug(" LIMIT test_case3 ............ [OK]")
|
||||
|
||||
|
||||
return
|
||||
|
|
|
@ -83,6 +83,8 @@ class TDTestCase:
|
|||
|
||||
tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}")
|
||||
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
|
||||
if (platform.system().lower() == 'windows' and pre_data.dtype == 'int32'):
|
||||
pre_data = np.array(pre_data, dtype = 'int64')
|
||||
print("data is ", pre_data)
|
||||
pre_csum = np.cumsum(pre_data)
|
||||
tdSql.query(self.csum_query_form(
|
||||
|
@ -124,6 +126,8 @@ class TDTestCase:
|
|||
tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
|
||||
offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0
|
||||
pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
|
||||
if (platform.system().lower() == 'windows' and pre_result.dtype == 'int32'):
|
||||
pre_result = np.array(pre_result, dtype = 'int64')
|
||||
pre_csum = np.cumsum(pre_result)[offset_val:]
|
||||
tdSql.query(self.csum_query_form(
|
||||
col=col, alias=alias, table_expr=table_expr, condition=condition
|
||||
|
|
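The Windows-only `int64` cast added above matters because numpy's cumulative sum keeps the platform's default integer width, which is 32-bit on Windows. A minimal standalone illustration with made-up values:

```python
import numpy as np

# On Windows the platform C long is 32-bit, so cumsum over an int32 array
# can silently wrap around; widening to int64 first is portable everywhere.
vals = np.array([2_000_000_000, 2_000_000_000], dtype='int32')
print(np.cumsum(np.array(vals, dtype='int64')))  # [2000000000 4000000000]
```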
|
@ -83,6 +83,8 @@ class TDTestCase:
|
|||
|
||||
tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}")
|
||||
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
|
||||
if (platform.system().lower() == 'windows' and pre_data.dtype == 'int32'):
|
||||
pre_data = np.array(pre_data, dtype = 'int64')
|
||||
pre_diff = np.diff(pre_data)
|
||||
# trans precision for data
|
||||
tdSql.query(self.diff_query_form(
|
||||
|
@ -127,6 +129,8 @@ class TDTestCase:
|
|||
tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
|
||||
offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0
|
||||
pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
|
||||
if (platform.system().lower() == 'windows' and pre_result.dtype == 'int32'):
|
||||
pre_result = np.array(pre_result, dtype = 'int64')
|
||||
pre_diff = np.diff(pre_result)[offset_val:]
|
||||
tdSql.query(self.diff_query_form(
|
||||
col=col, alias=alias, table_expr=table_expr, condition=condition
|
||||
|
|
|
@ -245,6 +245,8 @@ class TDTestCase:
|
|||
|
||||
tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}")
|
||||
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
|
||||
if (platform.system().lower() == 'windows' and pre_data.dtype == 'int32'):
|
||||
pre_data = np.array(pre_data, dtype = 'int64')
|
||||
pre_mavg = np.convolve(pre_data, np.ones(k), "valid")/k
|
||||
tdSql.query(self.mavg_query_form(
|
||||
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
|
||||
|
@ -291,6 +293,8 @@ class TDTestCase:
|
|||
# print(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
|
||||
if not tdSql.queryResult:
|
||||
pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
|
||||
if (platform.system().lower() == 'windows' and pre_result.dtype == 'int32'):
|
||||
pre_result = np.array(pre_result, dtype = 'int64')
|
||||
|
||||
pre_mavg = np.convolve(pre_result, np.ones(k), "valid")[offset_val:]/k
|
||||
tdSql.query(self.mavg_query_form(
|
||||
|
|
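The expected value recomputed here with `np.convolve(..., "valid") / k` is just the k-point sliding mean. A small self-contained check with made-up inputs:

```python
import numpy as np

def mavg(x, k):
    # k-point moving average: each output is the mean of k consecutive
    # inputs, the same np.convolve form used by the test above.
    return np.convolve(np.asarray(x, dtype='float64'), np.ones(k), 'valid') / k

print(mavg([1, 2, 3, 4, 5], 3))  # -> [2. 3. 4.]
```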
|
@ -0,0 +1,148 @@
|
|||
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
|
||||
class MyDnodes(TDDnodes):
|
||||
def __init__(self ,dnodes_lists):
|
||||
super(MyDnodes,self).__init__()
|
||||
self.dnodes = dnodes_lists # dnode must be TDDnode instance
|
||||
self.simDeployed = False
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
self.depoly_cluster(5)
|
||||
self.master_dnode = self.TDDnodes.dnodes[0]
|
||||
self.host=self.master_dnode.cfgDict["fqdn"]
|
||||
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
|
||||
tdSql.init(conn1.cursor())
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
testCluster = False
|
||||
valgrind = 0
|
||||
hostname = socket.gethostname()
|
||||
dnodes = []
|
||||
start_port = 6030
|
||||
for num in range(1, dnodes_nums+1):
|
||||
dnode = TDDnode(num)
|
||||
dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
|
||||
dnode.addExtraCfg("fqdn", f"{hostname}")
|
||||
dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
|
||||
dnode.addExtraCfg("monitorFqdn", hostname)
|
||||
dnode.addExtraCfg("monitorPort", 7043)
|
||||
dnodes.append(dnode)
|
||||
|
||||
self.TDDnodes = MyDnodes(dnodes)
|
||||
self.TDDnodes.init("")
|
||||
self.TDDnodes.setTestCluster(testCluster)
|
||||
self.TDDnodes.setValgrind(valgrind)
|
||||
self.TDDnodes.stopAll()
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.deploy(dnode.index,{})
|
||||
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.starttaosd(dnode.index)
|
||||
|
||||
# create cluster
|
||||
for dnode in self.TDDnodes.dnodes[1:]:
|
||||
# print(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
|
||||
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
|
||||
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
|
||||
print(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
time.sleep(2)
|
||||
tdLog.info(" create cluster done! ")
|
||||
|
||||
def five_dnode_one_mnode(self):
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
||||
|
||||
tdSql.error("create mnode on dnode 1;")
|
||||
tdSql.error("drop mnode on dnode 1;")
|
||||
|
||||
tdSql.execute("drop database if exists db")
|
||||
tdSql.execute("create database if not exists db replica 1 days 300")
|
||||
tdSql.execute("use db")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
tdSql.query('show databases;')
|
||||
tdSql.checkData(2,5,'no_strict')
|
||||
tdSql.error('alter database db strict 0')
|
||||
tdSql.execute('alter database db strict 1')
|
||||
tdSql.query('show databases;')
|
||||
tdSql.checkData(2,5,'strict')
|
||||
|
||||
def getConnection(self, dnode):
|
||||
host = dnode.cfgDict["fqdn"]
|
||||
port = dnode.cfgDict["serverPort"]
|
||||
config_dir = dnode.cfgDir
|
||||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.five_dnode_one_mnode()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
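The deployment helper in this case derives each dnode's endpoint from a fixed base port, which is why the assertions expect 6030 through 6430. A sketch of that arithmetic in isolation; the function name is illustrative, not part of the test framework:

```python
import socket

def endpoints(dnode_count, base_port=6030, step=100):
    # Mirrors the deploy loop above: dnode N listens on base_port + (N-1)*100,
    # so five dnodes land on 6030, 6130, 6230, 6330 and 6430.
    host = socket.gethostname()
    return [f"{host}:{base_port + (n - 1) * step}" for n in range(1, dnode_count + 1)]

print(endpoints(5))
```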
@ -0,0 +1,237 @@
|
|||
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
|
||||
class MyDnodes(TDDnodes):
|
||||
def __init__(self ,dnodes_lists):
|
||||
super(MyDnodes,self).__init__()
|
||||
self.dnodes = dnodes_lists # dnode must be TDDnode instance
|
||||
self.simDeployed = False
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
self.depoly_cluster(5)
|
||||
self.master_dnode = self.TDDnodes.dnodes[0]
|
||||
self.host=self.master_dnode.cfgDict["fqdn"]
|
||||
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
|
||||
tdSql.init(conn1.cursor())
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
testCluster = False
|
||||
valgrind = 0
|
||||
hostname = socket.gethostname()
|
||||
dnodes = []
|
||||
start_port = 6030
|
||||
for num in range(1, dnodes_nums+1):
|
||||
dnode = TDDnode(num)
|
||||
dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
|
||||
dnode.addExtraCfg("fqdn", f"{hostname}")
|
||||
dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
|
||||
dnode.addExtraCfg("monitorFqdn", hostname)
|
||||
dnode.addExtraCfg("monitorPort", 7043)
|
||||
dnodes.append(dnode)
|
||||
|
||||
self.TDDnodes = MyDnodes(dnodes)
|
||||
self.TDDnodes.init("")
|
||||
self.TDDnodes.setTestCluster(testCluster)
|
||||
self.TDDnodes.setValgrind(valgrind)
|
||||
self.TDDnodes.stopAll()
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.deploy(dnode.index,{})
|
||||
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.starttaosd(dnode.index)
|
||||
|
||||
# create cluster
|
||||
for dnode in self.TDDnodes.dnodes[1:]:
|
||||
# print(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
|
||||
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
|
||||
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
|
||||
print(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
time.sleep(2)
|
||||
tdLog.info(" create cluster done! ")
|
||||
|
||||
def five_dnode_one_mnode(self):
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
||||
|
||||
tdSql.error("create mnode on dnode 1;")
|
||||
tdSql.error("drop mnode on dnode 1;")
|
||||
|
||||
tdSql.execute("drop database if exists db")
|
||||
tdSql.execute("create database if not exists db replica 1 days 300")
|
||||
tdSql.execute("use db")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
def five_dnode_two_mnode(self):
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
||||
|
||||
# first add two mnodes
|
||||
tdSql.error("create mnode on dnode 1;")
|
||||
tdSql.error("drop mnode on dnode 1;")
|
||||
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(2)
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
print("two mnodes is ready")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("two mnodes is not ready in 10s ")
|
||||
|
||||
# first check that the status is ready
|
||||
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,2,'follower')
|
||||
tdSql.checkData(1,3,'ready')
|
||||
|
||||
# first add data: db, stable, child table, normal table
|
||||
|
||||
tdSql.execute("drop database if exists db2")
|
||||
tdSql.execute("create database if not exists db2 replica 1 days 300")
|
||||
tdSql.execute("use db2")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
|
||||
# # first drop the follower mnode
|
||||
# BUG
|
||||
# tdSql.execute("drop mnode on dnode 2")
|
||||
# tdSql.query("show mnodes;")
|
||||
# tdSql.checkRows(1)
|
||||
# tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
# tdSql.checkData(0,2,'leader')
|
||||
# tdSql.checkData(0,3,'ready')
|
||||
|
||||
# tdSql.execute("create mnode on dnode 2")
|
||||
# count=0
|
||||
# while count < 10:
|
||||
# time.sleep(1)
|
||||
# tdSql.query("show mnodes;")
|
||||
# tdSql.checkRows(2)
|
||||
# if tdSql.queryResult[0][2]=='leader' :
|
||||
# if tdSql.queryResult[1][2]=='follower':
|
||||
# print("two mnodes is ready")
|
||||
# break
|
||||
# count+=1
|
||||
# else:
|
||||
# print("two mnodes is not ready in 10s ")
|
||||
|
||||
# tdSql.query("show mnodes;")
|
||||
# tdSql.checkRows(2)
|
||||
# tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
# tdSql.checkData(0,2,'leader')
|
||||
# tdSql.checkData(0,3,'ready')
|
||||
# tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
# tdSql.checkData(1,2,'follower')
|
||||
# tdSql.checkData(1,3,'ready')
|
||||
|
||||
|
||||
def getConnection(self, dnode):
|
||||
host = dnode.cfgDict["fqdn"]
|
||||
port = dnode.cfgDict["serverPort"]
|
||||
config_dir = dnode.cfgDir
|
||||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
|
||||
def run(self):
|
||||
print(self.master_dnode.cfgDict)
|
||||
self.five_dnode_one_mnode()
|
||||
self.five_dnode_two_mnode()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
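The readiness checks in these cases all share one poll-with-timeout shape: query `show mnodes` once per second and stop as soon as the role column matches. A condensed sketch of the pattern; the `query` callable stands in for `tdSql.query`/`queryResult` and is an assumption, not the framework API:

```python
import time

def wait_for_roles(query, expected=("leader", "follower"), timeout=10):
    # Poll "show mnodes" once per second until the set of roles matches,
    # mirroring the while/count loops used throughout these cases.
    for _ in range(timeout):
        rows = query("show mnodes;")
        if {row[2] for row in rows} == set(expected):
            return True
        time.sleep(1)
    return False
```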
@ -0,0 +1,286 @@
|
|||
|
||||
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
class MyDnodes(TDDnodes):
|
||||
def __init__(self ,dnodes_lists):
|
||||
super(MyDnodes,self).__init__()
|
||||
self.dnodes = dnodes_lists # dnode must be TDDnode instance
|
||||
self.simDeployed = False
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
|
||||
def buildcluster(self,dnodenumber):
|
||||
self.depoly_cluster(dnodenumber)
|
||||
self.master_dnode = self.TDDnodes.dnodes[0]
|
||||
self.host=self.master_dnode.cfgDict["fqdn"]
|
||||
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
|
||||
tdSql.init(conn1.cursor())
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def insert_data(self,count):
|
||||
# first add data: db, stable, child table, normal table
|
||||
for couti in count:
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 days 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
testCluster = False
|
||||
valgrind = 0
|
||||
hostname = socket.gethostname()
|
||||
print(hostname)
|
||||
dnodes = []
|
||||
start_port = 6030
|
||||
for num in range(1, dnodes_nums+1):
|
||||
dnode = TDDnode(num)
|
||||
dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
|
||||
dnode.addExtraCfg("fqdn", f"{hostname}")
|
||||
dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
|
||||
dnode.addExtraCfg("monitorFqdn", hostname)
|
||||
dnode.addExtraCfg("monitorPort", 7043)
|
||||
dnodes.append(dnode)
|
||||
|
||||
self.TDDnodes = MyDnodes(dnodes)
|
||||
self.TDDnodes.init("")
|
||||
self.TDDnodes.setTestCluster(testCluster)
|
||||
self.TDDnodes.setValgrind(valgrind)
|
||||
self.TDDnodes.stopAll()
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.deploy(dnode.index,{})
|
||||
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.starttaosd(dnode.index)
|
||||
|
||||
# create cluster
|
||||
for dnode in self.TDDnodes.dnodes[1:]:
|
||||
# print(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
|
||||
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
|
||||
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
|
||||
print(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
time.sleep(2)
|
||||
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
|
||||
|
||||
def check3mnode(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
elif tdSql.queryResult[0][2]=='follower' :
|
||||
if tdSql.queryResult[1][2]=='leader':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
elif tdSql.queryResult[0][2]=='follower' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='leader':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("three mnodes is not ready in 10s ")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
def check3mnode1off(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='offline' :
|
||||
if tdSql.queryResult[1][2]=='leader':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("stop mnodes on dnode 2 successfully in 10s")
|
||||
break
|
||||
elif tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='leader':
|
||||
print("stop mnodes on dnode 2 successfully in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("stop mnodes on dnode 2 failed in 10s ")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'offline')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
def check3mnode2drop(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='offline':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("stop mnodes on dnode 2 successfully in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("stop mnodes on dnode 2 failed in 10s ")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,2,'offline')
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,2,'follower')
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
def check3mnode3off(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[2][2]=='offline':
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
print("stop mnodes on dnode 3 successfully in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("stop mnodes on dnode 3 failed in 10s")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,2,'follower')
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,2,'offline')
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
|
||||
|
||||
def five_dnode_three_mnode(self,dnodenumber):
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
||||
# first add three mnodes
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
|
||||
# first check that the status is ready
|
||||
self.check3mnode()
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.error("create mnode on dnode 1")
|
||||
tdSql.error("create mnode on dnode 3")
|
||||
|
||||
tdSql.error("drop mnode on dnode 1")
|
||||
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
# repeatedly drop and re-create the follower mnodes
|
||||
dropcount =0
|
||||
while dropcount <= 10:
|
||||
for i in range(1,3):
|
||||
tdSql.execute("drop mnode on dnode %d"%(i+1))
|
||||
tdSql.execute("create mnode on dnode %d"%(i+1))
|
||||
dropcount+=1
|
||||
self.check3mnode()
|
||||
|
||||
|
||||
def getConnection(self, dnode):
|
||||
host = dnode.cfgDict["fqdn"]
|
||||
port = dnode.cfgDict["serverPort"]
|
||||
config_dir = dnode.cfgDir
|
||||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.buildcluster(5)
|
||||
self.five_dnode_three_mnode(5)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
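The while loop in this case churns the follower mnodes by dropping and immediately re-creating them. Expressed on its own; the `execute` callable is a placeholder for `tdSql.execute`:

```python
def churn_mnodes(execute, rounds=10, dnodes=(2, 3)):
    # Repeatedly drop and re-create the follower mnodes so that every
    # round forces a membership change, as the test loop above does.
    for _ in range(rounds):
        for d in dnodes:
            execute(f"drop mnode on dnode {d}")
            execute(f"create mnode on dnode {d}")
```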
@ -0,0 +1,305 @@
|
|||
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
class MyDnodes(TDDnodes):
|
||||
def __init__(self ,dnodes_lists):
|
||||
super(MyDnodes,self).__init__()
|
||||
self.dnodes = dnodes_lists # dnode must be TDDnode instance
|
||||
self.simDeployed = False
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
|
||||
def buildcluster(self,dnodenumber):
|
||||
self.depoly_cluster(dnodenumber)
|
||||
self.master_dnode = self.TDDnodes.dnodes[0]
|
||||
self.host=self.master_dnode.cfgDict["fqdn"]
|
||||
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
|
||||
tdSql.init(conn1.cursor())
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def insert_data(self,count):
|
||||
# first add data: db, stable, child table, normal table
|
||||
for couti in count:
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 days 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
testCluster = False
|
||||
valgrind = 0
|
||||
hostname = socket.gethostname()
|
||||
dnodes = []
|
||||
start_port = 6030
|
||||
for num in range(1, dnodes_nums+1):
|
||||
dnode = TDDnode(num)
|
||||
dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
|
||||
dnode.addExtraCfg("fqdn", f"{hostname}")
|
||||
dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
|
||||
dnode.addExtraCfg("monitorFqdn", hostname)
|
||||
dnode.addExtraCfg("monitorPort", 7043)
|
||||
dnodes.append(dnode)
|
||||
|
||||
self.TDDnodes = MyDnodes(dnodes)
|
||||
self.TDDnodes.init("")
|
||||
self.TDDnodes.setTestCluster(testCluster)
|
||||
self.TDDnodes.setValgrind(valgrind)
|
||||
self.TDDnodes.stopAll()
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.deploy(dnode.index,{})
|
||||
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.starttaosd(dnode.index)
|
||||
|
||||
# create cluster
|
||||
for dnode in self.TDDnodes.dnodes[1:]:
|
||||
# print(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
|
||||
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
|
||||
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
|
||||
print(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
time.sleep(2)
|
||||
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
|
||||
|
||||
def check3mnode(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
elif tdSql.queryResult[0][2]=='follower' :
|
||||
if tdSql.queryResult[1][2]=='leader':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
elif tdSql.queryResult[0][2]=='follower' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='leader':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("three mnodes is not ready in 10s ")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
def check3mnode1off(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='offline' :
|
||||
if tdSql.queryResult[1][2]=='leader':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("stop mnodes on dnode 2 successfully in 10s")
|
||||
break
|
||||
elif tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='leader':
|
||||
print("stop mnodes on dnode 2 successfully in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("stop mnodes on dnode 2 failed in 10s ")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'offline')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
def check3mnode2off(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='offline':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("stop mnodes on dnode 2 successfully in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("stop mnodes on dnode 2 failed in 10s ")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,2,'offline')
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,2,'follower')
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
def check3mnode3off(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[2][2]=='offline':
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
print("stop mnodes on dnode 3 successfully in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("stop mnodes on dnode 3 failed in 10s")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,2,'follower')
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,2,'offline')
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
|
||||
|
||||
def five_dnode_three_mnode(self,dnodenumber):
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
||||
# first add three mnodes
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
|
||||
# first check that the status is ready
|
||||
self.check3mnode()
|
||||
|
||||
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
|
||||
tdLog.debug("stop and follower of mnode")
|
||||
# self.TDDnodes.stoptaosd(2)
|
||||
# self.check3mnode2off()
|
||||
# self.TDDnodes.starttaosd(2)
|
||||
|
||||
# self.TDDnodes.stoptaosd(3)
|
||||
# self.check3mnode3off()
|
||||
# self.TDDnodes.starttaosd(2)
|
||||
|
||||
# self.TDDnodes.stoptaosd(1)
|
||||
# self.check3mnode1off()
|
||||
# self.TDDnodes.starttaosd(1)
|
||||
|
||||
# self.check3mnode()
|
||||
stopcount =0
|
||||
while stopcount <= 2:
|
||||
for i in range(dnodenumber):
|
||||
self.TDDnodes.stoptaosd(i+1)
|
||||
# if i == 1 :
|
||||
# self.check3mnode2off()
|
||||
# elif i == 2 :
|
||||
# self.check3mnode3off()
|
||||
# elif i == 0:
|
||||
# self.check3mnode1off()
|
||||
|
||||
self.TDDnodes.starttaosd(i+1)
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
self.check3mnode()
|
||||
|
||||
|
||||
def getConnection(self, dnode):
|
||||
host = dnode.cfgDict["fqdn"]
|
||||
port = dnode.cfgDict["serverPort"]
|
||||
config_dir = dnode.cfgDir
|
||||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.buildcluster(5)
|
||||
self.five_dnode_three_mnode(5)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,292 @@
|
|||
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
class MyDnodes(TDDnodes):
|
||||
def __init__(self ,dnodes_lists):
|
||||
super(MyDnodes,self).__init__()
|
||||
self.dnodes = dnodes_lists # dnode must be TDDnode instance
|
||||
self.simDeployed = False
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
|
||||
def buildcluster(self,dnodenumber):
|
||||
self.depoly_cluster(dnodenumber)
|
||||
self.master_dnode = self.TDDnodes.dnodes[0]
|
||||
self.host=self.master_dnode.cfgDict["fqdn"]
|
||||
|
||||
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
|
||||
tdSql.init(conn1.cursor())
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def insert_data(self,count):
|
||||
# first add data: db, stable, child table, normal table
|
||||
for couti in count:
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 days 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
testCluster = False
|
||||
valgrind = 0
|
||||
hostname = socket.gethostname()
|
||||
dnodes = []
|
||||
start_port = 6030
|
||||
for num in range(1, dnodes_nums+1):
|
||||
dnode = TDDnode(num)
|
||||
dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
|
||||
dnode.addExtraCfg("fqdn", f"{hostname}")
|
||||
dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
|
||||
dnode.addExtraCfg("monitorFqdn", hostname)
|
||||
dnode.addExtraCfg("monitorPort", 7043)
|
||||
dnodes.append(dnode)
|
||||
|
||||
self.TDDnodes = MyDnodes(dnodes)
|
||||
self.TDDnodes.init("")
|
||||
self.TDDnodes.setTestCluster(testCluster)
|
||||
self.TDDnodes.setValgrind(valgrind)
|
||||
self.TDDnodes.stopAll()
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.deploy(dnode.index,{})
|
||||
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.starttaosd(dnode.index)
|
||||
|
||||
# create cluster
|
||||
for dnode in self.TDDnodes.dnodes[1:]:
|
||||
# print(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
|
||||
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
|
||||
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
|
||||
print(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
time.sleep(2)
|
||||
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
|
||||
|
||||
def check3mnode(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
elif tdSql.queryResult[0][2]=='follower' :
|
||||
if tdSql.queryResult[1][2]=='leader':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
elif tdSql.queryResult[0][2]=='follower' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='leader':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("three mnodes is not ready in 10s ")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
def check3mnode1off(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='offline' :
|
||||
if tdSql.queryResult[1][2]=='leader':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("stop mnodes on dnode 2 successfully in 10s")
|
||||
break
|
||||
elif tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='leader':
|
||||
print("stop mnodes on dnode 2 successfully in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("stop mnodes on dnode 2 failed in 10s ")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'offline')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
def check3mnode2off(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='offline':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("stop mnodes on dnode 2 successfully in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("stop mnodes on dnode 2 failed in 10s ")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,2,'offline')
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,2,'follower')
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
def check3mnode3off(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[2][2]=='offline':
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
print("stop mnodes on dnode 3 successfully in 10s")
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("stop mnodes on dnode 3 failed in 10s")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
tdSql.checkData(1,2,'follower')
|
||||
tdSql.checkData(1,3,'ready')
|
||||
tdSql.checkData(2,1,'%s:6230'%self.host)
|
||||
tdSql.checkData(2,2,'offline')
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
|
||||
|
||||
def five_dnode_three_mnode(self,dnodenumber):
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
||||
# first add three mnodes
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
|
||||
# first check that the status is ready
|
||||
self.check3mnode()
|
||||
|
||||
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
# stop and restart follower mnodes
|
||||
stopcount =0
|
||||
while stopcount <= 2:
|
||||
for i in range(dnodenumber):
|
||||
self.TDDnodes.stoptaosd(i+1)
|
||||
# if i == 1 :
|
||||
# self.check3mnode2off()
|
||||
# elif i == 2 :
|
||||
# self.check3mnode3off()
|
||||
# elif i == 0:
|
||||
# self.check3mnode1off()
|
||||
|
||||
self.TDDnodes.starttaosd(i+1)
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
self.check3mnode()
|
||||
|
||||
|
||||
def getConnection(self, dnode):
|
||||
host = dnode.cfgDict["fqdn"]
|
||||
port = dnode.cfgDict["serverPort"]
|
||||
config_dir = dnode.cfgDir
|
||||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.buildcluster(5)
|
||||
self.five_dnode_three_mnode(5)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
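The endpoint assertions in these cases are only portable when the expected value is derived from the machine that runs the test; the idiom, in isolation:

```python
import socket

# Derive the expected first-mnode endpoint from the current host rather
# than hard-coding a particular developer machine name.
host = socket.gethostname()
print('%s:6030' % host)
```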
@ -300,7 +300,7 @@ class TDTestCase:
|
|||
print ("====total rows of stb: %d"%countOfStb)
|
||||
|
||||
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
|
||||
if totalConsumeRows != expectrowcnt:
|
||||
if totalConsumeRows < expectrowcnt:
|
||||
tdLog.exit("tmq consume rows error!")
|
||||
|
||||
tdLog.info("again start consume processer")
|
||||
|
|
|
@ -6,7 +6,7 @@ python3 .\test.py -f 0-others\telemetry.py
|
|||
python3 .\test.py -f 0-others\taosdMonitor.py
|
||||
python3 .\test.py -f 0-others\udfTest.py
|
||||
python3 .\test.py -f 0-others\udf_create.py
|
||||
@REM python3 .\test.py -f 0-others\udf_restart_taosd.py
|
||||
python3 .\test.py -f 0-others\udf_restart_taosd.py
|
||||
python3 .\test.py -f 0-others\cachelast.py
|
||||
|
||||
python3 .\test.py -f 0-others\user_control.py
|
||||
|
@ -21,7 +21,7 @@ python3 .\test.py -f 1-insert\alter_table.py
|
|||
python3 .\test.py -f 2-query\between.py
|
||||
python3 .\test.py -f 2-query\distinct.py
|
||||
python3 .\test.py -f 2-query\varchar.py
|
||||
@REM python3 .\test.py -f 2-query\ltrim.py
|
||||
python3 .\test.py -f 2-query\ltrim.py
|
||||
python3 .\test.py -f 2-query\rtrim.py
|
||||
python3 .\test.py -f 2-query\length.py
|
||||
python3 .\test.py -f 2-query\char_length.py
|
||||
|
@ -32,12 +32,12 @@ python3 .\test.py -f 2-query\join2.py
|
|||
python3 .\test.py -f 2-query\cast.py
|
||||
python3 .\test.py -f 2-query\union.py
|
||||
python3 .\test.py -f 2-query\union1.py
|
||||
@REM python3 .\test.py -f 2-query\concat.py
|
||||
python3 .\test.py -f 2-query\concat.py
|
||||
python3 .\test.py -f 2-query\concat2.py
|
||||
python3 .\test.py -f 2-query\concat_ws.py
|
||||
python3 .\test.py -f 2-query\concat_ws2.py
|
||||
@REM python3 .\test.py -f 2-query\check_tsdb.py
|
||||
@REM python3 .\test.py -f 2-query\spread.py
|
||||
python3 .\test.py -f 2-query\check_tsdb.py
|
||||
python3 .\test.py -f 2-query\spread.py
|
||||
@REM python3 .\test.py -f 2-query\hyperloglog.py
|
||||
|
||||
python3 .\test.py -f 2-query\timezone.py
|
||||
|
@ -71,7 +71,7 @@ python3 .\test.py -f 2-query\tan.py
|
|||
python3 .\test.py -f 2-query\arcsin.py
|
||||
python3 .\test.py -f 2-query\arccos.py
|
||||
python3 .\test.py -f 2-query\arctan.py
|
||||
@REM python3 .\test.py -f 2-query\query_cols_tags_and_or.py
|
||||
python3 .\test.py -f 2-query\query_cols_tags_and_or.py
|
||||
@REM # python3 .\test.py -f 2-query\nestedQuery.py
|
||||
@REM # TD-15983 subquery output duplicate name column.
|
||||
@REM # Please Xiangyang Guo modify the following script
|
||||
|
@ -79,11 +79,11 @@ python3 .\test.py -f 2-query\arctan.py
|
|||
|
||||
python3 .\test.py -f 2-query\avg.py
|
||||
python3 .\test.py -f 2-query\elapsed.py
|
||||
@REM python3 .\test.py -f 2-query\csum.py
|
||||
python3 .\test.py -f 2-query\csum.py
|
||||
python3 .\test.py -f 2-query\mavg.py
|
||||
python3 .\test.py -f 2-query\diff.py
|
||||
python3 .\test.py -f 2-query\sample.py
|
||||
@REM python3 .\test.py -f 2-query\function_diff.py
|
||||
python3 .\test.py -f 2-query\function_diff.py
|
||||
python3 .\test.py -f 2-query\unique.py
|
||||
python3 .\test.py -f 2-query\stateduration.py
|
||||
python3 .\test.py -f 2-query\function_stateduration.py
|
||||
|
|
|
@ -18,9 +18,10 @@ python3 ./test.py -f 0-others/fsync.py
|
|||
python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py
|
||||
python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py
|
||||
python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py
|
||||
#python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py
|
||||
# python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py
|
||||
python3 ./test.py -f 1-insert/alter_stable.py
|
||||
python3 ./test.py -f 1-insert/alter_table.py
|
||||
python3 ./test.py -f 1-insert/insertWithMoreVgroup.py
|
||||
# python3 ./test.py -f 1-insert/create_table_comment.py
|
||||
python3 ./test.py -f 2-query/between.py
|
||||
python3 ./test.py -f 2-query/distinct.py
|
||||
|
@ -95,10 +96,14 @@ python3 ./test.py -f 2-query/stateduration.py
|
|||
python3 ./test.py -f 2-query/function_stateduration.py
|
||||
python3 ./test.py -f 2-query/statecount.py
|
||||
|
||||
python3 ./test.py -f 6-cluster/5dnode1mnode.py
|
||||
python3 ./test.py -f 6-cluster/5dnode2mnode.py
|
||||
python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py
|
||||
|
||||
python3 ./test.py -f 7-tmq/basic5.py
|
||||
python3 ./test.py -f 7-tmq/subscribeDb.py
|
||||
python3 ./test.py -f 7-tmq/subscribeDb0.py
|
||||
python3 ./test.py -f 7-tmq/subscribeDb1.py
|
||||
# python3 ./test.py -f 7-tmq/subscribeDb1.py
|
||||
python3 ./test.py -f 7-tmq/subscribeStb.py
|
||||
python3 ./test.py -f 7-tmq/subscribeStb0.py
|
||||
python3 ./test.py -f 7-tmq/subscribeStb1.py
|
||||
|
|
|
@ -22,10 +22,11 @@ echo Windows Taosd Test
|
|||
for /F "usebackq tokens=*" %%i in (simpletest.bat) do (
|
||||
for /f "tokens=1* delims= " %%a in ("%%i") do if not "%%a" == "@REM" (
|
||||
set /a a+=1
|
||||
set timeNow=!time!
|
||||
echo !a! Processing %%i
|
||||
call :GetTimeSeconds !time!
|
||||
call :GetTimeSeconds !timeNow!
|
||||
set time1=!_timeTemp!
|
||||
echo Start at !time!
|
||||
echo Start at !timeNow!
|
||||
call %%i ARG1 > result_!a!.txt 2>error_!a!.txt
|
||||
if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && echo result: && cat result_!a!.txt && echo error: && cat error_!a!.txt && exit 8 ) else ( call :colorEcho 0a "Success" &echo. )
|
||||
)
|
||||
|
@ -45,10 +46,11 @@ for /F "usebackq tokens=*" %%i in (simpletest.bat) do (
|
|||
exit
|
||||
|
||||
:colorEcho
|
||||
call :GetTimeSeconds %time%
|
||||
set timeNow=%time%
|
||||
call :GetTimeSeconds %timeNow%
|
||||
set time2=%_timeTemp%
|
||||
set /a interTime=%time2% - %time1%
|
||||
echo End at %time% , cast %interTime%s
|
||||
echo End at %timeNow% , cost %interTime%s
|
||||
echo off
|
||||
<nul set /p ".=%DEL%" > "%~2"
|
||||
findstr /v /a:%1 /R "^$" "%~2" nul
|
||||
|
|
|
@ -313,8 +313,10 @@ static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex)
|
|||
|
||||
taos_print_row(buf, row, fields, numOfFields);
|
||||
|
||||
const char* tbName = tmq_get_table_name(msg);
|
||||
|
||||
if (0 != g_stConfInfo.showRowFlag) {
|
||||
taosFprintfFile(g_fp, "rows[%d]: %s\n", totalRows, buf);
|
||||
taosFprintfFile(g_fp, "tbname:%s, rows[%d]: %s\n", (tbName != NULL ? tbName:"null table"), totalRows, buf);
|
||||
if (0 != g_stConfInfo.saveRowFlag) {
|
||||
saveConsumeContentToTbl(pInfo, buf);
|
||||
}
|
||||
|
@ -361,6 +363,8 @@ void build_consumer(SThreadInfo* pInfo) {
|
|||
tmq_conf_set(conf, pInfo->key[i], pInfo->value[i]);
|
||||
}
|
||||
|
||||
tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||
|
||||
// tmq_conf_set(conf, "client.id", "c-001");
|
||||
|
||||
// tmq_conf_set(conf, "enable.auto.commit", "true");
|
||||
|
@ -533,6 +537,12 @@ void* consumeThreadFunc(void* param) {
|
|||
// save consume result into consumeresult table
|
||||
saveConsumeResult(pInfo);
|
||||
|
||||
// save per-vgroup row counts
|
||||
taosFprintfFile(g_fp, "======== consumerId: %d, consume rows from per vgroups ========\n", pInfo->consumerId);
|
||||
for (int32_t i = 0; i < pInfo->numOfVgroups; i++) {
|
||||
taosFprintfFile(g_fp, "vgroups: %04d, rows: %d\n", pInfo->rowsOfPerVgroups[i][0], pInfo->rowsOfPerVgroups[i][1]);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
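The added block dumps how many rows each vgroup delivered to the consumer. The bookkeeping it prints is a plain tally keyed by vgroup id; sketched in Python with illustrative names:

```python
from collections import defaultdict

rows_per_vgroup = defaultdict(int)

def on_batch(vgroup_id, row_count):
    # Accumulate consumed rows per vgroup, as the C consumer does with
    # pInfo->rowsOfPerVgroups before writing the totals to its log file.
    rows_per_vgroup[vgroup_id] += row_count
```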
|
@ -886,7 +886,9 @@ void shellGetGrantInfo() {
|
|||
memcpy(expiretime, row[1], fields[1].bytes);
|
||||
memcpy(expired, row[2], fields[2].bytes);
|
||||
|
||||
if (strcmp(expiretime, "unlimited") == 0) {
|
||||
if (strcmp(serverVersion, "community") == 0) {
|
||||
fprintf(stdout, "Server is Community Edition.\n");
|
||||
} else if (strcmp(expiretime, "unlimited") == 0) {
|
||||
fprintf(stdout, "Server is Enterprise %s Edition, %s and will never expire.\n", serverVersion, sinfo);
|
||||
} else {
|
||||
fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\n", serverVersion, sinfo, expiretime);
|
||||
|
|
|
@ -1 +1 @@
|
|||
Subproject commit 717f5aaa5f0a1b4d92bb2ae68858fec554fb5eda
|
||||
Subproject commit 3d5aa76f8c718dcffa100b45e4cbf313d499c356
|