Merge branch 'develop' into feature/query
commit 370fadb07a

@@ -1,6 +1,7 @@
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
import threading

# querySeqNum = 0

@@ -37,6 +38,7 @@ class TDengineCursor(object):
        self._block_iter = 0
        self._affected_rows = 0
        self._logfile = ""
        self._threadId = threading.get_ident()

        if connection is not None:
            self._connection = connection

@@ -103,6 +105,12 @@ class TDengineCursor(object):
    def execute(self, operation, params=None):
        """Prepare and execute a database operation (query or command).
        """
        # if threading.get_ident() != self._threadId:
        #     info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
        #     raise OperationalError(info)
        #     print(info)
        #     return None

        if not operation:
            return None

@@ -188,6 +196,11 @@ class TDengineCursor(object):
    def fetchall(self):
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
        """
        # if threading.get_ident() != self._threadId:
        #     info ="[WARNING] Cursor fetchall:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
        #     raise OperationalError(info)
        #     print(info)
        #     return None
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")

@@ -232,6 +245,12 @@ class TDengineCursor(object):
    def _handle_result(self):
        """Handle the return result from query.
        """
        # if threading.get_ident() != self._threadId:
        #     info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
        #     raise OperationalError(info)
        #     print(info)
        #     return None

        self._description = []
        for ele in self._fields:
            self._description.append(
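The cursor hunks above wire in `threading` and record the creating thread's ID, but the actual affinity check stays commented out. As a rough, standalone sketch only (not part of this commit; `OperationalError` below is a stand-in for the connector's own error class), the guard those comments describe could look like this:

```python
import threading


class OperationalError(Exception):
    """Stand-in for taos.error.OperationalError (assumption for this sketch)."""


class ThreadAffinityGuard:
    """Records the creating thread and rejects use from any other thread."""

    def __init__(self):
        self._threadId = threading.get_ident()

    def check(self, where):
        caller = threading.get_ident()
        if caller != self._threadId:
            raise OperationalError(
                "%s: thread ID mismatch, creator:%d caller:%d"
                % (where, self._threadId, caller))
```

A cursor could call something like `guard.check("execute")` at the top of execute()/fetchall() once the project decides to enforce the restriction.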
@@ -131,8 +131,8 @@ static void dnodeFreeMnodeWriteMsg(SMnodeMsg *pWrite) {
  taosFreeQitem(pWrite);
}

void dnodeSendRpcMnodeWriteRsp(void *pRaw, int32_t code) {
  SMnodeMsg *pWrite = pRaw;
void dnodeSendRpcMnodeWriteRsp(void *pMsg, int32_t code) {
  SMnodeMsg *pWrite = pMsg;
  if (pWrite == NULL) return;
  if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return;
  if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) {

@@ -206,9 +206,10 @@ static void shellSourceFile(TAOS *con, char *fptr) {

    if (code != 0) {
      fprintf(stderr, "DB error: %s: %s (%d)\n", taos_errstr(con), fname, lineNo);
      /* free local resouce: allocated memory/metric-meta refcnt */
      taos_free_result(pSql);
    }

    /* free local resouce: allocated memory/metric-meta refcnt */
    taos_free_result(pSql);

    memset(cmd, 0, MAX_COMMAND_SIZE);
    cmd_len = 0;

@@ -520,9 +520,8 @@ int main(int argc, char *argv[]) {
    snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
    queryDB(taos, command);
    printf("meters created!\n");

    taos_close(taos);
  }
  taos_close(taos);

  /* Wait for table to create */
  multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass);

@@ -792,9 +791,6 @@ void * createTable(void *sarg)
      snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
      queryDB(winfo->taos, command);
    }

    taos_close(winfo->taos);

  } else {
    /* Create all the tables; */
    printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);

@@ -812,7 +808,6 @@ void * createTable(void *sarg)
      }
      queryDB(winfo->taos, command);
    }
    taos_close(winfo->taos);
  }

  return NULL;

@@ -53,6 +53,7 @@ typedef struct {
  void * rowData;
  int32_t rowSize;
  int32_t retCode;  // for callback in sdb queue
  int32_t processedCount;  // for sync fwd callback
  int32_t (*cb)(struct SMnodeMsg *pMsg, int32_t code);
  struct SMnodeMsg *pMsg;
} SSdbOper;

@@ -88,13 +88,13 @@ static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) {
}

static int32_t mnodeDnodeActionUpdate(SSdbOper *pOper) {
  SDnodeObj *pDnode = pOper->pObj;
  SDnodeObj *pSaved = mnodeGetDnode(pDnode->dnodeId);
  if (pSaved != NULL && pDnode != pSaved) {
    memcpy(pSaved, pDnode, pOper->rowSize);
    free(pDnode);
    mnodeDecDnodeRef(pSaved);
  SDnodeObj *pNew = pOper->pObj;
  SDnodeObj *pDnode = mnodeGetDnode(pNew->dnodeId);
  if (pDnode != NULL && pNew != pDnode) {
    memcpy(pDnode, pNew, pOper->rowSize);
    free(pNew);
  }
  mnodeDecDnodeRef(pDnode);

  return TSDB_CODE_SUCCESS;
}

@@ -72,8 +72,6 @@ typedef struct {
  void * sync;
  void * wal;
  SSyncCfg cfg;
  sem_t sem;
  int32_t code;
  int32_t numOfTables;
  SSdbTable *tableList[SDB_TABLE_MAX];
  pthread_mutex_t mutex;
@@ -244,27 +242,36 @@ static void sdbNotifyRole(void *ahandle, int8_t role) {
  sdbUpdateMnodeRoles();
}

FORCE_INLINE
static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
  tsSdbObj.code = code;
  sem_post(&tsSdbObj.sem);
  sdbDebug("forward request confirmed, version:%" PRIu64 ", result:%s", (int64_t)param, tstrerror(code));
}
  assert(param);
  SSdbOper * pOper = param;
  SMnodeMsg *pMsg = pOper->pMsg;
  if (code <= 0) pOper->retCode = code;

static int32_t sdbForwardToPeer(SWalHead *pHead) {
  if (tsSdbObj.sync == NULL) return TSDB_CODE_SUCCESS;
  int32_t processedCount = atomic_add_fetch_32(&pOper->processedCount, 1);
  if (processedCount <= 1) {
    if (pMsg != NULL) {
      sdbDebug("app:%p:%p, waiting for confirm this operation, count:%d", pMsg->rpcMsg.ahandle, pMsg, processedCount);
    }
    return;
  }

  int32_t code = syncForwardToPeer(tsSdbObj.sync, pHead, (void*)pHead->version, TAOS_QTYPE_RPC);
  if (code > 0) {
    sdbDebug("forward request is sent, version:%" PRIu64 ", code:%d", pHead->version, code);
    sem_wait(&tsSdbObj.sem);
    return tsSdbObj.code;
  }
  return code;
  if (pMsg != NULL) {
    sdbDebug("app:%p:%p, is confirmed and will do callback func", pMsg->rpcMsg.ahandle, pMsg);
  }

  if (pOper->cb != NULL) {
    pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode);
  }

  dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode);
  taosFreeQitem(pOper);
}

void sdbUpdateSync() {
  SSyncCfg syncCfg = {0};
  int32_t index = 0;
  int32_t index = 0;

  SDMMnodeInfos *mnodes = dnodeGetMnodeInfos();
  for (int32_t i = 0; i < mnodes->nodeNum; ++i) {
@@ -298,7 +305,7 @@ void sdbUpdateSync() {
  }

  syncCfg.replica = index;
  syncCfg.quorum = (syncCfg.replica == 1) ? 1:2;
  syncCfg.quorum = (syncCfg.replica == 1) ? 1 : 2;

  bool hasThisDnode = false;
  for (int32_t i = 0; i < syncCfg.replica; ++i) {
@@ -325,10 +332,10 @@ void sdbUpdateSync() {
  syncInfo.getWalInfo = sdbGetWalInfo;
  syncInfo.getFileInfo = sdbGetFileInfo;
  syncInfo.writeToCache = sdbWriteToQueue;
  syncInfo.confirmForward = sdbConfirmForward;
  syncInfo.confirmForward = sdbConfirmForward;
  syncInfo.notifyRole = sdbNotifyRole;
  tsSdbObj.cfg = syncCfg;

  if (tsSdbObj.sync) {
    syncReconfig(tsSdbObj.sync, &syncCfg);
  } else {
@@ -339,7 +346,6 @@ void sdbUpdateSync() {

int32_t sdbInit() {
  pthread_mutex_init(&tsSdbObj.mutex, NULL);
  sem_init(&tsSdbObj.sem, 0, 0);

  if (sdbInitWriteWorker() != 0) {
    return -1;
@@ -379,7 +385,6 @@ void sdbCleanUp() {
    tsSdbObj.wal = NULL;
  }

  sem_destroy(&tsSdbObj.sem);
  pthread_mutex_destroy(&tsSdbObj.mutex);
}

@@ -513,24 +518,22 @@ static int sdbWrite(void *param, void *data, int type) {
  assert(pTable != NULL);

  pthread_mutex_lock(&tsSdbObj.mutex);

  if (pHead->version == 0) {
    // assign version
    // assign version
    tsSdbObj.version++;
    pHead->version = tsSdbObj.version;
  } else {
    // for data from WAL or forward, version may be smaller
    if (pHead->version <= tsSdbObj.version) {
      pthread_mutex_unlock(&tsSdbObj.mutex);
      if (type == TAOS_QTYPE_FWD && tsSdbObj.sync != NULL) {
        sdbDebug("forward request is received, version:%" PRIu64 " confirm it", pHead->version);
        syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS);
      }
      sdbDebug("table:%s, failed to restore %s record:%s from source(%d), version:%" PRId64 " too large, sdb version:%" PRId64,
               pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version);
      return TSDB_CODE_SUCCESS;
    } else if (pHead->version != tsSdbObj.version + 1) {
      pthread_mutex_unlock(&tsSdbObj.mutex);
      sdbError("table:%s, failed to restore %s record:%s from wal, version:%" PRId64 " too large, sdb version:%" PRId64,
               pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version,
               tsSdbObj.version);
      sdbError("table:%s, failed to restore %s record:%s from source(%d), version:%" PRId64 " too large, sdb version:%" PRId64,
               pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version);
      return TSDB_CODE_MND_APP_ERROR;
    } else {
      tsSdbObj.version = pHead->version;
@@ -542,28 +545,36 @@ static int sdbWrite(void *param, void *data, int type) {
    pthread_mutex_unlock(&tsSdbObj.mutex);
    return code;
  }

  code = sdbForwardToPeer(pHead);

  pthread_mutex_unlock(&tsSdbObj.mutex);

  // from app, oper is created
  if (pOper != NULL) {
    sdbTrace("record from app is disposed, table:%s action:%s record:%s version:%" PRIu64 " result:%s",
             pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version,
             tstrerror(code));
    return code;
    // forward to peers
    pOper->processedCount = 0;
    int32_t syncCode = syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC);
    if (syncCode <= 0) pOper->processedCount = 1;

    if (syncCode < 0) {
      sdbError("table:%s, failed to forward request, result:%s action:%s record:%s version:%" PRId64, pTable->tableName,
               tstrerror(syncCode), sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
    } else if (syncCode > 0) {
      sdbDebug("table:%s, forward request is sent, action:%s record:%s version:%" PRId64, pTable->tableName,
               sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
    } else {
      sdbTrace("table:%s, no need to send fwd request, action:%s record:%s version:%" PRId64, pTable->tableName,
               sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
    }
    return syncCode;
  }

  sdbDebug("table:%s, record from wal/fwd is disposed, action:%s record:%s version:%" PRId64, pTable->tableName,
           sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);

  // even it is WAL/FWD, it shall be called to update version in sync
  syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC);

  // from wal or forward msg, oper not created, should add into hash
  if (tsSdbObj.sync != NULL) {
    sdbTrace("record from wal forward is disposed, table:%s action:%s record:%s version:%" PRIu64 " confirm it",
             pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
    syncConfirmForward(tsSdbObj.sync, pHead->version, code);
  } else {
    sdbTrace("record from wal restore is disposed, table:%s action:%s record:%s version:%" PRIu64, pTable->tableName,
             sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
  }

  if (action == SDB_ACTION_INSERT) {
    SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable};
    code = (*pTable->decodeFp)(&oper);
@@ -627,7 +638,7 @@ int32_t sdbInsertRow(SSdbOper *pOper) {
  memcpy(pNewOper, pOper, sizeof(SSdbOper));

  if (pNewOper->pMsg != NULL) {
    sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle,
    sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle,
             pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
  }

@@ -677,7 +688,7 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
  memcpy(pNewOper, pOper, sizeof(SSdbOper));

  if (pNewOper->pMsg != NULL) {
    sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle,
    sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle,
             pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
  }

@@ -727,7 +738,7 @@ int32_t sdbUpdateRow(SSdbOper *pOper) {
  memcpy(pNewOper, pOper, sizeof(SSdbOper));

  if (pNewOper->pMsg != NULL) {
    sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle,
    sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle,
             pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
  }

@@ -943,20 +954,20 @@ static void *sdbWorkerFp(void *param) {
      taosGetQitem(tsSdbWriteQall, &type, &item);
      if (type == TAOS_QTYPE_RPC) {
        pOper = (SSdbOper *)item;
        pOper->processedCount = 1;
        pHead = (void *)pOper + sizeof(SSdbOper) + SDB_SYNC_HACK;
        if (pOper->pMsg != NULL) {
          sdbDebug("app:%p:%p, table:%s record:%p:%s version:%" PRIu64 ", will be processed in sdb queue",
                   pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj,
                   sdbGetKeyStr(pOper->table, pHead->cont), pHead->version);
        }
      } else {
        pHead = (SWalHead *)item;
        pOper = NULL;
      }

      if (pOper != NULL && pOper->pMsg != NULL) {
        sdbDebug("app:%p:%p, table:%s record:%p:%s version:%" PRIu64 ", will be processed in sdb queue",
                 pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj,
                 sdbGetKeyStr(pOper->table, pHead->cont), pHead->version);
      }

      int32_t code = sdbWrite(pOper, pHead, type);
      if (pOper) pOper->retCode = code;
      if (pOper && code <= 0) pOper->retCode = code;
    }

    walFsync(tsSdbObj.wal);
@@ -965,25 +976,17 @@ static void *sdbWorkerFp(void *param) {
    taosResetQitems(tsSdbWriteQall);
    for (int32_t i = 0; i < numOfMsgs; ++i) {
      taosGetQitem(tsSdbWriteQall, &type, &item);

      if (type == TAOS_QTYPE_RPC) {
        pOper = (SSdbOper *)item;
        if (pOper != NULL && pOper->cb != NULL) {
          sdbTrace("app:%p:%p, will do callback func, index:%d", pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, i);
          pOper->retCode = (*pOper->cb)(pOper->pMsg, pOper->retCode);
        }

        if (pOper != NULL && pOper->pMsg != NULL) {
          sdbTrace("app:%p:%p, msg is processed, result:%s", pOper->pMsg->rpcMsg.ahandle, pOper->pMsg,
                   tstrerror(pOper->retCode));
        }

        if (pOper != NULL) {
          sdbDecRef(pOper->table, pOper->pObj);
        }

        dnodeSendRpcMnodeWriteRsp(pOper->pMsg, pOper->retCode);
        sdbDecRef(pOper->table, pOper->pObj);
        sdbConfirmForward(NULL, pOper, pOper->retCode);
      } else if (type == TAOS_QTYPE_FWD) {
        syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS);
        taosFreeQitem(item);
      } else {
        taosFreeQitem(item);
      }
      taosFreeQitem(item);
    }
  }

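The sdb changes above replace the semaphore-based forward/confirm path with a per-operation counter: both the local write path and the peer confirmation bump `processedCount`, and only the second arrival runs the callback and sends the RPC response. Below is a rough, standalone Python sketch of that rendezvous pattern only, not the actual C implementation; `Operation` and its callback are invented names for illustration:

```python
import threading


class Operation:
    """Hypothetical stand-in for SSdbOper: an update needing two confirmations."""

    def __init__(self, callback):
        self._count = 0
        self._lock = threading.Lock()
        self._callback = callback  # runs once, after the second confirmation
        self.ret_code = 0

    def confirm(self, code):
        """Called once by the local writer and once by the peer-forward path."""
        if code <= 0:
            self.ret_code = code
        with self._lock:           # analogue of atomic_add_fetch_32
            self._count += 1
            arrivals = self._count
        if arrivals <= 1:
            return                 # still waiting for the other side
        self._callback(self.ret_code)


# Usage: whichever of the two confirmations lands last triggers the response.
op = Operation(lambda rc: print("respond to client, code =", rc))
op.confirm(0)   # e.g. local WAL write finished
op.confirm(0)   # e.g. peer acknowledged the forward -> callback fires
```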
@@ -783,9 +783,15 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {

static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
  SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
  if (pTable != NULL) {
    mLInfo("app:%p:%p, stable:%s, is created in sdb, result:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
           tstrerror(code));
  assert(pTable);

  if (code == TSDB_CODE_SUCCESS) {
    mLInfo("stable:%s, is created in sdb", pTable->info.tableId);
  } else {
    mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
           tstrerror(code));
    SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsSuperTableSdb};
    sdbDeleteRow(&desc);
  }

  return code;
@@ -1561,10 +1567,16 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
  SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
  assert(pTable);

  mDebug("app:%p:%p, table:%s, create table in id:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle, pMsg,
         pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));

  if (code != TSDB_CODE_SUCCESS) return code;
  if (code == TSDB_CODE_SUCCESS) {
    mDebug("app:%p:%p, table:%s, create table in sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
           pTable->sid, pTable->uid);
  } else {
    mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg,
           pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
    SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsChildTableSdb};
    sdbDeleteRow(&desc);
    return code;
  }

  SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont;
  SMDCreateTableMsg *pMDCreate = mnodeBuildCreateChildTableMsg(pCreate, pTable);

@@ -348,17 +348,23 @@ void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) {
}

static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
  SVgObj *pVgroup = pMsg->pVgroup;
  SDbObj *pDb = pMsg->pDb;
  assert(pVgroup);

  if (code != TSDB_CODE_SUCCESS) {
    pMsg->pVgroup = NULL;
    mError("app:%p:%p, vgId:%d, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
           tstrerror(code));
    SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb};
    sdbDeleteRow(&desc);
    return code;
  }

  SVgObj *pVgroup = pMsg->pVgroup;
  SDbObj *pDb = pMsg->pDb;

  mInfo("vgId:%d, is created in mnode, db:%s replica:%d", pVgroup->vgId, pDb->name, pVgroup->numOfVnodes);
  mInfo("app:%p:%p, vgId:%d, is created in mnode, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
        pDb->name, pVgroup->numOfVnodes);
  for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
    mInfo("vgId:%d, index:%d, dnode:%d", pVgroup->vgId, i, pVgroup->vnodeGid[i].dnodeId);
    mInfo("app:%p:%p, vgId:%d, index:%d, dnode:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, i,
          pVgroup->vnodeGid[i].dnodeId);
  }

  mnodeIncVgroupRef(pVgroup);

@@ -156,6 +156,7 @@ int main(int argc, char *argv[]) {
  }

  tInfo("client is initialized");
  tInfo("threads:%d msgSize:%d requests:%d", appThreads, msgSize, numOfReqs);

  gettimeofday(&systemTime, NULL);
  startTime = systemTime.tv_sec*1000000 + systemTime.tv_usec;

@@ -24,23 +24,21 @@ int msgSize = 128;
int commit = 0;
int dataFd = -1;
void *qhandle = NULL;
void *qset = NULL;

void processShellMsg() {
  static int num = 0;
  taos_qall qall;
  SRpcMsg *pRpcMsg, rpcMsg;
  int type;
  void *pvnode;

  qall = taosAllocateQall();

  while (1) {
    int numOfMsgs = taosReadAllQitems(qhandle, qall);
    if (numOfMsgs <= 0) {
      usleep(100);
      continue;
    }

    int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &pvnode);
    tDebug("%d shell msgs are received", numOfMsgs);
    if (numOfMsgs <= 0) break;

    for (int i=0; i<numOfMsgs; ++i) {
      taosGetQitem(qall, &type, (void **)&pRpcMsg);

@@ -82,15 +80,6 @@ void processShellMsg() {
  }

  taosFreeQall(qall);
  /*
  SRpcIpSet ipSet;
  ipSet.numOfIps = 1;
  ipSet.index = 0;
  ipSet.port = 7000;
  ipSet.ip[0] = inet_addr("192.168.0.2");

  rpcSendRedirectRsp(ahandle, &ipSet);
  */

}

@@ -189,6 +178,8 @@ int main(int argc, char *argv[]) {
  }

  qhandle = taosOpenQueue(sizeof(SRpcMsg));
  qset = taosOpenQset();
  taosAddIntoQset(qset, qhandle, NULL);

  processShellMsg();

(File diff suppressed because it is too large.)
@@ -38,4 +38,4 @@ export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib

# Now we are all let, and let's see if we can find a crash. Note we pass all params
./crash_gen.py $@
python3 ./crash_gen.py $@

@@ -0,0 +1,16 @@
sql connect

$db = db1
$stb = stb1
print =============== client1_0:

sql use $db

$tblNum = 1000

$i = 1
while $i < $tblNum
  $tb = tb . $i
  sql create table $tb using $stb tags ($i, 'abcd')
  $i = $i + 1
endw
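For reference, the table-creation loop that client1_0.sim drives through the test harness can be sketched with the Python connector touched earlier in this commit; the connection parameters and table names below are placeholders, not part of the test:

```python
import taos  # TDengine Python connector (see the cursor.py changes above)

# Placeholder connection settings; the sim framework supplies its own.
conn = taos.connect(host="localhost", database="db1")
cursor = conn.cursor()

# Mirror of client1_0.sim: create tb1..tb999 from super table stb1.
for i in range(1, 1000):
    cursor.execute(
        "create table if not exists tb%d using stb1 tags (%d, 'abcd')" % (i, i))

conn.close()
```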
@@ -0,0 +1,494 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4

system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3

system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode2 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode4 -c walLevel -v 1

system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10

system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4

system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/cfg.sh -n dnode3 -c alternativeRole -v 0
system sh/cfg.sh -n dnode4 -c alternativeRole -v 0

system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 1000

system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator

print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start

print ============== step1: start dnode1/dnode2/dnode3
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 3000
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000

print ============== step2: create db1 with replica 3
$db = db1
print create database $db replica 3
#sql create database $db replica 3 maxTables $totalTableNum
sql create database $db replica 3
sql use $db

print ============== step3: create stable stb1
$stb = stb1
sql create table $stb (ts timestamp, c1 int, c2 int) tags(t1 int, t2 binary(8))

print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5
run_back unique/cluster/client1_0.sim
#run_back unique/cluster/client1_1.sim
#run_back unique/big_cluster/client1_2.sim
#run_back unique/big_cluster/client1_3.sim
#run_back unique/big_cluster/client1_4.sim
#run_back unique/big_cluster/client1_5.sim
#run_back unique/big_cluster/client1_6.sim
#run_back unique/big_cluster/client1_7.sim
#run_back unique/big_cluster/client1_8.sim
#run_back unique/big_cluster/client1_9.sim

print wait for a while to let clients start insert data
sleep 5000

$loop_cnt = 0
loop_cluster_do:
print **** **** **** START loop cluster do **** **** **** ****
print ============== step5: start dnode4 and add into cluster, then wait dnode4 ready
system sh/exec.sh -n dnode4 -s start
sql create dnode $hostname4

wait_dnode4_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 4 then
  sleep 2000
  goto wait_dnode4_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode4Status = $data4_4
elif $loop_cnt == 1 then
  $dnode4Status = $data4_6
elif $loop_cnt == 2 then
  $dnode4Status = $data4_8
else then
  print **** **** **** END loop cluster do 2**** **** **** ****
  return
endi

if $dnode4Status != ready then
  sleep 2000
  goto wait_dnode4_ready_0
endi


print ============== step6: stop and drop dnode1, then remove data dir of dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT

$cnt = 0
wait_dnode1_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 4 then
  sleep 2000
  goto wait_dnode1_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4

$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode1Status = $data4_1
elif $loop_cnt == 1 then
  $dnode1Status = $data4_5
elif $loop_cnt == 2 then
  $dnode1Status = $data4_7
elif $loop_cnt == 3 then
  $dnode1Status = $data4_9
else then
  print **** **** **** END loop cluster do 1**** **** **** ****
  return
endi

if $dnode1Status != offline then
  sleep 2000
  goto wait_dnode1_offline_0
endi

sql drop dnode $hostname1
system rm -rf ../../../sim/dnode1

print ============== step7: stop dnode2, because mnodes < 50%, so clusert don't provide services
system sh/exec.sh -n dnode2 -s stop -x SIGINT

sql show dnodes -x wait_dnode2_offline_0
if $rows != 3 then
  sleep 2000
  goto wait_dnode2_offline_0
endi
wait_dnode2_offline_0:

#$cnt = 0
#wait_dnode2_offline_0:
#$cnt = $cnt + 1
#if $cnt == 10 then
#  return -1
#endi
#sql show dnodes -x wait_dnode2_offline_0
#if $rows != 3 then
#  sleep 2000
#  goto wait_dnode2_offline_0
#endi
#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#$dnode1Status = $data4_1
#$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#
#if $dnode2Status != offline then
#  sleep 2000
#  goto wait_dnode1_offline_0
#endi

print ============== step8: restart dnode2, then wait sync end
system sh/exec.sh -n dnode2 -s start

$cnt = 0
wait_dnode2_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode2_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $dnode2Status != ready then
  sleep 2000
  goto wait_dnode2_ready_0
endi

print ============== step9: stop dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s stop -x SIGINT

$cnt = 0
wait_dnode3_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode3_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $dnode3Status != offline then
  sleep 2000
  goto wait_dnode3_offline_0
endi

print ============== step10: restart dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s start

$cnt = 0
wait_dnode3_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode3_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $dnode3Status != ready then
  sleep 2000
  goto wait_dnode3_ready_0
endi

print ============== step11: stop dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s stop -x SIGINT

$cnt = 0
wait_dnode4_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode4_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode4Status = $data4_4
elif $loop_cnt == 1 then
  $dnode4Status = $data4_6
elif $loop_cnt == 2 then
  $dnode4Status = $data4_8
else then
  print **** **** **** END loop cluster do 2**** **** **** ****
  return
endi

if $dnode4Status != offline then
  sleep 2000
  goto wait_dnode4_offline_0
endi

print ============== step12: restart dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s start

$cnt = 0
wait_dnode4_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode4_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode4Status = $data4_4
elif $loop_cnt == 1 then
  $dnode4Status = $data4_6
elif $loop_cnt == 2 then
  $dnode4Status = $data4_8
else then
  print **** **** **** END loop cluster do 2**** **** **** ****
  return
endi

if $dnode4Status != ready then
  sleep 2000
  goto wait_dnode4_ready_0
endi

print ============== step13: alter replica 2
sql alter database $db replica 2
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1

if $data0_5 != 2 then
  print rplica is not modify to 2, error!!!!!!
  return
endi

print ============== step14: stop and drop dnode4, then remove data dir of dnode4
system sh/exec.sh -n dnode4 -s stop -x SIGINT

$cnt = 0
wait_dnode4_offline_1:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode4_offline_1
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4

$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode4Status = $data4_4
elif $loop_cnt == 1 then
  $dnode4Status = $data4_6
elif $loop_cnt == 2 then
  $dnode4Status = $data4_8
else then
  print **** **** **** END loop cluster do 2**** **** **** ****
  return
endi

if $dnode4Status != offline then
  sleep 2000
  goto wait_dnode4_offline_1
endi

sql drop dnode $hostname4
system rm -rf ../../../sim/dnode4


print ============== step15: alter replica 1
sql alter database $db replica 1
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1

if $data0_5 != 1 then
  print rplica is not modify to 1, error!!!!!!
  return
endi


print ============== step16: alter replica 2
sql alter database $db replica 1
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1

if $data0_5 != 2 then
  print rplica is not modify to 2, error!!!!!!
  return
endi

print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready
system sh/exec.sh -n dnode1 -s start
sql create dnode $hostname1

wait_dnode1_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode1_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode1Status = $data4_1
elif $loop_cnt == 1 then
  $dnode1Status = $data4_5
elif $loop_cnt == 2 then
  $dnode1Status = $data4_7
elif $loop_cnt == 3 then
  $dnode1Status = $data4_9
else then
  print **** **** **** END loop cluster do 3**** **** **** ****
  return
endi

if $dnode1Status != ready then
  sleep 2000
  goto wait_dnode1_ready_0
endi

print ============== step18: alter replica 3
sql alter database $db replica 3
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1

if $data0_5 != 3 then
  print rplica is not modify to 3, error!!!!!!
  return
endi

$loop_cnt = $loop_cnt + 1
goto loop_cluster_do
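The cluster test above repeats one pattern in every step: query `show dnodes`, compare the reported status, sleep about 2 seconds, and retry up to ten times before giving up. A small, self-contained Python sketch of that polling loop (purely illustrative; `get_status` is a placeholder for whatever reports the dnode state):

```python
import time


def wait_for_status(get_status, expected, attempts=10, delay_sec=2.0):
    """Poll until get_status() returns the expected value.

    Mirrors the wait_dnodeX_* loops in the sim script: retry every ~2 s and
    give up (return False) after ten attempts.
    """
    for _ in range(attempts):
        if get_status() == expected:
            return True
        time.sleep(delay_sec)
    return False


# Usage sketch: wait until a (stubbed) status source reports "ready".
states = iter(["offline", "offline", "ready"])
assert wait_for_status(lambda: next(states), "ready", delay_sec=0.0)
```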