Merge branch 'develop' into feature/crash_gen

Steven Li 2020-05-09 16:07:45 -07:00
commit b52b452972
73 changed files with 1632 additions and 1166 deletions

View File

@ -72,8 +72,8 @@ matrix:
fi
done
grep 'definitely lost' mem-error-out.txt | uniq | tee uniq-definitely-lost-out.txt
for defiMemError in `cat uniq-definitely-lost-out.txt | awk '{print $7}'`
grep 'definitely lost:' mem-error-out.txt | uniq | tee uniq-definitely-lost-out.txt
for defiMemError in `cat uniq-definitely-lost-out.txt | awk '{print $4}'`
do
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 16 ]; then
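The awk field changes from $7 to $4 above because the grep now anchors on the colon form "definitely lost:". In valgrind's usual summary line, e.g. "==1234== definitely lost: 48 bytes in 3 blocks", the byte count is the fourth whitespace-separated field. A minimal C sketch (illustrative only, not part of this commit; the sample line is an assumption about valgrind's output format) that extracts the same field:

/* illustrative sketch, not part of this commit */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the 4th whitespace-separated field of a valgrind summary line,
 * mirroring awk '{print $4}' on a "definitely lost:" line. */
static long definitelyLostBytes(const char *summaryLine) {
    char buf[256];
    strncpy(buf, summaryLine, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    char *save = NULL;
    char *tok = strtok_r(buf, " \t", &save);
    for (int field = 1; tok != NULL && field < 4; ++field) {
        tok = strtok_r(NULL, " \t", &save);
    }
    return tok ? strtol(tok, NULL, 10) : 0;  /* note: "2,048" parses as 2; commas are not handled here */
}

int main(void) {
    const char *line = "==1234== definitely lost: 48 bytes in 3 blocks";  /* assumed format */
    printf("definitely lost: %ld bytes\n", definitelyLostBytes(line));
    return 0;
}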

View File

@ -168,9 +168,10 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
return TSDB_CODE_SUCCESS;
}
// (uid, tid) + VGID + TAGSIZE + VARSTR_HEADER_SIZE
if (functionId == TSDB_FUNC_TID_TAG) { // todo use struct
*type = TSDB_DATA_TYPE_BINARY;
*bytes = dataBytes + sizeof(int64_t) + sizeof(int32_t) + sizeof(int32_t); // (uid, tid) + VGID + TAGSIZE
*bytes = dataBytes + sizeof(int64_t) + sizeof(int32_t) + sizeof(int32_t) + VARSTR_HEADER_SIZE;
*interBytes = *bytes;
return TSDB_CODE_SUCCESS;
}
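The new byte count adds VARSTR_HEADER_SIZE because the TSDB_FUNC_TID_TAG result is returned as one variable-length string cell whose payload is (uid, tid) + vgId + the tag value, as the comment in the hunk says. A minimal sketch of that arithmetic, assuming the header is a 2-byte length prefix (the typedef and constant below are local stand-ins, not the real headers):

/* illustrative sketch, not part of this commit */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t VarDataLenT;                          /* assumed: 2-byte length prefix */
#define VARSTR_HEADER_SIZE ((int32_t)sizeof(VarDataLenT))

/* Result-column width for TSDB_FUNC_TID_TAG as computed in the new line above:
 * tag payload + uid + tid + vgId + var-string header. */
static int32_t tidTagResultBytes(int32_t tagBytes) {
    return tagBytes
         + (int32_t)sizeof(int64_t)   /* uid  */
         + (int32_t)sizeof(int32_t)   /* tid  */
         + (int32_t)sizeof(int32_t)   /* vgId */
         + VARSTR_HEADER_SIZE;        /* length prefix */
}

int main(void) {
    printf("bytes for an 8-byte tag: %d\n", tidTagResultBytes(8));  /* 8 + 8 + 4 + 4 + 2 = 26 */
    return 0;
}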
@ -5285,10 +5286,10 @@ SQLAggFuncElem aAggs[] = {{
},
{
// 34
"tid_tag", // return table id and the corresponding tags for join match
"tid_tag", // return table id and the corresponding tags for join match and subscribe
TSDB_FUNC_TID_TAG,
TSDB_FUNC_TID_TAG,
TSDB_FUNCSTATE_MO,
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE,
function_setup,
noop1,
noop2,

View File

@ -156,7 +156,8 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 3);
if (i >= tscGetNumOfColumns(pMeta) && tscGetNumOfTags(pMeta) != 0) {
char* output = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i;
STR_WITH_SIZE_TO_VARSTR(output, "TAG", 3);
const char *src = "TAG";
STR_WITH_SIZE_TO_VARSTR(output, src, strlen(src));
}
}
@ -191,7 +192,8 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
// tag value
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 3);
char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i;
STR_WITH_SIZE_TO_VARSTR(target, "TAG", 3);
const char *src = "TAG";
STR_WITH_SIZE_TO_VARSTR(target, src, strlen(src));
pTagValue += pSchema[i].bytes;
}
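Both hunks in this file swap a hard-coded literal length for strlen(src) when filling a var-string cell, so the stored length can never drift from the actual string. A minimal sketch of what the STR_WITH_SIZE_TO_VARSTR pattern is assumed to do (local stand-ins below, not the real macro):

/* illustrative sketch, not part of this commit */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t VarDataLenT;                  /* assumed 2-byte length prefix */
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)

/* Write "length header | payload" into a var-string cell, the way
 * STR_WITH_SIZE_TO_VARSTR is assumed to behave (no terminating NUL needed). */
static void strToVarstr(char *cell, const char *src, VarDataLenT len) {
    memcpy(cell, &len, sizeof(len));                 /* length header */
    memcpy(cell + VARSTR_HEADER_SIZE, src, len);     /* payload       */
}

int main(void) {
    char cell[32];
    const char *src = "TAG";
    strToVarstr(cell, src, (VarDataLenT)strlen(src));  /* strlen keeps header and payload in sync */

    VarDataLenT len;
    memcpy(&len, cell, sizeof(len));
    printf("len=%u payload=%.*s\n", (unsigned)len, (int)len, cell + VARSTR_HEADER_SIZE);
    return 0;
}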

View File

@ -370,10 +370,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
char* pMsg = pCmd->payload + tsRpcHeadSize;
pMsg += sizeof(SMgmtHead);
char* pMsg = pCmd->payload;
SCMCfgDnodeMsg* pCfg = (SCMCfgDnodeMsg*)pMsg;
pDCL->a[0].n = strdequote(pDCL->a[0].z);
strncpy(pCfg->ep, pDCL->a[0].z, pDCL->a[0].n);
strncpy(pCfg->config, pDCL->a[1].z, pDCL->a[1].n);
@ -1123,7 +1124,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
if (addProjectionExprAndResultField(pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_INVALID_SQL;
}
} else if (pItem->pNode->nSQLOptr >= TK_COUNT && pItem->pNode->nSQLOptr <= TK_AVG_IRATE) {
} else if (pItem->pNode->nSQLOptr >= TK_COUNT && pItem->pNode->nSQLOptr <= TK_TBID) {
// sql function in selection clause, append sql function info in pSqlCmd structure sequentially
if (addExprAndResultField(pQueryInfo, outputIndex, pItem, true) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_INVALID_SQL;
@ -1468,7 +1469,8 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema,
int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult) {
STableMetaInfo* pTableMetaInfo = NULL;
int32_t optr = pItem->pNode->nSQLOptr;
int32_t optr = pItem->pNode->nSQLOptr;
const char* msg1 = "not support column types";
const char* msg2 = "invalid parameters";
@ -1476,7 +1478,8 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
const char* msg4 = "invalid table name";
const char* msg5 = "parameter is out of range [0, 100]";
const char* msg6 = "function applied to tags not allowed";
const char* msg7 = "normal table can not apply this function";
switch (optr) {
case TK_COUNT: {
if (pItem->pNode->pParam != NULL && pItem->pNode->pParam->nExpr != 1) {
@ -1858,13 +1861,68 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
}
}
return TSDB_CODE_SUCCESS;
};
case TK_TBID: {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(pQueryInfo->msg, msg7);
}
// no parameters or more than one parameter for function
if (pItem->pNode->pParam == NULL || pItem->pNode->pParam->nExpr != 1) {
return invalidSqlErrMsg(pQueryInfo->msg, msg2);
}
tSQLExpr* pParam = pItem->pNode->pParam->a[0].pNode;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(&pParam->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
// functions can not be applied to normal columns
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (index.columnIndex < numOfCols) {
return invalidSqlErrMsg(pQueryInfo->msg, msg6);
}
index.columnIndex -= numOfCols;
// 2. valid the column type
int16_t colType = pSchema[index.columnIndex].type;
if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
SSchema s = pTagSchema[index.columnIndex];
int16_t bytes = 0;
int16_t type = 0;
int16_t inter = 0;
int32_t ret = getResultDataInfo(s.type, s.bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0);
assert(ret == TSDB_CODE_SUCCESS);
s.type = type;
s.bytes = bytes;
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY);
tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG);
return TSDB_CODE_SUCCESS;
}
default:
return TSDB_CODE_INVALID_SQL;
}
}
// todo refactor
@ -2197,14 +2255,14 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
char* portStr = strtok(NULL, &delim);
if (!validateIpAddress(ipStr, strlen(ipStr))) {
memset(pCmd->payload, 0, tListLen(pCmd->payload));
memset(pCmd->payload, 0, strlen(pCmd->payload));
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
uint16_t port = (uint16_t)strtol(portStr, NULL, 10);
if (port <= 0 || port > 65535) {
memset(pCmd->payload, 0, tListLen(pCmd->payload));
memset(pCmd->payload, 0, strlen(pCmd->payload));
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
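In the two memset changes above, pCmd->payload is a pointer field, so tListLen (presumably the usual sizeof(x)/sizeof((x)[0]) idiom) degenerates to sizeof(char *) there; clearing strlen(pCmd->payload) bytes wipes exactly what has been written so far. A small standalone sketch of the difference, with a hypothetical macro definition:

/* illustrative sketch, not part of this commit */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define tListLen(x) (sizeof(x) / sizeof((x)[0]))   /* assumed definition */

int main(void) {
    char  stackBuf[64] = "hello";
    char *heapBuf      = strdup("hello");

    /* On a real array the macro yields the capacity... */
    printf("tListLen(stackBuf) = %zu\n", tListLen(stackBuf));  /* 64 */
    /* ...but on a pointer it is just sizeof(char *). */
    printf("tListLen(heapBuf)  = %zu\n", tListLen(heapBuf));   /* 8 on a 64-bit build */

    /* Clearing what was actually written is what the new code does. */
    memset(heapBuf, 0, strlen(heapBuf));
    free(heapBuf);
    return 0;
}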

View File

@ -1005,13 +1005,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildCfgDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMCfgDnodeMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
pCmd->msgType = TSDB_MSG_TYPE_MD_CONFIG_DNODE;
pCmd->msgType = TSDB_MSG_TYPE_CM_CONFIG_DNODE;
return TSDB_CODE_SUCCESS;
}
@ -1189,7 +1183,7 @@ int32_t tscBuildKillMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &(pSql->cmd);
int32_t size = minMsgSize() + sizeof(SMgmtHead) + sizeof(SCMCreateTableMsg);
int32_t size = minMsgSize() + sizeof(SCMCreateTableMsg);
SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo;
if (pCreateTableInfo->type == TSQL_CREATE_TABLE_FROM_STABLE) {
@ -1277,7 +1271,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
return minMsgSize() + sizeof(SMgmtHead) + sizeof(SCMAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) +
return minMsgSize() + sizeof(SCMAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) +
TSDB_EXTRA_PAYLOAD_SIZE;
}
@ -1534,6 +1528,7 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
* no used 4B
**/
int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
#if 0
SSqlCmd *pCmd = &pSql->cmd;
// copy payload content to temp buff
@ -1566,6 +1561,8 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pCmd->payloadLen);
return pCmd->payloadLen;
#endif
return 0;
}
//static UNUSED_FUNC int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) {
@ -1765,7 +1762,7 @@ int tscEstimateHeartBeatMsgLength(SSqlObj *pSql) {
int size = 0;
STscObj *pObj = pSql->pTscObj;
size += tsRpcHeadSize + sizeof(SMgmtHead);
size += tsRpcHeadSize;
size += sizeof(SQqueryList);
SSqlObj *tpSql = pObj->sqlList;
@ -1801,13 +1798,9 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return -1;
}
pMsg = pCmd->payload + tsRpcHeadSize;
pMsg = pCmd->payload;
pStart = pMsg;
SMgmtHead *pMgmt = (SMgmtHead *)pMsg;
strcpy(pMgmt->db, pObj->db);
pMsg += sizeof(SMgmtHead);
pMsg = tscBuildQueryStreamDesc(pMsg, pObj);
pthread_mutex_unlock(&pObj->mutex);

View File

@ -172,7 +172,7 @@ static void syncConnCallback(void *param, TAOS_RES *tres, int code) {
}
TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
tscTrace("try to create a connection to %s", ip);
tscTrace("try to create a connection to %s:%u, user:%s db:%s", ip, port, user, db);
STscObj *pObj = taosConnectImpl(ip, user, pass, db, port, NULL, NULL, NULL);
if (pObj != NULL) {
@ -191,7 +191,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha
return NULL;
}
tscTrace("%p DB connection is opening", pObj);
tscTrace("%p DB connection is opening, dnodeConn:%p", pObj, pObj->pDnodeConn);
// version compare only requires the first 3 segments of the version string
int code = taosCheckVersion(version, taos_get_server_info(pObj), 3);

View File

@ -412,10 +412,6 @@ static void updateQueryTimeRange(SQueryInfo* pQueryInfo, int64_t st, int64_t et)
static void tSIntersectionAndLaunchSecQuery(SJoinSupporter* pSupporter, SSqlObj* pSql) {
SSqlObj* pParentSql = pSupporter->pObj;
// SSqlCmd* pCmd = &pSql->cmd;
// SSqlRes* pRes = &pSql->res;
// SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex);
// if (tscNonOrderedProjectionQueryOnSTable(pParentQueryInfo, 0)) {
@ -602,21 +598,6 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// if (pSupporter->pState->code != TSDB_CODE_SUCCESS) {
// tscError("%p abort query due to other subquery failure. code:%d, global code:%s", pSql, numOfRows,
// tstrerror(pSupporter->pState->code));
//
// quitAllSubquery(pParentSql, pSupporter);
// return;
// }
//
// if (numOfRows < 0) {
// tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
// pSupporter->pState->code = numOfRows;
// quitAllSubquery(pParentSql, pSupporter);
// return;
// }
// response of tag retrieve
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) {
if (numOfRows == 0 || pSql->res.completed) {
@ -1455,7 +1436,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
return;
} else { // reach the maximum retry count, abort
atomic_val_compare_exchange_32(&pState->code, TSDB_CODE_SUCCESS, numOfRows);
tscError("%p sub:%p retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%d", pPObj, pSql,
tscError("%p sub:%p retrieve failed,code:%d,orderOfSub:%d failed.no more retry,set global code:%d", pPObj, pSql,
numOfRows, subqueryIndex, tstrerror(pState->code));
}
}

View File

@ -69,6 +69,8 @@ int32_t tscInitRpc(const char *user, const char *secret, void** pDnodeConn) {
if (*pDnodeConn == NULL) {
tscError("failed to init connection to TDengine");
return -1;
} else {
tscTrace("dnodeConn:%p is created, user:%s", *pDnodeConn, user);
}
}

View File

@ -766,7 +766,7 @@ void tscCloseTscObj(STscObj* pObj) {
rpcClose(pObj->pDnodeConn);
}
tscTrace("%p DB connection is closed", pObj);
tscTrace("%p DB connection is closed, dnodeConn:%p", pObj, pObj->pDnodeConn);
tfree(pObj);
}

View File

@ -204,10 +204,19 @@ static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
void taosSetAllDebugFlag() {
for (int32_t i = 0; i < tsGlobalConfigNum; ++i) {
SGlobalCfg *cfg = &tsGlobalConfig[i];
if ((cfg->cfgType & TSDB_CFG_CTYPE_B_LOG) && cfg->cfgType == TAOS_CFG_VTYPE_INT32) {
*((int32_t*)cfg->ptr) = debugFlag;
}
mdebugFlag = debugFlag;
sdbDebugFlag = debugFlag;
dDebugFlag = debugFlag;
vDebugFlag = debugFlag;
cdebugFlag = debugFlag;
jnidebugFlag = debugFlag;
odbcdebugFlag = debugFlag;
httpDebugFlag = debugFlag;
monitorDebugFlag = debugFlag;
rpcDebugFlag = debugFlag;
uDebugFlag = debugFlag;
sDebugFlag = debugFlag;
//qdebugFlag = debugFlag;
}
uPrint("all debug flag are set to %d", debugFlag);
}
@ -1189,6 +1198,10 @@ void taosInitGlobalCfg() {
}
bool taosCheckGlobalCfg() {
if (debugFlag == 135 || debugFlag == 199) {
taosSetAllDebugFlag();
}
taosGetFqdn(tsLocalEp);
sprintf(tsLocalEp + strlen(tsLocalEp), ":%d", tsServerPort);
uPrint("localEp is %s", tsLocalEp);

View File

@ -61,7 +61,7 @@ int32_t dnodeInitServer() {
rpcInit.cfp = dnodeProcessReqMsgFromDnode;
rpcInit.sessions = 100;
rpcInit.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 2000;
rpcInit.idleTime = tsShellActivityTimer * 1000;
tsDnodeServerRpc = rpcOpen(&rpcInit);
if (tsDnodeServerRpc == NULL) {
@ -122,7 +122,7 @@ int32_t dnodeInitClient() {
rpcInit.ufp = dnodeUpdateIpSet;
rpcInit.sessions = 100;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 2000;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "t";
rpcInit.ckey = "key";
rpcInit.secret = "secret";

View File

@ -86,7 +86,7 @@ int32_t dnodeInitShell() {
rpcInit.cfp = dnodeProcessMsgFromShell;
rpcInit.sessions = TSDB_SESSIONS_PER_DNODE;
rpcInit.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1500;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.afp = dnodeRetrieveUserAuthInfo;
tsDnodeShellRpc = rpcOpen(&rpcInit);
@ -137,7 +137,6 @@ void dnodeProcessMsgFromShell(SRpcMsg *pMsg) {
}
}
static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey) {
return TSDB_CODE_SUCCESS;
}

View File

@ -85,7 +85,7 @@ void dnodeCleanupWrite() {
void dnodeDispatchToVnodeWriteQueue(SRpcMsg *pMsg) {
char *pCont = (char *)pMsg->pCont;
if (pMsg->msgType == TSDB_MSG_TYPE_SUBMIT || pMsg->msgType == TSDB_MSG_TYPE_MD_DROP_STABLE) {
if (pMsg->msgType == TSDB_MSG_TYPE_SUBMIT) {
SMsgDesc *pDesc = (SMsgDesc *)pCont;
pDesc->numOfVnodes = htonl(pDesc->numOfVnodes);
pCont += sizeof(SMsgDesc);

View File

@ -317,10 +317,6 @@ typedef struct {
int8_t flag;
} SCMCreateUserMsg, SCMAlterUserMsg;
typedef struct {
char db[TSDB_TABLE_ID_LEN + 1];
} SMgmtHead;
typedef struct {
int32_t contLen;
int32_t vgId;
@ -330,6 +326,7 @@ typedef struct {
} SMDDropTableMsg;
typedef struct {
int32_t contLen;
int32_t vgId;
int64_t uid;
char tableId[TSDB_TABLE_ID_LEN + 1];

View File

@ -206,19 +206,20 @@
#define TK_SUM_IRATE 188
#define TK_AVG_RATE 189
#define TK_AVG_IRATE 190
#define TK_SEMI 191
#define TK_NONE 192
#define TK_PREV 193
#define TK_LINEAR 194
#define TK_IMPORT 195
#define TK_METRIC 196
#define TK_TBNAME 197
#define TK_JOIN 198
#define TK_METRICS 199
#define TK_STABLE 200
#define TK_INSERT 201
#define TK_INTO 202
#define TK_VALUES 203
#define TK_TBID 191
#define TK_SEMI 192
#define TK_NONE 193
#define TK_PREV 194
#define TK_LINEAR 195
#define TK_IMPORT 196
#define TK_METRIC 197
#define TK_TBNAME 198
#define TK_JOIN 199
#define TK_METRICS 200
#define TK_STABLE 201
#define TK_INSERT 202
#define TK_INTO 203
#define TK_VALUES 204
#endif

View File

@ -634,7 +634,7 @@ void *readMetric(void *sarg) {
fprintf(fp, "Querying On %d records:\n", totalData);
for (int j = 0; j < n; j++) {
char condition[BUFFER_SIZE] = "\0";
char condition[BUFFER_SIZE - 30] = "\0";
char tempS[BUFFER_SIZE] = "\0";
int m = 10 < num_of_tables ? 10 : num_of_tables;
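Shrinking condition to BUFFER_SIZE - 30 presumably leaves headroom for the SQL text later composed around it into a full BUFFER_SIZE buffer, so the result can be proven to fit (which also quiets format-truncation warnings on newer compilers). A small sketch of the pattern under assumed sizes:

/* illustrative sketch, not part of this commit */
#include <stdio.h>

#define BUFFER_SIZE 256   /* assumed; the real value lives in taosdemo */

int main(void) {
    char condition[BUFFER_SIZE - 30] = "tbname in ('t0','t1')";
    char sqlcmd[BUFFER_SIZE];

    /* The fixed SQL text around %s stays within the 30 bytes of headroom,
     * so the composed statement always fits in sqlcmd. */
    int n = snprintf(sqlcmd, sizeof(sqlcmd), "select count(*) from m1 where %s", condition);
    printf("%d bytes: %s\n", n, sqlcmd);
    return 0;
}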

View File

@ -677,9 +677,11 @@ static int32_t mgmtRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
if (pDb->status == TSDB_DB_STATUS_READY) {
STR_WITH_SIZE_TO_VARSTR(pWrite, "ready", 5);
const char *src = "ready";
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
} else {
STR_WITH_SIZE_TO_VARSTR(pWrite, "dropping", 8);
const char *src = "dropping";
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
}
cols++;

View File

@ -223,7 +223,7 @@ void mgmtProcessCfgDnodeMsg(SQueuedMsg *pMsg) {
if (pCmCfgDnode->ep[0] == 0) {
strcpy(pCmCfgDnode->ep, tsLocalEp);
} else {
strcpy(pCmCfgDnode->ep, pCmCfgDnode->ep);
// TODO temporary disabled for compiling: strcpy(pCmCfgDnode->ep, pCmCfgDnode->ep);
}
if (strcmp(pMsg->pUser->user, "root") != 0) {
@ -252,7 +252,7 @@ void mgmtProcessCfgDnodeMsg(SQueuedMsg *pMsg) {
}
static void mgmtProcessCfgDnodeMsgRsp(SRpcMsg *rpcMsg) {
mPrint("cfg vnode rsp is received, result:%s", tstrerror(rpcMsg->code));
mPrint("cfg dnode rsp is received");
}
void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) {

View File

@ -55,11 +55,6 @@ int32_t mgmtStartSystem() {
return -1;
}
if (grantInit() < 0) {
mError("failed to init grant");
return -1;
}
if (mgmtInitUsers() < 0) {
mError("failed to init users");
return -1;
@ -99,6 +94,11 @@ int32_t mgmtStartSystem() {
mError("failed to init balance")
}
if (grantInit() < 0) {
mError("failed to init grant");
return -1;
}
if (mgmtInitServer() < 0) {
return -1;
}
@ -132,20 +132,19 @@ int32_t mgmtInitSystem() {
void mgmtCleanUpSystem() {
mPrint("starting to clean up mgmt");
grantCleanUp();
mgmtCleanupMnodes();
balanceCleanUp();
tsMgmtIsRunning = false;
mgmtCleanUpShell();
mgmtCleanupServer();
mgmtCleanUpAccts();
grantCleanUp();
balanceCleanUp();
sdbCleanUp();
mgmtCleanupMnodes();
mgmtCleanUpTables();
mgmtCleanUpVgroups();
mgmtCleanUpDbs();
mgmtCleanupDnodes();
mgmtCleanUpUsers();
sdbCleanUp();
taosTmrCleanUp(tsMgmtTmr);
tsMgmtIsRunning = false;
mgmtCleanUpAccts();
mPrint("mgmt is cleaned up");
}

View File

@ -278,7 +278,7 @@ void sdbUpdateSync() {
sdbPrint("mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn, syncCfg.nodeInfo[i].nodePort);
}
SSyncInfo syncInfo;
SSyncInfo syncInfo = {0};
syncInfo.vgId = 1;
syncInfo.version = sdbGetVersion();
syncInfo.syncCfg = syncCfg;
@ -323,11 +323,19 @@ void sdbCleanUp() {
if (tsSdbObj.status != SDB_STATUS_SERVING) return;
tsSdbObj.status = SDB_STATUS_CLOSING;
syncStop(tsSdbObj.sync);
walClose(tsSdbObj.wal);
if (tsSdbObj.sync) {
syncStop(tsSdbObj.sync);
tsSdbObj.sync = NULL;
}
if (tsSdbObj.wal) {
walClose(tsSdbObj.wal);
tsSdbObj.wal = NULL;
}
sem_destroy(&tsSdbObj.sem);
pthread_mutex_destroy(&tsSdbObj.mutex);
memset(&tsSdbObj, 0, sizeof(tsSdbObj));
}
void sdbIncRef(void *handle, void *pRow) {
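sdbCleanUp now follows a check-release-clear pattern for each handle and then zeroes the whole object, so cleanup after a partial init, or a repeated cleanup, never stops or closes the same resource twice. A minimal sketch of that idempotent teardown shape, using a hypothetical stand-in type:

/* illustrative sketch, not part of this commit */
#include <stdlib.h>
#include <string.h>

typedef struct { void *sync; void *wal; } SSdbLike;   /* hypothetical stand-in */

/* Check, release, clear: a second call (or cleanup after partial init) is a no-op. */
static void cleanUp(SSdbLike *obj) {
    if (obj->sync) {
        free(obj->sync);          /* stands in for syncStop() */
        obj->sync = NULL;
    }
    if (obj->wal) {
        free(obj->wal);           /* stands in for walClose() */
        obj->wal = NULL;
    }
    memset(obj, 0, sizeof(*obj)); /* leave the object in a known state */
}

int main(void) {
    SSdbLike obj = { malloc(1), NULL };   /* deliberately only half-initialized */
    cleanUp(&obj);   /* releases sync, skips wal */
    cleanUp(&obj);   /* safe: everything is already NULL */
    return 0;
}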

View File

@ -66,21 +66,26 @@ int32_t mgmtInitShell() {
tsMgmtTmr = taosTmrInit((tsMaxShellConns) * 3, 200, 3600000, "MND");
tsMgmtTranQhandle = taosInitScheduler(tsMaxShellConns, 1, "mnodeT");
tsQhandleCache = taosCacheInit(tsMgmtTmr, 2);
tsQhandleCache = taosCacheInit(tsMgmtTmr, 10);
return 0;
}
void mgmtCleanUpShell() {
if (tsMgmtTranQhandle) {
taosCleanUpScheduler(tsMgmtTranQhandle);
tsMgmtTranQhandle = NULL;
if (tsMgmtTmr != NULL){
taosTmrCleanUp(tsMgmtTmr);
tsMgmtTmr = NULL;
}
if (tsQhandleCache) {
if (tsQhandleCache != NULL) {
taosCacheCleanup(tsQhandleCache);
tsQhandleCache = NULL;
}
if (tsMgmtTranQhandle != NULL) {
taosCleanUpScheduler(tsMgmtTranQhandle);
tsMgmtTranQhandle = NULL;
}
}
void mgmtAddShellMsgHandle(uint8_t showType, void (*fp)(SQueuedMsg *queuedMsg)) {

View File

@ -701,10 +701,10 @@ static void mgmtProcessDropTableMsg(SQueuedMsg *pMsg) {
}
if (pMsg->pTable->type == TSDB_SUPER_TABLE) {
mTrace("table:%s, start to drop stable", pDrop->tableId);
mPrint("table:%s, start to drop stable", pDrop->tableId);
mgmtProcessDropSuperTableMsg(pMsg);
} else {
mTrace("table:%s, start to drop ctable", pDrop->tableId);
mPrint("table:%s, start to drop ctable", pDrop->tableId);
mgmtProcessDropChildTableMsg(pMsg);
}
}
@ -802,29 +802,32 @@ static void mgmtProcessDropSuperTableMsg(SQueuedMsg *pMsg) {
int32_t vgId = pStable->vgList[vg];
if (vgId == 0) break;
SVgObj *pVgroup = mgmtGetVgroup(vgId);
if (pVgroup == NULL) break;
SMDDropSTableMsg *pDrop = rpcMallocCont(sizeof(SMDDropSTableMsg));
pDrop->contLen = htonl(sizeof(SMDDropSTableMsg));
pDrop->vgId = htonl(vgId);
pDrop->uid = htobe64(pStable->uid);
mgmtExtractTableName(pStable->info.tableId, pDrop->tableId);
SVgObj *pVgroup = mgmtGetVgroup(vgId);
if (pVgroup != NULL) {
SRpcIpSet ipSet = mgmtGetIpSetFromVgroup(pVgroup);
SRpcMsg rpcMsg = {.pCont = pDrop, .contLen = sizeof(SMDDropSTableMsg), .msgType = TSDB_MSG_TYPE_MD_DROP_STABLE};
dnodeSendMsgToDnode(&ipSet, &rpcMsg);
mgmtDecVgroupRef(pVgroup);
}
mPrint("stable:%s, send drop stable msg to vgId:%d", pStable->info.tableId, vgId);
SRpcIpSet ipSet = mgmtGetIpSetFromVgroup(pVgroup);
SRpcMsg rpcMsg = {.pCont = pDrop, .contLen = sizeof(SMDDropSTableMsg), .msgType = TSDB_MSG_TYPE_MD_DROP_STABLE};
dnodeSendMsgToDnode(&ipSet, &rpcMsg);
mgmtDecVgroupRef(pVgroup);
}
} else {
SSdbOper oper = {
.type = SDB_OPER_GLOBAL,
.table = tsSuperTableSdb,
.pObj = pStable
};
int32_t code = sdbDeleteRow(&oper);
mLPrint("stable:%s, is dropped from sdb, result:%s", pStable->info.tableId, tstrerror(code));
mgmtSendSimpleResp(pMsg->thandle, code);
}
}
SSdbOper oper = {
.type = SDB_OPER_GLOBAL,
.table = tsSuperTableSdb,
.pObj = pStable
};
int32_t code = sdbDeleteRow(&oper);
mLPrint("stable:%s, is dropped from sdb, result:%s", pStable->info.tableId, tstrerror(code));
mgmtSendSimpleResp(pMsg->thandle, code);
}
static int32_t mgmtFindSuperTableTagIndex(SSuperTableObj *pStable, const char *tagName) {
@ -1303,7 +1306,7 @@ static void mgmtProcessSuperTableVgroupMsg(SQueuedMsg *pMsg) {
}
static void mgmtProcessDropSuperTableRsp(SRpcMsg *rpcMsg) {
mTrace("drop stable rsp received, handle:%p code:%s", rpcMsg->handle, tstrerror(rpcMsg->code));
mPrint("drop stable rsp received, result:%s", tstrerror(rpcMsg->code));
}
static void *mgmtBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableObj *pTable) {
@ -1540,7 +1543,7 @@ static void mgmtProcessDropChildTableMsg(SQueuedMsg *pMsg) {
SRpcIpSet ipSet = mgmtGetIpSetFromVgroup(pMsg->pVgroup);
mTrace("table:%s, send drop ctable msg", pDrop->tableId);
mPrint("table:%s, send drop ctable msg", pDrop->tableId);
SQueuedMsg *newMsg = mgmtCloneQueuedMsg(pMsg);
newMsg->ahandle = pMsg->pTable;
SRpcMsg rpcMsg = {
@ -1867,7 +1870,7 @@ static void mgmtProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
queueMsg->received++;
SChildTableObj *pTable = queueMsg->ahandle;
mTrace("table:%s, drop table rsp received, thandle:%p result:%s", pTable->info.tableId, queueMsg->thandle, tstrerror(rpcMsg->code));
mPrint("table:%s, drop table rsp received, thandle:%p result:%s", pTable->info.tableId, queueMsg->thandle, tstrerror(rpcMsg->code));
if (rpcMsg->code != TSDB_CODE_SUCCESS) {
mError("table:%s, failed to drop in dnode, reason:%s", pTable->info.tableId, tstrerror(rpcMsg->code));

View File

@ -309,11 +309,14 @@ static int32_t mgmtRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
if (pUser->superAuth) {
STR_WITH_SIZE_TO_VARSTR(pWrite, "super", 5);
const char *src = "super";
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
} else if (pUser->writeAuth) {
STR_WITH_SIZE_TO_VARSTR(pWrite, "writable", 8);
const char *src = "writable";
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
} else {
STR_WITH_SIZE_TO_VARSTR(pWrite, "readable", 8);
const char *src = "readable";
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
}
cols++;
@ -525,4 +528,4 @@ void mgmtDropAllUsers(SAcctObj *pAcct) {
}
mTrace("acct:%s, all users:%d is dropped from sdb", pAcct->user, numOfUsers);
}
}

View File

@ -494,11 +494,12 @@ int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pCo
cols++;
} else {
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_SIZE_TO_VARSTR(pWrite, "NULL", 4);
const char *src = "NULL";
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_SIZE_TO_VARSTR(pWrite, "NULL", 4);
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
cols++;
}
}

View File

@ -12,3 +12,5 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
ADD_LIBRARY(os ${SRC})
TARGET_LINK_LIBRARIES(os m rt)
ENDIF ()
SET_SOURCE_FILES_PROPERTIES(src/linuxSysPara.c PROPERTIES COMPILE_FLAGS -w)

View File

@ -23,13 +23,6 @@ extern "C" {
#include <stdio.h>
#include <stdlib.h>
#ifndef _ALPINE
#include <error.h>
#include <sys/sysctl.h>
#else
#include <linux/sysctl.h>
#endif
#include <argp.h>
#include <arpa/inet.h>
#include <assert.h>
@ -82,6 +75,7 @@ extern "C" {
#include <fcntl.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <error.h>
#define taosCloseSocket(x) \
{ \

View File

@ -0,0 +1,30 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TSYSCTL_H
#define TDENGINE_TSYSCTL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ALPINE
#include <error.h>
#include <sys/sysctl.h>
#else
#include <linux/sysctl.h>
#endif
#endif

View File

@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include "tsysctl.h"
#include "tconfig.h"
#include "tglobal.h"
#include "tulog.h"

View File

@ -94,6 +94,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
for (int k = 0; k < numOfRows; ++k) {
TAOS_ROW row = taos_fetch_row(result);
int32_t* length = taos_fetch_lengths(result);
// data row array begin
httpJsonItemToken(jsonBuf);
@ -129,7 +130,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
httpJsonStringForTransMean(jsonBuf, row[i], fields[i].bytes);
httpJsonStringForTransMean(jsonBuf, row[i], length[i]);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
if (timestampFormat == REST_TIMESTAMP_FMT_LOCAL_STRING) {
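For BINARY/NCHAR values the JSON encoder now takes the per-row length from taos_fetch_lengths() instead of the schema-declared fields[i].bytes, so short values are no longer read out to the full column width. A hedged sketch of the same pattern against the TAOS C client API, assuming an already-open result set from taos_query():

/* illustrative sketch, not part of this commit */
#include <stdint.h>
#include <stdio.h>
#include <taos.h>

static void dumpVarcharColumns(TAOS_RES *result) {
    int         numFields = taos_num_fields(result);
    TAOS_FIELD *fields    = taos_fetch_fields(result);
    TAOS_ROW    row;

    while ((row = taos_fetch_row(result)) != NULL) {
        int32_t *length = taos_fetch_lengths(result);   /* actual length of each value in this row */
        for (int i = 0; i < numFields; ++i) {
            if (row[i] == NULL) continue;
            if (fields[i].type == TSDB_DATA_TYPE_BINARY || fields[i].type == TSDB_DATA_TYPE_NCHAR) {
                /* print exactly length[i] bytes, not the declared fields[i].bytes */
                printf("%.*s\t", (int)length[i], (const char *)row[i]);
            }
        }
        printf("\n");
    }
}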

View File

@ -160,11 +160,11 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
", band_speed float"
", io_read float, io_write float"
", req_http int, req_select int, req_insert int"
") tags (ipaddr binary(%d))",
") tags (dnodeid int, fqdn binary(%d))",
tsMonitorDbName, TSDB_FQDN_LEN + 1);
} else if (cmd == MONITOR_CMD_CREATE_TB_DN) {
snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn_%s using %s.dn tags('%s')", tsMonitorDbName,
tsMonitorConn.ep, tsMonitorDbName, tsLocalEp);
snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn%d using %s.dn tags(%d, '%s')", tsMonitorDbName,
dnodeGetDnodeId(), tsMonitorDbName, dnodeGetDnodeId(), tsLocalEp);
} else if (cmd == MONITOR_CMD_CREATE_MT_ACCT) {
snprintf(sql, SQL_LENGTH,
"create table if not exists %s.acct(ts timestamp "
@ -347,7 +347,7 @@ static void monitorSaveSystemInfo() {
int64_t ts = taosGetTimestampUs();
char * sql = tsMonitorConn.sql;
int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn_%s values(%" PRId64, tsMonitorDbName, tsMonitorConn.ep, ts);
int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn%d values(%" PRId64, tsMonitorDbName, dnodeGetDnodeId(), ts);
pos += monitorBuildCpuSql(sql + pos);
pos += monitorBuildMemorySql(sql + pos);

View File

@ -655,5 +655,5 @@ cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X
DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD
LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL
COUNT SUM AVG MIN MAX FIRST LAST TOP BOTTOM STDDEV PERCENTILE APERCENTILE LEASTSQUARES HISTOGRAM DIFF
SPREAD TWA INTERP LAST_ROW RATE IRATE SUM_RATE SUM_IRATE AVG_RATE AVG_IRATE NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT
SPREAD TWA INTERP LAST_ROW RATE IRATE SUM_RATE SUM_IRATE AVG_RATE AVG_IRATE TBID NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT
METRIC TBNAME JOIN METRICS STABLE NULL INSERT INTO VALUES.

View File

@ -245,21 +245,8 @@ enum {
BLK_DATA_ALL_NEEDED = 0x3,
};
#define IS_FILE_BLOCK(x) (((x)&BLK_FILE_BLOCK) != 0)
#define SET_FILE_BLOCK_FLAG(x) \
do { \
(x) &= (~BLK_CACHE_BLOCK); \
(x) |= BLK_FILE_BLOCK; \
} while (0);
#define SET_CACHE_BLOCK_FLAG(x) ((x) = BLK_CACHE_BLOCK | BLK_BLOCK_LOADED);
#define SET_DATA_BLOCK_NOT_LOADED(x) ((x) &= (~BLK_BLOCK_LOADED));
#define SET_DATA_BLOCK_LOADED(x) ((x) |= BLK_BLOCK_LOADED);
#define IS_DATA_BLOCK_LOADED(x) (((x)&BLK_BLOCK_LOADED) != 0)
typedef struct STwaInfo {
TSKEY lastKey;
int8_t hasResult; // flag to denote has value
@ -291,7 +278,6 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi
bool stableQueryFunctChanged(int32_t funcId);
void resetResultInfo(SResultInfo *pResInfo);
void initResultInfo(SResultInfo *pResInfo);
void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable);

View File

@ -225,6 +225,7 @@ static SKeyword keywordTable[] = {
{"TBNAME", TK_TBNAME},
{"JOIN", TK_JOIN},
{"METRICS", TK_METRICS},
{"TBID", TK_TBID},
{"STABLE", TK_STABLE},
{"FILE", TK_FILE},
{"VNODES", TK_VNODES},

View File

@ -39,10 +39,10 @@
#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
#define IS_MASTER_SCAN(runtime) (((runtime)->scanFlag & 1u) == MASTER_SCAN)
#define IS_SUPPLEMENT_SCAN(runtime) ((runtime)->scanFlag == SUPPLEMENTARY_SCAN)
#define SET_SUPPLEMENT_SCAN_FLAG(runtime) ((runtime)->scanFlag = SUPPLEMENTARY_SCAN)
#define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN)
#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)
#define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == SUPPLEMENTARY_SCAN)
#define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN)
#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = SUPPLEMENTARY_SCAN)
#define GET_QINFO_ADDR(x) ((void *)((char *)(x)-offsetof(SQInfo, runtimeEnv)))
@ -1101,7 +1101,7 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
}
// in the supplementary scan, only the following functions need to be executed
if (IS_SUPPLEMENT_SCAN(pRuntimeEnv) &&
if (IS_REVERSE_SCAN(pRuntimeEnv) &&
!(functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST ||
functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) {
return false;
@ -2450,8 +2450,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
qTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 ", order:%d",
GET_QINFO_ADDR(pRuntimeEnv), pQuery->window.skey, pQuery->window.ekey, pQuery->lastKey, pQuery->order.order);
TsdbQueryHandleT pQueryHandle =
pRuntimeEnv->scanFlag == MASTER_SCAN ? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
while (tsdbNextDataBlock(pQueryHandle)) {
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
return 0;
@ -2835,11 +2834,12 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
return; // failed to save data in the disk
}
// set current query completed
// if (pQInfo->numOfGroupResultPages == 0 && pQInfo->groupIndex == pQInfo->pSidSet->numOfSubSet) {
// pQInfo->tableIndex = pQInfo->pSidSet->numOfTables;
// return;
// }
// check if all results has been sent to client
int32_t numOfGroup = taosArrayGetSize(pQInfo->groupInfo.pGroupList);
if (pQInfo->numOfGroupResultPages == 0 && pQInfo->groupIndex == numOfGroup) {
pQInfo->tableIndex = pQInfo->groupInfo.numOfTables; // set query completed
return;
}
}
SQueryRuntimeEnv * pRuntimeEnv = &pQInfo->runtimeEnv;
@ -3087,7 +3087,31 @@ void setTableDataInfo(STableQueryInfo *pTableQueryInfo, int32_t tableIndex, int3
pTableQueryInfo->tableIndex = tableIndex;
}
static void doDisableFunctsForSupplementaryScan(SQuery *pQuery, SWindowResInfo *pWindowResInfo, int32_t order) {
static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo *pTableQueryInfo) {
if (pTableQueryInfo == NULL) {
return;
}
// order has change already!
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (!QUERY_IS_ASC_QUERY(pQuery)) {
assert(pTableQueryInfo->win.ekey >= pTableQueryInfo->lastKey + step);
} else {
assert(pTableQueryInfo->win.ekey <= pTableQueryInfo->lastKey + step);
}
pTableQueryInfo->win.ekey = pTableQueryInfo->lastKey + step;
SWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, TSKEY);
pTableQueryInfo->lastKey = pTableQueryInfo->win.skey;
SWITCH_ORDER(pTableQueryInfo->cur.order);
pTableQueryInfo->cur.vgroupIndex = -1;
}
static void disableFuncInReverseScanImpl(SQInfo* pQInfo, SWindowResInfo *pWindowResInfo, int32_t order) {
SQuery* pQuery = pQInfo->runtimeEnv.pQuery;
for (int32_t i = 0; i < pWindowResInfo->size; ++i) {
SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, i);
if (!pStatus->closed) {
@ -3108,18 +3132,32 @@ static void doDisableFunctsForSupplementaryScan(SQuery *pQuery, SWindowResInfo *
}
}
}
int32_t numOfGroups = taosArrayGetSize(pQInfo->groupInfo.pGroupList);
for(int32_t i = 0; i < numOfGroups; ++i) {
SArray *group = taosArrayGetP(pQInfo->groupInfo.pGroupList, i);
qTrace("QInfo:%p no result in group %d, continue", pQInfo, pQInfo->groupIndex - 1);
size_t t = taosArrayGetSize(group);
for (int32_t j = 0; j < t; ++j) {
SGroupItem *item = taosArrayGet(group, j);
updateTableQueryInfoForReverseScan(pQuery, item->info);
}
}
}
void disableFuncInReverseScan(SQueryRuntimeEnv *pRuntimeEnv) {
void disableFuncInReverseScan(SQInfo *pQInfo) {
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pRuntimeEnv->pQuery;
int32_t order = pQuery->order.order;
// group by normal columns and interval query on normal table
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) {
doDisableFunctsForSupplementaryScan(pQuery, pWindowResInfo, order);
disableFuncInReverseScanImpl(pQInfo, pWindowResInfo, order);
} else { // for simple result of table query,
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { // todo refactor
int32_t functId = pQuery->pSelectExpr[j].base.functionId;
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[j];
@ -3134,34 +3172,10 @@ void disableFuncInReverseScan(SQueryRuntimeEnv *pRuntimeEnv) {
}
}
void disableFuncForReverseScan(SQInfo *pQInfo, int32_t order) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
pRuntimeEnv->pCtx[i].order = (pRuntimeEnv->pCtx[i].order) ^ 1u;
}
if (isIntervalQuery(pQuery)) {
// for (int32_t i = 0; i < pQInfo->groupInfo.numOfTables; ++i) {
// STableQueryInfo *pTableQueryInfo = pQInfo->pTableQueryInfo[i].pTableQInfo;
// SWindowResInfo * pWindowResInfo = &pTableQueryInfo->windowResInfo;
//
// doDisableFunctsForSupplementaryScan(pQuery, pWindowResInfo, order);
// }
} else {
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
doDisableFunctsForSupplementaryScan(pQuery, pWindowResInfo, order);
}
pQuery->order.order = (pQuery->order.order) ^ 1u;
}
void switchCtxOrder(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SWITCH_ORDER(pRuntimeEnv->pCtx[i]
.order); // = (pRuntimeEnv->pCtx[i].order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
SWITCH_ORDER(pRuntimeEnv->pCtx[i] .order);
}
}
@ -3358,7 +3372,7 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
SWITCH_ORDER(pQuery->order.order);
SET_SUPPLEMENT_SCAN_FLAG(pRuntimeEnv);
SET_REVERSE_SCAN_FLAG(pRuntimeEnv);
STsdbQueryCond cond = {
.twindow = pQuery->window,
@ -3376,7 +3390,7 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
switchCtxOrder(pRuntimeEnv);
disableFuncInReverseScan(pRuntimeEnv);
disableFuncInReverseScan(pQInfo);
}
static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusInfo *pStatus) {
@ -3533,28 +3547,6 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols)
free(pTableQueryInfo);
}
void changeMeterQueryInfoForSuppleQuery(SQuery *pQuery, STableQueryInfo *pTableQueryInfo) {
if (pTableQueryInfo == NULL) {
return;
}
// order has change already!
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (!QUERY_IS_ASC_QUERY(pQuery)) {
assert(pTableQueryInfo->win.ekey >= pTableQueryInfo->lastKey + step);
} else {
assert(pTableQueryInfo->win.ekey <= pTableQueryInfo->lastKey + step);
}
pTableQueryInfo->win.ekey = pTableQueryInfo->lastKey + step;
SWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, TSKEY);
pTableQueryInfo->lastKey = pTableQueryInfo->win.skey;
pTableQueryInfo->cur.order = pTableQueryInfo->cur.order ^ 1u;
pTableQueryInfo->cur.vgroupIndex = -1;
}
void restoreIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
SQuery *pQuery = pRuntimeEnv->pQuery;
@ -3943,9 +3935,16 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
data += bytes * numOfRows;
}
// all data returned, set query over
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
setQueryStatus(pQuery, QUERY_OVER);
if (pQInfo->runtimeEnv.stableQuery && isIntervalQuery(pQuery)) {
if (pQInfo->tableIndex >= pQInfo->groupInfo.numOfTables) {
setQueryStatus(pQuery, QUERY_OVER);
}
} else {
setQueryStatus(pQuery, QUERY_OVER);
}
}
}
@ -4368,7 +4367,8 @@ static int64_t queryOnDataBlocks(SQInfo *pQInfo) {
int64_t st = taosGetTimestampMs();
TsdbQueryHandleT *pQueryHandle = pRuntimeEnv->pQueryHandle;
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
while (tsdbNextDataBlock(pQueryHandle)) {
if (isQueryKilled(pQInfo)) {
break;
@ -4400,7 +4400,7 @@ static int64_t queryOnDataBlocks(SQInfo *pQInfo) {
}
}
assert(pTableQueryInfo != NULL && pTableQueryInfo != NULL);
assert(pTableQueryInfo != NULL);
restoreIntervalQueryRange(pRuntimeEnv, pTableQueryInfo);
SDataStatis *pStatis = NULL;
@ -4759,28 +4759,35 @@ static void createTableQueryInfo(SQInfo *pQInfo) {
}
}
static void prepareQueryInfoForReverseScan(SQInfo *pQInfo) {
// SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
// for (int32_t i = 0; i < pQInfo->groupInfo.numOfTables; ++i) {
// STableQueryInfo *pTableQueryInfo = pQInfo->pTableQueryInfo[i].pTableQInfo;
// changeMeterQueryInfoForSuppleQuery(pQuery, pTableQueryInfo);
// }
}
static void doSaveContext(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
SET_SUPPLEMENT_SCAN_FLAG(pRuntimeEnv);
disableFuncForReverseScan(pQInfo, pQuery->order.order);
if (pRuntimeEnv->pTSBuf != NULL) {
pRuntimeEnv->pTSBuf->cur.order = pRuntimeEnv->pTSBuf->cur.order ^ 1u;
}
SET_REVERSE_SCAN_FLAG(pRuntimeEnv);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
prepareQueryInfoForReverseScan(pQInfo);
SWITCH_ORDER(pQuery->order.order);
if (pRuntimeEnv->pTSBuf != NULL) {
pRuntimeEnv->pTSBuf->cur.order = pQuery->order.order;
}
STsdbQueryCond cond = {
.twindow = pQuery->window,
.order = pQuery->order.order,
.colList = pQuery->colList,
.numOfCols = pQuery->numOfCols,
};
// clean unused handle
if (pRuntimeEnv->pSecQueryHandle != NULL) {
tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle);
}
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
switchCtxOrder(pRuntimeEnv);
disableFuncInReverseScan(pQInfo);
}
static void doRestoreContext(SQInfo *pQInfo) {
@ -4835,8 +4842,6 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult);
}
pQuery->rec.rows += pQuery->rec.rows;
if (pQuery->rec.rows == 0) {
// vnodePrintQueryStatistics(pSupporter);
}
@ -6287,7 +6292,10 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
SGroupItem* item = taosArrayGet(pa, i);
char* output = pQuery->sdata[0]->data + i * rsize;
*(int64_t*) output = item->id.uid; // memory align problem
varDataSetLen(output, rsize - VARSTR_HEADER_SIZE);
output = varDataVal(output);
*(int64_t*) output = item->id.uid; // memory align problem, todo serialize
output += sizeof(item->id.uid);
*(int32_t*) output = item->id.tid;
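The tail of this hunk writes one TID_TAG result row as a single var-string cell whose payload is the serialized uid, tid (and, per the comment earlier in the commit, vgId plus the tag bytes), matching the width computed by getResultDataInfo. A minimal sketch of that packing, with local stand-ins for the var-string macros and the remaining fields assumed to follow the same pattern:

/* illustrative sketch, not part of this commit */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t VarDataLenT;                  /* assumed 2-byte length prefix */
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)

/* Pack (uid, tid, vgId) into one var-string cell of rsize bytes, field by field,
 * the way buildTagQueryResult does above (tag payload omitted for brevity). */
static void packTidTag(char *cell, size_t rsize, int64_t uid, int32_t tid, int32_t vgId) {
    VarDataLenT len = (VarDataLenT)(rsize - VARSTR_HEADER_SIZE);
    memcpy(cell, &len, sizeof(len));            /* varDataSetLen(cell, rsize - VARSTR_HEADER_SIZE) */

    char *p = cell + VARSTR_HEADER_SIZE;        /* varDataVal(cell) */
    memcpy(p, &uid,  sizeof(uid));  p += sizeof(uid);
    memcpy(p, &tid,  sizeof(tid));  p += sizeof(tid);
    memcpy(p, &vgId, sizeof(vgId));             /* tag bytes would follow here */
}

int main(void) {
    char cell[2 + 8 + 4 + 4 + 8];               /* header + uid + tid + vgId + 8-byte tag */
    packTidTag(cell, sizeof(cell), 42LL, 7, 3);

    VarDataLenT stored;
    memcpy(&stored, cell, sizeof(stored));
    printf("payload length stored: %u\n", (unsigned)stored);  /* 24 */
    return 0;
}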

File diff suppressed because it is too large.

View File

@ -793,7 +793,7 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
pConn->chandle = pRecv->chandle;
pConn->peerIp = pRecv->ip;
if (pConn->peerPort == 0) pConn->peerPort = pRecv->port;
pConn->peerPort = pRecv->port;
if (pHead->port) pConn->peerPort = htons(pHead->port);
terrno = rpcCheckAuthentication(pConn, (char *)pHead, pRecv->msgLen);

View File

@ -301,7 +301,7 @@ void *vnodeGetVnode(int32_t vgId) {
SVnodeObj **ppVnode = (SVnodeObj **)taosGetIntHashData(tsDnodeVnodesHash, vgId);
if (ppVnode == NULL || *ppVnode == NULL) {
terrno = TSDB_CODE_INVALID_VGROUP_ID;
vPrint("vgId:%d not exist", vgId);
vPrint("vgId:%d, not exist", vgId);
return NULL;
}

View File

@ -184,7 +184,7 @@ static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
int16_t numOfColumns = htons(pTable->numOfColumns);
int16_t numOfTags = htons(pTable->numOfTags);
int32_t sid = htonl(pTable->sid);
uint64_t uid = htobe64(pTable->uid);
uint64_t uid = htobe64(pTable->uid);
SSchema *pSchema = (SSchema *) pTable->data;
int32_t totalCols = numOfColumns + numOfTags;
@ -231,13 +231,15 @@ static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
int32_t code = 0;
vTrace("vgId:%d, stable:%s, start to drop", pVnode->vgId, pTable->tableId);
// TODO: drop stable in vvnode
//int64_t uid = htobe64(pTable->uid);
//void *pTsdb = dnodeGetVnodeTsdb(pMsg->pVnode);
//rpcRsp.code = tsdbDropTable(pTsdb, pTable->uid);
STableId stableId = {
.uid = htobe64(pTable->uid),
.tid = -1
};
code = TSDB_CODE_SUCCESS;
vTrace("vgId:%d, stable:%s, drop stable result:%x", pVnode, pTable->tableId, code);
code = tsdbDropTable(pVnode->tsdb, stableId);
vTrace("vgId:%d, stable:%s, drop stable result:%s", pVnode, pTable->tableId, tstrerror(code));
return code;
}

View File

@ -14,7 +14,7 @@ spring.datasource.druid.max-active=5
# max wait time for get connection, ms
spring.datasource.druid.max-wait=60000
spring.datasource.druid.validation-query=describe log.dn
spring.datasource.druid.validation-query=select server_status();
spring.datasource.druid.validation-query-timeout=5000
spring.datasource.druid.test-on-borrow=false
spring.datasource.druid.test-on-return=false

View File

@ -10,7 +10,7 @@ spring.datasource.druid.max-active=5
# max wait time for get connection, ms
spring.datasource.druid.max-wait=60000
spring.datasource.druid.validation-query=describe log.dn
spring.datasource.druid.validation-query=select server_status();
spring.datasource.druid.validation-query-timeout=5000
spring.datasource.druid.test-on-borrow=false
spring.datasource.druid.test-on-return=false
@ -23,4 +23,4 @@ spring.datasource.druid.max-evictable-idle-time-millis=900000
#mybatis
mybatis.mapper-locations=classpath:mapper/*.xml
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug

View File

View File

@ -0,0 +1,41 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
try:
tdSql.execute("create account a&cc PASS 'pass123'")
except Exception as e:
print("create account a&cc PASS 'pass123'")
return
tdLog.exit("drop built-in user is error.")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,52 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
print("==========step1")
print("drop built-in account")
try:
tdSql.execute("drop account root")
except Exception as e:
if len(e.args) > 0 and 'no rights' != e.args[0]:
tdLog.exit(e)
print("==========step2")
print("drop built-in user")
try:
tdSql.execute("drop user root")
except Exception as e:
if len(e.args) > 0 and 'no rights' != e.args[0]:
tdLog.exit(e)
return
tdLog.exit("drop built-in user is error.")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -9,6 +9,7 @@ python3 ./test.py $1 -f insert/smallint.py
python3 ./test.py $1 -f insert/tinyint.py
python3 ./test.py $1 -f insert/date.py
python3 ./test.py $1 -f insert/binary.py
python3 ./test.py $1 -f insert/nchar.py
python3 ./test.py $1 -f table/column_name.py
python3 ./test.py $1 -f table/column_num.py
@ -80,3 +81,7 @@ python3 ./test.py $1 -f import_merge/importToCommit.py
python3 ./test.py $1 -f import_merge/importTORestart.py
python3 ./test.py $1 -f import_merge/importTPORestart.py
python3 ./test.py $1 -f import_merge/importTRestart.py
# user
python3 ./test.py $1 -f user/user_create.py
python3 ./test.py $1 -f user/pass_len.py

View File

@ -14,7 +14,6 @@ class TDTestCase:
def run(self):
tdSql.prepare()
tdLog.info('=============== step1')
tdLog.info('create table tb (ts timestamp, speed binary(5))')
tdSql.execute('create table tb (ts timestamp, speed binary(5))')

View File

@ -0,0 +1,44 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
tdSql.prepare()
tdSql.execute('create table tb (ts timestamp, col nchar(10))')
tdSql.execute("insert into tb values (now, 'taosdata')")
tdSql.query("select * from tb")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 'taosdata')
tdSql.execute("insert into tb values (now, '涛思数据')")
tdSql.query("select * from tb")
tdSql.checkRows(2)
tdSql.checkData(1, 1, '涛思数据')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -1,66 +1,116 @@
#!/bin/bash
python3 ./test.py $1 -f insert/basic.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/int.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/float.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/bigint.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/bool.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/double.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/smallint.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/tinyint.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/binary.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/date.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/nchar.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f table/column_name.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f table/column_num.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f table/db_table.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importDataLastTO.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importDataLastT.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importDataTO.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importDataT.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importHeadOverlap.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importHeadPartOverlap.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importHORestart.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importHPORestart.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importHRestart.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importLastSub.py
python3 ./test.py -s $1
sleep 1
python3 ./test.py $1 -f import_merge/importDataLastTO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importDataLastT.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importDataTO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importDataT.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importHeadOverlap.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importHeadPartOverlap.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importHORestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importHPORestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importHRestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importLastSub.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock1HO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock1HPO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock1H.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock1S.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock1Sub.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock1TO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock1TPO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock1T.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock2HO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock2HPO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock2H.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock2S.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock2Sub.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock2TO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock2TPO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlock2T.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importBlockbetween.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importCacheFileSub.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importCacheFileTO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importCacheFileT.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importDataLastSub.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importHead.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importLastTO.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importLastT.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importSpan.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importSRestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importSubRestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTailOverlap.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTail.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTORestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTPORestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTRestart.py
python3 ./test.py $1 -s && sleep 1

View File

@ -92,23 +92,23 @@ if __name__ == "__main__":
tdDnodes.start(1)
if masterIp == "":
host='127.0.0.1'
host = '127.0.0.1'
else:
host=masterIp
host = masterIp
tdLog.notice("Procedures for tdengine deployed in %s" % (host))
tdLog.info("Procedures for tdengine deployed in %s" % (host))
if testCluster:
tdLog.notice("Procedures for testing cluster")
tdLog.info("Procedures for testing cluster")
if fileName == "all":
tdCases.runAllCluster()
else:
tdCases.runOneCluster(fileName)
else:
tdLog.notice("Procedures for testing self-deployment")
tdLog.info("Procedures for testing self-deployment")
conn = taos.connect(
host,
config=tdDnodes.getSimCfgPath())
host,
config=tdDnodes.getSimCfgPath())
if fileName == "all":
tdCases.runAllLinux(conn)
else:

View File

View File

@ -0,0 +1,63 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
    def init(self, conn):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def run(self):
        print("==============step1")
        try:
            tdSql.execute("create user abc pass '123456'")
        except Exception as e:
            tdLog.exit(e)
        print("create user abc pass '123456'")

        print("==============step2")
        try:
            tdSql.execute("alter user abc pass 'taosdata'")
        except Exception as e:
            tdLog.exit(e)
        print("alter user abc pass 'taosdata'")

        print("==============step3")
        try:
            tdSql.execute("alter user abc pass ''")
        except Exception as e:
            print("alter user abc pass ''")
        else:
            tdLog.exit("Error: alter user abc pass ''")

        print("==============step4")
        try:
            tdSql.execute("alter user abc pass null")
        except Exception as e:
            print("alter user abc pass null")
        else:
            tdLog.exit("Error: alter user abc pass null")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,54 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
    def init(self, conn):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def run(self):
        print("==============step1")
        try:
            tdSql.execute("create user &abc PASS 'pass123'")
        except Exception as e:
            print(e)

        print("==============step2")
        try:
            tdSql.execute("create user a&bc PASS 'pass123'")
        except Exception as e:
            print(e)

        print("==============step3")
        try:
            tdSql.execute("create user '涛思' PASS 'pass123'")
        except Exception as e:
            print(e)
            return
        tdLog.exit("create user with special character.")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -69,7 +69,7 @@ class TDSimClient:
self.cfg("numOfLogLines", "100000000")
self.cfg("numOfThreadsPerCore", "2.0")
self.cfg("locale", "en_US.UTF-8")
self.cfg("charset", "GBK")
self.cfg("charset", "UTF-8")
self.cfg("asyncLog", "0")
self.cfg("anyIp", "0")
self.cfg("sdbDebugFlag", "135")
@ -205,24 +205,47 @@ class TDDnode:
time.sleep(2)
def stop(self):
if self.valgrind == 0:
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
if self.running != 0:
cmd = "ps -ef|grep -w taosd | grep '%s' | grep -v grep | awk '{print $2}' && pkill -sigint taosd" % (
self.cfgDir)
if os.system(cmd) != 0:
tdLog.exit(cmd)
tdLog.debug("dnode:%d is stopped by kill -SIGINT" % (self.index))
killCmd = "ps -ef|grep -w %s| grep '%s' | grep -v grep | awk '{print $2}' | xargs kill -INT" % (
toBeKilled, self.cfgDir)
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
tdLog.debug(
"wait 2 seconds for the dnode:%d to stop." %
(self.index))
time.sleep(2)
def forcestop(self):
if self.valgrind == 0:
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
if self.running != 0:
cmd = "ps -ef|grep -w taosd | grep '%s' | grep -v grep | awk '{print $2}' && pkill -sigkill taosd" % (
self.cfgDir)
if os.system(cmd) != 0:
tdLog.exit(cmd)
tdLog.debug("dnode:%d is stopped by kill -9" % (self.index))
killCmd = "ps -ef|grep -w %s| grep '%s' | grep -v grep | awk '{print $2}' | xargs kill -KILL" % (
toBeKilled, self.cfgDir)
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
tdLog.debug(
"wait 2 seconds for the dnode:%d to stop." %
(self.index))
@ -268,8 +291,21 @@ class TDDnodes:
self.dnodes.append(TDDnode(10))
def init(self, path):
cmd = "ps -ef|grep -w taosd | grep 'taosd' | grep -v grep | awk '{print $2}' && pkill -sigkill taosd"
os.system(cmd)
killCmd = "ps -ef|grep -w taosd | grep -v grep | awk '{print $2}' | xargs kill -KILL"
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
killCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs kill -KILL"
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
binPath = os.path.dirname(os.path.realpath(__file__))
binPath = binPath + "/../../../debug/"
@ -361,8 +397,22 @@ class TDDnodes:
os.system(cmd)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
cmd = "ps -ef | grep -w taosd | grep 'dnode' | grep -v grep | awk '{print $2}' && pkill -sigkill taosd"
os.system(cmd)
killCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}' | xargs kill -KILL"
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
killCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs kill -KILL"
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
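Aside: the shutdown changes above replace a one-shot pkill with a loop that keeps signalling the process and polling `ps` until no matching PID remains. Below is a minimal standalone sketch of that pattern; the helper name, its parameters, and the default signal are illustrative assumptions, only the shell pipeline mirrors the diff.

```python
import os
import subprocess
import time

def kill_and_wait(name="taosd", sig="INT"):
    # List PIDs of every process whose command matches `name` (same pipeline as the diff).
    ps_cmd = "ps -ef | grep -w %s | grep -v grep | awk '{print $2}'" % name
    # Feed those PIDs to kill; re-run until the ps query comes back empty.
    kill_cmd = ps_cmd + " | xargs kill -%s" % sig
    pids = subprocess.check_output(ps_cmd, shell=True)
    while pids:
        os.system(kill_cmd)   # signal every surviving process
        time.sleep(1)         # give them a moment to exit before polling again
        pids = subprocess.check_output(ps_cmd, shell=True)

# e.g. kill_and_wait("valgrind.bin", "KILL") for the forced-stop path
```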

View File

@ -1,49 +1,26 @@
#################################
run general/db/basic1.sim
run general/db/basic2.sim
run general/db/basic3.sim
run general/cache/new_metrics.sim
run general/compress/compress.sim
run general/compute/avg.sim
run general/compute/bottom.sim
run general/compute/count.sim
run general/db/len.sim
run general/db/basic4.sim
run general/db/basic5.sim
run general/table/basic1.sim
run general/table/basic2.sim
run general/http/restful_insert.sim
run general/import/basic.sim
run general/import/commit.sim
run general/insert/basic.sim
run general/insert/query_file_memory.sim
run general/parser/binary_escapeCharacter.sim
run general/parser/columnValue_bigint.sim
run general/parser/select_from_cache_disk.sim
run general/table/autocreate.sim
run general/table/basic3.sim
run general/table/column_num.sim
run general/table/column_name.sim
run general/table/bigint.sim
run general/table/bool.sim
run general/table/double.sim
run general/table/float.sim
run general/table/int.sim
run general/table/smallint.sim
run general/table/tinyint.sim
run general/table/db.table.sim
run general/table/vgroup.sim
run general/user/basic1.sim
run general/user/pass_alter.sim
run general/user/pass_len.sim
run general/user/user_create.sim
run general/user/user_len.sim
# run general/compute/count.sim
# run general/compute/avg.sim
# run general/compute/sum.sim
# run general/compute/min.sim
# run general/compute/max.sim
# run general/compute/first.sim
# run general/compute/last.sim
run general/compute/stddev.sim
# run general/compute/leastsquare.sim
run general/compute/top.sim
run general/compute/bottom.sim
run general/compute/percentile.sim
run general/compute/diff.sim
# run general/compute/interval.sim
run general/compute/null.sim
# run general/compute/diff2.sim
run general/parse/testSuite.sim
run general/field/testSuite.sim
run general/vector/single.sim
##################################

View File

@ -35,7 +35,7 @@ print =============== step1 - one query, 1 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"A","datapoints":[["-",1577980800000],["-",1578067200000]]}]@ then
if $system_content != @[{"refId":"A","target":"A","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then
return -1
endi
@ -51,32 +51,32 @@ print =============== step3 - one query, 3 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step3.1-> $system_content
if $system_content != @[{"refId":"A","target":"13","datapoints":[[2,1577980800000]]},{"refId":"A","target":"14","datapoints":[[2,1578067200000]]}]@ then
if $system_content != @[{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb "} ]' 127.0.0.1:6020/grafana/query
print step3.2-> $system_content
if $system_content != @[{"refId":"A","target":"11","datapoints":[[1,1577808000000]]},{"refId":"A","target":"12","datapoints":[[1,1577894400000]]},{"refId":"A","target":"13","datapoints":[[2,1577980800000]]},{"refId":"A","target":"14","datapoints":[[2,1578067200000]]}]@ then
if $system_content != @[{"refId":"A","target":"{val1:11,}","datapoints":[[1,1577808000000]]},{"refId":"A","target":"{val1:12,}","datapoints":[[1,1577894400000]]},{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val1,val from db.tb "} ]' 127.0.0.1:6020/grafana/query
print step3.3-> $system_content
if $system_content != @[{"refId":"A","target":"1","datapoints":[[11,1577808000000],[12,1577894400000]]},{"refId":"A","target":"2","datapoints":[[13,1577980800000],[14,1578067200000]]}]@ then
if $system_content != @[{"refId":"A","target":"{val:1,}","datapoints":[[11,1577808000000],[12,1577894400000]]},{"refId":"A","target":"{val:2,}","datapoints":[[13,1577980800000],[14,1578067200000]]}]@ then
return -1
endi
print =============== step4 - one query, 4 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val2,val1,val from db.tb "} ]' 127.0.0.1:6020/grafana/query
print step4.1-> $system_content
if $system_content != @[{"refId":"A","target":"1","datapoints":[[21,1577808000000],[22,1577894400000]]},{"refId":"A","target":"2","datapoints":[[23,1577980800000],[24,1578067200000]]}]@ then
if $system_content != @[{"refId":"A","target":"{val1:11,, val:1,}","datapoints":[[21,1577808000000]]},{"refId":"A","target":"{val1:12,, val:1,}","datapoints":[[22,1577894400000]]},{"refId":"A","target":"{val1:13,, val:2,}","datapoints":[[23,1577980800000]]},{"refId":"A","target":"{val1:14,, val:2,}","datapoints":[[24,1578067200000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1,val2 from db.tb "} ]' 127.0.0.1:6020/grafana/query
print step4.2-> $system_content
if $system_content != @[{"refId":"A","target":"21","datapoints":[[1,1577808000000]]},{"refId":"A","target":"22","datapoints":[[1,1577894400000]]},{"refId":"A","target":"23","datapoints":[[2,1577980800000]]},{"refId":"A","target":"24","datapoints":[[2,1578067200000]]}]@ then
if $system_content != @[{"refId":"A","target":"{val1:11,, val2:21,}","datapoints":[[1,1577808000000]]},{"refId":"A","target":"{val1:12,, val2:22,}","datapoints":[[1,1577894400000]]},{"refId":"A","target":"{val1:13,, val2:23,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,, val2:24,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
@ -90,20 +90,20 @@ endi
print =============== step6 - one query, 2 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"23","datapoints":[[13,"-"]]},{"refId":"A","target":"24","datapoints":[[14,"-"]]}]@ then
if $system_content != @[{"refId":"A","target":"{val2:23,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,}","datapoints":[[14,"-"]]}]@ then
return -1
endi
print =============== step7 - one query, 3 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"2","datapoints":[[13,"-"],[14,"-"]]}]@ then
if $system_content != @[{"refId":"A","target":"{val2:23,, val:2,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,, val:2,}","datapoints":[[14,"-"]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb"} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"1","datapoints":[[11,"-"],[12,"-"]]},{"refId":"A","target":"2","datapoints":[[13,"-"],[14,"-"]]}]@ then
if $system_content != @[{"refId":"A","target":"{val2:21,, val:1,}","datapoints":[[11,"-"]]},{"refId":"A","target":"{val2:22,, val:1,}","datapoints":[[12,"-"]]},{"refId":"A","target":"{val2:23,, val:2,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,, val:2,}","datapoints":[[14,"-"]]}]@ then
return -1
endi
@ -132,7 +132,7 @@ print =============== step11 - two query, 1 column, with timestamp, 1 column, w
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[["-",1577980800000],["-",1578067200000]]},{"refId":"A","target":"A","datapoints":[["-",1577980800000],["-",1578067200000]]}]@ then
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"A","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then
return -1
endi
@ -140,14 +140,14 @@ print =============== step12 - two query, 1 column, with timestamp, 2 column, w
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[["-",1577980800000],["-",1578067200000]]},{"refId":"A","target":"A","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"A","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then
return -1
endi
print =============== step13 - two query, 1 column, with timestamp, 3 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[["-",1577980800000],["-",1578067200000]]},{"refId":"A","target":"13","datapoints":[[2,1577980800000]]},{"refId":"A","target":"14","datapoints":[[2,1578067200000]]}]@ then
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
@ -161,14 +161,14 @@ endi
print =============== step15 - two query, 2 column, with timestamp, 3 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB213","datapoints":[[223,1577980800000]]},{"refId":"B","target":"BB214","datapoints":[[224,1578067200000]]},{"refId":"A","target":"AA","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then
if $system_content != @[{"refId":"B","target":"BB{val1:213,}","datapoints":[[223,1577980800000]]},{"refId":"B","target":"BB{val1:214,}","datapoints":[[224,1578067200000]]},{"refId":"A","target":"AA","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then
return -1
endi
print =============== step16 - two query, 3 column, with timestamp, 4 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val, val1, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB213","datapoints":[[22,1577980800000]]},{"refId":"B","target":"BB214","datapoints":[[22,1578067200000]]},{"refId":"A","target":"AA13","datapoints":[[2,1577980800000]]},{"refId":"A","target":"AA14","datapoints":[[2,1578067200000]]}]@ then
if $system_content != @[{"refId":"B","target":"BB{val1:213,, val2:223,, val1:213,}","datapoints":[[22,1577980800000]]},{"refId":"B","target":"BB{val1:214,, val2:224,, val1:214,}","datapoints":[[22,1578067200000]]},{"refId":"A","target":"AA{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"AA{val1:14,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
@ -196,7 +196,7 @@ endi
print =============== step20 - two query, 1 column, no timestamp, 1 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA","datapoints":[["-",1577980800000],["-",1578067200000]]}]@ then
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then
return -1
endi
@ -210,28 +210,28 @@ endi
print =============== step22 - two query, 1 column, no timestamp, 3 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val1, val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA223","datapoints":[[213,1577980800000]]},{"refId":"A","target":"AA224","datapoints":[[214,1578067200000]]}]@ then
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA{val2:223,}","datapoints":[[213,1577980800000]]},{"refId":"A","target":"AA{val2:224,}","datapoints":[[214,1578067200000]]}]@ then
return -1
endi
print =============== step23 - two query, 2 column, no timestamp, 1 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB223","datapoints":[[213,"-"]]},{"refId":"B","target":"BB224","datapoints":[[214,"-"]]},{"refId":"A","target":"AA","datapoints":[[213,"-"],[214,"-"]]}]@ then
if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA","datapoints":[[213,"-"],[214,"-"]]}]@ then
return -1
endi
print =============== step24 - two query, 2 column, no timestamp, 2 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB223","datapoints":[[213,"-"]]},{"refId":"B","target":"BB224","datapoints":[[214,"-"]]},{"refId":"A","target":"AA213","datapoints":[[22,"-"]]},{"refId":"A","target":"AA214","datapoints":[[22,"-"]]}]@ then
if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA{val1:213,}","datapoints":[[22,"-"]]},{"refId":"A","target":"AA{val1:214,}","datapoints":[[22,"-"]]}]@ then
return -1
endi
print =============== step25 - two query, 2 column, no timestamp, 3 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6020/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB223","datapoints":[[213,"-"]]},{"refId":"B","target":"BB224","datapoints":[[214,"-"]]},{"refId":"A","target":"AA223","datapoints":[[22,"-"]]},{"refId":"A","target":"AA224","datapoints":[[22,"-"]]}]@ then
if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA{val1:213,, val2:223,}","datapoints":[[22,"-"]]},{"refId":"A","target":"AA{val1:214,, val2:224,}","datapoints":[[22,"-"]]}]@ then
return -1
endi
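For readers puzzling over the new expected strings above: the series label ("target") now embeds every column after the value column as name:value pairs, prefixed by the alias when one is given. The sketch below reproduces that formatting purely from the expected strings in this test; the function name, its signature, and the omitted refId fallback (used when the alias is empty and there are no extra columns) are assumptions, not taken from the HTTP module itself.

```python
def make_target(alias, col_names, row, has_ts=True):
    # The first column after the timestamp (or the first column when there is
    # no timestamp) supplies the datapoint value; every remaining column
    # contributes "name:value," and the pieces are joined with ", ".
    start = 2 if has_ts else 1
    extra = ", ".join("%s:%s," % (n, v)
                      for n, v in zip(col_names[start:], row[start:]))
    return alias + ("{%s}" % extra if extra else "")

# make_target("", ["ts", "val", "val1"], [1577980800000, 2, 13])       -> "{val1:13,}"
# make_target("BB", ["ts", "val2", "val1"], [1577980800000, 223, 213]) -> "BB{val1:213,}"
```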

View File

@ -16,14 +16,14 @@ print =============== step1 - parse
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/
print $system_content
if $system_content != @{"status":"error","code":1022,"desc":"database name can not be NULL"}@ then
if $system_content != @{"status":"error","code":1022,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/
print $system_content
if $system_content != @{"status":"error","code":1022,"desc":"database name can not be NULL"}@ then
if $system_content != @{"status":"error","code":1022,"desc":"database name can not be null"}@ then
return -1
endi
@ -170,7 +170,7 @@ endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"":"windows"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
if $system_content != @{"status":"error","code":1038,"desc":"tag name is NULL"}@ then
if $system_content != @{"status":"error","code":1038,"desc":"tag name is null"}@ then
return -1
endi
@ -191,14 +191,14 @@ endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":""},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
if $system_content != @{"status":"error","code":1041,"desc":"tag value is NULL"}@ then
if $system_content != @{"status":"error","code":1041,"desc":"tag value is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"1022":"111"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
if $system_content != @{"status":"error","code":1042,"desc":"table is NULL"}@ then
if $system_content != @{"status":"error","code":1042,"desc":"table is null"}@ then
return -1
endi
@ -219,14 +219,14 @@ endi
system_content curl -u root:taosdata -d '{"fields":{"":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
if $system_content != @{"status":"error","code":1048,"desc":"field name is NULL"}@ then
if $system_content != @{"status":"error","code":1048,"desc":"field name is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":"","Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
if $system_content != @{"status":"error","code":1051,"desc":"field value is NULL"}@ then
if $system_content != @{"status":"error","code":1051,"desc":"field value is null"}@ then
return -1
endi

View File

@ -46,8 +46,9 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 2000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
sleep 3000
sql reset query cache
sleep 1000
sql use $db
sql drop table tb5
@ -71,13 +72,15 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 2000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
sleep 3000
sql reset query cache
sleep 1000
sql use $db
sql create table tb5 using $stb tags(5)
sql select * from tb5
print $rows should be 0
if $rows != 0 then
return -1
endi

View File

@ -1,4 +0,0 @@
run general/metrics/disk.sim
run general/metrics/metrics.sim
run general/metrics/values.sim
run general/metrics/vnode3.sim

View File

@ -0,0 +1,4 @@
run general/stable/disk.sim
run general/stable/metrics.sim
run general/stable/values.sim
run general/stable/vnode3.sim

View File

@ -70,4 +70,11 @@ if $data21 != 3 then
return -1
endi
print =============== drop stable
sql drop table db.st
sql show db.stables
if $rows != 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -19,28 +19,47 @@ endi
print ============= step2
sql close
sleep 2500
print user read login
sql connect read
sleep 2000
sql alter user read pass 'taosdata'
sql alter user write pass 'taosdata1' -x step2
return -1
step2:
sql_error create user read pass 'taosdata1'
sql_error create user write pass 'taosdata1'
sql show users
if $rows != 5 then
return -1
endi
print ============= step3
sql close
sleep 2500
print user write login
sql connect write
sleep 2000
sql_error create user read pass 'taosdata1'
sql_error create user write pass 'taosdata1'
sql alter user write pass 'taosdata'
sql alter user read pass 'taosdata' -x step3
return -1
step3:
sql show users
if $rows != 5 then
return -1
endi
print ============= step4
sql close
sleep 2500
print root write login
sql connect
sleep 2000
sql create user oroot pass 'taosdata'
sql show users
if $rows != 6 then
return -1

View File

@ -34,10 +34,10 @@ print =============== step3
sql drop user read -x step31
return -1
step31:
sql drop user sys -x step32
sql drop user _root -x step32
return -1
step32:
sql drop user stream -x step33
sql drop user monitor -x step33
return -1
step33:
@ -71,11 +71,11 @@ sql alter user read privilege 1 -x step43
return -1
step43:
sql drop user sys -x step41
sql drop user _root -x step41
return -1
step41:
sql drop user stream -x step42
sql drop user monitor -x step42
return -1
step42:

View File

@ -27,21 +27,21 @@ cd ../../../debug; make
./test.sh -f general/compress/compress2.sim
./test.sh -f general/compress/uncompress.sim
#./test.sh -f general/compute/avg.sim
./test.sh -f general/compute/avg.sim
./test.sh -f general/compute/bottom.sim
#./test.sh -f general/compute/count.sim
./test.sh -f general/compute/count.sim
./test.sh -f general/compute/diff.sim
#./test.sh -f general/compute/diff2.sim
#./test.sh -f general/compute/first.sim
./test.sh -f general/compute/first.sim
#./test.sh -f general/compute/interval.sim
#./test.sh -f general/compute/last.sim
#./test.sh -f general/compute/leastsquare.sim
#./test.sh -f general/compute/max.sim
#./test.sh -f general/compute/min.sim
./test.sh -f general/compute/max.sim
./test.sh -f general/compute/min.sim
./test.sh -f general/compute/null.sim
./test.sh -f general/compute/percentile.sim
./test.sh -f general/compute/stddev.sim
#./test.sh -f general/compute/sum.sim
./test.sh -f general/compute/sum.sim
./test.sh -f general/compute/top.sim
./test.sh -f general/db/basic.sim
@ -65,20 +65,20 @@ cd ../../../debug; make
#./test.sh -f general/field/4.sim
#./test.sh -f general/field/5.sim
#./test.sh -f general/field/6.sim
./test.sh -f general/field/bigint.sim
##./test.sh -f general/field/bigint.sim
#./test.sh -f general/field/binary.sim
./test.sh -f general/field/bool.sim
##./test.sh -f general/field/bool.sim
#./test.sh -f general/field/single.sim
./test.sh -f general/field/smallint.sim
./test.sh -f general/field/tinyint.sim
##./test.sh -f general/field/smallint.sim
##./test.sh -f general/field/tinyint.sim
./test.sh -f general/http/restful.sim
##./test.sh -f general/http/restful.sim
./test.sh -f general/http/restful_insert.sim
./test.sh -f general/http/restful_limit.sim
./test.sh -f general/http/restful_full.sim
##./test.sh -f general/http/restful_full.sim
./test.sh -f general/http/prepare.sim
./test.sh -f general/http/telegraf.sim
#./test.sh -f general/http/grafana_bug.sim
./test.sh -f general/http/grafana_bug.sim
#./test.sh -f general/http/grafana.sim
./test.sh -f general/import/basic.sim
@ -96,11 +96,6 @@ cd ../../../debug; make
./test.sh -f general/insert/query_multi_file.sim
./test.sh -f general/insert/tcp.sim
#./test.sh -f general/metrics/disk.sim
#./test.sh -f general/metrics/metrics.sim
#./test.sh -f general/metrics/values.sim
#./test.sh -f general/metrics/vnode3.sim
#parser
# ./test.sh -f general/parser/alter.sim
@ -109,7 +104,7 @@ cd ../../../debug; make
# ./test.sh -f general/parser/auto_create_tb.sim
# ./test.sh -f general/parser/auto_create_tb_drop_tb.sim
./test.sh -f general/parser/binary_escapeCharacter.sim
./test.sh -f general/parser/bug.sim
#./test.sh -f general/parser/bug.sim
./test.sh -f general/parser/col_arithmetic_operation.sim
./test.sh -f general/parser/columnValue_bigint.sim
./test.sh -f general/parser/columnValue_bool.sim
@ -162,6 +157,11 @@ cd ../../../debug; make
# ./test.sh -f general/parser/select_with_tags.sim
# ./test.sh -f general/parser/groupby.sim
#./test.sh -f general/stable/disk.sim
#./test.sh -f general/stable/metrics.sim
#./test.sh -f general/stable/values.sim
#./test.sh -f general/stable/vnode3.sim
#stream
./test.sh -f general/table/autocreate.sim
@ -255,9 +255,9 @@ cd ../../../debug; make
#./test.sh -u -f unique/big/maxvnodes.sim
#./test.sh -u -f unique/big/tcp.sim
./test.sh -u -f unique/cluster/balance1.sim
./test.sh -u -f unique/cluster/balance2.sim
./test.sh -u -f unique/cluster/balance3.sim
##./test.sh -u -f unique/cluster/balance1.sim
##./test.sh -u -f unique/cluster/balance2.sim
##./test.sh -u -f unique/cluster/balance3.sim
#./test.sh -u -f unique/cluster/cache.sim
./test.sh -u -f unique/column/replica3.sim
@ -265,26 +265,26 @@ cd ../../../debug; make
#./test.sh -u -f unique/db/commit.sim
#./test.sh -u -f unique/db/delete.sim
#./test.sh -u -f unique/db/delete_part.sim
./test.sh -u -f unique/db/replica_add12.sim
./test.sh -u -f unique/db/replica_add13.sim
./test.sh -u -f unique/db/replica_add23.sim
./test.sh -u -f unique/db/replica_reduce21.sim
./test.sh -u -f unique/db/replica_reduce32.sim
./test.sh -u -f unique/db/replica_reduce31.sim
./test.sh -u -f unique/db/replica_part.sim
##./test.sh -u -f unique/db/replica_add12.sim
##./test.sh -u -f unique/db/replica_add13.sim
##./test.sh -u -f unique/db/replica_add23.sim
##./test.sh -u -f unique/db/replica_reduce21.sim
##./test.sh -u -f unique/db/replica_reduce32.sim
##./test.sh -u -f unique/db/replica_reduce31.sim
##./test.sh -u -f unique/db/replica_part.sim
./test.sh -u -f unique/dnode/balance1.sim
./test.sh -u -f unique/dnode/balance2.sim
./test.sh -u -f unique/dnode/balance3.sim
./test.sh -u -f unique/dnode/balancex.sim
./test.sh -u -f unique/dnode/offline1.sim
./test.sh -u -f unique/dnode/offline2.sim
##./test.sh -u -f unique/dnode/balance1.sim
##./test.sh -u -f unique/dnode/balance2.sim
##./test.sh -u -f unique/dnode/balance3.sim
##./test.sh -u -f unique/dnode/balancex.sim
##./test.sh -u -f unique/dnode/offline1.sim
##./test.sh -u -f unique/dnode/offline2.sim
#./test.sh -u -f unique/dnode/remove1.sim
#./test.sh -u -f unique/dnode/remove2.sim
#./test.sh -u -f unique/dnode/vnode_clean.sim
./test.sh -u -f unique/http/admin.sim
./test.sh -u -f unique/http/opentsdb.sim
##./test.sh -u -f unique/http/admin.sim
##./test.sh -u -f unique/http/opentsdb.sim
#./test.sh -u -f unique/import/replica2.sim
#./test.sh -u -f unique/import/replica3.sim
@ -298,28 +298,28 @@ cd ../../../debug; make
#./test.sh -u -f unique/metrics/replica3_dnode6.sim
#./test.sh -u -f unique/metrics/replica3_vnode3.sim
./test.sh -u -f unique/mnode/mgmt22.sim
./test.sh -u -f unique/mnode/mgmt23.sim
./test.sh -u -f unique/mnode/mgmt24.sim
./test.sh -u -f unique/mnode/mgmt25.sim
./test.sh -u -f unique/mnode/mgmt26.sim
./test.sh -u -f unique/mnode/mgmt33.sim
./test.sh -u -f unique/mnode/mgmt34.sim
##./test.sh -u -f unique/mnode/mgmt22.sim
##./test.sh -u -f unique/mnode/mgmt23.sim
##./test.sh -u -f unique/mnode/mgmt24.sim
##./test.sh -u -f unique/mnode/mgmt25.sim
##./test.sh -u -f unique/mnode/mgmt26.sim
##./test.sh -u -f unique/mnode/mgmt33.sim
##./test.sh -u -f unique/mnode/mgmt34.sim
#./test.sh -u -f unique/mnode/mgmtr2.sim
#./test.sh -u -f unique/mnode/secondIp.sim
#stream
./test.sh -u -f unique/table/delete_part.sim
##./test.sh -u -f unique/table/delete_part.sim
./test.sh -u -f unique/vnode/replica2_basic2.sim
./test.sh -u -f unique/vnode/replica3_basic.sim
##./test.sh -u -f unique/vnode/replica2_basic2.sim
##./test.sh -u -f unique/vnode/replica3_basic.sim
#./test.sh -u -f unique/vnode/commit.sim
#./test.sh -u -f unique/vnode/many.sim
#./test.sh -u -f unique/vnode/replica2_basic.sim
./test.sh -u -f unique/vnode/replica2_basic2.sim
##./test.sh -u -f unique/vnode/replica2_basic2.sim
#./test.sh -u -f unique/vnode/replica2_repeat.sim
./test.sh -u -f unique/vnode/replica3_basic.sim
##./test.sh -u -f unique/vnode/replica3_basic.sim
#./test.sh -u -f unique/vnode/replica3_repeat.sim
#./test.sh -u -f unique/vnode/replica3_vgroup.sim

View File

@ -108,6 +108,7 @@ echo "udebugFlag 131" >> $TAOS_CFG
echo "jnidebugFlag 131" >> $TAOS_CFG
echo "sdebugFlag 135" >> $TAOS_CFG
echo "monitor 0" >> $TAOS_CFG
echo "monitorInterval 1" >> $TAOS_CFG
echo "http 0" >> $TAOS_CFG
echo "numOfThreadsPerCore 2.0" >> $TAOS_CFG
echo "defaultPass taosdata" >> $TAOS_CFG

View File

@ -2,14 +2,14 @@
PID=`ps -ef|grep /usr/bin/taosd | grep -v grep | awk '{print $2}'`
if [ -n "$PID" ]; then
echo sudo systemctl stop taosd
sudo systemctl stop taosd
echo systemctl stop taosd
systemctl stop taosd
fi
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]; do
echo sudo kill -9 $PID
sudo pkill -9 taosd
sudo fuser -k -n tcp 6030
echo kill -9 $PID
pkill -9 taosd
fuser -k -n tcp 6030
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done

View File

@ -35,10 +35,10 @@ print =============== step3
sql drop user read -x step31
return -1
step31:
sql drop user sys -x step32
sql drop user _root -x step32
return -1
step32:
sql drop user stream -x step33
sql drop user monitor -x step33
return -1
step33:
@ -71,11 +71,11 @@ sql alter user read privilege 1 -x step43
return -1
step43:
sql drop user sys -x step41
sql drop user _root -x step41
return -1
step41:
sql drop user stream -x step42
sql drop user monitor -x step42
return -1
step42:

View File

@ -73,9 +73,9 @@ endi
sleep 2000
sql show dnodes
print dnode1 ==> openVnodes: $data3_1
print dnode2 ==> openVnodes: $data3_2
print dnode3 ==> openVnodes: $data3_3
print dnode1 ==> openVnodes: $data2_1
print dnode2 ==> openVnodes: $data2_2
print dnode3 ==> openVnodes: $data2_3
if $data2_1 != 0 then
return -1
@ -99,9 +99,9 @@ sleep 10000
print ======== step3
sql show dnodes
print dnode1 ==> openVnodes: $data3_1
print dnode2 ==> openVnodes: $data3_2
print dnode3 ==> openVnodes: $data3_3
print dnode1 ==> openVnodes: $data2_1
print dnode2 ==> openVnodes: $data2_2
print dnode3 ==> openVnodes: $data2_3
if $data2_1 != 0 then
return -1

View File

@ -8,7 +8,8 @@ system sh/cfg.sh -n dnode2 -c numOfMPeers -v 2
system sh/cfg.sh -n dnode3 -c numOfMPeers -v 2
print ============== step1
system sh/exec_up.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start -t
sleep 3000
sql connect
sql show mnodes
@ -19,7 +20,7 @@ if $data2_1 != master then
endi
print ============== step2
system sh/exec_up.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start -t
sql create dnode $hostname2
$x = 0
@ -44,8 +45,12 @@ print ============== step3
sql_error drop dnode $hostname1 -x error1
print should not drop master
system sh/exec_up.sh -n dnode1 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
return
print ============== step4
system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode1 -s stop -x SIGINT
sql_error show mnodes
print error of no master
@ -78,7 +83,7 @@ if $data2_2 != slave then
endi
print ============== step7
system sh/exec_up.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start -t
sql create dnode $hostname3
sleep 5000

View File

@ -30,10 +30,10 @@ print =============== step3
sql drop user read -x step31
return -1
step31:
sql drop user sys -x step32
sql drop user _root -x step32
return -1
step32:
sql drop user stream -x step33
sql drop user monitor -x step33
return -1
step33:
@ -64,11 +64,11 @@ sql alter user read privilege 1 -x step43
return -1
step43:
sql drop user sys -x step41
sql drop user _root -x step41
return -1
step41:
sql drop user stream -x step42
sql drop user monitor -x step42
return -1
step42:

View File

@ -119,7 +119,7 @@ char *simGetVariable(SScript *script, char *varName, int varLen) {
int simExecuteExpression(SScript *script, char *exp) {
char *op1, *op2, *var1, *var2, *var3, *rest;
int op1Len, op2Len, var1Len, var2Len, var3Len, val0, val1;
char t0[512], t1[512], t2[512], t3[512];
char t0[512], t1[512], t2[512], t3[1024];
int result;
rest = paGetToken(exp, &var1, &var1Len);
@ -310,14 +310,15 @@ void simStoreSystemContentResult(SScript *script, char *filename) {
bool simExecuteSystemContentCmd(SScript *script, char *option) {
char buf[4096] = {0};
char buf1[4096 + 512] = {0};
char filename[400] = {0};
sprintf(filename, "%s/%s.tmp", tsScriptDir, script->fileName);
sprintf(buf, "cd %s; ", tsScriptDir);
simVisuallizeOption(script, option, buf + strlen(buf));
sprintf(buf, "%s > %s 2>/dev/null", buf, filename);
sprintf(buf1, "%s > %s 2>/dev/null", buf, filename);
sprintf(script->system_exit_code, "%d", system(buf));
sprintf(script->system_exit_code, "%d", system(buf1));
simStoreSystemContentResult(script, filename);
script->linePos++;
@ -414,7 +415,7 @@ void simCloseNativeConnect(SScript *script) {
simTrace("script:%s, taos:%p closed", script->fileName, script->taos);
taos_close(script->taos);
taosMsleep(1000);
taosMsleep(1200);
script->taos = NULL;
}