Merge remote-tracking branch 'origin/develop' into feature/crash_gen

Steven Li 2020-06-08 17:34:29 -07:00
commit 0faeca5b82
51 changed files with 1057 additions and 463 deletions

.gitignore

@@ -64,4 +64,5 @@ CMakeError.log
 /out/isenseconfig/WSL-Clang-Debug
 /out/isenseconfig/WSL-GCC-Debug
 /test/cfg
 /src/.vs
+*.o


@@ -178,7 +178,7 @@ matrix:
       cd ${TRAVIS_BUILD_DIR}
       lcov -d . --capture --rc lcov_branch_coverage=1 -o coverage.info
-      lcov --remove coverage.info '*tests*' '*deps*' -o coverage.info
+      lcov --remove coverage.info '*/tests/*' '*/test/*' '*/deps/*' -o coverage.info
       lcov -l --rc lcov_branch_coverage=1 coverage.info || travis_terminate $?
       gem install coveralls-lcov


@@ -191,14 +191,14 @@ typedef struct SDataBlockList { // todo remove
 } SDataBlockList;

 typedef struct SQueryInfo {
   int16_t  command;        // the command may be different for each subclause, so keep it seperately.
   uint32_t type;           // query/insert/import type
   char     slidingTimeUnit;
   STimeWindow      window;
   int64_t          intervalTime;  // aggregation time interval
   int64_t          slidingTime;   // sliding window in mseconds
   SSqlGroupbyExpr  groupbyExpr;   // group by tags info
   SArray *         colList;       // SArray<SColumn*>
   SFieldInfo       fieldsInfo;
@@ -207,11 +207,11 @@ typedef struct SQueryInfo {
   SLimitVal        slimit;
   STagCond         tagCond;
   SOrderVal        order;
-  int16_t          fillType;      // interpolate type
+  int16_t          fillType;      // final result fill type
   int16_t          numOfTables;
   STableMetaInfo **pTableMetaInfo;
   struct STSBuf *  tsBuf;
-  int64_t *        fillVal;       // default value for interpolation
+  int64_t *        fillVal;       // default value for fill
   char *           msg;           // pointer to the pCmd->payload to keep error message temporarily
   int64_t          clauseLimit;   // limit for current sub clause
@@ -222,15 +222,15 @@ typedef struct SQueryInfo {
 typedef struct {
   int     command;
   uint8_t msgType;
   bool    autoCreated;     // if the table is missing, on-the-fly create it. during getmeterMeta
   int8_t  dataSourceType;  // load data from file or not
   union {
     int32_t count;
     int32_t numOfTablesInSubmit;
   };
+  int32_t insertType;
   int32_t clauseIndex;  // index of multiple subclause query
   int8_t  parseFinished;
   short   numOfCols;
@@ -239,14 +239,12 @@
   int32_t      payloadLen;
   SQueryInfo **pQueryInfo;
   int32_t      numOfClause;
-  char *       curSql;      // current sql, resume position of sql after parsing paused
-  void *       pTableList;  // referred table involved in sql
-  int32_t      batchSize;   // for parameter ('?') binding and batch processing
-  int32_t      numOfParams;
   SDataBlockList *pDataBlocks;  // submit data blocks after parsing sql
+  char *       curSql;      // current sql, resume position of sql after parsing paused
+  void *       pTableList;  // referred table involved in sql
+  // for parameter ('?') binding and batch processing
+  int32_t      batchSize;
+  int32_t      numOfParams;
 } SSqlCmd;

 typedef struct SResRec {
@@ -316,7 +314,6 @@ typedef struct SSqlObj {
   SRpcIpSet ipList;
   char      freed : 4;
   char      listed : 4;
-  uint32_t  insertType;
   tsem_t    rspSem;
   SSqlCmd   cmd;
   SSqlRes   res;
@@ -361,7 +358,7 @@ int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion);
 void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet);
 int tscProcessSql(SSqlObj *pSql);
-int tscRenewMeterMeta(SSqlObj *pSql, char *tableId);
+int tscRenewTableMeta(SSqlObj *pSql, char *tableId);
 void tscQueueAsyncRes(SSqlObj *pSql);
 void tscQueueAsyncError(void(*fp), void *param, int32_t code);


@@ -442,15 +442,17 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
   }

   if (pSql->pStream == NULL) {
-    // check if it is a sub-query of super table query first, if true, enter another routine
     SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-    if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) {
+
+    // check if it is a sub-query of super table query first, if true, enter another routine
+    if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY)) {
+      tscTrace("%p update table meta in local cache, continue to process sql and send corresponding subquery", pSql);
       STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
       if (pTableMetaInfo->pTableMeta == NULL){
         code = tscGetTableMeta(pSql, pTableMetaInfo);
         assert(code == TSDB_CODE_SUCCESS);
       }

       assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pTableMetaInfo->vgroupIndex >= 0 && pSql->param != NULL);
@@ -460,32 +462,37 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
       assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex &&
           tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0);

-      tscTrace("%p get metricMeta during super table query successfully", pSql);
-      code = tscGetSTableVgroupInfo(pSql, 0);
-      pRes->code = code;
-      if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
-    } else {  // normal async query continues
+      // NOTE: the vgroupInfo for the queried super table must be existed here.
+      assert(pTableMetaInfo->vgroupList != NULL);
+      if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) {
+        return;
+      }
+    } else {  // continue to process normal async query
       if (pCmd->parseFinished) {
-        tscTrace("%p re-send data to vnode in table Meta callback since sql parsed completed", pSql);
+        tscTrace("%p update table meta in local cache, continue to process sql and send corresponding query", pSql);
         STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
         code = tscGetTableMeta(pSql, pTableMetaInfo);
         assert(code == TSDB_CODE_SUCCESS);
-        if (pTableMetaInfo->pTableMeta) {
-          // todo update the submit message according to the new table meta
-          // 1. table uid, 2. ip address
-          code = tscSendMsgToServer(pSql);
-          if (code == TSDB_CODE_SUCCESS) return;
-        }
+
+        // if failed to process sql, go to error handler
+        if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) {
+          return;
+        }
+
+        // // todo update the submit message according to the new table meta
+        // // 1. table uid, 2. ip address
+        // code = tscSendMsgToServer(pSql);
+        // if (code == TSDB_CODE_SUCCESS) return;
+        // }
       } else {
+        tscTrace("%p continue parse sql after get table meta", pSql);
         code = tsParseSql(pSql, false);
-        if ((pQueryInfo->type & TSDB_QUERY_TYPE_STMT_INSERT) == TSDB_QUERY_TYPE_STMT_INSERT) {
+        if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STMT_INSERT)) {
           STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
           code = tscGetTableMeta(pSql, pTableMetaInfo);
           assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->pTableMeta != NULL);

           (*pSql->fp)(pSql->param, pSql, code);
           return;
         }
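The hunks above replace open-coded bitmask tests with the TSDB_QUERY_HAS_TYPE helper (and later hunks use TSDB_QUERY_SET_TYPE when the flag is stored). A minimal standalone sketch of how such flag macros behave; the flag values and macro bodies below are illustrative assumptions, not the definitions from the TDengine client headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the real flags/macros in the TDengine client headers. */
#define QUERY_TYPE_STABLE_SUBQUERY 0x02u
#define QUERY_TYPE_STMT_INSERT     0x10u
#define QUERY_HAS_TYPE(x, t) (((x) & (t)) != 0)
#define QUERY_SET_TYPE(x, t) ((x) |= (t))

int main(void) {
  uint32_t type = 0;
  QUERY_SET_TYPE(type, QUERY_TYPE_STMT_INSERT);

  /* Same effect as the old ((type & FLAG) == FLAG) expression, with less room for typos. */
  if (QUERY_HAS_TYPE(type, QUERY_TYPE_STMT_INSERT)) {
    printf("stmt-insert flag is set\n");
  }
  return 0;
}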


@@ -1293,7 +1293,7 @@ static void max_function_f(SQLFunctionCtx *pCtx, int32_t index) {
   minMax_function_f(pCtx, index, 0);

   SResultInfo *pResInfo = GET_RES_INFO(pCtx);
-  if (pResInfo->hasResult == DATA_SET_FLAG) {
+  if (pResInfo->hasResult == DATA_SET_FLAG && pResInfo->superTableQ) {
     char *flag = pCtx->aOutputBuf + pCtx->inputBytes;
     *flag = DATA_SET_FLAG;
   }
@@ -1309,7 +1309,7 @@ static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) {
   minMax_function_f(pCtx, index, 1);

   SResultInfo *pResInfo = GET_RES_INFO(pCtx);
-  if (pResInfo->hasResult == DATA_SET_FLAG) {
+  if (pResInfo->hasResult == DATA_SET_FLAG && pResInfo->superTableQ) {
     char *flag = pCtx->aOutputBuf + pCtx->inputBytes;
     *flag = DATA_SET_FLAG;
   }


@@ -1314,7 +1314,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
   tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);

   TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT);
-  TSDB_QUERY_SET_TYPE(pQueryInfo->type, pSql->insertType);
+  TSDB_QUERY_SET_TYPE(pQueryInfo->type, pCmd->insertType);

   sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
   if (sToken.type != TK_INTO) {
@@ -1342,7 +1342,7 @@ int tsParseSql(SSqlObj *pSql, bool initialParse) {
    * Set the fp before parse the sql string, in case of getTableMeta failed, in which
    * the error handle callback function can rightfully restore the user-defined callback function (fp).
    */
-  if (initialParse && (pSql->insertType != TSDB_QUERY_TYPE_STMT_INSERT)) {
+  if (initialParse && (pSql->cmd.insertType != TSDB_QUERY_TYPE_STMT_INSERT)) {
     pSql->fetchFp = pSql->fp;
     pSql->fp = (void(*)())tscHandleMultivnodeInsert;
   }
@@ -1354,9 +1354,7 @@
       return ret;
     }

-    SSqlInfo SQLInfo = {0};
-    tSQLParse(&SQLInfo, pSql->sqlstr);
+    SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr);

     ret = tscToSQLCmd(pSql, &SQLInfo);
     SQLInfoDestroy(&SQLInfo);
   }


@@ -451,7 +451,7 @@ static int insertStmtExecute(STscStmt* stmt) {
   pRes->qhandle = 0;

-  pSql->insertType = 0;
+  pSql->cmd.insertType = 0;
   pSql->fetchFp = waitForQueryRsp;
   pSql->fp = (void(*)())tscHandleMultivnodeInsert;
@@ -515,7 +515,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
   SSqlRes *pRes = &pSql->res;
   pSql->param = (void*) pSql;
   pSql->fp = waitForQueryRsp;
-  pSql->insertType = TSDB_QUERY_TYPE_STMT_INSERT;
+  pSql->cmd.insertType = TSDB_QUERY_TYPE_STMT_INSERT;

   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
     tscError("%p failed to malloc payload buffer", pSql);


@@ -515,7 +515,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
       if (ret != 0) {
         return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
       }
     }

+    pCmd->parseFinished = 1;
     return TSDB_CODE_SUCCESS;  // do not build query message here
@@ -543,6 +543,7 @@
     return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
   }

+  pSql->cmd.parseFinished = true;
   return tscBuildMsg[pCmd->command](pSql, pInfo);
 }


@@ -1185,7 +1185,9 @@ bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage
   int32_t ret = 0;  // merge all result by default

   int16_t functionId = pLocalReducer->pCtx[0].functionId;
-  if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) {  // column projection query
+
+  // todo opt performance
+  if ((/*functionId == TSDB_FUNC_PRJ || */functionId == TSDB_FUNC_ARITHM) || (tscIsProjectionQueryOnSTable(pQueryInfo, 0))) {  // column projection query
     ret = 1;  // disable merge procedure
   } else {
     tOrderDescriptor *pDesc = pLocalReducer->pDesc;


@@ -239,16 +239,6 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
   if (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
       rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
-    /*
-     * not_active_table: 1. the virtual node may fail to create table, since the procedure of create table is asynchronized,
-     *                   the virtual node may have not create table till now, so try again by using the new metermeta.
-     *                   2. this requested table may have been removed by other client, so we need to renew the
-     *                   metermeta here.
-     *
-     * not_active_vnode: current vnode is move to other node due to node balance procedure or virtual node have been
-     *                   removed. So, renew metermeta and try again.
-     * not_active_session: db has been move to other node, the vnode does not exist on this dnode anymore.
-     */
     if (pCmd->command == TSDB_SQL_CONNECT) {
       rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
       rpcFreeCont(rpcMsg->pCont);
@@ -258,8 +248,7 @@
       rpcFreeCont(rpcMsg->pCont);
       return;
     } else if (pCmd->command == TSDB_SQL_META) {
-      // rpcFreeCont(rpcMsg->pCont);
-      // return;
+      // get table meta query will not retry, do nothing
     } else {
       tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry);
@@ -267,13 +256,14 @@
       if (pSql->retry > pSql->maxRetry) {
         tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
       } else {
-        rpcMsg->code = tscRenewMeterMeta(pSql, pTableMetaInfo->name);
-        if (pTableMetaInfo->pTableMeta) {
-          tscSendMsgToServer(pSql);
+        rpcMsg->code = tscRenewTableMeta(pSql, pTableMetaInfo->name);
+
+        // if there is an error occurring, proceed to the following error handling procedure.
+        // todo add test cases
+        if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+          rpcFreeCont(rpcMsg->pCont);
+          return;
         }
-        rpcFreeCont(rpcMsg->pCont);
-        return;
       }
     }
   }
@@ -330,9 +320,9 @@
     }
   }

-  if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command])
+  if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command]) {
     rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
+  }

   if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
     rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? pRes->numOfRows: pRes->code;
@@ -431,7 +421,7 @@ void tscKillSTableQuery(SSqlObj *pSql) {
     /*
      * here, we cannot set the command = TSDB_SQL_KILL_QUERY. Otherwise, it may cause
-     * sub-queries not correctly released and master sql object of metric query reaches an abnormal state.
+     * sub-queries not correctly released and master sql object of super table query reaches an abnormal state.
      */
     pSql->pSubs[i]->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
     //taosStopRpcConn(pSql->pSubs[i]->thandle);
@@ -565,7 +555,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
     pQueryMsg->numOfTables = htonl(1);  // set the number of tables

     pMsg += sizeof(STableIdInfo);
-  } else {
+  } else {  // it is a subquery of the super table query, this IP info is acquired from vgroupInfo
     int32_t index = pTableMetaInfo->vgroupIndex;
     int32_t numOfVgroups = taosArrayGetSize(pTableMetaInfo->pVgroupTables);
     assert(index >= 0 && index < numOfVgroups);
@@ -1822,7 +1812,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }

-  tscTrace("%p recv table meta: %"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->uid, pTableMeta->sid, pTableMetaInfo->name);
+  tscTrace("%p recv table meta, uid:%"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->uid, pTableMeta->sid, pTableMetaInfo->name);

   free(pTableMeta);
   return TSDB_CODE_SUCCESS;
@@ -2358,7 +2348,7 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
   int32_t code = tscProcessSql(pNew);
   if (code == TSDB_CODE_SUCCESS) {
-    code = TSDB_CODE_TSC_ACTION_IN_PROGRESS;
+    code = TSDB_CODE_TSC_ACTION_IN_PROGRESS;  // notify upper application that current process need to be terminated
   }

   return code;
@@ -2389,56 +2379,26 @@ int tscGetMeterMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool create
   return tscGetTableMeta(pSql, pTableMetaInfo);
 }

-/*
- * in handling the renew metermeta problem during insertion,
- *
- * If the meter is created on demand during insertion, the routine usually waits for a short
- * period to re-issue the getMeterMeta msg, in which makes a greater change that vnode has
- * successfully created the corresponding table.
- */
-static void tscWaitingForCreateTable(SSqlCmd *pCmd) {
-  if (pCmd->command == TSDB_SQL_INSERT) {
-    taosMsleep(50);  // todo: global config
-  }
-}
-
 /**
- * in renew metermeta, do not retrieve metadata in cache.
+ * retrieve table meta from mnode, and update the local table meta cache.
  * @param pSql      sql object
- * @param tableId   meter id
+ * @param tableId   table full name
  * @return          status code
  */
-int tscRenewMeterMeta(SSqlObj *pSql, char *tableId) {
-  int code = 0;
-
-  // handle table meta renew process
+int tscRenewTableMeta(SSqlObj *pSql, char *tableId) {
   SSqlCmd *pCmd = &pSql->cmd;

   SQueryInfo *    pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
   STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);

-  /*
-   * 1. only update the metermeta in force model metricmeta is not updated
-   * 2. if get metermeta failed, still get the metermeta
-   */
-  if (pTableMetaInfo->pTableMeta == NULL || !tscQueryOnSTable(pCmd)) {
-    STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
-    if (pTableMetaInfo->pTableMeta) {
-      tscTrace("%p update table meta, old: numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql,
-               tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->uid, pTableMeta);
-    }
-
-    tscWaitingForCreateTable(pCmd);
-    taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
-
-    code = getTableMetaFromMgmt(pSql, pTableMetaInfo);  // todo ??
-  } else {
-    tscTrace("%p metric query not update metric meta, numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql,
-             tscGetNumOfTags(pTableMetaInfo->pTableMeta), pCmd->numOfCols, pTableMetaInfo->pTableMeta->uid,
-             pTableMetaInfo->pTableMeta);
+  STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+  if (pTableMetaInfo->pTableMeta) {
+    tscTrace("%p update table meta, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql,
+             tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->uid, pTableMeta);
   }

-  return code;
+  taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
+  return getTableMetaFromMgmt(pSql, pTableMetaInfo);
 }

 static bool allVgroupInfoRetrieved(SSqlCmd* pCmd, int32_t clauseIndex) {
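The rewritten tscRenewTableMeta above drops the insert-specific wait and the super-table special case: it logs the stale entry, releases it from the cache, and always refetches the meta from the management node. A self-contained sketch of that evict-then-refetch pattern; the single-slot cache and the names below are placeholders, not the taosCache API:

#include <stdio.h>
#include <string.h>

/* Hypothetical single-slot cache, only to illustrate the renew flow. */
typedef struct { char name[64]; int version; int valid; } TableMeta;

static TableMeta g_cache = {"demo_table", 1, 1};
static int       g_mnodeVersion = 2;   /* pretend the mnode has a newer schema */

static TableMeta *cacheLookup(const char *name) {
  return (g_cache.valid && strcmp(g_cache.name, name) == 0) ? &g_cache : NULL;
}

static void cacheEvict(const char *name) {
  if (cacheLookup(name) != NULL) g_cache.valid = 0;
}

static TableMeta *fetchMetaFromMgmt(const char *name) {
  /* In the real client this is an async RPC; here we simply refill the slot. */
  snprintf(g_cache.name, sizeof(g_cache.name), "%s", name);
  g_cache.version = g_mnodeVersion;
  g_cache.valid = 1;
  return &g_cache;
}

/* Mirrors the new tscRenewTableMeta shape: log the old entry, evict, refetch. */
static TableMeta *renewTableMeta(const char *name) {
  TableMeta *old = cacheLookup(name);
  if (old != NULL) printf("dropping cached meta for %s (version %d)\n", name, old->version);
  cacheEvict(name);
  return fetchMetaFromMgmt(name);
}

int main(void) {
  TableMeta *fresh = renewTableMeta("demo_table");
  printf("renewed meta version:%d\n", fresh->version);
  return 0;
}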


@@ -301,7 +301,9 @@ void tscSaveSubscriptionProgress(void* sub) {
   char path[256];
   sprintf(path, "%s/subscribe", tsDataDir);
   if (access(path, 0) != 0) {
-    mkdir(path, 0777);
+    if (mkdir(path, 0777) != 0 && errno != EEXIST) {
+      tscError("failed to create subscribe dir: %s", path);
+    }
   }

   sprintf(path, "%s/subscribe/%s", tsDataDir, pSub->topic);


@@ -80,9 +80,8 @@ int32_t tscInitRpc(const char *user, const char *secret, void** pDnodeConn) {
 }

 void taos_init_imp() {
   char temp[128];
-  struct stat dirstat;

   errno = TSDB_CODE_SUCCESS;
   srand(taosGetTimestampSec());
   deltaToUtcInitOnce();
@@ -94,7 +93,9 @@
     taosReadGlobalLogCfg();

     // For log directory
-    if (stat(tsLogDir, &dirstat) < 0) mkdir(tsLogDir, 0755);
+    if (mkdir(tsLogDir, 0755) != 0 && errno != EEXIST) {
+      printf("failed to create log dir:%s\n", tsLogDir);
+    }

     sprintf(temp, "%s/taoslog", tsLogDir);
     if (taosInitLog(temp, tsNumOfLogLines, 10) < 0) {
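Both the subscribe-directory hunk and this log-directory hunk replace a stat()-then-mkdir() pair with one mkdir() call that treats EEXIST as success, which is also race-free. A small standalone sketch of that idempotent pattern (the path is just an example):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Create a directory if it is missing; an already-existing directory is not an error. */
static int ensureDir(const char *path, mode_t mode) {
  if (mkdir(path, mode) != 0 && errno != EEXIST) {
    fprintf(stderr, "failed to create dir %s, reason:%s\n", path, strerror(errno));
    return -1;
  }
  return 0;
}

int main(void) {
  /* Example path only; the real code uses tsLogDir / tsDataDir from the config. */
  return ensureDir("/tmp/taos_example_dir", 0755);
}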


@@ -136,16 +136,17 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_USERS, 0, 0x0355, "mnode too
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_TABLE_ALREADY_EXIST, 0, 0x0360, "mnode table already exist")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, 0, 0x0361, "mnode invalid table id")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, 0, 0x0362, "mnode invalid table type")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, 0, 0x0363, "mnode too many tags")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TABLES, 0, 0x0364, "mnode too many tables")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, 0, 0x0365, "mnode not enough time series")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, 0, 0x0366, "mnode no super table")            // operation only available for super table
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, 0, 0x0367, "mnode column name too long")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_ALREAY_EXIST, 0, 0x0368, "mnode tag already exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_NOT_EXIST, 0, 0x0369, "mnode tag not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_ALREAY_EXIST, 0, 0x036A, "mnode field already exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_NOT_EXIST, 0, 0x036B, "mnode field not exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, 0, 0x0362, "mnode invalid table name")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, 0, 0x0363, "mnode invalid table type")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, 0, 0x0364, "mnode too many tags")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TABLES, 0, 0x0365, "mnode too many tables")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, 0, 0x0366, "mnode not enough time series")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, 0, 0x0367, "mnode no super table")            // operation only available for super table
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, 0, 0x0368, "mnode column name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_ALREAY_EXIST, 0, 0x0369, "mnode tag already exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_NOT_EXIST, 0, 0x036A, "mnode tag not exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_ALREAY_EXIST, 0, 0x036B, "mnode field already exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_NOT_EXIST, 0, 0x036C, "mnode field not exist")

 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_SELECTED, 0, 0x0380, "mnode db not selected")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_ALREADY_EXIST, 0, 0x0381, "mnode database aleady exist")


@@ -148,7 +148,11 @@ static void shellSourceFile(TAOS *con, char *fptr) {
   }

   char *fname = full_path.we_wordv[0];
+  if (fname == NULL) {
+    fprintf(stderr, "ERROR: invalid filename\n");
+    return;
+  }

   if (access(fname, F_OK) != 0) {
     fprintf(stderr, "ERROR: file %s is not exist\n", fptr);
@@ -169,6 +173,7 @@
   if (f == NULL) {
     fprintf(stderr, "ERROR: failed to open file %s\n", fname);
     wordfree(&full_path);
+    free(cmd);
     return;
   }


@@ -43,6 +43,7 @@ extern char configDir[];
 #define MAX_DATA_SIZE 1024
 #define MAX_NUM_DATATYPE 8
 #define OPT_ABORT 1 /* abort */
+#define STRING_LEN 512

 /* The options we understand. */
 static struct argp_option options[] = {
@@ -380,10 +381,11 @@ int main(int argc, char *argv[]) {
   bool insert_only = arguments.insert_only;
   char **data_type = arguments.datatype;
   int count_data_type = 0;
-  char dataString[512];
+  char dataString[STRING_LEN];
   bool do_aggreFunc = true;

-  memset(dataString, 0, 512);
+  memset(dataString, 0, STRING_LEN);
+  int len = 0;

   if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0) {
     do_aggreFunc = false;
@@ -392,8 +394,8 @@
     if (strcasecmp(data_type[count_data_type], "") == 0) {
       break;
     }
-    strcat(dataString, data_type[count_data_type]);
-    strcat(dataString, " ");
+    len += snprintf(dataString + len, STRING_LEN - len, "%s ", data_type[count_data_type]);
   }

   FILE *fp = fopen(arguments.output_file, "a");
@@ -473,32 +475,29 @@
   sprintf(command, "create database %s;", db_name);
   taos_query(taos, command);

-  char cols[512] = "\0";
+  char cols[STRING_LEN] = "\0";
   int colIndex = 0;
+  len = 0;

   for (; colIndex < ncols_per_record - 1; colIndex++) {
     if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0) {
-      sprintf(command, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]);
-      strcat(cols, command);
+      len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]);
     } else {
-      sprintf(command, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
-      strcat(cols, command);
+      len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
     }
   }

   if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0) {
-    sprintf(command, ",f%d %s)", colIndex + 1, data_type[colIndex % count_data_type]);
+    len += snprintf(cols + len, STRING_LEN - len, ",f%d %s)", colIndex + 1, data_type[colIndex % count_data_type]);
   } else {
-    sprintf(command, ",f%d %s(%d))", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
+    len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d))", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
   }
-  strcat(cols, command);

   if (!use_metric) {
     /* Create all the tables; */
     printf("Creating %d table(s)......\n", ntables);
     for (int i = 0; i < ntables; i++) {
-      sprintf(command, "create table %s.%s%d (ts timestamp%s;", db_name, tb_prefix, i, cols);
+      snprintf(command, BUFFER_SIZE, "create table %s.%s%d (ts timestamp%s;", db_name, tb_prefix, i, cols);
       queryDB(taos, command);
     }
@@ -508,7 +507,7 @@
   } else {
     /* Create metric table */
     printf("Creating meters super table...\n");
-    sprintf(command, "create table %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
+    snprintf(command, BUFFER_SIZE, "create table %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
     queryDB(taos, command);

     printf("meters created!\n");
@@ -522,10 +521,10 @@
       j = i % 10;
     }
     if (j % 2 == 0) {
-      sprintf(command, "create table %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j,"shanghai");
+      snprintf(command, BUFFER_SIZE, "create table %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "shanghai");
     } else {
-      sprintf(command, "create table %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j,"beijing");
+      snprintf(command, BUFFER_SIZE, "create table %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "beijing");
     }
     queryDB(taos, command);
   }
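The taosdemo changes swap unbounded sprintf()/strcat() concatenation for snprintf() calls that accumulate an offset, so the generated column list can never overrun its fixed buffer. A minimal sketch of that bounded-append pattern (the buffer size and column spec are illustrative):

#include <stdio.h>

#define STRING_LEN 512

int main(void) {
  char cols[STRING_LEN] = "\0";
  int  len = 0;

  /* Each append writes at most STRING_LEN - len bytes, including the terminator;
   * the len < STRING_LEN guard stops appending once the buffer is exhausted. */
  for (int i = 0; i < 5 && len < STRING_LEN; i++) {
    len += snprintf(cols + len, STRING_LEN - len, ",f%d int", i + 1);
  }

  printf("column spec:%s\n", cols);
  return 0;
}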


@@ -117,8 +117,8 @@ typedef struct {
 } SDbInfo;

 typedef struct {
-  char name[TSDB_TABLE_NAME_LEN + 1];
-  char metric[TSDB_TABLE_NAME_LEN + 1];
+  char name[TSDB_TABLE_NAME_LEN];
+  char metric[TSDB_TABLE_NAME_LEN];
 } STableRecord;

 typedef struct {
@@ -871,7 +871,7 @@ int32_t taosDumpMetric(char *metric, SDumpArguments *arguments, FILE *fp) {
   int fd = -1;
   STableRecord tableRecord;

-  strcpy(tableRecord.metric, metric);
+  tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
   sprintf(command, "select tbname from %s", metric);
   result = taos_query(taos, command);


@@ -27,7 +27,7 @@
 void *  tsAcctSdb = NULL;
 static int32_t tsAcctUpdateSize;
-static void mnodeCreateRootAcct();
+static int32_t mnodeCreateRootAcct();

 static int32_t mnodeAcctActionDestroy(SSdbOper *pOper) {
   SAcctObj *pAcct = pOper->pObj;
@@ -79,7 +79,11 @@ static int32_t mnodeAcctActionDecode(SSdbOper *pOper) {
 static int32_t mnodeAcctActionRestored() {
   if (dnodeIsFirstDeploy()) {
-    mnodeCreateRootAcct();
+    int32_t code = mnodeCreateRootAcct();
+    if (code != TSDB_CODE_SUCCESS) {
+      mError("failed to create root account, reason:%s", tstrerror(code));
+      return code;
+    }
   }

   acctInit();
@@ -161,9 +165,9 @@ void mnodeDropUserFromAcct(SAcctObj *pAcct, SUserObj *pUser) {
   mnodeDecAcctRef(pAcct);
 }

-static void mnodeCreateRootAcct() {
+static int32_t mnodeCreateRootAcct() {
   int32_t numOfAccts = sdbGetNumOfRows(tsAcctSdb);
-  if (numOfAccts != 0) return;
+  if (numOfAccts != 0) return TSDB_CODE_SUCCESS;

   SAcctObj *pAcct = malloc(sizeof(SAcctObj));
   memset(pAcct, 0, sizeof(SAcctObj));
@@ -190,7 +194,8 @@
     .table = tsAcctSdb,
     .pObj = pAcct,
   };
-  sdbInsertRow(&oper);
+
+  return sdbInsertRow(&oper);
 }

 #ifndef _ACCT


@@ -88,9 +88,9 @@ int32_t mnodeStartSystem() {
   }

   mPrint("starting to initialize mnode ...");
-  struct stat dirstat;
-  if (stat(tsMnodeDir, &dirstat) < 0) {
-    mkdir(tsMnodeDir, 0755);
+  if (mkdir(tsMnodeDir, 0755) != 0 && errno != EEXIST) {
+    mError("failed to init mnode dir:%s, reason:%s", tsMnodeDir, strerror(errno));
+    return -1;
   }

   dnodeAllocateMnodeWqueue();


@@ -316,7 +316,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
   }

   sprintf(pConnectRsp->acctId, "%x", pAcct->acctId);
-  strcpy(pConnectRsp->serverVersion, version);
+  memcpy(pConnectRsp->serverVersion, version, TSDB_VERSION_LEN);
   pConnectRsp->writeAuth = pUser->writeAuth;
   pConnectRsp->superAuth = pUser->superAuth;


@@ -714,7 +714,7 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
       return TSDB_CODE_SUCCESS;
     } else {
       mError("table:%s, failed to drop table, table not exist", pDrop->tableId);
-      return TSDB_CODE_MND_INVALID_TABLE_ID;
+      return TSDB_CODE_MND_INVALID_TABLE_NAME;
     }
   }
@@ -742,7 +742,7 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {
   if (pMsg->pTable == NULL) {
     if (!pInfo->createFlag) {
       mError("table:%s, failed to get table meta, table not exist", pInfo->tableId);
-      return TSDB_CODE_MND_INVALID_TABLE_ID;
+      return TSDB_CODE_MND_INVALID_TABLE_NAME;
     } else {
       mTrace("table:%s, failed to get table meta, start auto create table ", pInfo->tableId);
       return mnodeAutoCreateChildTable(pMsg);
@@ -779,7 +779,7 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
   if (pStable->schema == NULL) {
     free(pStable);
     mError("table:%s, failed to create, no schema input", pCreate->tableId);
-    return TSDB_CODE_MND_INVALID_TABLE_ID;
+    return TSDB_CODE_MND_INVALID_TABLE_NAME;
   }
   memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema));
@@ -1340,7 +1340,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
   if (pRsp->numOfTables != numOfTable) {
     rpcFreeCont(pRsp);
-    return TSDB_CODE_MND_INVALID_TABLE_ID;
+    return TSDB_CODE_MND_INVALID_TABLE_NAME;
   } else {
     pRsp->numOfTables = htonl(pRsp->numOfTables);
     pMsg->rpcRsp.rsp = pRsp;
@@ -1452,7 +1452,7 @@ static SChildTableObj* mnodeDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgOb
   if (pSuperTable == NULL) {
     mError("table:%s, corresponding super table:%s does not exist", pCreate->tableId, pTagData->name);
     mnodeDestroyChildTable(pTable);
-    terrno = TSDB_CODE_MND_INVALID_TABLE_ID;
+    terrno = TSDB_CODE_MND_INVALID_TABLE_NAME;
     return NULL;
   }
   mnodeDecTableRef(pSuperTable);
@@ -2212,7 +2212,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
   if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pAlter->tableId);
   if (pMsg->pTable == NULL) {
     mError("table:%s, failed to alter table, table not exist", pMsg->pTable->tableId);
-    return TSDB_CODE_MND_INVALID_TABLE_ID;
+    return TSDB_CODE_MND_INVALID_TABLE_NAME;
   }

   pAlter->type = htons(pAlter->type);


@@ -383,11 +383,11 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
     pTable = mnodeGetTable(pShow->payload);
     if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
       mnodeDecTableRef(pTable);
-      return TSDB_CODE_MND_INVALID_TABLE_ID;
+      return TSDB_CODE_MND_INVALID_TABLE_NAME;
     }
     mnodeDecTableRef(pTable);
     pVgroup = mnodeGetVgroup(((SChildTableObj*)pTable)->vgId);
-    if (NULL == pVgroup) return TSDB_CODE_MND_INVALID_TABLE_ID;
+    if (NULL == pVgroup) return TSDB_CODE_MND_INVALID_TABLE_NAME;
     mnodeDecVgroupRef(pVgroup);
     maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
   } else {


@@ -229,7 +229,7 @@ static void taosGetSystemLocale() {  // get and set default locale
     uError("can't get locale from system, set it to en_US.UTF-8");
     strcpy(tsLocale, "en_US.UTF-8");
   } else {
-    strncpy(tsLocale, locale, tListLen(tsLocale));
+    tstrncpy(tsLocale, locale, tListLen(tsLocale));
     uError("locale not configured, set to system default:%s", tsLocale);
   }
 }
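Here, and in the taosdump and tgHandle hunks, strncpy()/strcpy() is replaced with TDengine's tstrncpy(), whose point is to always leave a terminating NUL even when the source is too long. A re-implementation of such a bounded copy, for illustration only (this is not the actual tstrncpy definition):

#include <stdio.h>
#include <string.h>

/* Illustrative bounded copy that always NUL-terminates, unlike plain strncpy. */
static char *boundedCopy(char *dst, const char *src, size_t size) {
  if (size == 0) return dst;
  strncpy(dst, src, size - 1);
  dst[size - 1] = '\0';
  return dst;
}

int main(void) {
  char locale[8];
  boundedCopy(locale, "en_US.UTF-8", sizeof(locale));
  printf("truncated but terminated: %s\n", locale);  /* prints "en_US.U" */
  return 0;
}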


@@ -445,7 +445,7 @@ void httpJsonPairStatus(JsonBuf* buf, int code) {
     httpJsonItemToken(buf);
     if (code == TSDB_CODE_MND_DB_NOT_SELECTED) {
       httpJsonPair(buf, "desc", 4, "failed to create database", 23);
-    } else if (code == TSDB_CODE_MND_INVALID_TABLE_ID) {
+    } else if (code == TSDB_CODE_MND_INVALID_TABLE_NAME) {
       httpJsonPair(buf, "desc", 4, "failed to create table", 22);
     } else
       httpJsonPair(buf, "desc", 4, (char*)tstrerror(code), (int)strlen(tstrerror(code)));


@@ -32,12 +32,12 @@ bool httpCheckUsedbSql(char *sql) {
 void httpTimeToString(time_t t, char *buf, int buflen) {
   memset(buf, 0, (size_t)buflen);
-  char ts[30] = {0};
+  char ts[32] = {0};
   struct tm *ptm;

   time_t tt = t / 1000;
   ptm = localtime(&tt);
-  strftime(ts, 64, "%Y-%m-%d %H:%M:%S", ptm);
+  strftime(ts, 31, "%Y-%m-%d %H:%M:%S", ptm);

   sprintf(buf, "%s.%03ld", ts, t % 1000);
 }
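The httpTimeToString fix makes the length passed to strftime() match the real buffer size (the old code described a 30-byte array as 64 bytes long). A standalone sketch of the corrected sizing, using sizeof on the buffer with an example timestamp:

#include <stdio.h>
#include <time.h>

int main(void) {
  char       ts[32] = {0};
  time_t     tt = 1591662869;             /* example epoch seconds */
  struct tm *ptm = localtime(&tt);

  /* The size argument must not exceed sizeof(ts); sizeof(ts) is the safest choice. */
  if (ptm != NULL && strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", ptm) > 0) {
    printf("%s\n", ts);
  }
  return 0;
}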


@@ -313,7 +313,7 @@ bool tgGetPassFromUrl(HttpContext *pContext) {
     return false;
   }

-  strcpy(pContext->pass, pParser->path[TG_PASS_URL_POS].pos);
+  tstrncpy(pContext->pass, pParser->path[TG_PASS_URL_POS].pos, TSDB_PASSWORD_LEN);
   return true;
 }


@@ -111,7 +111,7 @@ bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) {
               pContext->ipstr);
       return false;
     }
-  } else if (code == TSDB_CODE_MND_INVALID_TABLE_ID) {
+  } else if (code == TSDB_CODE_MND_INVALID_TABLE_NAME) {
     cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED;
     if (multiCmds->cmds[multiCmds->pos - 1].cmdState == HTTP_CMD_STATE_NOT_RUN_YET) {
       multiCmds->pos = (int16_t)(multiCmds->pos - 2);


@@ -322,7 +322,7 @@ enum {
 #define NORMAL_ARITHMETIC 1
 #define AGG_ARIGHTMEIC 2

-int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pSql);
+SSqlInfo qSQLParse(const char *str);

 #ifdef __cplusplus
 }
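The parser entry point changes from an out-parameter style (tSQLParse fills a caller-supplied SSqlInfo) to returning the SSqlInfo by value; the matching implementation change is in qParserImpl.c below, and the caller update is in tsParseSql above. A self-contained sketch of the by-value pattern with a cut-down stand-in struct (only the valid/pzErrMsg fields are mirrored; this is not the real SSqlInfo):

#include <stdbool.h>
#include <stdio.h>

/* Cut-down stand-in for SSqlInfo; the real struct also carries the parsed AST. */
typedef struct {
  bool valid;
  char pzErrMsg[64];
} SqlInfoSketch;

/* Returning the result by value (as the new qSQLParse does) keeps initialization
 * in one place and rules out half-filled caller-owned output. */
static SqlInfoSketch parseSketch(const char *str) {
  SqlInfoSketch info = {0};
  info.valid = (str != NULL && str[0] != '\0');
  if (!info.valid) {
    snprintf(info.pzErrMsg, sizeof(info.pzErrMsg), "empty statement");
  }
  return info;
}

int main(void) {
  SqlInfoSketch info = parseSketch("select 1");
  printf("valid:%d err:%s\n", info.valid, info.pzErrMsg);
  return 0;
}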


@@ -373,7 +373,6 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin
       SPosInfo pos = {-1, -1};
       createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &pos);
     }
-
     pWindowResInfo->capacity = newCap;
   }
@@ -1566,11 +1565,6 @@ static bool isFirstLastRowQuery(SQuery *pQuery) {
   return false;
 }

-static UNUSED_FUNC bool notHasQueryTimeRange(SQuery *pQuery) {
-  return (pQuery->window.skey == 0 && pQuery->window.ekey == INT64_MAX && QUERY_IS_ASC_QUERY(pQuery)) ||
-         (pQuery->window.skey == INT64_MAX && pQuery->window.ekey == 0 && (!QUERY_IS_ASC_QUERY(pQuery)));
-}
-
 static bool needReverseScan(SQuery *pQuery) {
   for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
     int32_t functionId = pQuery->pSelectExpr[i].base.functionId;
@@ -1768,61 +1762,6 @@ static void changeExecuteScanOrder(SQuery *pQuery, bool stableQuery) {
   }
 }

-static UNUSED_FUNC void doSetInterpVal(SQLFunctionCtx *pCtx, TSKEY ts, int16_t type, int32_t index, char *data) {
-  assert(pCtx->param[index].pz == NULL);
-
-  int32_t len = 0;
-  size_t  t = 0;
-
-  if (type == TSDB_DATA_TYPE_BINARY) {
-    t = strlen(data);
-    len = t + 1 + TSDB_KEYSIZE;
-    pCtx->param[index].pz = calloc(1, len);
-  } else if (type == TSDB_DATA_TYPE_NCHAR) {
-    t = wcslen((const wchar_t *)data);
-    len = (t + 1) * TSDB_NCHAR_SIZE + TSDB_KEYSIZE;
-    pCtx->param[index].pz = calloc(1, len);
-  } else {
-    len = TSDB_KEYSIZE * 2;
-    pCtx->param[index].pz = malloc(len);
-  }
-
-  pCtx->param[index].nType = TSDB_DATA_TYPE_BINARY;
-
-  char *z = pCtx->param[index].pz;
-  *(TSKEY *)z = ts;
-  z += TSDB_KEYSIZE;
-
-  switch (type) {
-    case TSDB_DATA_TYPE_FLOAT:
-      *(double *)z = GET_FLOAT_VAL(data);
-      break;
-    case TSDB_DATA_TYPE_DOUBLE:
-      *(double *)z = GET_DOUBLE_VAL(data);
-      break;
-    case TSDB_DATA_TYPE_INT:
-    case TSDB_DATA_TYPE_BOOL:
-    case TSDB_DATA_TYPE_BIGINT:
-    case TSDB_DATA_TYPE_TINYINT:
-    case TSDB_DATA_TYPE_SMALLINT:
-    case TSDB_DATA_TYPE_TIMESTAMP:
-      *(int64_t *)z = GET_INT64_VAL(data);
-      break;
-    case TSDB_DATA_TYPE_BINARY:
-      strncpy(z, data, t);
-      break;
-    case TSDB_DATA_TYPE_NCHAR: {
-      wcsncpy((wchar_t *)z, (const wchar_t *)data, t);
-    } break;
-    default:
-      assert(0);
-  }
-
-  pCtx->param[index].nLen = len;
-}
-
 static int32_t getInitialPageNum(SQInfo *pQInfo) {
   SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
   int32_t INITIAL_RESULT_ROWS_VALUE = 16;
@@ -4071,45 +4010,19 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
     initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, rows, 4096, type);
   }

-  setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
-
-  /*
-   * in case of last_row query without query range, we set the query timestamp to be
-   * STable->lastKey. Otherwise, keep the initial query time range unchanged.
-   */
-//  if (isFirstLastRowQuery(pQuery)) {
-//    if (!normalizeUnBoundLastRowQuery(pQInfo, &interpInfo)) {
-//      sem_post(&pQInfo->dataReady);
-//      pointInterpSupporterDestroy(&interpInfo);
-//      return TSDB_CODE_SUCCESS;
-//    }
-//  }
-
   if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) {
     SFillColInfo* pColInfo = taosCreateFillColInfo(pQuery);
     pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, 0, 0, pQuery->rec.capacity, pQuery->numOfOutput,
                                               pQuery->slidingTime, pQuery->fillType, pColInfo);
   }

-  // todo refactor
   pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery);
+  setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
   return TSDB_CODE_SUCCESS;
 }

-static UNUSED_FUNC bool isGroupbyEachTable(SSqlGroupbyExpr *pGroupbyExpr, STableGroupInfo *pSidset) {
-  if (pGroupbyExpr == NULL || pGroupbyExpr->numOfGroupCols == 0) {
-    return false;
-  }
-
-  for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) {
-    SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, i);
-    if (pColIndex->flag == TSDB_COL_TAG) {
-      return true;
-    }
-  }
-
-  return false;
-}
-
 static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) {
   SQuery *pQuery = pRuntimeEnv->pQuery;
@@ -5907,10 +5820,11 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi
 _over:
   tfree(tagCond);
   tfree(tbnameCond);
+  tfree(pGroupColIndex);
   taosArrayDestroy(pTableIdList);

-  //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null;
   if (code != TSDB_CODE_SUCCESS) {
+    //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null;
     *pQInfo = NULL;
   }


@@ -26,16 +26,18 @@
 #include "tstrbuild.h"
 #include "queryLog.h"

-int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
+SSqlInfo qSQLParse(const char *pStr) {
   void *pParser = ParseAlloc(malloc);
-  pSQLInfo->valid = true;
+
+  SSqlInfo sqlInfo = {0};
+  sqlInfo.valid = true;

   int32_t i = 0;
   while (1) {
     SSQLToken t0 = {0};

     if (pStr[i] == 0) {
-      Parse(pParser, 0, t0, pSQLInfo);
+      Parse(pParser, 0, t0, &sqlInfo);
       goto abort_parse;
     }
@@ -49,19 +51,19 @@ int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
         break;
       }
       case TK_SEMI: {
-        Parse(pParser, 0, t0, pSQLInfo);
+        Parse(pParser, 0, t0, &sqlInfo);
         goto abort_parse;
       }

       case TK_QUESTION:
       case TK_ILLEGAL: {
-        snprintf(pSQLInfo->pzErrMsg, tListLen(pSQLInfo->pzErrMsg), "unrecognized token: \"%s\"", t0.z);
-        pSQLInfo->valid = false;
+        snprintf(sqlInfo.pzErrMsg, tListLen(sqlInfo.pzErrMsg), "unrecognized token: \"%s\"", t0.z);
+        sqlInfo.valid = false;
         goto abort_parse;
       }
       default:
-        Parse(pParser, t0.type, t0, pSQLInfo);
-        if (pSQLInfo->valid == false) {
+        Parse(pParser, t0.type, t0, &sqlInfo);
+        if (sqlInfo.valid == false) {
           goto abort_parse;
         }
     }
@@ -69,7 +71,7 @@ int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
 abort_parse:
   ParseFree(pParser, free);
-  return 0;
+  return sqlInfo;
 }

 tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SSQLToken *pToken) {


@@ -74,14 +74,16 @@ void tsdbEncodeTable(STable *pTable, char *buf, int *contLen) {
 STable *tsdbDecodeTable(void *cont, int contLen) {
   STable *pTable = (STable *)calloc(1, sizeof(STable));
   if (pTable == NULL) return NULL;
-  pTable->schema = (STSchema **)malloc(sizeof(STSchema *) * TSDB_MAX_TABLE_SCHEMAS);
-  if (pTable->schema == NULL) {
-    free(pTable);
-    return NULL;
-  }

   void *ptr = cont;
   T_READ_MEMBER(ptr, int8_t, pTable->type);
+  if (pTable->type != TSDB_CHILD_TABLE) {
+    pTable->schema = (STSchema **)malloc(sizeof(STSchema *) * TSDB_MAX_TABLE_SCHEMAS);
+    if (pTable->schema == NULL) {
+      free(pTable);
+      return NULL;
+    }
+  }
   int len = *(int *)ptr;
   ptr = (char *)ptr + sizeof(int);
   pTable->name = calloc(1, len + VARSTR_HEADER_SIZE + 1);
@@ -620,7 +622,10 @@ static int tsdbFreeTable(STable *pTable) {
   if (pTable->type == TSDB_CHILD_TABLE) {
     kvRowFree(pTable->tagVal);
   } else {
-    for (int i = 0; i < pTable->numOfSchemas; i++) tdFreeSchema(pTable->schema[i]);
+    if (pTable->schema) {
+      for (int i = 0; i < pTable->numOfSchemas; i++) tdFreeSchema(pTable->schema[i]);
+      free(pTable->schema);
+    }
   }

   if (pTable->type == TSDB_STREAM_TABLE) {


@ -764,8 +764,8 @@ static bool tsdbShouldCreateNewLast(SRWHelper *pHelper) {
static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDataCols, int rowsToWrite, SCompBlock *pCompBlock, static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDataCols, int rowsToWrite, SCompBlock *pCompBlock,
bool isLast, bool isSuperBlock) { bool isLast, bool isSuperBlock) {
ASSERT(rowsToWrite > 0 && rowsToWrite <= pDataCols->numOfRows && ASSERT(rowsToWrite > 0 && rowsToWrite <= pDataCols->numOfRows && rowsToWrite <= pHelper->config.maxRowsPerFileBlock);
rowsToWrite <= pHelper->config.maxRowsPerFileBlock); ASSERT(isLast ? rowsToWrite < pHelper->config.minRowsPerFileBlock : true);
SCompData *pCompData = (SCompData *)(pHelper->pBuffer); SCompData *pCompData = (SCompData *)(pHelper->pBuffer);
int64_t offset = 0; int64_t offset = 0;
@ -905,14 +905,15 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
rowsWritten = MIN((defaultRowsToWrite - blockAtIdx(pHelper, blkIdx)->numOfRows), pDataCols->numOfRows); rowsWritten = MIN((defaultRowsToWrite - blockAtIdx(pHelper, blkIdx)->numOfRows), pDataCols->numOfRows);
if ((blockAtIdx(pHelper, blkIdx)->numOfSubBlocks < TSDB_MAX_SUBBLOCKS) && if ((blockAtIdx(pHelper, blkIdx)->numOfSubBlocks < TSDB_MAX_SUBBLOCKS) &&
(blockAtIdx(pHelper, blkIdx)->numOfRows + rowsWritten < pHelper->config.minRowsPerFileBlock) && (pHelper->files.nLastF.fd) > 0) { (blockAtIdx(pHelper, blkIdx)->numOfRows + rowsWritten < pHelper->config.minRowsPerFileBlock) &&
(pHelper->files.nLastF.fd) < 0) {
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.lastF), pDataCols, rowsWritten, &compBlock, true, false) < 0) if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.lastF), pDataCols, rowsWritten, &compBlock, true, false) < 0)
goto _err; goto _err;
if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err; if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err;
} else { } else {
// Load // Load
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err; if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err;
ASSERT(pHelper->pDataCols[0]->numOfRows == blockAtIdx(pHelper, blkIdx)->numOfRows); ASSERT(pHelper->pDataCols[0]->numOfRows <= blockAtIdx(pHelper, blkIdx)->numOfRows);
// Merge // Merge
if (tdMergeDataCols(pHelper->pDataCols[0], pDataCols, rowsWritten) < 0) goto _err; if (tdMergeDataCols(pHelper->pDataCols[0], pDataCols, rowsWritten) < 0) goto _err;
// Write // Write
@ -936,21 +937,21 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
// Key must overlap with the block // Key must overlap with the block
ASSERT(keyFirst <= blockAtIdx(pHelper, blkIdx)->keyLast); ASSERT(keyFirst <= blockAtIdx(pHelper, blkIdx)->keyLast);
TSKEY keyLimit = TSKEY keyLimit = (blkIdx == pIdx->numOfBlocks - 1) ? INT64_MAX : blockAtIdx(pHelper, blkIdx + 1)->keyFirst - 1;
(blkIdx == pIdx->numOfBlocks - 1) ? INT64_MAX : pHelper->pCompInfo->blocks[blkIdx + 1].keyFirst - 1;
// rows1: number of rows must merge in this block // rows1: number of rows must merge in this block
int rows1 = tsdbGetRowsInRange(pDataCols, blockAtIdx(pHelper, blkIdx)->keyFirst, blockAtIdx(pHelper, blkIdx)->keyLast); int rows1 = tsdbGetRowsInRange(pDataCols, blockAtIdx(pHelper, blkIdx)->keyFirst, blockAtIdx(pHelper, blkIdx)->keyLast);
// rows2: max nuber of rows the block can have more // rows2: max number of rows the block can have more
int rows2 = pHelper->config.maxRowsPerFileBlock - blockAtIdx(pHelper, blkIdx)->numOfRows; int rows2 = pHelper->config.maxRowsPerFileBlock - blockAtIdx(pHelper, blkIdx)->numOfRows;
// rows3: number of rows between this block and the next block // rows3: number of rows between this block and the next block
int rows3 = tsdbGetRowsInRange(pDataCols, blockAtIdx(pHelper, blkIdx)->keyFirst, keyLimit); int rows3 = tsdbGetRowsInRange(pDataCols, blockAtIdx(pHelper, blkIdx)->keyFirst, keyLimit);
ASSERT(rows3 >= rows1); ASSERT(rows3 >= rows1);
if ((rows2 >= rows1) && if ((rows2 >= rows1) && (blockAtIdx(pHelper, blkIdx)->numOfSubBlocks < TSDB_MAX_SUBBLOCKS) &&
(( blockAtIdx(pHelper, blkIdx)->last) || ((!blockAtIdx(pHelper, blkIdx)->last) ||
((rows1 + blockAtIdx(pHelper, blkIdx)->numOfRows < pHelper->config.minRowsPerFileBlock) && (pHelper->files.nLastF.fd < 0)))) { ((rows1 + blockAtIdx(pHelper, blkIdx)->numOfRows < pHelper->config.minRowsPerFileBlock) &&
(pHelper->files.nLastF.fd < 0)))) {
rowsWritten = rows1; rowsWritten = rows1;
bool isLast = false; bool isLast = false;
SFile *pFile = NULL; SFile *pFile = NULL;
@ -964,7 +965,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
if (tsdbWriteBlockToFile(pHelper, pFile, pDataCols, rows1, &compBlock, isLast, false) < 0) goto _err; if (tsdbWriteBlockToFile(pHelper, pFile, pDataCols, rows1, &compBlock, isLast, false) < 0) goto _err;
if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err; if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err;
} else { // Load-Merge-Write } else { // Load-Merge-Write
// Load // Load
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err; if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err;
if (blockAtIdx(pHelper, blkIdx)->last) pHelper->hasOldLastBlock = false; if (blockAtIdx(pHelper, blkIdx)->last) pHelper->hasOldLastBlock = false;
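To make the branch above concrete (the numbers are illustrative, not taken from the source): with maxRowsPerFileBlock = 4096 and a data-file block that already holds 4000 rows, rows2 = 96; if rows1 = 150 incoming rows overlap the block's key range, rows2 < rows1 and the helper takes the load-merge-write path, re-reading the block, merging the rows via tdMergeDataCols and rewriting it. If instead rows1 = 50 and the block is not a last-file block (or it is, but the merged row count stays below minRowsPerFileBlock and no new last file is open), the 50 rows are written out directly and registered as an additional sub-block through tsdbAddSubBlock, provided numOfSubBlocks has not yet reached TSDB_MAX_SUBBLOCKS.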
@ -1106,16 +1107,16 @@ static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkId
for (int i = blkIdx + 1; i < pIdx->numOfBlocks; i++) { for (int i = blkIdx + 1; i < pIdx->numOfBlocks; i++) {
SCompBlock *pTCompBlock = pHelper->pCompInfo->blocks + i; SCompBlock *pTCompBlock = pHelper->pCompInfo->blocks + i;
if (pTCompBlock->numOfSubBlocks > 1) { if (pTCompBlock->numOfSubBlocks > 1) {
ptr = (void *)((char *)(pHelper->pCompInfo) + pTCompBlock->offset + pTCompBlock->len); ptr = POINTER_SHIFT(pHelper->pCompInfo, pTCompBlock->offset);
break; break;
} }
} }
if (ptr == NULL) ptr = (void *)((char *)(pHelper->pCompInfo) + pIdx->len - sizeof(TSCKSUM)); if (ptr == NULL) ptr = POINTER_SHIFT(pHelper->pCompInfo, pIdx->len-sizeof(TSCKSUM));
size_t tsize = pIdx->len - ((char *)ptr - (char *)(pHelper->pCompInfo)); size_t tsize = pIdx->len - ((char *)ptr - (char *)(pHelper->pCompInfo));
if (tsize > 0) { if (tsize > 0) {
memmove((void *)((char *)ptr + sizeof(SCompBlock) * 2), ptr, tsize); memmove(POINTER_SHIFT(ptr, sizeof(SCompBlock) * 2), ptr, tsize);
for (int i = blkIdx + 1; i < pIdx->numOfBlocks; i++) { for (int i = blkIdx + 1; i < pIdx->numOfBlocks; i++) {
SCompBlock *pTCompBlock = pHelper->pCompInfo->blocks + i; SCompBlock *pTCompBlock = pHelper->pCompInfo->blocks + i;
if (pTCompBlock->numOfSubBlocks > 1) pTCompBlock->offset += (sizeof(SCompBlock) * 2); if (pTCompBlock->numOfSubBlocks > 1) pTCompBlock->offset += (sizeof(SCompBlock) * 2);

60
src/util/inc/tkvstore.h Normal file
View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_KVSTORE_H_
#define _TD_KVSTORE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include "hash.h"  // for SHashObj used in SKVStore below
typedef int (*iterFunc)(void *, void *cont, int contLen);
typedef void (*afterFunc)(void *);
typedef struct {
int64_t size;
int64_t tombSize;
int64_t nRecords;
int64_t nDels;
} SStoreInfo;
typedef struct {
char * fname;
int fd;
char * fsnap;
int sfd;
char * fnew;
int nfd;
SHashObj * map;
iterFunc iFunc;
afterFunc aFunc;
void * appH;
SStoreInfo info;
} SKVStore;
int tdCreateKVStore(char *fname);
int tdDestroyKVStore();
SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH);
void tdCloseKVStore(SKVStore *pStore);
int tdKVStoreStartCommit(SKVStore *pStore);
int tdUpdateRecordInKVStore(SKVStore *pStore, uint64_t uid, void *cont, int contLen);
int tdKVStoreEndCommit(SKVStore *pStore);
#ifdef __cplusplus
}
#endif
#endif
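The header above only declares the interface, so the following is a minimal sketch of how a caller might drive it, assuming the usual TDengine error conventions (TSDB_CODE_SUCCESS == 0, negative return on failure). The file name "meta.kv", the uid, the payload and both callback bodies are made-up placeholders, and tdUpdateRecordInKVStore / tdCloseKVStore / tdDestroyKVStore are declared here but not yet implemented in the accompanying tkvstore.c:

#include <stdint.h>
#include <stdio.h>
#include "taoserror.h"
#include "tkvstore.h"

// Hypothetical record iterator: invoked once per stored record when the store is replayed.
static int demoIterFp(void *appH, void *cont, int contLen) {
  (void)appH;
  (void)cont;
  printf("got a record of %d bytes\n", contLen);
  return 0;
}

// Hypothetical hook invoked after the whole store has been iterated.
static void demoAfterFp(void *appH) { (void)appH; }

static int demoUseKVStore(void) {
  if (tdCreateKVStore("meta.kv") != TSDB_CODE_SUCCESS) return -1;  // writes the 512-byte header

  SKVStore *pStore = tdOpenKVStore("meta.kv", demoIterFp, demoAfterFp, NULL);
  if (pStore == NULL) return -1;                                   // would recover from meta.kv.snap if present

  char cont[] = "table-meta-blob";                                 // placeholder payload
  if (tdKVStoreStartCommit(pStore) < 0) return -1;                 // snapshots the header to meta.kv.snap
  if (tdUpdateRecordInKVStore(pStore, 1, cont, (int)sizeof(cont)) < 0) return -1;  // assumed <0 on error
  if (tdKVStoreEndCommit(pStore) < 0) return -1;                   // rewrites header, fsyncs, removes snapshot

  tdCloseKVStore(pStore);
  return 0;
}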

View File

@ -18,6 +18,7 @@ void generate_key(unsigned char* key);
void generate_sub_keys(unsigned char* main_key, key_set* key_sets); void generate_sub_keys(unsigned char* main_key, key_set* key_sets);
void process_message(unsigned char* message_piece, unsigned char* processed_piece, key_set* key_sets, int mode); void process_message(unsigned char* message_piece, unsigned char* processed_piece, key_set* key_sets, int mode);
#if 0
int64_t taosDesGenKey() { int64_t taosDesGenKey() {
unsigned int iseed = (unsigned int)time(NULL); unsigned int iseed = (unsigned int)time(NULL);
srand(iseed); srand(iseed);
@ -27,6 +28,7 @@ int64_t taosDesGenKey() {
return *((int64_t*)key); return *((int64_t*)key);
} }
#endif
char* taosDesImp(unsigned char* key, char* src, unsigned int len, int process_mode) { char* taosDesImp(unsigned char* key, char* src, unsigned int len, int process_mode) {
unsigned int number_of_blocks = len / 8; unsigned int number_of_blocks = len / 8;

292
src/util/src/tkvstore.c Normal file
View File

@ -0,0 +1,292 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "hash.h"
#include "os.h"
#include "taoserror.h"
#include "tchecksum.h"
#include "tcoding.h"
#include "tkvstore.h"
#include "tulog.h"
#define TD_KVSTORE_HEADER_SIZE 512
#define TD_KVSTORE_MAJOR_VERSION 1
#define TD_KVSTORE_MAINOR_VERSION 0
#define TD_KVSTORE_SNAP_SUFFIX ".snap"
#define TD_KVSTORE_NEW_SUFFIX ".new"
static int tdInitKVStoreHeader(int fd, char *fname);
static void * tdEncodeStoreInfo(void *buf, SStoreInfo *pInfo);
// static void * tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo);
static SKVStore *tdNewKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH);
static char * tdGetKVStoreSnapshotFname(char *fdata);
static char * tdGetKVStoreNewFname(char *fdata);
static void tdFreeKVStore(SKVStore *pStore);
static int tdUpdateKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo);
int tdCreateKVStore(char *fname) {
char *tname = strdup(fname);
if (tname == NULL) return TSDB_CODE_COM_OUT_OF_MEMORY;
int fd = open(fname, O_RDWR | O_CREAT, 0755);
if (fd < 0) {
uError("failed to open file %s since %s", fname, strerror(errno));
return TAOS_SYSTEM_ERROR(errno);
}
int code = tdInitKVStoreHeader(fd, fname);
if (code != TSDB_CODE_SUCCESS) return code;
if (fsync(fd) < 0) {
uError("failed to fsync file %s since %s", fname, strerror(errno));
return TAOS_SYSTEM_ERROR(errno);
}
if (close(fd) < 0) {
uError("failed to close file %s since %s", fname, strerror(errno));
return TAOS_SYSTEM_ERROR(errno);
}
return TSDB_CODE_SUCCESS;
}
SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH) {
SKVStore *pStore = tdNewKVStore(fname, iFunc, aFunc, appH);
if (pStore == NULL) return NULL;
pStore->fd = open(pStore->fname, O_RDWR);
if (pStore->fd < 0) {
uError("failed to open file %s since %s", pStore->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
if (access(pStore->fsnap, F_OK) == 0) {
uTrace("file %s exists, try to recover the KV store", pStore->fsnap);
pStore->sfd = open(pStore->fsnap, O_RDONLY);
if (pStore->sfd < 0) {
uError("failed to open file %s since %s", pStore->fsnap, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
// TODO: rewind the file
close(pStore->sfd);
pStore->sfd = -1;
remove(pStore->fsnap);
}
// TODO: Recover from the file
return pStore;
_err:
if (pStore->fd > 0) {
close(pStore->fd);
pStore->fd = -1;
}
if (pStore->sfd > 0) {
close(pStore->sfd);
pStore->sfd = -1;
}
tdFreeKVStore(pStore);
return NULL;
}
int tdKVStoreStartCommit(SKVStore *pStore) {
pStore->fd = open(pStore->fname, O_RDWR);
if (pStore->fd < 0) {
uError("failed to open file %s since %s", pStore->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
pStore->sfd = open(pStore->fsnap, O_WRONLY | O_CREAT, 0755);
if (pStore->sfd < 0) {
uError("failed to open file %s since %s", pStore->fsnap, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
if (tsendfile(pStore->sfd, pStore->fd, NULL, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) {
uError("failed to send file %d bytes since %s", TD_KVSTORE_HEADER_SIZE, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
if (fsync(pStore->sfd) < 0) {
uError("failed to fsync file %s since %s", pStore->fsnap, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
if (close(pStore->sfd) < 0) {
uError("failed to close file %s since %s", pStore->fsnap, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
pStore->sfd = -1;
return 0;
_err:
if (pStore->sfd > 0) {
close(pStore->sfd);
pStore->sfd = -1;
remove(pStore->fsnap);
}
if (pStore->fd > 0) {
close(pStore->fd);
pStore->fd = -1;
}
return -1;
}
int tdKVStoreEndCommit(SKVStore *pStore) {
ASSERT(pStore->fd > 0);
terrno = tdUpdateKVStoreHeader(pStore->fd, pStore->fname, &(pStore->info));
if (terrno != TSDB_CODE_SUCCESS) return -1;
if (fsync(pStore->fd) < 0) {
uError("failed to fsync file %s since %s", pStore->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (close(pStore->fd) < 0) {
uError("failed to close file %s since %s", pStore->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
remove(pStore->fsnap);
return 0;
}
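Read together with tdOpenKVStore above, the two commit calls form a simple header-snapshot protocol: tdKVStoreStartCommit copies the current 512-byte header to <fname>.snap before anything is modified, tdKVStoreEndCommit rewrites the header with the new SStoreInfo, fsyncs and only then deletes the snapshot, and if the process dies in between, the next tdOpenKVStore finds the .snap file and (per the TODO in the recovery branch) is expected to restore the saved header before replaying records through iFunc.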
static int tdUpdateKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo) {
char buf[TD_KVSTORE_HEADER_SIZE] = "\0";
if (lseek(fd, 0, SEEK_SET) < 0) {
uError("failed to lseek file %s since %s", fname, strerror(errno));
return TAOS_SYSTEM_ERROR(errno);
}
tdEncodeStoreInfo(buf, pInfo);
taosCalcChecksumAppend(0, (uint8_t *)buf, TD_KVSTORE_HEADER_SIZE);
if (twrite(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) {
uError("failed to write file %s %d bytes since %s", fname, TD_KVSTORE_HEADER_SIZE, strerror(errno));
return TAOS_SYSTEM_ERROR(errno);
}
return TSDB_CODE_SUCCESS;
}
static int tdInitKVStoreHeader(int fd, char *fname) {
SStoreInfo info = {TD_KVSTORE_HEADER_SIZE, 0, 0, 0};
return tdUpdateKVStoreHeader(fd, fname, &info);
}
static void *tdEncodeStoreInfo(void *buf, SStoreInfo *pInfo) {
buf = taosEncodeVariantI64(buf, pInfo->size);
buf = taosEncodeVariantI64(buf, pInfo->tombSize);
buf = taosEncodeVariantI64(buf, pInfo->nRecords);
buf = taosEncodeVariantI64(buf, pInfo->nDels);
return buf;
}
// static void *tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo) {
// buf = taosDecodeVariantI64(buf, &(pInfo->size));
// buf = taosDecodeVariantI64(buf, &(pInfo->tombSize));
// buf = taosDecodeVariantI64(buf, &(pInfo->nRecords));
// buf = taosDecodeVariantI64(buf, &(pInfo->nDels));
// return buf;
// }
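For reference, a hedged sketch of the matching read path, mirroring tdUpdateKVStoreHeader/tdEncodeStoreInfo above: it assumes the same includes as this file, that taosCalcChecksumAppend placed the checksum inside the 512-byte block, and that taosCheckChecksumWhole / taosDecodeVariantI64 (the latter referenced by the commented-out tdDecodeStoreInfo) are the corresponding verification and decode helpers:

// Hedged sketch of reading SStoreInfo back from a KV store header.
static int demoReadStoreInfo(int fd, SStoreInfo *pInfo) {
  char buf[TD_KVSTORE_HEADER_SIZE] = "\0";

  if (lseek(fd, 0, SEEK_SET) < 0) return -1;
  if (read(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) return -1;
  if (!taosCheckChecksumWhole((uint8_t *)buf, TD_KVSTORE_HEADER_SIZE)) return -1;  // assumed checksum helper

  void *ptr = (void *)buf;
  ptr = taosDecodeVariantI64(ptr, &(pInfo->size));
  ptr = taosDecodeVariantI64(ptr, &(pInfo->tombSize));
  ptr = taosDecodeVariantI64(ptr, &(pInfo->nRecords));
  ptr = taosDecodeVariantI64(ptr, &(pInfo->nDels));
  return 0;
}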
static SKVStore *tdNewKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH) {
SKVStore *pStore = (SKVStore *)malloc(sizeof(SKVStore));
if (pStore == NULL) goto _err;
pStore->fname = strdup(fname);
if (pStore->fname == NULL) goto _err;  // check the strdup result (the map is created below)
pStore->fsnap = tdGetKVStoreSnapshotFname(fname);
if (pStore->fsnap == NULL) goto _err;
pStore->fnew = tdGetKVStoreNewFname(fname);
if (pStore->fnew == NULL) goto _err;
pStore->fd = -1;
pStore->sfd = -1;
pStore->nfd = -1;
pStore->iFunc = iFunc;
pStore->aFunc = aFunc;
pStore->appH = appH;
pStore->map = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
if (pStore->map == NULL) {
terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
goto _err;
}
return pStore;
_err:
terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
tdFreeKVStore(pStore);
return NULL;
}
static void tdFreeKVStore(SKVStore *pStore) {
if (pStore) {
tfree(pStore->fname);
tfree(pStore->fsnap);
tfree(pStore->fnew);
taosHashCleanup(pStore->map);
free(pStore);
}
}
static char *tdGetKVStoreSnapshotFname(char *fdata) {
size_t size = strlen(fdata) + strlen(TD_KVSTORE_SNAP_SUFFIX) + 1;
char * fname = malloc(size);
if (fname == NULL) {
terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
return NULL;
}
sprintf(fname, "%s%s", fdata, TD_KVSTORE_SNAP_SUFFIX);
return fname;
}
static char *tdGetKVStoreNewFname(char *fdata) {
size_t size = strlen(fdata) + strlen(TD_KVSTORE_NEW_SUFFIX) + 1;
char * fname = malloc(size);
if (fname == NULL) {
terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
return NULL;
}
sprintf(fname, "%s%s", fdata, TD_KVSTORE_NEW_SUFFIX);
return fname;
}

View File

@ -87,6 +87,10 @@ void *taosThreadToOpenNewNote(void *param)
umask(0); umask(0);
int fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); int fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO);
if (fd < 0) {
return NULL;
}
taosLockNote(fd, pNote); taosLockNote(fd, pNote);
lseek(fd, 0, SEEK_SET); lseek(fd, 0, SEEK_SET);

View File

@ -75,19 +75,29 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
mkdir(tsVnodeDir, 0755); if (mkdir(tsVnodeDir, 0755) != 0 && errno != EEXIST) {
vError("vgId:%d, failed to create vnode, reason:%s dir:%s", pVnodeCfg->cfg.vgId, strerror(errno), tsVnodeDir);
char rootDir[TSDB_FILENAME_LEN] = {0}; if (errno == EACCES) {
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId); return TSDB_CODE_VND_NO_DISK_PERMISSIONS;
if (mkdir(rootDir, 0755) != 0) { } else if (errno == ENOSPC) {
vPrint("vgId:%d, failed to create vnode, reason:%s dir:%s", pVnodeCfg->cfg.vgId, strerror(errno), rootDir); return TSDB_CODE_VND_NO_DISKSPACE;
} else if (errno == ENOENT) {
return TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR;
} else {
return TSDB_CODE_VND_INIT_FAILED;
}
}
char rootDir[TSDB_FILENAME_LEN] = {0};
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId);
if (mkdir(rootDir, 0755) != 0 && errno != EEXIST) {
vError("vgId:%d, failed to create vnode, reason:%s dir:%s", pVnodeCfg->cfg.vgId, strerror(errno), rootDir);
if (errno == EACCES) { if (errno == EACCES) {
return TSDB_CODE_VND_NO_DISK_PERMISSIONS; return TSDB_CODE_VND_NO_DISK_PERMISSIONS;
} else if (errno == ENOSPC) { } else if (errno == ENOSPC) {
return TSDB_CODE_VND_NO_DISKSPACE; return TSDB_CODE_VND_NO_DISKSPACE;
} else if (errno == ENOENT) { } else if (errno == ENOENT) {
return TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR; return TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR;
} else if (errno == EEXIST) {
} else { } else {
return TSDB_CODE_VND_INIT_FAILED; return TSDB_CODE_VND_INIT_FAILED;
} }
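For example, starting the daemon from an account that cannot write tsVnodeDir now fails fast with TSDB_CODE_VND_NO_DISK_PERMISSIONS (EACCES), a full disk maps to TSDB_CODE_VND_NO_DISKSPACE (ENOSPC), a missing parent directory to TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR (ENOENT) and anything else to TSDB_CODE_VND_INIT_FAILED, while an already-existing directory (EEXIST) is no longer treated as an error.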

View File

@ -21,14 +21,16 @@
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
#include <taos.h> // TAOS header file #include <taos.h> // TAOS header file
#include <unistd.h> #include <sys/time.h>
#include <inttypes.h>
void taosMsleep(int mseconds);
static int32_t doQuery(TAOS* taos, const char* sql) { static int32_t doQuery(TAOS* taos, const char* sql) {
struct timeval t1 = {0};
gettimeofday(&t1, NULL);
TAOS_RES* res = taos_query(taos, sql); TAOS_RES* res = taos_query(taos, sql);
if (taos_errno(res) != 0) { if (taos_errno(res) != 0) {
printf("failed to execute query, reason:%s\n", taos_errstr(taos)); printf("failed to execute query, reason:%s\n", taos_errstr(res));
return -1; return -1;
} }
@ -38,13 +40,19 @@ static int32_t doQuery(TAOS* taos, const char* sql) {
int32_t numOfFields = taos_num_fields(res); int32_t numOfFields = taos_num_fields(res);
TAOS_FIELD* pFields = taos_fetch_fields(res); TAOS_FIELD* pFields = taos_fetch_fields(res);
int32_t i = 0;
while((row = taos_fetch_row(res)) != NULL) { while((row = taos_fetch_row(res)) != NULL) {
taos_print_row(buf, row, pFields, numOfFields); taos_print_row(buf, row, pFields, numOfFields);
printf("%s\n", buf); printf("%d:%s\n", ++i, buf);
memset(buf, 0, 512); memset(buf, 0, 512);
} }
taos_free_result(res); taos_free_result(res);
struct timeval t2 = {0};
gettimeofday(&t2, NULL);
printf("elapsed time:%"PRId64 " ms\n", ((t2.tv_sec*1000000 + t2.tv_usec) - (t1.tv_sec*1000000 + t1.tv_usec))/1000);
return 0; return 0;
} }
@ -101,14 +109,18 @@ int main(int argc, char *argv[]) {
taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
if (taos == NULL) { if (taos == NULL) {
printf("failed to connect to server, reason:%s\n", taos_errstr(taos)); printf("failed to connect to server, reason:%s\n", taos_errstr(NULL));
exit(1); exit(1);
} }
printf("success to connect to server\n");
printf("success to connect to server\n");
// doQuery(taos, "select c1,count(*) from group_db0.group_mt0 where c1<8 group by c1");
doQuery(taos, "select * from test.m1");
// multiThreadTest(1, taos); // multiThreadTest(1, taos);
doQuery(taos, "use test"); // doQuery(taos, "select tbname from test.m1");
doQuery(taos, "alter table tm99 set tag a=99"); // doQuery(taos, "select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_db0.lm2_stb0 where ts >= 1537146000000 and ts <= 1543145400000 and tbname in ('lm2_tb0') interval(1s) group by t1");
// doQuery(taos, "select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_db0.lm2_stb0 where ts >= 1537146000000 and ts <= 1543145400000 and tbname in ('lm2_tb0', 'lm2_tb1', 'lm2_tb2') interval(1s)");
// for(int32_t i = 0; i < 100000; ++i) { // for(int32_t i = 0; i < 100000; ++i) {
// doQuery(taos, "insert into t1 values(now, 2)"); // doQuery(taos, "insert into t1 values(now, 2)");
// } // }

View File

@ -205,7 +205,7 @@ class Test (Thread):
global written global written
dnodesDir = tdDnodes.getDnodesRootDir() dnodesDir = tdDnodes.getDnodesRootDir()
dataDir = dnodesDir + '/dnode1/*' dataDir = dnodesDir + '/dnode1/data/*'
deleteCmd = 'rm -rf %s' % dataDir deleteCmd = 'rm -rf %s' % dataDir
os.system(deleteCmd) os.system(deleteCmd)

View File

@ -208,7 +208,7 @@ class Test (threading.Thread):
global written global written
dnodesDir = tdDnodes.getDnodesRootDir() dnodesDir = tdDnodes.getDnodesRootDir()
dataDir = dnodesDir + '/dnode1/*' dataDir = dnodesDir + '/dnode1/data/*'
deleteCmd = 'rm -rf %s' % dataDir deleteCmd = 'rm -rf %s' % dataDir
os.system(deleteCmd) os.system(deleteCmd)

136
tests/pytest/regressiontest.sh Executable file
View File

@ -0,0 +1,136 @@
#!/bin/bash
ulimit -c unlimited
python3 ./test.py -f insert/basic.py
python3 ./test.py -f insert/int.py
python3 ./test.py -f insert/float.py
python3 ./test.py -f insert/bigint.py
python3 ./test.py -f insert/bool.py
python3 ./test.py -f insert/double.py
python3 ./test.py -f insert/smallint.py
python3 ./test.py -f insert/tinyint.py
python3 ./test.py -f insert/date.py
python3 ./test.py -f insert/binary.py
python3 ./test.py -f insert/nchar.py
# python3 ./test.py -f insert/nchar-boundary.py
# python3 ./test.py -f insert/nchar-unicode.py
python3 ./test.py -f insert/multi.py
python3 ./test.py -f insert/randomNullCommit.py
python3 ./test.py -f table/column_name.py
python3 ./test.py -f table/column_num.py
python3 ./test.py -f table/db_table.py
# python3 ./test.py -f table/tablename-boundary.py
# tag
python3 ./test.py -f tag_lite/filter.py
python3 ./test.py -f tag_lite/create-tags-boundary.py
python3 ./test.py -f tag_lite/3.py
python3 ./test.py -f tag_lite/4.py
python3 ./test.py -f tag_lite/5.py
python3 ./test.py -f tag_lite/6.py
# python3 ./test.py -f tag_lite/add.py
python3 ./test.py -f tag_lite/bigint.py
python3 ./test.py -f tag_lite/binary_binary.py
python3 ./test.py -f tag_lite/binary.py
python3 ./test.py -f tag_lite/bool_binary.py
python3 ./test.py -f tag_lite/bool_int.py
python3 ./test.py -f tag_lite/bool.py
python3 ./test.py -f tag_lite/change.py
python3 ./test.py -f tag_lite/column.py
# python3 ./test.py -f tag_lite/commit.py
python3 ./test.py -f tag_lite/create.py
# python3 ./test.py -f tag_lite/datatype.py
python3 ./test.py -f tag_lite/datatype-without-alter.py
# python3 ./test.py -f tag_lite/delete.py
python3 ./test.py -f tag_lite/double.py
python3 ./test.py -f tag_lite/float.py
python3 ./test.py -f tag_lite/int_binary.py
python3 ./test.py -f tag_lite/int_float.py
python3 ./test.py -f tag_lite/int.py
# python3 ./test.py -f tag_lite/set.py
python3 ./test.py -f tag_lite/smallint.py
python3 ./test.py -f tag_lite/tinyint.py
# python3 ./test.py -f dbmgmt/database-name-boundary.py
python3 ./test.py -f import_merge/importBlock1HO.py
python3 ./test.py -f import_merge/importBlock1HPO.py
python3 ./test.py -f import_merge/importBlock1H.py
python3 ./test.py -f import_merge/importBlock1S.py
python3 ./test.py -f import_merge/importBlock1Sub.py
python3 ./test.py -f import_merge/importBlock1TO.py
python3 ./test.py -f import_merge/importBlock1TPO.py
python3 ./test.py -f import_merge/importBlock1T.py
python3 ./test.py -f import_merge/importBlock2HO.py
python3 ./test.py -f import_merge/importBlock2HPO.py
python3 ./test.py -f import_merge/importBlock2H.py
python3 ./test.py -f import_merge/importBlock2S.py
python3 ./test.py -f import_merge/importBlock2Sub.py
python3 ./test.py -f import_merge/importBlock2TO.py
python3 ./test.py -f import_merge/importBlock2TPO.py
python3 ./test.py -f import_merge/importBlock2T.py
python3 ./test.py -f import_merge/importBlockbetween.py
python3 ./test.py -f import_merge/importCacheFileHO.py
python3 ./test.py -f import_merge/importCacheFileHPO.py
python3 ./test.py -f import_merge/importCacheFileH.py
python3 ./test.py -f import_merge/importCacheFileS.py
python3 ./test.py -f import_merge/importCacheFileSub.py
python3 ./test.py -f import_merge/importCacheFileTO.py
python3 ./test.py -f import_merge/importCacheFileTPO.py
python3 ./test.py -f import_merge/importCacheFileT.py
python3 ./test.py -f import_merge/importDataH2.py
# python3 ./test.py -f import_merge/importDataHO2.py
# python3 ./test.py -f import_merge/importDataHO.py
python3 ./test.py -f import_merge/importDataHPO.py
python3 ./test.py -f import_merge/importDataLastHO.py
python3 ./test.py -f import_merge/importDataLastHPO.py
python3 ./test.py -f import_merge/importDataLastH.py
python3 ./test.py -f import_merge/importDataLastS.py
python3 ./test.py -f import_merge/importDataLastSub.py
python3 ./test.py -f import_merge/importDataLastTO.py
python3 ./test.py -f import_merge/importDataLastTPO.py
python3 ./test.py -f import_merge/importDataLastT.py
python3 ./test.py -f import_merge/importDataS.py
# python3 ./test.py -f import_merge/importDataSub.py
python3 ./test.py -f import_merge/importDataTO.py
python3 ./test.py -f import_merge/importDataTPO.py
python3 ./test.py -f import_merge/importDataT.py
python3 ./test.py -f import_merge/importHeadOverlap.py
python3 ./test.py -f import_merge/importHeadPartOverlap.py
python3 ./test.py -f import_merge/importHead.py
python3 ./test.py -f import_merge/importHORestart.py
python3 ./test.py -f import_merge/importHPORestart.py
python3 ./test.py -f import_merge/importHRestart.py
python3 ./test.py -f import_merge/importLastHO.py
python3 ./test.py -f import_merge/importLastHPO.py
python3 ./test.py -f import_merge/importLastH.py
python3 ./test.py -f import_merge/importLastS.py
python3 ./test.py -f import_merge/importLastSub.py
python3 ./test.py -f import_merge/importLastTO.py
python3 ./test.py -f import_merge/importLastTPO.py
python3 ./test.py -f import_merge/importLastT.py
python3 ./test.py -f import_merge/importSpan.py
python3 ./test.py -f import_merge/importSRestart.py
python3 ./test.py -f import_merge/importSubRestart.py
python3 ./test.py -f import_merge/importTailOverlap.py
python3 ./test.py -f import_merge/importTailPartOverlap.py
python3 ./test.py -f import_merge/importTail.py
python3 ./test.py -f import_merge/importToCommit.py
python3 ./test.py -f import_merge/importTORestart.py
python3 ./test.py -f import_merge/importTPORestart.py
python3 ./test.py -f import_merge/importTRestart.py
python3 ./test.py -f import_merge/importInsertThenImport.py
# user
python3 ./test.py -f user/user_create.py
python3 ./test.py -f user/pass_len.py
# table
#python3 ./test.py -f table/del_stable.py
#query
python3 ./test.py -f query/filter.py
# python3 ./test.py -f query/filterCombo.py
# python3 ./test.py -f query/queryNormal.py
# python3 ./test.py -f query/queryError.py

View File

@ -31,21 +31,26 @@ class TDTestCase:
tdSql.prepare() tdSql.prepare()
tdLog.info("===== step1 =====") tdLog.info("===== step1 =====")
tdSql.execute("create table stb0(ts timestamp, col1 int, col2 float) tags(tgcol int)") tdSql.execute(
"create table stb0(ts timestamp, col1 int, col2 float) tags(tgcol int)")
for i in range(tbNum): for i in range(tbNum):
tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i)) tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i))
for j in range(rowNum): for j in range(rowNum):
tdSql.execute("insert into tb%d values (now - %dm, %d, %d)" % (i, 1440 - j, j, j)) tdSql.execute(
"insert into tb%d values (now - %dm, %d, %d)" %
(i, 1440 - j, j, j))
time.sleep(0.1) time.sleep(0.1)
tdLog.info("===== step2 =====") tdLog.info("===== step2 =====")
tdSql.query("select count(*), count(col1), count(col2) from tb0 interval(1d)") tdSql.query(
"select count(*), count(col1), count(col2) from tb0 interval(1d)")
tdSql.checkData(0, 1, rowNum) tdSql.checkData(0, 1, rowNum)
tdSql.checkData(0, 2, rowNum) tdSql.checkData(0, 2, rowNum)
tdSql.checkData(0, 3, rowNum) tdSql.checkData(0, 3, rowNum)
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum) tdSql.checkRows(tbNum)
tdSql.execute("create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)") tdSql.execute(
"create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)")
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 1) tdSql.checkRows(tbNum + 1)
@ -67,7 +72,8 @@ class TDTestCase:
tdLog.info("===== step6 =====") tdLog.info("===== step6 =====")
time.sleep(0.1) time.sleep(0.1)
tdSql.execute("create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)") tdSql.execute(
"create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)")
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 1) tdSql.checkRows(tbNum + 1)
@ -81,14 +87,16 @@ class TDTestCase:
tdSql.checkData(0, 3, rowNum) tdSql.checkData(0, 3, rowNum)
tdLog.info("===== step8 =====") tdLog.info("===== step8 =====")
tdSql.query("select count(*), count(col1), count(col2) from stb0 interval(1d)") tdSql.query(
"select count(*), count(col1), count(col2) from stb0 interval(1d)")
tdSql.checkData(0, 1, rowNum * tbNum) tdSql.checkData(0, 1, rowNum * tbNum)
tdSql.checkData(0, 2, rowNum * tbNum) tdSql.checkData(0, 2, rowNum * tbNum)
tdSql.checkData(0, 3, rowNum * tbNum) tdSql.checkData(0, 3, rowNum * tbNum)
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 1) tdSql.checkRows(tbNum + 1)
tdSql.execute("create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)") tdSql.execute(
"create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)")
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 2) tdSql.checkRows(tbNum + 2)
@ -110,7 +118,8 @@ class TDTestCase:
tdSql.error("select * from s1") tdSql.error("select * from s1")
tdLog.info("===== step12 =====") tdLog.info("===== step12 =====")
tdSql.execute("create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)") tdSql.execute(
"create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)")
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 2) tdSql.checkRows(tbNum + 2)

View File

@ -24,7 +24,6 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__) tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql) tdSql.init(conn.cursor(), logSql)
def run(self): def run(self):
tbNum = 10 tbNum = 10
rowNum = 20 rowNum = 20
@ -33,11 +32,14 @@ class TDTestCase:
tdSql.prepare() tdSql.prepare()
tdLog.info("===== step1 =====") tdLog.info("===== step1 =====")
tdSql.execute("create table stb0(ts timestamp, col1 int, col2 float) tags(tgcol int)") tdSql.execute(
"create table stb0(ts timestamp, col1 int, col2 float) tags(tgcol int)")
for i in range(tbNum): for i in range(tbNum):
tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i)) tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i))
for j in range(rowNum): for j in range(rowNum):
tdSql.execute("insert into tb%d values (now - %dm, %d, %d)" % (i, 1440 - j, j, j)) tdSql.execute(
"insert into tb%d values (now - %dm, %d, %d)" %
(i, 1440 - j, j, j))
time.sleep(0.1) time.sleep(0.1)
tdLog.info("===== step2 =====") tdLog.info("===== step2 =====")
@ -45,7 +47,8 @@ class TDTestCase:
tdSql.checkData(0, 1, rowNum) tdSql.checkData(0, 1, rowNum)
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum) tdSql.checkRows(tbNum)
tdSql.execute("create table s0 as select count(col1) from tb0 interval(1d)") tdSql.execute(
"create table s0 as select count(col1) from tb0 interval(1d)")
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 1) tdSql.checkRows(tbNum + 1)
@ -63,7 +66,8 @@ class TDTestCase:
tdSql.error("select * from s0") tdSql.error("select * from s0")
tdLog.info("===== step6 =====") tdLog.info("===== step6 =====")
tdSql.execute("create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)") tdSql.execute(
"create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)")
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 1) tdSql.checkRows(tbNum + 1)
@ -75,13 +79,15 @@ class TDTestCase:
tdSql.checkData(0, 3, rowNum) tdSql.checkData(0, 3, rowNum)
tdLog.info("===== step8 =====") tdLog.info("===== step8 =====")
tdSql.query("select count(*), count(col1), count(col2) from stb0 interval(1d)") tdSql.query(
"select count(*), count(col1), count(col2) from stb0 interval(1d)")
tdSql.checkData(0, 1, totalNum) tdSql.checkData(0, 1, totalNum)
tdSql.checkData(0, 2, totalNum) tdSql.checkData(0, 2, totalNum)
tdSql.checkData(0, 3, totalNum) tdSql.checkData(0, 3, totalNum)
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 1) tdSql.checkRows(tbNum + 1)
tdSql.execute("create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)") tdSql.execute(
"create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)")
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 2) tdSql.checkRows(tbNum + 2)
@ -101,7 +107,8 @@ class TDTestCase:
tdSql.error("select * from s1") tdSql.error("select * from s1")
tdLog.info("===== step12 =====") tdLog.info("===== step12 =====")
tdSql.execute("create table s1 as select count(col1) from stb0 interval(1d)") tdSql.execute(
"create table s1 as select count(col1) from stb0 interval(1d)")
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(tbNum + 2) tdSql.checkRows(tbNum + 2)
@ -112,7 +119,6 @@ class TDTestCase:
#tdSql.checkData(0, 2, None) #tdSql.checkData(0, 2, None)
#tdSql.checkData(0, 3, None) #tdSql.checkData(0, 3, None)
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)

View File

@ -194,13 +194,13 @@ class TDDnode:
selfPath = os.path.dirname(os.path.realpath(__file__)) selfPath = os.path.dirname(os.path.realpath(__file__))
binPath = "" binPath = ""
if ("TDinternal" in selfPath): if ("community" in selfPath):
projPath = selfPath + "/../../../../" projPath = selfPath + "/../../../../"
for root, dirs, files in os.walk(projPath): for root, dirs, files in os.walk(projPath):
if ("taosd" in files): if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root)) rootRealPath = os.path.dirname(os.path.realpath(root))
if ("community" not in rootRealPath): if ("packaging" not in rootRealPath):
binPath = os.path.join(root, "taosd") binPath = os.path.join(root, "taosd")
break break
else: else:
@ -213,7 +213,7 @@ class TDDnode:
break break
if (binPath == ""): if (binPath == ""):
tdLog.exit("taosd not found!s") tdLog.exit("taosd not found!")
else: else:
tdLog.info("taosd found in %s" % rootRealPath) tdLog.info("taosd found in %s" % rootRealPath)
@ -319,6 +319,7 @@ class TDDnodes:
self.dnodes.append(TDDnode(8)) self.dnodes.append(TDDnode(8))
self.dnodes.append(TDDnode(9)) self.dnodes.append(TDDnode(9))
self.dnodes.append(TDDnode(10)) self.dnodes.append(TDDnode(10))
self.simDeployed = False
def init(self, path): def init(self, path):
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
@ -378,7 +379,10 @@ class TDDnodes:
self.sim = TDSimClient() self.sim = TDSimClient()
self.sim.init(self.path) self.sim.init(self.path)
self.sim.setTestCluster(self.testCluster) self.sim.setTestCluster(self.testCluster)
self.sim.deploy()
if (self.simDeployed == False):
self.sim.deploy()
self.simDeployed = True
self.check(index) self.check(index)
self.dnodes[index - 1].setTestCluster(self.testCluster) self.dnodes[index - 1].setTestCluster(self.testCluster)

View File

@ -147,7 +147,7 @@ print =============== step3 - db
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1;' 127.0.0.1:6020/rest/sql system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1;' 127.0.0.1:6020/rest/sql
print 21-> $system_content print 21-> $system_content
if $system_content != @{"status":"error","code":1000,"desc":"mnode invalid table id"}@ then if $system_content != @{"status":"error","code":1000,"desc":"mnode invalid table name"}@ then
return -1 return -1
endi endi

View File

@ -180,7 +180,7 @@ if $data03 != 0 then
endi endi
print $data04 print $data04
if $data04 != 0.0000 then if $data04 != 0.00000 then
return -1 return -1
endi endi
@ -201,7 +201,8 @@ if $data13 != 1 then
return -1 return -1
endi endi
if $data14 != 1.0000 then if $data14 != 1.00000 then
print expect 1.00000, actual:$data14
return -1 return -1
endi endi
@ -345,6 +346,19 @@ if $data94 != 9 then
return -1 return -1
endi endi
sql select c1,sum(c1),avg(c1),count(*) from group_mt0 where c1<5 group by c1;
if $row != 5 then
return -1
endi
if $data00 != 0 then
return -1
endi
if $data01 != 800 then
return -1
endi
sql select first(c1), last(ts), first(ts), last(c1),sum(c1),avg(c1),count(*) from group_mt0 where c1<20 group by tbname,c1; sql select first(c1), last(ts), first(ts), last(c1),sum(c1),avg(c1),count(*) from group_mt0 where c1<20 group by tbname,c1;
if $row != 160 then if $row != 160 then
return -1 return -1

View File

@ -307,78 +307,3 @@ cd ../../../debug; make
./test.sh -f unique/vnode/replica3_basic.sim ./test.sh -f unique/vnode/replica3_basic.sim
./test.sh -f unique/vnode/replica3_repeat.sim ./test.sh -f unique/vnode/replica3_repeat.sim
./test.sh -f unique/vnode/replica3_vgroup.sim ./test.sh -f unique/vnode/replica3_vgroup.sim
./test.sh -f unique/account/account_create.sim
./test.sh -f unique/account/account_delete.sim
./test.sh -f unique/account/account_len.sim
./test.sh -f unique/account/authority.sim
./test.sh -f unique/account/basic.sim
./test.sh -f unique/account/paras.sim
./test.sh -f unique/account/pass_alter.sim
./test.sh -f unique/account/pass_len.sim
./test.sh -f unique/account/usage.sim
./test.sh -f unique/account/user_create.sim
./test.sh -f unique/account/user_len.sim
./test.sh -f unique/big/balance.sim
./test.sh -f unique/big/maxvnodes.sim
./test.sh -f unique/big/tcp.sim
./test.sh -f unique/cluster/balance1.sim
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/cluster/balance3.sim
./test.sh -f unique/cluster/cache.sim
./test.sh -f unique/column/replica3.sim
./test.sh -f unique/db/commit.sim
./test.sh -f unique/db/delete.sim
./test.sh -f unique/db/delete_part.sim
./test.sh -f unique/db/replica_add12.sim
./test.sh -f unique/db/replica_add13.sim
./test.sh -f unique/db/replica_add23.sim
./test.sh -f unique/db/replica_reduce21.sim
./test.sh -f unique/db/replica_reduce32.sim
./test.sh -f unique/db/replica_reduce31.sim
./test.sh -f unique/db/replica_part.sim
./test.sh -f unique/dnode/balance1.sim
./test.sh -f unique/dnode/balance2.sim
./test.sh -f unique/dnode/balance3.sim
./test.sh -f unique/dnode/balancex.sim
./test.sh -f unique/dnode/offline1.sim
./test.sh -f unique/dnode/offline2.sim
./test.sh -f unique/dnode/remove1.sim
./test.sh -f unique/dnode/remove2.sim
./test.sh -f unique/dnode/vnode_clean.sim
./test.sh -f unique/http/admin.sim
./test.sh -f unique/http/opentsdb.sim
./test.sh -f unique/import/replica2.sim
./test.sh -f unique/import/replica3.sim
./test.sh -f unique/stable/balance_replica1.sim
./test.sh -f unique/stable/dnode2_stop.sim
./test.sh -f unique/stable/dnode2.sim
./test.sh -f unique/stable/dnode3.sim
./test.sh -f unique/stable/replica2_dnode4.sim
./test.sh -f unique/stable/replica2_vnode3.sim
./test.sh -f unique/stable/replica3_dnode6.sim
./test.sh -f unique/stable/replica3_vnode3.sim
./test.sh -f unique/mnode/mgmt22.sim
./test.sh -f unique/mnode/mgmt23.sim
./test.sh -f unique/mnode/mgmt24.sim
./test.sh -f unique/mnode/mgmt25.sim
./test.sh -f unique/mnode/mgmt26.sim
./test.sh -f unique/mnode/mgmt33.sim
./test.sh -f unique/mnode/mgmt34.sim
./test.sh -f unique/mnode/mgmtr2.sim
./test.sh -f unique/vnode/many.sim
./test.sh -f unique/vnode/replica2_basic2.sim
./test.sh -f unique/vnode/replica2_repeat.sim
./test.sh -f unique/vnode/replica3_basic.sim
./test.sh -f unique/vnode/replica3_repeat.sim
./test.sh -f unique/vnode/replica3_vgroup.sim

View File

@ -0,0 +1,213 @@
#unsupport run general/alter/cached_schema_after_alter.sim
#unsupport run general/alter/count.sim
#unsupport run general/alter/import.sim
#unsupport run general/alter/insert1.sim
#unsupport run general/alter/insert2.sim
#unsupport run general/alter/metrics.sim
#unsupport run general/alter/table.sim
run general/cache/new_metrics.sim
run general/cache/restart_metrics.sim
run general/cache/restart_table.sim
run general/connection/connection.sim
run general/column/commit.sim
run general/column/metrics.sim
run general/column/table.sim
run general/compress/commitlog.sim
run general/compress/compress.sim
run general/compress/compress2.sim
run general/compress/uncompress.sim
run general/compute/avg.sim
run general/compute/bottom.sim
run general/compute/count.sim
run general/compute/diff.sim
run general/compute/diff2.sim
run general/compute/first.sim
run general/compute/interval.sim
run general/compute/last.sim
run general/compute/leastsquare.sim
run general/compute/max.sim
run general/compute/min.sim
run general/compute/null.sim
run general/compute/percentile.sim
run general/compute/stddev.sim
run general/compute/sum.sim
run general/compute/top.sim
run general/db/alter_option.sim
run general/db/alter_tables_d2.sim
run general/db/alter_tables_v1.sim
run general/db/alter_tables_v4.sim
run general/db/alter_vgroups.sim
run general/db/basic.sim
run general/db/basic1.sim
run general/db/basic2.sim
run general/db/basic3.sim
run general/db/basic4.sim
run general/db/basic5.sim
run general/db/delete_reuse1.sim
run general/db/delete_reuse2.sim
run general/db/delete_reusevnode.sim
run general/db/delete_reusevnode2.sim
run general/db/delete_writing1.sim
run general/db/delete_writing2.sim
run general/db/delete.sim
run general/db/len.sim
run general/db/repeat.sim
run general/db/tables.sim
run general/db/vnodes.sim
run general/field/2.sim
run general/field/3.sim
run general/field/4.sim
run general/field/5.sim
run general/field/6.sim
run general/field/bigint.sim
run general/field/binary.sim
run general/field/bool.sim
run general/field/single.sim
run general/field/smallint.sim
run general/field/tinyint.sim
run general/http/restful.sim
run general/http/restful_insert.sim
run general/http/restful_limit.sim
run general/http/restful_full.sim
run general/http/prepare.sim
run general/http/telegraf.sim
# run general/http/grafana_bug.sim
# run general/http/grafana.sim
run general/import/basic.sim
run general/import/commit.sim
run general/import/large.sim
run general/import/replica1.sim
run general/insert/basic.sim
run general/insert/insert_drop.sim
run general/insert/query_block1_memory.sim
run general/insert/query_block2_memory.sim
run general/insert/query_block1_file.sim
run general/insert/query_block2_file.sim
run general/insert/query_file_memory.sim
run general/insert/query_multi_file.sim
run general/insert/tcp.sim
#unsupport run general/parser/alter.sim
#unsupport run general/parser/alter1.sim
#unsupport run general/parser/alter_stable.sim
run general/parser/auto_create_tb.sim
run general/parser/auto_create_tb_drop_tb.sim
run general/parser/col_arithmetic_operation.sim
run general/parser/columnValue.sim
# run general/parser/commit.sim
run general/parser/create_db.sim
run general/parser/create_mt.sim
run general/parser/create_tb.sim
run general/parser/dbtbnameValidate.sim
run general/parser/import_commit1.sim
run general/parser/import_commit2.sim
run general/parser/import_commit3.sim
run general/parser/insert_tb.sim
# run general/parser/first_last.sim
#unsupport run general/parser/import_file.sim
# run general/parser/lastrow.sim
run general/parser/nchar.sim
#unsupport run general/parser/null_char.sim
# run general/parser/single_row_in_tb.sim
run general/parser/select_from_cache_disk.sim
# run general/parser/limit.sim
# run general/parser/limit1.sim
# run general/parser/limit1_tblocks100.sim
# run general/parser/mixed_blocks.sim
# run general/parser/selectResNum.sim
run general/parser/select_across_vnodes.sim
run general/parser/slimit1.sim
run general/parser/tbnameIn.sim
run general/parser/binary_escapeCharacter.sim
# run general/parser/projection_limit_offset.sim
run general/parser/limit2.sim
# run general/parser/slimit.sim
run general/parser/fill.sim
# run general/parser/fill_stb.sim
# run general/parser/interp.sim
# run general/parser/where.sim
#unsupport run general/parser/join.sim
#unsupport run general/parser/join_multivnode.sim
# run general/parser/select_with_tags.sim
#unsupport run general/parser/groupby.sim
#unsupport run general/parser/bug.sim
#unsupport run general/parser/tags_dynamically_specifiy.sim
#unsupport run general/parser/set_tag_vals.sim
#unsupport run general/parser/repeatAlter.sim
#unsupport run general/parser/slimit_alter_tags.sim
#unsupport run general/parser/stream_on_sys.sim
#unsupport run general/parser/stream.sim
#unsupport run general/parser/repeatStream.sim
run general/stable/disk.sim
run general/stable/dnode3.sim
run general/stable/metrics.sim
run general/stable/values.sim
run general/stable/vnode3.sim
# run general/table/autocreate.sim
run general/table/basic1.sim
run general/table/basic2.sim
run general/table/basic3.sim
run general/table/bigint.sim
run general/table/binary.sim
run general/table/bool.sim
run general/table/column_name.sim
run general/table/column_num.sim
run general/table/column_value.sim
run general/table/column2.sim
run general/table/date.sim
run general/table/db.table.sim
run general/table/delete_reuse1.sim
run general/table/delete_reuse2.sim
run general/table/delete_writing.sim
run general/table/describe.sim
run general/table/double.sim
run general/table/fill.sim
run general/table/float.sim
run general/table/int.sim
run general/table/limit.sim
run general/table/smallint.sim
run general/table/table_len.sim
# run general/table/table.sim
run general/table/tinyint.sim
run general/table/vgroup.sim
run general/tag/3.sim
run general/tag/4.sim
run general/tag/5.sim
run general/tag/6.sim
#unsupport run general/tag/add.sim
run general/tag/bigint.sim
run general/tag/binary_binary.sim
run general/tag/binary.sim
run general/tag/bool_binary.sim
run general/tag/bool_int.sim
run general/tag/bool.sim
#unsupport run general/tag/change.sim
run general/tag/column.sim
#unsupport run general/tag/commit.sim
run general/tag/create.sim
#unsupport run general/tag/delete.sim
run general/tag/double.sim
run general/tag/filter.sim
run general/tag/float.sim
run general/tag/int_binary.sim
run general/tag/int_float.sim
run general/tag/int.sim
#unsupport run general/tag/set.sim
run general/tag/smallint.sim
run general/tag/tinyint.sim
run general/user/authority.sim
run general/user/monitor.sim
run general/user/pass_alter.sim
run general/user/pass_len.sim
run general/user/user_create.sim
run general/user/user_len.sim
run general/vector/metrics_field.sim
run general/vector/metrics_mix.sim
run general/vector/metrics_query.sim
run general/vector/metrics_tag.sim
run general/vector/metrics_time.sim
run general/vector/multi.sim
run general/vector/single.sim
run general/vector/table_field.sim
run general/vector/table_mix.sim
run general/vector/table_query.sim
run general/vector/table_time.sim

View File

@ -57,23 +57,21 @@ run_back unique/vnode/back_insert_many.sim
sleep 5000 sleep 5000
print ======== step3 print ======== step3
system sh/exec.sh -n dnode2 -s stop
sleep 5000
$x = 0 $x = 0
loop: loop:
print ======== step4 print ======== step4
system sh/exec.sh -n dnode2 -s start
sleep 5000
system sh/exec.sh -n dnode3 -s stop system sh/exec.sh -n dnode3 -s stop
sleep 5000 sleep 5000
system sh/exec.sh -n dnode3 -s start
sleep 5000
print ======== step5 print ======== step5
system sh/exec.sh -n dnode3 -s start
sleep 5000
system sh/exec.sh -n dnode2 -s stop system sh/exec.sh -n dnode2 -s stop
sleep 5000 sleep 5000
system sh/exec.sh -n dnode2 -s start
sleep 5000
print ======== step6 print ======== step6
sql select count(*) from db1.tb1 sql select count(*) from db1.tb1

View File

@ -138,25 +138,25 @@ sleep 5000
#sql insert into d3.t3 values(now, 3) #sql insert into d3.t3 values(now, 3)
#sql insert into d4.t4 values(now, 3) #sql insert into d4.t4 values(now, 3)
sql select * from d1.t1 #sql select * from d1.t1
if $rows != 2 then #if $rows != 2 then
return -1 # return -1
endi #endi
sql select * from d2.t2 #sql select * from d2.t2
if $rows != 2 then #if $rows != 2 then
return -1 # return -1
endi #endi
sql select * from d3.t3 #sql select * from d3.t3
if $rows != 2 then #if $rows != 2 then
return -1 # return -1
endi #endi
sql select * from d4.t4 #sql select * from d4.t4
if $rows != 2 then #if $rows != 2 then
return -1 # return -1
endi #endi
print ========= step4 print ========= step4
system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode2 -s start
@ -169,25 +169,25 @@ sleep 5000
#sql insert into d3.t3 values(now, 4) #sql insert into d3.t3 values(now, 4)
#sql insert into d4.t4 values(now, 4) #sql insert into d4.t4 values(now, 4)
sql select * from d1.t1 #sql select * from d1.t1
if $rows != 2 then #if $rows != 2 then
return -1 # return -1
endi #endi
sql select * from d2.t2 #sql select * from d2.t2
if $rows != 2 then #if $rows != 2 then
return -1 # return -1
endi #endi
sql select * from d3.t3 #sql select * from d3.t3
if $rows != 2 then #if $rows != 2 then
return -1 # return -1
endi #endi
sql select * from d4.t4 #sql select * from d4.t4
if $rows != 2 then #if $rows != 2 then
return -1 # return -1
endi #endi
print ========= step5 print ========= step5
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start

View File

@ -37,22 +37,20 @@ run_back unique/vnode/back_insert.sim
sleep 3000 sleep 3000
print ======== step3 print ======== step3
system sh/exec.sh -n dnode2 -s stop
sleep 5000
$x = 0 $x = 0
loop: loop:
print ======== step4 print ======== step4
system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode2 -s stop
sleep 5000 sleep 5000
system sh/exec.sh -n dnode3 -s stop system sh/exec.sh -n dnode2 -s start
sleep 5000 sleep 5000
print ======== step5 print ======== step5
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s stop
sleep 5000 sleep 5000
system sh/exec.sh -n dnode2 -s stop system sh/exec.sh -n dnode3 -s start
sleep 5000 sleep 5000
print ======== step6 print ======== step6

View File

@ -24,14 +24,19 @@ GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m' GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m' NC='\033[0m'
echo "### run TSIM script ###" echo "### run TSIM test case ###"
cd script cd script
[ -f out.log ] && rm -f out.log [ -f out.log ] && rm -f out.log
if [ "$1" == "cron" ]; then if [ "$1" == "cron" ]; then
echo "### run TSIM regression test ###"
runSimCaseOneByOne regressionSuite.sim
elif [ "$1" == "full" ]; then
echo "### run TSIM full test ###"
runSimCaseOneByOne fullGeneralSuite.sim runSimCaseOneByOne fullGeneralSuite.sim
else else
echo "### run TSIM smoke test ###"
runSimCaseOneByOne basicSuite.sim runSimCaseOneByOne basicSuite.sim
fi fi
@ -53,14 +58,19 @@ if [ "$totalFailed" -ne "0" ]; then
# exit $totalFailed # exit $totalFailed
fi fi
echo "### run Python script ###" echo "### run Python test case ###"
cd ../pytest cd ../pytest
[ -f pytest-out.log ] && rm -f pytest-out.log [ -f pytest-out.log ] && rm -f pytest-out.log
if [ "$1" == "cron" ]; then if [ "$1" == "cron" ]; then
echo "### run Python regression test ###"
runPyCaseOneByOne regressiontest.sh
elif [ "$1" == "full" ]; then
echo "### run Python full test ###"
runPyCaseOneByOne fulltest.sh runPyCaseOneByOne fulltest.sh
else else
echo "### run Python smoke test ###"
runPyCaseOneByOne smoketest.sh runPyCaseOneByOne smoketest.sh
fi fi
totalPySuccess=`grep 'successfully executed' pytest-out.log | wc -l` totalPySuccess=`grep 'successfully executed' pytest-out.log | wc -l`