Merge branch '3.0' into cpwu/3.0

commit 31f7088a19
cpwu authored on 2022-07-16 16:50:17 +08:00; committed by GitHub
13 changed files with 366 additions and 148 deletions

View File

@@ -111,6 +111,8 @@ int32_t derivativeScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalar
int32_t irateScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t twaScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t mavgScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t hllScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t csumScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
#ifdef __cplusplus
}

View File

@@ -142,6 +142,7 @@ void taos_close(TAOS *taos) {
int taos_errno(TAOS_RES *res) {
if (res == NULL || TD_RES_TMQ_META(res)) {
if (terrno == TSDB_CODE_RPC_REDIRECT) terrno = TSDB_CODE_RPC_NETWORK_UNAVAIL;
return terrno;
}
@@ -149,11 +150,13 @@ int taos_errno(TAOS_RES *res) {
return 0;
}
return ((SRequestObj *)res)->code;
return ((SRequestObj *)res)->code == TSDB_CODE_RPC_REDIRECT ? TSDB_CODE_RPC_NETWORK_UNAVAIL
: ((SRequestObj *)res)->code;
}
const char *taos_errstr(TAOS_RES *res) {
if (res == NULL || TD_RES_TMQ_META(res)) {
if (terrno == TSDB_CODE_RPC_REDIRECT) terrno = TSDB_CODE_RPC_NETWORK_UNAVAIL;
return (const char *)tstrerror(terrno);
}
@@ -165,7 +168,8 @@ const char *taos_errstr(TAOS_RES *res) {
if (NULL != pRequest->msgBuf && (strlen(pRequest->msgBuf) > 0 || pRequest->code == TSDB_CODE_RPC_FQDN_ERROR)) {
return pRequest->msgBuf;
} else {
return (const char *)tstrerror(pRequest->code);
return pRequest->code == TSDB_CODE_RPC_REDIRECT ? (const char *)tstrerror(TSDB_CODE_RPC_NETWORK_UNAVAIL)
: (const char *)tstrerror(pRequest->code);
}
}

View File

@@ -521,8 +521,9 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, (STsdbReader**)&pInfo->dataReader,
GET_TASKID(pTaskInfo));
if (code != 0) {
// TODO
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
return NULL;
}
}

View File

@@ -2369,6 +2369,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.getEnvFunc = getHLLFuncEnv,
.initFunc = functionSetup,
.processFunc = hllFunction,
.sprocessFunc = hllScalarFunction,
.finalizeFunc = hllFinalize,
.invertFunc = NULL,
.combineFunc = hllCombine,
@@ -2437,6 +2438,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.getEnvFunc = getCsumFuncEnv,
.initFunc = functionSetup,
.processFunc = csumFunction,
.sprocessFunc = csumScalarFunction,
.finalizeFunc = NULL
},
{
@@ -2463,7 +2465,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "tail",
.type = FUNCTION_TYPE_TAIL,
.classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC |
FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateTail,
.getEnvFunc = getTailFuncEnv,
@@ -2474,7 +2476,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "unique",
.type = FUNCTION_TYPE_UNIQUE,
.classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC |
FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateUnique,
.getEnvFunc = getUniqueFuncEnv,

View File

@@ -2723,6 +2723,9 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
// please refer to the comment in lastRowFunction for the reason why the optimized version of last/first is disabled.
// we will use this optimized implementation in a new version that is only available in the scan subplan
#if 0
if (blockDataOrder == TSDB_ORDER_ASC) {
// filter according to current result firstly
if (pResInfo->numOfRes > 0) {
@@ -2770,6 +2773,22 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
}
}
}
#else
for (int32_t i = pInput->startRowIndex; i < pInput->startRowIndex + pInput->numOfRows; ++i) {
if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, pColAgg)) {
continue;
}
numOfElems++;
char* data = colDataGetData(pInputCol, i);
TSKEY cts = getRowPTs(pInput->pPTS, i);
if (pResInfo->numOfRes == 0 || pInfo->ts > cts) {
doSaveCurrentVal(pCtx, i, cts, pInputCol->info.type, data);
pResInfo->numOfRes = 1;
}
}
#endif
SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
@@ -2801,6 +2820,8 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
// please refer to the comment in lastRowFunction for the reason why the optimized version of last/first is disabled.
#if 0
if (blockDataOrder == TSDB_ORDER_ASC) {
for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, pColAgg)) {
@@ -2833,6 +2854,22 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
break;
}
}
#else
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, pColAgg)) {
continue;
}
numOfElems++;
char* data = colDataGetData(pInputCol, i);
TSKEY cts = getRowPTs(pInput->pPTS, i);
if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
doSaveCurrentVal(pCtx, i, cts, type, data);
pResInfo->numOfRes = 1;
}
}
#endif
SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
@@ -2988,6 +3025,9 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
#if 0
// the optimized version only works if all tuples in one block are monotonically increasing or decreasing.
// this does NOT always hold if a project operator exists downstream.
if (blockDataOrder == TSDB_ORDER_ASC) {
for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
char* data = colDataGetData(pInputCol, i);
@@ -2997,6 +3037,7 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
doSaveLastrow(pCtx, data, i, cts, pInfo);
}
break;
}
} else { // descending order
@@ -3011,7 +3052,19 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
break;
}
}
#else
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
char* data = colDataGetData(pInputCol, i);
TSKEY cts = getRowPTs(pInput->pPTS, i);
numOfElems++;
if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
doSaveLastrow(pCtx, data, i, cts, pInfo);
pResInfo->numOfRes = 1;
}
}
#endif
SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
}
@@ -5926,6 +5979,7 @@ int32_t cachedLastRowFunction(SqlFunctionCtx* pCtx) {
TSKEY cts = getRowPTs(pInput->pPTS, i);
if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
doSaveLastrow(pCtx, data, i, cts, pInfo);
pResInfo->numOfRes = 1;
}
}

View File

@@ -1746,20 +1746,14 @@ int32_t countScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam
SColumnInfoData *pOutputData = pOutput->columnData;
int64_t *out = (int64_t *)pOutputData->pData;
bool hasNull = false;
*out = 0;
for (int32_t i = 0; i < pInput->numOfRows; ++i) {
if (colDataIsNull_s(pInputData, i)) {
hasNull = true;
break;
continue;
}
(*out)++;
}
if (hasNull) {
colDataAppendNULL(pOutputData, 0);
}
pOutput->numOfRows = 1;
return TSDB_CODE_SUCCESS;
}
@@ -2421,3 +2415,11 @@ int32_t twaScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *
int32_t mavgScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
return avgScalarFunction(pInput, inputNum, pOutput);
}
int32_t hllScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
return countScalarFunction(pInput, inputNum, pOutput);
}
int32_t csumScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
return sumScalarFunction(pInput, inputNum, pOutput);
}

View File

@@ -79,22 +79,39 @@ class TDSql:
self.queryResult = None
tdLog.info("sql:%s, expect error occured" % (sql))
def query(self, sql, row_tag=None):
def query(self, sql, row_tag=None,queyTimes=10):
self.sql = sql
try:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
traceback.print_exc()
raise Exception(repr(e))
if row_tag:
return self.queryResult
return self.queryRows
i=1
while i <= queyTimes:
try:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)
if row_tag:
return self.queryResult
return self.queryRows
except Exception as e:
i+=1
tdLog.notice("Try to query again, query times: %d "%i)
pass
else:
try:
tdLog.notice("Try the last query ")
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)
if row_tag:
return self.queryResult
return self.queryRows
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
traceback.print_exc()
raise Exception(repr(e))
def is_err_sql(self, sql):
err_flag = True
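The rewritten query() above retries transient failures a bounded number of times and then makes one final, logged attempt that re-raises with the caller's file and line. A minimal standalone sketch of the same retry-then-raise pattern, assuming only a DB-API style cursor like the one TDSql wraps (the run_with_retry name and the back-off delay are illustrative, not part of this commit):

    import time
    import traceback

    def run_with_retry(cursor, sql, retries=10, delay=0.5):
        # Retry a statement a few times before surfacing the error to the caller.
        for attempt in range(1, retries + 1):
            try:
                cursor.execute(sql)
                return cursor.fetchall()
            except Exception:
                if attempt == retries:
                    traceback.print_exc()
                    raise  # last attempt failed: propagate to the caller
                time.sleep(delay)  # brief pause before retrying (the committed code retries immediately)

Note that the committed version does not sleep between attempts and only records the caller's location on the final failure.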
@@ -274,16 +291,27 @@ class TDSql:
time.sleep(1)
continue
def execute(self, sql):
def execute(self, sql,queyTimes=10):
self.sql = sql
try:
self.affectedRows = self.cursor.execute(sql)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
return self.affectedRows
i=1
while i <= queyTimes:
try:
self.affectedRows = self.cursor.execute(sql)
return self.affectedRows
except Exception as e:
i+=1
tdLog.notice("Try to execute sql again, query times: %d "%i)
pass
else:
try:
tdLog.notice("Try the last execute sql ")
self.affectedRows = self.cursor.execute(sql)
return self.affectedRows
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
def checkAffectedRows(self, expectAffectedRows):
if self.affectedRows != expectAffectedRows:
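With the new optional parameter, test cases can tune the retry budget per statement; for example (the keyword spelling queyTimes follows the signatures added above, and db1/stb1 are just example names taken from the tests in this commit):

    tdSql.execute("create database if not exists db1", queyTimes=3)
    tdSql.query("select count(*) from db1.stb1", queyTimes=3)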

View File

@@ -91,21 +91,21 @@ if $rows != $vgroups then
return -1
endi
if $data[0][4] == LEADER then
if $data[0][6] == FOLLOWER then
if $data[0][8] == FOLLOWER then
if $data[0][4] == leader then
if $data[0][6] == follower then
if $data[0][8] == follower then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
endi
endi
elif $data[0][6] == LEADER then
if $data[0][4] == FOLLOWER then
if $data[0][8] == FOLLOWER then
elif $data[0][6] == leader then
if $data[0][4] == follower then
if $data[0][8] == follower then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
endi
endi
elif $data[0][8] == LEADER then
if $data[0][4] == FOLLOWER then
if $data[0][6] == FOLLOWER then
elif $data[0][8] == leader then
if $data[0][4] == follower then
if $data[0][6] == follower then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
endi
endi
@@ -113,21 +113,21 @@ else
goto check_vg_ready
endi
if $data[1][4] == LEADER then
if $data[1][6] == FOLLOWER then
if $data[1][8] == FOLLOWER then
if $data[1][4] == leader then
if $data[1][6] == follower then
if $data[1][8] == follower then
print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
endi
endi
elif $data[1][6] == LEADER then
if $data[1][4] == FOLLOWER then
if $data[1][8] == FOLLOWER then
elif $data[1][6] == leader then
if $data[1][4] == follower then
if $data[1][8] == follower then
print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
endi
endi
elif $data[1][8] == LEADER then
if $data[1][4] == FOLLOWER then
if $data[1][6] == FOLLOWER then
elif $data[1][8] == leader then
if $data[1][4] == follower then
if $data[1][6] == follower then
print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
endi
endi
@@ -135,21 +135,21 @@ else
goto check_vg_ready
endi
if $data[2][4] == LEADER then
if $data[2][6] == FOLLOWER then
if $data[2][8] == FOLLOWER then
if $data[2][4] == leader then
if $data[2][6] == follower then
if $data[2][8] == follower then
print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
endi
endi
elif $data[2][6] == LEADER then
if $data[2][4] == FOLLOWER then
if $data[2][8] == FOLLOWER then
elif $data[2][6] == leader then
if $data[2][4] == follower then
if $data[2][8] == follower then
print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
endi
endi
elif $data[2][8] == LEADER then
if $data[2][4] == FOLLOWER then
if $data[2][6] == FOLLOWER then
elif $data[2][8] == leader then
if $data[2][4] == follower then
if $data[2][6] == follower then
print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
endi
endi
@@ -157,21 +157,21 @@ else
goto check_vg_ready
endi
if $data[3][4] == LEADER then
if $data[3][6] == FOLLOWER then
if $data[3][8] == FOLLOWER then
if $data[3][4] == leader then
if $data[3][6] == follower then
if $data[3][8] == follower then
print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
endi
endi
elif $data[3][6] == LEADER then
if $data[3][4] == FOLLOWER then
if $data[3][8] == FOLLOWER then
elif $data[3][6] == leader then
if $data[3][4] == follower then
if $data[3][8] == follower then
print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
endi
endi
elif $data[3][8] == LEADER then
if $data[3][4] == FOLLOWER then
if $data[3][6] == FOLLOWER then
elif $data[3][8] == leader then
if $data[3][4] == follower then
if $data[3][6] == follower then
print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
endi
endi
@@ -179,21 +179,21 @@ else
goto check_vg_ready
endi
if $data[4][4] == LEADER then
if $data[4][6] == FOLLOWER then
if $data[4][8] == FOLLOWER then
if $data[4][4] == leader then
if $data[4][6] == follower then
if $data[4][8] == follower then
print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
endi
endi
elif $data[4][6] == LEADER then
if $data[4][4] == FOLLOWER then
if $data[4][8] == FOLLOWER then
elif $data[4][6] == leader then
if $data[4][4] == follower then
if $data[4][8] == follower then
print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
endi
endi
elif $data[4][8] == LEADER then
if $data[4][4] == FOLLOWER then
if $data[4][6] == FOLLOWER then
elif $data[4][8] == leader then
if $data[4][4] == follower then
if $data[4][6] == follower then
print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
endi
endi
@@ -286,13 +286,13 @@ if $data[0][0] != 1 then
return -1
endi
if $data[0][2] != LEADER then
if $data[0][2] != leader then
goto check_mnode_ready_2
endi
if $data[1][2] != FOLLOWER then
if $data[1][2] != follower then
goto check_mnode_ready_2
endi
if $data[2][2] != FOLLOWER then
if $data[2][2] != follower then
goto check_mnode_ready_2
endi
@@ -318,21 +318,21 @@ if $rows != $vgroups then
return -1
endi
if $data[0][4] == LEADER then
if $data[0][6] == FOLLOWER then
if $data[0][8] == FOLLOWER then
if $data[0][4] == leader then
if $data[0][6] == follower then
if $data[0][8] == follower then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
endi
endi
elif $data[0][6] == LEADER then
if $data[0][4] == FOLLOWER then
if $data[0][8] == FOLLOWER then
elif $data[0][6] == leader then
if $data[0][4] == follower then
if $data[0][8] == follower then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
endi
endi
elif $data[0][8] == LEADER then
if $data[0][4] == FOLLOWER then
if $data[0][6] == FOLLOWER then
elif $data[0][8] == leader then
if $data[0][4] == follower then
if $data[0][6] == follower then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
endi
endi
@@ -340,21 +340,21 @@ else
goto check_vg_ready1
endi
if $data[1][4] == LEADER then
if $data[1][6] == FOLLOWER then
if $data[1][8] == FOLLOWER then
if $data[1][4] == leader then
if $data[1][6] == follower then
if $data[1][8] == follower then
print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
endi
endi
elif $data[1][6] == LEADER then
if $data[1][4] == FOLLOWER then
if $data[1][8] == FOLLOWER then
elif $data[1][6] == leader then
if $data[1][4] == follower then
if $data[1][8] == follower then
print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
endi
endi
elif $data[1][8] == LEADER then
if $data[1][4] == FOLLOWER then
if $data[1][6] == FOLLOWER then
elif $data[1][8] == leader then
if $data[1][4] == follower then
if $data[1][6] == follower then
print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
endi
endi
@@ -362,21 +362,21 @@ else
goto check_vg_ready1
endi
if $data[2][4] == LEADER then
if $data[2][6] == FOLLOWER then
if $data[2][8] == FOLLOWER then
if $data[2][4] == leader then
if $data[2][6] == follower then
if $data[2][8] == follower then
print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
endi
endi
elif $data[2][6] == LEADER then
if $data[2][4] == FOLLOWER then
if $data[2][8] == FOLLOWER then
elif $data[2][6] == leader then
if $data[2][4] == follower then
if $data[2][8] == follower then
print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
endi
endi
elif $data[2][8] == LEADER then
if $data[2][4] == FOLLOWER then
if $data[2][6] == FOLLOWER then
elif $data[2][8] == leader then
if $data[2][4] == follower then
if $data[2][6] == follower then
print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
endi
endi
@@ -384,21 +384,21 @@ else
goto check_vg_ready1
endi
if $data[3][4] == LEADER then
if $data[3][6] == FOLLOWER then
if $data[3][8] == FOLLOWER then
if $data[3][4] == leader then
if $data[3][6] == follower then
if $data[3][8] == follower then
print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
endi
endi
elif $data[3][6] == LEADER then
if $data[3][4] == FOLLOWER then
if $data[3][8] == FOLLOWER then
elif $data[3][6] == leader then
if $data[3][4] == follower then
if $data[3][8] == follower then
print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
endi
endi
elif $data[3][8] == LEADER then
if $data[3][4] == FOLLOWER then
if $data[3][6] == FOLLOWER then
elif $data[3][8] == leader then
if $data[3][4] == follower then
if $data[3][6] == follower then
print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
endi
endi
@@ -406,21 +406,21 @@ else
goto check_vg_ready1
endi
if $data[4][4] == LEADER then
if $data[4][6] == FOLLOWER then
if $data[4][8] == FOLLOWER then
if $data[4][4] == leader then
if $data[4][6] == follower then
if $data[4][8] == follower then
print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
endi
endi
elif $data[4][6] == LEADER then
if $data[4][4] == FOLLOWER then
if $data[4][8] == FOLLOWER then
elif $data[4][6] == leader then
if $data[4][4] == follower then
if $data[4][8] == follower then
print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
endi
endi
elif $data[4][8] == LEADER then
if $data[4][4] == FOLLOWER then
if $data[4][6] == FOLLOWER then
elif $data[4][8] == leader then
if $data[4][4] == follower then
if $data[4][6] == follower then
print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
endi
endi
@@ -539,27 +539,27 @@ if $data[0][0] != 1 then
return -1
endi
if $data[0][2] == LEADER then
if $data[1][2] != FOLLOWER then
if $data[0][2] == leader then
if $data[1][2] != follower then
goto check_mnode_ready_3
endi
if $data[2][2] != FOLLOWER then
if $data[2][2] != follower then
goto check_mnode_ready_3
endi
endi
if $data[1][2] == LEADER then
if $data[0][2] != FOLLOWER then
if $data[1][2] == leader then
if $data[0][2] != follower then
goto check_mnode_ready_3
endi
if $data[2][2] != FOLLOWER then
if $data[2][2] != follower then
goto check_mnode_ready_3
endi
endi
if $data[2][2] == LEADER then
if $data[1][2] != FOLLOWER then
if $data[2][2] == leader then
if $data[1][2] != follower then
goto check_mnode_ready_3
endi
if $data[0][2] != FOLLOWER then
if $data[0][2] != follower then
goto check_mnode_ready_3
endi
endi

View File

@@ -0,0 +1,63 @@
import taos
import sys
import datetime
import inspect
from util.log import *
from util.sql import *
from util.cases import *
import random
class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), False)

    def case1(self):
        tdSql.execute("create database if not exists dbms precision 'ms'")
        tdSql.execute("create database if not exists dbus precision 'us'")
        tdSql.execute("create database if not exists dbns precision 'ns'")
        tdSql.execute("create table dbms.ntb (ts timestamp, c1 int, c2 bigint)")
        tdSql.execute("create table dbus.ntb (ts timestamp, c1 int, c2 bigint)")
        tdSql.execute("create table dbns.ntb (ts timestamp, c1 int, c2 bigint)")

        tdSql.execute("insert into dbms.ntb values ('2022-01-01 08:00:00.001', 1, 2)")
        tdSql.execute("insert into dbms.ntb values ('2022-01-01 08:00:00.002', 3, 4)")
        tdSql.execute("insert into dbus.ntb values ('2022-01-01 08:00:00.000001', 1, 2)")
        tdSql.execute("insert into dbus.ntb values ('2022-01-01 08:00:00.000002', 3, 4)")
        tdSql.execute("insert into dbns.ntb values ('2022-01-01 08:00:00.000000001', 1, 2)")
        tdSql.execute("insert into dbns.ntb values ('2022-01-01 08:00:00.000000002', 3, 4)")

        tdSql.query("select count(c1) from dbms.ntb interval(1a)")
        tdSql.checkRows(2)

        tdSql.query("select count(c1) from dbus.ntb interval(1u)")
        tdSql.checkRows(2)

        tdSql.query("select count(c1) from dbns.ntb interval(1b)")
        tdSql.checkRows(2)

    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdSql.prepare()

        tdLog.printNoPrefix("==========start case1 run ...............")
        self.case1()
        tdLog.printNoPrefix("==========end case1 run ...............")

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
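Outside the test framework, the ms/us/ns interval behaviour covered by case1 can be spot-checked with the same DB-API cursor that init() wraps. A minimal sketch, assuming a locally running server reachable with taos.connect() defaults and the dbms database created by case1 above:

    import taos

    conn = taos.connect()          # assumes a local server and default credentials
    cur = conn.cursor()
    cur.execute("select count(c1) from dbms.ntb interval(1a)")
    rows = cur.fetchall()
    print(len(rows), rows)         # case1 inserts two rows one millisecond apart, so expect 2 windows
    cur.close()
    conn.close()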

View File

@@ -392,6 +392,31 @@ class TDTestCase:
tdSql.query("select c0,c1 from stb11_1 where (c0>1000) union all select c0,c1 from stb11_1 where c0>2000;")
assert unionallQnode==tdSql.queryResult
queryPolicy=1
tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
tdSql.query("show local variables;")
for i in range(tdSql.queryRows):
if tdSql.queryResult[i][0] == "queryPolicy" :
if int(tdSql.queryResult[i][1]) == int(queryPolicy):
tdLog.success('alter queryPolicy to %d successfully'%queryPolicy)
else :
tdLog.debug(tdSql.queryResult)
tdLog.exit("alter queryPolicy to %d failed"%queryPolicy)
tdSql.execute("reset query cache")
tdSql.execute("use db1;")
tdSql.query("show dnodes;")
dnodeId=tdSql.getData(0,0)
tdSql.query("select max(c1) from stb10;")
assert maxQnode==tdSql.getData(0,0)
tdSql.query("select min(c1) from stb11;")
assert minQnode==tdSql.getData(0,0)
tdSql.query("select c0,c1 from stb11_1 where (c0>1000) union select c0,c1 from stb11_1 where c0>2000;")
assert unionQnode==tdSql.queryResult
tdSql.query("select c0,c1 from stb11_1 where (c0>1000) union all select c0,c1 from stb11_1 where c0>2000;")
assert unionallQnode==tdSql.queryResult
# test case : queryPolicy = 2
def test_case2(self):
self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 10, 2, 1*10)
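The queryPolicy block added in this file repeats an alter-and-verify sequence that already appears for the other policy values in the same test. A hypothetical helper that factors it out, using only the tdSql/tdLog calls shown above (the set_query_policy name is not part of this commit):

    def set_query_policy(policy):
        # switch queryPolicy and confirm it took effect via 'show local variables'
        tdSql.execute('alter local "queryPolicy" "%d"' % policy)
        tdSql.query("show local variables;")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "queryPolicy":
                if int(tdSql.queryResult[i][1]) == int(policy):
                    tdLog.success('alter queryPolicy to %d successfully' % policy)
                    return
                tdLog.debug(tdSql.queryResult)
                tdLog.exit("alter queryPolicy to %d failed" % policy)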

View File

@@ -40,9 +40,9 @@ class TDTestCase:
f"insert into rct{j} values ( {ts+i*10000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
)
tdSql.execute(
f"insert into dct{j} values ( {ts+i*10000}, {1+i*0.1},{1400+i*15}, {1+i},{1500+i*20}, {150+i*2},{5+i} )"
f"insert into dct{j} values ( {ts+i*10000}, {1+i*0.1},{1400+i*15}, {i},{1500+i*20}, {150+i*2},{5+i} )"
)
tdSql.execute("insert into dct9 (ts,fuel_state) values('2021-07-13 14:06:33.123Z',1.2) ;")
# def check_avg(self ,origin_query , check_query):
# avg_result = tdSql.getResult(origin_query)
# origin_result = tdSql.getResult(check_query)
@@ -60,13 +60,15 @@ class TDTestCase:
def tsbsIotQuery(self):
tdSql.execute("use db_tsbs")
# test interval and partition
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
print(tdSql.queryResult)
parRows=tdSql.queryRows
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
# tdSql.checkRows(parRows)
tdSql.checkRows(parRows)
# test insert into
@@ -77,18 +79,53 @@ class TDTestCase:
# test partition interval fill
# tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
# # test partition interval limit
# tdSql.query("SELECT ts,model,floor(2*(sum(nzs)/count(nzs)))/floor(2*(sum(nzs)/count(nzs))) AS broken_down FROM (SELECT ts,model, status/status AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model,ts interval(10m) limit 10;")
# test partition interval limit (PRcore-TD-17410)
# tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
# tdSql.checkRows(10)
# test partition interval Pseudo time-column
tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
# 1 high-load:
# tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
# tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
# 2 stationary-trucks
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
# 3 long-driving-sessions
# tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")
#4 long-daily-sessions
tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity FROM readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60")
# 5. avg-daily-driving-duration
tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
# 6. avg-daily-driving-session
#taosc core dumped
tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
# 7. avg-load
tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
# 8. daily-activity
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
#it's already supported:
# last-loc
tdSql.query("")
# test
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
self.prepareData()

@@ -1 +1 @@
Subproject commit d807c3ffa6f750f7765e102917d1328cadf21c13
Subproject commit bd496f76b64931c66da2f8b0f24143a98a881cde

@@ -1 +1 @@
Subproject commit c5fded266d3b10508e38bf3285bb7ecf798bc343
Subproject commit 7a94ffab45f08e16f09b3f430fe75d717054adb6