Merge branch 'develop' into feature/TD-1925_new
commit b87d3daf3f
@@ -5833,14 +5833,43 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
  return TSDB_CODE_SUCCESS;
}

static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
  bool tagProjection = false;
  bool tableCounting = false;

  int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);

  for (int32_t i = 0; i < numOfCols; ++i) {
    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
    int32_t functionId = pExpr->functionId;

    if (functionId == TSDB_FUNC_TAGPRJ) {
      tagProjection = true;
      continue;
    }

    if (functionId == TSDB_FUNC_COUNT) {
      assert(pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX);
      tableCounting = true;
    }
  }

  return (tableCounting && tagProjection)? -1:0;
}

int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
  const char* msg1 = "functions/columns not allowed in group by query";
  const char* msg2 = "projection query on columns not allowed";
  const char* msg3 = "group by not allowed on projection query";
  const char* msg4 = "retrieve tags not compatible with group by or interval query";
  const char* msg5 = "functions can not be mixed up";

  // only retrieve tags, group by is not supportted
  if (tscQueryTags(pQueryInfo)) {
    if (doTagFunctionCheck(pQueryInfo) != TSDB_CODE_SUCCESS) {
      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
    }

    if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->interval.interval > 0) {
      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
    } else {
@@ -188,8 +188,10 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
  tsBufFlush(output2);

  tsBufDestroy(pSupporter1->pTSBuf);
  pSupporter1->pTSBuf = NULL;
  tsBufDestroy(pSupporter2->pTSBuf);
  pSupporter2->pTSBuf = NULL;

  TSKEY et = taosGetTimestampUs();
  tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
      "intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
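Editor's note: the two `pTSBuf = NULL;` assignments added here pair each destroy with a pointer reset. A rough, generic illustration of why (plain C, not the tsBuf API; `releaseBuffer` is a made-up name and `free` merely stands in for `tsBufDestroy`): clearing the pointer turns any later guarded cleanup into a no-op instead of a double free.

#include <stdlib.h>

/* Illustrative only: the guarded second call mirrors the
 * "if (pSupporter->pTSBuf != NULL)" check done later in
 * tscDestroyJoinSupporter. */
static void releaseBuffer(void **pp) {
  if (pp != NULL && *pp != NULL) {
    free(*pp);
    *pp = NULL;   /* later cleanup sees NULL and skips a second free */
  }
}

int main(void) {
  void *buf = malloc(16);
  releaseBuffer(&buf);   /* frees and clears */
  releaseBuffer(&buf);   /* safe no-op: pointer already NULL */
  return 0;
}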
@@ -219,12 +221,9 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
  assert (pSupporter->uid != 0);

  taosGetTmpfilePath("join-", pSupporter->path);
  pSupporter->f = fopen(pSupporter->path, "w");

  // todo handle error
  if (pSupporter->f == NULL) {
    tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
  }
  // do NOT create file here to reduce crash generated file left issue
  pSupporter->f = NULL;

  return pSupporter;
}
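Editor's note: the "do NOT create file here" comment defers temp-file creation until data actually arrives (see the tsCompRetrieveCallback hunk further down), so a crash before any rows are received leaves no orphaned file on disk. A minimal sketch of that deferred-open pattern, assuming nothing beyond standard C (`writeBlockLazily` is an illustrative name, not a function in this repository):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Open the temp file only when the first block is actually written. */
static int writeBlockLazily(FILE **fpp, const char *path, const void *buf, size_t len) {
  if (*fpp == NULL) {
    *fpp = fopen(path, "w");             /* created on first use, not up front */
    if (*fpp == NULL) {
      fprintf(stderr, "failed to create tmp file:%s, reason:%s\n", path, strerror(errno));
      return -1;                         /* caller aborts the subquery */
    }
  }
  return fwrite(buf, len, 1, *fpp) == 1 ? 0 : -1;
}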
@@ -244,12 +243,19 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) {

  tscFieldInfoClear(&pSupporter->fieldsInfo);

  if (pSupporter->pTSBuf != NULL) {
    tsBufDestroy(pSupporter->pTSBuf);
    pSupporter->pTSBuf = NULL;
  }

  unlink(pSupporter->path);

  if (pSupporter->f != NULL) {
    fclose(pSupporter->f);
    unlink(pSupporter->path);
    pSupporter->f = NULL;
  }

  if (pSupporter->pVgroupTables != NULL) {
    taosArrayDestroy(pSupporter->pVgroupTables);
    pSupporter->pVgroupTables = NULL;
@@ -526,6 +532,8 @@ static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
    tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
    freeJoinSubqueryObj(pSqlObj);
  }

  tscDestroyJoinSupporter(pSupporter);
}

// update the query time range according to the join results on timestamp
@@ -921,6 +929,22 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
  }

  if (numOfRows > 0) { // write the compressed timestamp to disk file
    if(pSupporter->f == NULL) {
      pSupporter->f = fopen(pSupporter->path, "w");

      if (pSupporter->f == NULL) {
        tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));

        pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);

        quitAllSubquery(pParentSql, pSupporter);

        tscAsyncResultOnError(pParentSql);

        return;
      }
    }

    fwrite(pRes->data, (size_t)pRes->numOfRows, 1, pSupporter->f);
    fclose(pSupporter->f);
    pSupporter->f = NULL;
@@ -930,6 +954,9 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
    tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows);

    pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);

    quitAllSubquery(pParentSql, pSupporter);

    tscAsyncResultOnError(pParentSql);

    return;
@@ -1268,12 +1268,14 @@ static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) {
}

static void stddev_function(SQLFunctionCtx *pCtx) {
  // the second stage to calculate standard deviation
  SStddevInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));

  if (pStd->stage == 0) { // the first stage is to calculate average value
  if (pStd->stage == 0) {
    // the first stage is to calculate average value
    avg_function(pCtx);
  } else {
  } else if (pStd->num > 0) {
    // the second stage to calculate standard deviation
    // if pStd->num == 0, there are no numbers in the first round check. No need to do the second round
    double *retVal = &pStd->res;
    double avg = pStd->avg;
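Editor's note: stddev here is computed in two passes, as the comments in the hunk describe: stage 0 accumulates the average, the second stage accumulates squared deviations against it, and the new `pStd->num > 0` guard skips the second stage when the first matched no rows. A self-contained sketch of the same two-pass scheme (plain C, not the SQLFunctionCtx machinery; `twoPassStddev` is an illustrative name):

#include <math.h>
#include <stddef.h>

/* Two-pass population standard deviation; 0 when no rows matched. */
double twoPassStddev(const double *x, size_t n) {
  if (n == 0) {
    return 0.0;                     /* nothing from pass 1: skip pass 2 entirely */
  }
  double avg = 0.0;
  for (size_t i = 0; i < n; ++i) avg += x[i];
  avg /= (double)n;                 /* pass 1: the average */

  double sum = 0.0;
  for (size_t i = 0; i < n; ++i) sum += (x[i] - avg) * (x[i] - avg);
  return sqrt(sum / (double)n);     /* pass 2: deviation against that average */
}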
@@ -548,7 +548,7 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo *pWindowResInfo, int64_t t
  if (pWindowResInfo->curIndex == -1) { // the first window, from the previous stored value
    w.skey = pWindowResInfo->prevSKey;
    if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
      w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision);
      w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
    } else {
      w.ekey = w.skey + pQuery->interval.interval - 1;
    }
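Editor's note: the one-character change (`- 1`) makes calendar-based windows ('n'/'y' interval units) follow the same closed-interval convention as the fixed-length branch: ekey is the last timestamp that still belongs to [skey, ekey], one unit before the next window starts. A small illustration with made-up millisecond values:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t skey      = 1000;            /* window start (illustrative)           */
  int64_t nextStart = 2000;            /* where the next window would begin     */
  int64_t ekey      = nextStart - 1;   /* 1999: without "- 1", timestamp 2000   */
                                       /* would fall into two adjacent windows  */
  printf("window: [%lld, %lld]\n", (long long)skey, (long long)ekey);
  return 0;
}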
@@ -477,8 +477,6 @@ void taosHashEmpty(SHashObj *pHashObj) {
    return;
  }

  uDebug("hash:%p cleanup hash table", pHashObj);

  SHashNode *pNode, *pNext;

  __wr_lock(&pHashObj->lock, pHashObj->type);
@@ -37,6 +37,7 @@ if $data02 != 0 then
endi
print data03 = $data03
if $data03 != 0.00000 then
  print expect 0.00000, actual: $data03
  return -1
endi
if $data04 != 0.000000000 then
@@ -361,3 +361,15 @@ endi
if $data10 != @20-07-30 17:43:59.000@ then
  return -1
endi

print =================>td-2610
sql select stddev(k) from tm2 where ts='2020-12-29 18:46:19.109'
if $rows != 0 then
  print expect 0, actual:$rows
  return -1
endi

sql select twa(k) from tm2 where ts='2020-12-29 18:46:19.109'
if $rows != 0 then
  return -1
endi
@@ -606,6 +606,10 @@ sql insert into t1 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.00
sql insert into t2 values ('2020-03-27 04:11:16.000', 1)('2020-03-27 04:11:17.000', 2) ('2020-03-27 04:11:18.000', 3) ('2020-03-27 04:11:19.000', 4) ;
sql insert into t2 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.000', 2) ('2020-03-27 04:51:18.000', 3) ('2020-03-27 05:10:19.000', 4) ;

print =================>TD-2665
sql_error create table txx as select avg(c) as t from st;
sql_error create table txx1 as select avg(c) as t from t1;

print =================>TD-2236
sql select first(ts),last(ts) from t1 group by c;
if $rows != 4 then
@@ -1,50 +1,50 @@
run general/parser/alter.sim
sleep 500
run general/parser/alter1.sim
sleep 500
run general/parser/alter_stable.sim
sleep 500
run general/parser/auto_create_tb.sim
sleep 500
run general/parser/auto_create_tb_drop_tb.sim
sleep 500
run general/parser/col_arithmetic_operation.sim
sleep 500
run general/parser/columnValue.sim
sleep 500
run general/parser/commit.sim
sleep 500
run general/parser/create_db.sim
sleep 500
run general/parser/create_mt.sim
sleep 500
run general/parser/create_tb.sim
sleep 500
run general/parser/dbtbnameValidate.sim
sleep 500
run general/parser/fill.sim
sleep 500
run general/parser/fill_stb.sim
sleep 500
#run general/parser/fill_us.sim #
sleep 500
run general/parser/first_last.sim
sleep 500
run general/parser/import_commit1.sim
sleep 500
run general/parser/import_commit2.sim
sleep 500
run general/parser/import_commit3.sim
sleep 500
#run general/parser/import_file.sim
sleep 500
run general/parser/insert_tb.sim
sleep 500
run general/parser/tags_dynamically_specifiy.sim
sleep 500
run general/parser/interp.sim
sleep 500
run general/parser/lastrow.sim
#run general/parser/alter.sim
#sleep 500
#run general/parser/alter1.sim
#sleep 500
#run general/parser/alter_stable.sim
#sleep 500
#run general/parser/auto_create_tb.sim
#sleep 500
#run general/parser/auto_create_tb_drop_tb.sim
#sleep 500
#run general/parser/col_arithmetic_operation.sim
#sleep 500
#run general/parser/columnValue.sim
#sleep 500
#run general/parser/commit.sim
#sleep 500
#run general/parser/create_db.sim
#sleep 500
#run general/parser/create_mt.sim
#sleep 500
#run general/parser/create_tb.sim
#sleep 500
#run general/parser/dbtbnameValidate.sim
#sleep 500
#run general/parser/fill.sim
#sleep 500
#run general/parser/fill_stb.sim
#sleep 500
##run general/parser/fill_us.sim #
#sleep 500
#run general/parser/first_last.sim
#sleep 500
#run general/parser/import_commit1.sim
#sleep 500
#run general/parser/import_commit2.sim
#sleep 500
#run general/parser/import_commit3.sim
#sleep 500
##run general/parser/import_file.sim
#sleep 500
#run general/parser/insert_tb.sim
#sleep 500
#run general/parser/tags_dynamically_specifiy.sim
#sleep 500
#run general/parser/interp.sim
#sleep 500
#run general/parser/lastrow.sim
sleep 500
run general/parser/limit.sim
sleep 500