Merge branch 'main' into fix/liaohj

Haojun Liao 2023-09-28 14:35:14 +08:00
commit 0047d1e1a6
26 changed files with 337 additions and 67 deletions

View File

@ -29,7 +29,7 @@
extern "C" {
#endif
#define AUDIT_DETAIL_MAX 64000
#define AUDIT_DETAIL_MAX 65472
typedef struct {
const char *server;

View File

@ -165,6 +165,13 @@ static FORCE_INLINE int32_t tarray2SortInsert(void *arr, const void *elePtr, int
#define TARRAY2_FOREACH_PTR_REVERSE(a, ep) \
for (int32_t __i = (a)->size - 1; __i >= 0 && ((ep) = &(a)->data[__i], 1); __i--)
#define TARRAY2_SORT(a, cmp) \
do { \
if ((a)->size > 1) { \
taosSort((a)->data, (a)->size, sizeof((a)->data[0]), (__compar_fn_t)cmp); \
} \
} while (0)
#ifdef __cplusplus
}
#endif
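For context, a minimal standalone sketch of the guard-then-sort pattern the new TARRAY2_SORT macro wraps: sort only when the array holds more than one element, using a comparator with the qsort/__compar_fn_t signature (as tsdbTFileSetCmprFn and tsdbTFileObjCmpr are assumed to have). It deliberately uses plain qsort and a made-up DemoFile type instead of TDengine's taosSort/TARRAY2 internals.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical element type standing in for STFileSet/STFileObj. */
typedef struct {
  int64_t fid;
} DemoFile;

/* Comparator with the __compar_fn_t signature, ordering by fid. */
static int demoFileCmpr(const void *p1, const void *p2) {
  const DemoFile *f1 = p1;
  const DemoFile *f2 = p2;
  return (f1->fid > f2->fid) - (f1->fid < f2->fid);
}

int main(void) {
  DemoFile files[] = {{3}, {1}, {2}};
  size_t n = sizeof(files) / sizeof(files[0]);
  /* Same guard as TARRAY2_SORT: skip the sort for 0 or 1 elements. */
  if (n > 1) {
    qsort(files, n, sizeof(files[0]), demoFileCmpr);
  }
  for (size_t i = 0; i < n; i++) {
    printf("%lld\n", (long long)files[i].fid);
  }
  return 0;
}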

View File

@ -33,14 +33,17 @@ adapterName="taosadapter"
benchmarkName="taosBenchmark"
dumpName="taosdump"
demoName="taosdemo"
xname="taosx"
clientName2="taos"
serverName2="${clientName2}d"
configFile2="${clientName2}.cfg"
productName2="TDengine"
emailName2="taosdata.com"
xname2="${clientName2}x"
adapterName2="${clientName2}adapter"
explorerName="${clientName2}-explorer"
benchmarkName2="${clientName2}Benchmark"
demoName2="${clientName2}demo"
dumpName2="${clientName2}dump"
@ -235,6 +238,12 @@ function install_bin() {
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
${csudo}rm -f ${bin_link_dir}/${xname2} || :
${csudo}rm -f ${bin_link_dir}/${explorerName} || :
#Make link
[ -x ${install_main_dir}/bin/${xname2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${xname2} ${bin_link_dir}/${xname2} || :
[ -x ${install_main_dir}/bin/${explorerName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${explorerName} ${bin_link_dir}/${explorerName} || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript2} || :
fi
}
@ -693,9 +702,29 @@ function clean_service_on_systemd() {
fi
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
${csudo}rm -f ${tarbitratord_service_config}
# if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
# ${csudo}rm -f ${service_config_dir}/${serverName2}.service
# fi
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
x_service_config="${service_config_dir}/${xname2}.service"
if [ -e "$x_service_config" ]; then
if systemctl is-active --quiet ${xname2}; then
echo "${productName2} ${xname2} is running, stopping it..."
${csudo}systemctl stop ${xname2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${xname2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${x_service_config}
fi
explorer_service_config="${service_config_dir}/${explorerName}.service"
if [ -e "$explorer_service_config" ]; then
if systemctl is-active --quiet ${explorerName}; then
echo "${productName2} ${explorerName} is running, stopping it..."
${csudo}systemctl stop ${explorerName} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${explorerName} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${explorer_service_config}
${csudo}rm -f /etc/${clientName2}/explorer.toml
fi
fi
}
function install_service_on_systemd() {

View File

@ -123,10 +123,11 @@ function clean_bin() {
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
${csudo}rm -f ${bin_link_dir}/${keeperName2} || :
# ${csudo}rm -f ${bin_link_dir}/${xName2} || :
# ${csudo}rm -f ${bin_link_dir}/${explorerName2} || :
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
${csudo}rm -f ${bin_link_dir}/${xName2} || :
${csudo}rm -f ${bin_link_dir}/${explorerName2} || :
${csudo}rm -f ${bin_link_dir}/${clientName2} || :
${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || :
${csudo}rm -f ${bin_link_dir}/${dumpName2} || :
@ -195,26 +196,28 @@ function clean_service_on_systemd() {
fi
${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
# x_service_config="${service_config_dir}/${xName2}.service"
# if [ -e "$x_service_config" ]; then
# if systemctl is-active --quiet ${xName2}; then
# echo "${productName2} ${xName2} is running, stopping it..."
# ${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
# fi
# ${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
# ${csudo}rm -f ${x_service_config}
# fi
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
x_service_config="${service_config_dir}/${xName2}.service"
if [ -e "$x_service_config" ]; then
if systemctl is-active --quiet ${xName2}; then
echo "${productName2} ${xName2} is running, stopping it..."
${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${x_service_config}
fi
# explorer_service_config="${service_config_dir}/${explorerName2}.service"
# if [ -e "$explorer_service_config" ]; then
# if systemctl is-active --quiet ${explorerName2}; then
# echo "${productName2} ${explorerName2} is running, stopping it..."
# ${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
# fi
# ${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
# ${csudo}rm -f ${explorer_service_config}
# ${csudo}rm -f /etc/${clientName2}/explorer.toml
# fi
explorer_service_config="${service_config_dir}/${explorerName2}.service"
if [ -e "$explorer_service_config" ]; then
if systemctl is-active --quiet ${explorerName2}; then
echo "${productName2} ${explorerName2} is running, stopping it..."
${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${explorer_service_config}
${csudo}rm -f /etc/${clientName2}/explorer.toml
fi
fi
}
function clean_service_on_sysvinit() {

View File

@ -2265,7 +2265,7 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) {
SName name = {0};
tNameFromString(&name, alterReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, alterReq.name, alterReq.sql, alterReq.sqlLen);
auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, name.tname, alterReq.sql, alterReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {

View File

@ -236,6 +236,7 @@ static int32_t load_fs(STsdb *pTsdb, const char *fname, TFileSetArray *arr) {
code = TARRAY2_APPEND(arr, fset);
TSDB_CHECK_CODE(code, lino, _exit);
}
TARRAY2_SORT(arr, tsdbTFileSetCmprFn);
} else {
code = TSDB_CODE_FILE_CORRUPTED;
TSDB_CHECK_CODE(code, lino, _exit);

View File

@ -189,6 +189,7 @@ static int32_t tsdbJsonToSttLvl(STsdb *pTsdb, const cJSON *json, SSttLvl **lvl)
code = TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
if (code) return code;
}
TARRAY2_SORT(lvl[0]->fobjArr, tsdbTFileObjCmpr);
return 0;
}
@ -268,6 +269,7 @@ int32_t tsdbJsonToTFileSet(STsdb *pTsdb, const cJSON *json, STFileSet **fset) {
code = TARRAY2_APPEND((*fset)->lvlArr, lvl);
if (code) return code;
}
TARRAY2_SORT((*fset)->lvlArr, tsdbSttLvlCmprFn);
} else {
return TSDB_CODE_FILE_CORRUPTED;
}

View File

@ -447,6 +447,9 @@ int32_t tsdbMerge(void *arg) {
_exit:
if (code) {
TSDB_ERROR_LOG(TD_VID(tsdb->pVnode), lino, code);
tsdbFatal("vgId:%d, failed to merge stt files since %s. code:%d", TD_VID(tsdb->pVnode), terrstr(), code);
taosMsleep(100);
exit(EXIT_FAILURE);
} else if (merger->ctx->opened) {
tsdbDebug("vgId:%d %s done", TD_VID(tsdb->pVnode), __func__);
}

View File

@ -4899,6 +4899,7 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
// alloc
STsdbReadSnap* pSnap = (STsdbReadSnap*)taosMemoryCalloc(1, sizeof(STsdbReadSnap));
if (pSnap == NULL) {
taosThreadRwlockUnlock(&pTsdb->rwLock);
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
@ -4908,6 +4909,7 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
pSnap->pMem = pTsdb->mem;
pSnap->pNode = taosMemoryMalloc(sizeof(*pSnap->pNode));
if (pSnap->pNode == NULL) {
taosThreadRwlockUnlock(&pTsdb->rwLock);
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
@ -4922,6 +4924,7 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
pSnap->pIMem = pTsdb->imem;
pSnap->pINode = taosMemoryMalloc(sizeof(*pSnap->pINode));
if (pSnap->pINode == NULL) {
taosThreadRwlockUnlock(&pTsdb->rwLock);
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
@ -4932,6 +4935,9 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
tsdbRefMemTable(pTsdb->imem, pSnap->pINode);
}
// unlock
taosThreadRwlockUnlock(&pTsdb->rwLock);
// fs
code = tsdbFSCreateRefSnapshot(pTsdb->pFS, &pSnap->pfSetArray);
if (code == TSDB_CODE_SUCCESS) {
@ -4939,8 +4945,6 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
}
_exit:
taosThreadRwlockUnlock(&pTsdb->rwLock);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("vgId:%d take read snapshot failed, code:%s", TD_VID(pTsdb->pVnode), tstrerror(code));
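The snapshot change above moves the rwlock release off the shared _exit label and onto each path: every early error return unlocks before jumping, and the success path unlocks as soon as the in-memory references are taken, so the lock is no longer held across the file-set snapshot call. A minimal sketch of that pattern with a plain pthread rwlock; the Snap type and take_snapshot name are illustrative, not the tsdb API.

#include <pthread.h>
#include <stdlib.h>

typedef struct { int refs; } Snap;

static int take_snapshot(pthread_rwlock_t *lock, Snap **out) {
  pthread_rwlock_rdlock(lock);

  Snap *snap = calloc(1, sizeof(*snap));
  if (snap == NULL) {
    pthread_rwlock_unlock(lock); /* release before the early error return */
    return -1;
  }

  /* ... take references on in-memory state while still holding the lock ... */
  snap->refs = 1;

  pthread_rwlock_unlock(lock); /* release as soon as the references are held */

  /* slower follow-up work (analogous to tsdbFSCreateRefSnapshot) runs unlocked */
  *out = snap;
  return 0;
}

int main(void) {
  pthread_rwlock_t lock;
  pthread_rwlock_init(&lock, NULL);
  Snap *snap = NULL;
  int code = take_snapshot(&lock, &snap);
  free(snap);
  pthread_rwlock_destroy(&lock);
  return code;
}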

View File

@ -165,7 +165,7 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx*
}
if (CTG_IS_META_NULL(output->metaType)) {
ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(ctx->pName));
ctgError("no tbmeta got, tbName:%s", tNameGetTableName(ctx->pName));
ctgRemoveTbMetaFromCache(pCtg, ctx->pName, false);
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
}

View File

@ -1171,7 +1171,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
if (CTG_IS_META_NULL(pOut->metaType)) {
ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
}
@ -1341,7 +1341,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
if (CTG_IS_META_NULL(pOut->metaType)) {
ctgTaskError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
ctgTaskError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
}

View File

@ -334,6 +334,7 @@ static int32_t createDataBlockForEmptyInput(SOperatorInfo* pOperator, SSDataBloc
colInfo.info.type = TSDB_DATA_TYPE_NULL;
colInfo.info.bytes = 1;
SExprInfo* pOneExpr = &pOperator->exprSupp.pExprInfo[i];
for (int32_t j = 0; j < pOneExpr->base.numOfParams; ++j) {
SFunctParam* pFuncParam = &pOneExpr->base.pParam[j];
@ -353,6 +354,10 @@ static int32_t createDataBlockForEmptyInput(SOperatorInfo* pOperator, SSDataBloc
}
blockDataEnsureCapacity(pBlock, pBlock->info.rows);
for (int32_t i = 0; i < blockDataGetNumOfCols(pBlock); ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
colDataSetNULL(pColInfoData, 0);
}
*ppBlock = pBlock;
return TSDB_CODE_SUCCESS;

View File

@ -2217,7 +2217,9 @@ FETCH_NEXT_BLOCK:
if (pSDB) {
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
if (!pInfo->igCheckUpdate && pInfo->pUpdateInfo) {
checkUpdateData(pInfo, true, pSDB, false);
}
printSpecDataBlock(pSDB, getStreamOpName(pOperator->operatorType), "update", GET_TASKID(pTaskInfo));
calBlockTbName(pInfo, pSDB);
return pSDB;

View File

@ -651,7 +651,7 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int
(SDataType){.bytes = getApercentileMaxSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY};
} else {
// original percent param is reserved
if (2 != numOfParams) {
if (3 != numOfParams && 2 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
@ -660,6 +660,19 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
if (3 == numOfParams) {
uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type;
if (!IS_STR_DATA_TYPE(para3Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2);
if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validateApercentileAlgo((SValueNode*)pParamNode2)) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"Third parameter algorithm of apercentile must be 'default' or 't-digest'");
}
}
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
}
@ -744,7 +757,11 @@ int32_t topBotCreateMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SN
}
int32_t apercentileCreateMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters) {
return reserveFirstMergeParam(pRawParameters, pPartialRes, pParameters);
int32_t code = reserveFirstMergeParam(pRawParameters, pPartialRes, pParameters);
if (TSDB_CODE_SUCCESS == code && pRawParameters->length >= 3) {
code = nodesListStrictAppend(*pParameters, nodesCloneNode(nodesListGetNode(pRawParameters, 2)));
}
return code;
}
static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {

View File

@ -1904,7 +1904,7 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
return TSDB_CODE_SUCCESS;
}
static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo* pOutput) {
static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo* pOutput, bool* hasRes) {
pOutput->percent = pInput->percent;
pOutput->algo = pInput->algo;
if (pOutput->algo == APERCT_ALGO_TDIGEST) {
@ -1915,6 +1915,10 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
return;
}
if (hasRes) {
*hasRes = true;
}
buildTDigestInfo(pOutput);
TDigest* pTDigest = pOutput->pTDigest;
tdigestAutoFill(pTDigest, COMPRESSION);
@ -1931,6 +1935,10 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
return;
}
if (hasRes) {
*hasRes = true;
}
buildHistogramInfo(pOutput);
SHistogramInfo* pHisto = pOutput->pHisto;
@ -1970,12 +1978,13 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {
qDebug("%s total %" PRId64 " rows will merge, %p", __FUNCTION__, pInput->numOfRows, pInfo->pHisto);
bool hasRes = false;
int32_t start = pInput->startRowIndex;
for (int32_t i = start; i < start + pInput->numOfRows; ++i) {
char* data = colDataGetData(pCol, i);
SAPercentileInfo* pInputInfo = (SAPercentileInfo*)varDataVal(data);
apercentileTransferInfo(pInputInfo, pInfo);
apercentileTransferInfo(pInputInfo, pInfo, &hasRes);
}
if (pInfo->algo != APERCT_ALGO_TDIGEST) {
@ -1984,7 +1993,7 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {
pInfo->pHisto->numOfEntries, pInfo->pHisto);
}
SET_VAL(pResInfo, 1, 1);
SET_VAL(pResInfo, hasRes ? 1 : 0, 1);
return TSDB_CODE_SUCCESS;
}
@ -2056,7 +2065,7 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
qDebug("%s start to combine apercentile, %p", __FUNCTION__, pDBuf->pHisto);
apercentileTransferInfo(pSBuf, pDBuf);
apercentileTransferInfo(pSBuf, pDBuf, NULL);
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;

View File

@ -88,6 +88,7 @@ static EDealRes doRewriteExpr(SNode** pNode, void* pContext) {
pCxt->pOutputs[index] = true;
break;
}
index++;
}
}
break;
@ -174,6 +175,7 @@ static int32_t cloneRewriteExprs(SNodeList* pExprs, bool* pOutputs, SNodeList**
break;
}
}
index++;
}
return code;
}

View File

@ -3983,18 +3983,15 @@ _return:
return code;
}
static int32_t fltSclGetDatumValueFromPoint(SFltSclPoint *point, SFltSclDatum *d) {
static int32_t fltSclGetTimeStampDatum(SFltSclPoint *point, SFltSclDatum *d) {
*d = point->val;
if (point->val.kind == FLT_SCL_DATUM_KIND_NULL) {
return TSDB_CODE_SUCCESS;
}
if (point->val.kind == FLT_SCL_DATUM_KIND_MAX) {
getDataMax(d->type.type, &(d->i));
} else if (point->val.kind == FLT_SCL_DATUM_KIND_MIN) {
getDataMin(d->type.type, &(d->i));
}
d->kind = FLT_SCL_DATUM_KIND_INT64;
if (IS_INTEGER_TYPE(d->type.type) || IS_TIMESTAMP_TYPE(d->type.type)) {
if (point->val.kind == FLT_SCL_DATUM_KIND_MAX) {
getDataMax(point->val.type.type, &(d->i));
} else if (point->val.kind == FLT_SCL_DATUM_KIND_MIN) {
getDataMin(point->val.type.type, &(d->i));
} else if (point->val.kind == FLT_SCL_DATUM_KIND_INT64) {
if (point->excl) {
if (point->start) {
++d->i;
@ -4002,6 +3999,28 @@ static int32_t fltSclGetDatumValueFromPoint(SFltSclPoint *point, SFltSclDatum *d
--d->i;
}
}
} else if (point->val.kind == FLT_SCL_DATUM_KIND_FLOAT64) {
double v = d->d;
if (point->excl) {
if (point->start) {
d->i = v + 1;
} else {
d->i = v - 1;
}
} else {
d->i = v;
}
} else if (point->val.kind == FLT_SCL_DATUM_KIND_UINT64) {
uint64_t v = d->u;
if (point->excl) {
if (point->start) {
d->i = v + 1;
} else {
d->i = v - 1;
}
} else {
d->i = v;
}
} else {
qError("not supported type %d when get datum from point", d->type.type);
}
@ -4022,12 +4041,13 @@ int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) {
SFltSclColumnRange *colRange = taosArrayGet(colRanges, 0);
SArray *points = colRange->points;
if (taosArrayGetSize(points) == 2) {
*win = TSWINDOW_DESC_INITIALIZER;
SFltSclPoint *startPt = taosArrayGet(points, 0);
SFltSclPoint *endPt = taosArrayGet(points, 1);
SFltSclDatum start;
SFltSclDatum end;
fltSclGetDatumValueFromPoint(startPt, &start);
fltSclGetDatumValueFromPoint(endPt, &end);
fltSclGetTimeStampDatum(startPt, &start);
fltSclGetTimeStampDatum(endPt, &end);
win->skey = start.i;
win->ekey = end.i;
*isStrict = true;

View File

@ -576,6 +576,7 @@ void* destroyConnPool(SCliThrd* pThrd) {
connList = taosHashIterate((SHashObj*)pool, connList);
}
taosHashCleanup(pool);
pThrd->pool = NULL;
return NULL;
}
@ -870,9 +871,11 @@ static void cliDestroyConn(SCliConn* conn, bool clear) {
connList->list->numOfConn--;
connList->size--;
} else {
if (pThrd->pool) {
SConnList* connList = taosHashGet((SHashObj*)pThrd->pool, conn->dstAddr, strlen(conn->dstAddr) + 1);
if (connList != NULL) connList->list->numOfConn--;
}
}
conn->list = NULL;
pThrd->newConnCount--;

View File

@ -798,13 +798,12 @@ void taosGetProcIODelta(int64_t *rchars, int64_t *wchars, int64_t *read_bytes, i
}
int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) {
#ifdef WINDOWS
*receive_bytes = 0;
*transmit_bytes = 0;
#ifdef WINDOWS
return 0;
#elif defined(_TD_DARWIN_64)
*receive_bytes = 0;
*transmit_bytes = 0;
return 0;
#else
TdFilePtr pFile = taosOpenFile(tsSysNetFile, TD_FILE_READ | TD_FILE_STREAM);
@ -841,8 +840,8 @@ int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) {
"%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64
" %" PRId64,
nouse0, &o_rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &o_tbytes, &tpackets);
*receive_bytes = o_rbytes;
*transmit_bytes = o_tbytes;
*receive_bytes += o_rbytes;
*transmit_bytes += o_tbytes;
}
taosCloseFile(&pFile);
@ -854,8 +853,8 @@ int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) {
void taosGetCardInfoDelta(int64_t *receive_bytes, int64_t *transmit_bytes) {
static int64_t last_receive_bytes = 0;
static int64_t last_transmit_bytes = 0;
static int64_t cur_receive_bytes = 0;
static int64_t cur_transmit_bytes = 0;
int64_t cur_receive_bytes = 0;
int64_t cur_transmit_bytes = 0;
if (taosGetCardInfo(&cur_receive_bytes, &cur_transmit_bytes) == 0) {
*receive_bytes = cur_receive_bytes - last_receive_bytes;
*transmit_bytes = cur_transmit_bytes - last_transmit_bytes;
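The network-stats change above does two things: per-interface counters are now accumulated with += so every interface contributes to the total (rather than only the last one parsed), and the cur_* counters become plain locals so each call starts from zero before subtracting the saved last_* values. A minimal sketch of that cumulative-counter delta pattern, with illustrative names rather than the taosGetCardInfo* functions themselves, and with the previous-total bookkeeping written in its common form.

#include <stdint.h>
#include <stdio.h>

/* Sum cumulative per-interface counters into a caller-zeroed total,
   mirroring the switch from '=' to '+=' in the patch. */
static void read_totals(const int64_t *per_if_rx, int n, int64_t *total_rx) {
  for (int i = 0; i < n; i++) {
    *total_rx += per_if_rx[i];
  }
}

/* Delta pattern: only the previous total is static; the current total is a
   local that is re-zeroed and re-read on every call. */
static int64_t rx_delta(const int64_t *per_if_rx, int n) {
  static int64_t last_rx = 0;
  int64_t cur_rx = 0;
  read_totals(per_if_rx, n, &cur_rx);
  int64_t delta = cur_rx - last_rx;
  last_rx = cur_rx;
  return delta;
}

int main(void) {
  int64_t sample1[] = {100, 50}; /* cumulative bytes for two interfaces */
  int64_t sample2[] = {160, 90};
  printf("%lld\n", (long long)rx_delta(sample1, 2)); /* 150 */
  printf("%lld\n", (long long)rx_delta(sample2, 2)); /* 100 */
  return 0;
}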

View File

@ -0,0 +1,86 @@
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes
from math import inf
class TDTestCase:
def caseDescription(self):
'''
case1<shenglian zhou>: [TS-4088] timestamp range support operator
'''
return
def init(self, conn, logSql, replicaVer=1):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), True)
self._conn = conn
def restartTaosd(self, index=1, dbname="db"):
tdDnodes.stop(index)
tdDnodes.startWithoutSleep(index)
tdSql.execute(f"use ts_range")
def run(self):
print("running {}".format(__file__))
tdSql.execute("drop database if exists ts_range")
tdSql.execute("create database if not exists ts_range")
tdSql.execute('use ts_range')
tdSql.execute('create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);')
tdSql.execute("create table tb1 using stb1 tags(1,'1',1.0);")
tdSql.execute("create table tb2 using stb1 tags(2,'2',2.0);")
tdSql.execute("create table tb3 using stb1 tags(3,'3',3.0);")
tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"123","1234",1,1,1,1);')
tdSql.execute("insert into tb1 values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);")
tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:02\',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL);')
tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:03\',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3);')
tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:04\',true,4,4,4,4,4,4,"456","4567",4,4,4,4);')
tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:05\',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807);')
tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:06\',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0);')
tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"111","1111",1,1,1,1);')
tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:01\',true,2,2,2,2,2,2,"222","2222",2,2,2,2);')
tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:02\',true,3,3,2,3,3,3,"333","3333",3,3,3,3);')
tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);')
tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:04\',true,5,5,5,5,5,5,"555","5555",5,5,5,5);')
tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:05\',true,6,6,6,6,6,6,"666","6666",6,6,6,6);')
tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:06\',true,7,7,7,7,7,7,"777","7777",7,7,7,7);')
tdSql.query('select count(*) from stb1 where ts < 1000000000000 + 10s')
tdSql.checkRows(1)
tdSql.checkData(0, 0, 0)
tdSql.query('select count(*) from stb1 where ts >= 1000000000000 + 10s')
tdSql.checkRows(1)
tdSql.checkData(0, 0, 14)
tdSql.query('select count(*) from stb1 where ts > 1000000000000 - 10s and ts <= 1000000000000 + 10s')
tdSql.checkRows(1)
tdSql.checkData(0, 0, 0)
tdSql.query('select count(*) from stb1 where ts > 1636592400000 + 3s');
tdSql.checkData(0, 0, 6)
#tdSql.execute('drop database ts_range')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -993,6 +993,7 @@
,,y,script,./test.sh -f tsim/query/udf_with_const.sim
,,y,script,./test.sh -f tsim/query/join_interval.sim
,,y,script,./test.sh -f tsim/query/join_pk.sim
,,y,script,./test.sh -f tsim/query/count_spread.sim
,,y,script,./test.sh -f tsim/query/unionall_as_table.sim
,,y,script,./test.sh -f tsim/query/multi_order_by.sim
,,y,script,./test.sh -f tsim/query/sys_tbname.sim
@ -1008,6 +1009,7 @@
,,y,script,./test.sh -f tsim/query/nullColSma.sim
,,y,script,./test.sh -f tsim/query/bug3398.sim
,,y,script,./test.sh -f tsim/query/explain_tsorder.sim
,,y,script,./test.sh -f tsim/query/apercentile.sim
,,y,script,./test.sh -f tsim/qnode/basic1.sim
,,y,script,./test.sh -f tsim/snode/basic1.sim
,,y,script,./test.sh -f tsim/mnode/basic1.sim
@ -1243,6 +1245,7 @@
#develop test
,,n,develop-test,python3 ./test.py -f 2-query/table_count_scan.py
,,n,develop-test,python3 ./test.py -f 2-query/ts-range.py
,,n,develop-test,python3 ./test.py -f 2-query/show_create_db.py
,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py
,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/custom_col_tag.py

View File

@ -78,7 +78,7 @@ class TDSql:
self.cursor.execute(s)
time.sleep(2)
def error(self, sql, expectedErrno = None):
def error(self, sql, expectedErrno = None, expectErrInfo = None):
caller = inspect.getframeinfo(inspect.stack()[1][0])
expectErrNotOccured = True
@ -87,12 +87,9 @@ class TDSql:
except BaseException as e:
expectErrNotOccured = False
self.errno = e.errno
self.error_info = repr(e)
# print(error_info)
# self.error_info = error_info[error_info.index('(')+1:-1].split(",")[0].replace("'","")
error_info = repr(e)
self.error_info = error_info[error_info.index('(')+1:-1].split(",")[0].replace("'","")
# self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","")
# print("!!!!!!!!!!!!!!",self.error_info)
if expectErrNotOccured:
tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql))
else:
@ -108,8 +105,15 @@ class TDSql:
else:
tdLog.info("sql:%s, expect error occured" % (sql))
return self.error_info
if expectErrInfo != None:
if expectErrInfo == self.error_info:
tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo))
else:
tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
else:
tdLog.info("sql:%s, expect error occured" % (sql))
return self.error_info
def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None):
self.sql = sql

View File

@ -0,0 +1,36 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
sql drop database if exists test2;
sql create database test2;
sql use test2;
sql create table s(ts timestamp,v double) tags(id nchar(16));
sql create table t using s tags('11') ;
sql insert into t values(now,null);
sql select APERCENTILE(v,50,'t-digest') as k from s where ts > now-1d and ts < now interval(1h);
if $rows != 1 then
return -1
endi
if $data00 != NULL then
return -1
endi
sql select APERCENTILE(v,50) as k from s where ts > now-1d and ts < now interval(1h);
if $rows != 1 then
return -1
endi
if $data00 != NULL then
return -1
endi
sql select APERCENTILE(v,50) as k from s where ts > now-1d and ts < now interval(1h);
if $rows != 1 then
return -1
endi
if $data00 != NULL then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -0,0 +1,24 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
sql create database test;
sql use test;
sql create table st(ts timestamp, f int) tags(t int);
sql insert into ct1 using st tags(1) values(now, 0)(now+1s, 1)(now+2s, 10)(now+3s, 11)
sql insert into ct2 using st tags(2) values(now+2s, 2)(now+3s, 3)
sql insert into ct3 using st tags(3) values(now+4s, 4)(now+5s, 5)
sql insert into ct4 using st tags(4) values(now+6s, 6)(now+7s, 7)
sql select count(*), spread(ts) from st where tbname='ct1'
print $data00, $data01
if $data00 != @4@ then
return -1
endi
if $data01 != @3000.000000000@ then
return -1
endi
sql drop database test;
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -180,6 +180,13 @@ class TDTestCase:
tdSql.error(f'show indexes from db.ctb1 from db')
tdSql.error(f'show indexes from `db`.`ctb1` from db')
# check error information
tdSql.error(f'create index idx1 on db2.stb (t1);', expectErrInfo='Database not exist')
tdSql.error(f'use db2;', expectErrInfo='Database not exist')
tdSql.error(f' alter stable db2.stb add column c2 int;', expectErrInfo='Database not exist')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -204,8 +204,12 @@ class TDTestCase:
for sql in sqls:
self.query_and_check_with_slimit(sql, 10, 2, offset)
def test_group_by_operator(self):
tdSql.query('select count(*), c1+1 from meters group by tbname, c1+1', 1)
def run(self):
self.prepareTestEnv()
self.test_group_by_operator()
self.test_interval_limit_offset()
def stop(self):