Merge remote-tracking branch 'origin/3.0' into fix/mnode

commit 7ff241892b
@@ -65,12 +65,14 @@ for (int i = 1; i < keyLen; ++i) { \
#define OTD_TIMESTAMP_COLUMN_NAME "ts"
#define OTD_METRIC_VALUE_COLUMN_NAME "value"

#define TS "_ts"
#define TS_LEN 3
#define TAG "_tagNone"
#define TAG_LEN 8
#define VALUE "value"
#define VALUE_LEN 5
#define TS "_ts"
#define TS_LEN 3
#define TAG "_tag"
#define TAG_LEN 4
#define TAG_VALUE "NULL"
#define TAG_VALUE_LEN 4
#define VALUE "value"
#define VALUE_LEN 5

#define BINARY_ADD_LEN 2 // "binary" 2 means " "
#define NCHAR_ADD_LEN 3 // L"nchar" 3 means L" "
@@ -598,25 +600,33 @@ static bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg){
kvVal->type = TSDB_DATA_TYPE_FLOAT;
kvVal->f = (float)result;
}else if ((left == 1 && *endptr == 'i') || (left == 3 && strncasecmp(endptr, "i64", left) == 0)){
if(result >= (double)INT64_MAX){
kvVal->i = INT64_MAX;
}else if(result <= (double)INT64_MIN){
kvVal->i = INT64_MIN;
}else{
kvVal->i = result;
if(smlDoubleToInt64OverFlow(result)){
errno = 0;
int64_t tmp = taosStr2Int64(pVal, &endptr, 10);
if(errno == ERANGE){
smlBuildInvalidDataMsg(msg, "big int out of range[-9223372036854775808,9223372036854775807]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_BIGINT;
kvVal->i = tmp;
return true;
}
kvVal->type = TSDB_DATA_TYPE_BIGINT;
kvVal->i = (int64_t)result;
}else if ((left == 3 && strncasecmp(endptr, "u64", left) == 0)){
if(result < 0){
smlBuildInvalidDataMsg(msg, "unsigned big int is too large, out of precision", pVal);
return false;
}
if(result >= (double)UINT64_MAX){
kvVal->u = UINT64_MAX;
}else{
kvVal->u = result;
if(result >= (double)UINT64_MAX || result < 0){
errno = 0;
uint64_t tmp = taosStr2UInt64(pVal, &endptr, 10);
if(errno == ERANGE || result < 0){
smlBuildInvalidDataMsg(msg, "unsigned big int out of range[0,18446744073709551615]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_UBIGINT;
kvVal->u = tmp;
return true;
}
kvVal->type = TSDB_DATA_TYPE_UBIGINT;
kvVal->u = result;
}else if (left == 3 && strncasecmp(endptr, "i32", left) == 0){
if(!IS_VALID_INT(result)){
smlBuildInvalidDataMsg(msg, "int out of range[-2147483648,2147483647]", pVal);
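The hunk above replaces a clamp-to-INT64_MAX/INT64_MIN fallback with an exact re-parse of the original string when the intermediate double cannot represent the value. A minimal standalone sketch of that technique follows; it does not use the project's helpers (smlDoubleToInt64OverFlow, taosStr2Int64), only standard C equivalents, and all names and values are illustrative.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* A double has only 52 mantissa bits, so values near the int64 boundaries
 * cannot be trusted after the first strtod pass. */
static bool doubleToInt64Overflows(double v) {
  return v >= (double)INT64_MAX || v <= (double)INT64_MIN;
}

static bool parseInt64Strict(const char *s, int64_t *out) {
  char  *end = NULL;
  double d = strtod(s, &end);        /* first pass over the text value */
  if (!doubleToInt64Overflows(d)) {
    *out = (int64_t)d;
    return true;
  }
  errno = 0;
  int64_t v = strtoll(s, &end, 10);  /* exact re-parse of the raw string */
  if (errno == ERANGE) return false; /* genuinely outside the int64 range */
  *out = v;
  return true;
}

int main(void) {
  int64_t v = 0;
  bool ok = parseInt64Strict("9223372036854775807", &v);
  printf("ok=%d value=%lld\n", ok, (long long)v);
  return 0;
}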
@@ -1103,8 +1113,7 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char* sql, SSmlTable
kv->keyLen = VALUE_LEN;
kv->value = value;
kv->length = valueLen;
if(!smlParseValue(kv, &info->msgBuf) || kv->type == TSDB_DATA_TYPE_BINARY
    || kv->type == TSDB_DATA_TYPE_NCHAR || kv->type == TSDB_DATA_TYPE_BOOL){
if(!smlParseValue(kv, &info->msgBuf)){
return TSDB_CODE_SML_INVALID_DATA;
}
@@ -1124,8 +1133,8 @@ static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, char *c
if(!kv) return TSDB_CODE_OUT_OF_MEMORY;
kv->key = TAG;
kv->keyLen = TAG_LEN;
kv->value = TAG;
kv->length = TAG_LEN;
kv->value = TAG_VALUE;
kv->length = TAG_VALUE_LEN;
kv->type = TSDB_DATA_TYPE_NCHAR;
if(cols) taosArrayPush(cols, &kv);
return TSDB_CODE_SUCCESS;
@@ -2264,6 +2273,7 @@ static int32_t smlParseLine(SSmlHandle *info, char* lines[], int numLines){
uError("SML:0x%" PRIx64 " smlParseJSON failed:%s", info->id, *lines);
return code;
}
return code;
}

for (int32_t i = 0; i < numLines; ++i) {
@@ -208,6 +208,7 @@ TEST(testCase, smlParseCols_Error_Test) {
memcpy(sql, data[i], len + 1);
SArray *cols = taosArrayInit(8, POINTER_BYTES);
int32_t ret = smlParseCols(sql, len, cols, NULL, false, dumplicateKey, &msgBuf);
printf("i:%d\n",i);
ASSERT_NE(ret, TSDB_CODE_SUCCESS);
taosHashClear(dumplicateKey);
taosMemoryFree(sql);
@@ -272,11 +273,11 @@ TEST(testCase, smlParseCols_tag_Test) {

// nchar
kv = (SSmlKv *)taosArrayGetP(cols, 0);
ASSERT_EQ(strncasecmp(kv->key, TAG, strlen(TAG)), 0);
ASSERT_EQ(kv->keyLen, strlen(TAG));
ASSERT_EQ(strncasecmp(kv->key, TAG, TAG_LEN), 0);
ASSERT_EQ(kv->keyLen, TAG_LEN);
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_NCHAR);
ASSERT_EQ(kv->length, strlen(TAG));
ASSERT_EQ(strncasecmp(kv->value, TAG, strlen(TAG)), 0);
ASSERT_EQ(kv->length, TAG_LEN);
ASSERT_EQ(strncasecmp(kv->value, TAG_VALUE, TAG_VALUE_LEN), 0);
taosMemoryFree(kv);

taosArrayDestroy(cols);
@@ -506,7 +507,7 @@ TEST(testCase, smlProcess_influx_Test) {
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,heading=221,grade=0,fuel_consumption=25 1451608403000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451609404000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451619405000000000",
"readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 145160640600000000",
"readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 1451606406000000000",
"readings,name=truck_2,driver=Derek,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606407000000000",
"readings,name=truck_2,fleet=North,driver=Derek,model=F-150 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451609408000000000",
"readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451629409000000000",
@@ -745,7 +746,7 @@ TEST(testCase, smlProcess_json1_Test) {
" }\n"
" }\n"
"]";
int ret = smlProcess(info, (char **)(&sql), -1);
int ret = smlProcess(info, (char **)(&sql), 1);
ASSERT_EQ(ret, 0);

// case 1
@@ -1221,3 +1222,55 @@ TEST(testCase, sml_TD15662_Test) {

taos_free_result(res);
}

TEST(testCase, sml_TD15735_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);

TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db");
taos_free_result(pRes);

pRes = taos_query(taos, "use sml_db");
taos_free_result(pRes);

SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);

SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
ASSERT_NE(info, nullptr);

const char *sql[1] = {
"{'metric': 'pekoiw', 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': {'value': False, 'type': 'bool'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {'value': 9223372036854775807, 'type': 'bigint'}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'binaryTagValue', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}}}",
};
int32_t ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
ASSERT_NE(ret, 0);

destroyRequest(request);
smlDestroyInfo(info);
}

TEST(testCase, sml_TD15742_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);

TAOS_RES* pRes = taos_query(taos, "create database if not exists TD15742");
taos_free_result(pRes);

pRes = taos_query(taos, "use TD15742");
taos_free_result(pRes);

SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);

SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
ASSERT_NE(info, nullptr);

const char *sql[] = {
"zgzbix 1626006833641 False id=zgzbix_992_38861 t0=t t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7=\"binaryTagValue\" t8=L\"ncharTagValue\"",
};
int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
ASSERT_EQ(ret, 0);

destroyRequest(request);
smlDestroyInfo(info);
}
@@ -425,6 +425,12 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond*
rowLen += pCond->colList[i].bytes;
}

// make sure the output SSDataBlock size be less than 2MB.
int32_t TWOMB = 2 * 1024 * 1024;
if (pReadHandle->outputCapacity * rowLen > TWOMB) {
  pReadHandle->outputCapacity = TWOMB / rowLen;
}

// allocate buffer in order to load data blocks from file
pReadHandle->suppInfo.pstatis = taosMemoryCalloc(pCond->numOfCols, sizeof(SColumnDataAgg));
if (pReadHandle->suppInfo.pstatis == NULL) {
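To make the 2 MB cap concrete, here is a small self-contained arithmetic sketch of the same clamp; the default capacity and the row length are made-up example values, not numbers taken from this commit.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t capacity = 4096;              /* hypothetical default rows per output block */
  int32_t rowLen   = 600;               /* hypothetical row size in bytes */
  int32_t TWOMB    = 2 * 1024 * 1024;
  if (capacity * rowLen > TWOMB) {
    capacity = TWOMB / rowLen;          /* 3495 rows keep one block under 2 MB */
  }
  printf("capacity=%d blockBytes=%d\n", capacity, capacity * rowLen);
  return 0;
}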
@@ -1302,20 +1308,22 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*

if ((ascScan && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
    (!ascScan && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
if ((ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) ||
    (!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey))) {

bool cacheDataInFileBlockHole = (ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) ||
    (!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey));
if (cacheDataInFileBlockHole) {
// do not load file block into buffer
int32_t step = ascScan ? 1 : -1;

TSKEY maxKey =
    ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? (binfo.window.skey - step) : (binfo.window.ekey - step);
TSKEY maxKey = ascScan ? (binfo.window.skey - step) : (binfo.window.ekey - step);
cur->rows =
    tsdbReadRowsFromCache(pCheckInfo, maxKey, pTsdbReadHandle->outputCapacity, &cur->win, pTsdbReadHandle);
pTsdbReadHandle->realNumOfRows = cur->rows;

// update the last key value
pCheckInfo->lastKey = cur->win.ekey + step;
if (!ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {

if (!ascScan) {
TSWAP(cur->win.skey, cur->win.ekey);
}
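The cacheDataInFileBlockHole predicate above decides whether cached rows lie entirely before (ascending) or after (descending) the file block's time window, in which case the block never needs to be loaded. A reduced, illustrative version of that test with made-up key values:

#include <stdbool.h>
#include <stdio.h>

static bool cacheInHole(bool asc, long key, long skey, long ekey, long initVal) {
  return (asc  && key != initVal && key < skey) ||   /* cached key before block */
         (!asc && key != initVal && key > ekey);     /* cached key after block  */
}

int main(void) {
  /* block window [100, 200], next cached key 50, ascending scan */
  printf("%d\n", cacheInHole(true, 50, 100, 200, -1));   /* 1: serve cache, skip block load */
  printf("%d\n", cacheInHole(true, 150, 100, 200, -1));  /* 0: must merge cache with block  */
  return 0;
}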
@@ -1334,18 +1342,16 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
/*
 * no data in cache, only load data from file
 * during the query processing, data in cache will not be checked anymore.
 *
 * Here the buffer is not enough, so only part of file block can be loaded into memory buffer
 */
assert(pTsdbReadHandle->outputCapacity >= binfo.rows);
int32_t endPos = getEndPosInDataBlock(pTsdbReadHandle, &binfo);

if ((cur->pos == 0 && endPos == binfo.rows - 1 && ascScan) ||
    (cur->pos == (binfo.rows - 1) && endPos == 0 && (!ascScan))) {
bool wholeBlockReturned = ((abs(cur->pos - endPos) + 1) == binfo.rows);
if (wholeBlockReturned) {
pTsdbReadHandle->realNumOfRows = binfo.rows;

cur->rows = binfo.rows;
cur->win = binfo.window;
cur->win = binfo.window;
cur->mixBlock = false;
cur->blockCompleted = true;
@@ -1356,12 +1362,24 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
cur->lastKey = binfo.window.skey - 1;
cur->pos = -1;
}
} else {  // partially copy to dest buffer
} else {  // partially copy to dest buffer
// make sure to only load once
bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows -1 && (!ascScan)));
if (pTsdbReadHandle->outputCapacity < binfo.rows && firstTimeExtract) {
code = doLoadFileDataBlock(pTsdbReadHandle, pBlock, pCheckInfo, cur->slot);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}

copyAllRemainRowsFromFileBlock(pTsdbReadHandle, pCheckInfo, &binfo, endPos);
cur->mixBlock = true;
}

assert(cur->blockCompleted);
if (pTsdbReadHandle->outputCapacity >= binfo.rows) {
ASSERT(cur->blockCompleted);
}

if (cur->rows == binfo.rows) {
tsdbDebug("%p whole file block qualified, brange:%" PRId64 "-%" PRId64 ", rows:%d, lastKey:%" PRId64 ", %s",
          pTsdbReadHandle, cur->win.skey, cur->win.ekey, cur->rows, cur->lastKey, pTsdbReadHandle->idStr);
@@ -1858,15 +1876,14 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0];
TSKEY* tsArray = pCols->cols[0].pData;

int32_t step = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? 1 : -1;
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pTsdbReadHandle));
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);

int32_t pos = cur->pos;
int32_t step = ascScan? 1 : -1;

int32_t start = cur->pos;
int32_t end = endPos;

if (!ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
if (!ascScan) {
TSWAP(start, end);
}
@@ -1876,11 +1893,11 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
// the time window should always be ascending order: skey <= ekey
cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]};
cur->mixBlock = (numOfRows != pBlockInfo->rows);
cur->lastKey = tsArray[endPos] + step;
cur->blockCompleted = true;
cur->lastKey = tsArray[endPos] + step;
cur->blockCompleted = (ascScan? (endPos == pBlockInfo->rows - 1):(endPos == 0));

// The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases.
pos = endPos + step;
int32_t pos = endPos + step;
updateInfoAfterMerge(pTsdbReadHandle, pCheckInfo, numOfRows, pos);
doCheckGeneratedBlockRange(pTsdbReadHandle);
@@ -1892,20 +1909,44 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* pBlockInfo) {
// NOTE: reverse the order to find the end position in data block
int32_t endPos = -1;
int32_t order = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
int32_t order = ascScan? TSDB_ORDER_DESC : TSDB_ORDER_ASC;

SQueryFilePos* cur = &pTsdbReadHandle->cur;
SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0];

if (ASCENDING_TRAVERSE(pTsdbReadHandle->order) && pTsdbReadHandle->window.ekey >= pBlockInfo->window.ekey) {
endPos = pBlockInfo->rows - 1;
cur->mixBlock = (cur->pos != 0);
} else if (!ASCENDING_TRAVERSE(pTsdbReadHandle->order) && pTsdbReadHandle->window.ekey <= pBlockInfo->window.skey) {
endPos = 0;
cur->mixBlock = (cur->pos != pBlockInfo->rows - 1);
if (pTsdbReadHandle->outputCapacity >= pBlockInfo->rows) {
if (ascScan && pTsdbReadHandle->window.ekey >= pBlockInfo->window.ekey) {
endPos = pBlockInfo->rows - 1;
cur->mixBlock = (cur->pos != 0);
} else if ((!ascScan) && pTsdbReadHandle->window.ekey <= pBlockInfo->window.skey) {
endPos = 0;
cur->mixBlock = (cur->pos != pBlockInfo->rows - 1);
} else {
assert(pCols->numOfRows > 0);
endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pTsdbReadHandle->window.ekey, order);
cur->mixBlock = true;
}
} else {
assert(pCols->numOfRows > 0);
endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pTsdbReadHandle->window.ekey, order);
if (ascScan && pTsdbReadHandle->window.ekey >= pBlockInfo->window.ekey) {
endPos = TMIN(cur->pos + pTsdbReadHandle->outputCapacity - 1, pBlockInfo->rows - 1);
} else if ((!ascScan) && pTsdbReadHandle->window.ekey <= pBlockInfo->window.skey) {
endPos = TMAX(cur->pos - pTsdbReadHandle->outputCapacity + 1, 0);
} else {
ASSERT(pCols->numOfRows > 0);
endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pTsdbReadHandle->window.ekey, order);

// current data is more than the capacity
int32_t size = abs(cur->pos - endPos) + 1;
if (size > pTsdbReadHandle->outputCapacity) {
int32_t delta = size - pTsdbReadHandle->outputCapacity;
if (ascScan) {
endPos -= delta;
} else {
endPos += delta;
}
}
}
cur->mixBlock = true;
}
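The new branch above handles the case where the file block holds more rows than one output block can carry: after locating the raw end position it pulls endPos back toward cur->pos until the row count fits the capacity, in either scan direction. A standalone sketch of just that clamp, with made-up positions and capacity:

#include <stdio.h>
#include <stdlib.h>

static int clampEndPos(int pos, int endPos, int capacity, int ascending) {
  int size = abs(pos - endPos) + 1;          /* rows selected so far */
  if (size > capacity) {
    int delta = size - capacity;
    endPos = ascending ? endPos - delta      /* shrink toward the start */
                       : endPos + delta;     /* shrink toward the end   */
  }
  return endPos;
}

int main(void) {
  /* ascending scan: 100 candidate rows but only 60 fit in one output block */
  printf("%d\n", clampEndPos(0, 99, 60, 1));   /* -> 59, i.e. rows 0..59  */
  /* descending scan starting from row 99 */
  printf("%d\n", clampEndPos(99, 0, 60, 0));   /* -> 40, i.e. rows 99..40 */
  return 0;
}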
@@ -2369,7 +2410,7 @@ static int32_t createDataBlocksInfo(STsdbReadHandle* pTsdbReadHandle, int32_t nu

static int32_t getFirstFileDataBlock(STsdbReadHandle* pTsdbReadHandle, bool* exists);

static int32_t getDataBlockRv(STsdbReadHandle* pTsdbReadHandle, STableBlockInfo* pNext, bool* exists) {
static int32_t getDataBlock(STsdbReadHandle* pTsdbReadHandle, STableBlockInfo* pNext, bool* exists) {
int32_t step = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? 1 : -1;
SQueryFilePos* cur = &pTsdbReadHandle->cur;
@@ -2478,7 +2519,7 @@ static int32_t getFirstFileDataBlock(STsdbReadHandle* pTsdbReadHandle, bool* exi
cur->fid = pTsdbReadHandle->pFileGroup->fid;

STableBlockInfo* pBlockInfo = &pTsdbReadHandle->pDataBlockInfo[cur->slot];
return getDataBlockRv(pTsdbReadHandle, pBlockInfo, exists);
return getDataBlock(pTsdbReadHandle, pBlockInfo, exists);
}

static bool isEndFileDataBlock(SQueryFilePos* cur, int32_t numOfBlocks, bool ascTrav) {
@@ -2643,7 +2684,7 @@ static int32_t getDataBlocksInFiles(STsdbReadHandle* pTsdbReadHandle, bool* exis
} else {
moveToNextDataBlockInCurrentFile(pTsdbReadHandle);
STableBlockInfo* pNext = &pTsdbReadHandle->pDataBlockInfo[cur->slot];
return getDataBlockRv(pTsdbReadHandle, pNext, exists);
return getDataBlock(pTsdbReadHandle, pNext, exists);
}
}
}
@@ -3546,11 +3546,12 @@ _error:

int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag) {
// todo add more information about exchange operation
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE) {
int32_t type = pOperator->operatorType;
if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
*order = TSDB_ORDER_ASC;
*scanFlag = MAIN_SCAN;
return TSDB_CODE_SUCCESS;
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
} else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pTableScanInfo = pOperator->info;
*order = pTableScanInfo->cond.order;
*scanFlag = pTableScanInfo->scanFlag;
@@ -3910,6 +3911,9 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {

// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
  longjmp(pTaskInfo->env, code);
}

setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order, scanFlag, false);
blockDataEnsureCapacity(pInfo->pRes, pInfo->pRes->info.rows + pBlock->info.rows);
@@ -4311,23 +4315,29 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p
int32_t numOfRows = 4096;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;

// Make sure the size of SSDataBlock will never exceed the size of 2MB.
int32_t TWOMB = 2 * 1024 * 1024;
if (numOfRows * pResBlock->info.rowSize > TWOMB) {
  numOfRows = TWOMB / pResBlock->info.rowSize;
}
initResultSizeInfo(pOperator, numOfRows);

initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str);
setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfCols, pTaskInfo);
pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pInfo->binfo.pCtx, numOfCols);

pOperator->name = "ProjectOperator";
pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pInfo->binfo.pCtx, numOfCols);
pOperator->name = "ProjectOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_PROJECT;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->pExpr = pExprInfo;
pOperator->numOfExprs = num;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->pExpr = pExprInfo;
pOperator->numOfExprs = num;
pOperator->pTaskInfo = pTaskInfo;

pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doProjectOperation, NULL, NULL,
                                       destroyProjectOperatorInfo, NULL, NULL, NULL);

pOperator->pTaskInfo = pTaskInfo;
int32_t code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
  goto _error;
@@ -76,6 +76,11 @@ int32_t firstFunction(SqlFunctionCtx *pCtx);
int32_t lastFunction(SqlFunctionCtx *pCtx);
int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);

bool getUniqueFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool uniqueFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t uniqueFunction(SqlFunctionCtx *pCtx);
int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);

bool getTopBotFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv);
int32_t topFunction(SqlFunctionCtx *pCtx);
int32_t bottomFunction(SqlFunctionCtx *pCtx);
@@ -493,6 +493,21 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l
return TSDB_CODE_SUCCESS;
}

static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
  return TSDB_CODE_SUCCESS;
}

SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
if (QUERY_NODE_COLUMN != nodeType(pPara)) {
  return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
                         "The parameters of UNIQUE can only be columns");
}

pFunc->node.resType = ((SExprNode*)pPara)->resType;
return TSDB_CODE_SUCCESS;
}

static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
int32_t paraLen = LIST_LENGTH(pFunc->pParameterList);
if (paraLen == 0 || paraLen > 2) {
@@ -878,14 +893,14 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
    .finalizeFunc = lastFinalize
  },
  {
    .name = "diff",
    .type = FUNCTION_TYPE_DIFF,
    .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC,
    .translateFunc = translateDiff,
    .getEnvFunc = getDiffFuncEnv,
    .initFunc = diffFunctionSetup,
    .processFunc = diffFunction,
    .finalizeFunc = functionFinalize
    .name = "unique",
    .type = FUNCTION_TYPE_UNIQUE,
    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
    .translateFunc = translateUnique,
    .getEnvFunc = getUniqueFuncEnv,
    .initFunc = uniqueFunctionSetup,
    .processFunc = uniqueFunction,
    .finalizeFunc = uniqueFinalize
  },
  {
    .name = "histogram",
@@ -907,6 +922,16 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
    .processFunc = hllFunction,
    .finalizeFunc = hllFinalize
  },
  {
    .name = "diff",
    .type = FUNCTION_TYPE_DIFF,
    .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC,
    .translateFunc = translateDiff,
    .getEnvFunc = getDiffFuncEnv,
    .initFunc = diffFunctionSetup,
    .processFunc = diffFunction,
    .finalizeFunc = functionFinalize
  },
  {
    .name = "state_count",
    .type = FUNCTION_TYPE_STATE_COUNT,
@@ -28,12 +28,15 @@
#define TAIL_MAX_POINTS_NUM 100
#define TAIL_MAX_OFFSET 100

#define UNIQUE_MAX_RESULT_SIZE (1024*1024*10)

#define HLL_BUCKET_BITS 14 // The bits of the bucket
#define HLL_DATA_BITS (64-HLL_BUCKET_BITS)
#define HLL_BUCKETS (1<<HLL_BUCKET_BITS)
#define HLL_BUCKET_MASK (HLL_BUCKETS-1)
#define HLL_ALPHA_INF 0.721347520444481703680 // constant for 0.5/ln(2)

typedef struct SSumRes {
  union {
    int64_t isum;
@@ -197,6 +200,20 @@ typedef struct STailInfo {
  STailItem **pItems;
} STailInfo;

typedef struct SUniqueItem {
  int64_t timestamp;
  bool isNull;
  char data[];
} SUniqueItem;

typedef struct SUniqueInfo {
  int32_t numOfPoints;
  uint8_t colType;
  int16_t colBytes;
  SHashObj *pHash;
  char pItems[];
} SUniqueInfo;

#define SET_VAL(_info, numOfElem, res) \
  do { \
    if ((numOfElem) <= 0) { \
@@ -216,6 +233,18 @@ typedef struct STailInfo {
  } \
} while (0);

#define DO_UPDATE_SUBSID_RES(ctx, ts) \
  do { \
    for (int32_t _i = 0; _i < (ctx)->subsidiaries.num; ++_i) { \
      SqlFunctionCtx* __ctx = (ctx)->subsidiaries.pCtx[_i]; \
      if (__ctx->functionId == FUNCTION_TS_DUMMY) { \
        __ctx->tag.i = (ts); \
        __ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; \
      } \
      __ctx->fpSet.process(__ctx); \
    } \
  } while (0)

#define UPDATE_DATA(ctx, left, right, num, sign, _ts) \
  do { \
    if (((left) < (right)) ^ (sign)) { \
@@ -748,50 +777,6 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
  return true;
}

#define GET_TS_LIST(x) ((TSKEY*)((x)->ptsList))
#define GET_TS_DATA(x, y) (GET_TS_LIST(x)[(y)])

#define DO_UPDATE_TAG_COLUMNS_WITHOUT_TS(ctx) \
  do { \
    for (int32_t _i = 0; _i < (ctx)->tagInfo.numOfTagCols; ++_i) { \
      SqlFunctionCtx* __ctx = (ctx)->tagInfo.pTagCtxList[_i]; \
      __ctx->fpSet.process(__ctx); \
    } \
  } while (0);

#define DO_UPDATE_SUBSID_RES(ctx, ts) \
  do { \
    for (int32_t _i = 0; _i < (ctx)->subsidiaries.num; ++_i) { \
      SqlFunctionCtx* __ctx = (ctx)->subsidiaries.pCtx[_i]; \
      if (__ctx->functionId == FUNCTION_TS_DUMMY) { \
        __ctx->tag.i = (ts); \
        __ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; \
      } \
      __ctx->fpSet.process(__ctx); \
    } \
  } while (0)

#define UPDATE_DATA(ctx, left, right, num, sign, _ts) \
  do { \
    if (((left) < (right)) ^ (sign)) { \
      (left) = (right); \
      DO_UPDATE_SUBSID_RES(ctx, _ts); \
      (num) += 1; \
    } \
  } while (0)

#define LOOPCHECK_N(val, _col, ctx, _t, _nrow, _start, sign, num) \
  do { \
    _t* d = (_t*)((_col)->pData); \
    for (int32_t i = (_start); i < (_nrow) + (_start); ++i) { \
      if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
        continue; \
      } \
      TSKEY ts = (ctx)->ptsList != NULL ? GET_TS_DATA(ctx, i) : 0; \
      UPDATE_DATA(ctx, val, d[i], num, sign, ts); \
    } \
  } while (0)

static void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
static void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
@@ -1994,6 +1979,99 @@ int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
  return pResInfo->numOfRes;
}

bool getUniqueFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
  pEnv->calcMemSize = sizeof(SUniqueInfo) + UNIQUE_MAX_RESULT_SIZE;
  return true;
}

bool uniqueFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
  if (!functionSetup(pCtx, pResInfo)) {
    return false;
  }

  SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
  pInfo->numOfPoints = 0;
  pInfo->colType = pCtx->resDataInfo.type;
  pInfo->colBytes = pCtx->resDataInfo.bytes;
  if (pInfo->pHash != NULL) {
    taosHashClear(pInfo->pHash);
  } else {
    pInfo->pHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
  }
  return true;
}

static void doUniqueAdd(SUniqueInfo* pInfo, char *data, TSKEY ts, bool isNull) {
  int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes;

  SUniqueItem *pHashItem = taosHashGet(pInfo->pHash, data, hashKeyBytes);
  if (pHashItem == NULL) {
    int32_t size = sizeof(SUniqueItem) + pInfo->colBytes;
    SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + pInfo->numOfPoints * size);
    pItem->timestamp = ts;
    memcpy(pItem->data, data, pInfo->colBytes);

    taosHashPut(pInfo->pHash, data, hashKeyBytes, (char *)pItem, sizeof(SUniqueItem*));
    pInfo->numOfPoints++;
  } else if (pHashItem->timestamp > ts) {
    pHashItem->timestamp = ts;
  }

}

int32_t uniqueFunction(SqlFunctionCtx* pCtx) {
  SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
  SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);

  SInputColumnInfoData* pInput = &pCtx->input;
  TSKEY* tsList = (int64_t*)pInput->pPTS->pData;

  SColumnInfoData* pInputCol = pInput->pData[0];
  SColumnInfoData* pTsOutput = pCtx->pTsOutput;
  SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;

  int32_t startOffset = pCtx->offset;
  for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
    char* data = colDataGetData(pInputCol, i);
    doUniqueAdd(pInfo, data, tsList[i], colDataIsNull_s(pInputCol, i));

    if (sizeof(SUniqueInfo) + pInfo->numOfPoints * (sizeof(SUniqueItem) + pInfo->colBytes) >= UNIQUE_MAX_RESULT_SIZE) {
      taosHashCleanup(pInfo->pHash);
      return 0;
    }
  }

  //taosqsort(pInfo->pItems, pInfo->numOfPoints, POINTER_BYTES, NULL, tailCompFn);

  //for (int32_t i = 0; i < pInfo->numOfPoints; ++i) {
  //  int32_t pos = startOffset + i;
  //  STailItem *pItem = pInfo->pItems[i];
  //  if (pItem->isNull) {
  //    colDataAppendNULL(pOutput, pos);
  //  } else {
  //    colDataAppend(pOutput, pos, pItem->data, false);
  //  }
  //}

  pResInfo->numOfRes = pInfo->numOfPoints;
  return TSDB_CODE_SUCCESS;
}

int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
  SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
  SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
  int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
  SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);

  for (int32_t i = 0; i < pResInfo->numOfRes; ++i) {
    SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + i * (sizeof(SUniqueItem) + pInfo->colBytes));
    colDataAppend(pCol, i, pItem->data, false);
    //TODO: handle ts output
  }

  return pResInfo->numOfRes;
}

bool getDiffFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
  pEnv->calcMemSize = sizeof(SDiffInfo);
  return true;
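doUniqueAdd above keeps one stored entry per distinct value and, when a value repeats, only lowers its remembered timestamp. The following self-contained sketch reproduces that idea without the project's hash container (a linear scan stands in for taosHash purely to keep the example short); all values are made up.

#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t ts; int32_t val; } Item;

static void uniqueAdd(Item *items, int *n, int32_t val, int64_t ts) {
  for (int i = 0; i < *n; ++i) {
    if (items[i].val == val) {                 /* value already recorded */
      if (ts < items[i].ts) items[i].ts = ts;  /* keep the earliest timestamp */
      return;
    }
  }
  items[*n].ts  = ts;                          /* first occurrence of this value */
  items[*n].val = val;
  (*n)++;
}

int main(void) {
  Item items[16];
  int  n = 0;
  int32_t vals[] = {7, 3, 7, 9, 3};
  int64_t tss[]  = {100, 101, 99, 102, 103};
  for (int i = 0; i < 5; ++i) uniqueAdd(items, &n, vals[i], tss[i]);
  for (int i = 0; i < n; ++i) {
    printf("val=%d ts=%lld\n", items[i].val, (long long)items[i].ts);  /* 7@99, 3@101, 9@102 */
  }
  return 0;
}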
@@ -2106,7 +2184,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo
    default:
      ASSERT(0);
  }
}
}

int32_t diffFunction(SqlFunctionCtx* pCtx) {
  SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
@@ -0,0 +1,79 @@
#run user/pass_alter.sim
#run user/basic1.sim
#run user/privilege2.sim
#run user/user_len.sim
#run user/privilege1.sim
#run user/pass_len.sim
#run tstream/basic1.sim
#run tstream/basic0.sim
#run table/basic1.sim
#run trans/create_db.sim
#run stable/alter1.sim
#run stable/vnode3.sim
#run stable/metrics.sim
#run stable/show.sim
#run stable/values.sim
#run stable/dnode3.sim
#run stable/refcount.sim
#run stable/disk.sim
#run db/basic1.sim
#run db/basic3.sim
#run db/basic7.sim
#run db/basic6.sim
#run db/create_all_options.sim
#run db/basic2.sim
#run db/error1.sim
#run db/taosdlog.sim
#run db/alter_option.sim
#run mnode/basic1.sim
#run parser/fourArithmetic-basic.sim
#run parser/groupby-basic.sim
#run snode/basic1.sim
#run query/time_process.sim
#run query/stddev.sim
#run query/interval-offset.sim
#run query/charScalarFunction.sim
#run query/complex_select.sim
#run query/explain.sim
#run query/crash_sql.sim
#run query/diff.sim
#run query/complex_limit.sim
#run query/complex_having.sim
#run query/udf.sim
#run query/complex_group.sim
#run query/interval.sim
#run query/session.sim

print ========> dead lock failed when 2 rows in outputCapacity
run query/scalarFunction.sim
run query/scalarNull.sim
run query/complex_where.sim
run tmq/basic1.sim
run tmq/basic4.sim
run tmq/basic1Of2Cons.sim
run tmq/prepareBasicEnv-1vgrp.sim
run tmq/topic.sim
run tmq/basic4Of2Cons.sim
run tmq/prepareBasicEnv-4vgrp.sim
run tmq/basic3.sim
run tmq/basic2Of2Cons.sim
run tmq/basic2.sim
run tmq/basic3Of2Cons.sim
run tmq/basic2Of2ConsOverlap.sim
run tmq/clearConsume.sim
run qnode/basic1.sim
run dnode/basic1.sim
run show/basic.sim
run insert/basic1.sim
run insert/basic0.sim
run insert/backquote.sim
run insert/null.sim
run sync/oneReplica1VgElectWithInsert.sim
run sync/threeReplica1VgElect.sim
run sync/oneReplica1VgElect.sim
run sync/insertDataByRunBack.sim
run sync/threeReplica1VgElectWihtInsert.sim
run sma/tsmaCreateInsertData.sim
run sma/rsmaCreateInsertQuery.sim
run valgrind/checkError.sim
run bnode/basic1.sim