Merge branch 'master' into xiaoping/add_test_case
commit c7c8ab0082

@@ -82,9 +82,9 @@ def pre_test(){
 }
 }
 sh '''
 
 cd ${WK}
 git pull >/dev/null
 
 export TZ=Asia/Harbin
 date
 git clean -dfx
@@ -6166,7 +6166,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
 }
 
 // projection query on super table does not compatible with "group by" syntax
-if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
+if (tscIsProjectionQuery(pQueryInfo)) {
 return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
 }
 
@@ -27,7 +27,7 @@
 #define MAX_IP_SIZE 20
 #define MAX_PASSWORD_SIZE 20
 #define MAX_HISTORY_SIZE 1000
-#define MAX_COMMAND_SIZE 65536
+#define MAX_COMMAND_SIZE 1048586
 #define HISTORY_FILE ".taos_history"
 
 #define DEFAULT_RES_SHOW_NUM 100
@@ -238,7 +238,7 @@ void resetCommand(Command *cmd, const char s[]) {
 clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
 memset(cmd->buffer, 0, MAX_COMMAND_SIZE);
 memset(cmd->command, 0, MAX_COMMAND_SIZE);
-strcpy(cmd->command, s);
+strncpy(cmd->command, s, MAX_COMMAND_SIZE);
 int size = 0;
 int width = 0;
 getMbSizeInfo(s, &size, &width);
@@ -2573,10 +2573,7 @@ static void* createTable(void *sarg)
 int64_t lastPrintTime = taosGetTimestampMs();
 
 int buff_len;
-if (superTblInfo)
-buff_len = superTblInfo->maxSqlLen;
-else
-buff_len = BUFFER_SIZE;
+buff_len = BUFFER_SIZE / 8;
 
 char *buffer = calloc(buff_len, 1);
 if (buffer == NULL) {
@@ -2624,7 +2621,7 @@ static void* createTable(void *sarg)
 return NULL;
 }
 len += snprintf(buffer + len,
-superTblInfo->maxSqlLen - len,
+buff_len - len,
 "if not exists %s.%s%d using %s.%s tags %s ",
 winfo->db_name, superTblInfo->childTblPrefix,
 i, winfo->db_name,
@@ -2632,7 +2629,7 @@ static void* createTable(void *sarg)
 free(tagsValBuf);
 batchNum++;
 if ((batchNum < superTblInfo->batchCreateTableNum)
-&& ((superTblInfo->maxSqlLen - len)
+&& ((buff_len - len)
 >= (superTblInfo->lenOfTagOfOneRow + 256))) {
 continue;
 }
@@ -3479,9 +3476,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
 if (childTblExists
 && childTblExists->type == cJSON_String
 && childTblExists->valuestring != NULL) {
-if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) {
+if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3))
+&& (g_Dbs.db[i].drop == false)) {
 g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS;
-} else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) {
+} else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2)
+|| (g_Dbs.db[i].drop == true))) {
 g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
 } else {
 g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
@@ -3527,18 +3526,20 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
 }
 
 cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit");
-if (childTbl_limit) {
+if ((childTbl_limit) && (g_Dbs.db[i].drop != true)
+&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
 if (childTbl_limit->type != cJSON_Number) {
 printf("ERROR: failed to read json, childtable_limit\n");
 goto PARSE_OVER;
 }
 g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint;
 } else {
-g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result
+g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid.
 }
 
 cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset");
-if (childTbl_offset) {
+if ((childTbl_offset) && (g_Dbs.db[i].drop != true)
+&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
 if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) {
 printf("ERROR: failed to read json, childtable_offset\n");
 goto PARSE_OVER;
@@ -5164,13 +5165,15 @@ static void startMultiThreadInsertData(int threads, char* db_name,
 int limit, offset;
 
 if ((superTblInfo->childTblExists == TBL_NO_EXISTS) &&
-((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit != 0))) {
+((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) {
 printf("WARNING: offset and limit will not be used since the child tables are not exists!\n");
 }
 
 if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
 && (superTblInfo->childTblOffset >= 0)) {
-if (superTblInfo->childTblLimit < 0) {
+if ((superTblInfo->childTblLimit < 0)
+|| ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
+> (superTblInfo->childTblCount))) {
 superTblInfo->childTblLimit =
 superTblInfo->childTblCount - superTblInfo->childTblOffset;
 }
@@ -315,6 +315,10 @@ void sdbUpdateAsync() {
 taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr);
 }
 
+static int node_cmp(const void *l, const void *r) {
+return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId;
+}
+
 int32_t sdbUpdateSync(void *pMnodes) {
 SMInfos *pMinfos = pMnodes;
 if (!mnodeIsRunning()) {
@@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) {
 return TSDB_CODE_SUCCESS;
 }
 
+qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp);
+
 sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica);
 for (int32_t i = 0; i < syncCfg.replica; ++i) {
 sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn,
@@ -2771,14 +2771,16 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
 SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
 
 if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
+pInfo->stage += 1;
+
 // all data are null, set it completed
 if (pInfo->numOfElems == 0) {
 pResInfo->complete = true;
+
+return;
 } else {
 pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
 }
-
-pInfo->stage += 1;
 }
 
 // the first stage, only acquire the min/max value
@@ -2857,14 +2859,16 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
 SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
 
 if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
+pInfo->stage += 1;
+
 // all data are null, set it completed
 if (pInfo->numOfElems == 0) {
 pResInfo->complete = true;
+
+return;
 } else {
 pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
 }
-
-pInfo->stage += 1;
 }
 
 if (pInfo->stage == 0) {
@@ -4015,7 +4015,7 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in
 return pFillCol;
 }
 
-int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *tsdb, int32_t vgId, bool isSTableQuery) {
+int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) {
 SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
 
 SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
@@ -4026,8 +4026,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts
 pQuery->timeWindowInterpo = timeWindowInterpoRequired(pQuery);
 pQuery->stabledev = isStabledev(pQuery);
 
-pRuntimeEnv->prevResult = prevResult;
-
 setScanLimitationByResultBuffer(pQuery);
 
 int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
@@ -6383,6 +6381,8 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
 SArray* prevResult = NULL;
 if (pQueryMsg->prevResultLen > 0) {
 prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen);
+
+pRuntimeEnv->prevResult = prevResult;
 }
 
 pQuery->precision = tsdbGetCfg(tsdb)->precision;
@@ -6404,7 +6404,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
 }
 
 // filter the qualified
-if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
+if ((code = doInitQInfo(pQInfo, pTsBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
 goto _error;
 }
 
@@ -217,8 +217,8 @@ python3 ./test.py -f query/floatCompare.py
 python3 ./test.py -f query/query1970YearsAf.py
 python3 ./test.py -f query/bug3351.py
 python3 ./test.py -f query/bug3375.py
+python3 ./test.py -f query/queryJoin10tables.py
+python3 ./test.py -f query/queryStddevWithGroupby.py
 
 #stream
 python3 ./test.py -f stream/metric_1.py
@@ -0,0 +1,201 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import taos
+import sys
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+class TDTestCase:
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to excute {__file__}")
+        tdSql.init(conn.cursor())
+
+    def createtable(self):
+
+        # create stbles
+        tdSql.execute("create table if not exists stb1 (ts timestamp, c1 int) tags(t11 int, t12 int)")
+        tdSql.execute("create table if not exists stb2 (ts timestamp, c2 int) tags(t21 int, t22 int)")
+        tdSql.execute("create table if not exists stb3 (ts timestamp, c3 int) tags(t31 int, t32 int)")
+        tdSql.execute("create table if not exists stb4 (ts timestamp, c4 int) tags(t41 int, t42 int)")
+        tdSql.execute("create table if not exists stb5 (ts timestamp, c5 int) tags(t51 int, t52 int)")
+        tdSql.execute("create table if not exists stb6 (ts timestamp, c6 int) tags(t61 int, t62 int)")
+        tdSql.execute("create table if not exists stb7 (ts timestamp, c7 int) tags(t71 int, t72 int)")
+        tdSql.execute("create table if not exists stb8 (ts timestamp, c8 int) tags(t81 int, t82 int)")
+        tdSql.execute("create table if not exists stb9 (ts timestamp, c9 int) tags(t91 int, t92 int)")
+        tdSql.execute("create table if not exists stb10 (ts timestamp, c10 int) tags(t101 int, t102 int)")
+        tdSql.execute("create table if not exists stb11 (ts timestamp, c11 int) tags(t111 int, t112 int)")
+
+        # create normal tables
+        tdSql.execute("create table t10 using stb1 tags(0, 9)")
+        tdSql.execute("create table t11 using stb1 tags(1, 8)")
+        tdSql.execute("create table t12 using stb1 tags(2, 7)")
+        tdSql.execute("create table t13 using stb1 tags(3, 6)")
+        tdSql.execute("create table t14 using stb1 tags(4, 5)")
+        tdSql.execute("create table t15 using stb1 tags(5, 4)")
+        tdSql.execute("create table t16 using stb1 tags(6, 3)")
+        tdSql.execute("create table t17 using stb1 tags(7, 2)")
+        tdSql.execute("create table t18 using stb1 tags(8, 1)")
+        tdSql.execute("create table t19 using stb1 tags(9, 0)")
+        tdSql.execute("create table t110 using stb1 tags(10, 10)")
+
+        tdSql.execute("create table t20 using stb2 tags(0, 9)")
+        tdSql.execute("create table t21 using stb2 tags(1, 8)")
+        tdSql.execute("create table t22 using stb2 tags(2, 7)")
+
+        tdSql.execute("create table t30 using stb3 tags(0, 9)")
+        tdSql.execute("create table t31 using stb3 tags(1, 8)")
+        tdSql.execute("create table t32 using stb3 tags(2, 7)")
+
+    def inserttable(self):
+        for i in range(100):
+            if i<60:
+                tdSql.execute(f"insert into t20 values('2020-10-01 00:00:{i}.000', {i})")
+                tdSql.execute(f"insert into t21 values('2020-10-01 00:00:{i}.000', {i})")
+                tdSql.execute(f"insert into t22 values('2020-10-01 00:00:{i}.000', {i})")
+                tdSql.execute(f"insert into t30 values('2020-10-01 00:00:{i}.000', {i})")
+                tdSql.execute(f"insert into t31 values('2020-10-01 00:00:{i}.000', {i})")
+                tdSql.execute(f"insert into t32 values('2020-10-01 00:00:{i}.000', {i})")
+            else:
+                tdSql.execute(f"insert into t20 values('2020-10-01 00:01:{i-60}.000', {i})")
+                tdSql.execute(f"insert into t21 values('2020-10-01 00:01:{i-60}.000', {i})")
+                tdSql.execute(f"insert into t22 values('2020-10-01 00:01:{i-60}.000', {i})")
+                tdSql.execute(f"insert into t30 values('2020-10-01 00:01:{i-60}.000', {i})")
+                tdSql.execute(f"insert into t31 values('2020-10-01 00:01:{i-60}.000', {i})")
+                tdSql.execute(f"insert into t32 values('2020-10-01 00:01:{i-60}.000', {i})")
+            for j in range(11):
+                if i<60:
+                    tdSql.execute(f"insert into t1{j} values('2020-10-01 00:00:{i}.000', {i})")
+                else:
+                    tdSql.execute(f"insert into t1{j} values('2020-10-01 00:01:{i-60}.000', {i})")
+
+    def queryjointable(self):
+        tdSql.error(
+            '''select from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19
+            where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
+            and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts'''
+        )
+        tdSql.error("select * from t10 where t10.ts=t11.ts")
+        tdSql.error("select * from where t10.ts=t11.ts")
+        tdSql.error("select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19")
+        tdSql.error("select * from stb1, stb2, stb3 where stb1.ts=stb2.ts and stb1.ts=stb3.ts")
+        tdSql.error("select * from stb1, stb2, stb3 where stb1.t11=stb2.t21 and stb1.t11=stb3.t31")
+        tdSql.error("select * from stb1, stb2, stb3")
+        tdSql.error(
+            '''select * from stb1
+            join stb2 on stb1.ts=stb2.ts and stb1.t11=stb2.t21
+            join stb3 on stb1.ts=stb3.ts and stb1.t11=stb3.t31'''
+        )
+        tdSql.error("select * from t10 join t11 on t10.ts=t11.ts join t12 on t11.ts=t12.ts")
+        tdSql.query(
+            '''select * from stb1,stb2,stb3
+            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11 =stb3.t31'''
+        )
+        tdSql.checkRows(300)
+        tdSql.query("select * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts")
+        tdSql.checkRows(100)
+        tdSql.error("selec * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts")
+        tdSql.error("select * form t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts <> t12.ts and t11.ts=t13.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts != t12.ts and t11.ts=t13.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts or t11.ts=t13.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts=t13.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.c1=t12.c2 and t11.c1=t13.c3")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.c3 and t11.c1=t13.ts")
+        tdSql.error("select ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=ts and t11.ts=t13.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and ts>100")
+        tdSql.error("select * from t11,t12,stb1 when t11.ts=t12.ts and t11.ts=stb1.ts")
+        tdSql.error("select t14.ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts1")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t14.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts")
+        tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and t11.c1=t13.c3")
+        tdSql.error(
+            '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20
+            where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
+            and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.ts=t20.ts'''
+        )
+        tdSql.error(
+            '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20
+            where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
+            and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts'''
+        )
+        tdSql.error(
+            '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19
+            where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts
+            and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.c1=t19.c1'''
+        )
+        tdSql.error(
+            '''select * from stb1,stb2,stb3
+            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21'''
+        )
+        tdSql.error(
+            '''select * from stb1,stb2,stb3
+            where stb1.ts=stb2.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31'''
+        )
+        tdSql.error(
+            '''select * from stb1,stb2,stb3
+            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31
+            and stb1.t12=stb3=t32'''
+        )
+        tdSql.error(
+            '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10,stb11
+            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts
+            and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.ts=stb11.ts
+            and stb1.t11=stb2.t21 and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51
+            and stb1.t11=stb6.t61 and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91
+            and stb1.t11=stb10.t101 and stb1.t11=stb11.t111'''
+        )
+        tdSql.error(
+            '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10
+            where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts
+            and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.t11=stb2.t21
+            and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51 and stb1.t11=stb6.t61
+            and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91 and stb1.t11=stb10.t101
+            and stb1.t12=stb11.t102'''
+        )
+
+    def run(self):
+        tdSql.prepare()
+
+        tdLog.printNoPrefix("==========step1:create table")
+        self.createtable()
+
+        tdLog.printNoPrefix("==========step2:insert data")
+        self.inserttable()
+
+        tdLog.printNoPrefix("==========step3:query timestamp type")
+        self.queryjointable()
+
+        # after wal and sync, check again
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+        tdDnodes.start(index)
+
+        tdLog.printNoPrefix("==========step4:query again after wal")
+        self.queryjointable()
+
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,68 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def querysqls(self):
+        tdSql.query("select stddev(c1) from t10 group by c1")
+        tdSql.checkRows(6)
+        tdSql.checkData(0, 0, 0)
+        tdSql.checkData(1, 0, 0)
+        tdSql.checkData(2, 0, 0)
+        tdSql.checkData(3, 0, 0)
+        tdSql.checkData(4, 0, 0)
+        tdSql.checkData(5, 0, 0)
+        tdSql.query("select stddev(c2) from t10")
+        tdSql.checkData(0, 0, 0.5)
+
+    def run(self):
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 36500")
+        tdSql.execute("use db")
+
+        tdLog.printNoPrefix("==========step1:create table && insert data")
+        tdSql.execute("create stable stb1 (ts timestamp , c1 int ,c2 float) tags(t1 int)")
+        tdSql.execute("create table t10 using stb1 tags(1)")
+        tdSql.execute("insert into t10 values ('1969-12-31 00:00:00.000', 2,1)")
+        tdSql.execute("insert into t10 values ('1970-01-01 00:00:00.000', 3,1)")
+        tdSql.execute("insert into t10 values (0, 4,1)")
+        tdSql.execute("insert into t10 values (now-18725d, 1,2)")
+        tdSql.execute("insert into t10 values ('2021-04-06 00:00:00.000', 5,2)")
+        tdSql.execute("insert into t10 values (now+1d,6,2)")
+
+        tdLog.printNoPrefix("==========step2:query and check")
+        self.querysqls()
+
+        tdLog.printNoPrefix("==========step3:after wal,check again")
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+        tdDnodes.start(index)
+        self.querysqls()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -220,6 +220,7 @@ sql_error select sum(c3), ts, c2 from group_tb0 where c1 < 20 group by c1;
 sql_error select sum(c3), first(ts), c2 from group_tb0 where c1 < 20 group by c1;
 sql_error select first(c3), ts, c1, c2 from group_tb0 where c1 < 20 group by c1;
 sql_error select first(c3), last(c3), ts, c1 from group_tb0 where c1 < 20 group by c1;
+sql_error select ts from group_tb0 group by c1;
 
 #===========================interval=====not support======================
 sql_error select count(*), c1 from group_tb0 where c1<20 interval(1y) group by c1;