Merge branch 'master' into test/TD-3295

commit 0e246a7f19
Author: huili, 2021-04-08 11:12:43 +08:00 (committed by GitHub)
5 changed files with 92 additions and 15 deletions

Jenkinsfile (vendored, 2 changes)

@@ -82,9 +82,9 @@ def pre_test(){
        }
    }
    sh '''
    cd ${WK}
    git pull >/dev/null
    export TZ=Asia/Harbin
    date
    git clean -dfx

src/kit/taosdemo/taosdemo.c

@@ -2573,10 +2573,7 @@ static void* createTable(void *sarg)
   int64_t lastPrintTime = taosGetTimestampMs();
 
   int buff_len;
-  if (superTblInfo)
-    buff_len = superTblInfo->maxSqlLen;
-  else
-    buff_len = BUFFER_SIZE;
+  buff_len = BUFFER_SIZE / 8;
 
   char *buffer = calloc(buff_len, 1);
   if (buffer == NULL) {
@@ -2624,7 +2621,7 @@ static void* createTable(void *sarg)
       return NULL;
     }
     len += snprintf(buffer + len,
-        superTblInfo->maxSqlLen - len,
+        buff_len - len,
         "if not exists %s.%s%d using %s.%s tags %s ",
         winfo->db_name, superTblInfo->childTblPrefix,
         i, winfo->db_name,
@@ -2632,7 +2629,7 @@ static void* createTable(void *sarg)
     free(tagsValBuf);
     batchNum++;
     if ((batchNum < superTblInfo->batchCreateTableNum)
-        && ((superTblInfo->maxSqlLen - len)
+        && ((buff_len - len)
         >= (superTblInfo->lenOfTagOfOneRow + 256))) {
       continue;
     }
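A note on why both snprintf bounds above had to move with the allocation: the create-table buffer is now only BUFFER_SIZE / 8 bytes, so any bound still derived from superTblInfo->maxSqlLen could let an append run past the smaller allocation. A minimal sketch of the bounded-append idiom, assuming an illustrative helper name and buffer size that are not from this commit:

    #include <stdio.h>
    #include <stdlib.h>

    /* Append a formatted piece to buf, tracking the running offset *len and
     * always bounding snprintf by the real allocation size buf_len. */
    static int append_sql(char *buf, int buf_len, int *len, const char *piece) {
      int n = snprintf(buf + *len, buf_len - *len, "%s", piece);
      if (n < 0 || n >= buf_len - *len) return -1;  /* would have truncated */
      *len += n;
      return 0;
    }

    int main(void) {
      int buff_len = 1024;  /* stands in for BUFFER_SIZE / 8 */
      char *buffer = calloc(buff_len, 1);
      if (buffer == NULL) return 1;

      int len = 0;
      append_sql(buffer, buff_len, &len, "create table ");
      append_sql(buffer, buff_len, &len, "if not exists db.tb1 using db.stb tags (1) ");
      printf("%s\n", buffer);
      free(buffer);
      return 0;
    }

Every call bounds snprintf by the real allocation size and advances a running offset, which is exactly the invariant the hunks above restore.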
@@ -3479,9 +3476,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
       if (childTblExists
           && childTblExists->type == cJSON_String
           && childTblExists->valuestring != NULL) {
-        if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) {
+        if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3))
+            && (g_Dbs.db[i].drop == false)) {
           g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS;
-        } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) {
+        } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2)
+            || (g_Dbs.db[i].drop == true))) {
           g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
         } else {
           g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
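The new drop check changes what "childtable_exists": "yes" means: it is honored only when the database is not being dropped, since drop implies every child table will be re-created. A hedged restatement of the rule as a standalone function (the enum values are illustrative, not from this commit):

    #include <stdbool.h>
    #include <strings.h>

    enum { TBL_NO_EXISTS = 0, TBL_ALREADY_EXISTS = 1 };  /* illustrative values */

    /* "childtable_exists": "yes" only survives when the database is kept;
     * any other value, or drop == true, means the tables get (re)created. */
    static int resolveChildTblExists(const char *value, bool drop) {
      if ((0 == strncasecmp(value, "yes", 3)) && !drop) return TBL_ALREADY_EXISTS;
      return TBL_NO_EXISTS;
    }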
@@ -3527,18 +3526,20 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
       }
 
       cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit");
-      if (childTbl_limit) {
+      if ((childTbl_limit) && (g_Dbs.db[i].drop != true)
+          && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
         if (childTbl_limit->type != cJSON_Number) {
           printf("ERROR: failed to read json, childtable_limit\n");
           goto PARSE_OVER;
         }
         g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint;
       } else {
-        g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result
+        g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 returns every child table; with drop=yes all tables are re-created, so a configured limit is ignored
       }
 
       cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset");
-      if (childTbl_offset) {
+      if ((childTbl_offset) && (g_Dbs.db[i].drop != true)
+          && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
         if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) {
           printf("ERROR: failed to read json, childtable_offset\n");
           goto PARSE_OVER;
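childtable_limit and childtable_offset now get the same gating: they are consulted only when the database is kept (drop != true) and the child tables already exist; in every other case the limit falls back to -1, meaning use all child tables. A sketch of the limit rule under those assumptions (function and parameter names are illustrative):

    #include <stdbool.h>

    enum { TBL_NO_EXISTS = 0, TBL_ALREADY_EXISTS = 1 };  /* illustrative values */

    /* A configured childtable_limit is honored only for pre-existing tables
     * of a kept database; -1 means "use every child table". */
    static int resolveChildTblLimit(bool haveLimit, int limit,
                                    bool drop, int childTblExists) {
      if (haveLimit && !drop && (childTblExists == TBL_ALREADY_EXISTS)) return limit;
      return -1;
    }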
@@ -5170,7 +5171,9 @@ static void startMultiThreadInsertData(int threads, char* db_name,
 
   if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
       && (superTblInfo->childTblOffset >= 0)) {
-    if (superTblInfo->childTblLimit < 0) {
+    if ((superTblInfo->childTblLimit < 0)
+        || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
+            > (superTblInfo->childTblCount))) {
       superTblInfo->childTblLimit =
           superTblInfo->childTblCount - superTblInfo->childTblOffset;
     }
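Besides handling a negative limit, the widened condition clamps a limit that reaches past the end of the child-table list. For example, with childTblCount = 100, childTblOffset = 90, and a configured limit of 50, offset + limit = 140 exceeds the count, so the limit is rewritten to 100 - 90 = 10. A standalone check of that arithmetic (values illustrative):

    #include <stdio.h>

    /* Clamp so that offset + limit never exceeds count; a negative limit
     * means "everything after offset". Mirrors the condition above. */
    static int clampChildTblLimit(int count, int offset, int limit) {
      if (limit < 0 || offset + limit > count) limit = count - offset;
      return limit;
    }

    int main(void) {
      printf("%d\n", clampChildTblLimit(100, 90, 50));  /* 10: 90 + 50 > 100 */
      printf("%d\n", clampChildTblLimit(100, 90, -1));  /* 10: negative limit */
      printf("%d\n", clampChildTblLimit(100, 10, 50));  /* 50: fits, unchanged */
      return 0;
    }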

src/mnode/src/mnodeSdb.c

@@ -315,6 +315,10 @@ void sdbUpdateAsync() {
   taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr);
 }
 
+static int node_cmp(const void *l, const void *r) {
+  return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId;
+}
+
 int32_t sdbUpdateSync(void *pMnodes) {
   SMInfos *pMinfos = pMnodes;
   if (!mnodeIsRunning()) {
@@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) {
     return TSDB_CODE_SUCCESS;
   }
 
+  qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp);
+
   sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica);
   for (int32_t i = 0; i < syncCfg.replica; ++i) {
     sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn,
@@ -1131,4 +1137,4 @@ static void *sdbWorkerFp(void *pWorker) {
 
 int32_t sdbGetReplicaNum() {
   return tsSdbMgmt.cfg.replica;
-}
+}

tests/pytest/fulltest.sh

@@ -218,7 +218,7 @@ python3 ./test.py -f query/query1970YearsAf.py
 python3 ./test.py -f query/bug3351.py
 python3 ./test.py -f query/bug3375.py
 python3 ./test.py -f query/queryJoin10tables.py
+python3 ./test.py -f query/queryStddevWithGroupby.py
 #stream
 python3 ./test.py -f stream/metric_1.py

tests/pytest/query/queryStddevWithGroupby.py (new file)

@@ -0,0 +1,68 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def querysqls(self):
+        tdSql.query("select stddev(c1) from t10 group by c1")
+        tdSql.checkRows(6)
+        tdSql.checkData(0, 0, 0)
+        tdSql.checkData(1, 0, 0)
+        tdSql.checkData(2, 0, 0)
+        tdSql.checkData(3, 0, 0)
+        tdSql.checkData(4, 0, 0)
+        tdSql.checkData(5, 0, 0)
+        tdSql.query("select stddev(c2) from t10")
+        tdSql.checkData(0, 0, 0.5)
+
+    def run(self):
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 36500")
+        tdSql.execute("use db")
+
+        tdLog.printNoPrefix("==========step1:create table && insert data")
+        tdSql.execute("create stable stb1 (ts timestamp , c1 int ,c2 float) tags(t1 int)")
+        tdSql.execute("create table t10 using stb1 tags(1)")
+        tdSql.execute("insert into t10 values ('1969-12-31 00:00:00.000', 2,1)")
+        tdSql.execute("insert into t10 values ('1970-01-01 00:00:00.000', 3,1)")
+        tdSql.execute("insert into t10 values (0, 4,1)")
+        tdSql.execute("insert into t10 values (now-18725d, 1,2)")
+        tdSql.execute("insert into t10 values ('2021-04-06 00:00:00.000', 5,2)")
+        tdSql.execute("insert into t10 values (now+1d,6,2)")
+
+        tdLog.printNoPrefix("==========step2:query and check")
+        self.querysqls()
+
+        tdLog.printNoPrefix("==========step3:after wal,check again")
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+        tdDnodes.start(index)
+        self.querysqls()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
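Why the test expects stddev(c2) to be exactly 0.5: the six inserted rows give c2 three 1s and three 2s, so the mean is 1.5 and every sample deviates from it by 0.5; assuming stddev here is the population standard deviation (which the expected value implies), the result is 0.5. The stddev(c1) checks are all 0 because each distinct c1 value forms a single-row group. A quick arithmetic check:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      double c2[] = {1, 1, 1, 2, 2, 2};  /* the six c2 values inserted above */
      int n = 6;
      double mean = 0, var = 0;
      for (int i = 0; i < n; i++) mean += c2[i] / n;
      for (int i = 0; i < n; i++) var += (c2[i] - mean) * (c2[i] - mean) / n;
      printf("stddev = %.3f\n", sqrt(var));  /* prints 0.500 */
      return 0;
    }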