Merge branch 'develop' into filterComboTest

commit 3070277c56
Ping Xiao, 2020-05-23 16:59:04 +08:00
75 changed files with 1534 additions and 733 deletions

.gitignore (vendored, 24 changed lines)
View File

@ -41,3 +41,27 @@ pysim/
# Doxygen Generated files
html/
/.vs
/CMakeFiles/3.10.2
/CMakeCache.txt
/Makefile
/*.cmake
/deps
/src/cq/test/CMakeFiles/cqtest.dir/*.cmake
*.cmake
/src/cq/test/CMakeFiles/cqtest.dir/*.make
*.make
link.txt
*.internal
*.includecache
*.marks
Makefile
CMakeError.log
*.log
/CMakeFiles/CMakeRuleHashes.txt
/CMakeFiles/Makefile2
/CMakeFiles/TargetDirectories.txt
/CMakeFiles/cmake.check_cache
/out/isenseconfig/WSL-Clang-Debug
/out/isenseconfig/WSL-GCC-Debug
/test/cfg

CMakeSettings.json (new file, 25 lines)
View File

@ -0,0 +1,25 @@
{
"configurations": [
{
"name": "WSL-GCC-Debug",
"generator": "Unix Makefiles",
"configurationType": "Debug",
"buildRoot": "${projectDir}\\build\\",
"installRoot": "${projectDir}\\out\\install\\${name}",
"cmakeExecutable": "/usr/bin/cmake",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"inheritEnvironments": [ "linux_x64" ],
"wslPath": "${defaultWSLPath}",
"addressSanitizerRuntimeFlags": "detect_leaks=0",
"variables": [
{
"name": "CMAKE_INSTALL_PREFIX",
"value": "/mnt/d/TDengine/TDengine/build",
"type": "PATH"
}
]
}
]
}

View File

@ -226,11 +226,8 @@ typedef struct {
int command;
uint8_t msgType;
union {
bool existsCheck; // check if the table exists or not
bool autoCreated; // if the table is missing, on-the-fly create it. during getmeterMeta
int8_t dataSourceType; // load data from file or not
};
bool autoCreated; // if the table is missing, on-the-fly create it. during getmeterMeta
int8_t dataSourceType; // load data from file or not
union {
int32_t count;

View File

@ -956,7 +956,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
} else {
sql = sToken.z;
}
code = tscGetTableMeta(pSql, pTableMetaInfo);
code = tscGetMeterMetaEx(pSql, pTableMetaInfo, false);
if (pCmd->curSql == NULL) {
assert(code == TSDB_CODE_ACTION_IN_PROGRESS);

View File

@ -638,7 +638,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
(*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * 1);
(*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pSql->numOfSubs);
if (*pMemBuffer == NULL) {
tscError("%p failed to allocate memory", pSql);
pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;

View File

@ -571,7 +571,6 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
pTableIdInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pTableMeta->uid, dfltKey));
pQueryMsg->numOfTables = htonl(1); // set the number of tables
pMsg += sizeof(STableIdInfo);
} else {
int32_t index = pTableMetaInfo->vgroupIndex;
@ -601,8 +600,8 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
}
}
tscTrace("%p vgId:%d, query on table:%s, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), pTableMetaInfo->name,
pTableMeta->uid);
tscTrace("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), pTableMetaInfo->name,
pTableMeta->sid, pTableMeta->uid);
return pMsg;
}
@ -1869,6 +1868,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
}
free(pTableMeta);
tscTrace("%p recv table meta: %"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->uid, pTableMeta->sid, pTableMetaInfo->name);
return TSDB_CODE_SUCCESS;
}

View File

@ -103,6 +103,7 @@ extern int32_t tsOfflineThreshold;
extern int32_t tsMgmtEqualVnodeNum;
extern int32_t tsEnableHttpModule;
extern int32_t tsEnableMqttModule;
extern int32_t tsEnableMonitorModule;
extern int32_t tsRestRowLimit;
@ -147,6 +148,7 @@ extern int32_t jniDebugFlag;
extern int32_t tmrDebugFlag;
extern int32_t sdbDebugFlag;
extern int32_t httpDebugFlag;
extern int32_t mqttDebugFlag;
extern int32_t monitorDebugFlag;
extern int32_t uDebugFlag;
extern int32_t rpcDebugFlag;

View File

@ -427,27 +427,25 @@ void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, SDataCol
TSKEY key1 = (*iter1 >= src1->numOfPoints) ? INT64_MAX : ((TSKEY *)(src1->cols[0].pData))[*iter1];
TSKEY key2 = (*iter2 >= src2->numOfPoints) ? INT64_MAX : ((TSKEY *)(src2->cols[0].pData))[*iter2];
if (key1 < key2) {
if (key1 <= key2) {
for (int i = 0; i < src1->numOfCols; i++) {
ASSERT(target->cols[i].type == src1->cols[i].type);
dataColAppendVal(target->cols[i].pData, tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfPoints,
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfPoints,
target->maxPoints);
}
target->numOfPoints++;
(*iter1)++;
} else if (key1 > key2) {
if (key1 == key2) (*iter2)++;
} else {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
dataColAppendVal(target->cols[i].pData, tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfPoints,
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfPoints,
target->maxPoints);
}
target->numOfPoints++;
(*iter2)++;
} else {
// TODO: deal with duplicate keys
ASSERT(false);
}
}
}
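
For context on the tdMergeTwoDataCols change above: the old code asserted on duplicate timestamps, while the rewritten loop keeps the row from src1 and simply advances the second iterator past the duplicate. A minimal standalone sketch of that merge policy over plain int64_t key arrays (illustrative only, not the TDengine column structures):

#include <stdint.h>
#include <stdio.h>

/* Merge two ascending key arrays; on equal keys keep the value from the
 * first source and skip the duplicate in the second, mirroring the
 * "key1 <= key2 ... if (key1 == key2) (*iter2)++" logic in the diff. */
static int mergeKeys(const int64_t *a, int na, const int64_t *b, int nb, int64_t *out) {
  int i = 0, j = 0, n = 0;
  while (i < na || j < nb) {
    int64_t k1 = (i < na) ? a[i] : INT64_MAX;
    int64_t k2 = (j < nb) ? b[j] : INT64_MAX;
    if (k1 <= k2) {
      out[n++] = k1;
      i++;
      if (k1 == k2) j++;  /* drop the duplicate row from the second source */
    } else {
      out[n++] = k2;
      j++;
    }
  }
  return n;
}

int main(void) {
  int64_t a[] = {1, 3, 5}, b[] = {3, 4}, out[5];
  int n = mergeKeys(a, 3, b, 2, out);
  for (int i = 0; i < n; i++) printf("%lld ", (long long)out[i]);
  printf("\n");  /* prints: 1 3 4 5 */
  return 0;
}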

View File

@ -120,6 +120,7 @@ int32_t tsOfflineThreshold = 86400*100; // seconds 10days
int32_t tsMgmtEqualVnodeNum = 4;
int32_t tsEnableHttpModule = 1;
int32_t tsEnableMqttModule = 0; // not finished yet, not started it by default
int32_t tsEnableMonitorModule = 0;
int32_t tsRestRowLimit = 10240;
@ -134,6 +135,7 @@ int32_t cDebugFlag = 135;
int32_t jniDebugFlag = 131;
int32_t odbcDebugFlag = 131;
int32_t httpDebugFlag = 131;
int32_t mqttDebugFlag = 131;
int32_t monitorDebugFlag = 131;
int32_t qDebugFlag = 131;
int32_t rpcDebugFlag = 135;
@ -212,6 +214,7 @@ void taosSetAllDebugFlag() {
jniDebugFlag = debugFlag;
odbcDebugFlag = debugFlag;
httpDebugFlag = debugFlag;
mqttDebugFlag = debugFlag;
monitorDebugFlag = debugFlag;
rpcDebugFlag = debugFlag;
uDebugFlag = debugFlag;
@ -890,6 +893,17 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqtt";
cfg.ptr = &tsEnableMqttModule;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
cfg.ptrLength = 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "monitor";
cfg.ptr = &tsEnableMonitorModule;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -1112,6 +1126,17 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqttDebugFlag";
cfg.ptr = &mqttDebugFlag;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG;
cfg.minValue = 0;
cfg.maxValue = 255;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "monitorDebugFlag";
cfg.ptr = &monitorDebugFlag;
cfg.valType = TAOS_CFG_VTYPE_INT32;
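
The two new option blocks above ("mqtt" and "mqttDebugFlag") follow the file's existing pattern: fill one descriptor, hand it to taosInitConfigOption(), then reuse the descriptor for the next option. A simplified standalone mirror of that table-driven idea, using a hypothetical SOption type rather than the real TAOS_CFG descriptor:

#include <stdio.h>

/* Hypothetical miniature of a table-driven option registry: each option is
 * described once (name, storage, range) and registered through one helper,
 * which is the shape of the "cfg.option = ...; taosInitConfigOption(cfg);"
 * blocks in the diff. */
typedef struct {
  const char *name;
  int        *ptr;      /* where the value is stored */
  int         minValue;
  int         maxValue;
} SOption;

static SOption gOptions[16];
static int     gNumOptions = 0;

static int enableMqtt = 0;
static int mqttDebugFlag = 131;

static void registerOption(SOption opt) { gOptions[gNumOptions++] = opt; }

int main(void) {
  registerOption((SOption){"mqtt", &enableMqtt, 0, 1});
  registerOption((SOption){"mqttDebugFlag", &mqttDebugFlag, 0, 255});
  for (int i = 0; i < gNumOptions; i++)
    printf("%s default=%d range=[%d,%d]\n", gOptions[i].name, *gOptions[i].ptr,
           gOptions[i].minValue, gOptions[i].maxValue);
  return 0;
}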

View File

@ -304,7 +304,7 @@ tDataTypeDescriptor tDataTypeDesc[11] = {
{TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", tsCompressFloat, tsDecompressFloat, getStatics_f},
{TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", tsCompressDouble, tsDecompressDouble, getStatics_d},
{TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", tsCompressString, tsDecompressString, NULL},
{TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", tsCompressTimestamp, tsDecompressTimestamp, NULL},
{TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64},
{TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", tsCompressString, tsDecompressString, NULL},
};
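
With the change above, TIMESTAMP columns get a statistics callback (getStatics_i64) like the other numeric types instead of NULL. The callback's implementation is not part of this diff; purely as an illustration, per-column statistics of this kind reduce to one pass that tracks min, max and sum:

#include <stdint.h>
#include <stdio.h>

/* Illustrative single-pass statistics over an int64 column; NOT the real
 * getStatics_i64, whose signature and null handling live elsewhere. */
static void int64ColumnStats(const int64_t *col, int rows,
                             int64_t *min, int64_t *max, int64_t *sum) {
  *min = INT64_MAX;
  *max = INT64_MIN;
  *sum = 0;
  for (int i = 0; i < rows; i++) {
    if (col[i] < *min) *min = col[i];
    if (col[i] > *max) *max = col[i];
    *sum += col[i];
  }
}

int main(void) {
  int64_t ts[] = {1590224344000, 1590224345000, 1590224343000};
  int64_t mn, mx, sum;
  int64ColumnStats(ts, 3, &mn, &mx, &sum);
  printf("min=%lld max=%lld sum=%lld\n", (long long)mn, (long long)mx, (long long)sum);
  return 0;
}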

View File

@ -16,7 +16,7 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
AUX_SOURCE_DIRECTORY(src SRC)
ADD_EXECUTABLE(taosd ${SRC})
TARGET_LINK_LIBRARIES(taosd mnode taos_static monitor http tsdb twal vnode cJson lz4)
TARGET_LINK_LIBRARIES(taosd mnode taos_static monitor http mqtt tsdb twal vnode cJson lz4)
IF (TD_ACCOUNT)
TARGET_LINK_LIBRARIES(taosd account)

View File

@ -37,6 +37,7 @@ static SDnodeRunStatus tsDnodeRunStatus = TSDB_DNODE_RUN_STATUS_STOPPED;
int32_t dnodeInitSystem() {
dnodeSetRunStatus(TSDB_DNODE_RUN_STATUS_INITIALIZE);
tscEmbedded = 1;
taosBlockSIGPIPE();
taosResolveCRC();
taosInitGlobalCfg();
taosReadGlobalLogCfg();
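
dnodeInitSystem() now calls taosBlockSIGPIPE() before reading the configuration; the helper's body is not shown in this diff. As a sketch only, the usual way to get that effect is to mask SIGPIPE so a write to a closed socket returns EPIPE instead of terminating the daemon:

#include <signal.h>
#include <stdio.h>

/* Sketch only: block SIGPIPE process-wide so send()/write() on a broken
 * connection returns -1/EPIPE rather than killing the process. */
static int blockSigpipe(void) {
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGPIPE);
  return sigprocmask(SIG_BLOCK, &set, NULL);
}

int main(void) {
  if (blockSigpipe() != 0) {
    perror("sigprocmask");
    return 1;
  }
  printf("SIGPIPE blocked\n");
  return 0;
}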

View File

@ -20,6 +20,7 @@
#include "trpc.h"
#include "mnode.h"
#include "http.h"
#include "mqtt.h"
#include "monitor.h"
#include "dnodeInt.h"
#include "dnodeModule.h"
@ -62,6 +63,16 @@ static void dnodeAllocModules() {
dnodeSetModuleStatus(TSDB_MOD_HTTP);
}
tsModule[TSDB_MOD_MQTT].enable = (tsEnableMqttModule == 1);
tsModule[TSDB_MOD_MQTT].name = "mqtt";
tsModule[TSDB_MOD_MQTT].initFp = mqttInitSystem;
tsModule[TSDB_MOD_MQTT].cleanUpFp = mqttCleanUpSystem;
tsModule[TSDB_MOD_MQTT].startFp = mqttStartSystem;
tsModule[TSDB_MOD_MQTT].stopFp = mqttStopSystem;
if (tsEnableMqttModule) {
dnodeSetModuleStatus(TSDB_MOD_MQTT);
}
tsModule[TSDB_MOD_MONITOR].enable = (tsEnableMonitorModule == 1);
tsModule[TSDB_MOD_MONITOR].name = "monitor";
tsModule[TSDB_MOD_MONITOR].initFp = monitorInitSystem;

src/inc/mqtt.h (new file, 35 lines)
View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MQTT_H
#define TDENGINE_MQTT_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
int32_t mqttGetReqCount();
int32_t mqttInitSystem();
int32_t mqttStartSystem();
void mqttStopSystem();
void mqttCleanUpSystem();
#ifdef __cplusplus
}
#endif
#endif
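
The header only declares the module entry points; the dnodeModule.c hunk earlier in this commit wires them into the module table through function pointers. Used directly, the expected call order is init, start, then stop and clean up, e.g. (assuming mqtt.h is on the include path and the mqtt library is linked):

#include <stdio.h>
#include "mqtt.h"   /* the header introduced above */

int main(void) {
  if (mqttInitSystem() != 0) {
    fprintf(stderr, "mqtt init failed\n");
    return 1;
  }
  if (mqttStartSystem() != 0) {
    fprintf(stderr, "mqtt start failed\n");
    mqttCleanUpSystem();
    return 1;
  }
  printf("mqtt module running, %d requests so far\n", mqttGetReqCount());
  mqttStopSystem();
  mqttCleanUpSystem();
  return 0;
}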

View File

@ -376,6 +376,7 @@ typedef enum {
TSDB_MOD_MGMT,
TSDB_MOD_HTTP,
TSDB_MOD_MONITOR,
TSDB_MOD_MQTT,
TSDB_MOD_MAX
} EModuleType;

View File

@ -725,6 +725,8 @@ typedef struct {
typedef struct {
uint32_t queryId;
uint32_t streamId;
uint32_t totalDnodes;
uint32_t onlineDnodes;
int8_t killConnection;
SRpcIpSet ipList;
} SCMHeartBeatRsp;
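
The two counters added to SCMHeartBeatRsp are filled with htonl() in the mgmtProcessHeartBeatMsg hunk later in this commit, so a receiver must undo the conversion with ntohl(). A minimal standalone sketch of that round trip (plain struct, not the real message layout):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration only: network byte order on the wire, host order in memory. */
typedef struct {
  uint32_t totalDnodes;
  uint32_t onlineDnodes;
} DnodeCounters;

int main(void) {
  DnodeCounters wire;
  wire.totalDnodes  = htonl(3);   /* sender side */
  wire.onlineDnodes = htonl(2);

  uint32_t total  = ntohl(wire.totalDnodes);   /* receiver side */
  uint32_t online = ntohl(wire.onlineDnodes);
  printf("dnodes online/total: %u/%u\n", online, total);
  return 0;
}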

View File

@ -362,6 +362,26 @@ int main(int argc, char *argv[]) {
time_t tTime = time(NULL);
struct tm tm = *localtime(&tTime);
printf("###################################################################\n");
printf("# Server IP: %s:%hu\n", ip_addr == NULL ? "localhost" : ip_addr, port);
printf("# User: %s\n", user);
printf("# Password: %s\n", pass);
printf("# Use metric: %s\n", use_metric ? "true" : "false");
printf("# Datatype of Columns: %s\n", dataString);
printf("# Binary Length(If applicable): %d\n",
(strcasestr(dataString, "BINARY") != NULL) ? len_of_binary : -1);
printf("# Number of Columns per record: %d\n", ncols_per_record);
printf("# Number of Connections: %d\n", nconnections);
printf("# Number of Tables: %d\n", ntables);
printf("# Number of Data per Table: %d\n", nrecords_per_table);
printf("# Records/Request: %d\n", nrecords_per_request);
printf("# Database name: %s\n", db_name);
printf("# Table prefix: %s\n", tb_prefix);
printf("# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
printf("###################################################################\n\n");
printf("Press enter key to continue");
getchar();
fprintf(fp, "###################################################################\n");
fprintf(fp, "# Server IP: %s:%hu\n", ip_addr == NULL ? "localhost" : ip_addr, port);
@ -858,15 +878,16 @@ void generateData(char *res, char **data_type, int num_of_cols, int64_t timestam
pstr += sprintf(pstr, ")");
}
static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890";
void rand_string(char *str, int size) {
memset(str, 0, size);
const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890";
char *sptr = str;
if (size) {
str[0] = 0;
if (size > 0) {
--size;
for (size_t n = 0; n < size; n++) {
int n;
for (n = 0; n < size; n++) {
int key = rand() % (int)(sizeof charset - 1);
sptr += sprintf(sptr, "%c", charset[key]);
str[n] = charset[key];
}
str[n] = 0;
}
}
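
The rewritten rand_string above reserves one byte of the given size for the terminating NUL and writes characters directly instead of building them with sprintf. A compact standalone copy of the fixed helper plus a usage example (the size argument counts the terminator):

#include <stdio.h>
#include <stdlib.h>

static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890";

/* size counts the terminating NUL, so at most size-1 random characters are produced */
static void rand_string(char *str, int size) {
  str[0] = 0;
  if (size > 0) {
    --size;
    int n;
    for (n = 0; n < size; n++) {
      int key = rand() % (int)(sizeof charset - 1);
      str[n] = charset[key];
    }
    str[n] = 0;
  }
}

int main(void) {
  char buf[9];                      /* room for 8 characters + NUL */
  rand_string(buf, (int)sizeof buf);
  printf("%s\n", buf);              /* e.g. "aK3fZ09b" */
  return 0;
}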

View File

@ -533,7 +533,7 @@ int taosDumpOut(SDumpArguments *arguments) {
}
}
taos_free_result(result);
// taos_free_result(result);
if (count == 0) {
fprintf(stderr, "No databases valid to dump\n");
@ -722,6 +722,57 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols
count_temp = counter;
for (; counter < numOfCols; counter++) {
TAOS_ROW row = NULL;
sprintf(command, "select %s from %s limit 1", tableDes->cols[counter].field, tableDes->name);
if (taos_query(taos, command) != 0) {
fprintf(stderr, "failed to run command %s\n", command);
return;
}
result = taos_use_result(taos);
if (result == NULL) {
fprintf(stderr, "failed to use result\n");
return;
}
TAOS_FIELD *fields = taos_fetch_fields(result);
row = taos_fetch_row(result);
switch (fields[0].type) {
case TSDB_DATA_TYPE_BOOL:
sprintf(tableDes->cols[counter].note, "%d", ((((int)(*((char *)row[0]))) == 1) ? 1 : 0));
break;
case TSDB_DATA_TYPE_TINYINT:
sprintf(tableDes->cols[counter].note, "%d", (int)(*((char *)row[0])));
break;
case TSDB_DATA_TYPE_SMALLINT:
sprintf(tableDes->cols[counter].note, "%d", (int)(*((short *)row[0])));
break;
case TSDB_DATA_TYPE_INT:
sprintf(tableDes->cols[counter].note, "%d", *((int *)row[0]));
break;
case TSDB_DATA_TYPE_BIGINT:
sprintf(tableDes->cols[counter].note, "%" PRId64 "", *((int64_t *)row[0]));
break;
case TSDB_DATA_TYPE_FLOAT:
sprintf(tableDes->cols[counter].note, "%f", GET_FLOAT_VAL(row[0]));
break;
case TSDB_DATA_TYPE_DOUBLE:
sprintf(tableDes->cols[counter].note, "%f", GET_DOUBLE_VAL(row[0]));
break;
case TSDB_DATA_TYPE_TIMESTAMP:
sprintf(tableDes->cols[counter].note, "%" PRId64 "", *(int64_t *)row[0]);
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
default:
strncpy(tableDes->cols[counter].note, (char *)row[0], fields[0].bytes);
break;
}
taos_free_result(result);
if (counter != count_temp) {
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {

View File

@ -66,7 +66,6 @@ typedef struct SMnodeObj {
SDnodeObj *pDnode;
} SMnodeObj;
// todo use dynamic length string
typedef struct {
char *tableId;
int8_t type;

View File

@ -34,6 +34,7 @@ char* mgmtGetDnodeStatusStr(int32_t dnodeStatus);
void mgmtMonitorDnodeModule();
int32_t mgmtGetDnodesNum();
int32_t mgmtGetOnlinDnodesNum();
void * mgmtGetNextDnode(void *pIter, SDnodeObj **pDnode);
void mgmtIncDnodeRef(SDnodeObj *pDnode);
void mgmtDecDnodeRef(SDnodeObj *pDnode);

View File

@ -31,6 +31,7 @@ void * mgmtGetNextChildTable(void *pIter, SChildTableObj **pTable);
void * mgmtGetNextSuperTable(void *pIter, SSuperTableObj **pTable);
void mgmtDropAllChildTables(SDbObj *pDropDb);
void mgmtDropAllSuperTables(SDbObj *pDropDb);
void mgmtDropAllChildTablesInVgroups(SVgObj *pVgroup);
#ifdef __cplusplus
}

View File

@ -74,7 +74,6 @@ static int32_t mgmtDnodeActionDelete(SSdbOper *pOper) {
SDnodeObj *pDnode = pOper->pObj;
#ifndef _SYNC
//TODO: drop dnode local
mgmtDropAllDnodeVgroups(pDnode);
#endif
mgmtDropMnodeLocal(pDnode->dnodeId);
@ -179,6 +178,23 @@ int32_t mgmtGetDnodesNum() {
return sdbGetNumOfRows(tsDnodeSdb);
}
int32_t mgmtGetOnlinDnodesNum(char *ep) {
SDnodeObj *pDnode = NULL;
void * pIter = NULL;
int32_t onlineDnodes = 0;
while (1) {
pIter = mgmtGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
if (pDnode->status != TAOS_DN_STATUS_OFFLINE) onlineDnodes++;
mgmtDecDnodeRef(pDnode);
}
sdbFreeIter(pIter);
return onlineDnodes;
}
void *mgmtGetDnode(int32_t dnodeId) {
return sdbGetRow(tsDnodeSdb, &dnodeId);
}
@ -397,7 +413,6 @@ static int32_t mgmtCreateDnode(char *ep) {
return code;
}
//TODO drop others tables
int32_t mgmtDropDnode(SDnodeObj *pDnode) {
SSdbOper oper = {
.type = SDB_OPER_GLOBAL,
@ -410,7 +425,7 @@ int32_t mgmtDropDnode(SDnodeObj *pDnode) {
code = TSDB_CODE_SDB_ERROR;
}
mLPrint("dnode:%d is dropped from cluster, result:%s", pDnode->dnodeId, tstrerror(code));
mLPrint("dnode:%d, is dropped from cluster, result:%s", pDnode->dnodeId, tstrerror(code));
return code;
}

View File

@ -210,6 +210,9 @@ void mgmtUpdateMnodeIpSet() {
mgmtMnodeWrLock();
memset(ipSet, 0, sizeof(tsMnodeRpcIpSet));
memset(mnodes, 0, sizeof(SDMMnodeInfos));
int32_t index = 0;
void * pIter = NULL;
while (1) {

View File

@ -354,7 +354,7 @@ void sdbIncRef(void *handle, void *pObj) {
int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos);
atomic_add_fetch_32(pRefCount, 1);
if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == SDB_TABLE_DNODE)) {
sdbTrace("table:%s, add ref to record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
sdbTrace("add ref to table:%s record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
}
}
@ -365,7 +365,7 @@ void sdbDecRef(void *handle, void *pObj) {
int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos);
int32_t refCount = atomic_sub_fetch_32(pRefCount, 1);
if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == SDB_TABLE_DNODE)) {
sdbTrace("table:%s, def ref of record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
sdbTrace("def ref of table:%s record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
}
int8_t *updateEnd = pObj + pTable->refCountPos - 1;

View File

@ -325,6 +325,8 @@ static void mgmtProcessHeartBeatMsg(SQueuedMsg *pMsg) {
return;
}
pHBRsp->onlineDnodes = htonl(mgmtGetOnlinDnodesNum());
pHBRsp->totalDnodes = htonl(mgmtGetDnodesNum());
mgmtGetMnodeIpSet(&pHBRsp->ipList);
/*

View File

@ -1305,7 +1305,7 @@ static void mgmtProcessSuperTableVgroupMsg(SQueuedMsg *pMsg) {
if (pDnode == NULL) break;
strncpy(pVgroupInfo->vgroups[vgSize].ipAddr[vn].fqdn, pDnode->dnodeFqdn, tListLen(pDnode->dnodeFqdn));
pVgroupInfo->vgroups[vgSize].ipAddr[vn].port = htons(tsDnodeShellPort);
pVgroupInfo->vgroups[vgSize].ipAddr[vn].port = htons(pDnode->dnodePort);
pVgroupInfo->vgroups[vgSize].numOfIps++;
}
@ -1787,6 +1787,34 @@ static void mgmtGetChildTableMeta(SQueuedMsg *pMsg) {
rpcSendResponse(&rpcRsp);
}
void mgmtDropAllChildTablesInVgroups(SVgObj *pVgroup) {
void * pIter = NULL;
int32_t numOfTables = 0;
SChildTableObj *pTable = NULL;
mPrint("vgId:%d, all child tables will be dropped from sdb", pVgroup->vgId);
while (1) {
pIter = mgmtGetNextChildTable(pIter, &pTable);
if (pTable == NULL) break;
if (pTable->vgId == pVgroup->vgId) {
SSdbOper oper = {
.type = SDB_OPER_LOCAL,
.table = tsChildTableSdb,
.pObj = pTable,
};
sdbDeleteRow(&oper);
numOfTables++;
}
mgmtDecTableRef(pTable);
}
sdbFreeIter(pIter);
mPrint("vgId:%d, all child tables is dropped from sdb", pVgroup->vgId);
}
void mgmtDropAllChildTables(SDbObj *pDropDb) {
void * pIter = NULL;
int32_t numOfTables = 0;
@ -1996,7 +2024,7 @@ static void mgmtProcessMultiTableMetaMsg(SQueuedMsg *pMsg) {
SCMMultiTableInfoMsg *pInfo = pMsg->pCont;
pInfo->numOfTables = htonl(pInfo->numOfTables);
int32_t totalMallocLen = 4*1024*1024; // first malloc 4 MB, subsequent reallocation as twice
int32_t totalMallocLen = 4 * 1024 * 1024; // first malloc 4 MB, subsequent reallocation as twice
SMultiTableMeta *pMultiMeta = rpcMallocCont(totalMallocLen);
if (pMultiMeta == NULL) {
mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_SERV_OUT_OF_MEMORY);
@ -2006,26 +2034,30 @@ static void mgmtProcessMultiTableMetaMsg(SQueuedMsg *pMsg) {
pMultiMeta->contLen = sizeof(SMultiTableMeta);
pMultiMeta->numOfTables = 0;
for (int t = 0; t < pInfo->numOfTables; ++t) {
char *tableId = (char*)(pInfo->tableIds + t * TSDB_TABLE_ID_LEN);
for (int32_t t = 0; t < pInfo->numOfTables; ++t) {
char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_ID_LEN + 1);
SChildTableObj *pTable = mgmtGetChildTable(tableId);
if (pTable == NULL) continue;
if (pMsg->pDb == NULL) pMsg->pDb = mgmtGetDbByTableId(tableId);
if (pMsg->pDb == NULL) continue;
if (pMsg->pDb == NULL) {
mgmtDecTableRef(pTable);
continue;
}
int availLen = totalMallocLen - pMultiMeta->contLen;
if (availLen <= sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16)) {
//TODO realloc
//totalMallocLen *= 2;
//pMultiMeta = rpcReMalloc(pMultiMeta, totalMallocLen);
//if (pMultiMeta == NULL) {
/// rpcSendResponse(ahandle, TSDB_CODE_SERV_OUT_OF_MEMORY, NULL, 0);
// return TSDB_CODE_SERV_OUT_OF_MEMORY;
//} else {
// t--;
// continue;
//}
totalMallocLen *= 2;
pMultiMeta = rpcReallocCont(pMultiMeta, totalMallocLen);
if (pMultiMeta == NULL) {
mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_SERV_OUT_OF_MEMORY);
mgmtDecTableRef(pTable);
return;
} else {
t--;
mgmtDecTableRef(pTable);
continue;
}
}
STableMetaMsg *pMeta = (STableMetaMsg *)(pMultiMeta->metas + pMultiMeta->contLen);
@ -2034,6 +2066,8 @@ static void mgmtProcessMultiTableMetaMsg(SQueuedMsg *pMsg) {
pMultiMeta->numOfTables ++;
pMultiMeta->contLen += pMeta->contLen;
}
mgmtDecTableRef(pTable);
}
SRpcMsg rpcRsp = {0};
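
The commented-out TODO above is replaced by a real grow-and-retry path: when the response buffer cannot hold the next table meta, the capacity is doubled, the buffer is reallocated with rpcReallocCont(), and the loop index is stepped back so the same table is processed again. A generic standalone sketch of that pattern using plain realloc (illustrative, not the rpc allocator):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append items into a growable buffer; double the capacity and retry the
 * current item whenever it does not fit, as the multi-table-meta loop now does. */
int main(void) {
  size_t cap = 8, len = 0;
  char *buf = malloc(cap);
  if (buf == NULL) return 1;

  const char *items[] = {"alpha", "beta", "gamma", "delta"};
  for (int t = 0; t < 4; ++t) {
    size_t need = strlen(items[t]) + 1;
    if (cap - len < need) {             /* not enough room for this item */
      cap *= 2;
      char *p = realloc(buf, cap);
      if (p == NULL) { free(buf); return 1; }
      buf = p;
      --t;                              /* retry the same item with the bigger buffer */
      continue;
    }
    memcpy(buf + len, items[t], need);
    len += need;
  }
  printf("stored %zu bytes in a %zu-byte buffer\n", len, cap);
  free(buf);
  return 0;
}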

View File

@ -747,11 +747,14 @@ void mgmtDropAllDnodeVgroups(SDnodeObj *pDropDnode) {
SVgObj *pVgroup = NULL;
int32_t numOfVgroups = 0;
mPrint("dnode:%d, all vgroups will be dropped from sdb", pDropDnode->dnodeId);
while (1) {
pIter = mgmtGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
if (pVgroup->vnodeGid[0].dnodeId == pDropDnode->dnodeId) {
mgmtDropAllChildTablesInVgroups(pVgroup);
SSdbOper oper = {
.type = SDB_OPER_LOCAL,
.table = tsVgroupSdb,
@ -759,12 +762,13 @@ void mgmtDropAllDnodeVgroups(SDnodeObj *pDropDnode) {
};
sdbDeleteRow(&oper);
numOfVgroups++;
continue;
}
mgmtDecVgroupRef(pVgroup);
}
sdbFreeIter(pIter);
mPrint("dnode:%d, all vgroups is dropped from sdb", pDropDnode->dnodeId);
}
void mgmtUpdateAllDbVgroups(SDbObj *pAlterDb) {

View File

@ -148,6 +148,10 @@ static void taosDeleteTimer(void *tharg) {
timer_delete(*pTimer);
}
static pthread_t timerThread;
static timer_t timerId;
static volatile bool stopTimer = false;
void *taosProcessAlarmSignal(void *tharg) {
// Block the signal
sigset_t sigset;
@ -156,7 +160,6 @@ void *taosProcessAlarmSignal(void *tharg) {
sigprocmask(SIG_BLOCK, &sigset, NULL);
void (*callback)(int) = tharg;
static timer_t timerId;
struct sigevent sevent = {{0}};
#ifdef _ALPINE
@ -187,7 +190,7 @@ void *taosProcessAlarmSignal(void *tharg) {
}
int signo;
while (1) {
while (!stopTimer) {
if (sigwait(&sigset, &signo)) {
uError("Failed to wait signal: number %d", signo);
continue;
@ -202,7 +205,6 @@ void *taosProcessAlarmSignal(void *tharg) {
return NULL;
}
static pthread_t timerThread;
int taosInitTimer(void (*callback)(int), int ms) {
pthread_attr_t tattr;
@ -217,7 +219,7 @@ int taosInitTimer(void (*callback)(int), int ms) {
}
void taosUninitTimer() {
pthread_cancel(timerThread);
stopTimer = true;
pthread_join(timerThread, NULL);
}
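
taosUninitTimer() now sets a stopTimer flag and joins the thread instead of cancelling it; the sigwait loop still wakes up on each timer signal, notices the flag, and exits on its own. A minimal standalone sketch of that cooperative-shutdown pattern (generic worker that wakes via a short sleep rather than a signal):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static volatile bool stop = false;

static void *worker(void *arg) {
  (void)arg;
  while (!stop) {          /* check the flag instead of relying on pthread_cancel */
    usleep(10 * 1000);     /* pretend to wait for work */
  }
  return NULL;
}

int main(void) {
  pthread_t tid;
  if (pthread_create(&tid, NULL, worker, NULL) != 0) return 1;

  usleep(50 * 1000);
  stop = true;             /* request shutdown ... */
  pthread_join(tid, NULL); /* ... and wait until the thread has really left its loop */
  printf("worker stopped cleanly\n");
  return 0;
}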

View File

@ -3,3 +3,4 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(monitor)
ADD_SUBDIRECTORY(http)
ADD_SUBDIRECTORY(mqtt)

View File

@ -267,8 +267,10 @@ static void httpStopThread(HttpThread* pThread) {
struct epoll_event event = { .events = EPOLLIN };
eventfd_t fd = eventfd(1, 0);
if (fd == -1) {
httpError("%s, failed to create eventfd, will call pthread_cancel instead, which may result in data corruption: %s", pThread->label, strerror(errno));
pthread_cancel(pThread->thread);
} else if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) {
httpError("%s, failed to call epoll_ctl, will call pthread_cancel instead, which may result in data corruption: %s", pThread->label, strerror(errno));
pthread_cancel(pThread->thread);
}
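
Both httpStopThread() here and taosStopTcpThread() later in the commit wake the poll loop by adding an already-signalled eventfd to the epoll set, and only fall back to pthread_cancel (now with a warning) if that fails. A standalone sketch of the wake-up mechanism on a bare epoll loop:

#include <stdio.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <unistd.h>

/* Create an epoll set, then wake a (would-be) blocking epoll_wait by adding
 * an eventfd that is already readable, the same trick used to stop the
 * http/tcp threads without pthread_cancel. */
int main(void) {
  int epfd = epoll_create1(0);
  if (epfd < 0) { perror("epoll_create1"); return 1; }

  int efd = eventfd(1, 0);                 /* initial counter 1, so immediately readable */
  if (efd < 0) { perror("eventfd"); return 1; }

  struct epoll_event ev = { .events = EPOLLIN, .data.fd = efd };
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev) < 0) { perror("epoll_ctl"); return 1; }

  struct epoll_event out;
  int n = epoll_wait(epfd, &out, 1, 1000); /* returns at once because efd is readable */
  printf("epoll_wait returned %d event(s); the poll loop can now notice its stop flag\n", n);

  close(efd);
  close(epfd);
  return 0;
}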

View File

@ -0,0 +1,22 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(mqtt ${SRC})
TARGET_LINK_LIBRARIES(mqtt taos_static z)
IF (TD_ADMIN)
TARGET_LINK_LIBRARIES(mqtt admin)
ENDIF ()
ENDIF ()

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MQTT_LOG_H
#define TDENGINE_MQTT_LOG_H
#include "tlog.h"
extern int32_t mqttDebugFlag;
#define mqttError(...) \
if (mqttDebugFlag & DEBUG_ERROR) { \
taosPrintLog("ERROR MQT ", 255, __VA_ARGS__); \
}
#define mqttWarn(...) \
if ( mqttDebugFlag & DEBUG_WARN) { \
taosPrintLog("WARN MQT ", mqttDebugFlag, __VA_ARGS__); \
}
#define mqttTrace(...) \
if ( mqttDebugFlag & DEBUG_TRACE) { \
taosPrintLog("MQT ", mqttDebugFlag, __VA_ARGS__); \
}
#define mqttDump(...) \
if ( mqttDebugFlag & DEBUG_TRACE) { \
taosPrintLongString("MQT ", mqttDebugFlag, __VA_ARGS__); \
}
#define mqttPrint(...) \
{ taosPrintLog("MQT ", 255, __VA_ARGS__); }
#endif
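
Each macro above checks a bit in mqttDebugFlag before forwarding to taosPrintLog, so disabled levels cost only the flag test. A self-contained miniature of the same flag-gated pattern, with stdio standing in for the real log writer and made-up flag values:

#include <stdio.h>

/* Simplified stand-in for the real DEBUG_* bits and taosPrintLog(). */
#define DEBUG_ERROR 1
#define DEBUG_WARN  2
#define DEBUG_TRACE 4

static int mqttDebugFlag = DEBUG_ERROR | DEBUG_WARN;   /* trace disabled */

#define mqttError(...)                       \
  if (mqttDebugFlag & DEBUG_ERROR) {         \
    printf("ERROR MQT " __VA_ARGS__);        \
  }
#define mqttTrace(...)                       \
  if (mqttDebugFlag & DEBUG_TRACE) {         \
    printf("MQT " __VA_ARGS__);              \
  }

int main(void) {
  mqttError("broker unreachable, rc=%d\n", -1);  /* printed */
  mqttTrace("keepalive tick\n");                 /* suppressed: trace bit not set */
  return 0;
}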

View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MQTT_SYSTEM_H
#define TDENGINE_MQTT_SYSTEM_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
int32_t mqttGetReqCount();
int32_t mqttInitSystem();
int32_t mqttStartSystem();
void mqttStopSystem();
void mqttCleanUpSystem();
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "mqttSystem.h"
#include "mqtt.h"
#include "mqttLog.h"
#include "os.h"
#include "taos.h"
#include "tglobal.h"
#include "tsocket.h"
#include "ttimer.h"
int32_t mqttGetReqCount() { return 0; }
int mqttInitSystem() {
mqttPrint("mqttInitSystem");
return 0;
}
int mqttStartSystem() {
mqttPrint("mqttStartSystem");
return 0;
}
void mqttStopSystem() {
mqttPrint("mqttStopSystem");
}
void mqttCleanUpSystem() {
mqttPrint("mqttCleanUpSystem");
}

View File

@ -6041,6 +6041,8 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi
isSTableQuery = TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
STableIdInfo *id = taosArrayGet(pTableIdList, 0);
qTrace("qmsg:%p query table, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid);
if ((code = tsdbGetOneTableGroup(tsdb, id->uid, &groupInfo)) != TSDB_CODE_SUCCESS) {
goto _over;
}

View File

@ -867,9 +867,8 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
// underlying UDP layer does not know it is server or client
pRecv->connType = pRecv->connType | pRpc->connType;
if (pRecv->ip==0 && pConn) {
rpcProcessBrokenLink(pConn);
rpcFreeMsg(pRecv->msg);
if (pRecv->ip == 0 && pConn) {
rpcProcessBrokenLink(pConn);
return NULL;
}

View File

@ -147,8 +147,10 @@ static void taosStopTcpThread(SThreadObj* pThreadObj) {
struct epoll_event event = { .events = EPOLLIN };
eventfd_t fd = eventfd(1, 0);
if (fd == -1) {
tError("%s, failed to create eventfd, will call pthread_cancel instead, which may result in data corruption: %s", pThreadObj->label, strerror(errno));
pthread_cancel(pThreadObj->thread);
} else if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) {
tError("%s, failed to call epoll_ctl, will call pthread_cancel instead, which may result in data corruption: %s", pThreadObj->label, strerror(errno));
pthread_cancel(pThreadObj->thread);
}
@ -213,7 +215,6 @@ static void* taosAcceptTcpConnection(void *arg) {
continue;
}
tTrace("%s TCP connection from ip:%s:%hu", pServerObj->label, inet_ntoa(caddr.sin_addr), caddr.sin_port);
taosKeepTcpAlive(connFd);
// pick up the thread to handle this connection
@ -227,7 +228,8 @@ static void* taosAcceptTcpConnection(void *arg) {
inet_ntoa(caddr.sin_addr), pFdObj->port, pFdObj, pThreadObj->numOfFds);
} else {
close(connFd);
tError("%s failed to malloc FdObj(%s)", pServerObj->label, strerror(errno));
tError("%s failed to malloc FdObj(%s) for connection from:%s:%hu", pServerObj->label, strerror(errno),
inet_ntoa(caddr.sin_addr), caddr.sin_port);
}
// pick up next thread for next connection
@ -339,7 +341,9 @@ static void taosReportBrokenLink(SFdObj *pFdObj) {
recvInfo.chandle = NULL;
recvInfo.connType = RPC_CONN_TCP;
(*(pThreadObj->processData))(&recvInfo);
}
} else {
taosFreeFdObj(pFdObj);
}
}
#define maxEvents 10
@ -350,7 +354,7 @@ static void *taosProcessTcpData(void *param) {
struct epoll_event events[maxEvents];
SRecvInfo recvInfo;
SRpcHead rpcHead;
while (1) {
int fdNum = epoll_wait(pThreadObj->pollFd, events, maxEvents, -1);
if (pThreadObj->stop) {
@ -464,7 +468,7 @@ static void taosFreeFdObj(SFdObj *pFdObj) {
pFdObj->signature = NULL;
epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_DEL, pFdObj->fd, NULL);
close(pFdObj->fd);
taosCloseSocket(pFdObj->fd);
pThreadObj->numOfFds--;

View File

@ -127,6 +127,8 @@ int main(int argc, char *argv[]) {
SRpcInit rpcInit;
char dataName[20] = "server.data";
taosBlockSIGPIPE();
memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.localPort = 7000;
rpcInit.label = "SER";

View File

@ -495,11 +495,12 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo);
int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError);
// --------- For read operations
int tsdbLoadCompIdx(SRWHelper *pHelper, void *target);
int tsdbLoadCompInfo(SRWHelper *pHelper, void *target);
int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target);
int tsdbLoadBlockDataCols(SRWHelper *pHelper, SDataCols *pDataCols, int blkIdx, int16_t *colIds, int numOfColIds);
int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *target);
int tsdbLoadCompIdx(SRWHelper *pHelper, void *target);
int tsdbLoadCompInfo(SRWHelper *pHelper, void *target);
int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target);
int tsdbLoadBlockDataCols(SRWHelper *pHelper, SDataCols *pDataCols, int blkIdx, int16_t *colIds, int numOfColIds);
int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *target);
void tsdbGetDataStatis(SRWHelper *pHelper, SDataStatis *pStatis, int numOfCols);
// --------- For write operations
int tsdbWriteDataBlock(SRWHelper *pHelper, SDataCols *pDataCols);

View File

@ -289,6 +289,13 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
if (tsdbCheckTableCfg(pCfg) < 0) return -1;
STable *pTable = tsdbGetTableByUid(pMeta, pCfg->tableId.uid);
if (pTable != NULL) {
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, pRepo->config.tsdbId, varDataVal(pTable->name),
pTable->tableId.tid, pTable->tableId.uid);
return TSDB_CODE_TABLE_ALREADY_EXIST;
}
STable *super = NULL;
int newSuper = 0;

View File

@ -543,6 +543,34 @@ int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target) {
return 0;
}
void tsdbGetDataStatis(SRWHelper *pHelper, SDataStatis *pStatis, int numOfCols) {
SCompData *pCompData = pHelper->pCompData;
for (int i = 0, j = 0; i < numOfCols;) {
if (j >= pCompData->numOfCols) {
pStatis[i].numOfNull = -1;
i++;
continue;
}
if (pStatis[i].colId == pCompData->cols[j].colId) {
pStatis[i].sum = pCompData->cols[j].sum;
pStatis[i].max = pCompData->cols[j].max;
pStatis[i].min = pCompData->cols[j].min;
pStatis[i].maxIndex = pCompData->cols[j].maxIndex;
pStatis[i].minIndex = pCompData->cols[j].minIndex;
pStatis[i].numOfNull = pCompData->cols[j].numOfNull;
i++;
j++;
} else if (pStatis[i].colId < pCompData->cols[j].colId) {
pStatis[i].numOfNull = -1;
i++;
} else {
j++;
}
}
}
static int comparColIdCompCol(const void *arg1, const void *arg2) {
return (*(int16_t *)arg1) - ((SCompCol *)arg2)->colId;
}
@ -748,7 +776,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
pCompCol->colId = pDataCol->colId;
pCompCol->type = pDataCol->type;
if (tDataTypeDesc[pDataCol->type].getStatisFunc) {
if (tDataTypeDesc[pDataCol->type].getStatisFunc && ncol != 0) {
(*tDataTypeDesc[pDataCol->type].getStatisFunc)(
(TSKEY *)(pDataCols->cols[0].pData), pDataCol->pData, rowsToWrite, &(pCompCol->min), &(pCompCol->max),
&(pCompCol->sum), &(pCompCol->minIndex), &(pCompCol->maxIndex), &(pCompCol->numOfNull));

View File

@ -26,7 +26,7 @@
#include "tsdbMain.h"
#define EXTRA_BYTES 2
#define ASCENDING_ORDER_TRAVERSE(o) (o == TSDB_ORDER_ASC)
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
#define QH_GET_NUM_OF_COLS(handle) ((size_t)(taosArrayGetSize((handle)->pColumns)))
enum {
@ -80,7 +80,7 @@ typedef struct STableCheckInfo {
SSkipListIterator* iter; // skip list iterator
SSkipListIterator* iiter; // imem iterator
bool hasObtainBuf; // if we should initialize the in-memory skip list iterator
bool initBuf; // if we should initialize the in-memory skip list iterator
} STableCheckInfo;
typedef struct {
@ -188,7 +188,7 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
* For ascending timestamp order query, query starts from data files. In contrast, buffer will be checked in the first place
* in case of descending timestamp order query.
*/
pQueryHandle->checkFiles = true;//ASCENDING_ORDER_TRAVERSE(pQueryHandle->order);
pQueryHandle->checkFiles = true;//ASCENDING_TRAVERSE(pQueryHandle->order);
pQueryHandle->activeIndex = 0;
// allocate buffer in order to load data blocks from file
@ -234,11 +234,11 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
STable* pTable = pCheckInfo->pTableObj;
assert(pTable != NULL);
if (pCheckInfo->hasObtainBuf) {
if (pCheckInfo->initBuf) {
return true;
}
pCheckInfo->hasObtainBuf = true;
pCheckInfo->initBuf = true;
int32_t order = pHandle->order;
// no data in buffer, abort
@ -335,8 +335,8 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo);
// all data in mem are checked already.
if ((pCheckInfo->lastKey > pHandle->window.ekey && ASCENDING_ORDER_TRAVERSE(pHandle->order)) ||
(pCheckInfo->lastKey < pHandle->window.ekey && !ASCENDING_ORDER_TRAVERSE(pHandle->order))) {
if ((pCheckInfo->lastKey > pHandle->window.ekey && ASCENDING_TRAVERSE(pHandle->order)) ||
(pCheckInfo->lastKey < pHandle->window.ekey && !ASCENDING_TRAVERSE(pHandle->order))) {
return false;
}
@ -394,9 +394,10 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
SCompIdx* compIndex = &pQueryHandle->rhelper.pCompIdx[pCheckInfo->tableId.tid];
if (compIndex->len == 0 || compIndex->numOfBlocks == 0) { // no data block in this file, try next file
if (compIndex->len == 0 || compIndex->numOfBlocks == 0 ||
compIndex->uid != pCheckInfo->tableId.uid) { // no data block in this file, try next file
pCheckInfo->numOfBlocks = 0;
continue;//no data blocks in the file belongs to pCheckInfo->pTable
continue; // no data blocks in the file belongs to pCheckInfo->pTable
} else {
if (pCheckInfo->compSize < compIndex->len) {
assert(compIndex->len > 0);
@ -482,9 +483,11 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS
return pLocalIdList;
}
static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
SArray* sa);
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle);
static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) {
STsdbRepo *pRepo = pQueryHandle->pTsdb;
@ -518,11 +521,95 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
return blockLoaded;
}
static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
SQueryFilePos* cur = &pQueryHandle->cur;
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlock);
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
TSKEY k1 = TSKEY_INITIAL_VAL, k2 = TSKEY_INITIAL_VAL;
if (pCheckInfo->iter != NULL && tSkipListIterGet(pCheckInfo->iter) != NULL) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
SDataRow row = SL_GET_NODE_DATA(node);
k1 = dataRowKey(row);
if (k1 == binfo.window.skey) {
if (tSkipListIterNext(pCheckInfo->iter)) {
node = tSkipListIterGet(pCheckInfo->iter);
row = SL_GET_NODE_DATA(node);
k1 = dataRowKey(row);
} else {
k1 = TSKEY_INITIAL_VAL;
}
}
}
if (pCheckInfo->iiter != NULL && tSkipListIterGet(pCheckInfo->iiter) != NULL) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
SDataRow row = SL_GET_NODE_DATA(node);
k2 = dataRowKey(row);
if (k2 == binfo.window.skey) {
if (tSkipListIterNext(pCheckInfo->iiter)) {
node = tSkipListIterGet(pCheckInfo->iiter);
row = SL_GET_NODE_DATA(node);
k2 = dataRowKey(row);
} else {
k2 = TSKEY_INITIAL_VAL;
}
}
}
cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(binfo.rows-1);
if ((ASCENDING_TRAVERSE(pQueryHandle->order) &&
((k1 != TSKEY_INITIAL_VAL && k1 <= binfo.window.ekey) || (k2 != TSKEY_INITIAL_VAL && k2 <= binfo.window.ekey))) ||
(!ASCENDING_TRAVERSE(pQueryHandle->order) &&
((k1 != TSKEY_INITIAL_VAL && k1 >= binfo.window.skey) || (k2 != TSKEY_INITIAL_VAL && k2 >= binfo.window.skey)))) {
if ((ASCENDING_TRAVERSE(pQueryHandle->order) &&
((k1 != TSKEY_INITIAL_VAL && k1 < binfo.window.skey) || (k2 != TSKEY_INITIAL_VAL && k2 < binfo.window.skey))) ||
(!ASCENDING_TRAVERSE(pQueryHandle->order) &&
(((k1 != TSKEY_INITIAL_VAL && k1 > binfo.window.skey) || (k2 != TSKEY_INITIAL_VAL && k2 > binfo.window.skey))))) {
// do not load file block into buffer
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1;
cur->rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, binfo.window.skey - step,
pQueryHandle->outputCapacity, &cur->win.skey, &cur->win.ekey, pQueryHandle);
pQueryHandle->realNumOfRows = cur->rows;
// update the last key value
pCheckInfo->lastKey = cur->win.ekey + step;
if (!ASCENDING_TRAVERSE(pQueryHandle->order)) {
SWAP(cur->win.skey, cur->win.ekey, TSKEY);
}
cur->mixBlock = true;
cur->blockCompleted = false;
return;
}
doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo);
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa);
} else {
pQueryHandle->realNumOfRows = binfo.rows;
cur->rows = binfo.rows;
cur->win = binfo.window;
cur->mixBlock = false;
cur->blockCompleted = true;
cur->lastKey = binfo.window.ekey + (ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1);
}
}
static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) {
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
SQueryFilePos* cur = &pQueryHandle->cur;
if (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) {
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
// query ended in current block
if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) {
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
@ -539,62 +626,11 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
cur->pos = 0;
}
mergeDataInDataBlock(pQueryHandle, pCheckInfo, pBlock, sa);
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa);
} else { // the whole block is loaded in to buffer
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlock);
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
TSKEY k1 = TSKEY_INITIAL_VAL, k2 = TSKEY_INITIAL_VAL;
if (pCheckInfo->iter != NULL && tSkipListIterGet(pCheckInfo->iter) != NULL) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
SDataRow row = SL_GET_NODE_DATA(node);
k1 = dataRowKey(row);
if (k1 == binfo.window.skey) {
if (tSkipListIterNext(pCheckInfo->iter)) {
node = tSkipListIterGet(pCheckInfo->iter);
row = SL_GET_NODE_DATA(node);
k1 = dataRowKey(row);
} else {
k1 = TSKEY_INITIAL_VAL;
}
}
}
if (pCheckInfo->iiter != NULL && tSkipListIterGet(pCheckInfo->iiter) != NULL) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
SDataRow row = SL_GET_NODE_DATA(node);
k2 = dataRowKey(row);
if (k2 == binfo.window.skey) {
if (tSkipListIterNext(pCheckInfo->iiter)) {
node = tSkipListIterGet(pCheckInfo->iiter);
row = SL_GET_NODE_DATA(node);
k2 = dataRowKey(row);
} else {
k2 = TSKEY_INITIAL_VAL;
}
}
}
cur->pos = 0;
if ((k1 != TSKEY_INITIAL_VAL && k1 < binfo.window.ekey) || (k2 != TSKEY_INITIAL_VAL && k2 < binfo.window.ekey)) {
doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo);
mergeDataInDataBlock(pQueryHandle, pCheckInfo, pBlock, sa);
} else {
pQueryHandle->realNumOfRows = binfo.rows;
cur->rows = binfo.rows;
cur->win = binfo.window;
cur->mixBlock = false;
cur->blockCompleted = true;
cur->lastKey = binfo.window.ekey + (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)? 1:-1);
}
handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
}
} else { //desc order
// query ended in current block
} else { //desc order, query ended in current block
if (pQueryHandle->window.ekey > pBlock->keyFirst) {
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
return false;
@ -608,55 +644,11 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
cur->pos = pBlock->numOfPoints - 1;
}
mergeDataInDataBlock(pQueryHandle, pCheckInfo, pBlock, sa);
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa);
} else {
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlock);
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
TSKEY k1 = TSKEY_INITIAL_VAL, k2 = TSKEY_INITIAL_VAL;
if (pCheckInfo->iter != NULL) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
SDataRow row = SL_GET_NODE_DATA(node);
k1 = dataRowKey(row);
if (k1 == binfo.window.skey) {
if (tSkipListIterNext(pCheckInfo->iter)) {
node = tSkipListIterGet(pCheckInfo->iter);
row = SL_GET_NODE_DATA(node);
k1 = dataRowKey(row);
} else {
k1 = TSKEY_INITIAL_VAL;
}
}
}
if (pCheckInfo->iiter != NULL) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
SDataRow row = SL_GET_NODE_DATA(node);
k2 = dataRowKey(row);
if (k2 == binfo.window.skey) {
if (tSkipListIterNext(pCheckInfo->iiter)) {
node = tSkipListIterGet(pCheckInfo->iiter);
row = SL_GET_NODE_DATA(node);
k2 = dataRowKey(row);
} else {
k2 = TSKEY_INITIAL_VAL;
}
}
}
cur->pos = binfo.rows - 1;
if ((k1 != TSKEY_INITIAL_VAL && k1 > binfo.window.ekey) || (k2 != TSKEY_INITIAL_VAL && k2 > binfo.window.ekey)) {
doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo);
mergeDataInDataBlock(pQueryHandle, pCheckInfo, pBlock, sa);
} else {
pQueryHandle->realNumOfRows = binfo.rows;
}
}
// pQueryHandle->realNumOfRows = pBlock->numOfPoints;
// cur->pos = pBlock->numOfPoints - 1;
handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
}
}
taosArrayDestroy(sa);
return pQueryHandle->realNumOfRows > 0;
@ -726,20 +718,20 @@ static int vnodeBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
static int32_t copyDataFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) {
char* pData = NULL;
int32_t step = ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)? 1 : -1;
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1 : -1;
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
TSKEY* tsArray = pCols->cols[0].pData;
int32_t num = end - start + 1;
int32_t reqiredNumOfCols = taosArrayGetSize(pQueryHandle->pColumns);
int32_t requiredNumOfCols = taosArrayGetSize(pQueryHandle->pColumns);
//data in buffer has greater timestamp, copy data in file block
for (int32_t i = 0; i < reqiredNumOfCols; ++i) {
for (int32_t i = 0; i < requiredNumOfCols; ++i) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
int32_t bytes = pColInfo->info.bytes;
if (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) {
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
pData = pColInfo->pData + (capacity - numOfRows - num) * pColInfo->info.bytes;
@ -783,7 +775,7 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, STableCheckInfo* p
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) {
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
pData = pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes;
@ -810,7 +802,7 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, STableCheckInfo* p
// only return the qualified data to client in terms of query time window, data rows in the same block but do not
// be included in the query time window will be discarded
static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
SArray* sa) {
SQueryFilePos* cur = &pQueryHandle->cur;
SDataBlockInfo blockInfo = getTrueDataBlockInfo(pCheckInfo, pBlock);
@ -819,10 +811,10 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
int32_t endPos = cur->pos;
if (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey > blockInfo.window.ekey) {
if (ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey > blockInfo.window.ekey) {
endPos = blockInfo.rows - 1;
cur->mixBlock = (cur->pos != 0);
} else if (!ASCENDING_ORDER_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey < blockInfo.window.skey) {
} else if (!ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey < blockInfo.window.skey) {
endPos = 0;
cur->mixBlock = (cur->pos != blockInfo.rows - 1);
} else {
@ -839,13 +831,13 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
int32_t numOfRows = 0;
pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
int32_t step = ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)? 1:-1;
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
// no data in buffer, load data from file directly
if (pCheckInfo->iiter == NULL && pCheckInfo->iter == NULL) {
int32_t start = cur->pos;
int32_t end = endPos;
if (!ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) {
if (!ASCENDING_TRAVERSE(pQueryHandle->order)) {
end = cur->pos;
start = endPos;
}
@ -857,7 +849,7 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
numOfRows = copyDataFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, start, end);
// if the buffer is not full in case of descending order query, move the data in the front of the buffer
if (!ASCENDING_ORDER_TRAVERSE(pQueryHandle->order) && numOfRows < pQueryHandle->outputCapacity) {
if (!ASCENDING_TRAVERSE(pQueryHandle->order) && numOfRows < pQueryHandle->outputCapacity) {
int32_t emptySize = pQueryHandle->outputCapacity - numOfRows;
int32_t reqNumOfCols = taosArrayGetSize(pQueryHandle->pColumns);
@ -868,8 +860,8 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
}
pos += (end - start + 1) * step;
cur->blockCompleted = (((pos >= endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
((pos <= endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)));
cur->blockCompleted = (((pos >= endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
((pos <= endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order)));
pCheckInfo->lastKey = cur->lastKey;
pQueryHandle->realNumOfRows = numOfRows;
@ -889,18 +881,18 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
SDataRow row = SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row);
if ((key > pQueryHandle->window.ekey && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
(key < pQueryHandle->window.ekey && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order))) {
if ((key > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
break;
}
if (((tsArray[pos] > pQueryHandle->window.ekey || pos > endPos) && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
((tsArray[pos] < pQueryHandle->window.ekey || pos < endPos) && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order))) {
if (((tsArray[pos] > pQueryHandle->window.ekey || pos > endPos) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
((tsArray[pos] < pQueryHandle->window.ekey || pos < endPos) && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
break;
}
if ((key < tsArray[pos] && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
(key > tsArray[pos] && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order))) {
if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
copyOneRowFromMem(pQueryHandle, pCheckInfo, pQueryHandle->outputCapacity, numOfRows, row, pSchema);
numOfRows += 1;
if (cur->win.skey == TSKEY_INITIAL_VAL) {
@ -914,20 +906,20 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
tSkipListIterNext(pCheckInfo->iter);
} else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it
tSkipListIterNext(pCheckInfo->iter);
} else if ((key > tsArray[pos] && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
(key < tsArray[pos] && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order))) {
} else if ((key > tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = tsArray[pos];
}
int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
int32_t order = ASCENDING_TRAVERSE(pQueryHandle->order) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
int32_t end = vnodeBinarySearchKey(pCols->cols[0].pData, pCols->numOfPoints, key, order);
if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it
tSkipListIterNext(pCheckInfo->iter);
}
int32_t start = -1;
if (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) {
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
int32_t remain = end - pos + 1;
if (remain + numOfRows > pQueryHandle->outputCapacity) {
end = (pQueryHandle->outputCapacity - numOfRows) + pos - 1;
@ -950,9 +942,13 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
} while (numOfRows < pQueryHandle->outputCapacity);
if (numOfRows < pQueryHandle->outputCapacity) {
/**
* if cache is empty, load remain file block data. In contrast, if there are remain data in cache, do NOT
* copy them all to result buffer, since it may be overlapped with file data block.
*/
if (node == NULL ||
((dataRowKey(SL_GET_NODE_DATA(node)) > pQueryHandle->window.ekey) && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
((dataRowKey(SL_GET_NODE_DATA(node)) < pQueryHandle->window.ekey) && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order))) {
((dataRowKey(SL_GET_NODE_DATA(node)) > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
((dataRowKey(SL_GET_NODE_DATA(node)) < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
// no data in cache or data in cache is greater than the ekey of time window, load data from file block
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = tsArray[pos];
@ -962,7 +958,7 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
int32_t end = -1;
// all remain data are qualified, but check the remain capacity in the first place.
if (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) {
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
int32_t remain = endPos - pos + 1;
if (remain + numOfRows > pQueryHandle->outputCapacity) {
endPos = (pQueryHandle->outputCapacity - numOfRows) + pos - 1;
@ -982,44 +978,22 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
numOfRows = copyDataFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, start, end);
pos += (end - start + 1) * step;
} else {
while(numOfRows < pQueryHandle->outputCapacity && node != NULL &&
(((dataRowKey(SL_GET_NODE_DATA(node)) <= pQueryHandle->window.ekey) && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
((dataRowKey(SL_GET_NODE_DATA(node)) >= pQueryHandle->window.ekey) && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)))) {
SDataRow row = SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row);
copyOneRowFromMem(pQueryHandle, pCheckInfo, pQueryHandle->outputCapacity, numOfRows, row, pSchema);
numOfRows += 1;
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
}
cur->win.ekey = key;
cur->lastKey = key + step;
cur->mixBlock = true;
tSkipListIterNext(pCheckInfo->iter);
node = tSkipListIterGet(pCheckInfo->iter);
}
}
}
}
cur->blockCompleted = (((pos >= endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
((pos <= endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)));
cur->blockCompleted = (((pos >= endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
((pos <= endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order)));
if (!ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) {
if (!ASCENDING_TRAVERSE(pQueryHandle->order)) {
SWAP(cur->win.skey, cur->win.ekey, TSKEY);
// if the buffer is not full in case of descending order query, move the data in the front of the buffer
if (numOfRows < pQueryHandle->outputCapacity) {
int32_t emptySize = pQueryHandle->outputCapacity - numOfRows;
int32_t reqiredNumOfCols = taosArrayGetSize(pQueryHandle->pColumns);
for(int32_t i = 0; i < reqiredNumOfCols; ++i) {
int32_t requiredNumOfCols = taosArrayGetSize(pQueryHandle->pColumns);
for(int32_t i = 0; i < requiredNumOfCols; ++i) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
memmove(pColInfo->pData, pColInfo->pData + emptySize * pColInfo->info.bytes, numOfRows * pColInfo->info.bytes);
}
@ -1245,7 +1219,7 @@ static bool getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle) {
int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
while ((pQueryHandle->pFileGroup = tsdbGetFileGroupNext(&pQueryHandle->fileIter)) != NULL) {
int32_t type = ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)? QUERY_RANGE_GREATER_EQUAL:QUERY_RANGE_LESS_EQUAL;
int32_t type = ASCENDING_TRAVERSE(pQueryHandle->order)? QUERY_RANGE_GREATER_EQUAL:QUERY_RANGE_LESS_EQUAL;
if (getFileCompInfo(pQueryHandle, &numOfBlocks, type) != TSDB_CODE_SUCCESS) {
break;
}
@ -1277,7 +1251,7 @@ static bool getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle) {
return false;
}
cur->slot = ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)? 0:pQueryHandle->numOfBlocks-1;
cur->slot = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:pQueryHandle->numOfBlocks-1;
cur->fid = pQueryHandle->pFileGroup->fileId;
STableBlockInfo* pBlockInfo = &pQueryHandle->pDataBlockInfo[cur->slot];
@ -1304,12 +1278,13 @@ static bool getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle) {
// current block is done, try next
if (!cur->mixBlock || cur->blockCompleted) {
if ((cur->slot == pQueryHandle->numOfBlocks - 1 && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
(cur->slot == 0 && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order))) {
if ((cur->slot == pQueryHandle->numOfBlocks - 1 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(cur->slot == 0 && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
// all data blocks in current file has been checked already, try next file if exists
return getDataBlocksInFilesImpl(pQueryHandle);
} else { // next block of the same file
int32_t step = ASCENDING_ORDER_TRAVERSE(pQueryHandle->order) ? 1 : -1;
} else {
// next block of the same file
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1;
cur->slot += step;
cur->mixBlock = false;
@ -1319,9 +1294,8 @@ static bool getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle) {
return loadFileDataBlock(pQueryHandle, pNext->pBlock.compBlock, pNext->pTableCheckInfo);
}
} else {
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
mergeDataInDataBlock(pQueryHandle, pCheckInfo, pBlockInfo->pBlock.compBlock, sa);
return true;
handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->pBlock.compBlock, pCheckInfo);
return pQueryHandle->realNumOfRows > 0;
}
}
}
@ -1364,7 +1338,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pqHandle) {
void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle) {
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pqHandle;
assert(!ASCENDING_ORDER_TRAVERSE(pQueryHandle->order));
assert(!ASCENDING_TRAVERSE(pQueryHandle->order));
// starts from the buffer in case of descending timestamp order check data blocks
@ -1432,8 +1406,8 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
SDataRow row = SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row);
if ((key > maxKey && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
(key < maxKey && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order))) {
if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < maxKey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
uTrace("%p key:%"PRIu64" beyond qrange:%"PRId64" - %"PRId64", no more data in buffer", pQueryHandle, key, pQueryHandle->window.skey,
pQueryHandle->window.ekey);
@ -1456,7 +1430,7 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) {
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes;
@ -1489,7 +1463,7 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
assert(numOfRows <= maxRowsToRead);
// if the buffer is not full in case of descending order query, move the data in the front of the buffer
if (!ASCENDING_ORDER_TRAVERSE(pQueryHandle->order) && numOfRows < maxRowsToRead) {
if (!ASCENDING_TRAVERSE(pQueryHandle->order) && numOfRows < maxRowsToRead) {
int32_t emptySize = maxRowsToRead - numOfRows;
for(int32_t i = 0; i < numOfCols; ++i) {
@ -1505,17 +1479,14 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) {
STsdbQueryHandle* pHandle = (STsdbQueryHandle*)pQueryHandle;
STable* pTable = NULL;
int32_t rows = 0;
int32_t step = ASCENDING_ORDER_TRAVERSE(pHandle->order)? 1:-1;
int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 1:-1;
// there are data in file
if (pHandle->cur.fid >= 0) {
STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[pHandle->cur.slot];
STableCheckInfo* pCheckInfo = pBlockInfo->pTableCheckInfo;
pTable = pCheckInfo->pTableObj;
STable* pTable = pCheckInfo->pTableObj;
if (pHandle->cur.mixBlock) {
SDataBlockInfo blockInfo = {
@ -1527,33 +1498,31 @@ SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) {
return blockInfo;
} else {
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlockInfo->pBlock.compBlock);
return binfo;
return getTrueDataBlockInfo(pCheckInfo, pBlockInfo->pBlock.compBlock);
}
} else {
STableCheckInfo* pCheckInfo = taosArrayGet(pHandle->pTableCheckInfo, pHandle->activeIndex);
pTable = pCheckInfo->pTableObj;
if (pTable->mem != NULL) {
// create mem table iterator if it is not created yet
STable* pTable = pCheckInfo->pTableObj;
if (pTable->mem != NULL) { // create mem table iterator if it is not created yet
assert(pCheckInfo->iter != NULL);
STimeWindow* win = &pHandle->cur.win;
rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, pHandle->window.ekey,
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, pHandle->window.ekey,
pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API
// update the last key value
pCheckInfo->lastKey = win->ekey + step;
}
if (!ASCENDING_ORDER_TRAVERSE(pHandle->order)) {
if (!ASCENDING_TRAVERSE(pHandle->order)) {
SWAP(pHandle->cur.win.skey, pHandle->cur.win.ekey, TSKEY);
}
SDataBlockInfo blockInfo = {
.uid = pTable->tableId.uid,
.tid = pTable->tableId.tid,
.rows = rows,
.rows = pHandle->cur.rows,
.window = pHandle->cur.win,
};
@ -1600,7 +1569,7 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) {
int32_t numOfRows = copyDataFromFileBlock(pHandle, pHandle->outputCapacity, 0, 0, pBlock->numOfPoints - 1);
// if the buffer is not full in case of descending order query, move the data in the front of the buffer
if (!ASCENDING_ORDER_TRAVERSE(pHandle->order) && numOfRows < pHandle->outputCapacity) {
if (!ASCENDING_TRAVERSE(pHandle->order) && numOfRows < pHandle->outputCapacity) {
int32_t emptySize = pHandle->outputCapacity - numOfRows;
int32_t reqNumOfCols = taosArrayGetSize(pHandle->pColumns);


@ -31,7 +31,6 @@ int taosOpenUdpSocket(uint32_t localIp, uint16_t localPort);
int taosOpenTcpClientSocket(uint32_t ip, uint16_t port, uint32_t localIp);
int taosOpenTcpServerSocket(uint32_t ip, uint16_t port);
int taosKeepTcpAlive(int sockFd);
void taosCloseTcpSocket(int sockFd);
int taosGetFqdn(char *);
uint32_t taosGetIpFromFqdn(const char *);


@ -216,14 +216,14 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH;
}
static UNUSED_FUNC int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
const char* pattern = pRight;
const char* str = pLeft;
int32_t ret = patternMatch(pattern, str, strlen(str), &pInfo);
char pattern[128] = {0};
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
assert(varDataLen(pRight) < 128);
int32_t ret = patternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft), &pInfo);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
@ -232,14 +232,14 @@ static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) {
return taosArraySearchString(arr, pLeft) == NULL ? 0 : 1;
}
static UNUSED_FUNC int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
const wchar_t* pattern = pRight;
const wchar_t* str = pLeft;
int32_t ret = WCSPatternMatch(pattern, str, wcslen(str), &pInfo);
wchar_t pattern[128] = {0};
memcpy(pattern, varDataVal(pRight), varDataLen(pRight)/TSDB_NCHAR_SIZE);
assert(varDataLen(pRight) < 128);
int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}


@ -305,20 +305,11 @@ int taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clientI
sockFd = -1;
}
taosKeepTcpAlive(sockFd);
return sockFd;
}
void taosCloseTcpSocket(int sockFd) {
struct linger linger;
linger.l_onoff = 1;
linger.l_linger = 0;
if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_LINGER, (void *)&linger, sizeof(linger)) < 0) {
uError("setsockopt SO_LINGER failed: %d (%s)", errno, strerror(errno));
}
taosCloseSocket(sockFd);
}
int taosKeepTcpAlive(int sockFd) {
int alive = 1;
if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_KEEPALIVE, (void *)&alive, sizeof(alive)) < 0) {
@ -355,6 +346,15 @@ int taosKeepTcpAlive(int sockFd) {
return -1;
}
struct linger linger = {0};
linger.l_onoff = 1;
//linger.l_linger = 0;
if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_LINGER, (void *)&linger, sizeof(linger)) < 0) {
uError("setsockopt SO_LINGER failed: %d (%s)", errno, strerror(errno));
close(sockFd);
return -1;
}
return 0;
}


@ -1,130 +1,132 @@
#!/bin/bash
python3 ./test.py $1 -f insert/basic.py
python3 ./test.py $1 -f insert/int.py
python3 ./test.py $1 -f insert/float.py
python3 ./test.py $1 -f insert/bigint.py
python3 ./test.py $1 -f insert/bool.py
python3 ./test.py $1 -f insert/double.py
python3 ./test.py $1 -f insert/smallint.py
python3 ./test.py $1 -f insert/tinyint.py
python3 ./test.py $1 -f insert/date.py
python3 ./test.py $1 -f insert/binary.py
python3 ./test.py $1 -f insert/nchar.py
python3 ./test.py $1 -f insert/nchar-boundary.py
python3 ./test.py $1 -f insert/nchar-unicode.py
python3 ./test.py $1 -f insert/multi.py
python3 ./test.py -f insert/basic.py
python3 ./test.py -f insert/int.py
python3 ./test.py -f insert/float.py
python3 ./test.py -f insert/bigint.py
python3 ./test.py -f insert/bool.py
python3 ./test.py -f insert/double.py
python3 ./test.py -f insert/smallint.py
python3 ./test.py -f insert/tinyint.py
python3 ./test.py -f insert/date.py
python3 ./test.py -f insert/binary.py
python3 ./test.py -f insert/nchar.py
python3 ./test.py -f insert/nchar-boundary.py
python3 ./test.py -f insert/nchar-unicode.py
python3 ./test.py -f insert/multi.py
python3 ./test.py -f insert/randomNullCommit.py
python3 ./test.py $1 -f table/column_name.py
python3 ./test.py $1 -f table/column_num.py
python3 ./test.py $1 -f table/db_table.py
python3 ./test.py $1 -f table/tablename-boundary.py
python3 ./test.py -f table/column_name.py
python3 ./test.py -f table/column_num.py
python3 ./test.py -f table/db_table.py
python3 ./test.py -f table/tablename-boundary.py
# tag
python3 ./test.py $1 -f tag_lite/filter.py
python3 ./test.py $1 -f tag_lite/create-tags-boundary.py
python3 ./test.py $1 -f tag_lite/3.py
python3 ./test.py $1 -f tag_lite/4.py
python3 ./test.py $1 -f tag_lite/5.py
python3 ./test.py $1 -f tag_lite/6.py
python3 ./test.py $1 -f tag_lite/add.py
python3 ./test.py $1 -f tag_lite/bigint.py
python3 ./test.py $1 -f tag_lite/binary_binary.py
python3 ./test.py $1 -f tag_lite/binary.py
python3 ./test.py $1 -f tag_lite/bool_binary.py
python3 ./test.py $1 -f tag_lite/bool_int.py
python3 ./test.py $1 -f tag_lite/bool.py
python3 ./test.py $1 -f tag_lite/change.py
python3 ./test.py $1 -f tag_lite/column.py
python3 ./test.py $1 -f tag_lite/commit.py
python3 ./test.py $1 -f tag_lite/create.py
python3 ./test.py $1 -f tag_lite/datatype.py
python3 ./test.py $1 -f tag_lite/datatype-without-alter.py
python3 ./test.py $1 -f tag_lite/delete.py
python3 ./test.py $1 -f tag_lite/double.py
python3 ./test.py $1 -f tag_lite/float.py
python3 ./test.py $1 -f tag_lite/int_binary.py
python3 ./test.py $1 -f tag_lite/int_float.py
python3 ./test.py $1 -f tag_lite/int.py
python3 ./test.py $1 -f tag_lite/set.py
python3 ./test.py $1 -f tag_lite/smallint.py
python3 ./test.py $1 -f tag_lite/tinyint.py
python3 ./test.py -f tag_lite/filter.py
python3 ./test.py -f tag_lite/create-tags-boundary.py
python3 ./test.py -f tag_lite/3.py
python3 ./test.py -f tag_lite/4.py
python3 ./test.py -f tag_lite/5.py
python3 ./test.py -f tag_lite/6.py
python3 ./test.py -f tag_lite/add.py
python3 ./test.py -f tag_lite/bigint.py
python3 ./test.py -f tag_lite/binary_binary.py
python3 ./test.py -f tag_lite/binary.py
python3 ./test.py -f tag_lite/bool_binary.py
python3 ./test.py -f tag_lite/bool_int.py
python3 ./test.py -f tag_lite/bool.py
python3 ./test.py -f tag_lite/change.py
python3 ./test.py -f tag_lite/column.py
python3 ./test.py -f tag_lite/commit.py
python3 ./test.py -f tag_lite/create.py
python3 ./test.py -f tag_lite/datatype.py
python3 ./test.py -f tag_lite/datatype-without-alter.py
python3 ./test.py -f tag_lite/delete.py
python3 ./test.py -f tag_lite/double.py
python3 ./test.py -f tag_lite/float.py
python3 ./test.py -f tag_lite/int_binary.py
python3 ./test.py -f tag_lite/int_float.py
python3 ./test.py -f tag_lite/int.py
python3 ./test.py -f tag_lite/set.py
python3 ./test.py -f tag_lite/smallint.py
python3 ./test.py -f tag_lite/tinyint.py
python3 ./test.py $1 -f dbmgmt/database-name-boundary.py
python3 ./test.py -f dbmgmt/database-name-boundary.py
python3 ./test.py $1 -f import_merge/importBlock1HO.py
python3 ./test.py $1 -f import_merge/importBlock1HPO.py
python3 ./test.py $1 -f import_merge/importBlock1H.py
python3 ./test.py $1 -f import_merge/importBlock1S.py
python3 ./test.py $1 -f import_merge/importBlock1Sub.py
python3 ./test.py $1 -f import_merge/importBlock1TO.py
python3 ./test.py $1 -f import_merge/importBlock1TPO.py
python3 ./test.py $1 -f import_merge/importBlock1T.py
python3 ./test.py $1 -f import_merge/importBlock2HO.py
python3 ./test.py $1 -f import_merge/importBlock2HPO.py
python3 ./test.py $1 -f import_merge/importBlock2H.py
python3 ./test.py $1 -f import_merge/importBlock2S.py
python3 ./test.py $1 -f import_merge/importBlock2Sub.py
python3 ./test.py $1 -f import_merge/importBlock2TO.py
python3 ./test.py $1 -f import_merge/importBlock2TPO.py
python3 ./test.py $1 -f import_merge/importBlock2T.py
python3 ./test.py $1 -f import_merge/importBlockbetween.py
python3 ./test.py $1 -f import_merge/importCacheFileHO.py
python3 ./test.py $1 -f import_merge/importCacheFileHPO.py
python3 ./test.py $1 -f import_merge/importCacheFileH.py
python3 ./test.py $1 -f import_merge/importCacheFileS.py
python3 ./test.py $1 -f import_merge/importCacheFileSub.py
python3 ./test.py $1 -f import_merge/importCacheFileTO.py
python3 ./test.py $1 -f import_merge/importCacheFileTPO.py
python3 ./test.py $1 -f import_merge/importCacheFileT.py
python3 ./test.py $1 -f import_merge/importDataH2.py
python3 ./test.py $1 -f import_merge/importDataHO2.py
python3 ./test.py $1 -f import_merge/importDataHO.py
python3 ./test.py $1 -f import_merge/importDataHPO.py
python3 ./test.py $1 -f import_merge/importDataLastHO.py
python3 ./test.py $1 -f import_merge/importDataLastHPO.py
python3 ./test.py $1 -f import_merge/importDataLastH.py
python3 ./test.py $1 -f import_merge/importDataLastS.py
python3 ./test.py $1 -f import_merge/importDataLastSub.py
python3 ./test.py $1 -f import_merge/importDataLastTO.py
python3 ./test.py $1 -f import_merge/importDataLastTPO.py
python3 ./test.py $1 -f import_merge/importDataLastT.py
python3 ./test.py $1 -f import_merge/importDataS.py
python3 ./test.py $1 -f import_merge/importDataSub.py
python3 ./test.py $1 -f import_merge/importDataTO.py
python3 ./test.py $1 -f import_merge/importDataTPO.py
python3 ./test.py $1 -f import_merge/importDataT.py
python3 ./test.py $1 -f import_merge/importHeadOverlap.py
python3 ./test.py $1 -f import_merge/importHeadPartOverlap.py
python3 ./test.py $1 -f import_merge/importHead.py
python3 ./test.py $1 -f import_merge/importHORestart.py
python3 ./test.py $1 -f import_merge/importHPORestart.py
python3 ./test.py $1 -f import_merge/importHRestart.py
python3 ./test.py $1 -f import_merge/importLastHO.py
python3 ./test.py $1 -f import_merge/importLastHPO.py
python3 ./test.py $1 -f import_merge/importLastH.py
python3 ./test.py $1 -f import_merge/importLastS.py
python3 ./test.py $1 -f import_merge/importLastSub.py
python3 ./test.py $1 -f import_merge/importLastTO.py
python3 ./test.py $1 -f import_merge/importLastTPO.py
python3 ./test.py $1 -f import_merge/importLastT.py
python3 ./test.py $1 -f import_merge/importSpan.py
python3 ./test.py $1 -f import_merge/importSRestart.py
python3 ./test.py $1 -f import_merge/importSubRestart.py
python3 ./test.py $1 -f import_merge/importTailOverlap.py
python3 ./test.py $1 -f import_merge/importTailPartOverlap.py
python3 ./test.py $1 -f import_merge/importTail.py
python3 ./test.py $1 -f import_merge/importToCommit.py
python3 ./test.py $1 -f import_merge/importTORestart.py
python3 ./test.py $1 -f import_merge/importTPORestart.py
python3 ./test.py $1 -f import_merge/importTRestart.py
python3 ./test.py -f import_merge/importBlock1HO.py
python3 ./test.py -f import_merge/importBlock1HPO.py
python3 ./test.py -f import_merge/importBlock1H.py
python3 ./test.py -f import_merge/importBlock1S.py
python3 ./test.py -f import_merge/importBlock1Sub.py
python3 ./test.py -f import_merge/importBlock1TO.py
python3 ./test.py -f import_merge/importBlock1TPO.py
python3 ./test.py -f import_merge/importBlock1T.py
python3 ./test.py -f import_merge/importBlock2HO.py
python3 ./test.py -f import_merge/importBlock2HPO.py
python3 ./test.py -f import_merge/importBlock2H.py
python3 ./test.py -f import_merge/importBlock2S.py
python3 ./test.py -f import_merge/importBlock2Sub.py
python3 ./test.py -f import_merge/importBlock2TO.py
python3 ./test.py -f import_merge/importBlock2TPO.py
python3 ./test.py -f import_merge/importBlock2T.py
python3 ./test.py -f import_merge/importBlockbetween.py
python3 ./test.py -f import_merge/importCacheFileHO.py
python3 ./test.py -f import_merge/importCacheFileHPO.py
python3 ./test.py -f import_merge/importCacheFileH.py
python3 ./test.py -f import_merge/importCacheFileS.py
python3 ./test.py -f import_merge/importCacheFileSub.py
python3 ./test.py -f import_merge/importCacheFileTO.py
python3 ./test.py -f import_merge/importCacheFileTPO.py
python3 ./test.py -f import_merge/importCacheFileT.py
python3 ./test.py -f import_merge/importDataH2.py
python3 ./test.py -f import_merge/importDataHO2.py
python3 ./test.py -f import_merge/importDataHO.py
python3 ./test.py -f import_merge/importDataHPO.py
python3 ./test.py -f import_merge/importDataLastHO.py
python3 ./test.py -f import_merge/importDataLastHPO.py
python3 ./test.py -f import_merge/importDataLastH.py
python3 ./test.py -f import_merge/importDataLastS.py
python3 ./test.py -f import_merge/importDataLastSub.py
python3 ./test.py -f import_merge/importDataLastTO.py
python3 ./test.py -f import_merge/importDataLastTPO.py
python3 ./test.py -f import_merge/importDataLastT.py
python3 ./test.py -f import_merge/importDataS.py
python3 ./test.py -f import_merge/importDataSub.py
python3 ./test.py -f import_merge/importDataTO.py
python3 ./test.py -f import_merge/importDataTPO.py
python3 ./test.py -f import_merge/importDataT.py
python3 ./test.py -f import_merge/importHeadOverlap.py
python3 ./test.py -f import_merge/importHeadPartOverlap.py
python3 ./test.py -f import_merge/importHead.py
python3 ./test.py -f import_merge/importHORestart.py
python3 ./test.py -f import_merge/importHPORestart.py
python3 ./test.py -f import_merge/importHRestart.py
python3 ./test.py -f import_merge/importLastHO.py
python3 ./test.py -f import_merge/importLastHPO.py
python3 ./test.py -f import_merge/importLastH.py
python3 ./test.py -f import_merge/importLastS.py
python3 ./test.py -f import_merge/importLastSub.py
python3 ./test.py -f import_merge/importLastTO.py
python3 ./test.py -f import_merge/importLastTPO.py
python3 ./test.py -f import_merge/importLastT.py
python3 ./test.py -f import_merge/importSpan.py
python3 ./test.py -f import_merge/importSRestart.py
python3 ./test.py -f import_merge/importSubRestart.py
python3 ./test.py -f import_merge/importTailOverlap.py
python3 ./test.py -f import_merge/importTailPartOverlap.py
python3 ./test.py -f import_merge/importTail.py
python3 ./test.py -f import_merge/importToCommit.py
python3 ./test.py -f import_merge/importTORestart.py
python3 ./test.py -f import_merge/importTPORestart.py
python3 ./test.py -f import_merge/importTRestart.py
python3 ./test.py -f import_merge/importInsertThenImport.py
# user
python3 ./test.py $1 -f user/user_create.py
python3 ./test.py $1 -f user/pass_len.py
python3 ./test.py -f user/user_create.py
python3 ./test.py -f user/pass_len.py
# table
#python3 ./test.py $1 -f table/del_stable.py
#python3 ./test.py -f table/del_stable.py
#query
python3 ./test.py $1 -f query/filter.py
python3 ./test.py -f query/filter.py


@ -0,0 +1,85 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdSql.prepare()
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, i int)')
tdLog.info("================= step2")
tdLog.info("insert 100 sequential data")
startTime = self.startTime
for rid in range(1, 101):
tdSql.execute(
'insert into tb1 values(%ld, %d)' %
(startTime + rid, rid))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(100)
tdLog.info("================= step4")
tdLog.info("import 100 sequential data")
startTime = self.startTime
for rid in range(1, 101):
tdSql.execute(
'import into tb1 values(%ld, %d)' %
(startTime + rid, 100 + rid))
tdSql.query('select * from tb1')
tdSql.checkRows(100)
tdSql.checkData(0, 1, 1)
tdLog.info("================= step5")
tdDnodes.stop(1)
tdDnodes.start(1)
tdLog.sleep(10)
tdLog.info("================= step6")
tdLog.info("import 100 sequential data again")
startTime = self.startTime
for rid in range(1, 101):
tdSql.execute(
'import into tb1 values(%ld, %d)' %
(startTime + rid, 100 + rid))
tdLog.info("================= step7")
tdSql.query('select * from tb1')
tdSql.checkRows(100)
tdSql.checkData(0, 1, 1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,64 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import random
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
tdLog.info("=============== step1")
tdSql.execute('create table tb (ts timestamp, speed int, temp float, note binary(5), flag bool)')
numOfRecords = 0
randomList = [10, 50, 100, 500, 1000, 5000]
for i in range(0, 10):
num = random.choice(randomList)
tdLog.info("will insert %d records" % num)
for x in range(0, num):
tdLog.info(
'insert into tb values (now + %da, NULL, NULL, NULL, TRUE)' % x)
tdSql.execute(
'insert into tb values (now + %da, NULL, NULL, NULL, TRUE)' % x)
numOfRecords = numOfRecords + num
tdSql.query("select * from tb")
tdSql.checkRows(numOfRecords)
tdSql.checkData(numOfRecords-num, 1, None)
tdSql.checkData(numOfRecords-1, 2, None)
tdLog.info("stop dnode to commit data to disk")
tdDnodes.stop(1)
tdDnodes.start(1)
tdLog.sleep(5)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -19,7 +19,7 @@ from util.sql import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())


@ -50,6 +50,8 @@ python3 ./test.py $1 -f import_merge/importTail.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTRestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importInsertThenImport.py
python3 ./test.py $1 -s && sleep 1
#tag
python3 ./test.py $1 -f tag_lite/filter.py


@ -375,7 +375,7 @@ endi
print ============================ step11
sql_error create table db.t400 using db.st tags(10)
sql show db.tables
if $rows != 50 then
if $rows != 80 then
return -1
endi
@ -427,7 +427,7 @@ sql insert into db.t418 values(now, 1)
sql insert into db.t419 values(now, 1)
sql show db.tables
if $rows != 50 then
if $rows != 100 then
return -1
endi


@ -0,0 +1,104 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c walLevel -v 2
print ========== prepare data
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 3000
sql connect
sql create dnode $hostname2
sleep 2000
sql create database db maxTables 4
sql use db
print ========== step1
sql create table mt (ts timestamp, tbcol int) TAGS(tgcol int)
sql create table db.t1 using db.mt tags(1)
sql create table db.t2 using db.mt tags(2)
sql create table db.t3 using db.mt tags(3)
sql create table db.t4 using db.mt tags(4)
sql create table db.t5 using db.mt tags(5)
sql create table db.t6 using db.mt tags(6)
sql create table db.t7 using db.mt tags(7)
sql create table db.t8 using db.mt tags(8)
sql create table db.t9 using db.mt tags(9)
sql create table db.t10 using db.mt tags(10)
sql create table db.t11 using db.mt tags(11)
sql create table db.t12 using db.mt tags(12)
sql create table db.t13 using db.mt tags(13)
sql create table db.t14 using db.mt tags(14)
sql create table db.t15 using db.mt tags(15)
sql create table db.t16 using db.mt tags(16)
sql insert into db.t1 values(now, 1)
sql insert into db.t2 values(now, 1)
sql insert into db.t3 values(now, 1)
sql insert into db.t4 values(now, 1)
sql insert into db.t5 values(now, 1)
sql insert into db.t6 values(now, 1)
sql insert into db.t7 values(now, 1)
sql insert into db.t8 values(now, 1)
sql insert into db.t9 values(now, 1)
sql insert into db.t10 values(now, 1)
sql insert into db.t11 values(now, 1)
sql insert into db.t12 values(now, 1)
sql insert into db.t13 values(now, 1)
sql insert into db.t14 values(now, 1)
sql insert into db.t15 values(now, 1)
sql insert into db.t16 values(now, 1)
print ========== step2
sql show tables
print $rows
if $rows != 16 then
return -1
endi
sql select * from mt
print $rows
if $rows != 16 then
return -1
endi
print ========== step3
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 500
sql drop dnode $hostname2
sleep 2000
print ========== step3
sql show tables
print $rows
if $rows != 8 then
return -1
endi
sql select * from mt
print $rows
if $rows != 8 then
return -1
endi
sql select * from db.t5
if $rows != 1 then
return -1
endi
sql select * from db.t13
if $rows != 1 then
return -1
endi
sql_error select * from db.t1
sql_error select * from db.t9
system sh/exec.sh -n dnode1 -s stop -x SIGINT


@ -37,7 +37,7 @@ endi
print =============== step3 - query data
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6020/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6020/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6020/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6020/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6020/rest/sql


@ -32,10 +32,10 @@ sql connect
sql create database ir1db days 7
sql use ir1db
sql create table tb(ts timestamp, i int)
sql create table tb(ts timestamp, i bigint)
print ================= step1
sql import into tb values(1520000010000, 10000)
sql import into tb values(1520000010000, 1520000010000)
sql select * from tb;
print $rows
if $rows != 1 then
@ -43,7 +43,7 @@ if $rows != 1 then
endi
print ================= step2
sql insert into tb values(1520000008000, 8000)
sql insert into tb values(1520000008000, 1520000008000)
print $rows
sql select * from tb;
if $rows != 2 then
@ -51,7 +51,7 @@ if $rows != 2 then
endi
print ================= step3
sql insert into tb values(1520000020000, 20000)
sql insert into tb values(1520000020000, 1520000020000)
sql select * from tb;
print $rows
if $rows != 3 then
@ -59,9 +59,9 @@ if $rows != 3 then
endi
print ================= step4
sql import into tb values(1520000009000, 9000)
sql import into tb values(1520000015000, 15000)
sql import into tb values(1520000030000, 30000)
sql import into tb values(1520000009000, 1520000009000)
sql import into tb values(1520000015000, 1520000015000)
sql import into tb values(1520000030000, 1520000030000)
sql select * from tb;
print $rows
if $rows != 6 then
@ -69,10 +69,10 @@ if $rows != 6 then
endi
print ================= step5
sql insert into tb values(1520000008000, 8000)
sql insert into tb values(1520000014000, 14000)
sql insert into tb values(1520000025000, 25000)
sql insert into tb values(1520000040000, 40000)
sql insert into tb values(1520000008000, 1520000008000)
sql insert into tb values(1520000014000, 1520000014000)
sql insert into tb values(1520000025000, 1520000025000)
sql insert into tb values(1520000040000, 1520000040000)
sql select * from tb;
print $rows
if $rows != 9 then
@ -80,11 +80,11 @@ if $rows != 9 then
endi
print ================= step6
sql import into tb values(1520000007000, 7000)
sql import into tb values(1520000012000, 12000)
sql import into tb values(1520000023000, 23000)
sql import into tb values(1520000034000, 34000)
sql import into tb values(1520000050000, 50000)
sql import into tb values(1520000007000, 1520000007000)
sql import into tb values(1520000012000, 1520000012000)
sql import into tb values(1520000023000, 1520000023000)
sql import into tb values(1520000034000, 1520000034000)
sql import into tb values(1520000050000, 1520000050000)
sql select * from tb;
print $rows
if $rows != 14 then
@ -104,11 +104,11 @@ if $rows != 14 then
endi
print ================= step7
sql import into tb values(1520000007001, 7001)
sql import into tb values(1520000012001, 12001)
sql import into tb values(1520000023001, 23001)
sql import into tb values(1520000034001, 34001)
sql import into tb values(1520000050001, 50001)
sql import into tb values(1520000007001, 1520000007001)
sql import into tb values(1520000012001, 1520000012001)
sql import into tb values(1520000023001, 1520000023001)
sql import into tb values(1520000034001, 1520000034001)
sql import into tb values(1520000050001, 1520000050001)
sql select * from tb;
print $rows
if $rows != 19 then
@ -117,10 +117,10 @@ if $rows != 19 then
endi
print ================= step8
sql insert into tb values(1520000008002, 8002)
sql insert into tb values(1520000014002, 14002)
sql insert into tb values(1520000025002, 25002)
sql insert into tb values(1520000060000, 60000)
sql insert into tb values(1520000008002, 1520000008002)
sql insert into tb values(1520000014002, 1520000014002)
sql insert into tb values(1520000025002, 1520000025002)
sql insert into tb values(1520000060000, 1520000060000)
sql select * from tb;
print $rows
if $rows != 23 then
@ -142,18 +142,18 @@ print ================= step9
#sql import into tb values(now+14d, 50001)
#sql import into tb values(now+16d, 500051)
sql import into tb values(1517408000000, 7003)
sql import into tb values(1518272000000, 34003)
sql import into tb values(1519136000000, 34003)
sql import into tb values(1519568000000, 34003)
sql import into tb values(1519654400000, 50001)
sql import into tb values(1519827200000, 50001)
sql import into tb values(1520345600000, 50001)
sql import into tb values(1520691200000, 50002)
sql import into tb values(1520864000000, 50003)
sql import into tb values(1521900800000, 50004)
sql import into tb values(1523110400000, 50001)
sql import into tb values(1521382400000, 500051)
sql import into tb values(1517408000000, 1517408000000)
sql import into tb values(1518272000000, 1518272000000)
sql import into tb values(1519136000000, 1519136000000)
sql import into tb values(1519568000000, 1519568000000)
sql import into tb values(1519654400000, 1519654400000)
sql import into tb values(1519827200000, 1519827200000)
sql import into tb values(1520345600000, 1520345600000)
sql import into tb values(1520691200000, 1520691200000)
sql import into tb values(1520864000000, 1520864000000)
sql import into tb values(1521900800000, 1521900800000)
sql import into tb values(1523110400000, 1523110400000)
sql import into tb values(1521382400000, 1521382400000)
sql select * from tb;
print $rows
if $rows != 35 then
@ -176,7 +176,7 @@ endi
print ================= step11
#sql import into tb values(now-50d, 7003) (now-48d, 7003) (now-46d, 7003) (now-44d, 7003) (now-42d, 7003)
sql import into tb values(1515680000000, 7003) (1515852800000, 7003) (1516025600000, 7003) (1516198400000, 7003) (1516371200000, 7003)
sql import into tb values(1515680000000, 1) (1515852800000, 2) (1516025600000, 3) (1516198400000, 4) (1516371200000, 5)
sql select * from tb;
if $rows != 40 then
return -1
@ -184,8 +184,8 @@ endi
print ================= step12
#1520000000000
#sql import into tb values(now-19d, 7003) (now-18d, 7003) (now-17d, 7003) (now-16d, 7003) (now-15d, 7003) (now-14d, 7003) (now-13d, 7003) (now-12d, 7003) (now-11d, 7003)
sql import into tb values(1518358400000, 7003) (1518444800000, 7003) (1518531200000, 7003) (1518617600000, 7003) (1518704000000, 7003) (1518790400000, 7003) (1518876800000, 7003) (1518963200000, 7003) (1519049600000, 7003)
#sql import into tb values(now-19d, -19) (now-18d, -18) (now-17d, -17) (now-16d, -16) (now-15d, -15) (now-14d, -14) (now-13d, -13) (now-12d, -12) (now-11d, -11)
sql import into tb values(1518358400000, 6) (1518444800000, 7) (1518531200000, 8) (1518617600000, 9) (1518704000000, 10) (1518790400000, 11) (1518876800000, 12) (1518963200000, 13) (1519049600000, 14)
sql select * from tb;
print $rows
if $rows != 49 then
@ -195,13 +195,13 @@ endi
print ================= step14
#1520000000000
#sql import into tb values(now-48d, 34003)
#sql import into tb values(now-38d, 50001)
#sql import into tb values(now-28d, 50001)
#sql import into tb values(now-48d, -48)
#sql import into tb values(now-38d, -38)
#sql import into tb values(now-28d, -28)
sql import into tb values(1515852800001, 34003)
sql import into tb values(1516716800000, 50001)
sql import into tb values(1517580800000, 50001)
sql import into tb values(1515852800001, -48)
sql import into tb values(1516716800000, -38)
sql import into tb values(1517580800000, -28)
sql select * from tb;
if $rows != 52 then


@ -65,7 +65,7 @@ sleep 2000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
#run general/parser/limit1_tb.sim
run general/parser/limit1_tb.sim
run general/parser/limit1_stb.sim
system sh/exec.sh -n dnode1 -s stop -x SIGINT


@ -21,7 +21,7 @@ $stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
sql create database $db tblocks 100
sql create database $db cache 16
print ====== create tables
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int)


@ -47,19 +47,21 @@ sql select * from $stb order by ts desc limit 5
if $rows != 5 then
return -1
endi
sql select * from $stb order by ts desc limit 5 offset 1
if $rows != 5 then
return -1
endi
if $data01 != 8 then
if $data01 != 9 then
return -1
endi
if $data11 != 7 then
if $data11 != 9 then
return -1
endi
if $data41 != 4 then
if $data41 != 9 then
return -1
endi
sql select * from $stb order by ts asc limit 5
if $rows != 5 then
return -1
@ -67,19 +69,28 @@ endi
if $data00 != @18-09-17 09:00:00.000@ then
return -1
endi
if $data40 != @18-09-17 09:00:00.000@ then
return -1
endi
if $data01 != 0 then
return -1
endi
if $data12 != 1 then
print data12 = $data12
if $data12 != NULL then
return -1
endi
if $data24 != 2.000000000 then
if $data24 != NULL then
return -1
endi
if $data35 != 3 then
if $data35 != 0 then
return -1
endi
if $data49 != nchar4 then
if $data49 != nchar0 then
return -1
endi
@ -87,10 +98,18 @@ sql select * from $stb order by ts asc limit 5 offset 1
if $rows != 5 then
return -1
endi
if $data01 != 1 then
if $data01 != 0 then
return -1
endi
if $data41 != 5 then
if $data41 != 0 then
return -1
endi
if $data40 != @18-09-17 09:00:00.000@ then
return -1
endi
if $data00 != @18-09-17 09:00:00.000@ then
return -1
endi
@ -98,6 +117,7 @@ sql select * from $stb limit 500 offset 1
if $rows != 99 then
return -1
endi
if $data01 != 1 then
return -1
endi
@ -629,6 +649,7 @@ endi
if $data09 != 7 then
return -1
endi
print $data13
if $data13 != 0.000000000 then
return -1
endi
@ -656,6 +677,8 @@ endi
if $data35 != 0 then
return -1
endi
print $data36
if $data36 != 0.000000000 then
return -1
endi
@ -675,49 +698,49 @@ if $data59 != 2 then
return -1
endi
sql select max(c2), min(c2), avg(c2), count(c2), sum(c2), spread(c2), first(c2), last(c2) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 3 and t1 < 6 interval(5m) group by t1 order by t1 desc limit 3 offset 1
if $rows != 3 then
return -1
endi
if $data00 != @18-09-17 09:20:00.000@ then
return -1
endi
if $data01 != 2 then
return -1
endi
if $data02 != 2 then
return -1
endi
if $data09 != 4 then
return -1
endi
if $data13 != 3.000000000 then
return -1
endi
if $data19 != 4 then
return -1
endi
if $data20 != @18-09-17 09:40:00.000@ then
return -1
endi
if $data24 != 1 then
return -1
endi
if $data25 != 4 then
return -1
endi
if $data26 != 0.000000000 then
return -1
endi
if $data27 != 4 then
return -1
endi
if $data28 != 4 then
return -1
endi
if $data29 != 4 then
return -1
endi
#sql select max(c2), min(c2), avg(c2), count(c2), sum(c2), spread(c2), first(c2), last(c2) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 3 and t1 < 6 interval(5m) group by t1 order by t1 desc limit 3 offset 1
#if $rows != 3 then
# return -1
#endi
#if $data00 != @18-09-17 09:20:00.000@ then
# return -1
#endi
#if $data01 != 2 then
# return -1
#endi
#if $data02 != 2 then
# return -1
#endi
#if $data09 != 4 then
# return -1
#endi
#if $data13 != 3.000000000 then
# return -1
#endi
#if $data19 != 4 then
# return -1
#endi
#if $data20 != @18-09-17 09:40:00.000@ then
# return -1
#endi
#if $data24 != 1 then
# return -1
#endi
#if $data25 != 4 then
# return -1
#endi
#if $data26 != 0.000000000 then
# return -1
#endi
#if $data27 != 4 then
# return -1
#endi
#if $data28 != 4 then
# return -1
#endi
#if $data29 != 4 then
# return -1
#endi
sql select max(c2), min(c2), avg(c2), count(c2), spread(c2), first(c2), last(c2), count(ts) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 3 and t1 < 6 interval(5m) group by t1 order by t1 desc limit 3 offset 1
if $rows != 6 then


@ -38,7 +38,6 @@ sleep 2000
run general/parser/lastrow.sim
sleep 2000
run general/parser/nchar.sim
sleep 2000
run general/parser/null_char.sim
sleep 2000
@ -46,7 +45,26 @@ run general/parser/single_row_in_tb.sim
sleep 2000
run general/parser/select_from_cache_disk.sim
sleep 2000
run general/parser/selectResNum.sim
sleep 2000
run general/parser/mixed_blocks.sim
sleep 2000
run general/parser/limit1.sim
sleep 2000
run general/parser/limit.sim
sleep 2000
run general/parser/limit1_tblocks100.sim
sleep 2000
run general/parser/select_across_vnodes.sim
sleep 2000
run general/parser/limit2.sim
sleep 2000
run general/parser/tbnameIn.sim
sleep 2000
run general/parser/slimit.sim
sleep 2000
run general/parser/slimit1.sim
sleep 2000
run general/parser/fill.sim
@ -57,31 +75,15 @@ run general/parser/tags_dynamically_specifiy.sim
sleep 2000
run general/parser/interp.sim
sleep 2000
run general/parser/limit1.sim
sleep 2000
run general/parser/limit1_tblocks100.sim
sleep 2000
run general/parser/limit2.sim
sleep 2000
run general/parser/mixed_blocks.sim
sleep 2000
run general/parser/selectResNum.sim
sleep 2000
run general/parser/select_across_vnodes.sim
sleep 2000
run general/parser/set_tag_vals.sim
sleep 2000
run general/parser/slimit.sim
sleep 2000
run general/parser/slimit1.sim
sleep 2000
run general/parser/slimit_alter_tags.sim
sleep 2000
run general/parser/stream_on_sys.sim
sleep 2000
run general/parser/stream.sim
sleep 2000
run general/parser/tbnameIn.sim
sleep 2000
run general/parser/where.sim
sleep 2000


@ -44,7 +44,7 @@ cd ../../../debug; make
./test.sh -f general/compute/top.sim
./test.sh -f general/db/alter_option.sim
#./test.sh -f general/db/alter_tables_d2.sim
./test.sh -f general/db/alter_tables_d2.sim
./test.sh -f general/db/alter_tables_v1.sim
./test.sh -f general/db/alter_tables_v4.sim
./test.sh -f general/db/alter_vgroups.sim
@ -61,10 +61,11 @@ cd ../../../debug; make
./test.sh -f general/db/delete_writing1.sim
./test.sh -f general/db/delete_writing2.sim
./test.sh -f general/db/delete.sim
./test.sh -f general/db/dropdnodes.sim
./test.sh -f general/db/len.sim
./test.sh -f general/db/repeat.sim
./test.sh -f general/db/tables.sim
#liao ./test.sh -f general/db/vnodes.sim
./test.sh -f general/db/vnodes.sim
./test.sh -f general/field/2.sim
./test.sh -f general/field/3.sim
@ -90,10 +91,10 @@ cd ../../../debug; make
./test.sh -f general/import/basic.sim
./test.sh -f general/import/commit.sim
./test.sh -f general/import/large.sim
#liao ./test.sh -f general/import/replica1.sim
./test.sh -f general/import/replica1.sim
./test.sh -f general/insert/basic.sim
#liao ./test.sh -f general/insert/insert_drop.sim
./test.sh -f general/insert/insert_drop.sim
./test.sh -f general/insert/query_block1_memory.sim
./test.sh -f general/insert/query_block2_memory.sim
./test.sh -f general/insert/query_block1_file.sim
@ -120,22 +121,22 @@ cd ../../../debug; make
./test.sh -f general/parser/insert_tb.sim
./test.sh -f general/parser/first_last.sim
# ./test.sh -f general/parser/import_file.sim
# ./test.sh -f general/parser/lastrow.sim
./test.sh -f general/parser/lastrow.sim
# ./test.sh -f general/parser/nchar.sim
# ./test.sh -f general/parser/null_char.sim
# ./test.sh -f general/parser/single_row_in_tb.sim
./test.sh -f general/parser/single_row_in_tb.sim
./test.sh -f general/parser/select_from_cache_disk.sim
# ./test.sh -f general/parser/limit.sim
./test.sh -f general/parser/limit.sim
# ./test.sh -f general/parser/fill.sim
# ./test.sh -f general/parser/fill_stb.sim
# ./test.sh -f general/parser/tags_dynamically_specifiy.sim
# ./test.sh -f general/parser/interp.sim
# ./test.sh -f general/parser/limit1.sim
# ./test.sh -f general/parser/limit1_tblocks100.sim
./test.sh -f general/parser/limit1.sim
./test.sh -f general/parser/limit1_tblocks100.sim
# ./test.sh -f general/parser/limit2.sim
# ./test.sh -f general/parser/mixed_blocks.sim
./test.sh -f general/parser/mixed_blocks.sim
./test.sh -f general/parser/selectResNum.sim
# ./test.sh -f general/parser/select_across_vnodes.sim
./test.sh -f general/parser/select_across_vnodes.sim
# ./test.sh -f general/parser/set_tag_vals.sim
# ./test.sh -f general/parser/slimit.sim
./test.sh -f general/parser/slimit1.sim
@ -155,7 +156,7 @@ cd ../../../debug; make
#./test.sh -f general/parser/bug.sim
./test.sh -f general/stable/disk.sim
#liao ./test.sh -f general/stable/dnode3.sim
./test.sh -f general/stable/dnode3.sim
./test.sh -f general/stable/metrics.sim
./test.sh -f general/stable/values.sim
./test.sh -f general/stable/vnode3.sim
@ -246,8 +247,8 @@ cd ../../../debug; make
./test.sh -u -f unique/account/user_create.sim
./test.sh -u -f unique/account/user_len.sim
#liao wait ./test.sh -u -f unique/big/balance.sim
#liao wait ./test.sh -u -f unique/big/maxvnodes.sim
./test.sh -u -f unique/big/balance.sim
./test.sh -u -f unique/big/maxvnodes.sim
./test.sh -u -f unique/big/tcp.sim
./test.sh -u -f unique/cluster/balance1.sim
@ -273,25 +274,25 @@ cd ../../../debug; make
./test.sh -u -f unique/dnode/balance3.sim
./test.sh -u -f unique/dnode/balancex.sim
./test.sh -u -f unique/dnode/offline1.sim
#jeff ./test.sh -u -f unique/dnode/offline2.sim
./test.sh -u -f unique/dnode/offline2.sim
./test.sh -u -f unique/dnode/remove1.sim
#jeff ./test.sh -u -f unique/dnode/remove2.sim
./test.sh -u -f unique/dnode/remove2.sim
./test.sh -u -f unique/dnode/vnode_clean.sim
./test.sh -u -f unique/http/admin.sim
./test.sh -u -f unique/http/opentsdb.sim
#liao wait ./test.sh -u -f unique/import/replica2.sim
#liao wait ./test.sh -u -f unique/import/replica3.sim
./test.sh -u -f unique/import/replica2.sim
./test.sh -u -f unique/import/replica3.sim
#liao wait ./test.sh -u -f unique/stable/balance_replica1.sim
#liao wait ./test.sh -u -f unique/stable/dnode2_stop.sim
#liao wait ./test.sh -u -f unique/stable/dnode2.sim
#liao wait ./test.sh -u -f unique/stable/dnode3.sim
#liao wait ./test.sh -u -f unique/stable/replica2_dnode4.sim
#liao wait ./test.sh -u -f unique/stable/replica2_vnode3.sim
#liao wait ./test.sh -u -f unique/stable/replica3_dnode6.sim
#liao wait ./test.sh -u -f unique/stable/replica3_vnode3.sim
./test.sh -u -f unique/stable/balance_replica1.sim
./test.sh -u -f unique/stable/dnode2_stop.sim
./test.sh -u -f unique/stable/dnode2.sim
./test.sh -u -f unique/stable/dnode3.sim
./test.sh -u -f unique/stable/replica2_dnode4.sim
./test.sh -u -f unique/stable/replica2_vnode3.sim
./test.sh -u -f unique/stable/replica3_dnode6.sim
./test.sh -u -f unique/stable/replica3_vnode3.sim
./test.sh -u -f unique/mnode/mgmt22.sim
./test.sh -u -f unique/mnode/mgmt23.sim


@ -0,0 +1,79 @@
cd ../../debug; cmake ..
cd ../../debug; make
cd ../../../debug; cmake ..
cd ../../../debug; make
./test.sh -u -f unique/account/account_create.sim
./test.sh -u -f unique/account/account_delete.sim
./test.sh -u -f unique/account/account_len.sim
./test.sh -u -f unique/account/authority.sim
./test.sh -u -f unique/account/basic.sim
./test.sh -u -f unique/account/paras.sim
./test.sh -u -f unique/account/pass_alter.sim
./test.sh -u -f unique/account/pass_len.sim
./test.sh -u -f unique/account/usage.sim
./test.sh -u -f unique/account/user_create.sim
./test.sh -u -f unique/account/user_len.sim
./test.sh -u -f unique/big/balance.sim
./test.sh -u -f unique/big/maxvnodes.sim
./test.sh -u -f unique/big/tcp.sim
./test.sh -u -f unique/cluster/balance1.sim
./test.sh -u -f unique/cluster/balance2.sim
./test.sh -u -f unique/cluster/balance3.sim
./test.sh -u -f unique/cluster/cache.sim
./test.sh -u -f unique/column/replica3.sim
./test.sh -u -f unique/db/commit.sim
./test.sh -u -f unique/db/delete.sim
./test.sh -u -f unique/db/delete_part.sim
./test.sh -u -f unique/db/replica_add12.sim
./test.sh -u -f unique/db/replica_add13.sim
./test.sh -u -f unique/db/replica_add23.sim
./test.sh -u -f unique/db/replica_reduce21.sim
./test.sh -u -f unique/db/replica_reduce32.sim
./test.sh -u -f unique/db/replica_reduce31.sim
./test.sh -u -f unique/db/replica_part.sim
./test.sh -u -f unique/dnode/balance1.sim
./test.sh -u -f unique/dnode/balance2.sim
./test.sh -u -f unique/dnode/balance3.sim
./test.sh -u -f unique/dnode/balancex.sim
./test.sh -u -f unique/dnode/offline1.sim
./test.sh -u -f unique/dnode/offline2.sim
./test.sh -u -f unique/dnode/remove1.sim
./test.sh -u -f unique/dnode/remove2.sim
./test.sh -u -f unique/dnode/vnode_clean.sim
./test.sh -u -f unique/http/admin.sim
./test.sh -u -f unique/http/opentsdb.sim
./test.sh -u -f unique/import/replica2.sim
./test.sh -u -f unique/import/replica3.sim
./test.sh -u -f unique/stable/balance_replica1.sim
./test.sh -u -f unique/stable/dnode2_stop.sim
./test.sh -u -f unique/stable/dnode2.sim
./test.sh -u -f unique/stable/dnode3.sim
./test.sh -u -f unique/stable/replica2_dnode4.sim
./test.sh -u -f unique/stable/replica2_vnode3.sim
./test.sh -u -f unique/stable/replica3_dnode6.sim
./test.sh -u -f unique/stable/replica3_vnode3.sim
./test.sh -u -f unique/mnode/mgmt22.sim
./test.sh -u -f unique/mnode/mgmt23.sim
./test.sh -u -f unique/mnode/mgmt24.sim
./test.sh -u -f unique/mnode/mgmt25.sim
./test.sh -u -f unique/mnode/mgmt26.sim
./test.sh -u -f unique/mnode/mgmt33.sim
./test.sh -u -f unique/mnode/mgmt34.sim
./test.sh -u -f unique/mnode/mgmtr2.sim
./test.sh -u -f unique/vnode/many.sim
./test.sh -u -f unique/vnode/replica2_basic2.sim
./test.sh -u -f unique/vnode/replica2_repeat.sim
./test.sh -u -f unique/vnode/replica3_basic.sim
./test.sh -u -f unique/vnode/replica3_repeat.sim
./test.sh -u -f unique/vnode/replica3_vgroup.sim


@ -107,6 +107,7 @@ echo "monitorDebugFlag 131" >> $TAOS_CFG
echo "udebugFlag 131" >> $TAOS_CFG
echo "jnidebugFlag 131" >> $TAOS_CFG
echo "sdebugFlag 135" >> $TAOS_CFG
echo "qdebugFlag 135" >> $TAOS_CFG
echo "monitor 0" >> $TAOS_CFG
echo "monitorInterval 1" >> $TAOS_CFG
echo "http 0" >> $TAOS_CFG


@ -1,4 +1,14 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMPeers -v 2
system sh/cfg.sh -n dnode2 -c numOfMPeers -v 2
system sh/cfg.sh -n dnode3 -c numOfMPeers -v 2
return
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2


@ -93,7 +93,7 @@ $x = 0
show1:
$x = $x + 1
sleep 3000
if $x == 100 then
if $x == 10 then
return -1
endi
@ -107,6 +107,9 @@ if $data2_2 != 2 then
goto show1
endi
sql reset query cache
sleep 1000
sql select count(*) from t10
print select count(*) from t10 $data00 expect $rowNum
if $data00 != $rowNum then
@ -149,7 +152,7 @@ $x = 0
show3:
$x = $x + 1
sleep 3000
if $x == 100 then
if $x == 10 then
return -1
endi
@ -160,7 +163,7 @@ print dnode3 freeVnodes $data2_3
if $data2_1 != 2 then
goto show3
endi
if $data2_2 != NULL then
if $data2_2 != null then
goto show3
endi
if $data2_3 != 2 then
@ -169,6 +172,9 @@ endi
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sql reset query cache
sleep 1000
sql select count(*) from t10
print select count(*) from t10 $data00 expect $rowNum
if $data00 != $rowNum then
@ -206,22 +212,25 @@ $x = 0
show4:
$x = $x + 1
sleep 3000
if $x == 100 then
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 freeVnodes $data2_1
print dnode3 freeVnodes $data2_3
if $data2_1 != 0 then
if $data2_1 != 4 then
goto show4
endi
if $data2_3 != NULL then
if $data2_3 != null then
goto show4
endi
system sh/exec_up.sh -n dnode3 -s stop -x SIGINT
sql reset query cache
sleep 1000
sql select count(*) from t10
print select count(*) from t10 $data00 expect $rowNum
if $data00 != $rowNum then
@ -253,28 +262,33 @@ if $data00 != $totalNum then
endi
print ========== step5
sql alter database db replica 2
sql create dnode $hostname4
system sh/exec_up.sh -n dnode4 -s start
sql create dnode $hostname4
sleep 3000
sql alter database db replica 2
$x = 0
show5:
$x = $x + 1
sleep 3000
if $x == 100 then
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 freeVnodes $data2_1
print dnode4 freeVnodes $data2_4
if $data2_1 != 0 then
if $data2_1 != 4 then
goto show5
endi
if $data2_4 != 0 then
if $data2_4 != 4 then
goto show5
endi
sql reset query cache
sleep 1000
sql select count(*) from t10
print select count(*) from t10 $data00 expect $rowNum
if $data00 != $rowNum then


@ -1,8 +1,8 @@
system sh/stop_dnodes.sh
$totalVnodes = 100
$minVnodes = 50
$maxVnodes = 50
$totalVnodes = 20
$minVnodes = 10
$maxVnodes = 10
$maxTables = 4
$totalRows = $totalVnodes * $maxTables


@ -111,10 +111,10 @@ sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
if $data2_2 != null then
if $data2_1 != 1 then
goto show4
endi
if $data2_1 != 1 then
if $data2_2 != null then
goto show4
endi
if $data2_3 != 3 then


@ -20,10 +20,10 @@ system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 2000
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 2000
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 2000
system sh/cfg.sh -n dnode1 -c commitlog -v 0
system sh/cfg.sh -n dnode2 -c commitlog -v 0
system sh/cfg.sh -n dnode3 -c commitlog -v 0
system sh/cfg.sh -n dnode4 -c commitlog -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
print ========= start dnode1
system sh/exec_up.sh -n dnode1 -s start
@ -35,10 +35,10 @@ system sh/exec_up.sh -n dnode2 -s start
sql create database ir2db replica 2 days 7
sql use ir2db
sql create table tb(ts timestamp, i int)
sql create table tb(ts timestamp, i bigint)
print ================= step1
sql import into tb values(1520000010000, 10000)
sql import into tb values(1520000010000, 1520000010000)
sql select * from tb;
print $rows
if $rows != 1 then
@ -46,7 +46,7 @@ if $rows != 1 then
endi
print ================= step2
sql insert into tb values(1520000008000, 8000)
sql insert into tb values(1520000008000, 1520000008000)
print $rows
sql select * from tb;
if $rows != 2 then
@ -54,7 +54,7 @@ if $rows != 2 then
endi
print ================= step3
sql insert into tb values(1520000020000, 20000)
sql insert into tb values(1520000020000, 1520000020000)
sql select * from tb;
print $rows
if $rows != 3 then
@ -62,9 +62,9 @@ if $rows != 3 then
endi
print ================= step4
sql import into tb values(1520000009000, 9000)
sql import into tb values(1520000015000, 15000)
sql import into tb values(1520000030000, 30000)
sql import into tb values(1520000009000, 1520000009000)
sql import into tb values(1520000015000, 1520000015000)
sql import into tb values(1520000030000, 1520000030000)
sql select * from tb;
print $rows
if $rows != 6 then
@ -72,10 +72,10 @@ if $rows != 6 then
endi
print ================= step5
sql insert into tb values(1520000008000, 8000)
sql insert into tb values(1520000014000, 14000)
sql insert into tb values(1520000025000, 25000)
sql insert into tb values(1520000040000, 40000)
sql insert into tb values(1520000008000, 1520000008000)
sql insert into tb values(1520000014000, 1520000014000)
sql insert into tb values(1520000025000, 1520000025000)
sql insert into tb values(1520000040000, 1520000040000)
sql select * from tb;
print $rows
if $rows != 9 then
@ -83,11 +83,11 @@ if $rows != 9 then
endi
print ================= step6
sql import into tb values(1520000007000, 7000)
sql import into tb values(1520000012000, 12000)
sql import into tb values(1520000023000, 23000)
sql import into tb values(1520000034000, 34000)
sql import into tb values(1520000050000, 50000)
sql import into tb values(1520000007000, 1520000007000)
sql import into tb values(1520000012000, 1520000012000)
sql import into tb values(1520000023000, 1520000023000)
sql import into tb values(1520000034000, 1520000034000)
sql import into tb values(1520000050000, 1520000050000)
sql select * from tb;
print $rows
if $rows != 14 then
@ -105,25 +105,26 @@ if $rows != 14 then
endi
print ================= step7
sql import into tb values(1520000007001, 7001)
sql import into tb values(1520000012001, 12001)
sql import into tb values(1520000023001, 23001)
sql import into tb values(1520000034001, 34001)
sql import into tb values(1520000050001, 50001)
sql import into tb values(1520000007001, 1520000007001)
sql import into tb values(1520000012001, 1520000012001)
sql import into tb values(1520000023001, 1520000023001)
sql import into tb values(1520000034001, 1520000034001)
sql import into tb values(1520000050001, 1520000050001)
sql select * from tb;
print $rows
if $rows != 19 then
if $rows != 19 then
print expect 19, actual: $rows
return -1
endi
print ================= step8
sql insert into tb values(1520000008002, 8002)
sql insert into tb values(1520000014002, 14002)
sql insert into tb values(1520000025002, 25002)
sql insert into tb values(1520000060000, 60000)
sql insert into tb values(1520000008002, 1520000008002)
sql insert into tb values(1520000014002, 1520000014002)
sql insert into tb values(1520000025002, 1520000025002)
sql insert into tb values(1520000060000, 1520000060000)
sql select * from tb;
print $rows
if $rows != 24 then
if $rows != 23 then
return -1
endi
@ -142,21 +143,21 @@ print ================= step9
#sql import into tb values(now+14d, 50001)
#sql import into tb values(now+16d, 500051)
sql import into tb values(1517408000000, 7003)
sql import into tb values(1518272000000, 34003)
sql import into tb values(1519136000000, 34003)
sql import into tb values(1519568000000, 34003)
sql import into tb values(1519654400000, 50001)
sql import into tb values(1519827200000, 50001)
sql import into tb values(1520345600000, 50001)
sql import into tb values(1520691200000, 50002)
sql import into tb values(1520864000000, 50003)
sql import into tb values(1521900800000, 50004)
sql import into tb values(1523110400000, 50001)
sql import into tb values(1521382400000, 500051)
sql import into tb values(1517408000000, 1517408000000)
sql import into tb values(1518272000000, 1518272000000)
sql import into tb values(1519136000000, 1519136000000)
sql import into tb values(1519568000000, 1519568000000)
sql import into tb values(1519654400000, 1519654400000)
sql import into tb values(1519827200000, 1519827200000)
sql import into tb values(1520345600000, 1520345600000)
sql import into tb values(1520691200000, 1520691200000)
sql import into tb values(1520864000000, 1520864000000)
sql import into tb values(1521900800000, 1521900800000)
sql import into tb values(1523110400000, 1523110400000)
sql import into tb values(1521382400000, 1521382400000)
sql select * from tb;
print $rows
if $rows != 36 then
if $rows != 35 then
return -1
endi
@ -167,55 +168,57 @@ system sh/exec_up.sh -n dnode1 -s start
sleep 5000
sql select * from tb;
print $rows
if $rows != 36 then
if $rows != 35 then
return -1
endi
print ================= step11
#sql import into tb values(now-50d, 7003) (now-48d, 7003) (now-46d, 7003) (now-44d, 7003) (now-42d, 7003)
sql import into tb values(1515680000000, 7003) (1515852800000, 7003) (1516025600000, 7003) (1516198400000, 7003) (1516371200000, 7003)
sql import into tb values(1515680000000, 1) (1515852800000, 2) (1516025600000, 3) (1516198400000, 4) (1516371200000, 5)
sql select * from tb;
if $rows != 41 then
if $rows != 40 then
return -1
endi
print ================= step12
#1520000000000
#sql import into tb values(now-19d, 7003) (now-18d, 7003) (now-17d, 7003) (now-16d, 7003) (now-15d, 7003) (now-14d, 7003) (now-13d, 7003) (now-12d, 7003) (now-11d, 7003)
sql import into tb values(1518358400000, 7003) (1518444800000, 7003) (1518531200000, 7003) (1518617600000, 7003) (1518704000000, 7003) (1518790400000, 7003) (1518876800000, 7003) (1518963200000, 7003) (1519049600000, 7003)
#sql import into tb values(now-19d, -19) (now-18d, -18) (now-17d, -17) (now-16d, -16) (now-15d, -15) (now-14d, -14) (now-13d, -13) (now-12d, -12) (now-11d, -11)
sql import into tb values(1518358400000, 6) (1518444800000, 7) (1518531200000, 8) (1518617600000, 9) (1518704000000, 10) (1518790400000, 11) (1518876800000, 12) (1518963200000, 13) (1519049600000, 14)
sql select * from tb;
print $rows
if $rows != 50 then
if $rows != 49 then
return -1
endi
print ================= step13
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sleep 5000
system sh/exec_up.sh -n dnode2 -s start
sleep 5000
print ================= step14
#1520000000000
#sql import into tb values(now-48d, 34003)
#sql import into tb values(now-38d, 50001)
#sql import into tb values(now-28d, 50001)
#sql import into tb values(now-48d, -48)
#sql import into tb values(now-38d, -38)
#sql import into tb values(now-28d, -28)
sql import into tb values(1515852800001, 34003)
sql import into tb values(1516716800000, 50001)
sql import into tb values(1517580800000, 50001)
sql import into tb values(1515852800001, -48)
sql import into tb values(1516716800000, -38)
sql import into tb values(1517580800000, -28)
sql select * from tb;
if $rows != 50 then
if $rows != 52 then
return -1
endi
print ================= step15
system sh/exec_up.sh -n dnode2 -s start
sleep 8000
system sh/exec_up.sh -n dnode1 -s stop
sleep 10000
system sh/exec_up.sh -n dnode1 -s stop -x SIGINT
sleep 5000
system sh/exec_up.sh -n dnode1 -s start
sleep 5000
sql select * from tb;
print $rows
if $rows != 50 then
if $rows != 52 then
return -1
endi

View File

@ -20,14 +20,14 @@ system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 2000
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 2000
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 2000
system sh/cfg.sh -n dnode1 -c commitlog -v 0
system sh/cfg.sh -n dnode2 -c commitlog -v 0
system sh/cfg.sh -n dnode3 -c commitlog -v 0
system sh/cfg.sh -n dnode4 -c commitlog -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
print ========= start dnode1
system sh/exec_up.sh -n dnode1 -s start
sleep 5000
sleep 3000
sql connect
sql create dnode $hostname2
@ -38,10 +38,10 @@ system sh/exec_up.sh -n dnode3 -s start
sql create database ir3db replica 3 days 7
sql use ir3db
sql create table tb(ts timestamp, i int)
sql create table tb(ts timestamp, i bigint)
print ================= step1
sql import into tb values(1520000010000, 10000)
sql import into tb values(1520000010000, 1520000010000)
sql select * from tb;
print $rows
if $rows != 1 then
@ -49,7 +49,7 @@ if $rows != 1 then
endi
print ================= step2
sql insert into tb values(1520000008000, 8000)
sql insert into tb values(1520000008000, 1520000008000)
print $rows
sql select * from tb;
if $rows != 2 then
@ -57,7 +57,7 @@ if $rows != 2 then
endi
print ================= step3
sql insert into tb values(1520000020000, 20000)
sql insert into tb values(1520000020000, 1520000020000)
sql select * from tb;
print $rows
if $rows != 3 then
@ -65,9 +65,9 @@ if $rows != 3 then
endi
print ================= step4
sql import into tb values(1520000009000, 9000)
sql import into tb values(1520000015000, 15000)
sql import into tb values(1520000030000, 30000)
sql import into tb values(1520000009000, 1520000009000)
sql import into tb values(1520000015000, 1520000015000)
sql import into tb values(1520000030000, 1520000030000)
sql select * from tb;
print $rows
if $rows != 6 then
@ -75,10 +75,10 @@ if $rows != 6 then
endi
print ================= step5
sql insert into tb values(1520000008000, 8000)
sql insert into tb values(1520000014000, 14000)
sql insert into tb values(1520000025000, 25000)
sql insert into tb values(1520000040000, 40000)
sql insert into tb values(1520000008000, 1520000008000)
sql insert into tb values(1520000014000, 1520000014000)
sql insert into tb values(1520000025000, 1520000025000)
sql insert into tb values(1520000040000, 1520000040000)
sql select * from tb;
print $rows
if $rows != 9 then
@ -86,47 +86,48 @@ if $rows != 9 then
endi
print ================= step6
sql import into tb values(1520000007000, 7000)
sql import into tb values(1520000012000, 12000)
sql import into tb values(1520000023000, 23000)
sql import into tb values(1520000034000, 34000)
sql import into tb values(1520000050000, 50000)
sql import into tb values(1520000007000, 1520000007000)
sql import into tb values(1520000012000, 1520000012000)
sql import into tb values(1520000023000, 1520000023000)
sql import into tb values(1520000034000, 1520000034000)
sql import into tb values(1520000050000, 1520000050000)
sql select * from tb;
print $rows
if $rows != 14 then
return -1
endi
#print ================== dnode restart
#system sh/exec_up.sh -n dnode1 -s stop -x SIGINT
#sleep 5000
#system sh/exec_up.sh -n dnode1 -s start
#sleep 5000
#sql select * from tb;
#if $rows != 14 then
# return -1
#endi
print ================== dnode restart
system sh/exec_up.sh -n dnode1 -s stop -x SIGINT
sleep 5000
system sh/exec_up.sh -n dnode1 -s start
sleep 5000
sql select * from tb;
if $rows != 14 then
return -1
endi
print ================= step7
sql import into tb values(1520000007001, 7001)
sql import into tb values(1520000012001, 12001)
sql import into tb values(1520000023001, 23001)
sql import into tb values(1520000034001, 34001)
sql import into tb values(1520000050001, 50001)
sql import into tb values(1520000007001, 1520000007001)
sql import into tb values(1520000012001, 1520000012001)
sql import into tb values(1520000023001, 1520000023001)
sql import into tb values(1520000034001, 1520000034001)
sql import into tb values(1520000050001, 1520000050001)
sql select * from tb;
print $rows
if $rows != 19 then
if $rows != 19 then
print expect 19, actual: $rows
return -1
endi
print ================= step8
sql insert into tb values(1520000008002, 8002)
sql insert into tb values(1520000014002, 14002)
sql insert into tb values(1520000025002, 25002)
sql insert into tb values(1520000060000, 60000)
sql insert into tb values(1520000008002, 1520000008002)
sql insert into tb values(1520000014002, 1520000014002)
sql insert into tb values(1520000025002, 1520000025002)
sql insert into tb values(1520000060000, 1520000060000)
sql select * from tb;
print $rows
if $rows != 24 then
if $rows != 23 then
return -1
endi
@ -145,21 +146,21 @@ print ================= step9
#sql import into tb values(now+14d, 50001)
#sql import into tb values(now+16d, 500051)
sql import into tb values(1517408000000, 7003)
sql import into tb values(1518272000000, 34003)
sql import into tb values(1519136000000, 34003)
sql import into tb values(1519568000000, 34003)
sql import into tb values(1519654400000, 50001)
sql import into tb values(1519827200000, 50001)
sql import into tb values(1520345600000, 50001)
sql import into tb values(1520691200000, 50002)
sql import into tb values(1520864000000, 50003)
sql import into tb values(1521900800000, 50004)
sql import into tb values(1523110400000, 50001)
sql import into tb values(1521382400000, 500051)
sql import into tb values(1517408000000, 1517408000000)
sql import into tb values(1518272000000, 1518272000000)
sql import into tb values(1519136000000, 1519136000000)
sql import into tb values(1519568000000, 1519568000000)
sql import into tb values(1519654400000, 1519654400000)
sql import into tb values(1519827200000, 1519827200000)
sql import into tb values(1520345600000, 1520345600000)
sql import into tb values(1520691200000, 1520691200000)
sql import into tb values(1520864000000, 1520864000000)
sql import into tb values(1521900800000, 1521900800000)
sql import into tb values(1523110400000, 1523110400000)
sql import into tb values(1521382400000, 1521382400000)
sql select * from tb;
print $rows
if $rows != 36 then
if $rows != 35 then
return -1
endi
@ -170,55 +171,57 @@ system sh/exec_up.sh -n dnode1 -s start
sleep 5000
sql select * from tb;
print $rows
if $rows != 36 then
if $rows != 35 then
return -1
endi
print ================= step11
#sql import into tb values(now-50d, 7003) (now-48d, 7003) (now-46d, 7003) (now-44d, 7003) (now-42d, 7003)
sql import into tb values(1515680000000, 7003) (1515852800000, 7003) (1516025600000, 7003) (1516198400000, 7003) (1516371200000, 7003)
sql import into tb values(1515680000000, 1) (1515852800000, 2) (1516025600000, 3) (1516198400000, 4) (1516371200000, 5)
sql select * from tb;
if $rows != 41 then
if $rows != 40 then
return -1
endi
print ================= step12
#1520000000000
#sql import into tb values(now-19d, 7003) (now-18d, 7003) (now-17d, 7003) (now-16d, 7003) (now-15d, 7003) (now-14d, 7003) (now-13d, 7003) (now-12d, 7003) (now-11d, 7003)
sql import into tb values(1518358400000, 7003) (1518444800000, 7003) (1518531200000, 7003) (1518617600000, 7003) (1518704000000, 7003) (1518790400000, 7003) (1518876800000, 7003) (1518963200000, 7003) (1519049600000, 7003)
#sql import into tb values(now-19d, -19) (now-18d, -18) (now-17d, -17) (now-16d, -16) (now-15d, -15) (now-14d, -14) (now-13d, -13) (now-12d, -12) (now-11d, -11)
sql import into tb values(1518358400000, 6) (1518444800000, 7) (1518531200000, 8) (1518617600000, 9) (1518704000000, 10) (1518790400000, 11) (1518876800000, 12) (1518963200000, 13) (1519049600000, 14)
sql select * from tb;
print $rows
if $rows != 50 then
if $rows != 49 then
return -1
endi
print ================= step13
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sleep 5000
system sh/exec_up.sh -n dnode2 -s start
sleep 5000
print ================= step14
#1520000000000
#sql import into tb values(now-48d, 34003)
#sql import into tb values(now-38d, 50001)
#sql import into tb values(now-28d, 50001)
#sql import into tb values(now-48d, -48)
#sql import into tb values(now-38d, -38)
#sql import into tb values(now-28d, -28)
sql import into tb values(1515852800001, 34003)
sql import into tb values(1516716800000, 50001)
sql import into tb values(1517580800000, 50001)
sql import into tb values(1515852800001, -48)
sql import into tb values(1516716800000, -38)
sql import into tb values(1517580800000, -28)
sql select * from tb;
if $rows != 50 then
if $rows != 52 then
return -1
endi
print ================= step15
system sh/exec_up.sh -n dnode2 -s start
sleep 8000
system sh/exec_up.sh -n dnode3 -s stop
sleep 3000
system sh/exec_up.sh -n dnode3 -s stop -x SIGINT
sleep 5000
system sh/exec_up.sh -n dnode3 -s start
sleep 5000
sql select * from tb;
print $rows
if $rows != 50 then
if $rows != 52 then
return -1
endi

View File

@ -7,8 +7,8 @@ system sh/cfg.sh -n dnode1 -c statusInterval -v 1
system sh/cfg.sh -n dnode2 -c statusInterval -v 1
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode2 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c mgmtEqualVnodeNum -v 0
system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
@ -60,20 +60,20 @@ print =============== step2
$x = 0
show1:
$x = $x + 1
sleep 1000
if $x == 20 then
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes -x show1
$dnode1Vnodes = $data3_192.168.0.1
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data3_192.168.0.2
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
if $dnode1Vnodes != 0 then
if $dnode1Vnodes != 4 then
goto show1
endi
if $dnode2Vnodes != NULL then
if $dnode2Vnodes != null then
goto show1
endi
print =============== step3 start dnode2
@ -89,9 +89,9 @@ show2:
return -1
endi
sql show dnodes -x show2
$dnode1Vnodes = $data3_192.168.0.1
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data3_192.168.0.2
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
if $dnode1Vnodes != 2 then

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode2 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4

View File

@ -84,13 +84,13 @@ print dnode2 $dnode2Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
if $dnode1Vnodes != 3 then
if $dnode1Vnodes != 1 then
return -1
endi
if $dnode2Vnodes != 3 then
if $dnode2Vnodes != 1 then
return -1
endi
if $dnode3Vnodes != 3 then
if $dnode3Vnodes != 1 then
return -1
endi

View File

@ -3,10 +3,10 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode2 -c walLevel -v 0
system sh/cfg.sh -n dnode3 -c walLevel -v 0
system sh/cfg.sh -n dnode4 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode2 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4

View File

@ -6,12 +6,12 @@ system sh/deploy.sh -n dnode4 -i 4
system sh/deploy.sh -n dnode5 -i 5
system sh/deploy.sh -n dnode6 -i 6
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode2 -c walLevel -v 0
system sh/cfg.sh -n dnode3 -c walLevel -v 0
system sh/cfg.sh -n dnode4 -c walLevel -v 0
system sh/cfg.sh -n dnode5 -c walLevel -v 0
system sh/cfg.sh -n dnode6 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode5 -c walLevel -v 2
system sh/cfg.sh -n dnode6 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4

View File

@ -4,10 +4,10 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode2 -c walLevel -v 0
system sh/cfg.sh -n dnode3 -c walLevel -v 0
system sh/cfg.sh -n dnode4 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4

View File

@ -40,7 +40,6 @@ fi
totalPySuccess=`grep 'successfully executed' pytest-out.txt | wc -l`
if [ "$totalPySuccess" -gt "0" ]; then
grep 'successfully executed' pytest-out.txt
echo -e "${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}"
fi