Merge remote-tracking branch 'origin/develop' into feature/crash_gen
Commit 58153639c5
@@ -5,29 +5,16 @@
 # #
 ########################################################

-# master IP for TDengine system
+# first full-qualified domain name (FQDN) for TDengine system
-# masterIp 127.0.0.1
+# first 127.0.0.1

-# second IP for TDengine system, for cluster version only
+# second full-qualified domain name (FQDN) for TDengine system, for cluster edition only
-# secondIp 127.0.0.1
+# second 127.0.0.1

-# IP address of the server
-# privateIp 127.0.0.1

-# public IP of server, on which the tdengine are deployed
-# this IP is assigned by cloud service provider, for cluster version only
-# publicIp 127.0.0.1

-# network is bound to 0.0.0.0
-# anyIp 1

 # set socket type ("udp" and "tcp")
 # the server and client should have the same socket type. Otherwise, connect will fail
 # sockettype udp

-# client local IP
-# localIp 127.0.0.1

 # for the cluster version, data file's directory is configured this way
 # option mount_path tier_level
 # dataDir /mnt/disk1/taos 0

@@ -46,23 +33,8 @@
 # http service port, default tcp[6020]
 # httpPort 6020

-# port for MNode connect to Client, default udp[6030-6034] tcp[6030]
+# port for MNode connect to Client, default udp[6030-6055] tcp[6030]
-# mgmtShellPort 6030
+# serverPort 6030

-# port for DNode connect to Client, default udp[6035-6039] tcp[6035]
-# vnodeShellPort 6035

-# port for MNode connect to VNode, default udp[6040-6044] tcp[6040], for cluster version only
-# mgmtVnodePort 6040

-# port for DNode connect to DNode, default tcp[6045], for cluster version only
-# vnodeVnodePort 6045

-# port for MNode connect to MNode, default udp[6050], for cluster version only
-# mgmtMgmtPort 6050

-# port sync file MNode and MNode, default tcp[6050], for cluster version only
-# mgmtSyncPort 6050

 # number of threads per CPU core
 # numOfThreadsPerCore 1

@@ -73,20 +45,18 @@
 # number of total vnodes in DNode
 # numOfTotalVnodes 0

-# max number of sessions per vnode
+# max number of tables per vnode
-# tables 1024
+# maxtablesPerVnode 1000

 # cache block size
 # cache 16384

-# row in file block
+# row of records in file block
-# rows 4096
+# minRows 100
+# maxRows 4096

-# average cache blocks per meter
+# number of cache blocks per vnode
-# ablocks 4
+# blocks 2

-# max number of cache blocks per Meter
-# tblocks 512

 # interval of system monitor
 # monitorInterval 60

@@ -118,11 +88,8 @@
 # interval of MNode send HB to MNode, unit is Second, for cluster version only
 # mgmtPeerHBTimer 1

-# time to keep MeterMeta in Cache, seconds
+# duration of to keep tableMeta kept in Cache, seconds
-# meterMetaKeepTimer 7200
+# tableMetaKeepTimer 7200

-# time to keep MetricMeta in Cache, seconds
-# metricMetaKeepTimer 600

 # max number of users
 # maxUsers 1000

@@ -148,8 +115,8 @@
 # system time zone
 # timezone Asia/Shanghai (CST, +0800)

-# enable/disable commit log
+# set write ahead log (WAL) level
-# clog 1
+# walLevel 1

 # enable/disable async log
 # asyncLog 1

@@ -268,6 +235,9 @@
 # debug flag for TDengine client
 # cDebugFlag 131

+# debug flag for query
+# qDebugflag 131

 # debug flag for http server
 # httpDebugFlag 131
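For reference, a minimal sketch of how the renamed options could be uncommented in taos.cfg after this change (option names are taken from the diff above; host names and values are placeholders, not recommendations):

    first              node1.example.com
    second             node2.example.com
    serverPort         6030
    maxtablesPerVnode  1000
    minRows            100
    maxRows            4096
    blocks             2
    walLevel           1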
@@ -203,7 +203,6 @@ void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo);

 void tscSetFreeHeatBeat(STscObj* pObj);
 bool tscShouldFreeHeatBeat(SSqlObj* pHb);
-void tscCleanSqlCmd(SSqlCmd* pCmd);
 bool tscShouldBeFreed(SSqlObj* pSql);

 STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex);

@@ -220,7 +219,6 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
 STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
 int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);

-void tscFreeQueryInfo(SSqlCmd* pCmd);
 void tscInitQueryInfo(SQueryInfo* pQueryInfo);

 void tscClearSubqueryInfo(SSqlCmd* pCmd);
@@ -88,7 +88,7 @@ typedef struct SSqlExpr {
 int16_t functionId; // function id in aAgg array
 int16_t resType; // return value type
 int16_t resBytes; // length of return value
-int16_t interResBytes; // inter result buffer size
+int16_t interBytes; // inter result buffer size
 int16_t numOfParams; // argument value of each function
 tVariant param[3]; // parameters are not more than 3
 int32_t offset; // sub result column value of arithmetic expression.

@@ -195,7 +195,7 @@ typedef struct SDataBlockList { // todo remove

 typedef struct SQueryInfo {
 int16_t command; // the command may be different for each subclause, so keep it seperately.
-uint16_t type; // query/insert/import type
+uint32_t type; // query/insert/import type
 char slidingTimeUnit;

 STimeWindow window;

@@ -283,6 +283,8 @@ typedef struct {
 int32_t* length; // length for each field for current row
 char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
 SColumnIndex * pColumnIndex;
+SArithmeticSupport* pArithSup; // support the arithmetic expression calculation on agg functions

 struct SLocalReducer *pLocalReducer;
 } SSqlRes;
@@ -153,7 +153,7 @@ typedef struct SRateInfo {

 int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
-int16_t *bytes, int16_t *interResBytes, int16_t extLength, bool isSuperTable) {
+int16_t *bytes, int16_t *interBytes, int16_t extLength, bool isSuperTable) {
 if (!isValidDataType(dataType, dataBytes)) {
 tscError("Illegal data type %d or data type length %d", dataType, dataBytes);
 return TSDB_CODE_INVALID_SQL;

@@ -164,35 +164,35 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
 functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP) {
 *type = (int16_t)dataType;
 *bytes = (int16_t)dataBytes;
-*interResBytes = *bytes + sizeof(SResultInfo);
+*interBytes = *bytes + sizeof(SResultInfo);
 return TSDB_CODE_SUCCESS;
 }

 if (functionId == TSDB_FUNC_TID_TAG) { // todo use struct
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = dataBytes + sizeof(int64_t) + sizeof(int32_t) + sizeof(int32_t); // (uid, tid) + VGID + TAGSIZE
-*interResBytes = *bytes;
+*interBytes = *bytes;
 return TSDB_CODE_SUCCESS;
 }

 if (functionId == TSDB_FUNC_COUNT) {
 *type = TSDB_DATA_TYPE_BIGINT;
 *bytes = sizeof(int64_t);
-*interResBytes = *bytes;
+*interBytes = *bytes;
 return TSDB_CODE_SUCCESS;
 }

 if (functionId == TSDB_FUNC_ARITHM) {
 *type = TSDB_DATA_TYPE_DOUBLE;
 *bytes = sizeof(double);
-*interResBytes = *bytes;
+*interBytes = *bytes;
 return TSDB_CODE_SUCCESS;
 }

 if (functionId == TSDB_FUNC_TS_COMP) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = sizeof(int32_t); // this results is compressed ts data
-*interResBytes = POINTER_BYTES;
+*interBytes = POINTER_BYTES;
 return TSDB_CODE_SUCCESS;
 }

@@ -200,54 +200,54 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
 if (functionId == TSDB_FUNC_MIN || functionId == TSDB_FUNC_MAX) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = dataBytes + DATA_SET_FLAG_SIZE;
-*interResBytes = *bytes;
+*interBytes = *bytes;

 return TSDB_CODE_SUCCESS;
 } else if (functionId == TSDB_FUNC_SUM) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = sizeof(SSumInfo);
-*interResBytes = *bytes;
+*interBytes = *bytes;

 return TSDB_CODE_SUCCESS;
 } else if (functionId == TSDB_FUNC_AVG) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = sizeof(SAvgInfo);
-*interResBytes = *bytes;
+*interBytes = *bytes;
 return TSDB_CODE_SUCCESS;

 } else if (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE) {
 *type = TSDB_DATA_TYPE_DOUBLE;
 *bytes = sizeof(SRateInfo);
-*interResBytes = sizeof(SRateInfo);
+*interBytes = sizeof(SRateInfo);
 return TSDB_CODE_SUCCESS;
 } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param;
-*interResBytes = *bytes;
+*interBytes = *bytes;

 return TSDB_CODE_SUCCESS;
 } else if (functionId == TSDB_FUNC_SPREAD) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = sizeof(SSpreadInfo);
-*interResBytes = *bytes;
+*interBytes = *bytes;

 return TSDB_CODE_SUCCESS;
 } else if (functionId == TSDB_FUNC_APERCT) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1) + sizeof(SHistogramInfo) + sizeof(SAPercentileInfo);
-*interResBytes = *bytes;
+*interBytes = *bytes;

 return TSDB_CODE_SUCCESS;
 } else if (functionId == TSDB_FUNC_LAST_ROW) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = sizeof(SLastrowInfo) + dataBytes;
-*interResBytes = *bytes;
+*interBytes = *bytes;

 return TSDB_CODE_SUCCESS;
 } else if (functionId == TSDB_FUNC_TWA) {
 *type = TSDB_DATA_TYPE_DOUBLE;
 *bytes = sizeof(STwaInfo);
-*interResBytes = *bytes;
+*interBytes = *bytes;
 return TSDB_CODE_SUCCESS;
 }
 }

@@ -260,57 +260,57 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
 }

 *bytes = sizeof(int64_t);
-*interResBytes = sizeof(SSumInfo);
+*interBytes = sizeof(SSumInfo);
 return TSDB_CODE_SUCCESS;
 } else if (functionId == TSDB_FUNC_APERCT) {
 *type = TSDB_DATA_TYPE_DOUBLE;
 *bytes = sizeof(double);
-*interResBytes =
+*interBytes =
 sizeof(SAPercentileInfo) + sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1);
 return TSDB_CODE_SUCCESS;
 } else if (functionId == TSDB_FUNC_TWA) {
 *type = TSDB_DATA_TYPE_DOUBLE;
 *bytes = sizeof(double);
-*interResBytes = sizeof(STwaInfo);
+*interBytes = sizeof(STwaInfo);
 return TSDB_CODE_SUCCESS;
 }

 if (functionId == TSDB_FUNC_AVG) {
 *type = TSDB_DATA_TYPE_DOUBLE;
 *bytes = sizeof(double);
-*interResBytes = sizeof(SAvgInfo);
+*interBytes = sizeof(SAvgInfo);
 } else if (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE) {
 *type = TSDB_DATA_TYPE_DOUBLE;
 *bytes = sizeof(double);
-*interResBytes = sizeof(SRateInfo);
+*interBytes = sizeof(SRateInfo);
 } else if (functionId == TSDB_FUNC_STDDEV) {
 *type = TSDB_DATA_TYPE_DOUBLE;
 *bytes = sizeof(double);
-*interResBytes = sizeof(SStddevInfo);
+*interBytes = sizeof(SStddevInfo);
 } else if (functionId == TSDB_FUNC_MIN || functionId == TSDB_FUNC_MAX) {
 *type = (int16_t)dataType;
 *bytes = (int16_t)dataBytes;
-*interResBytes = dataBytes + DATA_SET_FLAG_SIZE;
+*interBytes = dataBytes + DATA_SET_FLAG_SIZE;
 } else if (functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_LAST) {
 *type = (int16_t)dataType;
 *bytes = (int16_t)dataBytes;
-*interResBytes = dataBytes + sizeof(SResultInfo);
+*interBytes = dataBytes + sizeof(SResultInfo);
 } else if (functionId == TSDB_FUNC_SPREAD) {
 *type = (int16_t)TSDB_DATA_TYPE_DOUBLE;
 *bytes = sizeof(double);
-*interResBytes = sizeof(SSpreadInfo);
+*interBytes = sizeof(SSpreadInfo);
 } else if (functionId == TSDB_FUNC_PERCT) {
 *type = (int16_t)TSDB_DATA_TYPE_DOUBLE;
 *bytes = (int16_t)sizeof(double);
-*interResBytes = (int16_t)sizeof(double);
+*interBytes = (int16_t)sizeof(double);
 } else if (functionId == TSDB_FUNC_LEASTSQR) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE; // string
-*interResBytes = *bytes + sizeof(SResultInfo);
+*interBytes = *bytes + sizeof(SResultInfo);
 } else if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) {
 *type = TSDB_DATA_TYPE_BINARY;
 *bytes = dataBytes + sizeof(SFirstLastInfo);
-*interResBytes = *bytes;
+*interBytes = *bytes;
 } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
 *type = (int16_t)dataType;
 *bytes = (int16_t)dataBytes;

@@ -318,11 +318,11 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
 size_t size = sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param;

 // the output column may be larger than sizeof(STopBotInfo)
-*interResBytes = size;
+*interBytes = size;
 } else if (functionId == TSDB_FUNC_LAST_ROW) {
 *type = (int16_t)dataType;
 *bytes = (int16_t)dataBytes;
-*interResBytes = dataBytes + sizeof(SLastrowInfo);
+*interBytes = dataBytes + sizeof(SLastrowInfo);
 } else {
 return TSDB_CODE_INVALID_SQL;
 }
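The rename is mechanical, but for orientation, a hypothetical caller of the renamed signature could look like this (local names and values are illustrative; the expected outputs for AVG on a regular table follow from the branches shown above):

    int16_t type = 0, bytes = 0, interBytes = 0;
    int32_t code = getResultDataInfo(TSDB_DATA_TYPE_DOUBLE, sizeof(double), TSDB_FUNC_AVG, 0,
                                     &type, &bytes, &interBytes, 0, false);
    // on success: type == TSDB_DATA_TYPE_DOUBLE, bytes == sizeof(double),
    // interBytes == sizeof(SAvgInfo)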
@@ -327,15 +327,16 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
 *(uint32_t*) payload = TSDB_DATA_NCHAR_NULL;
 } else {
 // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'
-int32_t resLen = -1;
-if (!taosMbsToUcs4(pToken->z, pToken->n, payload + VARSTR_HEADER_SIZE, pSchema->bytes - VARSTR_HEADER_SIZE, &resLen)) {
-char buf[512] = {0};
-snprintf(buf, 512, "%s", strerror(errno));
+size_t wcharLength = 0;
+if (!taosMbsToUcs4(pToken->z, pToken->n, payload + VARSTR_HEADER_SIZE, pSchema->bytes - VARSTR_HEADER_SIZE,
+&wcharLength)) {
+char buf[512] = {0};
+snprintf(buf, tListLen(buf), "%s", strerror(errno));
 return tscInvalidSQLErrMsg(msg, buf, pToken->z);
 }

-*(uint16_t*)payload = (uint16_t) (resLen * TSDB_NCHAR_SIZE);
+*(uint16_t*) payload = (uint16_t) (wcharLength);
 }
 break;
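Note the stored length header: the old code multiplied a character count by TSDB_NCHAR_SIZE, while the new code stores wcharLength as-is, which suggests taosMbsToUcs4 now reports the converted length in bytes. A minimal sketch of the new calling convention, with src, srcLen, dst, dstCapacity and lengthHeader as placeholder names:

    size_t wcharLength = 0;
    if (taosMbsToUcs4(src, srcLen, dst, dstCapacity, &wcharLength)) {
      *(uint16_t*) lengthHeader = (uint16_t) wcharLength;  // already a byte count
    }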
@@ -960,6 +961,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
 *sqlstr = sql;
 }

+if (*sqlstr == NULL) {
+code = TSDB_CODE_INVALID_SQL;
+}

 return code;
 }
@@ -1117,7 +1117,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel

 // if the name of column is quoted, remove it and set the right information for later process
 extractColumnNameFromString(pItem);
-pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
+TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY);

 // select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2
 if (addProjectionExprAndResultField(pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {

@@ -1198,48 +1198,26 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
 columnList.num = 0;
 columnList.ids[0] = (SColumnIndex) {0, 0};

-insertResultField(pQueryInfo, i, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, "abc", NULL);
+insertResultField(pQueryInfo, i, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, "dummy_column", NULL);

 int32_t slot = tscNumOfFields(pQueryInfo) - 1;
 SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, slot);

 if (pInfo->pSqlExpr == NULL) {
-SExprInfo* pFuncExpr = calloc(1, sizeof(SExprInfo));
-pInfo->pArithExprInfo = pFuncExpr;
+SExprInfo* pArithExprInfo = calloc(1, sizeof(SExprInfo));

 // arithmetic expression always return result in the format of double float
-pFuncExpr->bytes = sizeof(double);
-pFuncExpr->interResBytes = sizeof(double);
-pFuncExpr->type = TSDB_DATA_TYPE_DOUBLE;
+pArithExprInfo->bytes = sizeof(double);
+pArithExprInfo->interBytes = sizeof(double);
+pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE;

-tExprNode* pNode = NULL;
-// SArray* colList = taosArrayInit(10, sizeof(SColIndex));
-
-int32_t ret = exprTreeFromSqlExpr(&pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL);
+int32_t ret = exprTreeFromSqlExpr(&pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL);
 if (ret != TSDB_CODE_SUCCESS) {
-tExprTreeDestroy(&pNode, NULL);
+tExprTreeDestroy(&pArithExprInfo->pExpr, NULL);
 return invalidSqlErrMsg(pQueryInfo->msg, "invalid expression in select clause");
 }

-pFuncExpr->pExpr = pNode;
-assert(0);
-// pExprInfo->pReqColumns = pColIndex;
-
-// for(int32_t k = 0; k < pFuncExpr->numOfCols; ++k) {
-// SColIndex* pCol = &pFuncExpr->colList[k];
-// size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-//
-// for(int32_t f = 0; f < size; ++f) {
-// SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, f);
-// if (strcmp(pExpr->aliasName, pCol->name) == 0) {
-// pCol->colIndex = f;
-// break;
-// }
-// }
-//
-// assert(pCol->colIndex >= 0 && pCol->colIndex < size);
-// tfree(pNode);
-// }
+pInfo->pArithExprInfo = pArithExprInfo;
 }
 }
 } else {

@@ -1260,7 +1238,6 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
 }

 if (isSTable) {
-pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_QUERY;
 /*
 * transfer sql functions that need secondary merge into another format
 * in dealing with metric queries such as: count/first/last

@@ -1311,10 +1288,8 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t c
 index.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta);

 tscColumnListInsert(pTableMetaInfo->tagColList, &index);
-pQueryInfo->type = TSDB_QUERY_TYPE_STABLE_QUERY;
 } else {
 index.columnIndex = colIndex;
-pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
 }

 return tscSqlExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes,

@@ -1399,7 +1374,7 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum

 int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pItem) {
 const char* msg0 = "invalid column name";
-const char* msg1 = "tag for table query is not allowed";
+const char* msg1 = "tag for normal table query is not allowed";

 int32_t startPos = tscSqlExprNumOfExprs(pQueryInfo);

@@ -1436,7 +1411,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
 STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
 STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;

-if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo)) {
+if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) && !UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
 return invalidSqlErrMsg(pQueryInfo->msg, msg1);
 }

@@ -2278,7 +2253,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {

 tscSqlExprUpdate(pQueryInfo, k, functionId, pExpr->colInfo.colIndex, TSDB_DATA_TYPE_BINARY, bytes);
 // todo refactor
-pExpr->interResBytes = intermediateBytes;
+pExpr->interBytes = intermediateBytes;
 }
 }

@@ -4765,7 +4740,7 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCreateDBInfo*
 return TSDB_CODE_SUCCESS;
 }

-static int32_t setTimePrecisionOption(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDbInfo) {
+static int32_t setTimePrecision(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDbInfo) {
 const char* msg = "invalid time precision";

 pMsg->precision = TSDB_TIME_PRECISION_MILLI; // millisecond by default

@@ -4790,15 +4765,15 @@ static int32_t setTimePrecisionOption(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCrea
 }

 static void setCreateDBOption(SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
-pMsg->maxSessions = htonl(pCreateDb->tablesPerVnode);
-pMsg->cacheBlockSize = htonl(-1);
-pMsg->totalBlocks = htonl(-1);
+pMsg->maxTables = htonl(pCreateDb->maxTablesPerVnode);
+pMsg->cacheBlockSize = htonl(pCreateDb->cacheBlockSize);
+pMsg->numOfBlocks = htonl(pCreateDb->numOfBlocks);
 pMsg->daysPerFile = htonl(pCreateDb->daysPerFile);
 pMsg->commitTime = htonl(pCreateDb->commitTime);
-pMsg->minRowsPerFileBlock = htonl(-1);
-pMsg->maxRowsPerFileBlock = htonl(-1);
+pMsg->minRowsPerFileBlock = htonl(pCreateDb->minRowsPerBlock);
+pMsg->maxRowsPerFileBlock = htonl(pCreateDb->maxRowsPerBlock);
 pMsg->compression = pCreateDb->compressionLevel;
-pMsg->commitLog = (char)pCreateDb->commitLog;
+pMsg->walLevel = (char)pCreateDb->walLevel;
 pMsg->replications = pCreateDb->replica;
 pMsg->ignoreExist = pCreateDb->ignoreExists;
 }
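For context, a hypothetical illustration of what the rewritten setCreateDBOption now produces (field names are the ones visible in the hunk above; values are made up and the remaining SCreateDBInfo fields are left zeroed):

    SCreateDBInfo info = {0};
    info.maxTablesPerVnode = 1000;
    info.cacheBlockSize = 16384;
    info.numOfBlocks = 2;
    info.minRowsPerBlock = 100;
    info.maxRowsPerBlock = 4096;
    info.walLevel = 1;

    SCMCreateDbMsg msg = {0};
    setCreateDBOption(&msg, &info);
    // msg.maxTables == htonl(1000), msg.numOfBlocks == htonl(2), msg.walLevel == 1,
    // instead of the hard-coded htonl(-1) defaults used before this change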
@ -4811,7 +4786,7 @@ int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) {
|
||||||
return TSDB_CODE_INVALID_SQL;
|
return TSDB_CODE_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (setTimePrecisionOption(pCmd, pMsg, pCreateDbSql) != TSDB_CODE_SUCCESS) {
|
if (setTimePrecision(pCmd, pMsg, pCreateDbSql) != TSDB_CODE_SUCCESS) {
|
||||||
return TSDB_CODE_INVALID_SQL;
|
return TSDB_CODE_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4937,7 +4912,7 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) {
|
||||||
if (pExpr->functionId != TSDB_FUNC_TAG_DUMMY && pExpr->functionId != TSDB_FUNC_TS_DUMMY) {
|
if (pExpr->functionId != TSDB_FUNC_TAG_DUMMY && pExpr->functionId != TSDB_FUNC_TS_DUMMY) {
|
||||||
SSchema* pColSchema = &pSchema[pExpr->colInfo.colIndex];
|
SSchema* pColSchema = &pSchema[pExpr->colInfo.colIndex];
|
||||||
getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->functionId, pExpr->param[0].i64Key, &pExpr->resType,
|
getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->functionId, pExpr->param[0].i64Key, &pExpr->resType,
|
||||||
&pExpr->resBytes, &pExpr->interResBytes, tagLength, true);
|
&pExpr->resBytes, &pExpr->interBytes, tagLength, true);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -5331,8 +5306,8 @@ int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
|
||||||
int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate) {
|
int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate) {
|
||||||
char msg[512] = {0};
|
char msg[512] = {0};
|
||||||
|
|
||||||
if (pCreate->commitLog != -1 && (pCreate->commitLog < TSDB_MIN_CLOG_LEVEL || pCreate->commitLog > TSDB_MAX_CLOG_LEVEL)) {
|
if (pCreate->walLevel != -1 && (pCreate->walLevel < TSDB_MIN_WAL_LEVEL || pCreate->walLevel > TSDB_MAX_WAL_LEVEL)) {
|
||||||
snprintf(msg, tListLen(msg), "invalid db option commitLog: %d, only 0-2 allowed", pCreate->commitLog);
|
snprintf(msg, tListLen(msg), "invalid db option walLevel: %d, only 0-2 allowed", pCreate->walLevel);
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5357,7 +5332,7 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate) {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
val = htonl(pCreate->maxSessions);
|
val = htonl(pCreate->maxTables);
|
||||||
if (val != -1 && (val < TSDB_MIN_TABLES || val > TSDB_MAX_TABLES)) {
|
if (val != -1 && (val < TSDB_MIN_TABLES || val > TSDB_MAX_TABLES)) {
|
||||||
snprintf(msg, tListLen(msg), "invalid db option maxSessions: %d valid range: [%d, %d]", val,
|
snprintf(msg, tListLen(msg), "invalid db option maxSessions: %d valid range: [%d, %d]", val,
|
||||||
TSDB_MIN_TABLES, TSDB_MAX_TABLES);
|
TSDB_MIN_TABLES, TSDB_MAX_TABLES);
|
||||||
|
@ -5738,12 +5713,16 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr);
|
assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr);
|
||||||
|
bool isSTable = false;
|
||||||
|
|
||||||
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
|
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
|
||||||
code = tscGetSTableVgroupInfo(pSql, index);
|
isSTable = true;
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
code = tscGetSTableVgroupInfo(pSql, index);
|
||||||
return code;
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
}
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_QUERY);
|
||||||
} else {
|
} else {
|
||||||
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TABLE_QUERY);
|
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TABLE_QUERY);
|
||||||
}
|
}
|
||||||
|
@ -5753,7 +5732,6 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
|
||||||
return TSDB_CODE_INVALID_SQL;
|
return TSDB_CODE_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isSTable = UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo);
|
|
||||||
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable) != TSDB_CODE_SUCCESS) {
|
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable) != TSDB_CODE_SUCCESS) {
|
||||||
return TSDB_CODE_INVALID_SQL;
|
return TSDB_CODE_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
|
@@ -101,7 +101,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
 }

 SResultInfo *pResInfo = &pReducer->pResInfo[i];
-pResInfo->bufLen = pExpr->interResBytes;
+pResInfo->bufLen = pExpr->interBytes;
 pResInfo->interResultBuf = calloc(1, (size_t)pResInfo->bufLen);

 pCtx->resultInfo = &pReducer->pResInfo[i];
@@ -439,25 +439,6 @@ int tscProcessSql(SSqlObj *pSql) {
 } else { // local handler
 return (*tscProcessMsgRsp[pCmd->command])(pSql);
 }

-// if (QUERY_IS_JOIN_QUERY(type)) {
-// if ((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0) {
-// return tscHandleMasterJoinQuery(pSql);
-// } else {
-// // for first stage sub query, iterate all vnodes to get all timestamp
-// if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) {
-// return doProcessSql(pSql);
-// }
-// }
-// }
-//
-// if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query
-// tscHandleMasterSTableQuery(pSql);
-// return pRes->code;
-// } else if (pSql->fp == (void(*)())tscHandleMultivnodeInsert) { // multi-vnodes insertion
-// tscHandleMultivnodeInsert(pSql);
-// return pRes->code;
-// }

 return doProcessSql(pSql);
 }

@@ -1907,7 +1888,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
 assert(pTableMetaInfo->pTableMeta == NULL);

 pTableMetaInfo->pTableMeta =
-(STableMeta *) taosCachePut(tscCacheHandle, pTableMetaInfo->name, pTableMeta, size, tsMeterMetaKeepTimer);
+(STableMeta *) taosCachePut(tscCacheHandle, pTableMetaInfo->name, pTableMeta, size, tsTableMetaKeepTimer);

 // todo handle out of memory case
 if (pTableMetaInfo->pTableMeta == NULL) {

@@ -2016,7 +1997,7 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
 // int32_t size = (int32_t)(rsp - ((char *)pMeta)); // Consistent with STableMeta in cache
 //
 // pMeta->index = 0;
-// (void)taosCachePut(tscCacheHandle, pMeta->tableId, (char *)pMeta, size, tsMeterMetaKeepTimer);
+// (void)taosCachePut(tscCacheHandle, pMeta->tableId, (char *)pMeta, size, tsTableMetaKeepTimer);
 // }
 }

@@ -2215,7 +2196,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
 size_t size = 0;
 STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);

-pTableMetaInfo->pTableMeta = taosCachePut(tscCacheHandle, key, (char *)pTableMeta, size, tsMeterMetaKeepTimer);
+pTableMetaInfo->pTableMeta = taosCachePut(tscCacheHandle, key, (char *)pTableMeta, size, tsTableMetaKeepTimer);
 SSchema *pTableSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);

 if (pQueryInfo->colList == NULL) {

@@ -2363,8 +2344,10 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
 pRes->data = pRetrieve->data;

 SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-tscSetResultPointer(pQueryInfo, pRes);
+if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) {
+return pRes->code;
+}

 if (pSql->pSubscription != NULL) {
 int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
@@ -440,23 +440,6 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
 return (pQueryInfo->order.order == TSDB_ORDER_DESC) ? pRes->numOfRows : -pRes->numOfRows;
 }

-static UNUSED_FUNC char *getArithemicInputSrc(void *param, const char *name, int32_t colId) {
-// SArithmeticSupport *pSupport = (SArithmeticSupport *)param;
-// SExprInfo * pExpr = pSupport->pArithExpr;
-
-// int32_t index = -1;
-// for (int32_t i = 0; i < pExpr->numOfCols; ++i) {
-// if (strcmp(name, pExpr->colList[i].name) == 0) {
-// index = i;
-// break;
-// }
-// }
-//
-// assert(index >= 0 && index < pExpr->numOfCols);
-// return pSupport->data[index] + pSupport->offset * pSupport->elemSize[index];
-return 0;
-}

 static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) {
 SSqlObj* pSql = (SSqlObj*) tres;

@@ -885,7 +868,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {

 static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
 // must before clean the sqlcmd object
-tscCleanSqlCmd(&pSql->cmd);
+tscResetSqlCmdObj(&pSql->cmd);

 SSqlCmd *pCmd = &pSql->cmd;
@@ -508,7 +508,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
 SSqlInfo SQLInfo = {0};
 tSQLParse(&SQLInfo, pSql->sqlstr);

-tscCleanSqlCmd(&pSql->cmd);
+tscResetSqlCmdObj(&pSql->cmd);
 ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
 if (TSDB_CODE_SUCCESS != ret) {
 setErrorInfo(pObj, ret, NULL);
@@ -374,7 +374,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
 } else {
 SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);

-uint16_t type = pQueryInfo->type;
+uint32_t type = pQueryInfo->type;
 taos_free_result_imp(pSql, 1);
 pRes->numOfRows = 1;
 pRes->numOfTotal = 0;
@@ -14,6 +14,7 @@
 */

 #include "tscSubquery.h"
+#include <qast.h>
 #include <tcompare.h>
 #include <tschemautil.h>
 #include "os.h"

@@ -1924,7 +1925,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
 static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pField) {
 SSqlRes *pRes = &pSql->res;

-if (isNull(pRes->tsrow[columnIndex], pField->type)) {
+if (pRes->tsrow[columnIndex] != NULL && isNull(pRes->tsrow[columnIndex], pField->type)) {
 pRes->tsrow[columnIndex] = NULL;
 } else if (pField->type == TSDB_DATA_TYPE_NCHAR) {
 // convert unicode to native code in a temporary buffer extra one byte for terminated symbol

@@ -1944,6 +1945,24 @@ static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pF
 }
 }

+static char *getArithemicInputSrc(void *param, const char *name, int32_t colId) {
+SArithmeticSupport *pSupport = (SArithmeticSupport *) param;
+
+int32_t index = -1;
+SSqlExpr* pExpr = NULL;
+
+for (int32_t i = 0; i < pSupport->numOfCols; ++i) {
+pExpr = taosArrayGetP(pSupport->exprList, i);
+if (strncmp(name, pExpr->aliasName, TSDB_COL_NAME_LEN) == 0) {
+index = i;
+break;
+}
+}
+
+assert(index >= 0 && index < pSupport->numOfCols);
+return pSupport->data[index] + pSupport->offset * pExpr->resBytes;
+}

 void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
 SSqlCmd *pCmd = &pSql->cmd;
 SSqlRes *pRes = &pSql->res;

@@ -1981,27 +2000,30 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {

 // calculate the result from several other columns
 if (pSup->pArithExprInfo != NULL) {
-// SArithmeticSupport *sas = (SArithmeticSupport *)calloc(1, sizeof(SArithmeticSupport));
-// sas->offset = 0;
-// sas-> = pQueryInfo->fieldsInfo.pExpr[i];
-//
-// sas->numOfCols = sas->pExpr->binExprInfo.numOfCols;
-//
-// if (pRes->buffer[i] == NULL) {
-// pRes->buffer[i] = malloc(tscFieldInfoGetField(pQueryInfo, i)->bytes);
-// }
-//
-// for(int32_t k = 0; k < sas->numOfCols; ++k) {
-// int32_t columnIndex = sas->pExpr->binExprInfo.pReqColumns[k].colIdxInBuf;
-// SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, columnIndex);
-//
-// sas->elemSize[k] = pExpr->resBytes;
-// sas->data[k] = (pRes->data + pRes->numOfRows* pExpr->offset) + pRes->row*pExpr->resBytes;
-// }
-//
-// tSQLBinaryExprCalcTraverse(sas->pExpr->binExprInfo.pBinExpr, 1, pRes->buffer[i], sas, TSQL_SO_ASC, getArithemicInputSrc);
-// pRes->tsrow[i] = pRes->buffer[i];
-//
+if (pRes->pArithSup == NULL) {
+SArithmeticSupport *sas = (SArithmeticSupport *) calloc(1, sizeof(SArithmeticSupport));
+sas->offset = 0;
+sas->pArithExpr = pSup->pArithExprInfo;
+sas->numOfCols = tscSqlExprNumOfExprs(pQueryInfo);
+sas->exprList = pQueryInfo->exprList;
+sas->data = calloc(sas->numOfCols, POINTER_BYTES);
+
+pRes->pArithSup = sas;
+}
+
+if (pRes->buffer[i] == NULL) {
+TAOS_FIELD* field = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
+pRes->buffer[i] = malloc(field->bytes);
+}
+
+for(int32_t k = 0; k < pRes->pArithSup->numOfCols; ++k) {
+SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k);
+pRes->pArithSup->data[k] = (pRes->data + pRes->numOfRows* pExpr->offset) + pRes->row*pExpr->resBytes;
+}
+
+tExprTreeCalcTraverse(pRes->pArithSup->pArithExpr->pExpr, 1, pRes->buffer[i], pRes->pArithSup,
+TSDB_ORDER_ASC, getArithemicInputSrc);
+pRes->tsrow[i] = pRes->buffer[i];
 // free(sas); //todo optimization
 }
 }

@@ -2010,7 +2032,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
 return pRes->tsrow;
 }

-static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) {
+static UNUSED_FUNC bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) {
 bool hasData = true;
 SSqlCmd *pCmd = &pSql->cmd;
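In short, a projection such as

    select v1 + v2 from tb1;

(table and column names are placeholders) now takes this client-side path: doSetResultRowData lazily allocates a single SArithmeticSupport kept on SSqlRes.pArithSup, getArithemicInputSrc resolves each operand by the alias name of its SSqlExpr, and tExprTreeCalcTraverse evaluates the expression tree into pRes->buffer[i], always producing a double per the ARITHM branch of getResultDataInfo.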
@@ -165,7 +165,7 @@ void taos_init_imp() {
 taosTmrReset(tscCheckDiskUsage, 10, NULL, tscTmr, &tscCheckDiskUsageTmr);
 }

-int64_t refreshTime = tsMetricMetaKeepTimer < tsMeterMetaKeepTimer ? tsMetricMetaKeepTimer : tsMeterMetaKeepTimer;
+int64_t refreshTime = tsTableMetaKeepTimer;
 refreshTime = refreshTime > 2 ? 2 : refreshTime;
 refreshTime = refreshTime < 1 ? 1 : refreshTime;
@@ -30,7 +30,10 @@
 #include "ttokendef.h"
 #include "tscLog.h"

+static void freeQueryInfoImpl(SQueryInfo* pQueryInfo);
+static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache);
+
 SCond* tsGetSTableQueryCond(STagCond* pTagCond, uint64_t uid) {
 if (pTagCond->pCond == NULL) {
 return NULL;
 }

@@ -309,6 +312,7 @@ void tscDestroyResPointerInfo(SSqlRes* pRes) {
 for (int i = 0; i < pRes->numOfCols; i++) {
 tfree(pRes->buffer[i]);
 }

 pRes->numOfCols = 0;
 }

@@ -320,9 +324,32 @@ void tscDestroyResPointerInfo(SSqlRes* pRes) {
 tfree(pRes->pColumnIndex);
 tfree(pRes->buffer);

+if (pRes->pArithSup != NULL) {
+tfree(pRes->pArithSup->data);
+tfree(pRes->pArithSup);
+}

 pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
 }

+static void tscFreeQueryInfo(SSqlCmd* pCmd) {
+if (pCmd == NULL || pCmd->numOfClause == 0) {
+return;
+}
+
+for (int32_t i = 0; i < pCmd->numOfClause; ++i) {
+char* addr = (char*)pCmd - offsetof(SSqlObj, cmd);
+SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i);
+
+freeQueryInfoImpl(pQueryInfo);
+clearAllTableMetaInfo(pQueryInfo, (const char*)addr, false);
+tfree(pQueryInfo);
+}
+
+pCmd->numOfClause = 0;
+tfree(pCmd->pQueryInfo);
+}
+
 void tscResetSqlCmdObj(SSqlCmd* pCmd) {
 pCmd->command = 0;
 pCmd->numOfCols = 0;

@@ -332,9 +359,10 @@ void tscResetSqlCmdObj(SSqlCmd* pCmd) {
 pCmd->parseFinished = 0;

 taosHashCleanup(pCmd->pTableList);
-pCmd->pTableList= NULL;
+pCmd->pTableList = NULL;

 pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);

 tscFreeQueryInfo(pCmd);
 }

@@ -343,6 +371,7 @@ void tscFreeSqlResult(SSqlObj* pSql) {

 SSqlRes* pRes = &pSql->res;
 tscDestroyResPointerInfo(pRes);

 memset(&pSql->res, 0, sizeof(SSqlRes));
 }

@@ -366,8 +395,8 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
 pthread_mutex_unlock(&pObj->mutex);

 tscFreeSqlResult(pSql);
-tfree(pSql->pSubs);

+tfree(pSql->pSubs);
 pSql->freed = 0;
 pSql->numOfSubs = 0;

@@ -913,6 +942,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {

 if (pInfo->pArithExprInfo != NULL) {
 tExprTreeDestroy(&pInfo->pArithExprInfo->pExpr, NULL);
+tfree(pInfo->pArithExprInfo);
 }
 }

@@ -947,7 +977,7 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
 pExpr->colInfo.colIndex = pColIndex->columnIndex;
 pExpr->resType = type;
 pExpr->resBytes = size;
-pExpr->interResBytes = interSize;
+pExpr->interBytes = interSize;
 pExpr->uid = pTableMetaInfo->pTableMeta->uid;

 return pExpr;

@@ -1422,20 +1452,6 @@ bool tscShouldFreeHeatBeat(SSqlObj* pHb) {
 return pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE;
 }

-void tscCleanSqlCmd(SSqlCmd* pCmd) {
-pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
-tscFreeQueryInfo(pCmd);
-
-uint32_t allocSize = pCmd->allocSize;
-char* allocPtr = pCmd->payload;
-
-memset(pCmd, 0, sizeof(SSqlCmd));
-
-// restore values
-pCmd->allocSize = allocSize;
-pCmd->payload = allocPtr;
-}
-
 /*
 * the following three kinds of SqlObj should not be freed
 * 1. SqlObj for stream computing

@@ -1630,24 +1646,6 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool rem
 tfree(pQueryInfo->pTableMetaInfo);
 }

-void tscFreeQueryInfo(SSqlCmd* pCmd) {
-if (pCmd == NULL || pCmd->numOfClause == 0) {
-return;
-}
-
-for (int32_t i = 0; i < pCmd->numOfClause; ++i) {
-char* addr = (char*)pCmd - offsetof(SSqlObj, cmd);
-SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i);
-
-freeQueryInfoImpl(pQueryInfo);
-clearAllTableMetaInfo(pQueryInfo, (const char*)addr, false);
-tfree(pQueryInfo);
-}
-
-pCmd->numOfClause = 0;
-tfree(pCmd->pQueryInfo);
-}
-
 STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
 SVgroupsInfo* vgroupList, SArray* pTagCols) {
 void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
|
@@ -65,8 +65,7 @@ extern int32_t tsStatusInterval;
 extern int32_t tsShellActivityTimer;
 extern int32_t tsVnodePeerHBTimer;
 extern int32_t tsMgmtPeerHBTimer;
-extern int32_t tsMeterMetaKeepTimer;
-extern int32_t tsMetricMetaKeepTimer;
+extern int32_t tsTableMetaKeepTimer;
 
 extern float tsNumOfThreadsPerCore;
 extern float tsRatioOfQueryThreads;
@@ -77,8 +76,8 @@ extern int16_t tsNumOfTotalVnodes;
 extern uint32_t tsPublicIpInt;
 
 extern int32_t tsCacheBlockSize;
-extern int32_t tsTotalBlocks;
-extern int32_t tsTablesPerVnode;
+extern int32_t tsBlocksPerVnode;
+extern int32_t tsMaxTablePerVnode;
 extern int16_t tsDaysPerFile;
 extern int32_t tsDaysToKeep;
 extern int32_t tsMinRowsInFileBlock;
@@ -86,7 +85,7 @@ extern int32_t tsMaxRowsInFileBlock;
 extern int16_t tsCommitTime;  // seconds
 extern int32_t tsTimePrecision;
 extern int16_t tsCompression;
-extern int16_t tsCommitLog;
+extern int16_t tsWAL;
 extern int32_t tsReplications;
 
 extern int16_t tsAffectedRowsMod;
@@ -94,7 +93,6 @@ extern int32_t tsNumOfMPeers;
 extern int32_t tsMaxShellConns;
 extern int32_t tsMaxTables;
 
-extern char tsLocalIp[];
 extern char tsDefaultDB[];
 extern char tsDefaultUser[];
 extern char tsDefaultPass[];
@@ -136,7 +134,6 @@ extern int32_t tsHttpEnableRecordSql;
 extern int32_t tsTelegrafUseFieldNum;
 
 extern int32_t tsTscEnableRecordSql;
-extern int32_t tsAnyIp;
 
 extern char tsMonitorDbName[];
 extern char tsInternalPass[];
@@ -74,8 +74,7 @@ uint16_t tsSyncPort = 6050;
 
 int32_t tsStatusInterval = 1;         // second
 int32_t tsShellActivityTimer = 3;     // second
-int32_t tsMeterMetaKeepTimer = 7200;  // second
-int32_t tsMetricMetaKeepTimer = 600;  // second
+int32_t tsTableMetaKeepTimer = 7200;  // second
 int32_t tsRpcTimer = 300;
 int32_t tsRpcMaxTime = 600;  // seconds;
 
@@ -85,22 +84,22 @@ int16_t tsNumOfVnodesPerCore = 8;
 int16_t tsNumOfTotalVnodes = TSDB_INVALID_VNODE_NUM;
 
 #ifdef _TD_ARM_32_
-int32_t tsTablesPerVnode = 100;
+int32_t tsMaxTablePerVnode = 100;
 #else
-int32_t tsTablesPerVnode = TSDB_DEFAULT_TABLES;
+int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
 #endif
 
 int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
-int32_t tsTotalBlocks = TSDB_DEFAULT_TOTAL_BLOCKS;
+int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS;
 int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE;
 int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP;
 int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK;
 int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK;
 int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME;  // seconds
 int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
 int16_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
-int16_t tsCommitLog = TSDB_DEFAULT_CLOG_LEVEL;
+int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
 int32_t tsReplications = TSDB_DEFAULT_REPLICA_NUM;
 
 /**
  * Change the meaning of affected rows:
@@ -506,18 +505,8 @@ static void doInitGlobalConfig() {
   cfg.unitType = TAOS_CFG_UTYPE_SECOND;
   taosInitConfigOption(cfg);
 
-  cfg.option = "meterMetaKeepTimer";
-  cfg.ptr = &tsMeterMetaKeepTimer;
-  cfg.valType = TAOS_CFG_VTYPE_INT32;
-  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
-  cfg.minValue = 1;
-  cfg.maxValue = 8640000;
-  cfg.ptrLength = 0;
-  cfg.unitType = TAOS_CFG_UTYPE_SECOND;
-  taosInitConfigOption(cfg);
-
-  cfg.option = "metricMetaKeepTimer";
-  cfg.ptr = &tsMetricMetaKeepTimer;
+  cfg.option = "tableMetaKeepTimer";
+  cfg.ptr = &tsTableMetaKeepTimer;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
   cfg.minValue = 1;
@@ -587,8 +576,8 @@ static void doInitGlobalConfig() {
   taosInitConfigOption(cfg);
 
   // database configs
-  cfg.option = "tables";
-  cfg.ptr = &tsTablesPerVnode;
+  cfg.option = "maxtablesPerVnode";
+  cfg.ptr = &tsMaxTablePerVnode;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
   cfg.minValue = TSDB_MIN_TABLES;
@@ -608,7 +597,7 @@ static void doInitGlobalConfig() {
   taosInitConfigOption(cfg);
 
   cfg.option = "blocks";
-  cfg.ptr = &tsTotalBlocks;
+  cfg.ptr = &tsBlocksPerVnode;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
   cfg.minValue = TSDB_MIN_TOTAL_BLOCKS;
@@ -677,12 +666,12 @@ static void doInitGlobalConfig() {
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);
 
-  cfg.option = "clog";
-  cfg.ptr = &tsCommitLog;
+  cfg.option = "wallevel";
+  cfg.ptr = &tsWAL;
   cfg.valType = TAOS_CFG_VTYPE_INT16;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
-  cfg.minValue = TSDB_MIN_CLOG_LEVEL;
-  cfg.maxValue = TSDB_MAX_CLOG_LEVEL;
+  cfg.minValue = TSDB_MIN_WAL_LEVEL;
+  cfg.maxValue = TSDB_MAX_WAL_LEVEL;
   cfg.ptrLength = 0;
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);
@@ -279,9 +279,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_MAX_COMP_LEVEL      2
 #define TSDB_DEFAULT_COMP_LEVEL  2
 
-#define TSDB_MIN_CLOG_LEVEL      0
-#define TSDB_MAX_CLOG_LEVEL      2
-#define TSDB_DEFAULT_CLOG_LEVEL  2
+#define TSDB_MIN_WAL_LEVEL       0
+#define TSDB_MAX_WAL_LEVEL       2
+#define TSDB_DEFAULT_WAL_LEVEL   2
 
 #define TSDB_MIN_REPLICA_NUM     1
 #define TSDB_MAX_REPLICA_NUM     3
@@ -358,7 +358,7 @@ typedef struct SExprInfo {
   struct tExprNode* pExpr;
   int16_t bytes;
   int16_t type;
-  int16_t interResBytes;
+  int16_t interBytes;
 } SExprInfo;
 
 typedef struct SColumnFilterInfo {
@@ -475,9 +475,9 @@ typedef struct {
 typedef struct {
   char    acct[TSDB_USER_LEN + 1];
   char    db[TSDB_DB_NAME_LEN + 1];
-  int32_t maxSessions;
+  int32_t maxTables;
   int32_t cacheBlockSize; //MB
-  int32_t totalBlocks;
+  int32_t numOfBlocks;
   int32_t daysPerFile;
   int32_t daysToKeep1;
   int32_t daysToKeep2;
@@ -486,7 +486,7 @@ typedef struct {
   int32_t minRowsPerFileBlock;
   int32_t maxRowsPerFileBlock;
   int8_t  compression;
-  int8_t  commitLog;
+  int8_t  walLevel;
   int8_t  replications;
   uint8_t precision;   // time resolution
   int8_t  ignoreExist;
@@ -565,7 +565,7 @@ typedef struct {
   int32_t commitTime;
   int8_t  precision;
   int8_t  compression;
-  int8_t  commitLog;
+  int8_t  walLevel;
   int8_t  replications;
   int8_t  wals;
   int8_t  quorum;
@@ -13,8 +13,8 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef TDENGINE_TSQLDEF_H
-#define TDENGINE_TSQLDEF_H
+#ifndef TDENGINE_TTOKENDEF_H
+#define TDENGINE_TTOKENDEF_H
 
 #define TK_ID 1
 #define TK_BOOL 2
@@ -101,123 +101,124 @@
 #define TK_CONNS 83
 #define TK_STATE 84
 #define TK_KEEP 85
-#define TK_CACHE 86
-#define TK_REPLICA 87
-#define TK_DAYS 88
-#define TK_ROWS 89
-#define TK_ABLOCKS 90
-#define TK_TBLOCKS 91
-#define TK_CTIME 92
-#define TK_CLOG 93
-#define TK_COMP 94
-#define TK_PRECISION 95
-#define TK_LP 96
-#define TK_RP 97
-#define TK_TAGS 98
-#define TK_USING 99
-#define TK_AS 100
-#define TK_COMMA 101
-#define TK_NULL 102
-#define TK_SELECT 103
-#define TK_UNION 104
-#define TK_ALL 105
-#define TK_FROM 106
-#define TK_VARIABLE 107
-#define TK_INTERVAL 108
-#define TK_FILL 109
-#define TK_SLIDING 110
-#define TK_ORDER 111
-#define TK_BY 112
-#define TK_ASC 113
-#define TK_DESC 114
-#define TK_GROUP 115
-#define TK_HAVING 116
-#define TK_LIMIT 117
-#define TK_OFFSET 118
-#define TK_SLIMIT 119
-#define TK_SOFFSET 120
-#define TK_WHERE 121
-#define TK_NOW 122
-#define TK_RESET 123
-#define TK_QUERY 124
-#define TK_ADD 125
-#define TK_COLUMN 126
-#define TK_TAG 127
-#define TK_CHANGE 128
-#define TK_SET 129
-#define TK_KILL 130
-#define TK_CONNECTION 131
-#define TK_COLON 132
-#define TK_STREAM 133
-#define TK_ABORT 134
-#define TK_AFTER 135
-#define TK_ATTACH 136
-#define TK_BEFORE 137
-#define TK_BEGIN 138
-#define TK_CASCADE 139
-#define TK_CLUSTER 140
-#define TK_CONFLICT 141
-#define TK_COPY 142
-#define TK_DEFERRED 143
-#define TK_DELIMITERS 144
-#define TK_DETACH 145
-#define TK_EACH 146
-#define TK_END 147
-#define TK_EXPLAIN 148
-#define TK_FAIL 149
-#define TK_FOR 150
-#define TK_IGNORE 151
-#define TK_IMMEDIATE 152
-#define TK_INITIALLY 153
-#define TK_INSTEAD 154
-#define TK_MATCH 155
-#define TK_KEY 156
-#define TK_OF 157
-#define TK_RAISE 158
-#define TK_REPLACE 159
-#define TK_RESTRICT 160
-#define TK_ROW 161
-#define TK_STATEMENT 162
-#define TK_TRIGGER 163
-#define TK_VIEW 164
-#define TK_COUNT 165
-#define TK_SUM 166
-#define TK_AVG 167
-#define TK_MIN 168
-#define TK_MAX 169
-#define TK_FIRST 170
-#define TK_LAST 171
-#define TK_TOP 172
-#define TK_BOTTOM 173
-#define TK_STDDEV 174
-#define TK_PERCENTILE 175
-#define TK_APERCENTILE 176
-#define TK_LEASTSQUARES 177
-#define TK_HISTOGRAM 178
-#define TK_DIFF 179
-#define TK_SPREAD 180
-#define TK_TWA 181
-#define TK_INTERP 182
-#define TK_LAST_ROW 183
-#define TK_RATE 184
-#define TK_IRATE 185
-#define TK_SUM_RATE 186
-#define TK_SUM_IRATE 187
-#define TK_AVG_RATE 188
-#define TK_AVG_IRATE 189
-#define TK_SEMI 190
-#define TK_NONE 191
-#define TK_PREV 192
-#define TK_LINEAR 193
-#define TK_IMPORT 194
-#define TK_METRIC 195
-#define TK_TBNAME 196
-#define TK_JOIN 197
-#define TK_METRICS 198
-#define TK_STABLE 199
-#define TK_INSERT 200
-#define TK_INTO 201
-#define TK_VALUES 202
+#define TK_MAXTABLES 86
+#define TK_CACHE 87
+#define TK_REPLICA 88
+#define TK_DAYS 89
+#define TK_MINROWS 90
+#define TK_MAXROWS 91
+#define TK_BLOCKS 92
+#define TK_CTIME 93
+#define TK_WAL 94
+#define TK_COMP 95
+#define TK_PRECISION 96
+#define TK_LP 97
+#define TK_RP 98
+#define TK_TAGS 99
+#define TK_USING 100
+#define TK_AS 101
+#define TK_COMMA 102
+#define TK_NULL 103
+#define TK_SELECT 104
+#define TK_UNION 105
+#define TK_ALL 106
+#define TK_FROM 107
+#define TK_VARIABLE 108
+#define TK_INTERVAL 109
+#define TK_FILL 110
+#define TK_SLIDING 111
+#define TK_ORDER 112
+#define TK_BY 113
+#define TK_ASC 114
+#define TK_DESC 115
+#define TK_GROUP 116
+#define TK_HAVING 117
+#define TK_LIMIT 118
+#define TK_OFFSET 119
+#define TK_SLIMIT 120
+#define TK_SOFFSET 121
+#define TK_WHERE 122
+#define TK_NOW 123
+#define TK_RESET 124
+#define TK_QUERY 125
+#define TK_ADD 126
+#define TK_COLUMN 127
+#define TK_TAG 128
+#define TK_CHANGE 129
+#define TK_SET 130
+#define TK_KILL 131
+#define TK_CONNECTION 132
+#define TK_COLON 133
+#define TK_STREAM 134
+#define TK_ABORT 135
+#define TK_AFTER 136
+#define TK_ATTACH 137
+#define TK_BEFORE 138
+#define TK_BEGIN 139
+#define TK_CASCADE 140
+#define TK_CLUSTER 141
+#define TK_CONFLICT 142
+#define TK_COPY 143
+#define TK_DEFERRED 144
+#define TK_DELIMITERS 145
+#define TK_DETACH 146
+#define TK_EACH 147
+#define TK_END 148
+#define TK_EXPLAIN 149
+#define TK_FAIL 150
+#define TK_FOR 151
+#define TK_IGNORE 152
+#define TK_IMMEDIATE 153
+#define TK_INITIALLY 154
+#define TK_INSTEAD 155
+#define TK_MATCH 156
+#define TK_KEY 157
+#define TK_OF 158
+#define TK_RAISE 159
+#define TK_REPLACE 160
+#define TK_RESTRICT 161
+#define TK_ROW 162
+#define TK_STATEMENT 163
+#define TK_TRIGGER 164
+#define TK_VIEW 165
+#define TK_COUNT 166
+#define TK_SUM 167
+#define TK_AVG 168
+#define TK_MIN 169
+#define TK_MAX 170
+#define TK_FIRST 171
+#define TK_LAST 172
+#define TK_TOP 173
+#define TK_BOTTOM 174
+#define TK_STDDEV 175
+#define TK_PERCENTILE 176
+#define TK_APERCENTILE 177
+#define TK_LEASTSQUARES 178
+#define TK_HISTOGRAM 179
+#define TK_DIFF 180
+#define TK_SPREAD 181
+#define TK_TWA 182
+#define TK_INTERP 183
+#define TK_LAST_ROW 184
+#define TK_RATE 185
+#define TK_IRATE 186
+#define TK_SUM_RATE 187
+#define TK_SUM_IRATE 188
+#define TK_AVG_RATE 189
+#define TK_AVG_IRATE 190
+#define TK_SEMI 191
+#define TK_NONE 192
+#define TK_PREV 193
+#define TK_LINEAR 194
+#define TK_IMPORT 195
+#define TK_METRIC 196
+#define TK_TBNAME 197
+#define TK_JOIN 198
+#define TK_METRICS 199
+#define TK_STABLE 200
+#define TK_INSERT 201
+#define TK_INTO 202
+#define TK_VALUES 203
 
 #endif
 
@@ -34,7 +34,7 @@ typedef struct {
 } SWalHead;
 
 typedef struct {
-  int8_t commitLog;  // commitLog
+  int8_t walLevel;   // wal level
   int8_t wals;       // number of WAL files;
   int8_t keep;       // keep the wal file when closed
 } SWalCfg;
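A minimal sketch of what a caller looks like after this rename: the configuration field is walLevel instead of commitLog. walOpen and walClose are the entry points touched elsewhere in this diff; the header name, path, and error handling here are illustrative assumptions, not part of the commit.

    #include "twal.h"   // assumed header providing SWalCfg, walOpen, walClose

    // Open a WAL directory with the renamed walLevel field (illustrative only).
    static void *openDemoWal(const char *path) {
      SWalCfg cfg = {
        .walLevel = 2,   // was .commitLog before this rename
        .wals     = 3,   // number of WAL files to keep
        .keep     = 0,   // do not keep the file when closed
      };

      void *wal = walOpen(path, &cfg);
      if (wal == NULL) {
        return NULL;     // creation failed; caller decides how to report it
      }
      return wal;        // caller is expected to call walClose(wal) later
    }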
@@ -60,12 +60,10 @@ TAOS *shellInit(struct arguments *args) {
 
   taos_init();
   /*
-   * set tsMetricMetaKeepTimer = 3000ms
-   * set tsMeterMetaKeepTimer = 3000ms
+   * set tsTableMetaKeepTimer = 3000ms
    * means not save cache in shell
    */
-  tsMetricMetaKeepTimer = 3;
-  tsMeterMetaKeepTimer = 3000;
+  tsTableMetaKeepTimer = 3000;
 
   // Connect to the database.
   TAOS *con = taos_connect(args->host, args->user, args->password, args->database, args->port);
@@ -151,7 +151,7 @@ typedef struct {
   int32_t commitTime;
   int8_t  precision;
   int8_t  compression;
-  int8_t  commitLog;
+  int8_t  walLevel;
   int8_t  replications;
   int8_t  reserved[16];
 } SDbCfg;
@@ -251,8 +251,8 @@ static int32_t mgmtCheckDbCfg(SDbCfg *pCfg) {
     return TSDB_CODE_INVALID_OPTION;
   }
 
-  if (pCfg->commitLog < TSDB_MIN_CLOG_LEVEL || pCfg->commitLog > TSDB_MAX_CLOG_LEVEL) {
-    mError("invalid db option commitLog:%d, only 0-2 allowed", pCfg->commitLog);
+  if (pCfg->walLevel < TSDB_MIN_WAL_LEVEL || pCfg->walLevel > TSDB_MAX_WAL_LEVEL) {
+    mError("invalid db option walLevel:%d, only 0-2 allowed", pCfg->walLevel);
     return TSDB_CODE_INVALID_OPTION;
   }
 
@@ -274,8 +274,8 @@ static int32_t mgmtCheckDbCfg(SDbCfg *pCfg) {
 
 static void mgmtSetDefaultDbCfg(SDbCfg *pCfg) {
   if (pCfg->cacheBlockSize < 0) pCfg->cacheBlockSize = tsCacheBlockSize;
-  if (pCfg->totalBlocks < 0) pCfg->totalBlocks = tsTotalBlocks;
-  if (pCfg->maxTables < 0) pCfg->maxTables = tsTablesPerVnode;
+  if (pCfg->totalBlocks < 0) pCfg->totalBlocks = tsBlocksPerVnode;
+  if (pCfg->maxTables < 0) pCfg->maxTables = tsMaxTablePerVnode;
   if (pCfg->daysPerFile < 0) pCfg->daysPerFile = tsDaysPerFile;
   if (pCfg->daysToKeep < 0) pCfg->daysToKeep = tsDaysToKeep;
   if (pCfg->daysToKeep1 < 0) pCfg->daysToKeep1 = pCfg->daysToKeep;
@@ -285,7 +285,7 @@ static void mgmtSetDefaultDbCfg(SDbCfg *pCfg) {
   if (pCfg->commitTime < 0) pCfg->commitTime = tsCommitTime;
   if (pCfg->precision < 0) pCfg->precision = tsTimePrecision;
   if (pCfg->compression < 0) pCfg->compression = tsCompression;
-  if (pCfg->commitLog < 0) pCfg->commitLog = tsCommitLog;
+  if (pCfg->walLevel < 0) pCfg->walLevel = tsWAL;
   if (pCfg->replications < 0) pCfg->replications = tsReplications;
 }
 
@@ -312,8 +312,8 @@ static int32_t mgmtCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate) {
   pDb->createdTime = taosGetTimestampMs();
   pDb->cfg = (SDbCfg) {
     .cacheBlockSize = pCreate->cacheBlockSize,
-    .totalBlocks    = pCreate->totalBlocks,
-    .maxTables      = pCreate->maxSessions,
+    .totalBlocks    = pCreate->numOfBlocks,
+    .maxTables      = pCreate->maxTables,
     .daysPerFile    = pCreate->daysPerFile,
     .daysToKeep     = pCreate->daysToKeep,
     .daysToKeep1    = pCreate->daysToKeep1,
@@ -323,7 +323,7 @@ static int32_t mgmtCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate) {
     .commitTime     = pCreate->commitTime,
     .precision      = pCreate->precision,
     .compression    = pCreate->compression,
-    .commitLog      = pCreate->commitLog,
+    .walLevel       = pCreate->walLevel,
     .replications   = pCreate->replications
   };
 
@@ -491,7 +491,7 @@ static int32_t mgmtGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn)
 #endif
   pShow->bytes[cols] = 4;
   pSchema[cols].type = TSDB_DATA_TYPE_INT;
-  strcpy(pSchema[cols].name, "tables");
+  strcpy(pSchema[cols].name, "maxtables");
   pSchema[cols].bytes = htons(pShow->bytes[cols]);
   cols++;
 
@@ -521,13 +521,13 @@ static int32_t mgmtGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn)
 
   pShow->bytes[cols] = 4;
   pSchema[cols].type = TSDB_DATA_TYPE_INT;
-  strcpy(pSchema[cols].name, "ctime(s)");
+  strcpy(pSchema[cols].name, "ctime(Sec.)");
   pSchema[cols].bytes = htons(pShow->bytes[cols]);
   cols++;
 
   pShow->bytes[cols] = 1;
   pSchema[cols].type = TSDB_DATA_TYPE_TINYINT;
-  strcpy(pSchema[cols].name, "clog");
+  strcpy(pSchema[cols].name, "wallevel");
   pSchema[cols].bytes = htons(pShow->bytes[cols]);
   cols++;
 
@@ -659,7 +659,7 @@ static int32_t mgmtRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *
     cols++;
 
     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    *(int8_t *)pWrite = pDb->cfg.commitLog;
+    *(int8_t *)pWrite = pDb->cfg.walLevel;
     cols++;
 
     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -728,9 +728,10 @@ static int32_t mgmtSetDbDropping(SDbObj *pDb) {
 
 static void mgmtProcessCreateDbMsg(SQueuedMsg *pMsg) {
   SCMCreateDbMsg *pCreate = pMsg->pCont;
-  pCreate->maxSessions     = htonl(pCreate->maxSessions);
+  pCreate->maxTables       = htonl(pCreate->maxTables);
   pCreate->cacheBlockSize  = htonl(pCreate->cacheBlockSize);
-  pCreate->totalBlocks     = htonl(pCreate->totalBlocks);
+  pCreate->numOfBlocks     = htonl(pCreate->numOfBlocks);
   pCreate->daysPerFile     = htonl(pCreate->daysPerFile);
   pCreate->daysToKeep      = htonl(pCreate->daysToKeep);
   pCreate->daysToKeep1     = htonl(pCreate->daysToKeep1);
@@ -757,14 +758,15 @@ static void mgmtProcessCreateDbMsg(SQueuedMsg *pMsg) {
 static SDbCfg mgmtGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) {
   SDbCfg  newCfg = pDb->cfg;
   int32_t cacheBlockSize = htonl(pAlter->daysToKeep);
-  int32_t totalBlocks    = htonl(pAlter->totalBlocks);
-  int32_t maxTables      = htonl(pAlter->maxSessions);
+  int32_t totalBlocks    = htonl(pAlter->numOfBlocks);
+  int32_t maxTables      = htonl(pAlter->maxTables);
   int32_t daysToKeep     = htonl(pAlter->daysToKeep);
   int32_t daysToKeep1    = htonl(pAlter->daysToKeep1);
   int32_t daysToKeep2    = htonl(pAlter->daysToKeep2);
   int8_t  compression    = pAlter->compression;
   int8_t  replications   = pAlter->replications;
+  int8_t  walLevel       = pAlter->walLevel;
 
   terrno = TSDB_CODE_SUCCESS;
 
   if (cacheBlockSize > 0 && cacheBlockSize != pDb->cfg.cacheBlockSize) {
@@ -809,15 +811,20 @@ static SDbCfg mgmtGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) {
   if (replications > 0 && replications != pDb->cfg.replications) {
     mTrace("db:%s, replications:%d change to %d", pDb->name, pDb->cfg.replications, replications);
     newCfg.replications = replications;
-  }
 
   if (replications > mgmtGetDnodesNum()) {
     mError("db:%s, no enough dnode to change replica:%d", pDb->name, replications);
     terrno = TSDB_CODE_NO_ENOUGH_DNODES;
+  }
+
+  if (pDb->cfg.replications - replications >= 2) {
+    mError("db:%s, replica number can't change from 3 to 1", pDb->name, replications);
+    terrno = TSDB_CODE_INVALID_OPTION;
+  }
   }
 
-  if (pDb->cfg.replications - replications >= 2) {
-    mError("db:%s, replica number can't change from 3 to 1", pDb->name, replications);
+  if (walLevel > 0 && (walLevel < TSDB_MIN_WAL_LEVEL || walLevel > TSDB_MAX_WAL_LEVEL)) {
+    mError("db:%s, wal level %d should be between 0-2, origin:%d", pDb->name, walLevel, pDb->cfg.walLevel);
     terrno = TSDB_CODE_INVALID_OPTION;
   }
 
@@ -142,7 +142,7 @@ static void *sdbGetTableFromId(int32_t tableId) {
 }
 
 static int32_t sdbInitWal() {
-  SWalCfg walCfg = {.commitLog = 2, .wals = 2, .keep = 1};
+  SWalCfg walCfg = {.walLevel = 2, .wals = 2, .keep = 1};
   char temp[TSDB_FILENAME_LEN];
   sprintf(temp, "%s/wal", tsMnodeDir);
   tsSdbObj.wal = walOpen(temp, &walCfg);
@@ -324,7 +324,6 @@ void sdbCleanUp() {
 
   tsSdbObj.status = SDB_STATUS_CLOSING;
   syncStop(tsSdbObj.sync);
-  free(tsSdbObj.sync);
   walClose(tsSdbObj.wal);
   sem_destroy(&tsSdbObj.sem);
   pthread_mutex_destroy(&tsSdbObj.mutex);
@@ -108,7 +108,6 @@ void mgmtCleanUpShell() {
   }
 
   if (tsQhandleCache) {
-    taosCacheEmpty(tsQhandleCache);
     taosCacheCleanup(tsQhandleCache);
     tsQhandleCache = NULL;
   }
@@ -560,7 +560,7 @@ SMDCreateVnodeMsg *mgmtBuildCreateVnodeMsg(SVgObj *pVgroup) {
   pCfg->commitTime    = htonl(pDb->cfg.commitTime);
   pCfg->precision     = pDb->cfg.precision;
   pCfg->compression   = pDb->cfg.compression;
-  pCfg->commitLog     = pDb->cfg.commitLog;
+  pCfg->walLevel      = pDb->cfg.walLevel;
   pCfg->replications  = (int8_t) pVgroup->numOfVnodes;
   pCfg->wals          = 3;
   pCfg->quorum        = 1;
@@ -269,11 +269,12 @@ void httpCleanUpConnect(HttpServer *pServer) {
 
   for (i = 0; i < pServer->numOfThreads; ++i) {
     pThread = pServer->pThreads + i;
+    if (pThread == NULL) continue;
     //taosCloseSocket(pThread->pollFd);
 
-    while (pThread->pHead) {
-      httpCleanUpContext(pThread->pHead, 0);
-    }
+    //while (pThread->pHead) {
+    //  httpCleanUpContext(pThread->pHead, 0);
+    //}
 
     pthread_cancel(pThread->thread);
     pthread_join(pThread->thread, NULL);
@@ -504,8 +505,8 @@ void httpAcceptHttpConnection(void *arg) {
   sockFd = taosOpenTcpServerSocket(pServer->serverIp, pServer->serverPort);
 
   if (sockFd < 0) {
-    httpError("http server:%s, failed to open http socket, ip:%s:%u", pServer->label, pServer->serverIp,
-              pServer->serverPort);
+    httpError("http server:%s, failed to open http socket, ip:%s:%u error:%s", pServer->label, taosIpStr(pServer->serverIp),
+              pServer->serverPort, strerror(errno));
     return;
   } else {
     httpPrint("http service init success at %u", pServer->serverPort);
@@ -645,7 +646,7 @@ bool httpInitConnect(HttpServer *pServer) {
   }
   pthread_attr_destroy(&thattr);
 
-  httpTrace("http server:%s, initialized, ip:%s:%u, numOfThreads:%d", pServer->label, pServer->serverIp,
+  httpTrace("http server:%s, initialized, ip:%s:%u, numOfThreads:%d", pServer->label, taosIpStr(pServer->serverIp),
             pServer->serverPort, pServer->numOfThreads);
   return true;
 }
@@ -117,7 +117,7 @@ void httpCleanUpSystem() {
   httpPrint("http service cleanup");
   httpStopSystem();
 
-#if 0
+//#if 0
   if (httpServer == NULL) {
     return;
   }
@@ -131,7 +131,13 @@ void httpCleanUpSystem() {
     httpServer->timerHandle = NULL;
   }
 
-  httpCleanUpConnect(httpServer);
+  if (httpServer->pThreads != NULL) {
+    httpCleanUpConnect(httpServer);
+    httpServer->pThreads = NULL;
+  }
 
+#if 0
   httpRemoveAllSessions(httpServer);
 
   if (httpServer->pContextPool != NULL) {
@@ -44,7 +44,7 @@
 
 #define SQL_LENGTH     1024
 #define LOG_LEN_STR    80
-#define IP_LEN_STR     15
+#define IP_LEN_STR     18
 #define CHECK_INTERVAL 1000
 
 typedef enum {
@@ -115,6 +115,13 @@ static void monitorInitConn(void *para, void *unused) {
   if (tsMonitorConn.ep[0] == 0)
     strcpy(tsMonitorConn.ep, tsLocalEp);
 
+  int len = strlen(tsMonitorConn.ep);
+  for (int i = 0; i < len; ++i) {
+    if (tsMonitorConn.ep[i] == ':' || tsMonitorConn.ep[i] == '-') {
+      tsMonitorConn.ep[i] = '_';
+    }
+  }
+
   if (tsMonitorConn.conn == NULL) {
     taos_connect_a(NULL, "monitor", tsInternalPass, "", 0, monitorInitConnCb, &tsMonitorConn, &(tsMonitorConn.conn));
   } else {
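A standalone sketch of the endpoint clean-up added above: ':' and '-' are mapped to '_' so the local endpoint can later be embedded in monitor table names such as dn_<ep> (see the CREATE_TB_DN statement below). The sample endpoint is made up; only the replacement logic mirrors the commit.

    #include <stdio.h>
    #include <string.h>

    // Same character substitution as monitorInitConn performs on tsMonitorConn.ep.
    static void sanitizeEp(char *ep) {
      int len = (int)strlen(ep);
      for (int i = 0; i < len; ++i) {
        if (ep[i] == ':' || ep[i] == '-') {
          ep[i] = '_';
        }
      }
    }

    int main(void) {
      char ep[64] = "node-1:6030";   // illustrative FQDN:port
      sanitizeEp(ep);
      printf("%s\n", ep);            // prints node_1_6030
      return 0;
    }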
@@ -141,8 +148,8 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
 
   if (cmd == MONITOR_CMD_CREATE_DB) {
     snprintf(sql, SQL_LENGTH,
-             "create database if not exists %s replica 1 days 10 keep 30 rows 1024 cache 2048 "
-             "ablocks 2 tblocks 32 tables 32 precision 'us'",
+             "create database if not exists %s replica 1 days 10 keep 30 cache 2 "
+             "blocks 2 maxtables 32 precision 'us'",
              tsMonitorDbName);
   } else if (cmd == MONITOR_CMD_CREATE_MT_DN) {
     snprintf(sql, SQL_LENGTH,
@@ -154,7 +161,7 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
             ", io_read float, io_write float"
             ", req_http int, req_select int, req_insert int"
            ") tags (ipaddr binary(%d))",
-            tsMonitorDbName, IP_LEN_STR + 1);
+            tsMonitorDbName, TSDB_FQDN_LEN + 1);
   } else if (cmd == MONITOR_CMD_CREATE_TB_DN) {
     snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn_%s using %s.dn tags('%s')", tsMonitorDbName,
              tsMonitorConn.ep, tsMonitorDbName, tsLocalEp);
@@ -111,13 +111,13 @@ typedef struct SCreateDBInfo {
   SSQLToken dbname;
   int32_t   replica;
   int32_t   cacheBlockSize;
-  int32_t   tablesPerVnode;
+  int32_t   maxTablesPerVnode;
+  int32_t   numOfBlocks;
   int32_t   daysPerFile;
-  int32_t   rowPerFileBlock;
-  float     numOfAvgCacheBlocks;
-  int32_t   numOfBlocksPerTable;
+  int32_t   minRowsPerBlock;
+  int32_t   maxRowsPerBlock;
   int64_t   commitTime;
-  int32_t   commitLog;
+  int32_t   walLevel;
   int32_t   compressionLevel;
   SSQLToken precision;
   bool      ignoreExists;
@@ -212,31 +212,30 @@ acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K
 %destructor keep {tVariantListDestroy($$);}
 keep(Y)    ::= KEEP tagitemlist(X).     { Y = X; }
 
-tables(Y)  ::= TABLES INTEGER(X).       { Y = X; }
+tables(Y)  ::= MAXTABLES INTEGER(X).    { Y = X; }
 cache(Y)   ::= CACHE INTEGER(X).        { Y = X; }
 replica(Y) ::= REPLICA INTEGER(X).      { Y = X; }
 days(Y)    ::= DAYS INTEGER(X).         { Y = X; }
-rows(Y)    ::= ROWS INTEGER(X).         { Y = X; }
-ablocks(Y) ::= ABLOCKS ID(X).           { Y = X; }
-tblocks(Y) ::= TBLOCKS INTEGER(X).      { Y = X; }
+minrows(Y) ::= MINROWS INTEGER(X).      { Y = X; }
+maxrows(Y) ::= MAXROWS INTEGER(X).      { Y = X; }
+blocks(Y)  ::= BLOCKS INTEGER(X).       { Y = X; }
 ctime(Y)   ::= CTIME INTEGER(X).        { Y = X; }
-clog(Y)    ::= CLOG INTEGER(X).         { Y = X; }
+wal(Y)     ::= WAL INTEGER(X).          { Y = X; }
 comp(Y)    ::= COMP INTEGER(X).         { Y = X; }
 prec(Y)    ::= PRECISION STRING(X).     { Y = X; }
 
 %type db_optr {SCreateDBInfo}
 db_optr(Y) ::= . {setDefaultCreateDbOption(&Y);}
 
-db_optr(Y) ::= db_optr(Z) tables(X).  { Y = Z; Y.tablesPerVnode = strtol(X.z, NULL, 10); }
+db_optr(Y) ::= db_optr(Z) tables(X).  { Y = Z; Y.maxTablesPerVnode = strtol(X.z, NULL, 10); }
 db_optr(Y) ::= db_optr(Z) cache(X).   { Y = Z; Y.cacheBlockSize = strtol(X.z, NULL, 10); }
 db_optr(Y) ::= db_optr(Z) replica(X). { Y = Z; Y.replica = strtol(X.z, NULL, 10); }
 db_optr(Y) ::= db_optr(Z) days(X).    { Y = Z; Y.daysPerFile = strtol(X.z, NULL, 10); }
-db_optr(Y) ::= db_optr(Z) rows(X).    { Y = Z; Y.rowPerFileBlock = strtol(X.z, NULL, 10); }
-db_optr(Y) ::= db_optr(Z) ablocks(X). { Y = Z; Y.numOfAvgCacheBlocks = strtod(X.z, NULL); }
-db_optr(Y) ::= db_optr(Z) tblocks(X). { Y = Z; Y.numOfBlocksPerTable = strtol(X.z, NULL, 10); }
+db_optr(Y) ::= db_optr(Z) minrows(X). { Y = Z; Y.minRowsPerBlock = strtod(X.z, NULL); }
+db_optr(Y) ::= db_optr(Z) maxrows(X). { Y = Z; Y.maxRowsPerBlock = strtod(X.z, NULL); }
+db_optr(Y) ::= db_optr(Z) blocks(X).  { Y = Z; Y.numOfBlocks = strtol(X.z, NULL, 10); }
 db_optr(Y) ::= db_optr(Z) ctime(X).   { Y = Z; Y.commitTime = strtol(X.z, NULL, 10); }
-db_optr(Y) ::= db_optr(Z) clog(X).    { Y = Z; Y.commitLog = strtol(X.z, NULL, 10); }
+db_optr(Y) ::= db_optr(Z) wal(X).     { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); }
 db_optr(Y) ::= db_optr(Z) comp(X).    { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); }
 db_optr(Y) ::= db_optr(Z) prec(X).    { Y = Z; Y.precision = X; }
 db_optr(Y) ::= db_optr(Z) keep(X).    { Y = Z; Y.keep = X; }
@@ -245,7 +244,11 @@ db_optr(Y) ::= db_optr(Z) keep(X). { Y = Z; Y.keep = X; }
 alter_db_optr(Y) ::= . { setDefaultCreateDbOption(&Y);}
 
 alter_db_optr(Y) ::= alter_db_optr(Z) replica(X). { Y = Z; Y.replica = strtol(X.z, NULL, 10); }
-alter_db_optr(Y) ::= alter_db_optr(Z) tables(X).  { Y = Z; Y.tablesPerVnode = strtol(X.z, NULL, 10); }
+alter_db_optr(Y) ::= alter_db_optr(Z) tables(X).  { Y = Z; Y.maxTablesPerVnode = strtol(X.z, NULL, 10); }
+alter_db_optr(Y) ::= alter_db_optr(Z) keep(X).    { Y = Z; Y.keep = X; }
+alter_db_optr(Y) ::= alter_db_optr(Z) blocks(X).  { Y = Z; Y.numOfBlocks = strtol(X.z, NULL, 10); }
+alter_db_optr(Y) ::= alter_db_optr(Z) comp(X).    { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); }
+alter_db_optr(Y) ::= alter_db_optr(Z) wal(X).     { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); }
 
 %type typename {TAOS_FIELD}
 typename(A) ::= ids(X). { tSQLSetColumnType (&A, &X); }
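With the grammar rules above, CREATE DATABASE and ALTER DATABASE accept the renamed options end to end. A hedged sketch of what a client program might issue after this change; the host, the default "taosdata" password, the option values, and the lack of error handling are all illustrative assumptions. taos_init, taos_connect, and taos_query are the standard client entry points already used by the shell and test code in this diff.

    #include <taos.h>

    int main(void) {
      taos_init();
      TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
      if (taos == NULL) return 1;

      // Option keywords (maxtables, minrows, maxrows, blocks, wal, comp)
      // are the ones introduced by the grammar changes above.
      taos_query(taos, "create database if not exists demo "
                       "replica 1 days 10 keep 30 cache 2 blocks 2 "
                       "maxtables 32 minrows 100 maxrows 4096 wal 2 comp 2 precision 'us'");

      // The new alter_db_optr rules allow these options to be changed later.
      taos_query(taos, "alter database demo wal 1 blocks 4 maxtables 64");

      taos_close(taos);
      return 0;
    }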
@@ -119,6 +119,7 @@ typedef struct SArithmeticSupport {
   SExprInfo   *pArithExpr;
   int32_t      numOfCols;
   SColumnInfo *colList;
+  SArray      *exprList;   // client side used
   int32_t      offset;
   char       **data;
 } SArithmeticSupport;
@@ -220,7 +221,7 @@ typedef struct SQLAggFuncElem {
 #define GET_RES_INFO(ctx) ((ctx)->resultInfo)
 
 int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
-                          int16_t *len, int16_t *interResBytes, int16_t extLength, bool isSuperTable);
+                          int16_t *len, int16_t *interBytes, int16_t extLength, bool isSuperTable);
 
 #define IS_STREAM_QUERY_VALID(x)  (((x)&TSDB_FUNCSTATE_STREAM) != 0)
 #define IS_MULTIOUTPUT(x)         (((x)&TSDB_FUNCSTATE_MO) != 0)
@@ -979,7 +979,8 @@ void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput,
 
     } else if (pRight->nodeType == TSQL_NODE_COL) {  // 12 + columnRight
       // column data specified on right-hand-side
-      char * pRightInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->pSchema->colId);
+      char *pRightInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->pSchema->colId);
+
       _bi_consumer_fn_t fp = tGetBiConsumerFn(pLeft->pVal->nType, pRight->pSchema->type, pExprs->_node.optr);
       fp(&pLeft->pVal->i64Key, pRightInputData, 1, numOfRows, pOutput, order);
 
@@ -52,6 +52,8 @@ int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
         Parse(pParser, 0, t0, pSQLInfo);
         goto abort_parse;
       }
 
+      case TK_QUESTION:
       case TK_ILLEGAL: {
         snprintf(pSQLInfo->pzErrMsg, tListLen(pSQLInfo->pzErrMsg), "unrecognized token: \"%s\"", t0.z);
         pSQLInfo->valid = false;
@@ -818,7 +820,7 @@ void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBI
 
   pInfo->pDCLInfo->dbOpt = *pDB;
   pInfo->pDCLInfo->dbOpt.dbname = *pToken;
-  pInfo->pDCLInfo->dbOpt.ignoreExists = (pIgExists != NULL);
+  pInfo->pDCLInfo->dbOpt.ignoreExists = (pIgExists->z != NULL);
 }
 
 void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo) {
@@ -886,16 +888,16 @@ void setKillSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *ip) {
 }
 
 void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) {
-  pDBInfo->numOfBlocksPerTable = 50;
   pDBInfo->compressionLevel = -1;
 
-  pDBInfo->commitLog = -1;
+  pDBInfo->walLevel = -1;
   pDBInfo->commitTime = -1;
-  pDBInfo->tablesPerVnode = -1;
-  pDBInfo->numOfAvgCacheBlocks = -1;
+  pDBInfo->maxTablesPerVnode = -1;
 
   pDBInfo->cacheBlockSize = -1;
-  pDBInfo->rowPerFileBlock = -1;
+  pDBInfo->numOfBlocks = -1;
+  pDBInfo->maxRowsPerBlock = -1;
+  pDBInfo->minRowsPerBlock = -1;
   pDBInfo->daysPerFile = -1;
 
   pDBInfo->replica = -1;
@@ -117,12 +117,13 @@ static SKeyword keywordTable[] = {
     {"KEEP",        TK_KEEP},
     {"REPLICA",     TK_REPLICA},
     {"DAYS",        TK_DAYS},
-    {"ROWS",        TK_ROWS},
+    {"MINROWS",     TK_MINROWS},
+    {"MAXROWS",     TK_MAXROWS},
+    {"BLOCKS",      TK_BLOCKS},
+    {"MAXTABLES",   TK_MAXTABLES},
     {"CACHE",       TK_CACHE},
-    {"ABLOCKS",     TK_ABLOCKS},
-    {"TBLOCKS",     TK_TBLOCKS},
     {"CTIME",       TK_CTIME},
-    {"CLOG",        TK_CLOG},
+    {"WAL",         TK_WAL},
     {"COMP",        TK_COMP},
     {"PRECISION",   TK_PRECISION},
     {"LP",          TK_LP},
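For orientation, a simplified sketch of how a keyword table like the one above turns SQL words into token ids for the grammar; this is not the real tokenizer's lookup, the table here is truncated, and the demo enum values simply mirror the renumbered ttokendef.h entries from this commit.

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    typedef struct SKeywordDemo {
      const char *name;
      int         type;
    } SKeywordDemo;

    // Token values follow the new ttokendef.h: MINROWS 90, MAXROWS 91, BLOCKS 92, WAL 94.
    enum { DEMO_TK_MINROWS = 90, DEMO_TK_MAXROWS = 91, DEMO_TK_BLOCKS = 92, DEMO_TK_WAL = 94 };

    static const SKeywordDemo demoKeywords[] = {
      {"MINROWS", DEMO_TK_MINROWS},
      {"MAXROWS", DEMO_TK_MAXROWS},
      {"BLOCKS",  DEMO_TK_BLOCKS},
      {"WAL",     DEMO_TK_WAL},
    };

    static int demoLookup(const char *word) {
      for (size_t i = 0; i < sizeof(demoKeywords) / sizeof(demoKeywords[0]); ++i) {
        if (strcasecmp(word, demoKeywords[i].name) == 0) return demoKeywords[i].type;
      }
      return -1;  // not a keyword; the real tokenizer would fall back to TK_ID
    }

    int main(void) {
      printf("wal -> %d\n", demoLookup("wal"));  // prints 94, i.e. TK_WAL after renumbering
      return 0;
    }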
@@ -1402,7 +1402,7 @@ static void setCtxTagColumnInfo(SQuery *pQuery, SQLFunctionCtx *pCtx) {
 
 static void setWindowResultInfo(SResultInfo *pResultInfo, SQuery *pQuery, bool isStableQuery) {
   for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
-    setResultInfoBuf(&pResultInfo[i], pQuery->pSelectExpr[i].interResBytes, isStableQuery);
+    setResultInfoBuf(&pResultInfo[i], pQuery->pSelectExpr[i].interBytes, isStableQuery);
   }
 }
 
@@ -4233,10 +4233,12 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
 
 
   // normal query setup the queryhandle here
-  if (isFirstLastRowQuery(pQuery) && !isSTableQuery) {  // in case of last_row query, invoke a different API.
-    pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableIdGroupInfo);
-  } else if (!isSTableQuery || isIntervalQuery(pQuery) || isFixedOutputQuery(pQuery)) {
-    pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableIdGroupInfo);
+  if (!onlyQueryTags(pQuery)) {
+    if (!isSTableQuery && isFirstLastRowQuery(pQuery)) {  // in case of last_row query, invoke a different API.
+      pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableIdGroupInfo);
+    } else if (!isSTableQuery || isIntervalQuery(pQuery) || isFixedOutputQuery(pQuery)) {
+      pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableIdGroupInfo);
+    }
   }
 
   pQInfo->tsdb = tsdb;
@@ -5540,7 +5542,7 @@ static int32_t createSqlFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo
 
     int32_t param = pExprs[i].base.arg[0].argValue.i64;
     if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].type, &pExprs[i].bytes,
-                          &pExprs[i].interResBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) {
+                          &pExprs[i].interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) {
       tfree(pExprs);
       return TSDB_CODE_INVALID_QUERY_MSG;
     }
@@ -5566,7 +5568,7 @@ static int32_t createSqlFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo
 
       int32_t ret =
           getResultDataInfo(pCol->type, pCol->bytes, functId, pExprs[i].base.arg[0].argValue.i64,
-                            &pExprs[i].type, &pExprs[i].bytes, &pExprs[i].interResBytes, tagLen, isSuperTable);
+                            &pExprs[i].type, &pExprs[i].bytes, &pExprs[i].interBytes, tagLen, isSuperTable);
       assert(ret == TSDB_CODE_SUCCESS);
     }
   }
@@ -5780,10 +5782,10 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou
   pQuery->rec.threshold = 4000;
 
   for (int32_t col = 0; col < pQuery->numOfOutput; ++col) {
-    assert(pExprs[col].interResBytes >= pExprs[col].bytes);
+    assert(pExprs[col].interBytes >= pExprs[col].bytes);
 
     // allocate additional memory for interResults that are usually larger then final results
-    size_t size = (pQuery->rec.capacity + 1) * pExprs[col].bytes + pExprs[col].interResBytes + sizeof(SData);
+    size_t size = (pQuery->rec.capacity + 1) * pExprs[col].bytes + pExprs[col].interBytes + sizeof(SData);
     pQuery->sdata[col] = (SData *)calloc(1, size);
     if (pQuery->sdata[col] == NULL) {
       goto _cleanup;
src/query/src/sql.c: 2192 changed lines (file diff suppressed because it is too large)
@@ -40,6 +40,10 @@ bool taosTmrReset(TAOS_TMR_CALLBACK fp, int mseconds, void *param, void *handle,
 
 void taosTmrCleanUp(void *handle);
 
+int32_t taosInitTimer(void (*callback)(int), int32_t ms);
+
+void taosUninitTimer();
+
 #ifdef __cplusplus
 }
 #endif
@@ -141,10 +141,7 @@ int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstP
  */
 void getTmpfilePath(const char *fileNamePattern, char *dstPath);
 
-int32_t taosInitTimer(void (*callback)(int), int32_t ms);
-void taosUninitTimer();
-
-bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len, int32_t* len);
+bool taosMbsToUcs4(char *mbs, size_t mbs_len, char *ucs4, int32_t ucs4_max_len, size_t* len);
 
 int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes);
 
@@ -304,9 +304,9 @@ static FORCE_INLINE SCacheDataNode *taosAddToCacheImpl(SCacheObj *pCacheObj, con
 static void doCleanupDataCache(SCacheObj *pCacheObj) {
   __cache_wr_lock(pCacheObj);
 
-  if (taosHashGetSize(pCacheObj->pHashTable) > 0) {
+  //if (taosHashGetSize(pCacheObj->pHashTable) > 0) {
     taosHashCleanup(pCacheObj->pHashTable);
-  }
+  //}
 
   __cache_unlock(pCacheObj);
 
@@ -490,25 +490,26 @@ bool taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs) {
 #endif
 }
 
-bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len, int32_t* len) {
+bool taosMbsToUcs4(char *mbs, size_t mbsLength, char *ucs4, int32_t ucs4_max_len, size_t* len) {
   memset(ucs4, 0, ucs4_max_len);
 #ifdef USE_LIBICONV
   iconv_t cd = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset);
-  size_t ucs4_input_len = mbs_len;
-  size_t outLen = ucs4_max_len;
-  if (iconv(cd, &mbs, &ucs4_input_len, &ucs4, &outLen) == -1) {
+  size_t ucs4_input_len = mbsLength;
+  size_t outLeft = ucs4_max_len;
+  if (iconv(cd, &mbs, &ucs4_input_len, &ucs4, &outLeft) == -1) {
     iconv_close(cd);
     return false;
   }
 
   iconv_close(cd);
   if (len != NULL) {
-    *len = outLen;
+    *len = ucs4_max_len - outLeft;
   }
 
   return true;
 #else
   mbstate_t state = {0};
-  int32_t len = mbsnrtowcs((wchar_t *) ucs4, (const char **) &mbs, mbs_len, ucs4_max_len / 4, &state);
+  int32_t len = mbsnrtowcs((wchar_t *) ucs4, (const char **) &mbs, mbsLength, ucs4_max_len / 4, &state);
   return len >= 0;
 #endif
 }
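A minimal sketch of a caller adapted to the changed signature: the input length and the reported output length are now size_t, and in the libiconv branch *len is the number of bytes actually written (ucs4_max_len - outLeft) rather than the remaining space. The helper name convertTagValue and the buffer size are illustrative assumptions; only the taosMbsToUcs4 prototype comes from the diff.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    // Prototype as changed in this commit (normally provided by the utility header).
    bool taosMbsToUcs4(char *mbs, size_t mbsLength, char *ucs4, int32_t ucs4_max_len, size_t *len);

    static bool convertTagValue(char *input, char *ucs4Buf, int32_t ucs4BufLen) {
      size_t written = 0;
      if (!taosMbsToUcs4(input, strlen(input), ucs4Buf, ucs4BufLen, &written)) {
        return false;   // conversion failed, e.g. unsupported charset
      }
      // 'written' now reports how much of the UCS-4 buffer was filled.
      return written <= (size_t)ucs4BufLen;
    }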
@@ -117,7 +117,7 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
     return TSDB_CODE_VG_INIT_FAILED;
   }
 
-  dPrint("vgId:%d, vnode is created, clog:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.commitLog);
+  dPrint("vgId:%d, vnode is created, clog:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.walLevel);
   code = vnodeOpen(pVnodeCfg->cfg.vgId, rootDir);
 
   return code;
@@ -434,7 +434,7 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
   len += snprintf(content + len, maxLen - len, "  \"commitTime\": %d,\n", pVnodeCfg->cfg.commitTime);
   len += snprintf(content + len, maxLen - len, "  \"precision\": %d,\n", pVnodeCfg->cfg.precision);
   len += snprintf(content + len, maxLen - len, "  \"compression\": %d,\n", pVnodeCfg->cfg.compression);
-  len += snprintf(content + len, maxLen - len, "  \"commitLog\": %d,\n", pVnodeCfg->cfg.commitLog);
+  len += snprintf(content + len, maxLen - len, "  \"walLevel\": %d,\n", pVnodeCfg->cfg.walLevel);
   len += snprintf(content + len, maxLen - len, "  \"replica\": %d,\n", pVnodeCfg->cfg.replications);
   len += snprintf(content + len, maxLen - len, "  \"wals\": %d,\n", pVnodeCfg->cfg.wals);
   len += snprintf(content + len, maxLen - len, "  \"quorum\": %d,\n", pVnodeCfg->cfg.quorum);
@@ -579,12 +579,12 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
   }
   pVnode->tsdbCfg.compression = (int8_t)compression->valueint;
 
-  cJSON *commitLog = cJSON_GetObjectItem(root, "commitLog");
-  if (!commitLog || commitLog->type != cJSON_Number) {
-    dError("pVnode:%p vgId:%d, failed to read vnode cfg, commitLog not found", pVnode, pVnode->vgId);
+  cJSON *walLevel = cJSON_GetObjectItem(root, "walLevel");
+  if (!walLevel || walLevel->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, walLevel not found", pVnode, pVnode->vgId);
     goto PARSE_OVER;
   }
-  pVnode->walCfg.commitLog = (int8_t)commitLog->valueint;
+  pVnode->walCfg.walLevel = (int8_t) walLevel->valueint;
 
   cJSON *wals = cJSON_GetObjectItem(root, "wals");
   if (!wals || wals->type != cJSON_Number) {
@ -62,7 +62,7 @@ void *walOpen(const char *path, const SWalCfg *pCfg) {
|
||||||
pWal->max = pCfg->wals;
|
pWal->max = pCfg->wals;
|
||||||
pWal->id = 0;
|
pWal->id = 0;
|
||||||
pWal->num = 0;
|
pWal->num = 0;
|
||||||
pWal->level = pCfg->commitLog;
|
pWal->level = pCfg->walLevel;
|
||||||
pWal->keep = pCfg->keep;
|
pWal->keep = pCfg->keep;
|
||||||
strcpy(pWal->path, path);
|
strcpy(pWal->path, path);
|
||||||
pthread_mutex_init(&pWal->mutex, NULL);
|
pthread_mutex_init(&pWal->mutex, NULL);
|
||||||
|
|
|
@ -80,7 +80,7 @@ int main(int argc, char *argv[]) {
|
||||||
taosInitLog("wal.log", 100000, 10);
|
taosInitLog("wal.log", 100000, 10);
|
||||||
|
|
||||||
SWalCfg walCfg;
|
SWalCfg walCfg;
|
||||||
walCfg.commitLog = level;
|
walCfg.walLevel = level;
|
||||||
walCfg.wals = max;
|
walCfg.wals = max;
|
||||||
walCfg.keep = keep;
|
walCfg.keep = keep;
|
||||||
|
|
||||||
|
|
|
@@ -74,8 +74,9 @@ int main(int argc, char *argv[]) {
   printf("success to connect to server\n");
 
   doQuery(taos, "create database if not exists test");
-  doQuery(taos, "use test");
-  doQuery(taos, "select * from t1 order by ts desc");
+  doQuery(taos, "create database if not exists test");
+// doQuery(taos, "use test");
+// doQuery(taos, "select sum(k)*max(k), sum(k), max(k) from tm99");
 
 // doQuery(taos, "create table t1(ts timestamp, k binary(12), f nchar(2))");
 // for(int32_t i = 0; i< 100000; ++i) {
@@ -9,6 +9,11 @@ python3 ./test.py $1 -f insert/smallint.py
 python3 ./test.py $1 -f insert/tinyint.py
 python3 ./test.py $1 -f insert/date.py
 python3 ./test.py $1 -f insert/binary.py
+
+python3 ./test.py $1 -f table/column_name.py
+python3 ./test.py $1 -f table/column_num.py
+python3 ./test.py $1 -f table/db_table.py
+
 python3 ./test.py $1 -f import_merge/importBlock1HO.py
 python3 ./test.py $1 -f import_merge/importBlock1HPO.py
 python3 ./test.py $1 -f import_merge/importBlock1H.py
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -43,19 +43,19 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than 10 rows less than %d rows will go to data file" %
-            self.rows)
+            self.maxrows)
 
         tdLog.info("================= step2")
-        tdLog.info("import %d sequential data" % (self.rows / 2))
+        tdLog.info("import %d sequential data" % (self.maxrows / 2))
         startTime = self.startTime
         sqlcmd = ['import into tb1 values']
-        for rid in range(1, self.rows / 2 + 1):
+        for rid in range(1, self.maxrows / 2 + 1):
             sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
         tdSql.execute(" ".join(sqlcmd))
 
         tdLog.info("================= step3")
         tdSql.query('select * from tb1')
-        tdSql.checkRows(self.rows / 2)
+        tdSql.checkRows(self.maxrows / 2)
 
         tdLog.info("================= step4")
         tdDnodes.stop(1)
@@ -70,7 +70,7 @@ class TDTestCase:
         tdLog.info("================= step7")
         tdSql.execute('reset query cache')
         tdSql.query('select * from tb1 order by ts desc')
-        tdSql.checkRows(self.rows / 2 + 1)
+        tdSql.checkRows(self.maxrows / 2 + 1)
 
         tdLog.info("================= step8")
         tdLog.info("import 10 data in batch before")
@@ -83,7 +83,7 @@ class TDTestCase:
         tdLog.info("================= step9")
         tdSql.execute('reset query cache')
         tdSql.query('select * from tb1 order by ts desc')
-        tdSql.checkRows(self.rows / 2 + 11)
+        tdSql.checkRows(self.maxrows / 2 + 11)
 
     def stop(self):
         tdSql.close()
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxwrows = 200
         self.rowsPerTable = 20
 
         tdDnodes.stop(1)
@@ -36,7 +36,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -44,7 +44,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than 10 rows less than %d rows will go to data file" %
-            self.rows)
+            self.maxrows)
 
         tdLog.info("================= step2")
         tdLog.info("import %d sequential data" % self.rowsPerTable)
@@ -27,7 +27,7 @@ class TDTestCase:
    def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
         self.rowsPerTable = 100
 
         tdDnodes.stop(1)
@@ -36,7 +36,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -44,7 +44,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than 10 rows less than %d rows will go to data file" %
-            self.rows)
+            self.maxrows)
 
         tdLog.info("================= step2")
         tdLog.info("import %d sequential data" % self.rowsPerTable)
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
         self.rowsPerTable = 20
 
         tdDnodes.stop(1)
@@ -36,7 +36,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -44,7 +44,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than 10 rows less than %d rows will go to data file" %
-            self.rows)
+            self.maxrows)
 
         tdLog.info("================= step2")
         tdLog.info("import %d sequential data" % self.rowsPerTable)
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -43,7 +43,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than %d rows less than %d rows will go to data and last file" %
-            (self.rows, 10 + self.rows))
+            (self.maxrows, 10 + self.maxrows))
 
         tdLog.info("================= step2")
         tdLog.info("import 205 sequential data")
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
        tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -43,7 +43,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than %d rows less than %d rows will go to data and last file" %
-            (self.rows, 10 + self.rows))
+            (self.maxrows, 10 + self.maxrows))
 
         tdLog.info("================= step2")
         tdLog.info("import 205 sequential data")
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -43,7 +43,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than %d rows less than %d rows will go to data and last file" %
-            (self.rows, 10 + self.rows))
+            (self.maxrows, 10 + self.maxrows))
 
         tdLog.info("================= step2")
         tdLog.info("import 205 sequential data")
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -43,7 +43,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than %d rows less than %d rows will go to data and last file" %
-            (self.rows, 10 + self.rows))
+            (self.maxrows, 10 + self.maxrows))
 
         tdLog.info("================= step2")
         tdLog.info("import 205 sequential data")
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -43,7 +43,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than %d rows less than %d rows will go to data and last file" %
-            (self.rows, 10 + self.rows))
+            (self.maxrows, 10 + self.maxrows))
 
         tdLog.info("================= step2")
         tdLog.info("import 205 sequential data")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -39,7 +39,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than %d rows less than %d rows will go to data and last file" %
-            (self.rows, 10 + self.rows))
+            (self.maxrows, 10 + self.maxrows))
 
         tdLog.info("================= step2")
         tdLog.info("import 205 sequential data")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -39,7 +39,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than %d rows less than %d rows will go to data and last file" %
-            (self.rows, 10 + self.rows))
+            (self.maxrows, 10 + self.maxrows))
 
         tdLog.info("================= step2")
         tdLog.info("import 205 sequential data")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -39,7 +39,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than %d rows less than %d rows will go to data and last file" %
-            (self.rows, 10 + self.rows))
+            (self.maxrows, 10 + self.maxrows))
 
         tdLog.info("================= step2")
         tdLog.info("import 205 sequential data")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -39,7 +39,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than 10 rows less than %d rows will go to data file" %
-            self.rows)
+            self.maxrows)
 
         tdLog.info("================= step2")
         tdLog.info("import 20 sequential data")
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -43,19 +43,19 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than 10 rows less than %d rows will go to data file" %
-            self.rows)
+            self.maxrows)
 
         tdLog.info("================= step2")
-        tdLog.info("import %d sequential data" % (self.rows / 2))
+        tdLog.info("import %d sequential data" % (self.maxrows / 2))
         startTime = self.startTime
         sqlcmd = ['import into tb1 values']
-        for rid in range(1, self.rows / 2 + 1):
+        for rid in range(1, self.maxrows / 2 + 1):
             sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
         tdSql.execute(" ".join(sqlcmd))
 
         tdLog.info("================= step3")
         tdSql.query('select * from tb1')
-        tdSql.checkRows(self.rows / 2)
+        tdSql.checkRows(self.maxrows / 2)
 
         tdLog.info("================= step4")
         tdDnodes.stop(1)
@@ -73,7 +73,7 @@ class TDTestCase:
         tdLog.info("================= step9")
         tdSql.execute('reset query cache')
         tdSql.query('select * from tb1 order by ts desc')
-        tdSql.checkRows(self.rows / 2)
+        tdSql.checkRows(self.maxrows / 2)
 
     def stop(self):
         tdSql.close()
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -39,7 +39,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than 10 rows less than %d rows will go to data file" %
-            self.rows)
+            self.maxrows)
 
         tdLog.info("================= step2")
         tdLog.info("import 20 sequential data")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -39,7 +39,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than 10 rows less than %d rows will go to data file" %
-            self.rows)
+            self.maxrows)
 
         tdLog.info("================= step2")
         tdLog.info("import 20 sequential data")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -39,7 +39,7 @@ class TDTestCase:
         tdSql.execute('create table tb1 (ts timestamp, speed int)')
         tdLog.info(
             "More than 10 rows less than %d rows will go to data file" %
-            self.rows)
+            self.maxrows)
 
         tdLog.info("================= step2")
         tdLog.info("import 20 sequential data")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -27,11 +27,11 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.rows = 200
+        self.maxrows = 200
 
         tdDnodes.stop(1)
         tdDnodes.deploy(1)
@@ -35,7 +35,7 @@ class TDTestCase:
 
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db rows %d' % self.rows)
+        tdSql.execute('create database db maxrows %d' % self.maxrows)
         tdSql.execute('use db')
 
         tdLog.info("================= step1")
@@ -18,7 +18,7 @@ class TDTestCase:
         # TSIM:
         # TSIM: system sh/ip.sh -i 1 -s up
         # TSIM: system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1
-        # TSIM: system sh/cfg.sh -n dnode1 -c commitLog -v 0
+        # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0
         # TSIM: system sh/exec.sh -n dnode1 -s start
         # TSIM:
         # TSIM: sleep 3000
@@ -34,12 +34,12 @@ python3 ./test.py $1 -f table/db_table.py
 python3 ./test.py -s $1
 sleep 1
 
-#python3 ./test.py $1 -f import_merge/importDataLastTO.py
-#python3 ./test.py -s $1
-#sleep 1
-#python3 ./test.py $1 -f import_merge/importDataLastT.py
-#python3 ./test.py -s $1
-#sleep 1
+python3 ./test.py $1 -f import_merge/importDataLastTO.py
+python3 ./test.py -s $1
+sleep 1
+python3 ./test.py $1 -f import_merge/importDataLastT.py
+python3 ./test.py -s $1
+sleep 1
 python3 ./test.py $1 -f import_merge/importDataTO.py
 python3 ./test.py -s $1
 sleep 1
@@ -18,7 +18,7 @@ class TDTestCase:
         # TSIM:
         # TSIM: system sh/ip.sh -i 1 -s up
         # TSIM: system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1
-        # TSIM: system sh/cfg.sh -n dnode1 -c commitLog -v 0
+        # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0
         # TSIM: system sh/exec.sh -n dnode1 -s start
         # TSIM:
         # TSIM: sleep 3000
@@ -18,7 +18,7 @@ class TDTestCase:
         # TSIM:
         # TSIM: system sh/ip.sh -i 1 -s up
         # TSIM: system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1
-        # TSIM: system sh/cfg.sh -n dnode1 -c commitLog -v 0
+        # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0
         # TSIM: system sh/exec.sh -n dnode1 -s start
         # TSIM:
         # TSIM: sleep 3000
@@ -20,9 +20,26 @@ run general/table/tinyint.sim
 run general/table/db.table.sim
 
 run general/user/basic1.sim
-#run general/user/pass_alter.sim
+run general/user/pass_alter.sim
 run general/user/pass_len.sim
 run general/user/user_create.sim
 run general/user/user_len.sim
 
+# run general/compute/count.sim
+# run general/compute/avg.sim
+# run general/compute/sum.sim
+# run general/compute/min.sim
+# run general/compute/max.sim
+# run general/compute/first.sim
+# run general/compute/last.sim
+run general/compute/stddev.sim
+# run general/compute/leastsquare.sim
+run general/compute/top.sim
+run general/compute/bottom.sim
+run general/compute/percentile.sim
+run general/compute/diff.sim
+# run general/compute/interval.sim
+run general/compute/null.sim
+# run general/compute/diff2.sim
+
 ##################################
@@ -1,7 +1,7 @@
 system sh/stop_dnodes.sh
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c clog -v 0
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
 
 print ========== step1
 system sh/cfg.sh -n dnode1 -c monitor -v 1
@@ -1,7 +1,7 @@
 system sh/stop_dnodes.sh
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c clog -v 0
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
 system sh/exec.sh -n dnode1 -s start
 sleep 3000
 sql connect
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 system sh/deploy.sh -n dnode1 -i 1
 
-system sh/cfg.sh -n dnode1 -c clog -v 0
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
 system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1
 system sh/cfg.sh -n dnode1 -c mgmtEqualVnodeNum -v 4
 
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 system sh/deploy.sh -n dnode1 -i 1
 
-system sh/cfg.sh -n dnode1 -c clog -v 0
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
 system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1
 system sh/cfg.sh -n dnode1 -c mgmtEqualVnodeNum -v 4
 
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c clog -v 0
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c clog -v 0
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -4,8 +4,8 @@ system sh/stop_dnodes.sh
 system sh/deploy.sh -n dnode1 -i 1
 system sh/deploy.sh -n dnode2 -i 2
 
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode2 -c commitLog -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode2 -c walLevel -v 0
 system sh/cfg.sh -n dnode1 -c httpMaxThreads -v 2
 system sh/cfg.sh -n dnode2 -c httpMaxThreads -v 2
 system sh/cfg.sh -n dnode1 -c monitor -v 1
@@ -2,9 +2,8 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c meterMetaKeepTimer -v 10
-system sh/cfg.sh -n dnode1 -c metricMetaKeepTimer -v 10
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -2,9 +2,8 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c meterMetaKeepTimer -v 10
-system sh/cfg.sh -n dnode1 -c metricMetaKeepTimer -v 10
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -2,9 +2,8 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c meterMetaKeepTimer -v 10
-system sh/cfg.sh -n dnode1 -c metricMetaKeepTimer -v 10
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -48,9 +47,8 @@ endi
 print =============== step2
 system sh/exec.sh -n dnode1 -s stop
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c meterMetaKeepTimer -v 10
-system sh/cfg.sh -n dnode1 -c metricMetaKeepTimer -v 10
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
 system sh/exec.sh -n dnode1 -s start
 
 print =============== step3
@@ -2,9 +2,8 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c meterMetaKeepTimer -v 1
-system sh/cfg.sh -n dnode1 -c metricMetaKeepTimer -v 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 1
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -98,9 +97,8 @@ endi
 print =============== step4
 system sh/exec.sh -n dnode1 -s stop
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c meterMetaKeepTimer -v 1
-system sh/cfg.sh -n dnode1 -c metricMetaKeepTimer -v 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 1
 system sh/exec.sh -n dnode1 -s start
 
 print =============== step5
@@ -2,9 +2,8 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c meterMetaKeepTimer -v 10
-system sh/cfg.sh -n dnode1 -c metricMetaKeepTimer -v 10
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -36,9 +35,8 @@ endi
 print =============== step2
 system sh/exec.sh -n dnode1 -s stop
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c meterMetaKeepTimer -v 10
-system sh/cfg.sh -n dnode1 -c metricMetaKeepTimer -v 10
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
 system sh/exec.sh -n dnode1 -s start
 
 print =============== step3
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c clog -v 0
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
 
 print ========== step1
 system sh/cfg.sh -n dnode1 -c monitor -v 1
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/exec.sh -n dnode1 -s start
 
 sleep 3000
@@ -1,4 +1,4 @@
-run lite/column/table.sim
-run lite/column/metrics.sim
-run lite/column/stream.sim
-run lite/column/commit.sim
+run general/column/table.sim
+run general/column/metrics.sim
+run general/column/stream.sim
+run general/column/commit.sim
@@ -2,8 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 1
-system sh/cfg.sh -n dnode1 -c cacheBlockSize -v 1024
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
 system sh/cfg.sh -n dnode1 -c compression -v 1
 system sh/exec.sh -n dnode1 -s start
 
@@ -2,8 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c cacheBlockSize -v 1024
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/cfg.sh -n dnode1 -c compression -v 1
 system sh/exec.sh -n dnode1 -s start
 
@@ -2,8 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c cacheBlockSize -v 1024
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/cfg.sh -n dnode1 -c compression -v 2
 system sh/exec.sh -n dnode1 -s start
 
@@ -2,8 +2,7 @@ system sh/stop_dnodes.sh
 
 
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c commitLog -v 0
-system sh/cfg.sh -n dnode1 -c cacheBlockSize -v 1024
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/cfg.sh -n dnode1 -c compression -v 1
 system sh/exec.sh -n dnode1 -s start
 