commit a7fdc4bae3
other: merge 3.0
@@ -14,6 +14,12 @@
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
<br />
[](https://twitter.com/tdenginedb)
[](https://www.youtube.com/@tdengine)
[](https://discord.com/invite/VZdSuUg4pS)
[](https://www.linkedin.com/company/tdengine)
[](https://stackoverflow.com/questions/tagged/tdengine)

English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine.com) | [Learn more about TSDB](https://tdengine.com/tsdb/)
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.2.4")
SET(TD_VER_NUMBER "3.0.2.5")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 3e08996
GIT_TAG db6c843
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 6a2d9fc
GIT_TAG e04f39b
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@@ -12,6 +12,7 @@ SELECT [DISTINCT] select_list
from_clause
[WHERE condition]
[partition_by_clause]
[interp_clause]
[window_clause]
[group_by_clause]
[order_by_clasue]

@@ -52,8 +53,11 @@ window_clause: {
| STATE_WINDOW(col)
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]

interp_clause:
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)

partition_by_clause:
PARTITION BY expr [, expr] ...
PARTITION BY expr [, expr] ...

group_by_clause:
GROUP BY expr [, expr] ... HAVING condition
@@ -872,9 +872,9 @@ INTERP(expr)
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. The parameter `EVERY` must be an integer, with no quotes, with a time unit of: b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on `FILL` parameter.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on `FILL` parameter. For more information about the FILL clause, see [FILL Clause](../distinguished/#fill-clause).
- `INTERP` can only be used to interpolate within a single timeline, so it must be used with `partition by tbname` when it is used on a STable.
- Pseudo column `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported after version 3.0.1.4).
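Taken together, the updated lines document the new interp_clause (RANGE, EVERY, FILL). As a minimal sketch, assuming a hypothetical table d1001 with a column current and made-up timestamps (none of these names appear in this commit), a query following that grammar could look like:

```sql
-- Interpolate `current` every 30 seconds over a 5-minute range,
-- reusing the previous value when no row exists at an interpolation point.
-- d1001 and current are hypothetical names used only for this sketch.
SELECT _irowts, INTERP(current)
FROM d1001
RANGE('2023-01-01 00:00:00', '2023-01-01 00:05:00')
EVERY(30s)
FILL(PREV);
```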
@@ -13,6 +13,7 @@ SELECT [DISTINCT] select_list
from_clause
[WHERE condition]
[partition_by_clause]
[interp_clause]
[window_clause]
[group_by_clause]
[order_by_clasue]

@@ -53,8 +54,11 @@ window_clause: {
| STATE_WINDOW(col)
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]

interp_clause:
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)

partition_by_clause:
PARTITION BY expr [, expr] ...
PARTITION BY expr [, expr] ...

group_by_clause:
GROUP BY expr [, expr] ... HAVING condition
@@ -875,9 +875,9 @@ INTERP(expr)
- INTERP is used to obtain the record value of a specified column at a specified time slice. If no row matches that time slice, a value is interpolated according to the FILL parameter.
- The input of INTERP is the data of the specified column; a where clause can be used to filter the original data. If no filter condition is given, the input is all of the data.
- INTERP must be used together with the RANGE, EVERY and FILL keywords.
- The output time range of INTERP is specified by RANGE(timestamp1,timestamp2), which must satisfy timestamp1<=timestamp2. timestamp1 (required) is the start of the output time range: if timestamp1 qualifies for interpolation, it becomes the first output record. timestamp2 (required) is the end of the output time range: the timestamp of the last output record cannot be greater than timestamp2.
- INTERP determines the number of rows in the output time range according to EVERY, interpolating at fixed time intervals (the EVERY value) starting from timestamp1.
- INTERP decides how to interpolate at each qualifying time point according to the FILL parameter.
- The output time range of INTERP is specified by RANGE(timestamp1,timestamp2), which must satisfy timestamp1 <= timestamp2. timestamp1 (required) is the start of the output time range: if timestamp1 qualifies for interpolation, it becomes the first output record. timestamp2 (required) is the end of the output time range: the timestamp of the last output record cannot be greater than timestamp2.
- INTERP determines the number of rows in the output time range according to EVERY(time_unit), interpolating at fixed intervals of time_unit starting from timestamp1. time_unit accepts the time units 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day), and 1w (week). For example, EVERY(500a) interpolates the specified data every 500 milliseconds.
- INTERP decides how to interpolate at each qualifying time point according to the FILL parameter. For how to use the FILL clause, see [FILL clause](../distinguished/#fill-子句).
- INTERP can only interpolate within a single timeline, so when applied to a supertable it must be used together with partition by tbname.
- INTERP can be used with the pseudo column _irowts to return the timestamp corresponding to each interpolation point (supported since version 3.0.1.4).
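As with the English page above, a hedged example may help here; the supertable meters and column voltage are assumed names, not taken from this commit, and the query simply follows the translated rules (RANGE, EVERY, FILL, plus partition by tbname for a supertable):

```sql
-- Linear interpolation per child table of a supertable: PARTITION BY tbname
-- keeps each timeline separate, and _irowts returns the interpolated timestamps.
-- meters and voltage are hypothetical names used only for this sketch.
SELECT _irowts, INTERP(voltage)
FROM meters
PARTITION BY tbname
RANGE('2023-01-01 00:00:00', '2023-01-01 01:00:00')
EVERY(1m)
FILL(LINEAR);
```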
@@ -10,4 +10,4 @@
| 6 | taosdemo | This is an internal tool for testing Our JDBC-JNI, JDBC-RESTful, RESTful interfaces |


more detail: https://docs.taosdata.com/reference/connector/java/
more detail: https://docs.taosdata.com/connector/java/
@@ -58,7 +58,7 @@ extern int32_t tMsgDict[];
#define TMSG_INFO(TYPE) \
((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || (TYPE) < TDMT_SCH_MAX_MSG || \
(TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || (TYPE) < TDMT_SYNC_MAX_MSG) || \
(TYPE) < TDMT_VND_STREAM_MSG || (TYPE) < TDMT_VND_TMQ_MSG \
(TYPE) < TDMT_VND_STREAM_MSG || (TYPE) < TDMT_VND_TMQ_MSG || (TYPE) < TDMT_VND_TMQ_MAX_MSG \
? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
: 0

@@ -144,12 +144,14 @@ typedef enum _mgmt_table {
#define TSDB_ALTER_TABLE_UPDATE_OPTIONS 9
#define TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME 10

#define TSDB_FILL_NONE 0
#define TSDB_FILL_NULL 1
#define TSDB_FILL_SET_VALUE 2
#define TSDB_FILL_LINEAR 3
#define TSDB_FILL_PREV 4
#define TSDB_FILL_NEXT 5
#define TSDB_FILL_NONE 0
#define TSDB_FILL_NULL 1
#define TSDB_FILL_NULL_F 2
#define TSDB_FILL_SET_VALUE 3
#define TSDB_FILL_SET_VALUE_F 4
#define TSDB_FILL_LINEAR 5
#define TSDB_FILL_PREV 6
#define TSDB_FILL_NEXT 7

#define TSDB_ALTER_USER_PASSWD 0x1
#define TSDB_ALTER_USER_SUPERUSER 0x2
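The renumbered defines add TSDB_FILL_NULL_F and TSDB_FILL_SET_VALUE_F alongside the existing fill modes. Assuming these map to NULL_F and VALUE_F keywords in SQL (the TK_NULL_F and TK_VALUE_F tokens added later in this diff point that way, but this is an inference, not something this commit states), a forced-null fill on a windowed query might be sketched as:

```sql
-- Hypothetical use of the new forced-fill mode on an interval query.
-- NULL_F is assumed to force NULL into empty windows; exact semantics are not
-- described in this diff. meters and current are assumed names for this sketch.
SELECT _wstart, AVG(current)
FROM meters
WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-01 01:00:00'
INTERVAL(5m)
FILL(NULL_F);
```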
@@ -1752,6 +1754,7 @@ typedef struct {
#define STREAM_FILL_HISTORY_ON 1
#define STREAM_FILL_HISTORY_OFF 0
#define STREAM_DEFAULT_FILL_HISTORY STREAM_FILL_HISTORY_OFF
#define STREAM_DEFAULT_IGNORE_UPDATE 0

typedef struct {
char name[TSDB_STREAM_FNAME_LEN];

@@ -1769,6 +1772,7 @@ typedef struct {
SArray* pTags; // array of SField
// 3.0.20
int64_t checkpointFreq; // ms
int8_t igUpdate;
} SCMCreateStreamReq;

typedef struct {
@ -209,134 +209,136 @@
|
|||
#define TK_IGNORE 191
|
||||
#define TK_EXPIRED 192
|
||||
#define TK_FILL_HISTORY 193
|
||||
#define TK_SUBTABLE 194
|
||||
#define TK_KILL 195
|
||||
#define TK_CONNECTION 196
|
||||
#define TK_TRANSACTION 197
|
||||
#define TK_BALANCE 198
|
||||
#define TK_VGROUP 199
|
||||
#define TK_MERGE 200
|
||||
#define TK_REDISTRIBUTE 201
|
||||
#define TK_SPLIT 202
|
||||
#define TK_DELETE 203
|
||||
#define TK_INSERT 204
|
||||
#define TK_NULL 205
|
||||
#define TK_NK_QUESTION 206
|
||||
#define TK_NK_ARROW 207
|
||||
#define TK_ROWTS 208
|
||||
#define TK_QSTART 209
|
||||
#define TK_QEND 210
|
||||
#define TK_QDURATION 211
|
||||
#define TK_WSTART 212
|
||||
#define TK_WEND 213
|
||||
#define TK_WDURATION 214
|
||||
#define TK_IROWTS 215
|
||||
#define TK_CAST 216
|
||||
#define TK_NOW 217
|
||||
#define TK_TODAY 218
|
||||
#define TK_TIMEZONE 219
|
||||
#define TK_CLIENT_VERSION 220
|
||||
#define TK_SERVER_VERSION 221
|
||||
#define TK_SERVER_STATUS 222
|
||||
#define TK_CURRENT_USER 223
|
||||
#define TK_COUNT 224
|
||||
#define TK_LAST_ROW 225
|
||||
#define TK_CASE 226
|
||||
#define TK_END 227
|
||||
#define TK_WHEN 228
|
||||
#define TK_THEN 229
|
||||
#define TK_ELSE 230
|
||||
#define TK_BETWEEN 231
|
||||
#define TK_IS 232
|
||||
#define TK_NK_LT 233
|
||||
#define TK_NK_GT 234
|
||||
#define TK_NK_LE 235
|
||||
#define TK_NK_GE 236
|
||||
#define TK_NK_NE 237
|
||||
#define TK_MATCH 238
|
||||
#define TK_NMATCH 239
|
||||
#define TK_CONTAINS 240
|
||||
#define TK_IN 241
|
||||
#define TK_JOIN 242
|
||||
#define TK_INNER 243
|
||||
#define TK_SELECT 244
|
||||
#define TK_DISTINCT 245
|
||||
#define TK_WHERE 246
|
||||
#define TK_PARTITION 247
|
||||
#define TK_BY 248
|
||||
#define TK_SESSION 249
|
||||
#define TK_STATE_WINDOW 250
|
||||
#define TK_SLIDING 251
|
||||
#define TK_FILL 252
|
||||
#define TK_VALUE 253
|
||||
#define TK_NONE 254
|
||||
#define TK_PREV 255
|
||||
#define TK_LINEAR 256
|
||||
#define TK_NEXT 257
|
||||
#define TK_HAVING 258
|
||||
#define TK_RANGE 259
|
||||
#define TK_EVERY 260
|
||||
#define TK_ORDER 261
|
||||
#define TK_SLIMIT 262
|
||||
#define TK_SOFFSET 263
|
||||
#define TK_LIMIT 264
|
||||
#define TK_OFFSET 265
|
||||
#define TK_ASC 266
|
||||
#define TK_NULLS 267
|
||||
#define TK_ABORT 268
|
||||
#define TK_AFTER 269
|
||||
#define TK_ATTACH 270
|
||||
#define TK_BEFORE 271
|
||||
#define TK_BEGIN 272
|
||||
#define TK_BITAND 273
|
||||
#define TK_BITNOT 274
|
||||
#define TK_BITOR 275
|
||||
#define TK_BLOCKS 276
|
||||
#define TK_CHANGE 277
|
||||
#define TK_COMMA 278
|
||||
#define TK_COMPACT 279
|
||||
#define TK_CONCAT 280
|
||||
#define TK_CONFLICT 281
|
||||
#define TK_COPY 282
|
||||
#define TK_DEFERRED 283
|
||||
#define TK_DELIMITERS 284
|
||||
#define TK_DETACH 285
|
||||
#define TK_DIVIDE 286
|
||||
#define TK_DOT 287
|
||||
#define TK_EACH 288
|
||||
#define TK_FAIL 289
|
||||
#define TK_FILE 290
|
||||
#define TK_FOR 291
|
||||
#define TK_GLOB 292
|
||||
#define TK_ID 293
|
||||
#define TK_IMMEDIATE 294
|
||||
#define TK_IMPORT 295
|
||||
#define TK_INITIALLY 296
|
||||
#define TK_INSTEAD 297
|
||||
#define TK_ISNULL 298
|
||||
#define TK_KEY 299
|
||||
#define TK_MODULES 300
|
||||
#define TK_NK_BITNOT 301
|
||||
#define TK_NK_SEMI 302
|
||||
#define TK_NOTNULL 303
|
||||
#define TK_OF 304
|
||||
#define TK_PLUS 305
|
||||
#define TK_PRIVILEGE 306
|
||||
#define TK_RAISE 307
|
||||
#define TK_REPLACE 308
|
||||
#define TK_RESTRICT 309
|
||||
#define TK_ROW 310
|
||||
#define TK_SEMI 311
|
||||
#define TK_STAR 312
|
||||
#define TK_STATEMENT 313
|
||||
#define TK_STRICT 314
|
||||
#define TK_STRING 315
|
||||
#define TK_TIMES 316
|
||||
#define TK_UPDATE 317
|
||||
#define TK_VALUES 318
|
||||
#define TK_VARIABLE 319
|
||||
#define TK_VIEW 320
|
||||
#define TK_WAL 321
|
||||
#define TK_UPDATE 194
|
||||
#define TK_SUBTABLE 195
|
||||
#define TK_KILL 196
|
||||
#define TK_CONNECTION 197
|
||||
#define TK_TRANSACTION 198
|
||||
#define TK_BALANCE 199
|
||||
#define TK_VGROUP 200
|
||||
#define TK_MERGE 201
|
||||
#define TK_REDISTRIBUTE 202
|
||||
#define TK_SPLIT 203
|
||||
#define TK_DELETE 204
|
||||
#define TK_INSERT 205
|
||||
#define TK_NULL 206
|
||||
#define TK_NK_QUESTION 207
|
||||
#define TK_NK_ARROW 208
|
||||
#define TK_ROWTS 209
|
||||
#define TK_QSTART 210
|
||||
#define TK_QEND 211
|
||||
#define TK_QDURATION 212
|
||||
#define TK_WSTART 213
|
||||
#define TK_WEND 214
|
||||
#define TK_WDURATION 215
|
||||
#define TK_IROWTS 216
|
||||
#define TK_CAST 217
|
||||
#define TK_NOW 218
|
||||
#define TK_TODAY 219
|
||||
#define TK_TIMEZONE 220
|
||||
#define TK_CLIENT_VERSION 221
|
||||
#define TK_SERVER_VERSION 222
|
||||
#define TK_SERVER_STATUS 223
|
||||
#define TK_CURRENT_USER 224
|
||||
#define TK_COUNT 225
|
||||
#define TK_LAST_ROW 226
|
||||
#define TK_CASE 227
|
||||
#define TK_END 228
|
||||
#define TK_WHEN 229
|
||||
#define TK_THEN 230
|
||||
#define TK_ELSE 231
|
||||
#define TK_BETWEEN 232
|
||||
#define TK_IS 233
|
||||
#define TK_NK_LT 234
|
||||
#define TK_NK_GT 235
|
||||
#define TK_NK_LE 236
|
||||
#define TK_NK_GE 237
|
||||
#define TK_NK_NE 238
|
||||
#define TK_MATCH 239
|
||||
#define TK_NMATCH 240
|
||||
#define TK_CONTAINS 241
|
||||
#define TK_IN 242
|
||||
#define TK_JOIN 243
|
||||
#define TK_INNER 244
|
||||
#define TK_SELECT 245
|
||||
#define TK_DISTINCT 246
|
||||
#define TK_WHERE 247
|
||||
#define TK_PARTITION 248
|
||||
#define TK_BY 249
|
||||
#define TK_SESSION 250
|
||||
#define TK_STATE_WINDOW 251
|
||||
#define TK_SLIDING 252
|
||||
#define TK_FILL 253
|
||||
#define TK_VALUE 254
|
||||
#define TK_VALUE_F 255
|
||||
#define TK_NONE 256
|
||||
#define TK_PREV 257
|
||||
#define TK_NULL_F 258
|
||||
#define TK_LINEAR 259
|
||||
#define TK_NEXT 260
|
||||
#define TK_HAVING 261
|
||||
#define TK_RANGE 262
|
||||
#define TK_EVERY 263
|
||||
#define TK_ORDER 264
|
||||
#define TK_SLIMIT 265
|
||||
#define TK_SOFFSET 266
|
||||
#define TK_LIMIT 267
|
||||
#define TK_OFFSET 268
|
||||
#define TK_ASC 269
|
||||
#define TK_NULLS 270
|
||||
#define TK_ABORT 271
|
||||
#define TK_AFTER 272
|
||||
#define TK_ATTACH 273
|
||||
#define TK_BEFORE 274
|
||||
#define TK_BEGIN 275
|
||||
#define TK_BITAND 276
|
||||
#define TK_BITNOT 277
|
||||
#define TK_BITOR 278
|
||||
#define TK_BLOCKS 279
|
||||
#define TK_CHANGE 280
|
||||
#define TK_COMMA 281
|
||||
#define TK_COMPACT 282
|
||||
#define TK_CONCAT 283
|
||||
#define TK_CONFLICT 284
|
||||
#define TK_COPY 285
|
||||
#define TK_DEFERRED 286
|
||||
#define TK_DELIMITERS 287
|
||||
#define TK_DETACH 288
|
||||
#define TK_DIVIDE 289
|
||||
#define TK_DOT 290
|
||||
#define TK_EACH 291
|
||||
#define TK_FAIL 292
|
||||
#define TK_FILE 293
|
||||
#define TK_FOR 294
|
||||
#define TK_GLOB 295
|
||||
#define TK_ID 296
|
||||
#define TK_IMMEDIATE 297
|
||||
#define TK_IMPORT 298
|
||||
#define TK_INITIALLY 299
|
||||
#define TK_INSTEAD 300
|
||||
#define TK_ISNULL 301
|
||||
#define TK_KEY 302
|
||||
#define TK_MODULES 303
|
||||
#define TK_NK_BITNOT 304
|
||||
#define TK_NK_SEMI 305
|
||||
#define TK_NOTNULL 306
|
||||
#define TK_OF 307
|
||||
#define TK_PLUS 308
|
||||
#define TK_PRIVILEGE 309
|
||||
#define TK_RAISE 310
|
||||
#define TK_REPLACE 311
|
||||
#define TK_RESTRICT 312
|
||||
#define TK_ROW 313
|
||||
#define TK_SEMI 314
|
||||
#define TK_STAR 315
|
||||
#define TK_STATEMENT 316
|
||||
#define TK_STRICT 317
|
||||
#define TK_STRING 318
|
||||
#define TK_TIMES 319
|
||||
#define TK_VALUES 320
|
||||
#define TK_VARIABLE 321
|
||||
#define TK_VIEW 322
|
||||
#define TK_WAL 323
|
||||
|
||||
#define TK_NK_SPACE 600
|
||||
#define TK_NK_COMMENT 601
|
||||
|
|
|
@ -389,6 +389,7 @@ typedef struct SStreamOptions {
|
|||
SNode* pDeleteMark;
|
||||
int8_t fillHistory;
|
||||
int8_t ignoreExpired;
|
||||
int8_t ignoreUpdate;
|
||||
} SStreamOptions;
|
||||
|
||||
typedef struct SCreateStreamStmt {
|
||||
|
|
|
@ -93,6 +93,7 @@ typedef struct SScanLogicNode {
|
|||
int64_t watermark;
|
||||
int64_t deleteMark;
|
||||
int8_t igExpired;
|
||||
int8_t igCheckUpdate;
|
||||
SArray* pSmaIndexes;
|
||||
SNodeList* pGroupTags;
|
||||
bool groupSort;
|
||||
|
@ -217,6 +218,7 @@ typedef struct SWindowLogicNode {
|
|||
int64_t watermark;
|
||||
int64_t deleteMark;
|
||||
int8_t igExpired;
|
||||
int8_t igCheckUpdate;
|
||||
EWindowAlgorithm windowAlgo;
|
||||
EOrder inputTsOrder;
|
||||
EOrder outputTsOrder;
|
||||
|
@ -357,6 +359,7 @@ typedef struct STableScanPhysiNode {
|
|||
int64_t watermark;
|
||||
int8_t igExpired;
|
||||
bool assignBlockUid;
|
||||
int8_t igCheckUpdate;
|
||||
} STableScanPhysiNode;
|
||||
|
||||
typedef STableScanPhysiNode STableSeqScanPhysiNode;
|
||||
|
|
|
@ -226,8 +226,10 @@ typedef struct SIntervalWindowNode {
|
|||
typedef enum EFillMode {
|
||||
FILL_MODE_NONE = 1,
|
||||
FILL_MODE_VALUE,
|
||||
FILL_MODE_VALUE_F,
|
||||
FILL_MODE_PREV,
|
||||
FILL_MODE_NULL,
|
||||
FILL_MODE_NULL_F,
|
||||
FILL_MODE_LINEAR,
|
||||
FILL_MODE_NEXT
|
||||
} EFillMode;
|
||||
|
|
|
@ -36,6 +36,7 @@ typedef struct SPlanContext {
|
|||
int64_t watermark;
|
||||
int64_t deleteMark;
|
||||
int8_t igExpired;
|
||||
int8_t igCheckUpdate;
|
||||
char* pMsg;
|
||||
int32_t msgLen;
|
||||
const char* pUser;
|
||||
|
|
|
@ -191,6 +191,7 @@ int32_t walApplyVer(SWal *, int64_t ver);
|
|||
// read
|
||||
SWalReader *walOpenReader(SWal *, SWalFilterCond *pCond);
|
||||
void walCloseReader(SWalReader *pRead);
|
||||
void walReadReset(SWalReader *pReader);
|
||||
int32_t walReadVer(SWalReader *pRead, int64_t ver);
|
||||
int32_t walReadSeekVer(SWalReader *pRead, int64_t ver);
|
||||
int32_t walNextValidMsg(SWalReader *pRead);
|
||||
|
|
|
@ -110,6 +110,8 @@ bool taosValidFile(TdFilePtr pFile);
|
|||
|
||||
int32_t taosGetErrorFile(TdFilePtr pFile);
|
||||
|
||||
int32_t taosCompressFile(char *srcFileName, char *destFileName);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -111,6 +111,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x012A)
|
||||
#define TSDB_CODE_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x012B)
|
||||
#define TSDB_CODE_TIMEOUT_ERROR TAOS_DEF_ERROR_CODE(0, 0x012C)
|
||||
#define TSDB_CODE_MSG_ENCODE_ERROR TAOS_DEF_ERROR_CODE(0, 0x012D)
|
||||
|
||||
#define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130) //
|
||||
#define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131) //
|
||||
|
@ -355,6 +356,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_MND_STREAM_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03F3)
|
||||
#define TSDB_CODE_MND_STREAM_TASK_DROPPED TAOS_DEF_ERROR_CODE(0, 0x03F4)
|
||||
#define TSDB_CODE_MND_MULTI_REPLICA_SOURCE_DB TAOS_DEF_ERROR_CODE(0, 0x03F5)
|
||||
#define TSDB_CODE_MND_TOO_MANY_STREAMS TAOS_DEF_ERROR_CODE(0, 0x03F6)
|
||||
|
||||
// mnode-sma
|
||||
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
|
||||
|
|
|
@ -2,12 +2,20 @@
|
|||
|
||||
for /F %%a in ('echo prompt $E ^| cmd') do set "ESC=%%a"
|
||||
|
||||
goto %1
|
||||
if "%1" NEQ "" goto %1
|
||||
|
||||
:needAdmin
|
||||
|
||||
if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json (
|
||||
echo The default data directory C:/TDengine/data contains old data of tdengine 2.x, please clear it before installing!
|
||||
)
|
||||
|
||||
rem // stop and delete service
|
||||
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close)
|
||||
echo This might take a few moment to accomplish deleting service taosd/taosadapter ...
|
||||
call :check_svc taosd
|
||||
call :check_svc taosadapter
|
||||
|
||||
set source_dir=%2
|
||||
set source_dir=%source_dir:/=\\%
|
||||
set binary_dir=%3
|
||||
|
@ -60,7 +68,6 @@ if exist %binary_dir%\\build\\bin\\taosdump.exe (
|
|||
|
||||
copy %binary_dir%\\build\\bin\\taosd.exe %target_dir% > nul
|
||||
copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul
|
||||
|
||||
if exist %binary_dir%\\build\\bin\\taosadapter.exe (
|
||||
copy %binary_dir%\\build\\bin\\taosadapter.exe %target_dir% > nul
|
||||
)
|
||||
|
@ -80,22 +87,23 @@ goto :eof
|
|||
|
||||
:hasAdmin
|
||||
|
||||
sc query "taosd" && sc stop taosd && sc delete taosd
|
||||
sc query "taosadapter" && sc stop taosadapter && sc delete taosd
|
||||
call :stop_delete
|
||||
call :check_svc taosd
|
||||
call :check_svc taosadapter
|
||||
|
||||
copy /y C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32 > nul
|
||||
if exist C:\\TDengine\\driver\\taosws.dll (
|
||||
copy /y C:\\TDengine\\driver\\taosws.dll C:\\Windows\\System32 > nul
|
||||
)
|
||||
|
||||
sc query "taosd" >nul || sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND
|
||||
sc query "taosadapter" >nul || sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND
|
||||
rem // create services
|
||||
sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND
|
||||
sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND
|
||||
|
||||
set "env=HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
|
||||
for /f "tokens=2*" %%I in ('reg query "%env%" /v Path ^| findstr /i "\<Path\>"') do (
|
||||
|
||||
rem // make addition persistent through reboots
|
||||
reg add "%env%" /f /v Path /t REG_EXPAND_SZ /d "%%J;C:\TDengine"
|
||||
call :append_if_not_exists %%J
|
||||
|
||||
rem // apply change to the current process
|
||||
for %%a in ("%%J;C:\TDengine") do path %%~a
|
||||
|
@ -105,3 +113,36 @@ rem // use setx to set a temporary throwaway value to trigger a WM_SETTINGCHANGE
|
|||
rem // applies change to new console windows without requiring a reboot
|
||||
(setx /m foo bar & reg delete "%env%" /f /v foo) >NUL 2>NUL
|
||||
|
||||
goto :end
|
||||
|
||||
:append_if_not_exists
|
||||
set "_origin_paths=%*"
|
||||
set "_paths=%*"
|
||||
set "_found=0"
|
||||
:loop
|
||||
for /f "tokens=1* delims=;" %%x in ("%_paths%") do (
|
||||
if "%%x" EQU "C:\TDengine" (
|
||||
set "_found=1"
|
||||
) else (
|
||||
set "_paths=%%y"
|
||||
goto :loop
|
||||
)
|
||||
)
|
||||
if "%_found%" == "0" (
|
||||
rem // make addition persistent through reboots
|
||||
reg add "%env%" /f /v Path /t REG_EXPAND_SZ /d "%_origin_paths%;C:\TDengine"
|
||||
)
|
||||
exit /B 0
|
||||
|
||||
:stop_delete
|
||||
sc stop taosd
|
||||
sc delete taosd
|
||||
sc stop taosadapter
|
||||
sc delete taosadapter
|
||||
exit /B 0
|
||||
|
||||
:check_svc
|
||||
sc query %1 >nul 2>nul && goto :check_svc %1
|
||||
exit /B 0
|
||||
|
||||
:end
|
||||
|
|
|
@@ -62,4 +62,4 @@ target_link_libraries(

if(${BUILD_TEST})
ADD_SUBDIRECTORY(test)
endif(${BUILD_TEST})
endif(${BUILD_TEST})
@ -138,6 +138,12 @@ STscObj* taos_connect_internal(const char* ip, const char* user, const char* pas
|
|||
p->mgmtEp = epSet;
|
||||
taosThreadMutexInit(&p->qnodeMutex, NULL);
|
||||
p->pTransporter = openTransporter(user, secretEncrypt, tsNumOfCores / 2);
|
||||
if (p->pTransporter == NULL) {
|
||||
taosThreadMutexUnlock(&appInfo.mutex);
|
||||
taosMemoryFreeClear(key);
|
||||
taosMemoryFree(p);
|
||||
return NULL;
|
||||
}
|
||||
p->pAppHbMgr = appHbMgrInit(p, key);
|
||||
if (NULL == p->pAppHbMgr) {
|
||||
destroyAppInst(p);
|
||||
|
@ -1463,6 +1469,7 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
|
|||
tscError("failed to sched msg to tsc, tsc ready to quit");
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
taosMemoryFree(arg->pEpset);
|
||||
destroySendMsgInfo(pMsg->info.ahandle);
|
||||
taosMemoryFree(arg);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,10 @@
|
|||
#include "tref.h"
|
||||
#include "ttimer.h"
|
||||
|
||||
static tb_uid_t processSuid(tb_uid_t suid, char* db){
|
||||
return suid + MurmurHash3_32(db, strlen(db));
|
||||
}
|
||||
|
||||
static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id,
|
||||
int8_t t) {
|
||||
char* string = NULL;
|
||||
|
@ -681,7 +685,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
pReq.numOfColumns = req.schemaRow.nCols;
|
||||
pReq.numOfTags = req.schemaTag.nCols;
|
||||
pReq.commentLen = -1;
|
||||
pReq.suid = req.suid;
|
||||
pReq.suid = processSuid(req.suid, pRequest->pDb);
|
||||
pReq.source = TD_REQ_FROM_TAOX;
|
||||
pReq.igExists = true;
|
||||
|
||||
|
@ -753,7 +757,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
// build drop stable
|
||||
pReq.igNotExists = true;
|
||||
pReq.source = TD_REQ_FROM_TAOX;
|
||||
pReq.suid = req.suid;
|
||||
pReq.suid = processSuid(req.suid, pRequest->pDb);
|
||||
|
||||
STscObj* pTscObj = pRequest->pTscObj;
|
||||
SName tableName = {0};
|
||||
|
@ -871,6 +875,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
if (pCreateReq->type == TSDB_CHILD_TABLE) {
|
||||
STableMeta* pTableMeta = NULL;
|
||||
SName sName = {0};
|
||||
pCreateReq->ctb.suid = processSuid(pCreateReq->ctb.suid, pRequest->pDb);
|
||||
toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.stbName, &sName);
|
||||
code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1008,6 +1013,7 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
|
||||
pDropReq = req.pReqs + iReq;
|
||||
pDropReq->igNotExists = true;
|
||||
pDropReq->suid = processSuid(pDropReq->suid, pRequest->pDb);
|
||||
|
||||
SVgroupInfo pInfo = {0};
|
||||
SName pName = {0};
|
||||
|
@ -1922,6 +1928,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
SMqTaosxRspObj rspObj = {0};
|
||||
SDecoder decoder = {0};
|
||||
STableMeta* pTableMeta = NULL;
|
||||
void* schemaContent = NULL;
|
||||
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0);
|
||||
|
@ -2008,27 +2015,49 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
SDecoder decoderTmp = {0};
|
||||
SVCreateTbReq pCreateReq = {0};
|
||||
|
||||
tDecoderInit(&decoderTmp, *dataTmp, *lenTmp);
|
||||
if (tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq) < 0) {
|
||||
tDecoderClear(&decoderTmp);
|
||||
taosMemoryFreeClear(pCreateReq.comment);
|
||||
taosArrayDestroy(pCreateReq.ctb.tagName);
|
||||
goto end;
|
||||
}
|
||||
do{
|
||||
tDecoderInit(&decoderTmp, *dataTmp, *lenTmp);
|
||||
if (tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq) < 0) {
|
||||
code = TSDB_CODE_MSG_DECODE_ERROR;
|
||||
break;
|
||||
}
|
||||
|
||||
ASSERT(pCreateReq.type == TSDB_CHILD_TABLE);
|
||||
if (strcmp(tbName, pCreateReq.name) == 0) {
|
||||
schemaLen = *lenTmp;
|
||||
schemaData = *dataTmp;
|
||||
if (strcmp(tbName, pCreateReq.name) != 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
pCreateReq.ctb.suid = processSuid(pCreateReq.ctb.suid, pRequest->pDb);
|
||||
|
||||
int32_t len = 0;
|
||||
tEncodeSize(tEncodeSVCreateTbReq, &pCreateReq, len, code);
|
||||
if(code != 0) {
|
||||
code = TSDB_CODE_MSG_ENCODE_ERROR;
|
||||
break;
|
||||
}
|
||||
taosMemoryFree(schemaContent);
|
||||
schemaContent = taosMemoryMalloc(len);
|
||||
if(!schemaContent) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
break;
|
||||
}
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, schemaContent, len);
|
||||
code = tEncodeSVCreateTbReq(&encoder, &pCreateReq);
|
||||
if (code != 0) {
|
||||
tEncoderClear(&encoder);
|
||||
code = TSDB_CODE_MSG_ENCODE_ERROR;
|
||||
break;
|
||||
}
|
||||
schemaLen = len;
|
||||
schemaData = schemaContent;
|
||||
strcpy(pName.tname, pCreateReq.ctb.stbName);
|
||||
tDecoderClear(&decoderTmp);
|
||||
taosMemoryFreeClear(pCreateReq.comment);
|
||||
taosArrayDestroy(pCreateReq.ctb.tagName);
|
||||
break;
|
||||
}
|
||||
tEncoderClear(&encoder);
|
||||
}while(0);
|
||||
tDecoderClear(&decoderTmp);
|
||||
taosMemoryFreeClear(pCreateReq.comment);
|
||||
taosArrayDestroy(pCreateReq.ctb.tagName);
|
||||
if(code != 0) goto end;
|
||||
if(schemaLen != 0) break;
|
||||
}
|
||||
|
||||
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
|
||||
|
@ -2217,6 +2246,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
destroyRequest(pRequest);
|
||||
taosHashCleanup(pVgHash);
|
||||
taosMemoryFreeClear(pTableMeta);
|
||||
taosMemoryFree(schemaContent);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
|
|
@ -149,17 +149,8 @@ typedef struct {
|
|||
int64_t endTime;
|
||||
} SSmlCostInfo;
|
||||
|
||||
typedef struct {
|
||||
SRequestObj *request;
|
||||
tsem_t sem;
|
||||
int32_t cnt;
|
||||
int32_t total;
|
||||
TdThreadSpinlock lock;
|
||||
} Params;
|
||||
|
||||
typedef struct {
|
||||
int64_t id;
|
||||
Params *params;
|
||||
|
||||
SMLProtocolType protocol;
|
||||
int8_t precision;
|
||||
|
@ -178,7 +169,6 @@ typedef struct {
|
|||
SQuery *pQuery;
|
||||
|
||||
SSmlCostInfo cost;
|
||||
int32_t affectedRows;
|
||||
SSmlMsgBuf msgBuf;
|
||||
SHashObj *dumplicateKey; // for dumplicate key
|
||||
SArray *colsContainer; // for cols parse, if dataFormat == false
|
||||
|
@ -1513,7 +1503,6 @@ static void smlDestroyInfo(SSmlHandle *info) {
|
|||
if (!info->dataFormat) {
|
||||
taosArrayDestroy(info->colsContainer);
|
||||
}
|
||||
destroyRequest(info->pRequest);
|
||||
|
||||
cJSON_Delete(info->root);
|
||||
taosMemoryFreeClear(info);
|
||||
|
@ -2351,20 +2340,11 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
|||
}
|
||||
info->cost.insertRpcTime = taosGetTimestampUs();
|
||||
|
||||
// launchQueryImpl(info->pRequest, info->pQuery, false, NULL);
|
||||
// info->affectedRows = taos_affected_rows(info->pRequest);
|
||||
// return info->pRequest->code;
|
||||
|
||||
SAppClusterSummary *pActivity = &info->taos->pAppInfo->summary;
|
||||
atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
|
||||
|
||||
SSqlCallbackWrapper *pWrapper = (SSqlCallbackWrapper *)taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper));
|
||||
if (pWrapper == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pWrapper->pRequest = info->pRequest;
|
||||
launchAsyncQuery(info->pRequest, info->pQuery, NULL, pWrapper);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
launchQueryImpl(info->pRequest, info->pQuery, true, NULL);
|
||||
return info->pRequest->code;
|
||||
}
|
||||
|
||||
static void smlPrintStatisticInfo(SSmlHandle *info) {
|
||||
|
@ -2498,48 +2478,13 @@ static int32_t isSchemalessDb(STscObj *taos, SRequestObj *request) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void smlInsertCallback(void *param, void *res, int32_t code) {
|
||||
SRequestObj *pRequest = (SRequestObj *)res;
|
||||
SSmlHandle *info = (SSmlHandle *)param;
|
||||
int32_t rows = taos_affected_rows(pRequest);
|
||||
|
||||
uDebug("SML:0x%" PRIx64 " result. code:%d, msg:%s", info->id, pRequest->code, pRequest->msgBuf);
|
||||
Params *pParam = info->params;
|
||||
// lock
|
||||
taosThreadSpinLock(&pParam->lock);
|
||||
pParam->cnt++;
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pParam->request->code = code;
|
||||
pParam->request->body.resInfo.numOfRows += rows;
|
||||
} else {
|
||||
pParam->request->body.resInfo.numOfRows += info->affectedRows;
|
||||
}
|
||||
// unlock
|
||||
taosThreadSpinUnlock(&pParam->lock);
|
||||
|
||||
if (pParam->cnt == pParam->total) {
|
||||
tsem_post(&pParam->sem);
|
||||
}
|
||||
uDebug("SML:0x%" PRIx64 " insert finished, code: %d, rows: %d, total: %d", info->id, code, rows, info->affectedRows);
|
||||
info->cost.endTime = taosGetTimestampUs();
|
||||
info->cost.code = code;
|
||||
smlPrintStatisticInfo(info);
|
||||
smlDestroyInfo(info);
|
||||
}
|
||||
|
||||
TAOS_RES *taos_schemaless_insert_inner(SRequestObj *request, char *lines[], char *rawLine, char *rawLineEnd,
|
||||
int numLines, int protocol, int precision, int32_t ttl) {
|
||||
int batchs = 0;
|
||||
STscObj *pTscObj = request->pTscObj;
|
||||
|
||||
SSmlHandle *info = NULL;
|
||||
pTscObj->schemalessType = 1;
|
||||
SSmlMsgBuf msg = {ERROR_MSG_BUF_DEFAULT_SIZE, request->msgBuf};
|
||||
|
||||
Params params = {0};
|
||||
params.request = request;
|
||||
tsem_init(¶ms.sem, 0, 0);
|
||||
taosThreadSpinInit(&(params.lock), 0);
|
||||
|
||||
if (request->pDb == NULL) {
|
||||
request->code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
|
||||
smlBuildInvalidDataMsg(&msg, "Database not specified", NULL);
|
||||
|
@ -2573,65 +2518,24 @@ TAOS_RES *taos_schemaless_insert_inner(SRequestObj *request, char *lines[], char
|
|||
goto end;
|
||||
}
|
||||
|
||||
batchs = ceil(((double)numLines) / tsSmlBatchSize);
|
||||
params.total = batchs;
|
||||
for (int i = 0; i < batchs; ++i) {
|
||||
SRequestObj *req = (SRequestObj *)createRequest(pTscObj->id, TSDB_SQL_INSERT, 0);
|
||||
if (!req) {
|
||||
request->code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
uError("SML:taos_schemaless_insert error request is null");
|
||||
goto end;
|
||||
}
|
||||
SSmlHandle *info = smlBuildSmlInfo(pTscObj, req, (SMLProtocolType)protocol, precision);
|
||||
if (!info) {
|
||||
request->code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
uError("SML:taos_schemaless_insert error SSmlHandle is null");
|
||||
goto end;
|
||||
}
|
||||
|
||||
info->isRawLine = (rawLine == NULL);
|
||||
info->ttl = ttl;
|
||||
|
||||
int32_t perBatch = tsSmlBatchSize;
|
||||
|
||||
if (numLines > perBatch) {
|
||||
numLines -= perBatch;
|
||||
} else {
|
||||
perBatch = numLines;
|
||||
numLines = 0;
|
||||
}
|
||||
|
||||
info->params = ¶ms;
|
||||
info->affectedRows = perBatch;
|
||||
info->pRequest->body.queryFp = smlInsertCallback;
|
||||
info->pRequest->body.param = info;
|
||||
int32_t code = smlProcess(info, lines, rawLine, rawLineEnd, perBatch);
|
||||
if (lines) {
|
||||
lines += perBatch;
|
||||
}
|
||||
if (rawLine) {
|
||||
int num = 0;
|
||||
while (rawLine < rawLineEnd) {
|
||||
if (*(rawLine++) == '\n') {
|
||||
num++;
|
||||
}
|
||||
if (num == perBatch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
info->pRequest->body.queryFp(info, req, code);
|
||||
}
|
||||
info = smlBuildSmlInfo(pTscObj, request, (SMLProtocolType)protocol, precision);
|
||||
if (!info) {
|
||||
request->code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
uError("SML:taos_schemaless_insert error SSmlHandle is null");
|
||||
goto end;
|
||||
}
|
||||
tsem_wait(¶ms.sem);
|
||||
|
||||
info->isRawLine = (rawLine == NULL);
|
||||
info->ttl = ttl;
|
||||
request->code = smlProcess(info, lines, rawLine, rawLineEnd, numLines);
|
||||
|
||||
end:
|
||||
taosThreadSpinDestroy(¶ms.lock);
|
||||
tsem_destroy(¶ms.sem);
|
||||
// ((STscObj *)taos)->schemalessType = 0;
|
||||
pTscObj->schemalessType = 1;
|
||||
uDebug("resultend:%s", request->msgBuf);
|
||||
uDebug("SML:0x%" PRIx64 " insert finished, code: %d", info->id, request->code);
|
||||
info->cost.endTime = taosGetTimestampUs();
|
||||
info->cost.code = request->code;
|
||||
smlPrintStatisticInfo(info);
|
||||
smlDestroyInfo(info);
|
||||
|
||||
return (TAOS_RES *)request;
|
||||
}
|
||||
|
||||
|
|
|
@ -112,7 +112,7 @@ void createNewTable(TAOS* pConn, int32_t index) {
|
|||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
for(int32_t i = 0; i < 20; i += 20) {
|
||||
for(int32_t i = 0; i < 2000; i += 20) {
|
||||
char sql[1024] = {0};
|
||||
sprintf(sql,
|
||||
"insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
|
||||
|
@ -692,6 +692,7 @@ TEST(testCase, insert_test) {
|
|||
taos_free_result(pRes);
|
||||
taos_close(pConn);
|
||||
}
|
||||
#endif
|
||||
|
||||
TEST(testCase, projection_query_tables) {
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
|
@ -725,7 +726,7 @@ TEST(testCase, projection_query_tables) {
|
|||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
for (int32_t i = 0; i < 200000; ++i) {
|
||||
for (int32_t i = 0; i < 2; ++i) {
|
||||
printf("create table :%d\n", i);
|
||||
createNewTable(pConn, i);
|
||||
}
|
||||
|
@ -751,6 +752,7 @@ TEST(testCase, projection_query_tables) {
|
|||
taos_close(pConn);
|
||||
}
|
||||
|
||||
#if 0
|
||||
TEST(testCase, tsbs_perf_test) {
|
||||
TdThread qid[20] = {0};
|
||||
|
||||
|
@ -760,8 +762,6 @@ TEST(testCase, tsbs_perf_test) {
|
|||
getchar();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
TEST(testCase, projection_query_stables) {
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(pConn, nullptr);
|
||||
|
@ -790,7 +790,6 @@ TEST(testCase, projection_query_stables) {
|
|||
taos_close(pConn);
|
||||
}
|
||||
|
||||
#if 0
|
||||
TEST(testCase, agg_query_tables) {
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(pConn, nullptr);
|
||||
|
@ -831,7 +830,7 @@ TEST(testCase, async_api_test) {
|
|||
ASSERT_NE(pConn, nullptr);
|
||||
|
||||
taos_query(pConn, "use abc1");
|
||||
#if 0
|
||||
|
||||
TAOS_RES* pRes = taos_query(pConn, "insert into tu(ts) values('2022-02-27 12:12:61')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed, reason:%s\n", taos_errstr(pRes));
|
||||
|
@ -854,7 +853,6 @@ TEST(testCase, async_api_test) {
|
|||
printf("%s\n", str);
|
||||
memset(str, 0, sizeof(str));
|
||||
}
|
||||
#endif
|
||||
|
||||
taos_query_a(pConn, "select count(*) from tu", queryCallback, pConn);
|
||||
getchar();
|
||||
|
|
|
@ -5425,6 +5425,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
|
|||
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
|
||||
}
|
||||
if (tEncodeI8(&encoder, pReq->igUpdate) < 0) return -1;
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
|
@ -5486,6 +5487,8 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
|
|||
}
|
||||
}
|
||||
|
||||
if (tDecodeI8(&decoder, &pReq->igUpdate) < 0) return -1;
|
||||
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
|
|
|
@ -49,7 +49,7 @@ static void mmProcessRpcMsg(SQueueInfo *pInfo, SRpcMsg *pMsg) {
|
|||
pMsg->info.node = pMgmt->pMnode;
|
||||
|
||||
const STraceId *trace = &pMsg->info.traceId;
|
||||
dGTrace("msg:%p, get from mnode queue", pMsg);
|
||||
dGTrace("msg:%p, get from mnode queue, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
|
||||
|
||||
int32_t code = mndProcessRpcMsg(pMsg);
|
||||
|
||||
|
|
|
@ -648,6 +648,7 @@ typedef struct {
|
|||
int64_t checkpointFreq; // ms
|
||||
int64_t currentTick; // do not serialize
|
||||
int64_t deleteMark;
|
||||
int8_t igCheckUpdate;
|
||||
} SStreamObj;
|
||||
|
||||
int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj);
|
||||
|
|
|
@ -81,7 +81,7 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
|
|||
int32_t mndTransProcessRsp(SRpcMsg *pRsp);
|
||||
void mndTransPullup(SMnode *pMnode);
|
||||
int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans);
|
||||
void mndTransExecute(SMnode *pMnode, STrans *pTrans);
|
||||
void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader);
|
||||
int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, const char *dbname);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -78,6 +78,7 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) {
|
|||
|
||||
// 3.0.20
|
||||
if (tEncodeI64(pEncoder, pObj->checkpointFreq) < 0) return -1;
|
||||
if (tEncodeI8(pEncoder, pObj->igCheckUpdate) < 0) return -1;
|
||||
|
||||
tEndEncode(pEncoder);
|
||||
return pEncoder->pos;
|
||||
|
@ -145,6 +146,7 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) {
|
|||
// 3.0.20
|
||||
if (sver >= 2) {
|
||||
if (tDecodeI64(pDecoder, &pObj->checkpointFreq) < 0) return -1;
|
||||
if (tDecodeI8(pDecoder, &pObj->igCheckUpdate) < 0) return -1;
|
||||
}
|
||||
tEndDecode(pDecoder);
|
||||
return 0;
|
||||
|
@ -489,7 +491,7 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {
|
|||
tlen += tEncodeSMqConsumerEp(buf, pConsumerEp);
|
||||
cnt++;
|
||||
}
|
||||
if(cnt != sz) return -1;
|
||||
if (cnt != sz) return -1;
|
||||
tlen += taosEncodeArray(buf, pSub->unassignedVgs, (FEncode)tEncodeSMqVgEp);
|
||||
tlen += taosEncodeString(buf, pSub->dbName);
|
||||
return tlen;
|
||||
|
|
|
@ -31,6 +31,8 @@
|
|||
#define MND_STREAM_VER_NUMBER 2
|
||||
#define MND_STREAM_RESERVE_SIZE 64
|
||||
|
||||
#define MND_STREAM_MAX_NUM 10
|
||||
|
||||
static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream);
|
||||
static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream);
|
||||
static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pStream, SStreamObj *pNewStream);
|
||||
|
@ -295,6 +297,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
|
|||
pObj->triggerParam = pCreate->maxDelay;
|
||||
pObj->watermark = pCreate->watermark;
|
||||
pObj->fillHistory = pCreate->fillHistory;
|
||||
pObj->igCheckUpdate = pCreate->igUpdate;
|
||||
|
||||
memcpy(pObj->sourceDb, pCreate->sourceDB, TSDB_DB_FNAME_LEN);
|
||||
SDbObj *pSourceDb = mndAcquireDb(pMnode, pCreate->sourceDB);
|
||||
|
@ -343,6 +346,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
|
|||
.triggerType = pObj->trigger == STREAM_TRIGGER_MAX_DELAY ? STREAM_TRIGGER_WINDOW_CLOSE : pObj->trigger,
|
||||
.watermark = pObj->watermark,
|
||||
.igExpired = pObj->igExpired,
|
||||
.igCheckUpdate = pObj->igCheckUpdate,
|
||||
};
|
||||
|
||||
// using ast and param to build physical plan
|
||||
|
@ -625,6 +629,35 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
{
|
||||
int32_t numOfStream = 0;
|
||||
|
||||
SStreamObj *pStream = NULL;
|
||||
void *pIter = NULL;
|
||||
|
||||
while (1) {
|
||||
pIter = sdbFetch(pMnode->pSdb, SDB_STREAM, pIter, (void **)&pStream);
|
||||
if (pIter == NULL) {
|
||||
if (numOfStream > MND_STREAM_MAX_NUM) {
|
||||
mError("too many streams, no more than 10 for each database");
|
||||
terrno = TSDB_CODE_MND_TOO_MANY_STREAMS;
|
||||
goto _OVER;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (pStream->sourceDbUid == streamObj.sourceDbUid) {
|
||||
++numOfStream;
|
||||
}
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
if (numOfStream > MND_STREAM_MAX_NUM) {
|
||||
mError("too many streams, no more than 10 for each database");
|
||||
terrno = TSDB_CODE_MND_TOO_MANY_STREAMS;
|
||||
goto _OVER;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pDb = mndAcquireDb(pMnode, streamObj.sourceDb);
|
||||
if (pDb->cfg.replications != 1) {
|
||||
mError("stream source db must have only 1 replica, but %s has %d", pDb->name, pDb->cfg.replications);
|
||||
|
|
|
@ -114,8 +114,9 @@ int32_t mndProcessWriteMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta
|
|||
taosThreadMutexUnlock(&pMgmt->lock);
|
||||
STrans *pTrans = mndAcquireTrans(pMnode, transId);
|
||||
if (pTrans != NULL) {
|
||||
mInfo("trans:%d, execute in mnode which not leader or sync timeout", transId);
|
||||
mndTransExecute(pMnode, pTrans);
|
||||
mInfo("trans:%d, execute in mnode which not leader or sync timeout, createTime:%" PRId64 " saved trans:%d",
|
||||
transId, pTrans->createdTime, pMgmt->transId);
|
||||
mndTransExecute(pMnode, pTrans, false);
|
||||
mndReleaseTrans(pMnode, pTrans);
|
||||
// sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA);
|
||||
} else {
|
||||
|
@ -372,7 +373,7 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
|
|||
taosThreadMutexLock(&pMgmt->lock);
|
||||
pMgmt->errCode = 0;
|
||||
|
||||
if (pMgmt->transId != 0) {
|
||||
if (pMgmt->transId != 0 /* && pMgmt->transId != transId*/) {
|
||||
mError("trans:%d, can't be proposed since trans:%d already waiting for confirm", transId, pMgmt->transId);
|
||||
taosThreadMutexUnlock(&pMgmt->lock);
|
||||
rpcFreeCont(req.pCont);
|
||||
|
|
|
@ -791,16 +791,18 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
|
|||
}
|
||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
||||
|
||||
mInfo("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
|
||||
mInfo("trans:%d, sync to other mnodes, stage:%s createTime:%" PRId64, pTrans->id, mndTransStr(pTrans->stage),
|
||||
pTrans->createdTime);
|
||||
int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id);
|
||||
if (code != 0) {
|
||||
mError("trans:%d, failed to sync, errno:%s code:%s", pTrans->id, terrstr(), tstrerror(code));
|
||||
mError("trans:%d, failed to sync, errno:%s code:%s createTime:%" PRId64 " saved trans:%d", pTrans->id, terrstr(),
|
||||
tstrerror(code), pTrans->createdTime, pMnode->syncMgmt.transId);
|
||||
sdbFreeRaw(pRaw);
|
||||
return -1;
|
||||
}
|
||||
|
||||
sdbFreeRaw(pRaw);
|
||||
mInfo("trans:%d, sync finished", pTrans->id);
|
||||
mInfo("trans:%d, sync finished, createTime:%" PRId64, pTrans->id, pTrans->createdTime);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -903,7 +905,7 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
|
|||
pTrans->rpcRsp = NULL;
|
||||
pTrans->rpcRspLen = 0;
|
||||
|
||||
mndTransExecute(pMnode, pNew);
|
||||
mndTransExecute(pMnode, pNew, true);
|
||||
mndReleaseTrans(pMnode, pNew);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1066,7 +1068,7 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
|
|||
|
||||
mInfo("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId, mndTransStr(pAction->stage),
|
||||
action, pRsp->code, pAction->acceptableCode, pAction->retryCode);
|
||||
mndTransExecute(pMnode, pTrans);
|
||||
mndTransExecute(pMnode, pTrans, true);
|
||||
|
||||
_OVER:
|
||||
mndReleaseTrans(pMnode, pTrans);
|
||||
|
@ -1495,15 +1497,17 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
|
|||
mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr());
|
||||
}
|
||||
|
||||
mInfo("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
|
||||
mInfo("trans:%d, execute finished, code:0x%x, failedTimes:%d createTime:%" PRId64, pTrans->id, pTrans->code,
|
||||
pTrans->failedTimes, pTrans->createdTime);
|
||||
return continueExec;
|
||||
}
|
||||
|
||||
void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
|
||||
void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
|
||||
bool continueExec = true;
|
||||
|
||||
while (continueExec) {
|
||||
mInfo("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
|
||||
mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " leader:%d", pTrans->id,
|
||||
mndTransStr(pTrans->stage), pTrans->createdTime, isLeader);
|
||||
pTrans->lastExecTime = taosGetTimestampMs();
|
||||
switch (pTrans->stage) {
|
||||
case TRN_STAGE_PREPARE:
|
||||
|
@ -1513,13 +1517,23 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
|
|||
continueExec = mndTransPerformRedoActionStage(pMnode, pTrans);
|
||||
break;
|
||||
case TRN_STAGE_COMMIT:
|
||||
continueExec = mndTransPerformCommitStage(pMnode, pTrans);
|
||||
if (isLeader) {
|
||||
continueExec = mndTransPerformCommitStage(pMnode, pTrans);
|
||||
} else {
|
||||
mInfo("trans:%d, can not commit since not leader", pTrans->id);
|
||||
continueExec = false;
|
||||
}
|
||||
break;
|
||||
case TRN_STAGE_COMMIT_ACTION:
|
||||
continueExec = mndTransPerformCommitActionStage(pMnode, pTrans);
|
||||
break;
|
||||
case TRN_STAGE_ROLLBACK:
|
||||
continueExec = mndTransPerformRollbackStage(pMnode, pTrans);
|
||||
if (isLeader) {
|
||||
continueExec = mndTransPerformRollbackStage(pMnode, pTrans);
|
||||
} else {
|
||||
mInfo("trans:%d, can not rollback since not leader", pTrans->id);
|
||||
continueExec = false;
|
||||
}
|
||||
break;
|
||||
case TRN_STAGE_UNDO_ACTION:
|
||||
continueExec = mndTransPerformUndoActionStage(pMnode, pTrans);
|
||||
|
@ -1562,7 +1576,7 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) {
|
|||
pAction->errCode = 0;
|
||||
}
|
||||
|
||||
mndTransExecute(pMnode, pTrans);
|
||||
mndTransExecute(pMnode, pTrans, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1620,7 +1634,7 @@ void mndTransPullup(SMnode *pMnode) {
|
|||
int32_t *pTransId = taosArrayGet(pArray, i);
|
||||
STrans *pTrans = mndAcquireTrans(pMnode, *pTransId);
|
||||
if (pTrans != NULL) {
|
||||
mndTransExecute(pMnode, pTrans);
|
||||
mndTransExecute(pMnode, pTrans, true);
|
||||
}
|
||||
mndReleaseTrans(pMnode, pTrans);
|
||||
}
|
||||
|
|
|
@ -178,7 +178,6 @@ int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableL
|
|||
|
||||
void tsdbReaderClose(STsdbReader *pReader);
|
||||
bool tsdbNextDataBlock(STsdbReader *pReader);
|
||||
void tsdbRetrieveDataBlockInfo(const STsdbReader *pReader, int32_t *rows, uint64_t *uid, STimeWindow *pWindow);
|
||||
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave);
|
||||
SSDataBlock *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
|
||||
int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
|
||||
|
|
|
@ -1343,6 +1343,9 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
|
|||
int32_t ret = 0;
|
||||
// get super table
|
||||
if (tdbTbGet(pMeta->pUidIdx, &pCtbEntry->ctbEntry.suid, sizeof(tb_uid_t), &pData, &nData) != 0) {
|
||||
metaError("vgId:%d, failed to get stable suid for update. version:%" PRId64, TD_VID(pMeta->pVnode),
|
||||
pCtbEntry->version);
|
||||
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
|
||||
ret = -1;
|
||||
goto end;
|
||||
}
|
||||
|
|
|
@ -517,7 +517,7 @@ bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) {
|
|||
pIter->rInfo.row = tsdbRowFromBlockData(pBlockData, pIter->iRow);
|
||||
|
||||
_exit:
|
||||
return (terrno == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL);
|
||||
return (terrno == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL) && (pBlockData != NULL);
|
||||
}
|
||||
|
||||
SRowInfo *tLDataIterGet(SLDataIter *pIter) { return &pIter->rInfo; }
|
||||
|
|
|
@ -79,8 +79,8 @@ typedef struct SIOCostSummary {
|
|||
int64_t composedBlocks;
|
||||
double buildComposedBlockTime;
|
||||
double createScanInfoList;
|
||||
double getTbFromMemTime;
|
||||
double getTbFromIMemTime;
|
||||
// double getTbFromMemTime;
|
||||
// double getTbFromIMemTime;
|
||||
double initDelSkylineIterTime;
|
||||
} SIOCostSummary;
|
||||
|
||||
|
@ -220,6 +220,8 @@ static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFil
|
|||
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
|
||||
static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
|
||||
|
||||
static FORCE_INLINE STSchema* getLatestTableSchema(STsdbReader* pReader, uint64_t uid);
|
||||
|
||||
static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
|
||||
|
||||
static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pCols, const int32_t* pSlotIdList,
|
||||
|
@ -444,19 +446,6 @@ static STimeWindow updateQueryTimeWindow(STsdb* pTsdb, STimeWindow* pWindow) {
|
|||
return win;
|
||||
}
|
||||
|
||||
static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* capacity) {
|
||||
int32_t rowLen = 0;
|
||||
for (int32_t i = 0; i < pCond->numOfCols; ++i) {
|
||||
rowLen += pCond->colList[i].bytes;
|
||||
}
|
||||
|
||||
// make sure the output SSDataBlock size be less than 2MB.
|
||||
const int32_t TWOMB = 2 * 1024 * 1024;
|
||||
if ((*capacity) * rowLen > TWOMB) {
|
||||
(*capacity) = TWOMB / rowLen;
|
||||
}
|
||||
}
|
||||
|
||||
// init file iterator
|
||||
static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader) {
|
||||
size_t numOfFileset = taosArrayGetSize(aDFileSet);
|
||||
|
@ -642,9 +631,6 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
|
|||
goto _end;
|
||||
}
|
||||
|
||||
// todo refactor.
|
||||
limitOutputBufferSize(pCond, &pReader->capacity);
|
||||
|
||||
// allocate buffer in order to load data blocks from file
|
||||
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
|
||||
pSup->pColAgg = taosArrayInit(pCond->numOfCols, sizeof(SColumnDataAgg));
|
||||
|
@ -1040,7 +1026,7 @@ static void copyNumericCols(const SColData* pData, SFileBlockDumpInfo* pDumpInfo
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) {
|
||||
static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
|
||||
SReaderStatus* pStatus = &pReader->status;
|
||||
SDataBlockIter* pBlockIter = &pStatus->blockIter;
|
||||
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
|
||||
|
@ -1057,6 +1043,14 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
|
|||
bool asc = ASCENDING_TRAVERSE(pReader->order);
|
||||
int32_t step = asc ? 1 : -1;
|
||||
|
||||
// no data exists, return directly.
|
||||
if (pBlockData->nRow == 0 || pBlockData->aTSKEY == 0) {
|
||||
tsdbWarn("%p no need to copy since no data in blockData, table uid:%" PRIu64 " has been dropped, %s", pReader, pBlockInfo->uid,
|
||||
pReader->idStr);
|
||||
pResBlock->info.rows = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if ((pDumpInfo->rowIndex == 0 && asc) || (pDumpInfo->rowIndex == pBlock->nRow - 1 && (!asc))) {
|
||||
if (asc && pReader->window.skey <= pBlock->minKey.ts) {
|
||||
// pDumpInfo->rowIndex = 0;
|
||||
|
@ -1170,12 +1164,19 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
|
|||
|
||||
static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData,
|
||||
uint64_t uid) {
|
||||
int32_t code = 0;
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
tBlockDataReset(pBlockData);
|
||||
STSchema* pSchema = getLatestTableSchema(pReader, uid);
|
||||
if (pSchema == NULL) {
|
||||
tsdbDebug("%p table uid:%"PRIu64" has been dropped, no data existed, %s", pReader, uid, pReader->idStr);
|
||||
return code;
|
||||
}
|
||||
|
||||
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
|
||||
TABLEID tid = {.suid = pReader->suid, .uid = uid};
|
||||
int32_t code =
|
||||
tBlockDataInit(pBlockData, &tid, pReader->pSchema, &pReader->suppInfo.colId[1], pReader->suppInfo.numOfCols - 1);
|
||||
code = tBlockDataInit(pBlockData, &tid, pSchema, &pSup->colId[1], pSup->numOfCols - 1);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -1600,8 +1601,7 @@ static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pBlock
|
|||
|
||||
// log the reason why load the datablock for profile
|
||||
if (loadDataBlock) {
|
||||
tsdbDebug("%p uid:%" PRIu64
|
||||
" need to load the datablock, overlapwithneighborblock:%d, hasDup:%d, partiallyRequired:%d, "
|
||||
tsdbDebug("%p uid:%" PRIu64 " need to load the datablock, overlapneighbor:%d, hasDup:%d, partiallyRequired:%d, "
|
||||
"overlapWithKey:%d, greaterThanBuf:%d, overlapWithDel:%d, overlapWithlastBlock:%d, %s",
|
||||
pReader, pBlockInfo->uid, info.overlapWithNeighborBlock, info.hasDupTs, info.partiallyRequired,
|
||||
info.overlapWithKeyInBuf, info.moreThanCapcity, info.overlapWithDelInfo, info.overlapWithLastBlock,
|
||||
|
@ -1637,9 +1637,9 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
|
|||
|
||||
double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
|
||||
tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%d, brange:%" PRId64
|
||||
" - %" PRId64 " %s",
|
||||
" - %" PRId64 ", uid:%"PRIu64", %s",
|
||||
pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey,
|
||||
pReader->idStr);
|
||||
pBlockScanInfo->uid, pReader->idStr);
|
||||
|
||||
pReader->cost.buildmemBlock += elapsedTime;
|
||||
return code;
|
||||
|
@ -1665,8 +1665,10 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pBlockScanInfo,
|
||||
static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo,
|
||||
SVersionRange* pVerRange) {
|
||||
int32_t step = ASCENDING_TRAVERSE(pLastBlockReader->order)? 1:-1;
|
||||
|
||||
while (1) {
|
||||
bool hasVal = tMergeTreeNext(&pLastBlockReader->mergeTree);
|
||||
if (!hasVal) {
|
||||
|
@ -1675,8 +1677,15 @@ static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBloc
|
|||
|
||||
TSDBROW row = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
|
||||
TSDBKEY k = TSDBROW_KEY(&row);
|
||||
if (!hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order,
|
||||
pVerRange)) {
|
||||
if (hasBeenDropped(pScanInfo->delSkyline, &pScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order, pVerRange)) {
|
||||
pScanInfo->lastKey = k.ts;
|
||||
} else {
|
||||
// the qualifed ts may equal to k.ts, only a greater version one.
|
||||
// here we need to fallback one step.
|
||||
if (pScanInfo->lastKey == k.ts) {
|
||||
pScanInfo->lastKey -= step;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -1699,6 +1708,19 @@ static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SLastBlockReader* pLas
|
|||
return false;
|
||||
}
|
||||
|
||||
static FORCE_INLINE STSchema* getLatestTableSchema(STsdbReader* pReader, uint64_t uid) {
|
||||
if (pReader->pSchema != NULL) {
|
||||
return pReader->pSchema;
|
||||
}
|
||||
|
||||
pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, uid, -1, 1);
|
||||
if (pReader->pSchema == NULL) {
|
||||
tsdbError("failed to get table schema, uid:%" PRIu64 ", it may have been dropped, ver:-1, %s", uid, pReader->idStr);
|
||||
}
|
||||
|
||||
return pReader->pSchema;
|
||||
}
|
||||
|
||||
static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* pReader, uint64_t uid) {
|
||||
// always set the newest schema version in pReader->pSchema
|
||||
if (pReader->pSchema == NULL) {
|
||||
|
@ -2230,9 +2252,7 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea
|
|||
|
||||
STbData* d = NULL;
|
||||
if (pReader->pReadSnap->pMem != NULL) {
|
||||
st = taosGetTimestampUs();
|
||||
d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid);
|
||||
pReader->cost.getTbFromMemTime += (taosGetTimestampUs() - st) / 1000.0;
|
||||
if (d != NULL) {
|
||||
code = tsdbTbDataIterCreate(d, &startKey, backward, &pBlockScanInfo->iter.iter);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
|
@ -2253,9 +2273,7 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea
|
|||
|
||||
STbData* di = NULL;
|
||||
if (pReader->pReadSnap->pIMem != NULL) {
|
||||
st = taosGetTimestampUs();
|
||||
di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid);
|
||||
pReader->cost.getTbFromIMemTime += (taosGetTimestampUs() - st) / 1000.0;
|
||||
if (di != NULL) {
|
||||
code = tsdbTbDataIterCreate(di, &startKey, backward, &pBlockScanInfo->iiter.iter);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
|
@ -2333,6 +2351,7 @@ static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScan
|
|||
w.ekey = pScanInfo->lastKey + step;
|
||||
}
|
||||
|
||||
tsdbDebug("init last block reader, window:%"PRId64"-%"PRId64", uid:%"PRIu64", %s", w.skey, w.ekey, pScanInfo->uid, pReader->idStr);
|
||||
int32_t code = tMergeTreeOpen(&pLBlockReader->mergeTree, (pLBlockReader->order == TSDB_ORDER_DESC),
|
||||
pReader->pFileReader, pReader->suid, pScanInfo->uid, &w, &pLBlockReader->verRange,
|
||||
pLBlockReader->pInfo, false, pReader->idStr);
|
||||
|
@ -2506,7 +2525,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
|
|||
if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader) &&
|
||||
pBlock->nRow <= pReader->capacity) {
|
||||
if (asc || ((!asc) && (!hasDataInLastBlock(pLastBlockReader)))) {
|
||||
copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
|
||||
copyBlockDataToSDataBlock(pReader);
|
||||
|
||||
// record the last key value
|
||||
pBlockScanInfo->lastKey = asc ? pBlock->maxKey.ts : pBlock->minKey.ts;
|
||||
|
@ -2650,21 +2669,39 @@ _err:
|
|||
}
|
||||
|
||||
TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
|
||||
TSDBKEY key = {.ts = TSKEY_INITIAL_VAL};
|
||||
bool asc = ASCENDING_TRAVERSE(pReader->order);
|
||||
// TSKEY initialVal = asc? TSKEY_MIN:TSKEY_MAX;
|
||||
|
||||
TSDBKEY key = {.ts = TSKEY_INITIAL_VAL}, ikey = {.ts = TSKEY_INITIAL_VAL};
|
||||
|
||||
bool hasKey = false, hasIKey = false;
|
||||
TSDBROW* pRow = getValidMemRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader);
|
||||
if (pRow != NULL) {
|
||||
hasKey = true;
|
||||
key = TSDBROW_KEY(pRow);
|
||||
}
|
||||
|
||||
pRow = getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader);
|
||||
if (pRow != NULL) {
|
||||
TSDBKEY k = TSDBROW_KEY(pRow);
|
||||
if (key.ts > k.ts) {
|
||||
key = k;
|
||||
}
|
||||
TSDBROW* pIRow = getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader);
|
||||
if (pIRow != NULL) {
|
||||
hasIKey = true;
|
||||
ikey = TSDBROW_KEY(pIRow);
|
||||
}
|
||||
|
||||
return key;
|
||||
if (hasKey) {
|
||||
if (hasIKey) { // has data in mem & imem
|
||||
if (asc) {
|
||||
return key.ts <= ikey.ts ? key : ikey;
|
||||
} else {
|
||||
return key.ts <= ikey.ts ? ikey: key;
|
||||
}
|
||||
} else { // no data in imem
|
||||
return key;
|
||||
}
|
||||
} else {
|
||||
// no data in mem & imem, return the initial value
|
||||
// only imem has data, return ikey
|
||||
return ikey;
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
|
||||
|
@ -2833,21 +2870,14 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
|
|||
|
||||
ASSERT(pBlockInfo != NULL);
|
||||
|
||||
// if (pBlockInfo != NULL) {
|
||||
pScanInfo = *(STableBlockScanInfo**)taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
|
||||
// } else {
|
||||
// pScanInfo = *pReader->status.pTableIter;
|
||||
// }
|
||||
|
||||
if (pScanInfo == NULL) {
|
||||
tsdbError("failed to get table scan-info, %s", pReader->idStr);
|
||||
code = TSDB_CODE_INVALID_PARA;
|
||||
return code;
|
||||
}
|
||||
|
||||
// if (pBlockInfo != NULL) {
|
||||
pBlock = getCurrentBlock(pBlockIter);
|
||||
// }
|
||||
|
||||
initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
|
||||
TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader);
|
||||
|
@ -2906,14 +2936,14 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
|
|||
SDataBlockInfo* pInfo = &pReader->pResBlock->info;
|
||||
pInfo->rows = pBlock->nRow;
|
||||
pInfo->id.uid = pScanInfo->uid;
|
||||
pInfo->dataLoad = 0;
|
||||
pInfo->window = (STimeWindow){.skey = pBlock->minKey.ts, .ekey = pBlock->maxKey.ts};
|
||||
setComposedBlockFlag(pReader, false);
|
||||
setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlock->maxKey.ts, pReader->order);
|
||||
|
||||
// update the last key for the corresponding table
|
||||
pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->order) ? pInfo->window.ekey : pInfo->window.skey;
|
||||
tsdbDebug("%p uid:%" PRIu64
|
||||
" clean file block retrieved from file, global index:%d, "
|
||||
tsdbDebug("%p uid:%" PRIu64 " clean file block retrieved from file, global index:%d, "
|
||||
"table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s",
|
||||
pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->nRow, pBlock->minKey.ts,
|
||||
pBlock->maxKey.ts, pReader->idStr);
|
||||
|
@ -3029,7 +3059,6 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
|
|||
|
||||
// this file does not have data files, let's start check the last block file if exists
|
||||
if (pBlockIter->numOfBlocks == 0) {
|
||||
resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order);
|
||||
resetTableListIndex(&pReader->status);
|
||||
goto _begin;
|
||||
}
|
||||
|
@ -3062,7 +3091,6 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
|
|||
// data blocks in current file are exhausted, let's try the next file now
|
||||
tBlockDataReset(&pReader->status.fileBlockData);
|
||||
resetDataBlockIterator(pBlockIter, pReader->order);
|
||||
resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order);
|
||||
resetTableListIndex(&pReader->status);
|
||||
goto _begin;
|
||||
} else {
|
||||
|
@ -3075,7 +3103,6 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
|
|||
|
||||
// this file does not have blocks, let's start check the last block file
|
||||
if (pBlockIter->numOfBlocks == 0) {
|
||||
resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order);
|
||||
resetTableListIndex(&pReader->status);
|
||||
goto _begin;
|
||||
}
|
||||
|
@ -3794,7 +3821,11 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
|
|||
pCond->twindows.ekey -= 1;
|
||||
}
|
||||
|
||||
int32_t capacity = (pResBlock == NULL)? 4096:pResBlock->info.capacity;
|
||||
int32_t capacity = pVnode->config.tsdbCfg.maxRows;
|
||||
if (pResBlock != NULL) {
|
||||
blockDataEnsureCapacity(pResBlock, capacity);
|
||||
}
|
||||
|
||||
int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, capacity, pResBlock, idstr);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _err;
|
||||
|
@ -3999,18 +4030,19 @@ void tsdbReaderClose(STsdbReader* pReader) {
|
|||
" SMA-time:%.2f ms, fileBlocks:%" PRId64
|
||||
", fileBlocks-load-time:%.2f ms, "
|
||||
"build in-memory-block-time:%.2f ms, lastBlocks:%" PRId64 ", lastBlocks-time:%.2f ms, composed-blocks:%" PRId64
|
||||
", composed-blocks-time:%.2fms, STableBlockScanInfo size:%.2f Kb, creatTime:%.2f ms,"
|
||||
", getTbFromMem-time:%.2f ms, getTbFromIMem-time:%.2f ms, initDelSkylineIterTime:%.2f ms, %s",
|
||||
", composed-blocks-time:%.2fms, STableBlockScanInfo size:%.2f Kb, createTime:%.2f ms,initDelSkylineIterTime:%.2f ms, %s",
|
||||
pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaDataLoad, pCost->smaLoadTime, pCost->numOfBlocks,
|
||||
pCost->blockLoadTime, pCost->buildmemBlock, pCost->lastBlockLoad, pCost->lastBlockLoadTime, pCost->composedBlocks,
|
||||
pCost->buildComposedBlockTime, numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pCost->createScanInfoList,
|
||||
pCost->getTbFromMemTime, pCost->getTbFromIMemTime, pCost->initDelSkylineIterTime, pReader->idStr);
|
||||
pCost->initDelSkylineIterTime, pReader->idStr);
|
||||
|
||||
taosMemoryFree(pReader->idStr);
|
||||
taosMemoryFree(pReader->pSchema);
|
||||
|
||||
if (pReader->pMemSchema != pReader->pSchema) {
|
||||
taosMemoryFree(pReader->pMemSchema);
|
||||
}
|
||||
|
||||
taosMemoryFreeClear(pReader);
|
||||
}
|
||||
|
||||
|
@ -4090,26 +4122,6 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
|
|||
return false;
|
||||
}
|
||||
|
||||
static void setBlockInfo(const STsdbReader* pReader, int32_t* rows, uint64_t* uid, STimeWindow* pWindow) {
|
||||
*rows = pReader->pResBlock->info.rows;
|
||||
*uid = pReader->pResBlock->info.id.uid;
|
||||
*pWindow = pReader->pResBlock->info.window;
|
||||
}
|
||||
|
||||
void tsdbRetrieveDataBlockInfo(const STsdbReader* pReader, int32_t* rows, uint64_t* uid, STimeWindow* pWindow) {
|
||||
if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
|
||||
if (pReader->step == EXTERNAL_ROWS_MAIN) {
|
||||
setBlockInfo(pReader, rows, uid, pWindow);
|
||||
} else if (pReader->step == EXTERNAL_ROWS_PREV) {
|
||||
setBlockInfo(pReader->innerReader[0], rows, uid, pWindow);
|
||||
} else {
|
||||
setBlockInfo(pReader->innerReader[1], rows, uid, pWindow);
|
||||
}
|
||||
} else {
|
||||
setBlockInfo(pReader, rows, uid, pWindow);
|
||||
}
|
||||
}
|
||||
|
||||
static void doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_t numOfCols, SColumnDataAgg* pTsAgg) {
|
||||
// do fill all null column value SMA info
|
||||
int32_t i = 0, j = 0;
|
||||
|
@ -4245,7 +4257,7 @@ static SSDataBlock* doRetrieveDataBlock(STsdbReader* pReader) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
|
||||
copyBlockDataToSDataBlock(pReader);
|
||||
return pReader->pResBlock;
|
||||
}
|
||||
|
||||
|
|
|
@ -426,7 +426,13 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
|
|||
SVnode *pVnode = pWriter->pVnode;
|
||||
|
||||
ASSERT(pHdr->size + sizeof(SSnapDataHdr) == nData);
|
||||
ASSERT(pHdr->index == pWriter->index + 1);
|
||||
|
||||
if (pHdr->index != pWriter->index + 1) {
|
||||
vError("vgId:%d, unexpected vnode snapshot msg. index:%" PRId64 ", expected index:%" PRId64, TD_VID(pVnode),
|
||||
pHdr->index, pWriter->index + 1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
pWriter->index = pHdr->index;
|
||||
|
||||
vDebug("vgId:%d, vnode snapshot write data, index:%" PRId64 " type:%d blockLen:%d", TD_VID(pVnode), pHdr->index,
|
||||
|
|
|
@ -1185,7 +1185,7 @@ static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void
|
|||
tDecodeSBatchDeleteReq(&decoder, &deleteReq);
|
||||
|
||||
SMetaReader mr = {0};
|
||||
metaReaderInit(&mr, pVnode->pMeta, 0);
|
||||
metaReaderInit(&mr, pVnode->pMeta, META_READER_NOLOCK);
|
||||
|
||||
int32_t sz = taosArrayGetSize(deleteReq.deleteReqs);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
|
|
|
@ -474,6 +474,8 @@ typedef struct SStreamScanInfo {
|
|||
int32_t blockRecoverContiCnt;
|
||||
int32_t blockRecoverTotCnt;
|
||||
|
||||
int8_t igCheckUpdate;
|
||||
int8_t igExpired;
|
||||
} SStreamScanInfo;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -1647,12 +1647,18 @@ int32_t convertFillType(int32_t mode) {
|
|||
case FILL_MODE_NULL:
|
||||
type = TSDB_FILL_NULL;
|
||||
break;
|
||||
case FILL_MODE_NULL_F:
|
||||
type = TSDB_FILL_NULL_F;
|
||||
break;
|
||||
case FILL_MODE_NEXT:
|
||||
type = TSDB_FILL_NEXT;
|
||||
break;
|
||||
case FILL_MODE_VALUE:
|
||||
type = TSDB_FILL_SET_VALUE;
|
||||
break;
|
||||
case FILL_MODE_VALUE_F:
|
||||
type = TSDB_FILL_SET_VALUE_F;
|
||||
break;
|
||||
case FILL_MODE_LINEAR:
|
||||
type = TSDB_FILL_LINEAR;
|
||||
break;
|
||||
|
@ -1801,28 +1807,30 @@ int32_t tableListAddTableInfo(STableListInfo* pTableList, uint64_t uid, uint64_t
|
|||
|
||||
int32_t tableListGetGroupList(const STableListInfo* pTableList, int32_t ordinalGroupIndex, STableKeyInfo** pKeyInfo,
|
||||
int32_t* size) {
|
||||
int32_t total = tableListGetOutputGroups(pTableList);
|
||||
if (ordinalGroupIndex < 0 || ordinalGroupIndex >= total) {
|
||||
int32_t totalGroups = tableListGetOutputGroups(pTableList);
|
||||
int32_t numOfTables = tableListGetSize(pTableList);
|
||||
|
||||
if (ordinalGroupIndex < 0 || ordinalGroupIndex >= totalGroups) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
|
||||
// here handle two special cases:
|
||||
// 1. only one group exists, and 2. one table exists for each group.
|
||||
if (total == 1) {
|
||||
*size = tableListGetSize(pTableList);
|
||||
if (totalGroups == 1) {
|
||||
*size = numOfTables;
|
||||
*pKeyInfo = (*size == 0) ? NULL : taosArrayGet(pTableList->pTableList, 0);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else if (total == tableListGetSize(pTableList)) {
|
||||
} else if (totalGroups == numOfTables) {
|
||||
*size = 1;
|
||||
*pKeyInfo = taosArrayGet(pTableList->pTableList, ordinalGroupIndex);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t offset = pTableList->groupOffset[ordinalGroupIndex];
|
||||
if (ordinalGroupIndex < total - 1) {
|
||||
*size = pTableList->groupOffset[offset + 1] - pTableList->groupOffset[offset];
|
||||
if (ordinalGroupIndex < totalGroups - 1) {
|
||||
*size = pTableList->groupOffset[ordinalGroupIndex + 1] - offset;
|
||||
} else {
|
||||
*size = total - pTableList->groupOffset[offset] - 1;
|
||||
*size = numOfTables - offset;
|
||||
}
|
||||
|
||||
*pKeyInfo = taosArrayGet(pTableList->pTableList, offset);
|
||||
|
|
|
@ -140,7 +140,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
|
|||
while (1) {
|
||||
SSDataBlock* pBlock = pDownstream->fpSet.getNextFn(pDownstream);
|
||||
if (pBlock == NULL) {
|
||||
if (pInfo->totalInputRows == 0) {
|
||||
if (pInfo->totalInputRows == 0 && (pInfo->pFillInfo->type != TSDB_FILL_NULL_F && pInfo->pFillInfo->type != TSDB_FILL_SET_VALUE_F)) {
|
||||
setOperatorCompleted(pOperator);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -456,7 +456,8 @@ void* destroyStreamFillLinearInfo(SStreamFillLinearInfo* pFillLinear) {
|
|||
return NULL;
|
||||
}
|
||||
void* destroyStreamFillInfo(SStreamFillInfo* pFillInfo) {
|
||||
if (pFillInfo->type == TSDB_FILL_SET_VALUE || pFillInfo->type == TSDB_FILL_NULL) {
|
||||
if (pFillInfo->type == TSDB_FILL_SET_VALUE || pFillInfo->type == TSDB_FILL_SET_VALUE_F ||
|
||||
pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) {
|
||||
taosMemoryFreeClear(pFillInfo->pResRow->pRowVal);
|
||||
taosMemoryFreeClear(pFillInfo->pResRow);
|
||||
}
|
||||
|
@ -661,7 +662,9 @@ void setDeleteFillValueInfo(TSKEY start, TSKEY end, SStreamFillSupporter* pFillS
|
|||
pFillInfo->pos = FILL_POS_INVALID;
|
||||
switch (pFillInfo->type) {
|
||||
case TSDB_FILL_NULL:
|
||||
case TSDB_FILL_NULL_F:
|
||||
case TSDB_FILL_SET_VALUE:
|
||||
case TSDB_FILL_SET_VALUE_F:
|
||||
break;
|
||||
case TSDB_FILL_PREV:
|
||||
pFillInfo->pResRow = &pFillSup->prev;
|
||||
|
@ -720,7 +723,9 @@ void setFillValueInfo(SSDataBlock* pBlock, TSKEY ts, int32_t rowId, SStreamFillS
|
|||
pFillInfo->pos = FILL_POS_INVALID;
|
||||
switch (pFillInfo->type) {
|
||||
case TSDB_FILL_NULL:
|
||||
case TSDB_FILL_SET_VALUE: {
|
||||
case TSDB_FILL_NULL_F:
|
||||
case TSDB_FILL_SET_VALUE:
|
||||
case TSDB_FILL_SET_VALUE_F: {
|
||||
if (pFillSup->prev.key == pFillInfo->preRowKey) {
|
||||
resetFillWindow(&pFillSup->prev);
|
||||
}
|
||||
|
@ -1360,7 +1365,8 @@ SStreamFillInfo* initStreamFillInfo(SStreamFillSupporter* pFillSup, SSDataBlock*
|
|||
pFillInfo->pLinearInfo->winIndex = 0;
|
||||
|
||||
pFillInfo->pResRow = NULL;
|
||||
if (pFillSup->type == TSDB_FILL_SET_VALUE || pFillSup->type == TSDB_FILL_NULL) {
|
||||
if (pFillSup->type == TSDB_FILL_SET_VALUE || pFillSup->type == TSDB_FILL_SET_VALUE_F
|
||||
|| pFillSup->type == TSDB_FILL_NULL || pFillSup->type == TSDB_FILL_NULL_F) {
|
||||
pFillInfo->pResRow = taosMemoryCalloc(1, sizeof(SResultRowData));
|
||||
pFillInfo->pResRow->key = INT64_MIN;
|
||||
pFillInfo->pResRow->pRowVal = taosMemoryCalloc(1, pFillSup->rowSize);
|
||||
|
@ -1405,7 +1411,7 @@ SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFi
|
|||
goto _error;
|
||||
}
|
||||
|
||||
if (pInfo->pFillInfo->type == TSDB_FILL_SET_VALUE) {
|
||||
if (pInfo->pFillInfo->type == TSDB_FILL_SET_VALUE || pInfo->pFillInfo->type == TSDB_FILL_SET_VALUE_F) {
|
||||
for (int32_t i = 0; i < pInfo->pFillSup->numOfAllCols; ++i) {
|
||||
SFillColInfo* pFillCol = pInfo->pFillSup->pAllColInfo + i;
|
||||
int32_t slotId = GET_DEST_SLOT_ID(pFillCol);
|
||||
|
@ -1427,7 +1433,7 @@ SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFi
|
|||
pCell->isNull = true;
|
||||
}
|
||||
}
|
||||
} else if (pInfo->pFillInfo->type == TSDB_FILL_NULL) {
|
||||
} else if (pInfo->pFillInfo->type == TSDB_FILL_NULL || pInfo->pFillInfo->type == TSDB_FILL_NULL_F) {
|
||||
for (int32_t i = 0; i < pInfo->pFillSup->numOfAllCols; ++i) {
|
||||
SFillColInfo* pFillCol = pInfo->pFillSup->pAllColInfo + i;
|
||||
int32_t slotId = GET_DEST_SLOT_ID(pFillCol);
|
||||
|
|
|
@ -492,8 +492,8 @@ _error:
|
|||
|
||||
static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
|
||||
SPartitionOperatorInfo* pInfo = pOperator->info;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
|
||||
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
|
||||
int32_t len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
|
||||
|
@ -690,8 +690,8 @@ static int compareDataGroupInfo(const void* group1, const void* group2) {
|
|||
|
||||
static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
|
||||
SPartitionOperatorInfo* pInfo = pOperator->info;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
SDataGroupInfo* pGroupInfo =
|
||||
(pInfo->groupIndex != -1) ? taosArrayGet(pInfo->sortedGroupArray, pInfo->groupIndex) : NULL;
|
||||
if (pInfo->groupIndex == -1 || pInfo->pageIndex >= taosArrayGetSize(pGroupInfo->pPageList)) {
|
||||
|
@ -713,7 +713,7 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
|
|||
qError("failed to get buffer, code:%s, %s", tstrerror(terrno), GET_TASKID(pTaskInfo));
|
||||
T_LONG_JMP(pTaskInfo->env, terrno);
|
||||
}
|
||||
|
||||
|
||||
blockDataEnsureCapacity(pInfo->binfo.pRes, pInfo->rowCapacity);
|
||||
blockDataFromBuf1(pInfo->binfo.pRes, page, pInfo->rowCapacity);
|
||||
|
||||
|
@ -829,6 +829,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
|
|||
SPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SPartitionOperatorInfo));
|
||||
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
|
||||
if (pInfo == NULL || pOperator == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
pTaskInfo->code = terrno;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
|
@ -841,6 +843,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
|
|||
SExprInfo* pExprInfo1 = createExprInfo(pPartNode->pExprs, NULL, &num);
|
||||
int32_t code = initExprSupp(&pInfo->scalarSup, pExprInfo1, num);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
pTaskInfo->code = terrno;
|
||||
goto _error;
|
||||
}
|
||||
}
|
||||
|
@ -848,6 +852,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
|
|||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pInfo->pGroupSet = taosHashInit(100, hashFn, false, HASH_NO_LOCK);
|
||||
if (pInfo->pGroupSet == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
pTaskInfo->code = terrno;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
|
@ -866,6 +872,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
|
|||
|
||||
int32_t code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
pTaskInfo->code = code;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
|
@ -873,6 +881,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
|
|||
pInfo->columnOffset = setupColumnOffset(pInfo->binfo.pRes, pInfo->rowCapacity);
|
||||
code = initGroupOptrInfo(&pInfo->pGroupColVals, &pInfo->groupKeyLen, &pInfo->keyBuf, pInfo->pGroupCols);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
pTaskInfo->code = code;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
|
@ -885,10 +895,15 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
|
|||
createOperatorFpSet(optrDummyOpenFn, hashPartition, NULL, destroyPartitionOperatorInfo, optrDefaultBufFn, NULL);
|
||||
|
||||
code = appendDownstream(pOperator, &downstream, 1);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
pTaskInfo->code = code;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
return pOperator;
|
||||
|
||||
_error:
|
||||
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
if (pInfo != NULL) {
|
||||
destroyPartitionOperatorInfo(pInfo);
|
||||
}
|
||||
|
@ -1094,7 +1109,7 @@ void initParDownStream(SOperatorInfo* downstream, SPartitionBySupporter* pParSup
|
|||
SStreamScanInfo* pScanInfo = downstream->info;
|
||||
pScanInfo->partitionSup = *pParSup;
|
||||
pScanInfo->pPartScalarSup = pExpr;
|
||||
if (!pScanInfo->pUpdateInfo) {
|
||||
if (!pScanInfo->igCheckUpdate && !pScanInfo->pUpdateInfo) {
|
||||
pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, 0);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -446,6 +446,16 @@ static STableCachedVal* createTableCacheVal(const SMetaReader* pMetaReader) {
|
|||
// const void *key, size_t keyLen, void *value
|
||||
static void freeCachedMetaItem(const void* key, size_t keyLen, void* value) { freeTableCachedVal(value); }
|
||||
|
||||
|
||||
static void doSetNullValue(SSDataBlock* pBlock, const SExprInfo* pExpr, int32_t numOfExpr) {
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
int32_t dstSlotId = pExpr[j].base.resSchema.slotId;
|
||||
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotId);
|
||||
colDataAppendNNULL(pColInfoData, 0, pBlock->info.rows);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock,
|
||||
int32_t rows, const char* idStr, STableMetaCacheInfo* pCache) {
|
||||
// currently only the tbname pseudo column
|
||||
|
@ -465,14 +475,22 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
|
|||
SMetaReader mr = {0};
|
||||
LRUHandle* h = NULL;
|
||||
|
||||
// todo refactor: extract method
|
||||
// the handling of the null data should be packed in the extracted method
|
||||
|
||||
// 1. check if it is existed in meta cache
|
||||
if (pCache == NULL) {
|
||||
metaReaderInit(&mr, pHandle->meta, 0);
|
||||
code = metaGetTableEntryByUidCache(&mr, pBlock->info.id.uid);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
||||
// when encounter the TSDB_CODE_PAR_TABLE_NOT_EXIST error, we proceed.
|
||||
if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
|
||||
qWarn("failed to get table meta, table may have been dropped, uid:0x%" PRIx64 ", code:%s, %s",
|
||||
pBlock->info.id.uid, tstrerror(terrno), idStr);
|
||||
|
||||
// append null value before return to caller, since the caller will ignore this error code and proceed
|
||||
doSetNullValue(pBlock, pExpr, numOfExpr);
|
||||
} else {
|
||||
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.id.uid, tstrerror(terrno),
|
||||
idStr);
|
||||
|
@ -498,6 +516,8 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
|
|||
if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
|
||||
qWarn("failed to get table meta, table may have been dropped, uid:0x%" PRIx64 ", code:%s, %s",
|
||||
pBlock->info.id.uid, tstrerror(terrno), idStr);
|
||||
// append null value before return to caller, since the caller will ignore this error code and proceed
|
||||
doSetNullValue(pBlock, pExpr, numOfExpr);
|
||||
} else {
|
||||
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.id.uid, tstrerror(terrno),
|
||||
idStr);
|
||||
|
@ -761,6 +781,10 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
|
|||
if (code != TSDB_CODE_SUCCESS) {
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
|
||||
if (pInfo->pResBlock->info.capacity > pOperator->resultInfo.capacity) {
|
||||
pOperator->resultInfo.capacity = pInfo->pResBlock->info.capacity;
|
||||
}
|
||||
}
|
||||
|
||||
SSDataBlock* result = doGroupedTableScan(pOperator);
|
||||
|
@ -864,7 +888,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
|
|||
|
||||
initResultSizeInfo(&pOperator->resultInfo, 4096);
|
||||
pInfo->pResBlock = createDataBlockFromDescNode(pDescNode);
|
||||
blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity);
|
||||
// blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity);
|
||||
|
||||
code = filterInitFromNode((SNode*)pTableScanNode->scan.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1415,7 +1439,12 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
|
|||
dumyInfo.cur.pageId = -1;
|
||||
bool isClosed = false;
|
||||
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
|
||||
if (tableInserted && isOverdue(tsCol[rowId], &pInfo->twAggSup)) {
|
||||
bool overDue = isOverdue(tsCol[rowId], &pInfo->twAggSup);
|
||||
if (pInfo->igExpired && overDue) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (tableInserted && overDue) {
|
||||
win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC);
|
||||
isClosed = isCloseWindow(&win, &pInfo->twAggSup);
|
||||
}
|
||||
|
@ -1681,41 +1710,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
|
|||
SStreamScanInfo* pInfo = pOperator->info;
|
||||
|
||||
qDebug("stream scan called");
|
||||
#if 0
|
||||
SStreamState* pState = pTaskInfo->streamInfo.pState;
|
||||
if (pState) {
|
||||
printf(">>>>>>>> stream write backend\n");
|
||||
SWinKey key = {
|
||||
.ts = 1,
|
||||
.groupId = 2,
|
||||
};
|
||||
char tmp[100] = "abcdefg1";
|
||||
if (streamStatePut(pState, &key, &tmp, strlen(tmp) + 1) < 0) {
|
||||
ASSERT(0);
|
||||
}
|
||||
|
||||
key.ts = 2;
|
||||
char tmp2[100] = "abcdefg2";
|
||||
if (streamStatePut(pState, &key, &tmp2, strlen(tmp2) + 1) < 0) {
|
||||
ASSERT(0);
|
||||
}
|
||||
|
||||
key.groupId = 5;
|
||||
key.ts = 1;
|
||||
char tmp3[100] = "abcdefg3";
|
||||
if (streamStatePut(pState, &key, &tmp3, strlen(tmp3) + 1) < 0) {
|
||||
ASSERT(0);
|
||||
}
|
||||
|
||||
char* val2 = NULL;
|
||||
int32_t sz;
|
||||
if (streamStateGet(pState, &key, (void**)&val2, &sz) < 0) {
|
||||
ASSERT(0);
|
||||
}
|
||||
printf("stream read %s %d\n", val2, sz);
|
||||
streamFreeVal(val2);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1 ||
|
||||
pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE2) {
|
||||
|
@ -2348,6 +2342,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
|
|||
pInfo->pUpdateDataRes = createSpecialDataBlock(STREAM_CLEAR);
|
||||
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
|
||||
pInfo->partitionSup.needCalc = false;
|
||||
pInfo->igCheckUpdate = pTableScanNode->igCheckUpdate;
|
||||
pInfo->igExpired = pTableScanNode->igExpired;
|
||||
pInfo->twAggSup.maxTs = INT64_MIN;
|
||||
|
||||
setOperatorInfo(pOperator, "StreamScanOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, false, OP_NOT_OPENED, pInfo,
|
||||
pTaskInfo);
|
||||
|
|
|
@ -65,7 +65,7 @@ typedef struct SSysTableScanInfo {
|
|||
SSDataBlock* pRes;
|
||||
int64_t numOfBlocks; // extract basic running information.
|
||||
SLoadRemoteDataInfo loadInfo;
|
||||
|
||||
SLimitInfo limitInfo;
|
||||
int32_t tbnameSlotId;
|
||||
} SSysTableScanInfo;
|
||||
|
||||
|
@ -133,7 +133,6 @@ static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capac
|
|||
static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName);
|
||||
static void destroySysScanOperator(void* param);
|
||||
static int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code);
|
||||
static SSDataBlock* doFilterResult(SSDataBlock* pDataBlock, SFilterInfo* pFilterInfo);
|
||||
static __optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse);
|
||||
|
||||
static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, SMetaReader* smrSuperTable,
|
||||
|
@ -351,7 +350,7 @@ static int32_t optSysMergeRslt(SArray* mRslt, SArray* rslt);
|
|||
static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableScanInfo* pInfo, const char* name,
|
||||
SExecTaskInfo* pTaskInfo);
|
||||
void extractTbnameSlotId(SSysTableScanInfo* pInfo, const SScanPhysiNode* pScanNode);
|
||||
static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo,
|
||||
static void sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo,
|
||||
const char* name, SSDataBlock* pBlock);
|
||||
__optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse) {
|
||||
if (ctype == OP_TYPE_LOWER_EQUAL || ctype == OP_TYPE_LOWER_THAN) {
|
||||
|
@ -556,7 +555,7 @@ void relocateAndFilterSysTagsScanResult(SSysTableScanInfo* pInfo, int32_t numOfR
|
|||
pInfo->pRes->info.rows = numOfRows;
|
||||
|
||||
relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, dataBlock->pDataBlock, false);
|
||||
doFilterResult(pInfo->pRes, pFilterInfo);
|
||||
doFilter(pInfo->pRes, pFilterInfo, NULL);
|
||||
blockDataCleanup(dataBlock);
|
||||
}
|
||||
|
||||
|
@ -975,7 +974,7 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
|
|||
pInfo->pRes->info.rows = numOfRows;
|
||||
|
||||
relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false);
|
||||
doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
|
||||
doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
|
||||
blockDataCleanup(p);
|
||||
numOfRows = 0;
|
||||
|
@ -991,7 +990,7 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
|
|||
pInfo->pRes->info.rows = numOfRows;
|
||||
|
||||
relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false);
|
||||
doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
|
||||
doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
|
||||
blockDataCleanup(p);
|
||||
numOfRows = 0;
|
||||
|
@ -1152,7 +1151,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
|
|||
pInfo->pRes->info.rows = numOfRows;
|
||||
|
||||
relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false);
|
||||
doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
|
||||
doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
|
||||
blockDataCleanup(p);
|
||||
numOfRows = 0;
|
||||
|
@ -1168,7 +1167,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
|
|||
pInfo->pRes->info.rows = numOfRows;
|
||||
|
||||
relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false);
|
||||
doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
|
||||
doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
|
||||
blockDataCleanup(p);
|
||||
numOfRows = 0;
|
||||
|
@ -1199,7 +1198,7 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
|
|||
// the retrieve is executed on the mnode, so return tables that belongs to the information schema database.
|
||||
if (pInfo->readHandle.mnd != NULL) {
|
||||
buildSysDbTableInfo(pInfo, pOperator->resultInfo.capacity);
|
||||
doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
|
||||
doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
|
||||
|
||||
setOperatorCompleted(pOperator);
|
||||
|
@ -1324,6 +1323,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
|
|||
getDBNameFromCondition(pInfo->pCondition, dbName);
|
||||
sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
|
||||
}
|
||||
|
||||
SSDataBlock* pBlock = NULL;
|
||||
if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0) {
|
||||
pBlock = sysTableScanUserTables(pOperator);
|
||||
|
@ -1336,30 +1336,37 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
|
|||
pBlock = sysTableScanFromMNode(pOperator, pInfo, name, pTaskInfo);
|
||||
}
|
||||
|
||||
return sysTableScanFillTbName(pOperator, pInfo, name, pBlock);
|
||||
}
|
||||
|
||||
static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo,
|
||||
const char* name, SSDataBlock* pBlock) {
|
||||
sysTableScanFillTbName(pOperator, pInfo, name, pBlock);
|
||||
if (pBlock != NULL) {
|
||||
if (pInfo->tbnameSlotId != -1) {
|
||||
SColumnInfoData* pColumnInfoData = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, pInfo->tbnameSlotId);
|
||||
char varTbName[TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE] = {0};
|
||||
memcpy(varDataVal(varTbName), name, strlen(name));
|
||||
varDataSetLen(varTbName, strlen(name));
|
||||
for (int i = 0; i < pBlock->info.rows; ++i) {
|
||||
colDataAppend(pColumnInfoData, i, varTbName, NULL);
|
||||
}
|
||||
doFilterResult(pBlock, pOperator->exprSupp.pFilterInfo);
|
||||
bool limitReached = applyLimitOffset(&pInfo->limitInfo, pBlock, pTaskInfo);
|
||||
if (limitReached) {
|
||||
setOperatorCompleted(pOperator);
|
||||
}
|
||||
}
|
||||
if (pBlock && pBlock->info.rows != 0) {
|
||||
return pBlock;
|
||||
|
||||
return pBlock->info.rows > 0? pBlock:NULL;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo, const char* name,
|
||||
SSDataBlock* pBlock) {
|
||||
if (pBlock == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (pInfo->tbnameSlotId != -1) {
|
||||
SColumnInfoData* pColumnInfoData = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, pInfo->tbnameSlotId);
|
||||
char varTbName[TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE] = {0};
|
||||
memcpy(varDataVal(varTbName), name, strlen(name));
|
||||
varDataSetLen(varTbName, strlen(name));
|
||||
|
||||
colDataAppendNItems(pColumnInfoData, 0, varTbName, pBlock->info.rows);
|
||||
}
|
||||
|
||||
doFilter(pBlock, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
}
|
||||
|
||||
static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableScanInfo* pInfo, const char* name,
|
||||
SExecTaskInfo* pTaskInfo) {
|
||||
if (pOperator->status == OP_EXEC_DONE) {
|
||||
|
@ -1423,7 +1430,7 @@ static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableSca
|
|||
updateLoadRemoteInfo(&pInfo->loadInfo, pRsp->numOfRows, pRsp->compLen, startTs, pOperator);
|
||||
|
||||
// todo log the filter info
|
||||
doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
|
||||
doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
taosMemoryFree(pRsp);
|
||||
if (pInfo->pRes->info.rows > 0) {
|
||||
return pInfo->pRes;
|
||||
|
@ -1457,13 +1464,13 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
|
|||
pInfo->sysInfo = pScanPhyNode->sysInfo;
|
||||
pInfo->showRewrite = pScanPhyNode->showRewrite;
|
||||
pInfo->pRes = createDataBlockFromDescNode(pDescNode);
|
||||
|
||||
pInfo->pCondition = pScanNode->node.pConditions;
|
||||
code = filterInitFromNode(pScanNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
|
||||
initLimitInfo(pScanPhyNode->scan.node.pLimit, pScanPhyNode->scan.node.pSlimit, &pInfo->limitInfo);
|
||||
initResultSizeInfo(&pOperator->resultInfo, 4096);
|
||||
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
|
||||
|
||||
|
@ -1556,15 +1563,6 @@ int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSDataBlock* doFilterResult(SSDataBlock* pDataBlock, SFilterInfo* pFilterInfo) {
|
||||
if (pFilterInfo == NULL) {
|
||||
return pDataBlock->info.rows == 0 ? NULL : pDataBlock;
|
||||
}
|
||||
|
||||
doFilter(pDataBlock, pFilterInfo, NULL);
|
||||
return pDataBlock->info.rows == 0 ? NULL : pDataBlock;
|
||||
}
|
||||
|
||||
static int32_t sysChkFilter__Comm(SNode* pNode) {
|
||||
// impl
|
||||
SOperatorNode* pOper = (SOperatorNode*)pNode;
|
||||
|
|
|
@ -186,7 +186,7 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
|
|||
}
|
||||
}
|
||||
}
|
||||
} else if (pFillInfo->type == TSDB_FILL_NULL) { // fill with NULL
|
||||
} else if (pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) { // fill with NULL
|
||||
setNullRow(pBlock, pFillInfo, index);
|
||||
} else { // fill with user specified value for each column
|
||||
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
|
@ -349,7 +349,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t
|
|||
bool isNull = colDataIsNull_s(pSrc, pFillInfo->index);
|
||||
colDataAppend(pDst, index, src, isNull);
|
||||
saveColData(pFillInfo->prev.pRowVal, i, src, isNull); // todo:
|
||||
} else if (pFillInfo->type == TSDB_FILL_NULL) {
|
||||
} else if (pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) {
|
||||
colDataAppendNULL(pDst, index);
|
||||
} else if (pFillInfo->type == TSDB_FILL_NEXT) {
|
||||
SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->next.pRowVal : pFillInfo->prev.pRowVal;
|
||||
|
@ -546,15 +546,14 @@ bool taosFillHasMoreResults(SFillInfo* pFillInfo) {
|
|||
}
|
||||
|
||||
int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) {
|
||||
SColumnInfoData* pCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, pFillInfo->srcTsSlotId);
|
||||
|
||||
int64_t* tsList = (int64_t*)pCol->pData;
|
||||
int32_t numOfRows = taosNumOfRemainRows(pFillInfo);
|
||||
|
||||
TSKEY ekey1 = ekey;
|
||||
|
||||
int64_t numOfRes = -1;
|
||||
if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
|
||||
SColumnInfoData* pCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, pFillInfo->srcTsSlotId);
|
||||
int64_t* tsList = (int64_t*)pCol->pData;
|
||||
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
|
||||
numOfRes = taosTimeCountInterval(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding,
|
||||
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision);
|
||||
|
|
|
@ -181,12 +181,14 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
|
|||
|
||||
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
|
||||
switch (pSliceInfo->fillType) {
|
||||
case TSDB_FILL_NULL: {
|
||||
case TSDB_FILL_NULL:
|
||||
case TSDB_FILL_NULL_F: {
|
||||
colDataAppendNULL(pDst, rows);
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_FILL_SET_VALUE: {
|
||||
case TSDB_FILL_SET_VALUE:
|
||||
case TSDB_FILL_SET_VALUE_F: {
|
||||
SVariant* pVar = &pSliceInfo->pFillColInfo[j].fillVal;
|
||||
|
||||
if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) {
|
||||
|
|
|
@ -1726,7 +1726,7 @@ void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSuppor
|
|||
SStreamScanInfo* pScanInfo = downstream->info;
|
||||
pScanInfo->windowSup.parentType = type;
|
||||
pScanInfo->windowSup.pIntervalAggSup = pSup;
|
||||
if (!pScanInfo->pUpdateInfo) {
|
||||
if (!pScanInfo->igCheckUpdate && !pScanInfo->pUpdateInfo) {
|
||||
pScanInfo->pUpdateInfo = updateInfoInitP(pInterval, pTwSup->waterMark);
|
||||
}
|
||||
pScanInfo->interval = *pInterval;
|
||||
|
@ -2893,7 +2893,7 @@ void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, uin
|
|||
}
|
||||
SStreamScanInfo* pScanInfo = downstream->info;
|
||||
pScanInfo->windowSup = (SWindowSupporter){.pStreamAggSup = pAggSup, .gap = pAggSup->gap, .parentType = type};
|
||||
if (!pScanInfo->pUpdateInfo) {
|
||||
if (!pScanInfo->igCheckUpdate && !pScanInfo->pUpdateInfo) {
|
||||
pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, pTwSup->waterMark);
|
||||
}
|
||||
pScanInfo->twAggSup = *pTwSup;
|
||||
|
|
|
@ -190,8 +190,9 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
|
|||
qError("Add to buf failed since %s", terrstr(terrno));
|
||||
return terrno;
|
||||
}
|
||||
|
||||
int32_t code = createDiskbasedBuf(&pHandle->pBuf, pHandle->pageSize, pHandle->numOfPages * pHandle->pageSize,
|
||||
"doAddToBuf", tsTempDir);
|
||||
"sortExternalBuf", tsTempDir);
|
||||
dBufSetPrintInfo(pHandle->pBuf);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -635,6 +636,7 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols) {
|
|||
|
||||
static int32_t createInitialSources(SSortHandle* pHandle) {
|
||||
size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize;
|
||||
int32_t code = 0;
|
||||
|
||||
if (pHandle->type == SORT_SINGLESOURCE_SORT) {
|
||||
SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0);
|
||||
|
@ -663,8 +665,8 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
pHandle->beforeFp(pBlock, pHandle->param);
|
||||
}
|
||||
|
||||
int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock);
|
||||
if (code != 0) {
|
||||
code = blockDataMerge(pHandle->pDataBlock, pBlock);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
if (source->param && !source->onlyRef) {
|
||||
taosMemoryFree(source->param);
|
||||
}
|
||||
|
@ -689,6 +691,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
blockDataDestroy(source->src.pBlock);
|
||||
source->src.pBlock = NULL;
|
||||
}
|
||||
|
||||
taosMemoryFree(source);
|
||||
return code;
|
||||
}
|
||||
|
@ -696,13 +699,17 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
int64_t el = taosGetTimestampUs() - p;
|
||||
pHandle->sortElapsed += el;
|
||||
|
||||
doAddToBuf(pHandle->pDataBlock, pHandle);
|
||||
code = doAddToBuf(pHandle->pDataBlock, pHandle);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (source->param && !source->onlyRef) {
|
||||
taosMemoryFree(source->param);
|
||||
}
|
||||
|
||||
taosMemoryFree(source);
|
||||
|
||||
if (pHandle->pDataBlock != NULL && pHandle->pDataBlock->info.rows > 0) {
|
||||
|
@ -711,7 +718,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
// Perform the in-memory sort and then flush data in the buffer into disk.
|
||||
int64_t p = taosGetTimestampUs();
|
||||
|
||||
int32_t code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
|
||||
code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
|
@ -729,12 +736,12 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
pHandle->tupleHandle.pBlock = pHandle->pDataBlock;
|
||||
return 0;
|
||||
} else {
|
||||
doAddToBuf(pHandle->pDataBlock, pHandle);
|
||||
code = doAddToBuf(pHandle->pDataBlock, pHandle);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tsortOpen(SSortHandle* pHandle) {
|
||||
|
|
|
@ -380,6 +380,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
|
|||
COPY_SCALAR_FIELD(watermark);
|
||||
COPY_SCALAR_FIELD(deleteMark);
|
||||
COPY_SCALAR_FIELD(igExpired);
|
||||
COPY_SCALAR_FIELD(igCheckUpdate);
|
||||
CLONE_NODE_LIST_FIELD(pGroupTags);
|
||||
COPY_SCALAR_FIELD(groupSort);
|
||||
CLONE_NODE_LIST_FIELD(pTags);
|
||||
|
@ -467,6 +468,7 @@ static int32_t logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* p
|
|||
COPY_SCALAR_FIELD(watermark);
|
||||
COPY_SCALAR_FIELD(deleteMark);
|
||||
COPY_SCALAR_FIELD(igExpired);
|
||||
COPY_SCALAR_FIELD(igCheckUpdate);
|
||||
COPY_SCALAR_FIELD(windowAlgo);
|
||||
COPY_SCALAR_FIELD(inputTsOrder);
|
||||
COPY_SCALAR_FIELD(outputTsOrder);
|
||||
|
|
|
@ -1553,6 +1553,7 @@ static const char* jkTableScanPhysiPlanGroupSort = "GroupSort";
|
|||
static const char* jkTableScanPhysiPlanTags = "Tags";
|
||||
static const char* jkTableScanPhysiPlanSubtable = "Subtable";
|
||||
static const char* jkTableScanPhysiPlanAssignBlockUid = "AssignBlockUid";
|
||||
static const char* jkTableScanPhysiPlanIgnoreUpdate = "IgnoreUpdate";
|
||||
|
||||
static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
|
||||
const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;
|
||||
|
@ -1618,6 +1619,9 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanAssignBlockUid, pNode->assignBlockUid);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanIgnoreUpdate, pNode->igCheckUpdate);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -1686,6 +1690,9 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanAssignBlockUid, &pNode->assignBlockUid);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetTinyIntValue(pJson, jkTableScanPhysiPlanIgnoreUpdate, &pNode->igCheckUpdate);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -2078,6 +2078,9 @@ static int32_t physiTableScanNodeInlineToMsg(const void* pObj, STlvEncoder* pEnc
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeValueBool(pEncoder, pNode->assignBlockUid);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeValueI8(pEncoder, pNode->igCheckUpdate);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -2154,6 +2157,9 @@ static int32_t msgToPhysiTableScanNodeInline(STlvDecoder* pDecoder, void* pObj)
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvDecodeValueBool(pDecoder, &pNode->assignBlockUid);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvDecodeValueI8(pDecoder, &pNode->igCheckUpdate);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -1990,10 +1990,14 @@ char* nodesGetFillModeString(EFillMode mode) {
|
|||
return "none";
|
||||
case FILL_MODE_VALUE:
|
||||
return "value";
|
||||
case FILL_MODE_VALUE_F:
|
||||
return "value_f";
|
||||
case FILL_MODE_PREV:
|
||||
return "prev";
|
||||
case FILL_MODE_NULL:
|
||||
return "null";
|
||||
case FILL_MODE_NULL_F:
|
||||
return "null_f";
|
||||
case FILL_MODE_LINEAR:
|
||||
return "linear";
|
||||
case FILL_MODE_NEXT:
|
||||
|
|
|
@ -544,6 +544,7 @@ stream_options(A) ::= stream_options(B) TRIGGER MAX_DELAY duration_literal(C).
|
|||
stream_options(A) ::= stream_options(B) WATERMARK duration_literal(C). { ((SStreamOptions*)B)->pWatermark = releaseRawExprNode(pCxt, C); A = B; }
|
||||
stream_options(A) ::= stream_options(B) IGNORE EXPIRED NK_INTEGER(C). { ((SStreamOptions*)B)->ignoreExpired = taosStr2Int8(C.z, NULL, 10); A = B; }
|
||||
stream_options(A) ::= stream_options(B) FILL_HISTORY NK_INTEGER(C). { ((SStreamOptions*)B)->fillHistory = taosStr2Int8(C.z, NULL, 10); A = B; }
|
||||
stream_options(A) ::= stream_options(B) IGNORE UPDATE NK_INTEGER(C). { ((SStreamOptions*)B)->ignoreUpdate = taosStr2Int8(C.z, NULL, 10); A = B; }
|
||||
|
||||
subtable_opt(A) ::= . { A = NULL; }
|
||||
subtable_opt(A) ::= SUBTABLE NK_LP expression(B) NK_RP. { A = releaseRawExprNode(pCxt, B); }
|
||||
|
@ -978,12 +979,14 @@ sliding_opt(A) ::= SLIDING NK_LP duration_literal(B) NK_RP.
|
|||
fill_opt(A) ::= . { A = NULL; }
|
||||
fill_opt(A) ::= FILL NK_LP fill_mode(B) NK_RP. { A = createFillNode(pCxt, B, NULL); }
|
||||
fill_opt(A) ::= FILL NK_LP VALUE NK_COMMA literal_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, B)); }
|
||||
fill_opt(A) ::= FILL NK_LP VALUE_F NK_COMMA literal_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE_F, createNodeListNode(pCxt, B)); }
|
||||
|
||||
%type fill_mode { EFillMode }
|
||||
%destructor fill_mode { }
|
||||
fill_mode(A) ::= NONE. { A = FILL_MODE_NONE; }
|
||||
fill_mode(A) ::= PREV. { A = FILL_MODE_PREV; }
|
||||
fill_mode(A) ::= NULL. { A = FILL_MODE_NULL; }
|
||||
fill_mode(A) ::= NULL_F. { A = FILL_MODE_NULL_F; }
|
||||
fill_mode(A) ::= LINEAR. { A = FILL_MODE_LINEAR; }
|
||||
fill_mode(A) ::= NEXT. { A = FILL_MODE_NEXT; }
|
||||
|
||||
|
@ -1075,4 +1078,4 @@ null_ordering_opt(A) ::= NULLS LAST.
|
|||
|
||||
%fallback ABORT AFTER ATTACH BEFORE BEGIN BITAND BITNOT BITOR BLOCKS CHANGE COMMA COMPACT CONCAT CONFLICT COPY DEFERRED DELIMITERS DETACH DIVIDE DOT EACH END FAIL
|
||||
FILE FOR GLOB ID IMMEDIATE IMPORT INITIALLY INSTEAD ISNULL KEY MODULES NK_BITNOT NK_SEMI NOTNULL OF PLUS PRIVILEGE RAISE REPLACE RESTRICT ROW SEMI STAR STATEMENT
|
||||
STRICT STRING TIMES UPDATE VALUES VARIABLE VIEW WAL.
|
||||
STRICT STRING TIMES VALUES VARIABLE VIEW WAL.
|
||||
|
|
|
@ -1763,6 +1763,7 @@ SNode* createStreamOptions(SAstCreateContext* pCxt) {
|
|||
pOptions->triggerType = STREAM_TRIGGER_AT_ONCE;
|
||||
pOptions->fillHistory = STREAM_DEFAULT_FILL_HISTORY;
|
||||
pOptions->ignoreExpired = STREAM_DEFAULT_IGNORE_EXPIRED;
|
||||
pOptions->ignoreUpdate = STREAM_DEFAULT_IGNORE_UPDATE;
|
||||
return (SNode*)pOptions;
|
||||
}
|
||||
|
||||
|
|
|
@ -148,6 +148,7 @@ static SKeyword keywordTable[] = {
|
|||
{"NOT", TK_NOT},
|
||||
{"NOW", TK_NOW},
|
||||
{"NULL", TK_NULL},
|
||||
{"NULL_F", TK_NULL_F},
|
||||
{"NULLS", TK_NULLS},
|
||||
{"OFFSET", TK_OFFSET},
|
||||
{"ON", TK_ON},
|
||||
|
@ -232,11 +233,13 @@ static SKeyword keywordTable[] = {
|
|||
{"TTL", TK_TTL},
|
||||
{"UNION", TK_UNION},
|
||||
{"UNSIGNED", TK_UNSIGNED},
|
||||
{"UPDATE", TK_UPDATE},
|
||||
{"USE", TK_USE},
|
||||
{"USER", TK_USER},
|
||||
{"USERS", TK_USERS},
|
||||
{"USING", TK_USING},
|
||||
{"VALUE", TK_VALUE},
|
||||
{"VALUE_F", TK_VALUE_F},
|
||||
{"VALUES", TK_VALUES},
|
||||
{"VARCHAR", TK_VARCHAR},
|
||||
{"VARIABLES", TK_VARIABLES},
|
||||
|
|
|
@ -1576,7 +1576,8 @@ static int32_t translateBlockDistFunc(STranslateContext* pCtx, SFunctionNode* pF
|
|||
TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType &&
|
||||
TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType))) {
|
||||
return generateSyntaxErrMsgExt(&pCtx->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
|
||||
"%s is only supported on super table, child table or normal table", pFunc->functionName);
|
||||
"%s is only supported on super table, child table or normal table",
|
||||
pFunc->functionName);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1604,6 +1605,7 @@ static int32_t translateMultiResFunc(STranslateContext* pCxt, SFunctionNode* pFu
|
|||
}
|
||||
if (tsKeepColumnName && 1 == LIST_LENGTH(pFunc->pParameterList) && !pFunc->node.asAlias) {
|
||||
strcpy(pFunc->node.userAlias, ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->userAlias);
|
||||
strcpy(pFunc->node.aliasName, pFunc->node.userAlias);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -2546,11 +2548,12 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
|
|||
int32_t len = 0;
|
||||
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
|
||||
SColumnNode* pCol = (SColumnNode*)pExpr;
|
||||
len = snprintf(buf, sizeof(buf), "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName);
|
||||
strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
|
||||
if (tsKeepColumnName) {
|
||||
strcpy(pFunc->node.userAlias, pCol->colName);
|
||||
strcpy(pFunc->node.aliasName, pCol->colName);
|
||||
} else {
|
||||
len = snprintf(buf, sizeof(buf), "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName);
|
||||
strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
|
||||
len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pCol->colName);
|
||||
strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
|
||||
}
|
||||
|
@ -2817,7 +2820,7 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList
|
|||
}
|
||||
|
||||
static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) {
|
||||
if (FILL_MODE_VALUE != pFill->mode) {
|
||||
if (FILL_MODE_VALUE != pFill->mode && FILL_MODE_VALUE_F != pFill->mode) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -5738,6 +5741,7 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt*
|
|||
pReq->watermark = (NULL != pStmt->pOptions->pWatermark ? ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i : 0);
|
||||
pReq->fillHistory = pStmt->pOptions->fillHistory;
|
||||
pReq->igExpired = pStmt->pOptions->ignoreExpired;
|
||||
pReq->igUpdate = pStmt->pOptions->ignoreUpdate;
|
||||
columnDefNodeToField(pStmt->pTags, &pReq->pTags);
|
||||
pReq->numOfTags = LIST_LENGTH(pStmt->pTags);
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@@ -643,7 +643,8 @@ TEST_F(ParserInitialCTest, createStream) {
auto setCreateStreamReq = [&](const char* pStream, const char* pSrcDb, const char* pSql, const char* pDstStb,
int8_t igExists = 0, int8_t triggerType = STREAM_TRIGGER_AT_ONCE, int64_t maxDelay = 0,
int64_t watermark = 0, int8_t igExpired = STREAM_DEFAULT_IGNORE_EXPIRED,
int8_t fillHistory = STREAM_DEFAULT_FILL_HISTORY) {
int8_t fillHistory = STREAM_DEFAULT_FILL_HISTORY,
int8_t igUpdate = STREAM_DEFAULT_IGNORE_UPDATE) {
snprintf(expect.name, sizeof(expect.name), "0.%s", pStream);
snprintf(expect.sourceDB, sizeof(expect.sourceDB), "0.%s", pSrcDb);
snprintf(expect.targetStbFullName, sizeof(expect.targetStbFullName), "0.test.%s", pDstStb);

@@ -654,6 +655,7 @@ TEST_F(ParserInitialCTest, createStream) {
expect.watermark = watermark;
expect.fillHistory = fillHistory;
expect.igExpired = igExpired;
expect.igUpdate = igUpdate;
};

auto addTag = [&](const char* pFieldName, uint8_t type, int32_t bytes = 0) {

@@ -699,6 +701,7 @@ TEST_F(ParserInitialCTest, createStream) {
ASSERT_EQ(pField->flags, pExpectField->flags);
}
}
ASSERT_EQ(req.igUpdate, expect.igUpdate);
tFreeSCMCreateStreamReq(&req);
});

@@ -708,12 +711,11 @@ TEST_F(ParserInitialCTest, createStream) {
setCreateStreamReq(
"s1", "test",
"create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired 0 fill_history 1 into st1 "
"as select count(*) from t1 interval(10s)",
"st1", 1, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_SECOND, 0, 1);
run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED 0 FILL_HISTORY 1 INTO st1 AS "
"SELECT COUNT(*) "
"FROM t1 INTERVAL(10S)");
"create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired 0 fill_history 1 ignore "
"update 1 into st1 as select count(*) from t1 interval(10s)",
"st1", 1, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_SECOND, 0, 1, 1);
run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED 0 FILL_HISTORY 1 IGNORE "
"UPDATE 1 INTO st1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
clearCreateStreamReq();

setCreateStreamReq("s1", "test",
@@ -343,6 +343,13 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pScan->node.groupAction = GROUP_ACTION_NONE;
pScan->node.resultDataOrder = DATA_ORDER_LEVEL_IN_BLOCK;
if (pCxt->pPlanCxt->streamQuery) {
pScan->triggerType = pCxt->pPlanCxt->triggerType;
pScan->watermark = pCxt->pPlanCxt->watermark;
pScan->deleteMark = pCxt->pPlanCxt->deleteMark;
pScan->igExpired = pCxt->pPlanCxt->igExpired;
pScan->igCheckUpdate = pCxt->pPlanCxt->igCheckUpdate;
}

// set columns to scan
if (TSDB_CODE_SUCCESS == code) {

@@ -705,6 +712,7 @@ static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStm
pWindow->watermark = pCxt->pPlanCxt->watermark;
pWindow->deleteMark = pCxt->pPlanCxt->deleteMark;
pWindow->igExpired = pCxt->pPlanCxt->igExpired;
pWindow->igCheckUpdate = pCxt->pPlanCxt->igCheckUpdate;
}
pWindow->inputTsOrder = ORDER_ASC;
pWindow->outputTsOrder = ORDER_ASC;

@@ -328,10 +328,6 @@ static void scanPathOptSetScanWin(SScanLogicNode* pScan) {
pScan->sliding = ((SWindowLogicNode*)pParent)->sliding;
pScan->intervalUnit = ((SWindowLogicNode*)pParent)->intervalUnit;
pScan->slidingUnit = ((SWindowLogicNode*)pParent)->slidingUnit;
pScan->triggerType = ((SWindowLogicNode*)pParent)->triggerType;
pScan->watermark = ((SWindowLogicNode*)pParent)->watermark;
pScan->deleteMark = ((SWindowLogicNode*)pParent)->deleteMark;
pScan->igExpired = ((SWindowLogicNode*)pParent)->igExpired;
}
}

@@ -582,6 +582,7 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
pTableScan->triggerType = pScanLogicNode->triggerType;
pTableScan->watermark = pScanLogicNode->watermark;
pTableScan->igExpired = pScanLogicNode->igExpired;
pTableScan->igCheckUpdate = pScanLogicNode->igCheckUpdate;
pTableScan->assignBlockUid = pCxt->pPlanCxt->rSmaQuery ? true : false;

int32_t code = createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode);
@@ -517,7 +517,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
}

if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
if (QW_PHASE_POST_FETCH != phase || qwTaskNotInExec(ctx)) {
if (QW_PHASE_POST_FETCH != phase || ((!QW_QUERY_RUNNING(ctx)) && qwTaskNotInExec(ctx))) {
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
QW_ERR_JRET(ctx->rspCode);
}

@@ -44,7 +44,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
goto _err;
}

pMeta->pTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
pMeta->pTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
if (pMeta->pTasks == NULL) {
goto _err;
}

@@ -128,7 +128,7 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
memset(statePath, 0, 1024);
tstrncpy(statePath, path, 1024);
}
if (tdbOpen(statePath, szPage, pages, &pState->pTdbState->db, 0) < 0) {
if (tdbOpen(statePath, szPage, pages, &pState->pTdbState->db, 1) < 0) {
goto _err;
}
@ -110,7 +110,7 @@ int64_t syncNodeCheckCommitIndex(SSyncNode* ths, SyncIndex indexLikely) {
|
|||
if (indexLikely > ths->commitIndex && syncNodeAgreedUpon(ths, indexLikely)) {
|
||||
SyncIndex commitIndex = indexLikely;
|
||||
syncNodeUpdateCommitIndex(ths, commitIndex);
|
||||
sTrace("vgId:%d, agreed upon. role:%d, term:%" PRId64 ", index: %" PRId64 "", ths->vgId, ths->state,
|
||||
sTrace("vgId:%d, agreed upon. role:%d, term:%" PRId64 ", index:%" PRId64 "", ths->vgId, ths->state,
|
||||
ths->raftStore.currentTerm, commitIndex);
|
||||
}
|
||||
return ths->commitIndex;
|
||||
|
|
|
@ -85,7 +85,7 @@ int64_t syncOpen(SSyncInfo* pSyncInfo) {
|
|||
int32_t syncStart(int64_t rid) {
|
||||
SSyncNode* pSyncNode = syncNodeAcquire(rid);
|
||||
if (pSyncNode == NULL) {
|
||||
sError("failed to acquire rid: %" PRId64 " of tsNodeReftId for pSyncNode", rid);
|
||||
sError("failed to acquire rid:%" PRId64 " of tsNodeReftId for pSyncNode", rid);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -756,7 +756,7 @@ int32_t syncNodeLogStoreRestoreOnNeed(SSyncNode* pNode) {
|
|||
SyncIndex lastVer = pNode->pLogStore->syncLogLastIndex(pNode->pLogStore);
|
||||
if (lastVer < commitIndex || firstVer > commitIndex + 1) {
|
||||
if (pNode->pLogStore->syncLogRestoreFromSnapshot(pNode->pLogStore, commitIndex)) {
|
||||
sError("vgId:%d, failed to restore log store from snapshot since %s. lastVer: %" PRId64 ", snapshotVer: %" PRId64,
|
||||
sError("vgId:%d, failed to restore log store from snapshot since %s. lastVer:%" PRId64 ", snapshotVer:%" PRId64,
|
||||
pNode->vgId, terrstr(), lastVer, commitIndex);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1101,7 +1101,7 @@ int32_t syncNodeRestore(SSyncNode* pSyncNode) {
|
|||
SyncIndex endIndex = pSyncNode->pLogBuf->endIndex;
|
||||
if (lastVer != -1 && endIndex != lastVer + 1) {
|
||||
terrno = TSDB_CODE_WAL_LOG_INCOMPLETE;
|
||||
sError("vgId:%d, failed to restore sync node since %s. expected lastLogIndex: %" PRId64 ", lastVer: %" PRId64 "",
|
||||
sError("vgId:%d, failed to restore sync node since %s. expected lastLogIndex:%" PRId64 ", lastVer:%" PRId64 "",
|
||||
pSyncNode->vgId, terrstr(), endIndex - 1, lastVer);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1820,7 +1820,7 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) {
|
|||
|
||||
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
|
||||
ASSERT(lastIndex >= 0);
|
||||
sInfo("vgId:%d, become leader. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64 "",
|
||||
sInfo("vgId:%d, become leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64 "",
|
||||
pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex);
|
||||
}
|
||||
|
||||
|
@ -1839,7 +1839,7 @@ void syncNodeFollower2Candidate(SSyncNode* pSyncNode) {
|
|||
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER);
|
||||
pSyncNode->state = TAOS_SYNC_STATE_CANDIDATE;
|
||||
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
|
||||
sInfo("vgId:%d, become candidate from follower. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64,
|
||||
sInfo("vgId:%d, become candidate from follower. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64,
|
||||
pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex);
|
||||
|
||||
sNTrace(pSyncNode, "follower to candidate");
|
||||
|
@ -1849,7 +1849,7 @@ void syncNodeLeader2Follower(SSyncNode* pSyncNode) {
|
|||
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_LEADER);
|
||||
syncNodeBecomeFollower(pSyncNode, "leader to follower");
|
||||
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
|
||||
sInfo("vgId:%d, become follower from leader. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64,
|
||||
sInfo("vgId:%d, become follower from leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64,
|
||||
pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex);
|
||||
|
||||
sNTrace(pSyncNode, "leader to follower");
|
||||
|
@ -1859,7 +1859,7 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode) {
|
|||
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_CANDIDATE);
|
||||
syncNodeBecomeFollower(pSyncNode, "candidate to follower");
|
||||
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
|
||||
sInfo("vgId:%d, become follower from candidate. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64,
|
||||
sInfo("vgId:%d, become follower from candidate. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64,
|
||||
pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex);
|
||||
|
||||
sNTrace(pSyncNode, "candidate to follower");
|
||||
|
@ -2299,7 +2299,7 @@ int32_t syncNodeAppend(SSyncNode* ths, SSyncRaftEntry* pEntry) {
|
|||
// proceed match index, with replicating on needed
|
||||
SyncIndex matchIndex = syncLogBufferProceed(ths->pLogBuf, ths, NULL);
|
||||
|
||||
sTrace("vgId:%d, append raft entry. index: %" PRId64 ", term: %" PRId64 " pBuf: [%" PRId64 " %" PRId64 " %" PRId64
|
||||
sTrace("vgId:%d, append raft entry. index:%" PRId64 ", term:%" PRId64 " pBuf: [%" PRId64 " %" PRId64 " %" PRId64
|
||||
", %" PRId64 ")",
|
||||
ths->vgId, pEntry->index, pEntry->term, ths->pLogBuf->startIndex, ths->pLogBuf->commitIndex,
|
||||
ths->pLogBuf->matchIndex, ths->pLogBuf->endIndex);
|
||||
|
@ -2472,7 +2472,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
|
|||
sError("vgId:%d, sync enqueue step-down msg error, code:%d", ths->vgId, code);
|
||||
rpcFreeCont(rpcMsgLocalCmd.pCont);
|
||||
} else {
|
||||
sTrace("vgId:%d, sync enqueue step-down msg, new-term: %" PRId64, ths->vgId, pSyncMsg->currentTerm);
|
||||
sTrace("vgId:%d, sync enqueue step-down msg, new-term:%" PRId64, ths->vgId, pSyncMsg->currentTerm);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2538,7 +2538,7 @@ int32_t syncNodeOnLocalCmd(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
|
|||
(void)syncNodeUpdateCommitIndex(ths, pMsg->commitIndex);
|
||||
}
|
||||
if (syncLogBufferCommit(ths->pLogBuf, ths, ths->commitIndex) < 0) {
|
||||
sError("vgId:%d, failed to commit raft log since %s. commit index: %" PRId64 "", ths->vgId, terrstr(),
|
||||
sError("vgId:%d, failed to commit raft log since %s. commit index:%" PRId64 "", ths->vgId, terrstr(),
|
||||
ths->commitIndex);
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -132,16 +132,16 @@ SSyncRaftEntry* syncEntryBuildDummy(SyncTerm term, SyncIndex index, int32_t vgId
|
|||
int32_t syncLogValidateAlignmentOfCommit(SSyncNode* pNode, SyncIndex commitIndex) {
|
||||
SyncIndex firstVer = pNode->pLogStore->syncLogBeginIndex(pNode->pLogStore);
|
||||
if (firstVer > commitIndex + 1) {
|
||||
sError("vgId:%d, firstVer of WAL log greater than tsdb commit version + 1. firstVer: %" PRId64
|
||||
", tsdb commit version: %" PRId64 "",
|
||||
sError("vgId:%d, firstVer of WAL log greater than tsdb commit version + 1. firstVer:%" PRId64
|
||||
", tsdb commit version:%" PRId64 "",
|
||||
pNode->vgId, firstVer, commitIndex);
|
||||
return -1;
|
||||
}
|
||||
|
||||
SyncIndex lastVer = pNode->pLogStore->syncLogLastIndex(pNode->pLogStore);
|
||||
if (lastVer < commitIndex) {
|
||||
sError("vgId:%d, lastVer of WAL log less than tsdb commit version. lastVer: %" PRId64
|
||||
", tsdb commit version: %" PRId64 "",
|
||||
sError("vgId:%d, lastVer of WAL log less than tsdb commit version. lastVer:%" PRId64
|
||||
", tsdb commit version:%" PRId64 "",
|
||||
pNode->vgId, lastVer, commitIndex);
|
||||
return -1;
|
||||
}
|
||||
|
@ -293,7 +293,7 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
|
|||
bool inBuf = true;
|
||||
|
||||
if (index <= pBuf->commitIndex) {
|
||||
sTrace("vgId:%d, already committed. index: %" PRId64 ", term: %" PRId64 ". log buffer: [%" PRId64 " %" PRId64
|
||||
sTrace("vgId:%d, already committed. index:%" PRId64 ", term:%" PRId64 ". log buffer: [%" PRId64 " %" PRId64
|
||||
" %" PRId64 ", %" PRId64 ")",
|
||||
pNode->vgId, pEntry->index, pEntry->term, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex,
|
||||
pBuf->endIndex);
|
||||
|
@ -306,7 +306,7 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
|
|||
}
|
||||
|
||||
if (index - pBuf->startIndex >= pBuf->size) {
|
||||
sWarn("vgId:%d, out of buffer range. index: %" PRId64 ", term: %" PRId64 ". log buffer: [%" PRId64 " %" PRId64
|
||||
sWarn("vgId:%d, out of buffer range. index:%" PRId64 ", term:%" PRId64 ". log buffer: [%" PRId64 " %" PRId64
|
||||
" %" PRId64 ", %" PRId64 ")",
|
||||
pNode->vgId, pEntry->index, pEntry->term, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex,
|
||||
pBuf->endIndex);
|
||||
|
@ -314,8 +314,8 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
|
|||
}
|
||||
|
||||
if (index > pBuf->matchIndex && lastMatchTerm != prevTerm) {
|
||||
sWarn("vgId:%d, not ready to accept. index: %" PRId64 ", term: %" PRId64 ": prevterm: %" PRId64
|
||||
" != lastmatch: %" PRId64 ". log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
|
||||
sWarn("vgId:%d, not ready to accept. index:%" PRId64 ", term:%" PRId64 ": prevterm:%" PRId64
|
||||
" != lastmatch:%" PRId64 ". log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
|
||||
pNode->vgId, pEntry->index, pEntry->term, prevTerm, lastMatchTerm, pBuf->startIndex, pBuf->commitIndex,
|
||||
pBuf->matchIndex, pBuf->endIndex);
|
||||
goto _out;
|
||||
|
@ -328,7 +328,7 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
|
|||
if (pEntry->term != pExist->term) {
|
||||
(void)syncLogBufferRollback(pBuf, pNode, index);
|
||||
} else {
|
||||
sTrace("vgId:%d, duplicate log entry received. index: %" PRId64 ", term: %" PRId64 ". log buffer: [%" PRId64
|
||||
sTrace("vgId:%d, duplicate log entry received. index:%" PRId64 ", term:%" PRId64 ". log buffer: [%" PRId64
|
||||
" %" PRId64 " %" PRId64 ", %" PRId64 ")",
|
||||
pNode->vgId, pEntry->index, pEntry->term, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex,
|
||||
pBuf->endIndex);
|
||||
|
@ -434,7 +434,7 @@ int64_t syncLogBufferProceed(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncTerm* p
|
|||
// increase match index
|
||||
pBuf->matchIndex = index;
|
||||
|
||||
sTrace("vgId:%d, log buffer proceed. start index: %" PRId64 ", match index: %" PRId64 ", end index: %" PRId64,
|
||||
sTrace("vgId:%d, log buffer proceed. start index:%" PRId64 ", match index:%" PRId64 ", end index:%" PRId64,
|
||||
pNode->vgId, pBuf->startIndex, pBuf->matchIndex, pBuf->endIndex);
|
||||
|
||||
// replicate on demand
|
||||
|
@ -475,7 +475,7 @@ int32_t syncLogFsmExecute(SSyncNode* pNode, SSyncFSM* pFsm, ESyncState role, Syn
|
|||
}
|
||||
|
||||
if (pEntry->originalRpcType == TDMT_VND_COMMIT) {
|
||||
sInfo("vgId:%d, fsm execute vnode commit. index: %" PRId64 ", term: %" PRId64 "", pNode->vgId, pEntry->index,
|
||||
sInfo("vgId:%d, fsm execute vnode commit. index:%" PRId64 ", term:%" PRId64 "", pNode->vgId, pEntry->index,
|
||||
pEntry->term);
|
||||
}
|
||||
|
||||
|
@ -528,7 +528,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm
|
|||
goto _out;
|
||||
}
|
||||
|
||||
sTrace("vgId:%d, commit. log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 "), role: %d, term: %" PRId64,
|
||||
sTrace("vgId:%d, commit. log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 "), role:%d, term:%" PRId64,
|
||||
pNode->vgId, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, role, term);
|
||||
|
||||
// execute in fsm
|
||||
|
@ -541,19 +541,19 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm
|
|||
|
||||
// execute it
|
||||
if (!syncUtilUserCommit(pEntry->originalRpcType)) {
|
||||
sInfo("vgId:%d, commit sync barrier. index: %" PRId64 ", term:%" PRId64 ", type: %s", vgId, pEntry->index,
|
||||
sInfo("vgId:%d, commit sync barrier. index:%" PRId64 ", term:%" PRId64 ", type:%s", vgId, pEntry->index,
|
||||
pEntry->term, TMSG_INFO(pEntry->originalRpcType));
|
||||
}
|
||||
|
||||
if (syncLogFsmExecute(pNode, pFsm, role, term, pEntry, 0) != 0) {
|
||||
sError("vgId:%d, failed to execute sync log entry. index:%" PRId64 ", term:%" PRId64
|
||||
", role: %d, current term: %" PRId64,
|
||||
", role:%d, current term:%" PRId64,
|
||||
vgId, pEntry->index, pEntry->term, role, term);
|
||||
goto _out;
|
||||
}
|
||||
pBuf->commitIndex = index;
|
||||
|
||||
sTrace("vgId:%d, committed index: %" PRId64 ", term: %" PRId64 ", role: %d, current term: %" PRId64 "", pNode->vgId,
|
||||
sTrace("vgId:%d, committed index:%" PRId64 ", term:%" PRId64 ", role:%d, current term:%" PRId64 "", pNode->vgId,
|
||||
pEntry->index, pEntry->term, role, term);
|
||||
|
||||
if (!inBuf) {
|
||||
|
@ -614,7 +614,7 @@ int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
|
|||
SRaftId* pDestId = &pNode->replicasId[pMgr->peerId];
|
||||
if (pMgr->retryBackoff == SYNC_MAX_RETRY_BACKOFF) {
|
||||
syncLogReplMgrReset(pMgr);
|
||||
sWarn("vgId:%d, reset sync log repl mgr since retry backoff exceeding limit. peer: %" PRIx64, pNode->vgId,
|
||||
sWarn("vgId:%d, reset sync log repl mgr since retry backoff exceeding limit. peer:%" PRIx64, pNode->vgId,
|
||||
pDestId->addr);
|
||||
return -1;
|
||||
}
|
||||
|
@ -639,7 +639,7 @@ int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
|
|||
if (pMgr->states[pos].acked) {
|
||||
if (pMgr->matchIndex < index && pMgr->states[pos].timeMs + (syncGetRetryMaxWaitMs() << 3) < nowMs) {
|
||||
syncLogReplMgrReset(pMgr);
|
||||
sWarn("vgId:%d, reset sync log repl mgr since stagnation. index: %" PRId64 ", peer: %" PRIx64, pNode->vgId,
|
||||
sWarn("vgId:%d, reset sync log repl mgr since stagnation. index:%" PRId64 ", peer:%" PRIx64, pNode->vgId,
|
||||
index, pDestId->addr);
|
||||
goto _out;
|
||||
}
|
||||
|
@ -648,7 +648,7 @@ int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
|
|||
|
||||
bool barrier = false;
|
||||
if (syncLogReplMgrReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
|
||||
sError("vgId:%d, failed to replicate sync log entry since %s. index: %" PRId64 ", dest: %" PRIx64 "", pNode->vgId,
|
||||
sError("vgId:%d, failed to replicate sync log entry since %s. index:%" PRId64 ", dest:%" PRIx64 "", pNode->vgId,
|
||||
terrstr(), index, pDestId->addr);
|
||||
goto _out;
|
||||
}
|
||||
|
@ -670,8 +670,8 @@ _out:
|
|||
if (retried) {
|
||||
pMgr->retryBackoff = syncLogGetNextRetryBackoff(pMgr);
|
||||
SSyncLogBuffer* pBuf = pNode->pLogBuf;
|
||||
sInfo("vgId:%d, resend %d sync log entries. dest: %" PRIx64 ", indexes: %" PRId64 " ..., terms: ... %" PRId64
|
||||
", retryWaitMs: %" PRId64 ", mgr: [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64
|
||||
sInfo("vgId:%d, resend %d sync log entries. dest:%" PRIx64 ", indexes:%" PRId64 " ..., terms: ... %" PRId64
|
||||
", retryWaitMs:%" PRId64 ", mgr: [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64
|
||||
" %" PRId64 ", %" PRId64 ")",
|
||||
pNode->vgId, count, pDestId->addr, firstIndex, term, retryWaitMs, pMgr->startIndex, pMgr->matchIndex,
|
||||
pMgr->endIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);
|
||||
|
@ -714,7 +714,7 @@ int32_t syncLogReplMgrProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* p
|
|||
}
|
||||
|
||||
if (pMsg->success == false && pMsg->matchIndex >= pMsg->lastSendIndex) {
|
||||
sWarn("vgId:%d, failed to rollback match index. peer: dnode:%d, match index: %" PRId64 ", last sent: %" PRId64,
|
||||
sWarn("vgId:%d, failed to rollback match index. peer: dnode:%d, match index:%" PRId64 ", last sent:%" PRId64,
|
||||
pNode->vgId, DID(&destId), pMsg->matchIndex, pMsg->lastSendIndex);
|
||||
if (syncNodeStartSnapshot(pNode, &destId) < 0) {
|
||||
sError("vgId:%d, failed to start snapshot for peer dnode:%d", pNode->vgId, DID(&destId));
|
||||
|
@ -761,7 +761,7 @@ int32_t syncLogReplMgrProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pN
|
|||
SSyncLogBuffer* pBuf = pNode->pLogBuf;
|
||||
taosThreadMutexLock(&pBuf->mutex);
|
||||
if (pMsg->startTime != 0 && pMsg->startTime != pMgr->peerStartTime) {
|
||||
sInfo("vgId:%d, reset sync log repl mgr in heartbeat. peer: %" PRIx64 ", start time:%" PRId64 ", old:%" PRId64 "",
|
||||
sInfo("vgId:%d, reset sync log repl mgr in heartbeat. peer:%" PRIx64 ", start time:%" PRId64 ", old:%" PRId64 "",
|
||||
pNode->vgId, pMsg->srcId.addr, pMsg->startTime, pMgr->peerStartTime);
|
||||
syncLogReplMgrReset(pMgr);
|
||||
pMgr->peerStartTime = pMsg->startTime;
|
||||
|
@ -774,7 +774,7 @@ int32_t syncLogReplMgrProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sync
|
|||
SSyncLogBuffer* pBuf = pNode->pLogBuf;
|
||||
taosThreadMutexLock(&pBuf->mutex);
|
||||
if (pMsg->startTime != pMgr->peerStartTime) {
|
||||
sInfo("vgId:%d, reset sync log repl mgr in appendlog reply. peer: %" PRIx64 ", start time:%" PRId64
|
||||
sInfo("vgId:%d, reset sync log repl mgr in appendlog reply. peer:%" PRIx64 ", start time:%" PRId64
|
||||
", old:%" PRId64,
|
||||
pNode->vgId, pMsg->srcId.addr, pMsg->startTime, pMgr->peerStartTime);
|
||||
syncLogReplMgrReset(pMgr);
|
||||
|
@ -815,7 +815,7 @@ int32_t syncLogReplMgrReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
|
|||
bool barrier = false;
|
||||
SyncTerm term = -1;
|
||||
if (syncLogReplMgrReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
|
||||
sError("vgId:%d, failed to replicate log entry since %s. index: %" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
|
||||
sError("vgId:%d, failed to replicate log entry since %s. index:%" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
|
||||
terrstr(), index, pDestId->addr);
|
||||
return -1;
|
||||
}
|
||||
|
@ -830,7 +830,7 @@ int32_t syncLogReplMgrReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
|
|||
pMgr->endIndex = index + 1;
|
||||
|
||||
SSyncLogBuffer* pBuf = pNode->pLogBuf;
|
||||
sTrace("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term: %" PRId64 ". mgr (rs:%d): [%" PRId64
|
||||
sTrace("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term:%" PRId64 ". mgr (rs:%d): [%" PRId64
|
||||
" %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
|
||||
pNode->vgId, pDestId->addr, index, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex,
|
||||
pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);
|
||||
|
@ -860,7 +860,7 @@ int32_t syncLogReplMgrReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode)
|
|||
bool barrier = false;
|
||||
SyncTerm term = -1;
|
||||
if (syncLogReplMgrReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
|
||||
sError("vgId:%d, failed to replicate log entry since %s. index: %" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
|
||||
sError("vgId:%d, failed to replicate log entry since %s. index:%" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
|
||||
terrstr(), index, pDestId->addr);
|
||||
return -1;
|
||||
}
|
||||
|
@ -874,7 +874,7 @@ int32_t syncLogReplMgrReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode)
|
|||
|
||||
pMgr->endIndex = index + 1;
|
||||
if (barrier) {
|
||||
sInfo("vgId:%d, replicated sync barrier to dest: %" PRIx64 ". index: %" PRId64 ", term: %" PRId64
|
||||
sInfo("vgId:%d, replicated sync barrier to dest:%" PRIx64 ". index:%" PRId64 ", term:%" PRId64
|
||||
", repl mgr: rs(%d) [%" PRId64 " %" PRId64 ", %" PRId64 ")",
|
||||
pNode->vgId, pDestId->addr, index, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex,
|
||||
pMgr->endIndex);
|
||||
|
@ -885,7 +885,7 @@ int32_t syncLogReplMgrReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode)
|
|||
syncLogReplMgrRetryOnNeed(pMgr, pNode);
|
||||
|
||||
SSyncLogBuffer* pBuf = pNode->pLogBuf;
|
||||
sTrace("vgId:%d, replicated %d msgs to peer: %" PRIx64 ". indexes: %" PRId64 "..., terms: ...%" PRId64
|
||||
sTrace("vgId:%d, replicated %d msgs to peer:%" PRIx64 ". indexes:%" PRId64 "..., terms: ...%" PRId64
|
||||
", mgr: (rs:%d) [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64
|
||||
")",
|
||||
pNode->vgId, count, pDestId->addr, firstIndex, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex,
|
||||
|
@ -1028,7 +1028,7 @@ int32_t syncLogBufferRollback(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncIndex
|
|||
return 0;
|
||||
}
|
||||
|
||||
sInfo("vgId:%d, rollback sync log buffer. toindex: %" PRId64 ", buffer: [%" PRId64 " %" PRId64 " %" PRId64
|
||||
sInfo("vgId:%d, rollback sync log buffer. toindex:%" PRId64 ", buffer: [%" PRId64 " %" PRId64 " %" PRId64
|
||||
", %" PRId64 ")",
|
||||
pNode->vgId, toIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);
|
||||
|
||||
|
@ -1119,11 +1119,11 @@ int32_t syncLogReplMgrReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
|
|||
|
||||
pEntry = syncLogBufferGetOneEntry(pBuf, pNode, index, &inBuf);
|
||||
if (pEntry == NULL) {
|
||||
sError("vgId:%d, failed to get raft entry for index: %" PRId64 "", pNode->vgId, index);
|
||||
sError("vgId:%d, failed to get raft entry for index:%" PRId64 "", pNode->vgId, index);
|
||||
if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) {
|
||||
SSyncLogReplMgr* pMgr = syncNodeGetLogReplMgr(pNode, pDestId);
|
||||
if (pMgr) {
|
||||
sInfo("vgId:%d, reset sync log repl mgr of peer: %" PRIx64 " since %s. index: %" PRId64, pNode->vgId,
|
||||
sInfo("vgId:%d, reset sync log repl mgr of peer:%" PRIx64 " since %s. index:%" PRId64, pNode->vgId,
|
||||
pDestId->addr, terrstr(), index);
|
||||
(void)syncLogReplMgrReset(pMgr);
|
||||
}
|
||||
|
@ -1134,7 +1134,7 @@ int32_t syncLogReplMgrReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
|
|||
|
||||
prevLogTerm = syncLogReplMgrGetPrevLogTerm(pMgr, pNode, index);
|
||||
if (prevLogTerm < 0) {
|
||||
sError("vgId:%d, failed to get prev log term since %s. index: %" PRId64 "", pNode->vgId, terrstr(), index);
|
||||
sError("vgId:%d, failed to get prev log term since %s. index:%" PRId64 "", pNode->vgId, terrstr(), index);
|
||||
goto _err;
|
||||
}
|
||||
if (pTerm) *pTerm = pEntry->term;
|
||||
|
@ -1147,7 +1147,7 @@ int32_t syncLogReplMgrReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
|
|||
|
||||
(void)syncNodeSendAppendEntries(pNode, pDestId, &msgOut);
|
||||
|
||||
sTrace("vgId:%d, replicate one msg index: %" PRId64 " term: %" PRId64 " prevterm: %" PRId64 " to dest: 0x%016" PRIx64,
|
||||
sTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64,
|
||||
pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr);
|
||||
|
||||
if (!inBuf) {
|
||||
|
|
|
@ -91,7 +91,7 @@ SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId)
|
|||
|
||||
void syncEntryDestroy(SSyncRaftEntry* pEntry) {
|
||||
if (pEntry != NULL) {
|
||||
sTrace("free entry: %p", pEntry);
|
||||
sTrace("free entry:%p", pEntry);
|
||||
taosMemoryFree(pEntry);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -249,6 +249,7 @@ int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncR
|
|||
|
||||
int64_t ts2 = taosGetTimestampNs();
|
||||
code = walReadVer(pWalHandle, index);
|
||||
walReadReset(pWalHandle);
|
||||
int64_t ts3 = taosGetTimestampNs();
|
||||
|
||||
// code = walReadVerCached(pWalHandle, index);
|
||||
|
|
|
@ -43,7 +43,7 @@ void syncUtilNodeInfo2EpSet(const SNodeInfo* pInfo, SEpSet* pEpSet) {
|
|||
bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* raftId) {
|
||||
uint32_t ipv4 = taosGetIpv4FromFqdn(pInfo->nodeFqdn);
|
||||
if (ipv4 == 0xFFFFFFFF || ipv4 == 1) {
|
||||
sError("failed to resolve ipv4 addr, fqdn: %s", pInfo->nodeFqdn);
|
||||
sError("failed to resolve ipv4 addr, fqdn:%s", pInfo->nodeFqdn);
|
||||
terrno = TSDB_CODE_TSC_INVALID_FQDN;
|
||||
return false;
|
||||
}
|
||||
|
|
|
@@ -404,62 +404,50 @@ static int tdbPageFree(SPage *pPage, int idx, SCell *pCell, int szCell) {
return 0;
}

static int tdbPageDefragment(SPage *pPage) {
int nFree;
int nCells;
SCell *pCell;
SCell *pNextCell;
SCell *pTCell;
int szCell;
int idx;
int iCell;

nFree = TDB_PAGE_NFREE(pPage);
nCells = TDB_PAGE_NCELLS(pPage);

ASSERT(pPage->pFreeEnd - pPage->pFreeStart < nFree);

// Loop to compact the page content
// Here we use an O(n^2) algorithm to do the job since
// this is a low frequency job.
pNextCell = (u8 *)pPage->pPageFtr;
pCell = NULL;
for (iCell = 0;; iCell++) {
// compact over
if (iCell == nCells) {
pPage->pFreeEnd = pNextCell;
break;
}

for (int i = 0; i < nCells; i++) {
if (TDB_PAGE_CELL_OFFSET_AT(pPage, i) < pNextCell - pPage->pData) {
pTCell = TDB_PAGE_CELL_AT(pPage, i);
if (pCell == NULL || pCell < pTCell) {
pCell = pTCell;
idx = i;
}
} else {
continue;
}
}

ASSERT(pCell != NULL);

szCell = (*pPage->xCellSize)(pPage, pCell, 0, NULL, NULL);

ASSERT(pCell + szCell <= pNextCell);
if (pCell + szCell < pNextCell) {
memmove(pNextCell - szCell, pCell, szCell);
}

pCell = NULL;
pNextCell = pNextCell - szCell;
TDB_PAGE_CELL_OFFSET_AT_SET(pPage, idx, pNextCell - pPage->pData);
typedef struct {
int32_t iCell;
int32_t offset;
} SCellIdx;
static int32_t tCellIdxCmprFn(const void *p1, const void *p2) {
if (((SCellIdx *)p1)->offset < ((SCellIdx *)p2)->offset) {
return -1;
} else if (((SCellIdx *)p1)->offset > ((SCellIdx *)p2)->offset) {
return 1;
} else {
return 0;
}
}
static int tdbPageDefragment(SPage *pPage) {
int32_t nFree = TDB_PAGE_NFREE(pPage);
int32_t nCell = TDB_PAGE_NCELLS(pPage);

ASSERT(pPage->pFreeEnd - pPage->pFreeStart == nFree);
SCellIdx *aCellIdx = (SCellIdx *)tdbOsMalloc(sizeof(SCellIdx) * nCell);
if (aCellIdx == NULL) return -1;
for (int32_t iCell = 0; iCell < nCell; iCell++) {
aCellIdx[iCell].iCell = iCell;
aCellIdx[iCell].offset = TDB_PAGE_CELL_OFFSET_AT(pPage, iCell);
}
taosSort(aCellIdx, nCell, sizeof(SCellIdx), tCellIdxCmprFn);

SCell *pNextCell = (u8 *)pPage->pPageFtr;
for (int32_t iCell = nCell - 1; iCell >= 0; iCell--) {
SCell *pCell = TDB_PAGE_CELL_AT(pPage, aCellIdx[iCell].iCell);
int32_t szCell = pPage->xCellSize(pPage, pCell, 0, NULL, NULL);

ASSERT(pNextCell - szCell >= pCell);

pNextCell -= szCell;
if (pNextCell > pCell) {
memmove(pNextCell, pCell, szCell);
TDB_PAGE_CELL_OFFSET_AT_SET(pPage, aCellIdx[iCell].iCell, pNextCell - pPage->pData);
}
}
pPage->pFreeEnd = pNextCell;
TDB_PAGE_CCELLS_SET(pPage, pPage->pFreeEnd - pPage->pData);
TDB_PAGE_FCELL_SET(pPage, 0);
tdbOsFree(aCellIdx);

ASSERT(pPage->pFreeEnd - pPage->pFreeStart == nFree);

return 0;
}
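The rewritten tdbPageDefragment above sorts the cell slots by their byte offset and then packs each cell against the page footer, replacing the earlier O(n^2) search per cell. Below is a minimal, self-contained sketch of that packing idea on a plain byte buffer; the names CellRef and pack_cells are illustrative stand-ins, not part of the TDB source.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for TDB's SCellIdx: one entry per cell slot. */
typedef struct {
  int idx;     /* slot index in the cell pointer array */
  int offset;  /* current byte offset of the cell payload */
  int size;    /* payload size in bytes */
} CellRef;

static int cmpByOffset(const void *a, const void *b) {
  const CellRef *x = (const CellRef *)a, *y = (const CellRef *)b;
  return (x->offset > y->offset) - (x->offset < y->offset);
}

/* Pack all cells against the end of the page so the free space becomes one hole. */
static int pack_cells(unsigned char *page, int pageSize, CellRef *cells, int nCell) {
  qsort(cells, nCell, sizeof(CellRef), cmpByOffset);
  int writeEnd = pageSize;                 /* grows downward, like pPageFtr */
  for (int i = nCell - 1; i >= 0; i--) {   /* highest offset first */
    writeEnd -= cells[i].size;
    if (writeEnd > cells[i].offset) {
      memmove(page + writeEnd, page + cells[i].offset, cells[i].size);
      cells[i].offset = writeEnd;          /* record the new slot offset */
    }
  }
  return writeEnd;                         /* start of the packed region */
}

int main(void) {
  unsigned char page[64] = {0};
  memcpy(page + 10, "AAAA", 4);
  memcpy(page + 30, "BB", 2);
  CellRef cells[2] = {{0, 10, 4}, {1, 30, 2}};
  int freeEnd = pack_cells(page, sizeof(page), cells, 2);
  printf("cells packed from offset %d\n", freeEnd); /* expect 58 */
  return 0;
}
```

Sorting once makes the compaction O(n log n) while keeping the relative order of cells by offset, which is what lets the loop move each cell at most once.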
@@ -6,7 +6,11 @@ target_link_libraries(tdbTest tdb gtest gtest_main)
add_executable(tdbUtilTest "tdbUtilTest.cpp")
target_link_libraries(tdbUtilTest tdb gtest gtest_main)

# tdbUtilTest
# overflow pages testing
add_executable(tdbExOVFLTest "tdbExOVFLTest.cpp")
target_link_libraries(tdbExOVFLTest tdb gtest gtest_main)

# page defragment testing
add_executable(tdbPageDefragmentTest "tdbPageDefragmentTest.cpp")
target_link_libraries(tdbPageDefragmentTest tdb gtest gtest_main)
@ -0,0 +1,722 @@
|
|||
#include <gtest/gtest.h>
|
||||
|
||||
#define ALLOW_FORBID_FUNC
|
||||
#include "os.h"
|
||||
#include "tdb.h"
|
||||
|
||||
#include <shared_mutex>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
#include "tlog.h"
|
||||
|
||||
typedef struct SPoolMem {
|
||||
int64_t size;
|
||||
struct SPoolMem *prev;
|
||||
struct SPoolMem *next;
|
||||
} SPoolMem;
|
||||
|
||||
static SPoolMem *openPool() {
|
||||
SPoolMem *pPool = (SPoolMem *)taosMemoryMalloc(sizeof(*pPool));
|
||||
|
||||
pPool->prev = pPool->next = pPool;
|
||||
pPool->size = 0;
|
||||
|
||||
return pPool;
|
||||
}
|
||||
|
||||
static void clearPool(SPoolMem *pPool) {
|
||||
SPoolMem *pMem;
|
||||
|
||||
do {
|
||||
pMem = pPool->next;
|
||||
|
||||
if (pMem == pPool) break;
|
||||
|
||||
pMem->next->prev = pMem->prev;
|
||||
pMem->prev->next = pMem->next;
|
||||
pPool->size -= pMem->size;
|
||||
|
||||
taosMemoryFree(pMem);
|
||||
} while (1);
|
||||
|
||||
assert(pPool->size == 0);
|
||||
}
|
||||
|
||||
static void closePool(SPoolMem *pPool) {
|
||||
clearPool(pPool);
|
||||
taosMemoryFree(pPool);
|
||||
}
|
||||
|
||||
static void *poolMalloc(void *arg, size_t size) {
|
||||
void *ptr = NULL;
|
||||
SPoolMem *pPool = (SPoolMem *)arg;
|
||||
SPoolMem *pMem;
|
||||
|
||||
pMem = (SPoolMem *)taosMemoryMalloc(sizeof(*pMem) + size);
|
||||
if (pMem == NULL) {
|
||||
assert(0);
|
||||
}
|
||||
|
||||
pMem->size = sizeof(*pMem) + size;
|
||||
pMem->next = pPool->next;
|
||||
pMem->prev = pPool;
|
||||
|
||||
pPool->next->prev = pMem;
|
||||
pPool->next = pMem;
|
||||
pPool->size += pMem->size;
|
||||
|
||||
ptr = (void *)(&pMem[1]);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static void poolFree(void *arg, void *ptr) {
|
||||
SPoolMem *pPool = (SPoolMem *)arg;
|
||||
SPoolMem *pMem;
|
||||
|
||||
pMem = &(((SPoolMem *)ptr)[-1]);
|
||||
|
||||
pMem->next->prev = pMem->prev;
|
||||
pMem->prev->next = pMem->next;
|
||||
pPool->size -= pMem->size;
|
||||
|
||||
taosMemoryFree(pMem);
|
||||
}
|
||||
|
||||
static int tKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
|
||||
int k1, k2;
|
||||
|
||||
std::string s1((char *)pKey1 + 3, kLen1 - 3);
|
||||
std::string s2((char *)pKey2 + 3, kLen2 - 3);
|
||||
k1 = stoi(s1);
|
||||
k2 = stoi(s2);
|
||||
|
||||
if (k1 < k2) {
|
||||
return -1;
|
||||
} else if (k1 > k2) {
|
||||
return 1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int tDefaultKeyCmpr(const void *pKey1, int keyLen1, const void *pKey2, int keyLen2) {
|
||||
int mlen;
|
||||
int cret;
|
||||
|
||||
ASSERT(keyLen1 > 0 && keyLen2 > 0 && pKey1 != NULL && pKey2 != NULL);
|
||||
|
||||
mlen = keyLen1 < keyLen2 ? keyLen1 : keyLen2;
|
||||
cret = memcmp(pKey1, pKey2, mlen);
|
||||
if (cret == 0) {
|
||||
if (keyLen1 < keyLen2) {
|
||||
cret = -1;
|
||||
} else if (keyLen1 > keyLen2) {
|
||||
cret = 1;
|
||||
} else {
|
||||
cret = 0;
|
||||
}
|
||||
}
|
||||
return cret;
|
||||
}
|
||||
|
||||
// TEST(TdbPageDefragmentTest, DISABLED_TbUpsertTest) {
|
||||
// TEST(TdbPageDefragmentTest, TbUpsertTest) {
|
||||
//}
|
||||
|
||||
// TEST(TdbPageDefragmentTest, DISABLED_TbPGetTest) {
|
||||
// TEST(TdbPageDefragmentTest, TbPGetTest) {
|
||||
//}
|
||||
|
||||
static void generateBigVal(char *val, int valLen) {
|
||||
for (int i = 0; i < valLen; ++i) {
|
||||
char c = char(i & 0xff);
|
||||
if (c == 0) {
|
||||
c = 1;
|
||||
}
|
||||
val[i] = c;
|
||||
}
|
||||
}
|
||||
|
||||
static TDB *openEnv(char const *envName, int const pageSize, int const pageNum) {
|
||||
TDB *pEnv = NULL;
|
||||
|
||||
int ret = tdbOpen(envName, pageSize, pageNum, &pEnv, 0);
|
||||
if (ret) {
|
||||
pEnv = NULL;
|
||||
}
|
||||
|
||||
return pEnv;
|
||||
}
|
||||
|
||||
static void insertOfp(void) {
|
||||
int ret = 0;
|
||||
|
||||
taosRemoveDir("tdb");
|
||||
|
||||
// open Env
|
||||
int const pageSize = 4096;
|
||||
int const pageNum = 64;
|
||||
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
|
||||
GTEST_ASSERT_NE(pEnv, nullptr);
|
||||
|
||||
// open db
|
||||
TTB *pDb = NULL;
|
||||
tdb_cmpr_fn_t compFunc = tKeyCmpr;
|
||||
// ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
|
||||
ret = tdbTbOpen("ofp_insert.db", 12, -1, compFunc, pEnv, &pDb, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// open the pool
|
||||
SPoolMem *pPool = openPool();
|
||||
|
||||
// start a transaction
|
||||
TXN *txn = NULL;
|
||||
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
|
||||
// generate value payload
|
||||
// char val[((4083 - 4 - 3 - 2) + 1) * 100]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
char val[32605];
|
||||
int valLen = sizeof(val) / sizeof(val[0]);
|
||||
generateBigVal(val, valLen);
|
||||
|
||||
// insert the generated big data
|
||||
// char const *key = "key1";
|
||||
char const *key = "key123456789";
|
||||
ret = tdbTbInsert(pDb, key, strlen(key), val, valLen, txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// commit current transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
}
|
||||
|
||||
TEST(TdbPageDefragmentTest, DISABLED_TbInsertTest) {
|
||||
// TEST(TdbPageDefragmentTest, TbInsertTest) {
|
||||
// ofp inserting
|
||||
insertOfp();
|
||||
}
|
||||
|
||||
TEST(TdbPageDefragmentTest, DISABLED_TbGetTest) {
|
||||
// TEST(TdbPageDefragmentTest, TbGetTest) {
|
||||
insertOfp();
|
||||
|
||||
// open Env
|
||||
int const pageSize = 4096;
|
||||
int const pageNum = 64;
|
||||
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
|
||||
GTEST_ASSERT_NE(pEnv, nullptr);
|
||||
|
||||
// open db
|
||||
TTB *pDb = NULL;
|
||||
tdb_cmpr_fn_t compFunc = tKeyCmpr;
|
||||
// int ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
|
||||
int ret = tdbTbOpen("ofp_insert.db", 12, -1, compFunc, pEnv, &pDb, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// generate value payload
|
||||
// char val[((4083 - 4 - 3 - 2) + 1) * 100]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
char val[32605];
|
||||
int valLen = sizeof(val) / sizeof(val[0]);
|
||||
generateBigVal(val, valLen);
|
||||
|
||||
{ // Query the data
|
||||
void *pVal = NULL;
|
||||
int vLen;
|
||||
|
||||
// char const *key = "key1";
|
||||
char const *key = "key123456789";
|
||||
ret = tdbTbGet(pDb, key, strlen(key), &pVal, &vLen);
|
||||
ASSERT(ret == 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
GTEST_ASSERT_EQ(vLen, valLen);
|
||||
GTEST_ASSERT_EQ(memcmp(val, pVal, vLen), 0);
|
||||
|
||||
tdbFree(pVal);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(TdbPageDefragmentTest, DISABLED_TbDeleteTest) {
|
||||
// TEST(TdbPageDefragmentTest, TbDeleteTest) {
|
||||
int ret = 0;
|
||||
|
||||
taosRemoveDir("tdb");
|
||||
|
||||
// open Env
|
||||
int const pageSize = 4096;
|
||||
int const pageNum = 64;
|
||||
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
|
||||
GTEST_ASSERT_NE(pEnv, nullptr);
|
||||
|
||||
// open db
|
||||
TTB *pDb = NULL;
|
||||
tdb_cmpr_fn_t compFunc = tKeyCmpr;
|
||||
ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// open the pool
|
||||
SPoolMem *pPool = openPool();
|
||||
|
||||
// start a transaction
|
||||
TXN *txn;
|
||||
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
|
||||
// generate value payload
|
||||
// char val[((4083 - 4 - 3 - 2) + 1) * 100]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
char val[((4083 - 4 - 3 - 2) + 1) * 2]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
int valLen = sizeof(val) / sizeof(val[0]);
|
||||
generateBigVal(val, valLen);
|
||||
|
||||
{ // insert the generated big data
|
||||
ret = tdbTbInsert(pDb, "key1", strlen("key1"), val, valLen, txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
}
|
||||
|
||||
{ // query the data
|
||||
void *pVal = NULL;
|
||||
int vLen;
|
||||
|
||||
ret = tdbTbGet(pDb, "key1", strlen("key1"), &pVal, &vLen);
|
||||
ASSERT(ret == 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
GTEST_ASSERT_EQ(vLen, valLen);
|
||||
GTEST_ASSERT_EQ(memcmp(val, pVal, vLen), 0);
|
||||
|
||||
tdbFree(pVal);
|
||||
}
|
||||
/* open to debug committed file
|
||||
tdbCommit(pEnv, &txn);
|
||||
tdbTxnClose(&txn);
|
||||
|
||||
++txnid;
|
||||
tdbTxnOpen(&txn, txnid, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
tdbBegin(pEnv, &txn);
|
||||
*/
|
||||
{ // upsert the data
|
||||
ret = tdbTbUpsert(pDb, "key1", strlen("key1"), "value1", strlen("value1"), txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
}
|
||||
|
||||
{ // query the upserted data
|
||||
void *pVal = NULL;
|
||||
int vLen;
|
||||
|
||||
ret = tdbTbGet(pDb, "key1", strlen("key1"), &pVal, &vLen);
|
||||
ASSERT(ret == 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
GTEST_ASSERT_EQ(vLen, strlen("value1"));
|
||||
GTEST_ASSERT_EQ(memcmp("value1", pVal, vLen), 0);
|
||||
|
||||
tdbFree(pVal);
|
||||
}
|
||||
|
||||
{ // delete the data
|
||||
ret = tdbTbDelete(pDb, "key1", strlen("key1"), txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
}
|
||||
|
||||
{ // query the deleted data
|
||||
void *pVal = NULL;
|
||||
int vLen = -1;
|
||||
|
||||
ret = tdbTbGet(pDb, "key1", strlen("key1"), &pVal, &vLen);
|
||||
ASSERT(ret == -1);
|
||||
GTEST_ASSERT_EQ(ret, -1);
|
||||
|
||||
GTEST_ASSERT_EQ(vLen, -1);
|
||||
GTEST_ASSERT_EQ(pVal, nullptr);
|
||||
|
||||
tdbFree(pVal);
|
||||
}
|
||||
|
||||
// commit current transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
}
|
||||
|
||||
TEST(TdbPageDefragmentTest, DISABLED_simple_insert1) {
|
||||
// TEST(TdbPageDefragmentTest, simple_insert1) {
|
||||
int ret;
|
||||
TDB *pEnv;
|
||||
TTB *pDb;
|
||||
tdb_cmpr_fn_t compFunc;
|
||||
int nData = 1;
|
||||
TXN *txn;
|
||||
int const pageSize = 4096;
|
||||
|
||||
taosRemoveDir("tdb");
|
||||
|
||||
// Open Env
|
||||
ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// Create a database
|
||||
compFunc = tKeyCmpr;
|
||||
ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
{
|
||||
char key[64];
|
||||
// char val[(4083 - 4 - 3 - 2)]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
char val[(4083 - 4 - 3 - 2) + 1]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
int64_t poolLimit = 4096; // 1M pool limit
|
||||
SPoolMem *pPool;
|
||||
|
||||
// open the pool
|
||||
pPool = openPool();
|
||||
|
||||
// start a transaction
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
|
||||
for (int iData = 1; iData <= nData; iData++) {
|
||||
sprintf(key, "key0");
|
||||
sprintf(val, "value%d", iData);
|
||||
|
||||
// ret = tdbTbInsert(pDb, key, strlen(key), val, strlen(val), &txn);
|
||||
// GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// generate value payload
|
||||
int valLen = sizeof(val) / sizeof(val[0]);
|
||||
for (int i = 6; i < valLen; ++i) {
|
||||
char c = char(i & 0xff);
|
||||
if (c == 0) {
|
||||
c = 1;
|
||||
}
|
||||
val[i] = c;
|
||||
}
|
||||
|
||||
ret = tdbTbInsert(pDb, "key1", strlen("key1"), val, valLen, txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// if pool is full, commit the transaction and start a new one
|
||||
if (pPool->size >= poolLimit) {
|
||||
// commit current transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
|
||||
// start a new transaction
|
||||
clearPool(pPool);
|
||||
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
}
|
||||
}
|
||||
|
||||
// commit the transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
|
||||
{ // Query the data
|
||||
void *pVal = NULL;
|
||||
int vLen;
|
||||
|
||||
for (int i = 1; i <= nData; i++) {
|
||||
sprintf(key, "key%d", i);
|
||||
// sprintf(val, "value%d", i);
|
||||
|
||||
ret = tdbTbGet(pDb, key, strlen(key), &pVal, &vLen);
|
||||
ASSERT(ret == 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
GTEST_ASSERT_EQ(vLen, sizeof(val) / sizeof(val[0]));
|
||||
GTEST_ASSERT_EQ(memcmp(val, pVal, vLen), 0);
|
||||
}
|
||||
|
||||
tdbFree(pVal);
|
||||
}
|
||||
|
||||
{ // Iterate to query the DB data
|
||||
TBC *pDBC;
|
||||
void *pKey = NULL;
|
||||
void *pVal = NULL;
|
||||
int vLen, kLen;
|
||||
int count = 0;
|
||||
|
||||
ret = tdbTbcOpen(pDb, &pDBC, NULL);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
tdbTbcMoveToFirst(pDBC);
|
||||
|
||||
for (;;) {
|
||||
ret = tdbTbcNext(pDBC, &pKey, &kLen, &pVal, &vLen);
|
||||
if (ret < 0) break;
|
||||
|
||||
// std::cout.write((char *)pKey, kLen) /* << " " << kLen */ << " ";
|
||||
// std::cout.write((char *)pVal, vLen) /* << " " << vLen */;
|
||||
// std::cout << std::endl;
|
||||
|
||||
count++;
|
||||
}
|
||||
|
||||
GTEST_ASSERT_EQ(count, nData);
|
||||
|
||||
tdbTbcClose(pDBC);
|
||||
|
||||
tdbFree(pKey);
|
||||
tdbFree(pVal);
|
||||
}
|
||||
}
|
||||
|
||||
ret = tdbTbDrop(pDb);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// Close a database
|
||||
tdbTbClose(pDb);
|
||||
|
||||
// Close Env
|
||||
ret = tdbClose(pEnv);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
}
|
||||
|
||||
// TEST(TdbPageDefragmentTest, DISABLED_seq_insert) {
|
||||
TEST(TdbPageDefragmentTest, seq_insert) {
|
||||
int ret = 0;
|
||||
TDB *pEnv = NULL;
|
||||
TTB *pDb = NULL;
|
||||
tdb_cmpr_fn_t compFunc;
|
||||
int nData = 64 * 1024;
|
||||
TXN *txn = NULL;
|
||||
int const pageSize = 1 * 1024 * 1024;
|
||||
|
||||
taosRemoveDir("tdb");
|
||||
|
||||
// Open Env
|
||||
ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// Create a database
|
||||
compFunc = tKeyCmpr;
|
||||
ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// 1, insert nData kv
|
||||
{
|
||||
char key[64];
|
||||
// char val[(4083 - 4 - 3 - 2)]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
char val[(4083 - 4 - 3 - 2) + 1]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
int64_t poolLimit = 4096; // 1M pool limit
|
||||
SPoolMem *pPool;
|
||||
|
||||
// open the pool
|
||||
pPool = openPool();
|
||||
|
||||
// start a transaction
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
|
||||
for (int iData = 0; iData < nData; ++iData) {
|
||||
sprintf(key, "key%d", iData);
|
||||
sprintf(val, "value%d", iData);
|
||||
|
||||
ret = tdbTbInsert(pDb, key, strlen(key), val, strlen(val), txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
/*
|
||||
// generate value payload
|
||||
int valLen = sizeof(val) / sizeof(val[0]);
|
||||
for (int i = 6; i < valLen; ++i) {
|
||||
char c = char(i & 0xff);
|
||||
if (c == 0) {
|
||||
c = 1;
|
||||
}
|
||||
val[i] = c;
|
||||
}
|
||||
|
||||
ret = tdbTbInsert(pDb, key, strlen(key), val, valLen, txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
*/
|
||||
// if pool is full, commit the transaction and start a new one
|
||||
if (pPool->size >= poolLimit) {
|
||||
// commit current transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
|
||||
// start a new transaction
|
||||
clearPool(pPool);
|
||||
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
}
|
||||
}
|
||||
|
||||
// commit the transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
|
||||
// 2, delete nData/2 records
|
||||
}
|
||||
|
||||
// Close a database
|
||||
tdbTbClose(pDb);
|
||||
|
||||
// Close Env
|
||||
ret = tdbClose(pEnv);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
}
|
||||
|
||||
// TEST(TdbPageDefragmentTest, DISABLED_seq_delete) {
|
||||
TEST(TdbPageDefragmentTest, seq_delete) {
|
||||
int ret = 0;
|
||||
TDB *pEnv = NULL;
|
||||
TTB *pDb = NULL;
|
||||
tdb_cmpr_fn_t compFunc;
|
||||
int nData = 64 * 1024;
|
||||
TXN *txn = NULL;
|
||||
int const pageSize = 1 * 1024 * 1024;
|
||||
|
||||
// Open Env
|
||||
ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// Create a database
|
||||
compFunc = tKeyCmpr;
|
||||
ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// 2, delete nData/2 records
|
||||
{
|
||||
char key[64];
|
||||
// char val[(4083 - 4 - 3 - 2)]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
char val[(4083 - 4 - 3 - 2) + 1]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
int64_t poolLimit = 4096; // 1M pool limit
|
||||
SPoolMem *pPool;
|
||||
|
||||
// open the pool
|
||||
pPool = openPool();
|
||||
|
||||
// start a transaction
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
|
||||
for (int iData = 1; iData <= nData; iData++) {
|
||||
if (iData % 2 == 0) continue;
|
||||
|
||||
sprintf(key, "key%d", iData);
|
||||
sprintf(val, "value%d", iData);
|
||||
|
||||
{ // delete the data
|
||||
ret = tdbTbDelete(pDb, key, strlen(key), txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
// generate value payload
|
||||
int valLen = sizeof(val) / sizeof(val[0]);
|
||||
for (int i = 6; i < valLen; ++i) {
|
||||
char c = char(i & 0xff);
|
||||
if (c == 0) {
|
||||
c = 1;
|
||||
}
|
||||
val[i] = c;
|
||||
}
|
||||
|
||||
ret = tdbTbInsert(pDb, key, strlen(key), val, valLen, txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
*/
|
||||
// if pool is full, commit the transaction and start a new one
|
||||
if (pPool->size >= poolLimit) {
|
||||
// commit current transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
|
||||
// start a new transaction
|
||||
clearPool(pPool);
|
||||
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
}
|
||||
}
|
||||
|
||||
// commit the transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
}
|
||||
|
||||
// Close a database
|
||||
tdbTbClose(pDb);
|
||||
|
||||
// Close Env
|
||||
ret = tdbClose(pEnv);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
}
|
||||
|
||||
// TEST(TdbPageDefragmentTest, DISABLED_defragment_insert) {
|
||||
TEST(TdbPageDefragmentTest, defragment_insert) {
|
||||
int ret = 0;
|
||||
TDB *pEnv = NULL;
|
||||
TTB *pDb = NULL;
|
||||
tdb_cmpr_fn_t compFunc;
|
||||
int nData = 64 * 1024;
|
||||
TXN *txn = NULL;
|
||||
int const pageSize = 1 * 1024 * 1024;
|
||||
|
||||
// Open Env
|
||||
ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// Create a database
|
||||
compFunc = tKeyCmpr;
|
||||
ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
|
||||
// 3, insert 32k records
|
||||
{
|
||||
char key[64];
|
||||
// char val[(4083 - 4 - 3 - 2)]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
char val[(4083 - 4 - 3 - 2) + 1]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||
int64_t poolLimit = 4096; // 1M pool limit
|
||||
SPoolMem *pPool;
|
||||
|
||||
// open the pool
|
||||
pPool = openPool();
|
||||
|
||||
// start a transaction
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
|
||||
for (int iData = nData + 1; iData <= nData * 2; iData++) {
|
||||
// if (iData % 2 == 0) continue;
|
||||
sprintf(key, "key%d", iData);
|
||||
sprintf(val, "value%d", iData);
|
||||
|
||||
ret = tdbTbInsert(pDb, key, strlen(key), val, strlen(val), txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
/*
|
||||
// generate value payload
|
||||
int valLen = sizeof(val) / sizeof(val[0]);
|
||||
for (int i = 6; i < valLen; ++i) {
|
||||
char c = char(i & 0xff);
|
||||
if (c == 0) {
|
||||
c = 1;
|
||||
}
|
||||
val[i] = c;
|
||||
}
|
||||
|
||||
ret = tdbTbInsert(pDb, key, strlen(key), val, valLen, txn);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
*/
|
||||
// if pool is full, commit the transaction and start a new one
|
||||
if (pPool->size >= poolLimit) {
|
||||
// commit current transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
|
||||
// start a new transaction
|
||||
clearPool(pPool);
|
||||
|
||||
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||
}
|
||||
}
|
||||
|
||||
// commit the transaction
|
||||
tdbCommit(pEnv, txn);
|
||||
tdbPostCommit(pEnv, txn);
|
||||
|
||||
// 2, delete nData/2 records
|
||||
}
|
||||
|
||||
// Close a database
|
||||
tdbTbClose(pDb);
|
||||
|
||||
// Close Env
|
||||
ret = tdbClose(pEnv);
|
||||
GTEST_ASSERT_EQ(ret, 0);
|
||||
}
|
|
@@ -420,7 +420,13 @@ static void transHttpEnvInit() {
uv_loop_init(http->loop);

http->asyncPool = transAsyncPoolCreate(http->loop, 1, http, httpAsyncCb);

if (NULL == http->asyncPool) {
taosMemoryFree(http->loop);
taosMemoryFree(http);
http = NULL;
return;
}

int err = taosThreadCreate(&http->thread, NULL, httpThread, (void*)http);
if (err != 0) {
taosMemoryFree(http->loop);
@@ -671,7 +671,7 @@ static SCliConn* cliCreateConn(SCliThrd* pThrd) {
conn->stream = (uv_stream_t*)taosMemoryMalloc(sizeof(uv_tcp_t));
uv_tcp_init(pThrd->loop, (uv_tcp_t*)(conn->stream));
conn->stream->data = conn;
transSetConnOption((uv_tcp_t*)conn->stream);
// transSetConnOption((uv_tcp_t*)conn->stream);

uv_timer_t* timer = taosArrayGetSize(pThrd->timerList) > 0 ? *(uv_timer_t**)taosArrayPop(pThrd->timerList) : NULL;
if (timer == NULL) {

@@ -778,7 +778,7 @@ static void cliSendCb(uv_write_t* req, int status) {
SCliMsg* pMsg = !transQueueEmpty(&pConn->cliMsgs) ? transQueueGet(&pConn->cliMsgs, 0) : NULL;
if (pMsg != NULL) {
int64_t cost = taosGetTimestampUs() - pMsg->st;
if (cost > 1000) {
if (cost > 1000 * 20) {
tWarn("%s conn %p send cost:%dus, send exception", CONN_GET_INST_LABEL(pConn), pConn, (int)cost);
}
}

@@ -800,9 +800,12 @@ static void cliSendCb(uv_write_t* req, int status) {
}

void cliSend(SCliConn* pConn) {
bool empty = transQueueEmpty(&pConn->cliMsgs);
ASSERTS(empty == false, "trans-cli get invalid msg");
if (empty == true) {
SCliThrd* pThrd = pConn->hostThrd;
STrans* pTransInst = pThrd->pTransInst;

if (transQueueEmpty(&pConn->cliMsgs)) {
tError("%s conn %p not msg to send", pTransInst->label, pConn);
cliHandleExcept(pConn);
return;
}

@@ -812,9 +815,6 @@ void cliSend(SCliConn* pConn) {
STransConnCtx* pCtx = pCliMsg->ctx;

SCliThrd* pThrd = pConn->hostThrd;
STrans* pTransInst = pThrd->pTransInst;

STransMsg* pMsg = (STransMsg*)(&pCliMsg->msg);
if (pMsg->pCont == 0) {
pMsg->pCont = (void*)rpcMallocCont(0);
@@ -1049,8 +1049,8 @@ static FORCE_INLINE uint32_t cliGetIpFromFqdnCache(SHashObj* cache, char* fqdn)
terrno = TAOS_SYSTEM_ERROR(errno);
tError("failed to get ip from fqdn:%s since %s", fqdn, terrstr());
return addr;
}
}

taosHashPut(cache, fqdn, strlen(fqdn), &addr, sizeof(addr));
} else {
addr = *v;

@@ -1067,9 +1067,10 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
STransConnCtx* pCtx = pMsg->ctx;

cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr);
STraceId* trace = &pMsg->msg.info.traceId;

if (!EPSET_IS_VALID(&pCtx->epSet)) {
tError("invalid epset");
tGError("%s, msg %s sent with invalid epset", pTransInst->label, TMSG_INFO(pMsg->msg.msgType));
destroyCmsg(pMsg);
return;
}

@@ -1132,16 +1133,35 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
cliHandleExcept(conn);
return;
}

struct sockaddr_in addr;
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = ipaddr;
addr.sin_port = (uint16_t)htons((uint16_t)conn->port);

STraceId* trace = &(pMsg->msg.info.traceId);
tGTrace("%s conn %p try to connect to %s:%d", pTransInst->label, conn, conn->ip, conn->port);
int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4);
if (fd == -1) {
tGError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn,
tstrerror(TAOS_SYSTEM_ERROR(errno)));
cliHandleExcept(conn);
errno = 0;
return;
}
int ret = uv_tcp_open((uv_tcp_t*)conn->stream, fd);
if (ret != 0) {
tGError("%s conn %p failed to set stream, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret));
cliHandleExcept(conn);
return;
}
ret = transSetConnOption((uv_tcp_t*)conn->stream);
if (ret != 0) {
tGError("%s conn %p failed to set socket opt, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret));
cliHandleExcept(conn);
return;
}

int ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb);
ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb);
if (ret != 0) {
tGError("%s conn %p failed to connect to %s:%d, reason:%s", pTransInst->label, conn, conn->ip, conn->port,
uv_err_name(ret));

@@ -1156,7 +1176,6 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
}
uv_timer_start(conn->timer, cliConnTimeout, TRANS_CONN_TIMEOUT, 0);
}
STraceId* trace = &pMsg->msg.info.traceId;
tGTrace("%s conn %p ready", pTransInst->label, conn);
}
static void cliAsyncCb(uv_async_t* handle) {
@@ -1292,7 +1311,11 @@ void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads,

for (int i = 0; i < cli->numOfThreads; i++) {
SCliThrd* pThrd = createThrdObj(shandle);
int err = taosThreadCreate(&pThrd->thread, NULL, cliWorkThread, (void*)(pThrd));
if (pThrd == NULL) {
return NULL;
}

int err = taosThreadCreate(&pThrd->thread, NULL, cliWorkThread, (void*)(pThrd));
if (err == 0) {
tDebug("success to create tranport-cli thread:%d", i);
}

@@ -1349,9 +1372,23 @@ static SCliThrd* createThrdObj(void* trans) {
taosThreadMutexInit(&pThrd->msgMtx, NULL);

pThrd->loop = (uv_loop_t*)taosMemoryMalloc(sizeof(uv_loop_t));
uv_loop_init(pThrd->loop);

int err = uv_loop_init(pThrd->loop);
if (err != 0) {
tError("failed to init uv_loop, reason:%s", uv_err_name(err));
taosMemoryFree(pThrd->loop);
taosThreadMutexDestroy(&pThrd->msgMtx);
taosMemoryFree(pThrd);
return NULL;
}
pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 8, pThrd, cliAsyncCb);
if (pThrd->asyncPool == NULL) {
tError("failed to init async pool");
uv_loop_close(pThrd->loop);
taosMemoryFree(pThrd->loop);
taosThreadMutexDestroy(&pThrd->msgMtx);
taosMemoryFree(pThrd);
return NULL;
}

pThrd->prepare = taosMemoryCalloc(1, sizeof(uv_prepare_t));
uv_prepare_init(pThrd->loop, pThrd->prepare);
@@ -205,6 +205,10 @@ bool transReadComplete(SConnBuffer* connBuf) {
}

int transSetConnOption(uv_tcp_t* stream) {
#if defined(WINDOWS) || defined(DARWIN)
#else
uv_tcp_keepalive(stream, 1, 20);
#endif
return uv_tcp_nodelay(stream, 1);
// int ret = uv_tcp_keepalive(stream, 5, 60);
}
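transSetConnOption now returns the uv_tcp_nodelay result, so callers such as cliHandleReq can react to a failure instead of ignoring it. A rough usage sketch of the same libuv socket options outside this codebase follows; the function name is hypothetical and, unlike the patch, this version also propagates a keep-alive error.

#include <uv.h>

/* Apply the options the patch uses: keep-alive (20s delay) where supported,
 * and TCP_NODELAY everywhere; return the first libuv error encountered. */
static int set_conn_option(uv_tcp_t *stream) {
#if !defined(_WIN32) && !defined(__APPLE__)
  int rc = uv_tcp_keepalive(stream, 1, 20);
  if (rc != 0) return rc;
#endif
  return uv_tcp_nodelay(stream, 1);
}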
@@ -214,24 +218,37 @@ SAsyncPool* transAsyncPoolCreate(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
pool->nAsync = sz;
pool->asyncs = taosMemoryCalloc(1, sizeof(uv_async_t) * pool->nAsync);

for (int i = 0; i < pool->nAsync; i++) {
int i = 0, err = 0;
for (i = 0; i < pool->nAsync; i++) {
uv_async_t* async = &(pool->asyncs[i]);

SAsyncItem* item = taosMemoryCalloc(1, sizeof(SAsyncItem));
item->pThrd = arg;
QUEUE_INIT(&item->qmsg);
taosThreadMutexInit(&item->mtx, NULL);

uv_async_t* async = &(pool->asyncs[i]);
uv_async_init(loop, async, cb);
async->data = item;
err = uv_async_init(loop, async, cb);
if (err != 0) {
tError("failed to init async, reason:%s", uv_err_name(err));
break;
}
}

if (i != pool->nAsync) {
transAsyncPoolDestroy(pool);
pool = NULL;
}

return pool;
}

void transAsyncPoolDestroy(SAsyncPool* pool) {
for (int i = 0; i < pool->nAsync; i++) {
uv_async_t* async = &(pool->asyncs[i]);

SAsyncItem* item = async->data;
if (item == NULL) continue;

taosThreadMutexDestroy(&item->mtx);
taosMemoryFree(item);
}
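The create path above now records how far the init loop got and destroys the whole pool if any uv_async_init fails, which is why the destroy path must tolerate half-initialized entries (the item == NULL check). A small self-contained sketch of the same create/partial-rollback shape, with generic names rather than TDengine's types:

#include <stdlib.h>
#include <uv.h>

typedef struct {
  int         n;
  uv_async_t *handles;
} AsyncPool;  /* illustrative stand-in for SAsyncPool */

static void pool_destroy(AsyncPool *p) {
  /* Sketch only: a production version must uv_close() every handle that
   * uv_async_init succeeded on before freeing the array. */
  free(p->handles);
  free(p);
}

static AsyncPool *pool_create(uv_loop_t *loop, int n, uv_async_cb cb) {
  AsyncPool *p = calloc(1, sizeof(*p));
  if (p == NULL) return NULL;
  p->n = n;
  p->handles = calloc((size_t)n, sizeof(uv_async_t));
  if (p->handles == NULL) { free(p); return NULL; }

  int i = 0;
  for (; i < n; i++) {
    if (uv_async_init(loop, &p->handles[i], cb) != 0) break;
  }
  if (i != n) {        /* roll back on partial failure, as in the patch */
    pool_destroy(p);
    return NULL;
  }
  return p;
}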
@@ -594,3 +594,12 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) {

return 0;
}

void walReadReset(SWalReader *pReader) {
taosThreadMutexLock(&pReader->mutex);
taosCloseFile(&pReader->pIdxFile);
taosCloseFile(&pReader->pLogFile);
pReader->curInvalid = 1;
pReader->curFileFirstVer = -1;
taosThreadMutexUnlock(&pReader->mutex);
}
@@ -37,7 +37,9 @@ if(CHECK_STR2INT_ERROR)
add_definitions(-DTD_CHECK_STR_TO_INT_ERROR)
endif()
target_link_libraries(
os PUBLIC pthread
os
PUBLIC pthread
PUBLIC zlibstatic
)
if(TD_WINDOWS)
target_link_libraries(

@@ -63,4 +65,4 @@ ENDIF ()

if(${BUILD_TEST})
add_subdirectory(test)
endif(${BUILD_TEST})
endif(${BUILD_TEST})
@@ -15,6 +15,7 @@
#define ALLOW_FORBID_FUNC
#include "os.h"
#include "osSemaphore.h"
#include "zlib.h"

#ifdef WINDOWS
#include <io.h>
@@ -830,3 +831,48 @@ bool taosCheckAccessFile(const char *pathname, int32_t tdFileAccessOptions) {
}

bool taosCheckExistFile(const char *pathname) { return taosCheckAccessFile(pathname, TD_FILE_ACCESS_EXIST_OK); };

int32_t taosCompressFile(char *srcFileName, char *destFileName) {
int32_t compressSize = 163840;
int32_t ret = 0;
int32_t len = 0;
char *data = taosMemoryMalloc(compressSize);
gzFile dstFp = NULL;

TdFilePtr pSrcFile = taosOpenFile(srcFileName, TD_FILE_READ | TD_FILE_STREAM);
if (pSrcFile == NULL) {
ret = -1;
goto cmp_end;
}

TdFilePtr pFile = taosOpenFile(destFileName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
ret = -2;
goto cmp_end;
}

dstFp = gzdopen(pFile->fd, "wb6f");
if (dstFp == NULL) {
ret = -3;
taosCloseFile(&pFile);
goto cmp_end;
}

while (!feof(pSrcFile->fp)) {
len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp);
(void)gzwrite(dstFp, data, len);
}

cmp_end:
if (pSrcFile) {
taosCloseFile(&pSrcFile);
}

if (dstFp) {
gzclose(dstFp);
}

taosMemoryFree(data);

return ret;
}
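taosCompressFile moves into the OS layer and now actually streams the source file through zlib's gzdopen/gzwrite instead of leaving the body commented out. A stand-alone sketch of that compression loop using plain stdio and zlib, without the TDengine wrappers and with error handling abbreviated:

#include <stdio.h>
#include <stdlib.h>
#include <zlib.h>

/* Compress srcPath into gzip file dstPath; returns 0 on success, <0 on error. */
static int gzip_file(const char *srcPath, const char *dstPath) {
  enum { CHUNK = 163840 };                 /* same 160 KB buffer size as the patch */
  int    ret = 0;
  char  *buf = malloc(CHUNK);
  FILE  *src = fopen(srcPath, "rb");
  gzFile dst = gzopen(dstPath, "wb6f");    /* level 6, filtered strategy */

  if (buf == NULL || src == NULL || dst == NULL) {
    ret = -1;
    goto done;
  }
  for (;;) {
    size_t n = fread(buf, 1, CHUNK, src);
    if (n == 0) break;                     /* EOF or read error */
    if (gzwrite(dst, buf, (unsigned)n) != (int)n) { ret = -2; break; }
  }
done:
  if (src) fclose(src);
  if (dst) gzclose(dst);
  free(buf);
  return ret;
}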
@@ -55,7 +55,7 @@ typedef struct TdSocket {
#endif
int refId;
SocketFd fd;
} * TdSocketPtr, TdSocket;
} *TdSocketPtr, TdSocket;

typedef struct TdSocketServer {
#if SOCKET_WITH_LOCK

@@ -63,7 +63,7 @@ typedef struct TdSocketServer {
#endif
int refId;
SocketFd fd;
} * TdSocketServerPtr, TdSocketServer;
} *TdSocketServerPtr, TdSocketServer;

typedef struct TdEpoll {
#if SOCKET_WITH_LOCK

@@ -71,7 +71,7 @@ typedef struct TdEpoll {
#endif
int refId;
EpollFd fd;
} * TdEpollPtr, TdEpoll;
} *TdEpollPtr, TdEpoll;

#if 0
int32_t taosSendto(TdSocketPtr pSocket, void *buf, int len, unsigned int flags, const struct sockaddr *dest_addr,
@@ -1005,7 +1005,7 @@ int32_t taosGetFqdn(char *fqdn) {
// immediately
// hints.ai_family = AF_INET;
strcpy(fqdn, hostname);
strcpy(fqdn+strlen(hostname), ".local");
strcpy(fqdn + strlen(hostname), ".local");
#else // __APPLE__
struct addrinfo hints = {0};
struct addrinfo *result = NULL;

@@ -1060,7 +1060,7 @@ int32_t taosCreateSocketWithTimeout(uint32_t timeout) {
#if defined(WINDOWS)
SOCKET fd;
#else
int fd;
int fd;
#endif
if ((fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) == INVALID_SOCKET) {
return -1;
@@ -1071,11 +1071,12 @@ int32_t taosCreateSocketWithTimeout(uint32_t timeout) {
return -1;
}
#elif defined(_TD_DARWIN_64)
uint32_t conn_timeout_ms = timeout * 1000;
if (0 != setsockopt(fd, IPPROTO_TCP, TCP_CONNECTIONTIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) {
taosCloseSocketNoCheck1(fd);
return -1;
}
// invalid config
// uint32_t conn_timeout_ms = timeout * 1000;
// if (0 != setsockopt(fd, IPPROTO_TCP, TCP_CONNECTIONTIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) {
//   taosCloseSocketNoCheck1(fd);
//   return -1;
//}
#else // Linux like systems
uint32_t conn_timeout_ms = timeout * 1000;
if (0 != setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) {
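On macOS the TCP_CONNECTIONTIMEOUT option is commented out as an invalid config, while the Linux branch keeps TCP_USER_TIMEOUT. A minimal Linux-only sketch of creating a TCP socket with that option; the helper name and value handling are illustrative, not taken from the project:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

/* Linux: unacknowledged data may stay pending at most timeout_ms before the
 * kernel forcibly closes the connection (TCP_USER_TIMEOUT, milliseconds). */
static int create_socket_with_timeout(unsigned int timeout_ms) {
  int fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
  if (fd < 0) return -1;
  if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout_ms, sizeof(timeout_ms)) != 0) {
    close(fd);
    return -1;
  }
  return fd;
}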
@@ -18,6 +18,7 @@
#include "taoserror.h"

#define PROCESS_ITEM 12
#define UUIDLEN37 37

typedef struct {
uint64_t user;

@@ -830,7 +831,8 @@ int32_t taosGetSystemUUID(char *uid, int32_t uidlen) {
return 0;
#elif defined(_TD_DARWIN_64)
uuid_t uuid = {0};
char buf[37] = {0};
char buf[UUIDLEN37];
memset(buf, 0, UUIDLEN37);
uuid_generate(uuid);
// it's caller's responsibility to make enough space for `uid`, that's 36-char + 1-null
uuid_unparse_lower(uuid, buf);
@@ -88,6 +88,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NEED_RETRY, "Retry needed")
TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE, "Out of memory in rpc queue")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TIMESTAMP, "Invalid timestamp format")
TAOS_DEFINE_ERROR(TSDB_CODE_MSG_DECODE_ERROR, "Msg decode error")
TAOS_DEFINE_ERROR(TSDB_CODE_MSG_ENCODE_ERROR, "Msg encode error")
TAOS_DEFINE_ERROR(TSDB_CODE_NO_AVAIL_DISK, "No available disk")
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_FOUND, "Not found")
TAOS_DEFINE_ERROR(TSDB_CODE_NO_DISKSPACE, "Out of disk space")

@@ -287,6 +288,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_NOT_EXIST, "Stream not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_OPTION, "Invalid stream option")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_MUST_BE_DELETED, "Stream must be dropped first")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MULTI_REPLICA_SOURCE_DB, "Stream temporarily does not support source db having replica > 1")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_STREAMS, "Too many streams")

// mnode-sma
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists")
|
@ -115,7 +115,6 @@ static int32_t taosPushLogBuffer(SLogBuff *pLogBuf, const char *msg, int32_t m
|
|||
static SLogBuff *taosLogBuffNew(int32_t bufSize);
|
||||
static void taosCloseLogByFd(TdFilePtr pFile);
|
||||
static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum);
|
||||
static int32_t taosCompressFile(char *srcFileName, char *destFileName);
|
||||
|
||||
static FORCE_INLINE void taosUpdateDaylight() {
|
||||
struct tm Tm, *ptm;
|
||||
|
@ -748,50 +747,6 @@ static void *taosAsyncOutputLog(void *param) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
int32_t taosCompressFile(char *srcFileName, char *destFileName) {
|
||||
int32_t compressSize = 163840;
|
||||
int32_t ret = 0;
|
||||
int32_t len = 0;
|
||||
char *data = taosMemoryMalloc(compressSize);
|
||||
// gzFile dstFp = NULL;
|
||||
|
||||
// srcFp = fopen(srcFileName, "r");
|
||||
TdFilePtr pSrcFile = taosOpenFile(srcFileName, TD_FILE_READ);
|
||||
if (pSrcFile == NULL) {
|
||||
ret = -1;
|
||||
goto cmp_end;
|
||||
}
|
||||
|
||||
TdFilePtr pFile = taosOpenFile(destFileName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
|
||||
if (pFile == NULL) {
|
||||
ret = -2;
|
||||
goto cmp_end;
|
||||
}
|
||||
|
||||
// dstFp = gzdopen(fd, "wb6f");
|
||||
// if (dstFp == NULL) {
|
||||
// ret = -3;
|
||||
// close(fd);
|
||||
// goto cmp_end;
|
||||
// }
|
||||
//
|
||||
// while (!feof(srcFp)) {
|
||||
// len = (int32_t)fread(data, 1, compressSize, srcFp);
|
||||
// (void)gzwrite(dstFp, data, len);
|
||||
// }
|
||||
|
||||
cmp_end:
|
||||
if (pSrcFile) {
|
||||
taosCloseFile(&pSrcFile);
|
||||
}
|
||||
// if (dstFp) {
|
||||
// gzclose(dstFp);
|
||||
// }
|
||||
taosMemoryFree(data);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool taosAssertDebug(bool condition, const char *file, int32_t line, const char *format, ...) {
|
||||
if (condition) return false;
|
||||
|
||||
|
|
|
@ -5,11 +5,11 @@
|
|||
#include "tsimplehash.h"
|
||||
#include "tlog.h"
|
||||
|
||||
#define GET_PAYLOAD_DATA(_p) ((char*)(_p)->pData + POINTER_BYTES)
|
||||
#define BUF_PAGE_IN_MEM(_p) ((_p)->pData != NULL)
|
||||
#define GET_PAYLOAD_DATA(_p) ((char*)(_p)->pData + POINTER_BYTES)
|
||||
#define BUF_PAGE_IN_MEM(_p) ((_p)->pData != NULL)
|
||||
#define CLEAR_BUF_PAGE_IN_MEM_FLAG(_p) ((_p)->pData = NULL)
|
||||
#define HAS_DATA_IN_DISK(_p) ((_p)->offset >= 0)
|
||||
#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages)
|
||||
#define HAS_DATA_IN_DISK(_p) ((_p)->offset >= 0)
|
||||
#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages)
|
||||
|
||||
typedef struct SPageDiskInfo {
|
||||
int64_t offset;
|
||||
|
@ -17,7 +17,7 @@ typedef struct SPageDiskInfo {
|
|||
} SPageDiskInfo, SFreeListItem;
|
||||
|
||||
struct SPageInfo {
|
||||
SListNode* pn; // point to list node struct. it is NULL when the page is evicted from the in-memory buffer
|
||||
SListNode* pn; // point to list node struct. it is NULL when the page is evicted from the in-memory buffer
|
||||
void* pData;
|
||||
int64_t offset;
|
||||
int32_t pageId;
|
||||
|
@ -52,10 +52,13 @@ struct SDiskbasedBuf {
|
|||
};
|
||||
|
||||
static int32_t createDiskFile(SDiskbasedBuf* pBuf) {
|
||||
if (pBuf->path == NULL) { // prepare the file name when needed it
|
||||
if (pBuf->path == NULL) { // prepare the file name when needed it
|
||||
char path[PATH_MAX] = {0};
|
||||
taosGetTmpfilePath(pBuf->prefix, "paged-buf", path);
|
||||
pBuf->path = taosMemoryStrDup(path);
|
||||
if (pBuf->path == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
pBuf->pFile =
|
||||
|
@@ -126,6 +129,30 @@ static uint64_t allocateNewPositionInFile(SDiskbasedBuf* pBuf, size_t size) {

static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { return pageSize + POINTER_BYTES + sizeof(SFilePage); }

static int32_t doFlushBufPageImpl(SDiskbasedBuf* pBuf, int64_t offset, const char* pData, int32_t size) {
int32_t ret = taosLSeekFile(pBuf->pFile, offset, SEEK_SET);
if (ret == -1) {
terrno = TAOS_SYSTEM_ERROR(errno);
return terrno;
}

ret = (int32_t)taosWriteFile(pBuf->pFile, pData, size);
if (ret != size) {
terrno = TAOS_SYSTEM_ERROR(errno);
return terrno;
}

// extend the file
if (pBuf->fileSize < offset + size) {
pBuf->fileSize = offset + size;
}

pBuf->statis.flushBytes += size;
pBuf->statis.flushPages += 1;

return TSDB_CODE_SUCCESS;
}

static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
if (pg->pData == NULL || pg->used) {
uError("invalid params in paged buffer process when flushing buf to disk, %s", pBuf->id);
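doFlushBufPageImpl consolidates the seek, write, file-size and statistics bookkeeping that the old doFlushBufPage duplicated for its first-flush and re-flush branches (both branches now just call the helper, as the following hunk shows). The core of that helper, expressed as a sketch with plain POSIX calls instead of the taosLSeekFile/taosWriteFile wrappers:

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/* Write `size` bytes of `data` at `offset` in fd; returns 0 on success,
 * otherwise an errno-style code. */
static int flush_at_offset(int fd, long offset, const char *data, int size) {
  if (lseek(fd, (off_t)offset, SEEK_SET) == (off_t)-1) return errno;
  ssize_t n = write(fd, data, (size_t)size);
  if (n != (ssize_t)size) return errno ? errno : EIO;
  return 0;
}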
@ -134,12 +161,15 @@ static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
|
|||
}
|
||||
|
||||
int32_t size = pBuf->pageSize;
|
||||
char* t = NULL;
|
||||
int64_t offset = pg->offset;
|
||||
|
||||
char* t = NULL;
|
||||
if ((!HAS_DATA_IN_DISK(pg)) || pg->dirty) {
|
||||
void* payload = GET_PAYLOAD_DATA(pg);
|
||||
t = doCompressData(payload, pBuf->pageSize, &size, pBuf);
|
||||
if (size < 0) {
|
||||
uError("failed to compress data when flushing data to disk, %s", pBuf->id);
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
@ -147,59 +177,29 @@ static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
|
|||
// this page is flushed to disk for the first time
|
||||
if (pg->dirty) {
|
||||
if (!HAS_DATA_IN_DISK(pg)) {
|
||||
pg->offset = allocateNewPositionInFile(pBuf, size);
|
||||
offset = allocateNewPositionInFile(pBuf, size);
|
||||
pBuf->nextPos += size;
|
||||
|
||||
int32_t ret = taosLSeekFile(pBuf->pFile, pg->offset, SEEK_SET);
|
||||
if (ret == -1) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
int32_t code = doFlushBufPageImpl(pBuf, offset, t, size);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ret = (int32_t)taosWriteFile(pBuf->pFile, t, size);
|
||||
if (ret != size) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// extend the file size
|
||||
if (pBuf->fileSize < pg->offset + size) {
|
||||
pBuf->fileSize = pg->offset + size;
|
||||
}
|
||||
|
||||
pBuf->statis.flushBytes += size;
|
||||
pBuf->statis.flushPages += 1;
|
||||
} else {
|
||||
// length becomes greater, current space is not enough, allocate new place, otherwise, do nothing
|
||||
if (pg->length < size) {
|
||||
// 1. add current space to free list
|
||||
SPageDiskInfo dinfo = {.length = pg->length, .offset = pg->offset};
|
||||
SPageDiskInfo dinfo = {.length = pg->length, .offset = offset};
|
||||
taosArrayPush(pBuf->pFree, &dinfo);
|
||||
|
||||
// 2. allocate new position, and update the info
|
||||
pg->offset = allocateNewPositionInFile(pBuf, size);
|
||||
offset = allocateNewPositionInFile(pBuf, size);
|
||||
pBuf->nextPos += size;
|
||||
}
|
||||
|
||||
// 3. write to disk.
|
||||
int32_t ret = taosLSeekFile(pBuf->pFile, pg->offset, SEEK_SET);
|
||||
if (ret == -1) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
int32_t code = doFlushBufPageImpl(pBuf, offset, t, size);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ret = (int32_t)taosWriteFile(pBuf->pFile, t, size);
|
||||
if (ret != size) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (pBuf->fileSize < pg->offset + size) {
|
||||
pBuf->fileSize = pg->offset + size;
|
||||
}
|
||||
|
||||
pBuf->statis.flushBytes += size;
|
||||
pBuf->statis.flushPages += 1;
|
||||
}
|
||||
} else { // NOTE: the size may be -1, the this recycle page has not been flushed to disk yet.
|
||||
size = pg->length;
|
||||
|
@ -209,9 +209,10 @@ static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
|
|||
memset(pDataBuf, 0, getAllocPageSize(pBuf->pageSize));
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
uDebug("page_flush %p, pageId:%d, offset:%d", pDataBuf, pg->pageId, pg->offset);
|
||||
uDebug("page_flush %p, pageId:%d, offset:%d", pDataBuf, pg->pageId, offset);
|
||||
#endif
|
||||
|
||||
pg->offset = offset;
|
||||
pg->length = size; // on disk size
|
||||
return pDataBuf;
|
||||
}
|
||||
|
@ -236,7 +237,7 @@ static char* flushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
|
|||
// load file block data in disk
|
||||
static int32_t loadPageFromDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) {
|
||||
if (pg->offset < 0 || pg->length <= 0) {
|
||||
uError("failed to load buf page from disk, offset:%"PRId64", length:%d, %s", pg->offset, pg->length, pBuf->id);
|
||||
uError("failed to load buf page from disk, offset:%" PRId64 ", length:%d, %s", pg->offset, pg->length, pBuf->id);
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
|
||||
|
@ -303,6 +304,7 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) {
|
|||
static char* evictBufPage(SDiskbasedBuf* pBuf) {
|
||||
SListNode* pn = getEldestUnrefedPage(pBuf);
|
||||
if (pn == NULL) { // no available buffer pages now, return.
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -377,14 +379,14 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
|
|||
goto _error;
|
||||
}
|
||||
|
||||
pPBuf->prefix = (char*) dir;
|
||||
pPBuf->prefix = (char*)dir;
|
||||
pPBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t));
|
||||
|
||||
// qDebug("QInfo:0x%"PRIx64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId,
|
||||
// pPBuf->pageSize, pPBuf->inMemPages, pPBuf->path);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_error:
|
||||
_error:
|
||||
destroyDiskbasedBuf(pPBuf);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
@ -394,11 +396,12 @@ static char* doExtractPage(SDiskbasedBuf* pBuf) {
|
|||
if (NO_IN_MEM_AVAILABLE_PAGES(pBuf)) {
|
||||
availablePage = evictBufPage(pBuf);
|
||||
if (availablePage == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
uWarn("no available buf pages, current:%d, max:%d", listNEles(pBuf->lruList), pBuf->inMemPages)
|
||||
uWarn("no available buf pages, current:%d, max:%d, reason: %s, %s", listNEles(pBuf->lruList), pBuf->inMemPages,
|
||||
terrstr(), pBuf->id)
|
||||
}
|
||||
} else {
|
||||
availablePage = taosMemoryCalloc(1, getAllocPageSize(pBuf->pageSize)); // add extract bytes in case of zipped buffer increased.
|
||||
availablePage =
|
||||
taosMemoryCalloc(1, getAllocPageSize(pBuf->pageSize)); // add extract bytes in case of zipped buffer increased.
|
||||
if (availablePage == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
@ -546,9 +549,7 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) {
|
|||
|
||||
size_t getTotalBufSize(const SDiskbasedBuf* pBuf) { return (size_t)pBuf->totalBufSize; }
|
||||
|
||||
SArray* getDataBufPagesIdList(SDiskbasedBuf* pBuf) {
|
||||
return pBuf->pIdList;
|
||||
}
|
||||
SArray* getDataBufPagesIdList(SDiskbasedBuf* pBuf) { return pBuf->pIdList; }
|
||||
|
||||
void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
|
||||
if (pBuf == NULL) {
|
||||
|
@ -562,7 +563,7 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
|
|||
needRemoveFile = true;
|
||||
uDebug(
|
||||
"Paged buffer closed, total:%.2f Kb (%d Pages), inmem size:%.2f Kb (%d Pages), file size:%.2f Kb, page "
|
||||
"size:%.2f Kb, %s\n",
|
||||
"size:%.2f Kb, %s",
|
||||
pBuf->totalBufSize / 1024.0, pBuf->numOfPages, listNEles(pBuf->lruList) * pBuf->pageSize / 1024.0,
|
||||
listNEles(pBuf->lruList), pBuf->fileSize / 1024.0, pBuf->pageSize / 1024.0f, pBuf->id);
|
||||
|
||||
|
@ -579,8 +580,7 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
|
|||
ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages);
|
||||
} else {
|
||||
uDebug(
|
||||
"Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f "
|
||||
"Kb",
|
||||
"Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPgSize:%.2f Kb",
|
||||
ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
|
||||
ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
|
||||
}
|
||||
|
@ -623,9 +623,7 @@ SPageInfo* getLastPageInfo(SArray* pList) {
|
|||
return pPgInfo;
|
||||
}
|
||||
|
||||
int32_t getPageId(const SPageInfo* pPgInfo) {
|
||||
return pPgInfo->pageId;
|
||||
}
|
||||
int32_t getPageId(const SPageInfo* pPgInfo) { return pPgInfo->pageId; }
|
||||
|
||||
int32_t getBufPageSize(const SDiskbasedBuf* pBuf) { return pBuf->pageSize; }
|
||||
|
||||
|
@ -686,7 +684,7 @@ void dBufPrintStatis(const SDiskbasedBuf* pBuf) {
|
|||
ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
|
||||
ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
|
||||
} else {
|
||||
//printf("no page loaded\n");
|
||||
// printf("no page loaded\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -178,6 +178,7 @@
|
|||
,,y,script,./test.sh -f tsim/query/udf_with_const.sim
|
||||
,,y,script,./test.sh -f tsim/query/sys_tbname.sim
|
||||
,,y,script,./test.sh -f tsim/query/groupby.sim
|
||||
,,y,script,./test.sh -f tsim/query/forceFill.sim
|
||||
,,y,script,./test.sh -f tsim/qnode/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/snode/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/mnode/basic1.sim
|
||||
|
@ -405,7 +406,7 @@
|
|||
,,y,script,./test.sh -f tmp/monitor.sim
|
||||
|
||||
#system test
|
||||
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShell.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShellError.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShellNetChk.py
|
||||
|
@ -1046,6 +1047,11 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/blockSMA.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-20582.py
|
||||
|
|
|
@ -29,6 +29,7 @@ class TDSimClient:
|
|||
self.testCluster = False
|
||||
self.path = path
|
||||
self.cfgDict = {
|
||||
"fqdn": "localhost",
|
||||
"numOfLogLines": "100000000",
|
||||
"locale": "en_US.UTF-8",
|
||||
"charset": "UTF-8",
|
||||
|
@ -119,6 +120,7 @@ class TDDnode:
|
|||
self.asan = False
|
||||
self.remoteIP = ""
|
||||
self.cfgDict = {
|
||||
"fqdn": "localhost",
|
||||
"monitor": "0",
|
||||
"maxShellConns": "30000",
|
||||
"locale": "en_US.UTF-8",
|
||||
|
|
|
@ -15,7 +15,7 @@ $db = $dbPrefix . $i
|
|||
|
||||
sql drop database if exists $db -x step1
|
||||
step1:
|
||||
sql create database $db vgroups 1;
|
||||
sql create database $db vgroups 1 cachemodel 'last_row'
|
||||
|
||||
sql use $db
|
||||
sql create table $tb (ts timestamp, c1 int)
|
||||
|
@ -122,4 +122,74 @@ if $data01 != 199 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sql drop table t1
|
||||
|
||||
$rowNum = 8200
|
||||
$ts0 = 1537146000000
|
||||
sql create table t1 (ts timestamp, c1 int)
|
||||
|
||||
$i = 0
|
||||
$ts = $ts0
|
||||
|
||||
$x = 0
|
||||
while $x < $rowNum
|
||||
$xs = $x * $delta
|
||||
$ts = $ts0 + $xs
|
||||
sql insert into t1 values ( $ts , $x )
|
||||
$x = $x + 1
|
||||
endw
|
||||
|
||||
sql delete from t1 where ts<=1537146409500
|
||||
|
||||
sql flush database $db
|
||||
|
||||
print =====================================>TD-22007
|
||||
sql select count(*) from t1 interval(10a)
|
||||
|
||||
sql drop table t1
|
||||
|
||||
sql create table st1 (ts timestamp, k int) tags(a int);
|
||||
sql insert into t1 using st1 tags(1) values('2020-1-1 10:10:10', 0);
|
||||
sql insert into t2 using st1 tags(1) values('2020-1-1 10:10:11', 1);
|
||||
sql insert into t3 using st1 tags(1) values('2020-1-1 10:10:12', 2);
|
||||
sql insert into t4 using st1 tags(1) values('2020-1-1 10:10:13', 3);
|
||||
sql insert into t5 using st1 tags(1) values('2020-1-1 10:10:14', 4);
|
||||
sql insert into t6 using st1 tags(2) values('2020-1-1 10:10:15', 5);
|
||||
sql insert into t7 using st1 tags(2) values('2020-1-1 10:10:16', 6);
|
||||
sql insert into t8 using st1 tags(2) values('2020-1-1 10:10:17', 7);
|
||||
sql insert into t9 using st1 tags(2) values('2020-1-1 10:10:18', 8);
|
||||
sql insert into t10 using st1 tags(2) values('2020-1-1 10:10:19', 9);
|
||||
|
||||
sql select count(*) from st1
|
||||
if $data00 != 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select last_row(*) from st1 group by a
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data00 != @20-01-01 10:10:19.000@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 9 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data10 != @20-01-01 10:10:14.000@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data11 != 4 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ===============================================> TS-2613
|
||||
sql select * from information_schema.ins_databases limit 1 offset 1;
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
|
|
|
@ -0,0 +1,367 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
||||
sql drop database if exists db1;
|
||||
sql create database db1 vgroups 10;
|
||||
sql use db1;
|
||||
sql create stable sta (ts timestamp, f1 double, f2 binary(200)) tags(t1 int);
|
||||
sql create table tba1 using sta tags(1);
|
||||
sql insert into tba1 values ('2022-04-26 15:15:01', 1.0, "a");
|
||||
sql insert into tba1 values ('2022-04-26 15:15:02', 2.0, "b");
|
||||
sql insert into tba1 values ('2022-04-26 15:15:04', 4.0, "b");
|
||||
sql insert into tba1 values ('2022-04-26 15:15:05', 5.0, "b");
|
||||
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:00' and ts <= '2022-04-26 15:15:06' interval(1s) fill(value_f, 8.8);
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 2.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 4.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data50 != 5.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data60 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:00' and ts <= '2022-04-26 15:15:06' interval(1s) fill(value, 8.8);
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 2.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 4.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data50 != 5.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data60 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:00' and ts <= '2022-04-26 15:15:06' interval(1s) fill(null);
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 2.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 4.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data50 != 5.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data60 != NULL then
|
||||
return -1
|
||||
endi
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:00' and ts <= '2022-04-26 15:15:06' interval(1s) fill(null_f);
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 2.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 4.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data50 != 5.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data60 != NULL then
|
||||
return -1
|
||||
endi
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:06' and ts <= '2022-04-26 15:15:10' interval(1s) fill(value, 8.8);
|
||||
if $rows != 0 then
|
||||
return -1
|
||||
endi
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:06' and ts <= '2022-04-26 15:15:10' interval(1s) fill(value_f, 8.8);
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:06' and ts <= '2022-04-26 15:15:10' interval(1s) fill(null);
|
||||
if $rows != 0 then
|
||||
return -1
|
||||
endi
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:06' and ts <= '2022-04-26 15:15:10' interval(1s) fill(null_f);
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != NULL then
|
||||
return -1
|
||||
endi
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:16:00' and ts <= '2022-04-26 19:15:59' interval(1s) fill(value_f, 8.8);
|
||||
if $rows != 14400 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:16:00' and ts <= '2022-04-26 19:15:59' interval(1s) fill(null_f);
|
||||
if $rows != 14400 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != NULL then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:15:00','2022-04-26 15:15:06') every(1s) fill(value_f, 8.8);
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 2.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 4.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data50 != 5.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data60 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:15:00','2022-04-26 15:15:06') every(1s) fill(value, 8.8);
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 2.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 4.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data50 != 5.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data60 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:15:00','2022-04-26 15:15:06') every(1s) fill(null);
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 2.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 4.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data50 != 5.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data60 != NULL then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:15:00','2022-04-26 15:15:06') every(1s) fill(null_f);
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 2.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 4.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data50 != 5.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data60 != NULL then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:15:06','2022-04-26 15:15:10') every(1s) fill(value, 8.8);
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:15:06','2022-04-26 15:15:10') every(1s) fill(value_f, 8.8);
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:15:06','2022-04-26 15:15:10') every(1s) fill(null);
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != NULL then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:15:06','2022-04-26 15:15:10') every(1s) fill(null_f);
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data40 != NULL then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:16:00','2022-04-26 19:15:59') every(1s) fill(value_f, 8.8);
|
||||
if $rows != 14400 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 8.800000000 then
|
||||
return -1
|
||||
endi
|
||||
sql select interp(f1) from tba1 range('2022-04-26 15:16:00','2022-04-26 19:15:59') every(1s) fill(null_f);
|
||||
if $rows != 14400 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != NULL then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -81,6 +81,15 @@ if $rows == 0 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sleep 3000
|
||||
|
||||
sql drop stream if exists streamd1;
|
||||
sql drop stream if exists streamd2;
|
||||
sql drop stream if exists streamd3;
|
||||
sql drop stream if exists streamd4;
|
||||
sql drop stream if exists streamd5;
|
||||
sql drop stream if exists streamd6;
|
||||
|
||||
sql create stream streamd10 into streamd10 as select _wstart, _wend, count(*), first(ca), last(cb) as c2 from t1 interval(10s);
|
||||
|
||||
sql desc streamd10;
|
||||
|
@ -100,15 +109,6 @@ if $rows == 0 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sleep 3000
|
||||
|
||||
sql drop stream if exists streamd1;
|
||||
sql drop stream if exists streamd2;
|
||||
sql drop stream if exists streamd3;
|
||||
sql drop stream if exists streamd4;
|
||||
sql drop stream if exists streamd5;
|
||||
sql drop stream if exists streamd6;
|
||||
|
||||
|
||||
_OVER:
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
|
|
|
@ -55,62 +55,62 @@ sql create stream stb_asin_stream trigger at_once into output_asin_stb as select
|
|||
sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb;
|
||||
sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
|
||||
sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
|
||||
sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
|
||||
sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
|
||||
sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
|
||||
sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
|
||||
sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
|
||||
sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
|
||||
sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
|
||||
sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
|
||||
sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
|
||||
sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
|
||||
sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
|
||||
sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
|
||||
sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
|
||||
sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
|
||||
sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
|
||||
sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
|
||||
sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
|
||||
sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
|
||||
sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
|
||||
sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
|
||||
sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
|
||||
sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
|
||||
sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
|
||||
sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
|
||||
sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
|
||||
sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
|
||||
sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
|
||||
sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
|
||||
# sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
|
||||
# sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
|
||||
# sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
|
||||
# sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
|
||||
# sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
|
||||
# sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
|
||||
# sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
|
||||
# sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
|
||||
# sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
|
||||
# sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
|
||||
# sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
|
||||
# sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
|
||||
# sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
|
||||
# sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
|
||||
# sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
|
||||
# sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
|
||||
# sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
|
||||
# sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
|
||||
# sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
|
||||
# sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
|
||||
# sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
|
||||
# sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
|
||||
sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos");
|
||||
sql insert into scalar_ct1 values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");
|
||||
sql insert into scalar_ct1 values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null);
|
||||
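The `#`-prefixed statements above are disabled variants of the string-function streams, and the three inserts feed the child table scalar_ct1 with the sample strings those streams operate on. As a quick reference, here is a small pure-Python sketch of the values char_length, concat/concat_ws and lower/upper should yield for the first inserted row (c3="beijing", c4="taos", c5="Taos"); it needs no database connection, and the column names simply follow the test schema.

```python
# Expected scalar results for the first sample row of scalar_ct1.
# Pure-Python sketch; no TDengine connection required.
c3, c4, c5 = "beijing", "taos", "Taos"

# char_length(): number of characters (equals len() for these ASCII inputs)
print(len(c3), len(c4), len(c5))          # 7 4 4

# concat() and concat_ws() with one of the literal separators used above
print(c3 + c4, "aND".join([c3, c4]))      # beijingtaos beijingaNDtaos

# lower()/upper() as exercised by the *_lower_* and *_upper_* streams
print(c3.lower(), c5.upper())             # beijing TAOS
```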
|
@ -146,62 +146,62 @@ sql create stream stb_asin_stream trigger at_once into output_asin_stb as select
|
|||
sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb;
|
||||
sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
|
||||
sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
|
||||
sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
|
||||
sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
|
||||
sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
|
||||
sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
|
||||
sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
|
||||
sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
|
||||
sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
|
||||
sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
|
||||
sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
|
||||
sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname;
|
||||
sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
|
||||
sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
|
||||
sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
|
||||
sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
|
||||
sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
|
||||
sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
|
||||
sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
|
||||
sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
|
||||
sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
|
||||
sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
|
||||
sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
|
||||
sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
|
||||
sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
|
||||
sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
|
||||
sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
|
||||
sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
|
||||
sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
|
||||
sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname;
|
||||
sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
|
||||
sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
|
||||
# sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
|
||||
# sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
|
||||
# sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
|
||||
# sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
|
||||
# sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
|
||||
# sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
|
||||
# sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
|
||||
# sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
|
||||
# sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
|
||||
# sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
|
||||
# sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
|
||||
# sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
|
||||
# sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
|
||||
# sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
|
||||
# sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
|
||||
# sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
|
||||
# sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
|
||||
# sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
|
||||
# sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
|
||||
# sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
|
||||
# sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
|
||||
# sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
|
||||
# sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname;
|
||||
# sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
|
||||
# sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
|
||||
sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos");
|
||||
sql insert into scalar_ct1 values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");
|
||||
sql insert into scalar_ct1 values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null);
|
||||
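The active (un-commented) streams above materialize their scalar results into the output_*_stb/output_*_ctb/output_*_tb tables. A hedged sketch of how one of them could be cross-checked with the tdSql helpers this suite already imports; the database name is an assumption, and the table and column names are the ones used in the statements above.

```python
# Sketch only: compare a stream's materialized output with a direct query.
# Assumes the util.sql tdSql helpers used elsewhere in this suite.
def check_upper_stream(db="test"):
    tdSql.execute(f"use {db}")
    # direct scalar query over the source child table
    tdSql.query("select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1 order by ts")
    expected = tdSql.queryResult
    # rows written by ctb_upper_stream into its output table
    tdSql.query("select * from output_upper_ctb order by ts")
    for i, row in enumerate(expected):
        for j, value in enumerate(row):
            tdSql.checkData(i, j, value)
```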
|
@ -273,4 +273,4 @@ print ========== step7
|
|||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
|
|
|
@ -14,6 +14,7 @@ sql use test;
|
|||
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
|
||||
sql create stream streams1 trigger at_once into streamt as select _wstart ts, count(*) c1 from t1 where ts > 1648791210000 and ts < 1648791413000 interval(10s) fill(value, 100);
|
||||
sql create stream streams1a trigger at_once into streamta as select _wstart ts, count(*) c1 from t1 where ts > 1648791210000 and ts < 1648791413000 interval(10s) fill(value_f, 100);
|
||||
sql insert into t1 values(1648791213000,1,2,3,1.0,'aaa');
|
||||
sleep 100
|
||||
sql insert into t1 values(1648791233000,1,2,3,1.0,'aaa');
|
||||
|
@ -77,6 +78,69 @@ if $data71 != 1 then
|
|||
goto loop0
|
||||
endi
|
||||
|
||||
|
||||
print "force fill vaule"
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop0a:
|
||||
sleep 200
|
||||
sql select * from streamta order by ts;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 8 then
|
||||
print =====rows=$rows
|
||||
goto loop0a
|
||||
endi
|
||||
|
||||
if $data01 != 1 then
|
||||
print =====data01=$data01
|
||||
goto loop0a
|
||||
endi
|
||||
|
||||
if $data11 != 1 then
|
||||
print =====data11=$data11
|
||||
goto loop0a
|
||||
endi
|
||||
|
||||
if $data21 != 1 then
|
||||
print =====data21=$data21
|
||||
goto loop0a
|
||||
endi
|
||||
|
||||
if $data31 != 100 then
|
||||
print =====data31=$data31
|
||||
goto loop0a
|
||||
endi
|
||||
|
||||
if $data41 != 1 then
|
||||
print =====data41=$data41
|
||||
goto loop0a
|
||||
endi
|
||||
|
||||
if $data51 != 100 then
|
||||
print =====data51=$data51
|
||||
goto loop0a
|
||||
endi
|
||||
|
||||
if $data61 != 100 then
|
||||
print =====data61=$data61
|
||||
goto loop0a
|
||||
endi
|
||||
|
||||
if $data71 != 1 then
|
||||
print =====data71=$data71
|
||||
goto loop0a
|
||||
endi
|
||||
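The "force fill value" loop above polls streamta, the target of the fill(value_f, 100) stream, and expects empty 10-second windows to carry the literal fill value 100 while windows that actually received rows report a count of 1. Below is a pure-Python sketch of the window arithmetic behind that expectation; the two timestamps are the inserts visible in this hunk, and since the real test inserts more rows, the row count it checks (8) is not reproduced here.

```python
# Enumerate 10s windows between the first and last timestamp (ms) and mark
# which ones would be interpolated with the fill value. Illustrative only.
FILL_VALUE = 100
INTERVAL_MS = 10_000

def windows_with_fill(ts_list, fill=FILL_VALUE, interval=INTERVAL_MS):
    starts = {ts - ts % interval for ts in ts_list}
    out = []
    for w in range(min(starts), max(starts) + interval, interval):
        out.append((w, 1 if w in starts else fill))
    return out

print(windows_with_fill([1648791213000, 1648791233000]))
# [(1648791210000, 1), (1648791220000, 100), (1648791230000, 1)]
```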
|
||||
|
||||
|
||||
|
||||
|
||||
sql drop stream if exists streams2;
|
||||
sql drop database if exists test2;
|
||||
sql create database test2 vgroups 1;
|
||||
|
@ -408,6 +472,7 @@ sql create table t1 using st tags(1,1,1);
|
|||
sql create table t2 using st tags(2,2,2);
|
||||
|
||||
sql create stream streams4 trigger at_once into streamt4 as select _wstart ts, count(*) c1, concat(tbname, 'aaa') as pname, timezone() from st where ts > 1648791000000 and ts < 1648793000000 partition by tbname interval(10s) fill(NULL);
|
||||
sql create stream streams4a trigger at_once into streamt4a as select _wstart ts, count(*) c1, concat(tbname, 'aaa') as pname, timezone() from st where ts > 1648791000000 and ts < 1648793000000 partition by tbname interval(10s) fill(NULL_F);
|
||||
sql insert into t1 values(1648791213000,1,2,3,1.0,'aaa');
|
||||
sql insert into t1 values(1648791233000,1,2,3,1.0,'aaa');
|
||||
sql insert into t1 values(1648791273000,1,2,3,1.0,'aaa');
|
||||
|
@ -512,32 +577,104 @@ if $data[12][3] == NULL then
|
|||
goto loop4
|
||||
endi
|
||||
|
||||
print "force fill null"
|
||||
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop4a:
|
||||
sleep 200
|
||||
sql select * from streamt4a order by pname, ts;
|
||||
|
||||
print ===> $data[0][0] , $data[0][1] , $data[0][2] , $data[0][3]
|
||||
print ===> $data[1][0] , $data[1][1] , $data[1][2] , $data[1][3]
|
||||
print ===> $data[2][0] , $data[2][1] , $data[2][2] , $data[2][3]
|
||||
print ===> $data[3][0] , $data[3][1] , $data[3][2] , $data[3][3]
|
||||
print ===> $data[4][0] , $data[4][1] , $data[4][2] , $data[4][3]
|
||||
print ===> $data[5][0] , $data[5][1] , $data[5][2] , $data[5][3]
|
||||
print ===> $data[6][0] , $data[6][1] , $data[6][2] , $data[6][3]
|
||||
print ===> $data[7][0] , $data[7][1] , $data[7][2] , $data[7][3]
|
||||
print ===> $data[8][0] , $data[8][1] , $data[8][2] , $data[8][3]
|
||||
print ===> $data[9][0] , $data[9][1] , $data[9][2] , $data[9][3]
|
||||
print ===> $data[10][0] , $data[10][1] , $data[10][2] , $data[10][3]
|
||||
print ===> $data[11][0] , $data[11][1] , $data[11][2] , $data[11][3]
|
||||
print ===> $data[12][0] , $data[12][1] , $data[12][2] , $data[12][3]
|
||||
print ===> $data[13][0] , $data[13][1] , $data[13][2] , $data[13][3]
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 14 then
|
||||
print =====rows=$rows
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data11 != NULL then
|
||||
print =====data11=$data11
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data12 != t1aaa then
|
||||
print =====data12=$data12
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data13 == NULL then
|
||||
print =====data13=$data13
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data32 != t1aaa then
|
||||
print =====data32=$data32
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data42 != t1aaa then
|
||||
print =====data42=$data42
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data52 != t1aaa then
|
||||
print =====data52=$data52
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data81 != NULL then
|
||||
print =====data81=$data81
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data82 != t2aaa then
|
||||
print =====data82=$data82
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data83 == NULL then
|
||||
print =====data83=$data83
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data[10][2] != t2aaa then
|
||||
print =====data[10][2]=$data[10][2]
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data[11][2] != t2aaa then
|
||||
print =====data[11][2]=$data[11][2]
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
if $data[12][2] != t2aaa then
|
||||
print =====data[12][2]=$data[12][2]
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if $data[12][3] == NULL then
|
||||
print =====data[12][3]=$data[12][3]
|
||||
goto loop4a
|
||||
endi
|
||||
|
||||
|
||||
|
||||
|
@ -584,4 +721,4 @@ print ============loop_all=$loop_all
|
|||
|
||||
system sh/stop_dnodes.sh
|
||||
|
||||
#goto looptest
|
||||
#goto looptest
|
||||
|
|
|
@ -0,0 +1,285 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 50
|
||||
sql connect
|
||||
|
||||
print step 1 start
|
||||
|
||||
sql drop stream if exists streams0;
|
||||
sql drop database if exists test;
|
||||
sql create database test vgroups 1;
|
||||
sql use test;
|
||||
sql create table t1(ts timestamp, a int, b int , c int);
|
||||
|
||||
print create stream streams0 trigger at_once ignore update 1 into streamt as select _wstart c1, count(*) c2, max(b) c3 from t1 interval(10s);
|
||||
|
||||
sql create stream streams0 trigger at_once ignore update 1 into streamt as select _wstart c1, count(*) c2, max(b) c3 from t1 interval(10s);
|
||||
|
||||
sql insert into t1 values(1648791213000,1,1,1);
|
||||
sql insert into t1 values(1648791213000,2,2,2);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop0:
|
||||
sleep 300
|
||||
sql select * from streamt order by 1,2,3;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 2 then
|
||||
print =====data01=$data01
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
if $data02 != 2 then
|
||||
print =====data02=$data02
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
|
||||
sql insert into t1 values(1648791213000,3,3,3);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop1:
|
||||
sleep 300
|
||||
sql select * from streamt order by 1,2,3;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 3 then
|
||||
print =====data01=$data01
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data02 != 3 then
|
||||
print =====data02=$data02
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
print step 1 end
|
||||
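Step 1 above shows what `ignore update 1` means for an interval stream: re-inserting the same timestamp (1648791213000) with new values is treated as more incoming data rather than an update to reconcile, so count(*) and max(b) in streamt move from 2 to 3. A hedged Python sketch of the same check, assuming the tdSql helpers from this suite and a retry loop in place of the sim `goto`:

```python
import time

# Poll the stream target until the expected aggregates appear.
# Assumes tdSql from util.sql and the t1/streamt objects created above.
def wait_for(expected_count, expected_max, retries=10, delay=0.3):
    for _ in range(retries):
        tdSql.query("select * from streamt order by 1,2,3")
        rows = tdSql.queryResult
        if rows and rows[0][1] == expected_count and rows[0][2] == expected_max:
            return True
        time.sleep(delay)
    return False

tdSql.execute("insert into t1 values(1648791213000,3,3,3)")
assert wait_for(3, 3), "stream did not reflect the same-timestamp insert"
```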
|
||||
print step 2 start
|
||||
|
||||
sql drop stream if exists streams1;
|
||||
sql drop database if exists test1;
|
||||
sql create database test1 vgroups 1;
|
||||
sql use test1;
|
||||
sql create table t1(ts timestamp, a int, b int , c int);
|
||||
|
||||
print create stream streams1 trigger at_once ignore update 1 into streamt1 as select _wstart c1, count(*) c2, max(b) c3 from t1 session(ts, 10s);
|
||||
|
||||
sql create stream streams1 trigger at_once ignore update 1 into streamt1 as select _wstart c1, count(*) c2, max(b) c3 from t1 session(ts, 10s);
|
||||
|
||||
sql insert into t1 values(1648791213000,1,1,1);
|
||||
sql insert into t1 values(1648791213000,2,2,2);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop2:
|
||||
sleep 300
|
||||
sql select * from streamt1 order by 1,2,3;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 2 then
|
||||
print =====data01=$data01
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
if $data02 != 2 then
|
||||
print =====data02=$data02
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
|
||||
sql insert into t1 values(1648791213000,3,3,3);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop3:
|
||||
|
||||
sleep 300
|
||||
sql select * from streamt1 order by 1,2,3;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 3 then
|
||||
print =====data01=$data01
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data02 != 3 then
|
||||
print =====data02=$data02
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
print step 2 end
|
||||
|
||||
print step 3 start
|
||||
|
||||
sql drop stream if exists streams2;
|
||||
sql drop database if exists test2;
|
||||
sql create database test2 vgroups 1;
|
||||
sql use test2;
|
||||
sql create table t1(ts timestamp, a int, b int , c int);
|
||||
|
||||
print create stream streams2 trigger at_once ignore update 1 into streamt2 as select _wstart c1, count(*) c2, max(b) c3 from t1 state_window(c);
|
||||
|
||||
sql create stream streams2 trigger at_once ignore update 1 into streamt2 as select _wstart c1, count(*) c2, max(b) c3 from t1 state_window(c);
|
||||
|
||||
sql insert into t1 values(1648791213000,1,1,1);
|
||||
sql insert into t1 values(1648791213000,2,2,1);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop2:
|
||||
sleep 300
|
||||
sql select * from streamt2 order by 1,2,3;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 2 then
|
||||
print =====data01=$data01
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
if $data02 != 2 then
|
||||
print =====data02=$data02
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
|
||||
sql insert into t1 values(1648791213000,3,3,1);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop3:
|
||||
|
||||
sleep 300
|
||||
sql select * from streamt2 order by 1,2,3;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 3 then
|
||||
print =====data01=$data01
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data02 != 3 then
|
||||
print =====data02=$data02
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
print step 3 end
|
||||
|
||||
print step 4 start
|
||||
|
||||
sql drop stream if exists streams3;
|
||||
sql drop database if exists test3;
|
||||
sql create database test3 vgroups 4;
|
||||
sql use test3;
|
||||
sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
|
||||
sql create table t1 using st tags(1,1,1);
|
||||
sql create table t2 using st tags(2,2,2);
|
||||
|
||||
print create stream streams3 trigger at_once ignore update 1 into streamt3 as select _wstart c1, count(*) c2, max(b) c3 from st interval(10s);
|
||||
|
||||
sql create stream streams3 trigger at_once ignore update 1 into streamt3 as select _wstart c1, count(*) c2, max(b) c3 from st interval(10s);
|
||||
|
||||
sql insert into t1 values(1648791213000,1,1,1);
|
||||
sql insert into t1 values(1648791213000,2,2,2);
|
||||
|
||||
sql insert into t2 values(1648791213000,1,1,1);
|
||||
sql insert into t2 values(1648791213000,2,2,2);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop0:
|
||||
sleep 300
|
||||
sql select * from streamt3 order by 1,2,3;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 4 then
|
||||
print =====data01=$data01
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
if $data02 != 2 then
|
||||
print =====data02=$data02
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
|
||||
sql insert into t1 values(1648791213000,3,3,3);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop1:
|
||||
sleep 300
|
||||
sql select * from streamt3 order by 1,2,3;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 5 then
|
||||
print =====data01=$data01
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data02 != 3 then
|
||||
print =====data02=$data02
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
sql insert into t2 values(1648791213000,4,4,4);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop1:
|
||||
sleep 300
|
||||
sql select * from streamt3 order by 1,2,3;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data01 != 6 then
|
||||
print =====data01=$data01
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data02 != 4 then
|
||||
print =====data02=$data02
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
print step 4 end
|
||||
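Step 4 repeats the check against a 4-vgroup super table: same-timestamp rows arriving from either child table keep accumulating (count(*) goes 4, 5, 6 and max(b) goes 2, 3, 4) because update checking is disabled. A pure-Python recomputation of those expected aggregates from the insert batches above:

```python
# Recompute count(*)/max(b) as the step-4 batches accumulate. Rows are
# (ts, a, b, c) tuples matching the st/t1/t2 schema above.
rows = []

def apply(batch):
    rows.extend(batch)
    return len(rows), max(b for _, _, b, _ in rows)

print(apply([(1648791213000, 1, 1, 1), (1648791213000, 2, 2, 2),   # t1
             (1648791213000, 1, 1, 1), (1648791213000, 2, 2, 2)])) # t2 -> (4, 2)
print(apply([(1648791213000, 3, 3, 3)]))                           # t1 -> (5, 3)
print(apply([(1648791213000, 4, 4, 4)]))                           # t2 -> (6, 4)
```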
|
||||
system sh/stop_dnodes.sh
|
|
@ -52,6 +52,7 @@ sql insert into t1 values(1648791213000,1,2,3,1.0);
|
|||
sql insert into t1 values(1648791223001,1,2,3,1.1);
|
||||
sql insert into t1 values(1648791233002,2,2,3,2.1);
|
||||
sql insert into t1 values(1648791243003,2,2,3,3.1);
|
||||
sleep 300
|
||||
sql insert into t1 values(1648791200000,4,2,3,4.1);
|
||||
|
||||
$loop_count = 0
|
||||
|
@ -115,6 +116,7 @@ sql create stream stream_t1 trigger at_once IGNORE EXPIRED 1 into streamtST1 as
|
|||
sql create stream stream_t2 trigger at_once IGNORE EXPIRED 1 into streamtST2 as select _wstart, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st session(ts, 10s) ;
|
||||
sql insert into ts1 values(1648791211000,1,2,3);
|
||||
sql insert into ts1 values(1648791222001,2,2,3);
|
||||
sleep 300
|
||||
sql insert into ts2 values(1648791211000,1,2,3);
|
||||
sql insert into ts2 values(1648791222001,2,2,3);
|
||||
|
||||
|
|
|
@ -16,6 +16,9 @@ print $data00 $data01 $data02
|
|||
sql use test;
|
||||
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double, id int);
|
||||
|
||||
print create stream streams1 trigger at_once into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a);
|
||||
|
||||
sql create stream streams1 trigger at_once into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a);
|
||||
|
||||
sql insert into t1 values(1648791213000,1,2,3,1.0,1);
|
||||
|
@ -79,86 +82,88 @@ endi
|
|||
|
||||
if $data03 != 1 then
|
||||
print ======$data03
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data04 != 1 then
|
||||
print ======$data04
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data05 != 3 then
|
||||
print ======$data05
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data06 != 5 then
|
||||
print ======$data06
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
# row 1
|
||||
if $data11 != 1 then
|
||||
print ======$data11
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data12 != 1 then
|
||||
print ======$data12
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data13 != 2 then
|
||||
print ======$data13
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data14 != 2 then
|
||||
print ======$data14
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data15 != 3 then
|
||||
print ======$data15
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data16 != 4 then
|
||||
print ======$data16
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
# row 2
|
||||
if $data21 != 1 then
|
||||
print ======$data21
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data22 != 1 then
|
||||
print ======$data22
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data23 != 1 then
|
||||
print ======$data23
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data24 != 1 then
|
||||
print ======$data24
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data25 != 3 then
|
||||
print ======$data25
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data26 != 6 then
|
||||
print ======$data26
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
print loop1 end
|
||||
|
||||
sql insert into t1 values(1648791213011,1,2,3,1.0,7);
|
||||
|
||||
$loop_count = 0
|
||||
|
@ -174,33 +179,31 @@ endi
|
|||
if $data21 != 2 then
|
||||
print =====data21=$data21
|
||||
goto loop2
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data22 != 2 then
|
||||
print =====data22=$data22
|
||||
goto loop2
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data23 != 2 then
|
||||
print ======$data23
|
||||
return -1
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
if $data24 != 1 then
|
||||
print ======$data24
|
||||
return -1
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
if $data25 != 3 then
|
||||
print ======$data25
|
||||
return -1
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
if $data26 != 7 then
|
||||
print ======$data26
|
||||
return -1
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791213011,1,2,3,1.0,8);
|
||||
|
@ -238,99 +241,93 @@ endi
|
|||
if $data21 != 1 then
|
||||
print =====data21=$data21
|
||||
goto loop3
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data22 != 1 then
|
||||
print =====data22=$data22
|
||||
goto loop3
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data23 != 10 then
|
||||
print ======$data23
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data24 != 10 then
|
||||
print ======$data24
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data25 != 10 then
|
||||
print ======$data25
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data26 != 12 then
|
||||
print ======$data26
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
# row 3
|
||||
if $data31 != 1 then
|
||||
print =====data31=$data31
|
||||
goto loop3
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data32 != 1 then
|
||||
print =====data32=$data32
|
||||
goto loop3
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data33 != 3 then
|
||||
print ======$data33
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data34 != 3 then
|
||||
print ======$data34
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data35 != 3 then
|
||||
print ======$data35
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data36 != 10 then
|
||||
print ======$data36
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
# row 4
|
||||
if $data41 != 1 then
|
||||
print =====data41=$data41
|
||||
goto loop3
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data42 != 1 then
|
||||
print =====data42=$data42
|
||||
goto loop3
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data43 != 1 then
|
||||
print ======$data43
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data44 != 1 then
|
||||
print ======$data44
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data45 != 3 then
|
||||
print ======$data45
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data46 != 11 then
|
||||
print ======$data46
|
||||
return -1
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791213030,3,12,12,12.0,13);
|
||||
|
@ -360,95 +357,93 @@ endi
|
|||
|
||||
if $data02 != 2 then
|
||||
print ======$data02
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data03 != 6 then
|
||||
print ======$data03
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data04 != 3 then
|
||||
print ======$data04
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data05 != 3 then
|
||||
print ======$data05
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data06 != 15 then
|
||||
print ======$data06
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
# row 1
|
||||
if $data11 != 1 then
|
||||
print =====data11=$data11
|
||||
goto loop4
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data12 != 1 then
|
||||
print =====data12=$data12
|
||||
goto loop4
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data13 != 15 then
|
||||
print ======$data13
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data14 != 15 then
|
||||
print ======$data14
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data15 != 15 then
|
||||
print ======$data15
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data16 != 16 then
|
||||
print ======$data16
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
# row 2
|
||||
if $data21 != 1 then
|
||||
print =====data21=$data21
|
||||
goto loop4
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data22 != 1 then
|
||||
print =====data22=$data22
|
||||
goto loop4
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data23 != 1 then
|
||||
print ======$data23
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data24 != 1 then
|
||||
print ======$data24
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data25 != 13 then
|
||||
print ======$data25
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data26 != 14 then
|
||||
print ======$data26
|
||||
return -1
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
print loop4 end
|
||||
|
||||
sql create database test1 vgroups 1;
|
||||
sql select * from information_schema.ins_databases;
|
||||
|
||||
|
@ -457,6 +452,9 @@ print $data00 $data01 $data02
|
|||
sql use test1;
|
||||
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double, id int);
|
||||
|
||||
print create stream streams2 trigger at_once into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a);
|
||||
|
||||
sql create stream streams2 trigger at_once into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a);
|
||||
|
||||
sql insert into t1 values(1648791212000,2,2,3,1.0,1);
|
||||
|
@ -502,6 +500,9 @@ sql create database test3 vgroups 1;
|
|||
sql use test3;
|
||||
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double, id int);
|
||||
|
||||
print create stream streams3 trigger at_once into streamt3 as select _wstart, count(*) c1, sum(b) c3 from t1 state_window(a);
|
||||
|
||||
sql create stream streams3 trigger at_once into streamt3 as select _wstart, count(*) c1, sum(b) c3 from t1 state_window(a);
|
||||
sql insert into t1 values(1648791212000,1,2,3,1.0,1);
|
||||
sql insert into t1 values(1648791213000,2,2,3,1.0,1);
|
||||
|
@ -543,7 +544,6 @@ if $rows != 10 then
|
|||
goto loop6
|
||||
endi
|
||||
|
||||
|
||||
sql drop stream if exists streams4;
|
||||
sql drop database if exists test4;
|
||||
sql drop stable if exists streamt4;
|
||||
|
@ -552,7 +552,10 @@ sql use test4;
|
|||
sql create table st (ts timestamp, c1 tinyint, c2 smallint) tags (t1 tinyint) ;
|
||||
sql create table t1 using st tags (-81) ;
|
||||
sql create table t2 using st tags (-81) ;
|
||||
sql create stream if not exists streams4 trigger window_close into streamt4 as select _wstart AS start, min(c1),count(c1) from t1 state_window(c1);
|
||||
|
||||
print create stream if not exists streams4 trigger window_close into streamt4 as select _wstart AS startts, min(c1),count(c1) from t1 state_window(c1);
|
||||
|
||||
sql create stream if not exists streams4 trigger window_close into streamt4 as select _wstart AS startts, min(c1),count(c1) from t1 state_window(c1);
|
||||
|
||||
sql insert into t1 (ts, c1) values (1668073288209, 11);
|
||||
sql insert into t1 (ts, c1) values (1668073288210, 11);
|
||||
|
@ -567,7 +570,7 @@ loop7:
|
|||
|
||||
sleep 200
|
||||
|
||||
sql select * from streamt4 order by start;
|
||||
sql select * from streamt4 order by startts;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 20 then
|
||||
|
@ -606,7 +609,7 @@ loop8:
|
|||
|
||||
sleep 200
|
||||
|
||||
sql select * from streamt4 order by start;
|
||||
sql select * from streamt4 order by startts;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 20 then
|
||||
|
@ -636,11 +639,11 @@ sql insert into t1 (ts, c1) values (1668073288225, 65);
|
|||
sql insert into t1 (ts, c1) values (1668073288226, 65);
|
||||
|
||||
$loop_count = 0
|
||||
loop8:
|
||||
loop81:
|
||||
|
||||
sleep 200
|
||||
|
||||
sql select * from streamt4 order by start;
|
||||
sql select * from streamt4 order by startts;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 20 then
|
||||
|
@ -649,27 +652,27 @@ endi
|
|||
|
||||
if $rows != 2 then
|
||||
print =====rows=$rows
|
||||
goto loop8
|
||||
goto loop81
|
||||
endi
|
||||
|
||||
if $data01 != 11 then
|
||||
print =====data01=$data01
|
||||
goto loop8
|
||||
goto loop81
|
||||
endi
|
||||
|
||||
if $data02 != 5 then
|
||||
print =====data02=$data02
|
||||
goto loop8
|
||||
goto loop81
|
||||
endi
|
||||
|
||||
if $data11 != 29 then
|
||||
print =====data11=$data11
|
||||
goto loop8
|
||||
goto loop81
|
||||
endi
|
||||
|
||||
if $data12 != 1 then
|
||||
print =====data12=$data12
|
||||
goto loop8
|
||||
goto loop81
|
||||
endi
|
||||
|
||||
sql insert into t1 (ts, c1) values (1668073288224, 64);
|
||||
|
@ -679,7 +682,7 @@ loop9:
|
|||
|
||||
sleep 200
|
||||
|
||||
sql select * from streamt4 order by start;
|
||||
sql select * from streamt4 order by startts;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 20 then
|
||||
|
@ -738,6 +741,9 @@ sql use test5;
|
|||
sql create table tb (ts timestamp, a int);
|
||||
sql insert into tb values (now + 1m , 1 );
|
||||
sql create table b (c timestamp, d int, e int , f int, g double);
|
||||
|
||||
print create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3 from tb state_window(a);
|
||||
|
||||
sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3 from tb state_window(a);
|
||||
sql insert into b values(1648791213000,NULL,NULL,NULL,NULL);
|
||||
sql select * from streamt order by c1, c2, c3;
|
||||
|
@ -792,4 +798,6 @@ if $data00 != 2 then
|
|||
goto loop10
|
||||
endi
|
||||
|
||||
print state0 end
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
|
|
|
@ -0,0 +1,99 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
from util import constant
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.common import *
|
||||
from util.sqlset import *
|
||||
from util.cluster import *
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
self.dnode_num=len(cluster.dnodes)
|
||||
self.dbname = 'db_test'
|
||||
self.setsql = TDSetSql()
|
||||
self.stbname = f'{self.dbname}.stb'
|
||||
self.rowNum = 5
|
||||
self.tbnum = 10
|
||||
self.ts = 1537146000000
|
||||
self.binary_str = 'taosdata'
|
||||
self.nchar_str = '涛思数据'
|
||||
self.column_dict = {
|
||||
'ts' : 'timestamp',
|
||||
'col1': 'tinyint',
|
||||
'col2': 'smallint',
|
||||
'col3': 'int',
|
||||
'col4': 'bigint',
|
||||
'col5': 'tinyint unsigned',
|
||||
'col6': 'smallint unsigned',
|
||||
'col7': 'int unsigned',
|
||||
'col8': 'bigint unsigned',
|
||||
'col9': 'float',
|
||||
'col10': 'double',
|
||||
'col11': 'bool',
|
||||
'col12': 'binary(20)',
|
||||
'col13': 'nchar(20)'
|
||||
}
|
||||
self.replica = [1,3]
|
||||
|
||||
def insert_data(self,column_dict,tbname,row_num):
|
||||
insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str)
|
||||
for i in range(row_num):
|
||||
insert_list = []
|
||||
self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts)
|
||||
def prepare_data(self,dbname,stbname,column_dict,tbnum,rowNum,replica):
|
||||
tag_dict = {
|
||||
't0':'int'
|
||||
}
|
||||
tag_values = [
|
||||
f'1'
|
||||
]
|
||||
tdSql.execute(f"create database if not exists {dbname} vgroups 1 replica {replica} ")
|
||||
tdSql.execute(f'use {dbname}')
|
||||
tdSql.execute(self.setsql.set_create_stable_sql(stbname,column_dict,tag_dict))
|
||||
for i in range(tbnum):
|
||||
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags({tag_values[0]})")
|
||||
self.insert_data(self.column_dict,f'{stbname}_{i}',rowNum)
|
||||
def redistribute_vgroups(self,replica,stbname,tbnum,rownum):
|
||||
tdSql.query('show vgroups')
|
||||
vnode_id = tdSql.queryResult[0][0]
|
||||
if replica == 1:
|
||||
for dnode_id in range(1,self.dnode_num+1) :
|
||||
tdSql.execute(f'redistribute vgroup {vnode_id} dnode {dnode_id}')
|
||||
tdSql.query(f'select count(*) from {stbname}')
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],tbnum*rownum)
|
||||
elif replica == 3:
|
||||
for dnode_id in range(1,self.dnode_num-1):
|
||||
tdSql.execute(f'redistribute vgroup {vnode_id} dnode {dnode_id} dnode {dnode_id+1} dnode {dnode_id+2}')
|
||||
tdSql.query(f'select count(*) from {stbname}')
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],tbnum*rownum)
|
||||
|
||||
def run(self):
|
||||
for replica in self.replica:
|
||||
self.prepare_data(self.dbname,self.stbname,self.column_dict,self.tbnum,self.rowNum,replica)
|
||||
self.redistribute_vgroups(replica,self.stbname,self.tbnum,self.rowNum)
|
||||
tdSql.execute(f'drop database {self.dbname}')
|
||||
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
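redistribute_vgroups above drives `redistribute vgroup <id> dnode <n> [dnode <n> dnode <n>]` across the cluster and re-counts the super table afterwards to confirm no rows were lost. Here is a small helper that only builds those statements, so the loop logic can be sanity-checked without a cluster; it is illustrative and not part of the util modules.

```python
# Build the redistribute statements issued by the test above for a given
# replica count and number of dnodes.
def redistribute_statements(vgroup_id, dnode_num, replica):
    stmts = []
    if replica == 1:
        for d in range(1, dnode_num + 1):
            stmts.append(f"redistribute vgroup {vgroup_id} dnode {d}")
    elif replica == 3:
        for d in range(1, dnode_num - 1):
            stmts.append(f"redistribute vgroup {vgroup_id} dnode {d} "
                         f"dnode {d + 1} dnode {d + 2}")
    return stmts

print(redistribute_statements(2, 3, 3))
# ['redistribute vgroup 2 dnode 1 dnode 2 dnode 3']
```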
@ -31,6 +31,10 @@ class TDTestCase:
|
|||
tdSql.execute('create database if not exists test;')
|
||||
tdSql.execute('create table test.stb (ts timestamp, c11 int, c12 float ) TAGS(t11 int, t12 int );')
|
||||
tdSql.execute('create table test.tb using test.stb TAGS (1, 1);')
|
||||
|
||||
# double comma insert check error
|
||||
tdSql.error("insert into test.tb(ts, c11) values(now,,100)")
|
||||
|
||||
sql_list = list()
|
||||
for i in range(5):
|
||||
sql = f'insert into test.tb values (now-{i}m, {i}, {i});'
|
||||
|
|
|
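The added `tdSql.error(...)` call asserts that a statement with a doubled comma is rejected before the batch of valid inserts is built. A hedged sketch of the same negative test written directly against the Python connector follows; the connection parameters and the simplified table definition are assumptions, since the suite itself goes through tdSql.

```python
import taos  # taospy connector; host and schema below are assumptions

def expect_error(conn, sql):
    try:
        conn.execute(sql)
    except Exception as err:  # the connector raises on parser errors
        print("rejected as expected:", err)
        return True
    raise AssertionError(f"statement unexpectedly succeeded: {sql}")

conn = taos.connect(host="localhost")
conn.execute("create database if not exists test")
conn.execute("create table if not exists test.tb (ts timestamp, c11 int, c12 float)")
expect_error(conn, "insert into test.tb(ts, c11) values(now,,100)")
```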
@ -111,6 +111,17 @@ class TDTestCase:
|
|||
sql2 = "select (case when sum(q_smallint)=0 then null else sum(q_smallint) end) from %s.stable_1_1 limit 100;" %database
|
||||
self.constant_check(database,sql1,sql2,0)
|
||||
|
||||
#TD-20257
|
||||
sql1 = "select tbname,first(ts),q_int,q_smallint,q_bigint,case when q_int <0 then 1 else 0 end from %s.stable_1 where tbname = 'stable_1_1' and ts < now partition by tbname state_window(case when q_int <0 then 1 else 0 end);" %database
|
||||
sql2 = "select tbname,first(ts),q_int,q_smallint,q_bigint,case when q_int <0 then 1 else 0 end from %s.stable_1_1 where ts < now partition by tbname state_window(case when q_int <0 then 1 else 0 end);" %database
|
||||
self.constant_check(database,sql1,sql2,0)
|
||||
self.constant_check(database,sql1,sql2,1)
|
||||
self.constant_check(database,sql1,sql2,2)
|
||||
self.constant_check(database,sql1,sql2,3)
|
||||
self.constant_check(database,sql1,sql2,4)
|
||||
self.constant_check(database,sql1,sql2,5)
|
||||
|
||||
#TD-20260
|
||||
sql1 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1 where tbname = 'stable_1_1' and ts < now state_window(case when q_smallint <0 then 1 else 0 end);" %database
|
||||
sql2 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1_1 where ts < now state_window(case when q_smallint <0 then 1 else 0 end);" %database
|
||||
self.constant_check(database,sql1,sql2,0)
|
||||
|
|
|
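The TD-20257 and TD-20260 additions compare a super-table query restricted to tbname = 'stable_1_1' against the equivalent direct child-table query, column by column, through constant_check (its definition appears in the max/min test further down: fetch both first rows, compare one column, flush the database, and compare again). A minimal, hedged sketch of that comparison pattern with generic callables standing in for the real helpers:

```python
import time

# Sketch of the constant_check pattern: both statements must agree on the
# chosen column of their first result row, before and after a flush.
def results_agree(query, flush, sql1, sql2, column, settle=0):
    v1, v2 = query(sql1)[0][column], query(sql2)[0][column]
    assert v1 == v2, f"mismatch before flush: {v1} != {v2}"
    flush()
    time.sleep(settle)  # give the flush time to land
    w1, w2 = query(sql1)[0][column], query(sql2)[0][column]
    assert w1 == w2 and w1 == v1, "results changed after flush"

# trivial stand-ins so the sketch runs without a database
fake = {"q1": [[7, 8]], "q2": [[7, 9]]}
results_agree(lambda s: fake[s], lambda: None, "q1", "q2", 0)
```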
@ -0,0 +1,159 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import random
|
||||
import os
|
||||
import time
|
||||
import taos
|
||||
import subprocess
|
||||
from faker import Faker
|
||||
from util.log import tdLog
|
||||
from util.cases import tdCases
|
||||
from util.sql import tdSql
|
||||
from util.dnodes import tdDnodes
|
||||
from util.dnodes import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 131 ,"querySmaOptimize":1}
|
||||
|
||||
def init(self, conn, logSql, replicaVar):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
self.testcasePath = os.path.split(__file__)[0]
|
||||
self.testcaseFilename = os.path.split(__file__)[-1]
|
||||
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
|
||||
|
||||
self.db = "max_min"
|
||||
|
||||
def dropandcreateDB_random(self,database,n):
|
||||
ts = 1630000000000
|
||||
num_random = 5
|
||||
fake = Faker('zh_CN')
|
||||
tdSql.execute('''drop database if exists %s ;''' %database)
|
||||
tdSql.execute('''create database %s keep 36500 ;'''%(database))
|
||||
tdSql.execute('''use %s;'''%database)
|
||||
|
||||
tdSql.execute('''create stable %s.stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
|
||||
q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
|
||||
tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);'''%database)
|
||||
|
||||
for i in range(num_random):
|
||||
tdSql.execute('''create table %s.stable_1_%d using %s.stable_1 tags('stable_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
|
||||
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
|
||||
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
|
||||
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
|
||||
|
||||
# insert data
|
||||
for i in range(num_random):
|
||||
for j in range(n):
|
||||
tdSql.execute('''insert into %s.stable_1_%d (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts)\
|
||||
values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d) ;'''
|
||||
% (database,i,ts + i*1000 + j, fake.random_int(min=-2147483647, max=2147483647, step=1),
|
||||
fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
|
||||
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
|
||||
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i))
|
||||
|
||||
tdSql.query("select count(*) from %s.stable_1;" %database)
|
||||
tdSql.checkData(0,0,num_random*n)
|
||||
tdSql.query("select count(*) from %s.stable_1_1;"%database)
|
||||
tdSql.checkData(0,0,n)
|
||||
|
||||
|
||||
def TD_22219_max(self,database):
|
||||
|
||||
sql3 = "select count(*) from (select max(q_int) from %s.stable_1 group by tbname); ;" %database
|
||||
tdSql.query(sql3)
|
||||
sql_value = tdSql.getData(0,0)
|
||||
self.value_check(sql_value,5)
|
||||
|
||||
sql1 = "select max(q_int) from %s.stable_1_1 ;" %database
|
||||
sql2 = "select max(q_int) from %s.stable_1 where tbname = 'stable_1_1' ;" %database
|
||||
self.constant_check(database,sql1,sql2,0)
|
||||
|
||||
sql3 = "select count(*) from (select max(q_int) from %s.stable_1 group by tbname); ;" %database
|
||||
tdSql.query(sql3)
|
||||
sql_value = tdSql.getData(0,0)
|
||||
self.value_check(sql_value,5)
|
||||
|
||||
def TD_22219_min(self,database):
|
||||
|
||||
sql3 = "select count(*) from (select min(q_int) from %s.stable_1 group by tbname); ;" %database
|
||||
tdSql.query(sql3)
|
||||
sql_value = tdSql.getData(0,0)
|
||||
self.value_check(sql_value,5)
|
||||
|
||||
sql1 = "select min(q_int) from %s.stable_1_1 ;" %database
|
||||
sql2 = "select min(q_int) from %s.stable_1 where tbname = 'stable_1_1' ;" %database
|
||||
self.constant_check(database,sql1,sql2,0)
|
||||
|
||||
sql3 = "select count(*) from (select min(q_int) from %s.stable_1 group by tbname); ;" %database
|
||||
tdSql.query(sql3)
|
||||
sql_value = tdSql.getData(0,0)
|
||||
self.value_check(sql_value,5)
|
||||
|
||||
def constant_check(self,database,sql1,sql2,column):
|
||||
#column = 0 means column 0; column = n means column n-1
|
||||
tdLog.info("\n=============sql1:(%s)___sql2:(%s) ====================\n" %(sql1,sql2))
|
||||
|
||||
tdSql.query(sql1)
|
||||
sql1_value = tdSql.getData(0,column)
|
||||
tdSql.query(sql2)
|
||||
sql2_value = tdSql.getData(0,column)
|
||||
self.value_check(sql1_value,sql2_value)
|
||||
|
||||
tdSql.execute(" flush database %s;" %database)
|
||||
|
||||
time.sleep(3)
|
||||
|
||||
tdSql.query(sql1)
|
||||
sql1_flush_value = tdSql.getData(0,column)
|
||||
tdSql.query(sql2)
|
||||
sql2_flush_value = tdSql.getData(0,column)
|
||||
self.value_check(sql1_flush_value,sql2_flush_value)
|
||||
|
||||
self.value_check(sql1_value,sql1_flush_value)
|
||||
self.value_check(sql2_value,sql2_flush_value)
|
||||
|
||||
def value_check(self,base_value,check_value):
|
||||
if base_value==check_value:
|
||||
tdLog.info(f"checkEqual success, base_value={base_value},check_value={check_value}")
|
||||
else :
|
||||
tdLog.exit(f"checkEqual error, base_value=={base_value},check_value={check_value}")
|
||||
|
||||
def run(self):
|
||||
|
||||
startTime = time.time()
|
||||
|
||||
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
|
||||
|
||||
self.dropandcreateDB_random("%s" %self.db, 2000)
|
||||
|
||||
self.TD_22219_max("%s" %self.db)
|
||||
|
||||
self.dropandcreateDB_random("%s" %self.db, 2000)
|
||||
|
||||
self.TD_22219_min("%s" %self.db)
|
||||
|
||||
endTime = time.time()
|
||||
print("total time %ds" % (endTime - startTime))
|
||||
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
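dropandcreateDB_random above creates num_random child tables and writes n rows into each with distinct per-table timestamps, which is why the two count checks expect num_random*n and n rows, and why the group-by-tbname subqueries in TD_22219_max/TD_22219_min must return exactly one row per child table. A tiny arithmetic sketch of those expectations using the defaults from run():

```python
# Expected row counts for the generated data set (defaults used in run()).
num_random, n = 5, 2000

total_rows = num_random * n   # select count(*) from stable_1
per_child  = n                # select count(*) from stable_1_1
groups     = num_random       # rows from "... group by tbname" subquery

print(total_rows, per_child, groups)  # 10000 2000 5
```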
@ -194,6 +194,11 @@ class TDTestCase:
|
|||
tdSql.checkData(0, 2, None)
|
||||
tdSql.checkData(1, 1, 1)
|
||||
tdSql.checkData(1, 2, '{"k1":1,"k2":"hello"}')
|
||||
|
||||
tdSql.query("select * from information_schema.ins_tables where table_name = 'stt4'")
|
||||
uid1 = tdSql.getData(0, 5)
|
||||
uid2 = tdSql.getData(1, 5)
|
||||
tdSql.checkNotEqual(uid1, uid2)
|
||||
return
|
||||
|
||||
def checkWal1Vgroup(self):
|
||||
|
|
|
@ -218,8 +218,13 @@ void shellRunSingleCommandWebsocketImp(char *command) {
|
|||
res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout);
|
||||
int code = ws_errno(res);
|
||||
if (code != 0 && !shell.stop_query) {
|
||||
et = taosGetTimestampUs();
|
||||
fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
|
||||
// websocket interface masked off first bit from standard error number.
|
||||
if (TSDB_CODE_PAR_SYNTAX_ERROR == (code|0x80000000)) {
|
||||
et = taosGetTimestampUs();
|
||||
fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
|
||||
ws_free_result(res);
|
||||
return;
|
||||
}
|
||||
if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
|
||||
fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
|
||||
} else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
|
||||
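The added branch above relies on the note that the websocket interface masks off the first (highest) bit of the standard error number, so the shell ORs it back with `code | 0x80000000` before comparing against TSDB_CODE_PAR_SYNTAX_ERROR. A small, hedged Python illustration of that bit manipulation; the concrete code value is a made-up stand-in, and the real constants live in the C headers.

```python
# Illustration of the masking described in the shell comment: the websocket
# layer strips the high bit, and OR-ing it back recovers the full code.
FULL_CODE_EXAMPLE = 0x80002600            # assumed stand-in for a TSDB_CODE_* value

masked = FULL_CODE_EXAMPLE & 0x7FFFFFFF   # what the websocket layer reports
restored = masked | 0x80000000            # what the shell compares against

assert restored == FULL_CODE_EXAMPLE
print(hex(masked), hex(restored))         # 0x2600 0x80002600
```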