Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/tsdb_optimize

This commit is contained in:
Hongze Cheng 2023-05-19 10:58:12 +08:00
commit 66ea10755e
73 changed files with 7702 additions and 6135 deletions

View File

@ -424,11 +424,14 @@ The following configuration items apply to TDengine Sink Connector and TDengine
### TDengine Source Connector specific configuration
1. `connection.database`: source database name, no default value.
2. `topic.prefix`: topic name prefix after data is imported into kafka. Use `topic.prefix` + `connection.database` name as the full topic name. Defaults to the empty string "".
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. Default "1970-01-01 00:00:00".
4. `poll.interval.ms`: Pull data interval, the unit is ms. Default is 1000.
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database. Default is 100.
6. `out.format`: The data format. The value could be line or json. The line represents the InfluxDB Line protocol format, and json represents the OpenTSDB JSON format. Default is `line`.
2. `topic.prefix`: topic name prefix used when importing data into Kafka. Its default value is the empty string "".
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, data import to Kafka starts from the first/oldest row in the database.
4. `poll.interval.ms`: The interval, in ms, for checking whether tables have been created or removed. Default is 1000.
5. `fetch.max.rows`: The maximum number of rows retrieved per database query. Default is 100.
6. `out.format`: The data format. The value can be `line`, which represents the InfluxDB Line protocol format.
7. `query.interval.ms`: The time span of data read from TDengine in a single query, in milliseconds. It should be adjusted according to the data inflow rate; the default value is 1000.
8. `topic.per.stable`: If set to true, each super table in TDengine corresponds to one topic in Kafka, named `<topic.prefix>-<connection.database>-<stable.name>`; if set to false, the whole DB corresponds to one topic in Kafka, named `<topic.prefix>-<connection.database>`. A combined example configuration is shown below.
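The following is a minimal sketch of a source connector properties file combining the options above. The `connector.class` and `connection.*` entries are assumptions based on the shared Sink/Source configuration mentioned earlier on this page, and the database name, topic prefix, and credentials are placeholders, not values defined in this section.

```properties
# Minimal sketch of a TDengine Source Connector configuration; all values are
# hypothetical. connector.class and the connection.* keys are assumptions taken
# from the shared Sink/Source configuration, not from this section.
name=TDengineSourceConnector
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
connection.url=jdbc:TAOS://127.0.0.1:6030
connection.user=root
connection.password=taosdata

# Source-connector specific options described above.
# timestamp.initial is omitted, so the import starts from the oldest row in the database.
connection.database=test
topic.prefix=tdengine-source
poll.interval.ms=1000
fetch.max.rows=100
out.format=line
query.interval.ms=1000
# One Kafka topic per super table, e.g. tdengine-source-test-<stable.name>
topic.per.stable=true
```

With this configuration, data from database `test` would be exported in InfluxDB Line protocol format with one Kafka topic per super table; setting `topic.per.stable=false` would instead send the whole database to the single topic `tdengine-source-test`.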
## Other notes

View File

@ -92,7 +92,7 @@ The main features of TDengine are as follows:
## Typical Use Cases
As a high-performance, distributed time-series database (Database) that supports SQL, TDengine's typical use cases include, but are not limited to, IoT, Industrial Internet, connected vehicles, IT operations, energy, and financial securities. It should be noted that TDengine is a purpose-built database and big-data processing tool designed for time-series scenarios; because it fully exploits the characteristics of time-series big data, it cannot be used to handle general-purpose data such as web crawlers, Weibo, WeChat, e-commerce, ERP, or CRM. The following sections analyze the applicable scenarios in more detail.
As a high-performance, distributed time-series database (Time-series Database) that supports SQL, TDengine's typical use cases include, but are not limited to, IoT, Industrial Internet, connected vehicles, IT operations, energy, and financial securities. It should be noted that TDengine is a purpose-built database and big-data processing tool designed for time-series scenarios; because it fully exploits the characteristics of time-series big data, it cannot be used to handle general-purpose data such as web crawlers, Weibo, WeChat, e-commerce, ERP, or CRM. The following sections analyze the applicable scenarios in more detail.
### Characteristics and Requirements of Data Sources

View File

@ -434,11 +434,13 @@ confluent local services connect connector unload TDengineSourceConnector
### TDengine Source Connector specific configuration
1. `connection.database`: source database name; no default value.
2. `topic.prefix`: topic name prefix after data is imported into Kafka. The full topic name is `topic.prefix` + `connection.database` name. Defaults to the empty string "".
3. `timestamp.initial`: data synchronization start time, in the format 'yyyy-MM-dd HH:mm:ss'. Defaults to "1970-01-01 00:00:00".
4. `poll.interval.ms`: data polling interval, in ms. Defaults to 1000.
2. `topic.prefix`: prefix of the topic name used when importing data into Kafka. Defaults to the empty string "".
3. `timestamp.initial`: data synchronization start time, in the format 'yyyy-MM-dd HH:mm:ss'. If not specified, synchronization starts from the earliest record in the specified DB.
4. `poll.interval.ms`: interval, in ms, for checking whether tables have been created or removed. Defaults to 1000.
5. `fetch.max.rows`: maximum number of rows retrieved per database query. Defaults to 100.
6. `out.format`: data format. The value can be `line` or `json`; `line` means the InfluxDB Line protocol format and `json` means the OpenTSDB JSON format. Defaults to `line`.
6. `out.format`: data format. The value `line` means the InfluxDB Line protocol format.
7. `query.interval.ms`: time span of the data read from TDengine in a single query, in ms. It should be configured according to the characteristics of the data in the tables so that a single query reads neither too much nor too little data; in a given environment it is recommended to determine a suitable value through testing. Defaults to 1000.
8. `topic.per.stable`: if set to true, each super table corresponds to one Kafka topic, named `<topic.prefix>-<connection.database>-<stable.name>`; if set to false, all data in the specified DB goes into a single Kafka topic, named `<topic.prefix>-<connection.database>`.
## Other notes

View File

@ -82,7 +82,7 @@ typedef struct STuplePos {
int32_t pageId;
int32_t offset;
};
STupleKey streamTupleKey;
SWinKey streamTupleKey;
};
} STuplePos;
@ -208,11 +208,6 @@ typedef struct SSDataBlock {
SDataBlockInfo info;
} SSDataBlock;
enum {
FETCH_TYPE__DATA = 0,
FETCH_TYPE__NONE,
};
typedef struct SVarColAttr {
int32_t* offset; // start position for each entry in the list
uint32_t length; // used buffer size that contain the valid data

View File

@ -1642,6 +1642,7 @@ typedef struct {
char fqdn[TSDB_FQDN_LEN];
int32_t port;
int8_t force;
int8_t unsafe;
} SDropDnodeReq;
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);

View File

@ -70,286 +70,288 @@
#define TK_RESTORE 52
#define TK_NK_IPTOKEN 53
#define TK_FORCE 54
#define TK_LOCAL 55
#define TK_QNODE 56
#define TK_BNODE 57
#define TK_SNODE 58
#define TK_MNODE 59
#define TK_VNODE 60
#define TK_DATABASE 61
#define TK_USE 62
#define TK_FLUSH 63
#define TK_TRIM 64
#define TK_COMPACT 65
#define TK_IF 66
#define TK_NOT 67
#define TK_EXISTS 68
#define TK_BUFFER 69
#define TK_CACHEMODEL 70
#define TK_CACHESIZE 71
#define TK_COMP 72
#define TK_DURATION 73
#define TK_NK_VARIABLE 74
#define TK_MAXROWS 75
#define TK_MINROWS 76
#define TK_KEEP 77
#define TK_PAGES 78
#define TK_PAGESIZE 79
#define TK_TSDB_PAGESIZE 80
#define TK_PRECISION 81
#define TK_REPLICA 82
#define TK_VGROUPS 83
#define TK_SINGLE_STABLE 84
#define TK_RETENTIONS 85
#define TK_SCHEMALESS 86
#define TK_WAL_LEVEL 87
#define TK_WAL_FSYNC_PERIOD 88
#define TK_WAL_RETENTION_PERIOD 89
#define TK_WAL_RETENTION_SIZE 90
#define TK_WAL_ROLL_PERIOD 91
#define TK_WAL_SEGMENT_SIZE 92
#define TK_STT_TRIGGER 93
#define TK_TABLE_PREFIX 94
#define TK_TABLE_SUFFIX 95
#define TK_NK_COLON 96
#define TK_MAX_SPEED 97
#define TK_START 98
#define TK_TIMESTAMP 99
#define TK_END 100
#define TK_TABLE 101
#define TK_NK_LP 102
#define TK_NK_RP 103
#define TK_STABLE 104
#define TK_ADD 105
#define TK_COLUMN 106
#define TK_MODIFY 107
#define TK_RENAME 108
#define TK_TAG 109
#define TK_SET 110
#define TK_NK_EQ 111
#define TK_USING 112
#define TK_TAGS 113
#define TK_BOOL 114
#define TK_TINYINT 115
#define TK_SMALLINT 116
#define TK_INT 117
#define TK_INTEGER 118
#define TK_BIGINT 119
#define TK_FLOAT 120
#define TK_DOUBLE 121
#define TK_BINARY 122
#define TK_NCHAR 123
#define TK_UNSIGNED 124
#define TK_JSON 125
#define TK_VARCHAR 126
#define TK_MEDIUMBLOB 127
#define TK_BLOB 128
#define TK_VARBINARY 129
#define TK_DECIMAL 130
#define TK_COMMENT 131
#define TK_MAX_DELAY 132
#define TK_WATERMARK 133
#define TK_ROLLUP 134
#define TK_TTL 135
#define TK_SMA 136
#define TK_DELETE_MARK 137
#define TK_FIRST 138
#define TK_LAST 139
#define TK_SHOW 140
#define TK_PRIVILEGES 141
#define TK_DATABASES 142
#define TK_TABLES 143
#define TK_STABLES 144
#define TK_MNODES 145
#define TK_QNODES 146
#define TK_FUNCTIONS 147
#define TK_INDEXES 148
#define TK_ACCOUNTS 149
#define TK_APPS 150
#define TK_CONNECTIONS 151
#define TK_LICENCES 152
#define TK_GRANTS 153
#define TK_QUERIES 154
#define TK_SCORES 155
#define TK_TOPICS 156
#define TK_VARIABLES 157
#define TK_CLUSTER 158
#define TK_BNODES 159
#define TK_SNODES 160
#define TK_TRANSACTIONS 161
#define TK_DISTRIBUTED 162
#define TK_CONSUMERS 163
#define TK_SUBSCRIPTIONS 164
#define TK_VNODES 165
#define TK_ALIVE 166
#define TK_LIKE 167
#define TK_TBNAME 168
#define TK_QTAGS 169
#define TK_AS 170
#define TK_INDEX 171
#define TK_FUNCTION 172
#define TK_INTERVAL 173
#define TK_COUNT 174
#define TK_LAST_ROW 175
#define TK_TOPIC 176
#define TK_META 177
#define TK_CONSUMER 178
#define TK_GROUP 179
#define TK_DESC 180
#define TK_DESCRIBE 181
#define TK_RESET 182
#define TK_QUERY 183
#define TK_CACHE 184
#define TK_EXPLAIN 185
#define TK_ANALYZE 186
#define TK_VERBOSE 187
#define TK_NK_BOOL 188
#define TK_RATIO 189
#define TK_NK_FLOAT 190
#define TK_OUTPUTTYPE 191
#define TK_AGGREGATE 192
#define TK_BUFSIZE 193
#define TK_LANGUAGE 194
#define TK_REPLACE 195
#define TK_STREAM 196
#define TK_INTO 197
#define TK_PAUSE 198
#define TK_RESUME 199
#define TK_TRIGGER 200
#define TK_AT_ONCE 201
#define TK_WINDOW_CLOSE 202
#define TK_IGNORE 203
#define TK_EXPIRED 204
#define TK_FILL_HISTORY 205
#define TK_UPDATE 206
#define TK_SUBTABLE 207
#define TK_UNTREATED 208
#define TK_KILL 209
#define TK_CONNECTION 210
#define TK_TRANSACTION 211
#define TK_BALANCE 212
#define TK_VGROUP 213
#define TK_LEADER 214
#define TK_MERGE 215
#define TK_REDISTRIBUTE 216
#define TK_SPLIT 217
#define TK_DELETE 218
#define TK_INSERT 219
#define TK_NULL 220
#define TK_NK_QUESTION 221
#define TK_NK_ARROW 222
#define TK_ROWTS 223
#define TK_QSTART 224
#define TK_QEND 225
#define TK_QDURATION 226
#define TK_WSTART 227
#define TK_WEND 228
#define TK_WDURATION 229
#define TK_IROWTS 230
#define TK_ISFILLED 231
#define TK_CAST 232
#define TK_NOW 233
#define TK_TODAY 234
#define TK_TIMEZONE 235
#define TK_CLIENT_VERSION 236
#define TK_SERVER_VERSION 237
#define TK_SERVER_STATUS 238
#define TK_CURRENT_USER 239
#define TK_CASE 240
#define TK_WHEN 241
#define TK_THEN 242
#define TK_ELSE 243
#define TK_BETWEEN 244
#define TK_IS 245
#define TK_NK_LT 246
#define TK_NK_GT 247
#define TK_NK_LE 248
#define TK_NK_GE 249
#define TK_NK_NE 250
#define TK_MATCH 251
#define TK_NMATCH 252
#define TK_CONTAINS 253
#define TK_IN 254
#define TK_JOIN 255
#define TK_INNER 256
#define TK_SELECT 257
#define TK_DISTINCT 258
#define TK_WHERE 259
#define TK_PARTITION 260
#define TK_BY 261
#define TK_SESSION 262
#define TK_STATE_WINDOW 263
#define TK_EVENT_WINDOW 264
#define TK_SLIDING 265
#define TK_FILL 266
#define TK_VALUE 267
#define TK_VALUE_F 268
#define TK_NONE 269
#define TK_PREV 270
#define TK_NULL_F 271
#define TK_LINEAR 272
#define TK_NEXT 273
#define TK_HAVING 274
#define TK_RANGE 275
#define TK_EVERY 276
#define TK_ORDER 277
#define TK_SLIMIT 278
#define TK_SOFFSET 279
#define TK_LIMIT 280
#define TK_OFFSET 281
#define TK_ASC 282
#define TK_NULLS 283
#define TK_ABORT 284
#define TK_AFTER 285
#define TK_ATTACH 286
#define TK_BEFORE 287
#define TK_BEGIN 288
#define TK_BITAND 289
#define TK_BITNOT 290
#define TK_BITOR 291
#define TK_BLOCKS 292
#define TK_CHANGE 293
#define TK_COMMA 294
#define TK_CONCAT 295
#define TK_CONFLICT 296
#define TK_COPY 297
#define TK_DEFERRED 298
#define TK_DELIMITERS 299
#define TK_DETACH 300
#define TK_DIVIDE 301
#define TK_DOT 302
#define TK_EACH 303
#define TK_FAIL 304
#define TK_FILE 305
#define TK_FOR 306
#define TK_GLOB 307
#define TK_ID 308
#define TK_IMMEDIATE 309
#define TK_IMPORT 310
#define TK_INITIALLY 311
#define TK_INSTEAD 312
#define TK_ISNULL 313
#define TK_KEY 314
#define TK_MODULES 315
#define TK_NK_BITNOT 316
#define TK_NK_SEMI 317
#define TK_NOTNULL 318
#define TK_OF 319
#define TK_PLUS 320
#define TK_PRIVILEGE 321
#define TK_RAISE 322
#define TK_RESTRICT 323
#define TK_ROW 324
#define TK_SEMI 325
#define TK_STAR 326
#define TK_STATEMENT 327
#define TK_STRICT 328
#define TK_STRING 329
#define TK_TIMES 330
#define TK_VALUES 331
#define TK_VARIABLE 332
#define TK_VIEW 333
#define TK_WAL 334
#define TK_UNSAFE 55
#define TK_LOCAL 56
#define TK_QNODE 57
#define TK_BNODE 58
#define TK_SNODE 59
#define TK_MNODE 60
#define TK_VNODE 61
#define TK_DATABASE 62
#define TK_USE 63
#define TK_FLUSH 64
#define TK_TRIM 65
#define TK_COMPACT 66
#define TK_IF 67
#define TK_NOT 68
#define TK_EXISTS 69
#define TK_BUFFER 70
#define TK_CACHEMODEL 71
#define TK_CACHESIZE 72
#define TK_COMP 73
#define TK_DURATION 74
#define TK_NK_VARIABLE 75
#define TK_MAXROWS 76
#define TK_MINROWS 77
#define TK_KEEP 78
#define TK_PAGES 79
#define TK_PAGESIZE 80
#define TK_TSDB_PAGESIZE 81
#define TK_PRECISION 82
#define TK_REPLICA 83
#define TK_VGROUPS 84
#define TK_SINGLE_STABLE 85
#define TK_RETENTIONS 86
#define TK_SCHEMALESS 87
#define TK_WAL_LEVEL 88
#define TK_WAL_FSYNC_PERIOD 89
#define TK_WAL_RETENTION_PERIOD 90
#define TK_WAL_RETENTION_SIZE 91
#define TK_WAL_ROLL_PERIOD 92
#define TK_WAL_SEGMENT_SIZE 93
#define TK_STT_TRIGGER 94
#define TK_TABLE_PREFIX 95
#define TK_TABLE_SUFFIX 96
#define TK_NK_COLON 97
#define TK_MAX_SPEED 98
#define TK_START 99
#define TK_TIMESTAMP 100
#define TK_END 101
#define TK_TABLE 102
#define TK_NK_LP 103
#define TK_NK_RP 104
#define TK_STABLE 105
#define TK_ADD 106
#define TK_COLUMN 107
#define TK_MODIFY 108
#define TK_RENAME 109
#define TK_TAG 110
#define TK_SET 111
#define TK_NK_EQ 112
#define TK_USING 113
#define TK_TAGS 114
#define TK_BOOL 115
#define TK_TINYINT 116
#define TK_SMALLINT 117
#define TK_INT 118
#define TK_INTEGER 119
#define TK_BIGINT 120
#define TK_FLOAT 121
#define TK_DOUBLE 122
#define TK_BINARY 123
#define TK_NCHAR 124
#define TK_UNSIGNED 125
#define TK_JSON 126
#define TK_VARCHAR 127
#define TK_MEDIUMBLOB 128
#define TK_BLOB 129
#define TK_VARBINARY 130
#define TK_DECIMAL 131
#define TK_COMMENT 132
#define TK_MAX_DELAY 133
#define TK_WATERMARK 134
#define TK_ROLLUP 135
#define TK_TTL 136
#define TK_SMA 137
#define TK_DELETE_MARK 138
#define TK_FIRST 139
#define TK_LAST 140
#define TK_SHOW 141
#define TK_PRIVILEGES 142
#define TK_DATABASES 143
#define TK_TABLES 144
#define TK_STABLES 145
#define TK_MNODES 146
#define TK_QNODES 147
#define TK_FUNCTIONS 148
#define TK_INDEXES 149
#define TK_ACCOUNTS 150
#define TK_APPS 151
#define TK_CONNECTIONS 152
#define TK_LICENCES 153
#define TK_GRANTS 154
#define TK_QUERIES 155
#define TK_SCORES 156
#define TK_TOPICS 157
#define TK_VARIABLES 158
#define TK_CLUSTER 159
#define TK_BNODES 160
#define TK_SNODES 161
#define TK_TRANSACTIONS 162
#define TK_DISTRIBUTED 163
#define TK_CONSUMERS 164
#define TK_SUBSCRIPTIONS 165
#define TK_VNODES 166
#define TK_ALIVE 167
#define TK_LIKE 168
#define TK_TBNAME 169
#define TK_QTAGS 170
#define TK_AS 171
#define TK_INDEX 172
#define TK_FUNCTION 173
#define TK_INTERVAL 174
#define TK_COUNT 175
#define TK_LAST_ROW 176
#define TK_TOPIC 177
#define TK_META 178
#define TK_CONSUMER 179
#define TK_GROUP 180
#define TK_DESC 181
#define TK_DESCRIBE 182
#define TK_RESET 183
#define TK_QUERY 184
#define TK_CACHE 185
#define TK_EXPLAIN 186
#define TK_ANALYZE 187
#define TK_VERBOSE 188
#define TK_NK_BOOL 189
#define TK_RATIO 190
#define TK_NK_FLOAT 191
#define TK_OUTPUTTYPE 192
#define TK_AGGREGATE 193
#define TK_BUFSIZE 194
#define TK_LANGUAGE 195
#define TK_REPLACE 196
#define TK_STREAM 197
#define TK_INTO 198
#define TK_PAUSE 199
#define TK_RESUME 200
#define TK_TRIGGER 201
#define TK_AT_ONCE 202
#define TK_WINDOW_CLOSE 203
#define TK_IGNORE 204
#define TK_EXPIRED 205
#define TK_FILL_HISTORY 206
#define TK_UPDATE 207
#define TK_SUBTABLE 208
#define TK_UNTREATED 209
#define TK_KILL 210
#define TK_CONNECTION 211
#define TK_TRANSACTION 212
#define TK_BALANCE 213
#define TK_VGROUP 214
#define TK_LEADER 215
#define TK_MERGE 216
#define TK_REDISTRIBUTE 217
#define TK_SPLIT 218
#define TK_DELETE 219
#define TK_INSERT 220
#define TK_NULL 221
#define TK_NK_QUESTION 222
#define TK_NK_ARROW 223
#define TK_ROWTS 224
#define TK_QSTART 225
#define TK_QEND 226
#define TK_QDURATION 227
#define TK_WSTART 228
#define TK_WEND 229
#define TK_WDURATION 230
#define TK_IROWTS 231
#define TK_ISFILLED 232
#define TK_CAST 233
#define TK_NOW 234
#define TK_TODAY 235
#define TK_TIMEZONE 236
#define TK_CLIENT_VERSION 237
#define TK_SERVER_VERSION 238
#define TK_SERVER_STATUS 239
#define TK_CURRENT_USER 240
#define TK_CASE 241
#define TK_WHEN 242
#define TK_THEN 243
#define TK_ELSE 244
#define TK_BETWEEN 245
#define TK_IS 246
#define TK_NK_LT 247
#define TK_NK_GT 248
#define TK_NK_LE 249
#define TK_NK_GE 250
#define TK_NK_NE 251
#define TK_MATCH 252
#define TK_NMATCH 253
#define TK_CONTAINS 254
#define TK_IN 255
#define TK_JOIN 256
#define TK_INNER 257
#define TK_SELECT 258
#define TK_DISTINCT 259
#define TK_WHERE 260
#define TK_PARTITION 261
#define TK_BY 262
#define TK_SESSION 263
#define TK_STATE_WINDOW 264
#define TK_EVENT_WINDOW 265
#define TK_SLIDING 266
#define TK_FILL 267
#define TK_VALUE 268
#define TK_VALUE_F 269
#define TK_NONE 270
#define TK_PREV 271
#define TK_NULL_F 272
#define TK_LINEAR 273
#define TK_NEXT 274
#define TK_HAVING 275
#define TK_RANGE 276
#define TK_EVERY 277
#define TK_ORDER 278
#define TK_SLIMIT 279
#define TK_SOFFSET 280
#define TK_LIMIT 281
#define TK_OFFSET 282
#define TK_ASC 283
#define TK_NULLS 284
#define TK_ABORT 285
#define TK_AFTER 286
#define TK_ATTACH 287
#define TK_BEFORE 288
#define TK_BEGIN 289
#define TK_BITAND 290
#define TK_BITNOT 291
#define TK_BITOR 292
#define TK_BLOCKS 293
#define TK_CHANGE 294
#define TK_COMMA 295
#define TK_CONCAT 296
#define TK_CONFLICT 297
#define TK_COPY 298
#define TK_DEFERRED 299
#define TK_DELIMITERS 300
#define TK_DETACH 301
#define TK_DIVIDE 302
#define TK_DOT 303
#define TK_EACH 304
#define TK_FAIL 305
#define TK_FILE 306
#define TK_FOR 307
#define TK_GLOB 308
#define TK_ID 309
#define TK_IMMEDIATE 310
#define TK_IMPORT 311
#define TK_INITIALLY 312
#define TK_INSTEAD 313
#define TK_ISNULL 314
#define TK_KEY 315
#define TK_MODULES 316
#define TK_NK_BITNOT 317
#define TK_NK_SEMI 318
#define TK_NOTNULL 319
#define TK_OF 320
#define TK_PLUS 321
#define TK_PRIVILEGE 322
#define TK_RAISE 323
#define TK_RESTRICT 324
#define TK_ROW 325
#define TK_SEMI 326
#define TK_STAR 327
#define TK_STATEMENT 328
#define TK_STRICT 329
#define TK_STRING 330
#define TK_TIMES 331
#define TK_VALUES 332
#define TK_VARIABLE 333
#define TK_VIEW 334
#define TK_WAL 335
#define TK_NK_SPACE 600

View File

@ -190,8 +190,6 @@ STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int
SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);
void verifyOffset(void *pWalReader, STqOffsetVal* pOffset);
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
void qStreamSetOpen(qTaskInfo_t tinfo);

View File

@ -249,6 +249,7 @@ typedef struct SDropDnodeStmt {
char fqdn[TSDB_FQDN_LEN];
int32_t port;
bool force;
bool unsafe;
} SDropDnodeStmt;
typedef struct SAlterDnodeStmt {

View File

@ -86,9 +86,8 @@ typedef struct {
int64_t number;
} SStreamStateCur;
int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen);
int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key);
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen);
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);

View File

@ -110,14 +110,14 @@ typedef struct {
int64_t ver;
int32_t* dataRef;
SPackedData submit;
} SStreamDataSubmit2;
} SStreamDataSubmit;
typedef struct {
int8_t type;
int64_t ver;
SArray* dataRefs; // SArray<int32_t*>
SArray* submits; // SArray<SPackedSubmit>
} SStreamMergedSubmit2;
} SStreamMergedSubmit;
typedef struct {
int8_t type;
@ -205,10 +205,10 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
void* streamQueueNextItem(SStreamQueue* queue);
SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit);
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit);
SStreamDataSubmit* streamSubmitBlockClone(SStreamDataSubmit* pSubmit);
typedef struct {
char* qmsg;

View File

@ -38,8 +38,8 @@ typedef SList SStreamSnapshot;
typedef TSKEY (*GetTsFun)(void*);
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, GetTsFun fp, void* pFile,
TSKEY delMark);
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
GetTsFun fp, void* pFile, TSKEY delMark);
void streamFileStateDestroy(SStreamFileState* pFileState);
void streamFileStateClear(SStreamFileState* pFileState);
bool needClearDiskBuff(SStreamFileState* pFileState);
@ -56,6 +56,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState);
int32_t getSnapshotIdList(SStreamFileState* pFileState, SArray* list);
int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark);
int32_t streamFileStateGeSelectRowSize(SStreamFileState* pFileState);
#ifdef __cplusplus
}

View File

@ -142,7 +142,7 @@ typedef struct {
typedef struct SWalReader SWalReader;
// todo hide this struct
typedef struct SWalReader {
struct SWalReader {
SWal *pWal;
int64_t readerId;
TdFilePtr pLogFile;
@ -154,7 +154,7 @@ typedef struct SWalReader {
SWalFilterCond cond;
// TODO remove it
SWalCkHead *pHead;
} SWalReader;
};
// module initialization
int32_t walInit();
@ -201,6 +201,7 @@ int32_t walNextValidMsg(SWalReader *pRead);
int64_t walReaderGetCurrentVer(const SWalReader *pReader);
int64_t walReaderGetValidFirstVer(const SWalReader *pReader);
void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *ever);
void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset);
// only for tq usage
void walSetReaderCapacity(SWalReader *pRead, int32_t capacity);

View File

@ -408,6 +408,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MNODE_NOT_CATCH_UP TAOS_DEF_ERROR_CODE(0, 0x0412) // internal
#define TSDB_CODE_MNODE_ALREADY_IS_VOTER TAOS_DEF_ERROR_CODE(0, 0x0413) // internal
#define TSDB_CODE_MNODE_ONLY_TWO_MNODE TAOS_DEF_ERROR_CODE(0, 0x0414) // internal
#define TSDB_CODE_MNODE_NO_NEED_RESTORE TAOS_DEF_ERROR_CODE(0, 0x0415) // internal
// vnode
// #define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x
@ -445,6 +446,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_NOT_CATCH_UP TAOS_DEF_ERROR_CODE(0, 0x0532) // internal
#define TSDB_CODE_VND_ALREADY_IS_VOTER TAOS_DEF_ERROR_CODE(0, 0x0533) // internal
#define TSDB_CODE_VND_DIR_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0534)
#define TSDB_CODE_VND_META_DATA_UNSAFE_DELETE TAOS_DEF_ERROR_CODE(0, 0x0535)
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)

View File

@ -1701,6 +1701,7 @@ int32_t tSerializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq)
if (tEncodeCStr(&encoder, pReq->fqdn) < 0) return -1;
if (tEncodeI32(&encoder, pReq->port) < 0) return -1;
if (tEncodeI8(&encoder, pReq->force) < 0) return -1;
if (tEncodeI8(&encoder, pReq->unsafe) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -1717,6 +1718,12 @@ int32_t tDeserializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq
if (tDecodeCStrTo(&decoder, pReq->fqdn) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->port) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->force) < 0) return -1;
if (!tDecodeIsEnd(&decoder)) {
if (tDecodeI8(&decoder, &pReq->unsafe) < 0) return -1;
} else {
pReq->unsafe = false;
}
tEndDecode(&decoder);
tDecoderClear(&decoder);

View File

@ -23,6 +23,10 @@ static inline void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
SEpSet epSet = {0};
dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);
if (epSet.numOfEps == 1) {
return;
}
const int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->pCont = rpcMallocCont(contLen);
if (pMsg->pCont == NULL) {

View File

@ -47,6 +47,7 @@ int32_t mndAllocStbSchemas(const SStbObj *pOld, SStbObj *pNew);
int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId);
void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pStb, int32_t *pContLen, void *alterOriData,
int32_t alterOriDataLen);
int32_t mndSetForceDropCreateStbRedoActions(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup, SStbObj *pStb);
#ifdef __cplusplus
}

View File

@ -40,7 +40,7 @@ int32_t mndAddCreateVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *p
int32_t mndAddAlterVnodeConfirmAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup);
int32_t mndAddAlterVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, tmsg_t msgType);
int32_t mndAddDropVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid, bool isRedo);
int32_t mndSetMoveVgroupsInfoToTrans(SMnode *, STrans *pTrans, int32_t dropDnodeId, bool force);
int32_t mndSetMoveVgroupsInfoToTrans(SMnode *, STrans *pTrans, int32_t dropDnodeId, bool force, bool unsafe);
int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pVgroup,
SArray *pArray);
int32_t mndBuildCompactVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs,

View File

@ -804,7 +804,7 @@ int32_t mndProcessRestoreDnodeReqImpl(SRpcMsg *pReq){
#endif
static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SMnodeObj *pMObj, SQnodeObj *pQObj,
SSnodeObj *pSObj, int32_t numOfVnodes, bool force) {
SSnodeObj *pSObj, int32_t numOfVnodes, bool force, bool unsafe) {
int32_t code = -1;
SSdbRaw *pRaw = NULL;
STrans *pTrans = NULL;
@ -844,7 +844,7 @@ static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SM
if (numOfVnodes > 0) {
mInfo("trans:%d, %d vnodes on dnode:%d will be dropped", pTrans->id, numOfVnodes, pDnode->id);
if (mndSetMoveVgroupsInfoToTrans(pMnode, pTrans, pDnode->id, force) != 0) goto _OVER;
if (mndSetMoveVgroupsInfoToTrans(pMnode, pTrans, pDnode->id, force, unsafe) != 0) goto _OVER;
}
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@ -871,11 +871,18 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
mInfo("dnode:%d, start to drop, ep:%s:%d", dropReq.dnodeId, dropReq.fqdn, dropReq.port);
mInfo("dnode:%d, start to drop, ep:%s:%d, force:%s, unsafe:%s",
dropReq.dnodeId, dropReq.fqdn, dropReq.port, dropReq.force?"true":"false", dropReq.unsafe?"true":"false");
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) {
goto _OVER;
}
bool force = dropReq.force;
if(dropReq.unsafe)
{
force = true;
}
pDnode = mndAcquireDnode(pMnode, dropReq.dnodeId);
if (pDnode == NULL) {
int32_t err = terrno;
@ -903,7 +910,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
}
int32_t numOfVnodes = mndGetVnodesNum(pMnode, pDnode->id);
if ((numOfVnodes > 0 || pMObj != NULL || pSObj != NULL || pQObj != NULL) && !dropReq.force) {
if ((numOfVnodes > 0 || pMObj != NULL || pSObj != NULL || pQObj != NULL) && !force) {
if (!mndIsDnodeOnline(pDnode, taosGetTimestampMs())) {
terrno = TSDB_CODE_DNODE_OFFLINE;
mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
@ -912,7 +919,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
}
}
code = mndDropDnode(pMnode, pReq, pDnode, pMObj, pQObj, pSObj, numOfVnodes, dropReq.force);
code = mndDropDnode(pMnode, pReq, pDnode, pMObj, pQObj, pSObj, numOfVnodes, force, dropReq.unsafe);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
_OVER:

View File

@ -687,6 +687,31 @@ static int32_t mndSetCreateStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj
return 0;
}
int32_t mndSetForceDropCreateStbRedoActions(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup, SStbObj *pStb) {
SSdb *pSdb = pMnode->pSdb;
int32_t contLen;
void *pReq = mndBuildVCreateStbReq(pMnode, pVgroup, pStb, &contLen, NULL, 0);
if (pReq == NULL) {
return -1;
}
STransAction action = {0};
action.mTraceId = pTrans->mTraceId;
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
action.pCont = pReq;
action.contLen = contLen;
action.msgType = TDMT_VND_CREATE_STB;
action.acceptableCode = TSDB_CODE_TDB_STB_ALREADY_EXIST;
action.retryCode = TSDB_CODE_TDB_STB_NOT_EXIST;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
return -1;
}
return 0;
}
static int32_t mndSetCreateStbUndoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;

View File

@ -1306,7 +1306,7 @@ int32_t mndPauseAllStreamTasks(STrans *pTrans, SStreamObj *pStream) {
int32_t sz = taosArrayGetSize(pTasks);
for (int32_t j = 0; j < sz; j++) {
SStreamTask *pTask = taosArrayGetP(pTasks, j);
if (pTask->taskLevel == TASK_LEVEL__SOURCE && mndPauseStreamTask(pTrans, pTask) < 0) {
if (pTask->taskLevel != TASK_LEVEL__SINK && mndPauseStreamTask(pTrans, pTask) < 0) {
return -1;
}
}
@ -1430,7 +1430,7 @@ int32_t mndResumeAllStreamTasks(STrans *pTrans, SStreamObj *pStream, int8_t igUn
int32_t sz = taosArrayGetSize(pTasks);
for (int32_t j = 0; j < sz; j++) {
SStreamTask *pTask = taosArrayGetP(pTasks, j);
if (pTask->taskLevel == TASK_LEVEL__SOURCE && mndResumeStreamTask(pTrans, pTask, igUntreated) < 0) {
if (pTask->taskLevel != TASK_LEVEL__SINK && mndResumeStreamTask(pTrans, pTask, igUntreated) < 0) {
return -1;
}
}

View File

@ -23,6 +23,7 @@
#include "mndTrans.h"
#include "mndUser.h"
#include "tmisce.h"
#include "mndStb.h"
#define VGROUP_VER_NUMBER 1
#define VGROUP_RESERVE_SIZE 64
@ -1378,7 +1379,7 @@ int32_t mndAddDropVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgOb
}
int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t vnIndex,
SArray *pArray, bool force) {
SArray *pArray, bool force, bool unsafe) {
SVgObj newVg = {0};
memcpy(&newVg, pVgroup, sizeof(SVgObj));
@ -1455,7 +1456,7 @@ int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
mInfo("vgId:%d, will add 1 vnode and force remove 1 vnode", pVgroup->vgId);
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVg, pArray) != 0) return -1;
newVg.replica--;
SVnodeGid del = newVg.vnodeGid[vnIndex];
//SVnodeGid del = newVg.vnodeGid[vnIndex];
newVg.vnodeGid[vnIndex] = newVg.vnodeGid[newVg.replica];
memset(&newVg.vnodeGid[newVg.replica], 0, sizeof(SVnodeGid));
{
@ -1476,7 +1477,31 @@ int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVg, &newVg.vnodeGid[vnIndex]) != 0) return -1;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg) != 0) return -1;
if (newVg.replica == 1) {
if(newVg.replica == 1){
if(force && !unsafe){
terrno = TSDB_CODE_VND_META_DATA_UNSAFE_DELETE;
return -1;
}
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
SStbObj *pStb = NULL;
pIter = sdbFetch(pSdb, SDB_STB, pIter, (void **)&pStb);
if (pIter == NULL) break;
if (strcmp(pStb->db, pDb->name) == 0) {
if (mndSetForceDropCreateStbRedoActions(pMnode, pTrans, &newVg, pStb) != 0) {
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pStb);
return -1;
}
}
sdbRelease(pSdb, pStb);
}
mInfo("vgId:%d, all data is dropped since replica=1", pVgroup->vgId);
}
}
@ -1498,7 +1523,7 @@ int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
return 0;
}
int32_t mndSetMoveVgroupsInfoToTrans(SMnode *pMnode, STrans *pTrans, int32_t delDnodeId, bool force) {
int32_t mndSetMoveVgroupsInfoToTrans(SMnode *pMnode, STrans *pTrans, int32_t delDnodeId, bool force, bool unsafe) {
int32_t code = 0;
SArray *pArray = mndBuildDnodesArray(pMnode, delDnodeId);
if (pArray == NULL) return -1;
@ -1521,7 +1546,7 @@ int32_t mndSetMoveVgroupsInfoToTrans(SMnode *pMnode, STrans *pTrans, int32_t del
if (vnIndex != -1) {
mInfo("vgId:%d, vnode:%d will be removed from dnode:%d, force:%d", pVgroup->vgId, vnIndex, delDnodeId, force);
SDbObj *pDb = mndAcquireDb(pMnode, pVgroup->dbName);
code = mndSetMoveVgroupInfoToTrans(pMnode, pTrans, pDb, pVgroup, vnIndex, pArray, force);
code = mndSetMoveVgroupInfoToTrans(pMnode, pTrans, pDb, pVgroup, vnIndex, pArray, force, unsafe);
mndReleaseDb(pMnode, pDb);
}

View File

@ -185,7 +185,6 @@ typedef struct STsdbReader STsdbReader;
int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableList, int32_t numOfTables,
SSDataBlock *pResBlock, STsdbReader **ppReader, const char *idstr, bool countOnly, SHashObj** pIgnoreTables);
int32_t tsdbSetTableList(STsdbReader *pReader, const void *pTableList, int32_t num);
void tsdbReaderSetId(STsdbReader *pReader, const char *idstr);
void tsdbReaderClose(STsdbReader *pReader);
int32_t tsdbNextDataBlock(STsdbReader *pReader, bool *hasNext);
@ -198,8 +197,6 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle);
void *tsdbGetIdx(SMeta *pMeta);
void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t tsdbGetReaderMaxVersion(STsdbReader *pReader);
int32_t tsdbSetTableList(STsdbReader *pReader, const void *pTableList, int32_t num);
void tsdbReaderSetId(STsdbReader *pReader, const char *idstr);
void tsdbReaderSetCloseFlag(STsdbReader *pReader);
int32_t tsdbReuseCacherowsReader(void* pReader, void* pTableIdList, int32_t numOfTables);
@ -267,7 +264,7 @@ int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqSeekVer(STqReader *pReader, int64_t ver, const char *id);
int32_t tqNextBlockInWal(STqReader* pReader);
bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, const char* id);

View File

@ -46,28 +46,28 @@ typedef struct STqOffsetStore STqOffsetStore;
// tqPush
//typedef struct {
// // msg info
// int64_t consumerId;
// int64_t reqOffset;
// int64_t processedVer;
// int32_t epoch;
// // rpc info
// int64_t reqId;
// SRpcHandleInfo rpcInfo;
// tmr_h timerId;
// int8_t tmrStopped;
// // exec
// int8_t inputStatus;
// int8_t execStatus;
// SStreamQueue inputQ;
// SRWLatch lock;
//} STqPushHandle;
// typedef struct {
// // msg info
// int64_t consumerId;
// int64_t reqOffset;
// int64_t processedVer;
// int32_t epoch;
// // rpc info
// int64_t reqId;
// SRpcHandleInfo rpcInfo;
// tmr_h timerId;
// int8_t tmrStopped;
// // exec
// int8_t inputStatus;
// int8_t execStatus;
// SStreamQueue inputQ;
// SRWLatch lock;
// } STqPushHandle;
// tqExec
typedef struct {
char* qmsg; // SubPlanToString
char* qmsg; // SubPlanToString
} STqExecCol;
typedef struct {
@ -79,35 +79,35 @@ typedef struct {
} STqExecDb;
typedef struct {
int8_t subType;
STqReader* pTqReader;
qTaskInfo_t task;
int8_t subType;
STqReader* pTqReader;
qTaskInfo_t task;
union {
STqExecCol execCol;
STqExecTb execTb;
STqExecDb execDb;
};
int32_t numOfCols; // number of out pout column, temporarily used
int32_t numOfCols; // number of out pout column, temporarily used
} STqExecHandle;
typedef enum tq_handle_status{
typedef enum tq_handle_status {
TMQ_HANDLE_STATUS_IDLE = 0,
TMQ_HANDLE_STATUS_EXEC = 1,
}tq_handle_status;
} tq_handle_status;
typedef struct {
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int64_t consumerId;
int32_t epoch;
int8_t fetchMeta;
int64_t snapshotVer;
SWalReader* pWalReader;
SWalRef* pRef;
// STqPushHandle pushHandle; // push
STqExecHandle execHandle; // exec
SRpcMsg* msg;
int32_t noDataPollCnt;
tq_handle_status status;
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int64_t consumerId;
int32_t epoch;
int8_t fetchMeta;
int64_t snapshotVer;
SWalReader* pWalReader;
SWalRef* pRef;
// STqPushHandle pushHandle; // push
STqExecHandle execHandle; // exec
SRpcMsg* msg;
int32_t noDataPollCnt;
tq_handle_status status;
} STqHandle;
struct STQ {
@ -147,8 +147,8 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
// tqExec
int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxRsp* pRsp, int32_t* totalRows);
int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision);
int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp, int32_t type,
int32_t vgId);
int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp,
int32_t type, int32_t vgId);
int32_t tqPushDataRsp(STqHandle* pHandle, int32_t vgId);
// tqMeta

View File

@ -305,10 +305,6 @@ void tsdbUntakeReadSnap(STsdbReader *pReader, STsdbReadSnap *pSnap, bool proa
// tsdbMerge.c ==============================================================================================
int32_t tsdbMerge(STsdb *pTsdb);
#define TSDB_CACHE_NO(c) ((c).cacheLast == 0)
#define TSDB_CACHE_LAST_ROW(c) (((c).cacheLast & 1) > 0)
#define TSDB_CACHE_LAST(c) (((c).cacheLast & 2) > 0)
// tsdbDiskData ==============================================================================================
int32_t tDiskDataBuilderCreate(SDiskDataBuilder **ppBuilder);
void *tDiskDataBuilderDestroy(SDiskDataBuilder *pBuilder);
@ -346,13 +342,17 @@ struct STsdbFS {
};
typedef struct {
rocksdb_t *db;
rocksdb_options_t *options;
rocksdb_flushoptions_t *flushoptions;
rocksdb_writeoptions_t *writeoptions;
rocksdb_readoptions_t *readoptions;
rocksdb_writebatch_t *writebatch;
TdThreadMutex rMutex;
rocksdb_t *db;
rocksdb_comparator_t *my_comparator;
rocksdb_cache_t *blockcache;
rocksdb_block_based_table_options_t *tableoptions;
rocksdb_options_t *options;
rocksdb_flushoptions_t *flushoptions;
rocksdb_writeoptions_t *writeoptions;
rocksdb_readoptions_t *readoptions;
rocksdb_writebatch_t *writebatch;
TdThreadMutex rMutex;
STSchema *pTSchema;
} SRocksCache;
struct STsdb {
@ -781,7 +781,7 @@ typedef struct SLDataIter {
#define tMergeTreeGetRow(_t) (&((_t)->pIter->rInfo.row))
int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
bool destroyLoadInfo, const char *idStr, bool strictTimeRange, SLDataIter* pLDataIter);
bool destroyLoadInfo, const char *idStr, bool strictTimeRange, SLDataIter *pLDataIter);
void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter);
bool tMergeTreeNext(SMergeTree *pMTree);
bool tMergeTreeIgnoreEarlierTs(SMergeTree *pMTree);
@ -821,13 +821,15 @@ typedef struct SCacheRowsReader {
typedef struct {
TSKEY ts;
int8_t dirty;
SColVal colVal;
} SLastCol;
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *row);
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int32_t ltype);
int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype);
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype);
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb *pTsdb);

View File

@ -197,7 +197,7 @@ void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg);
int tqUnregisterPushHandle(STQ* pTq, void* pHandle);
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
@ -219,8 +219,6 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessSubmitReqForSubscribe(STQ* pTq);
int32_t tqProcessDeleteDataReq(STQ* pTq, void* pReq, int32_t len, int64_t ver);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
@ -405,6 +403,10 @@ struct SVnode {
#define VND_IS_RSMA(v) ((v)->config.isRsma == 1)
#define VND_IS_TSMA(v) ((v)->config.isTsma == 1)
#define TSDB_CACHE_NO(c) ((c).cacheLast == 0)
#define TSDB_CACHE_LAST_ROW(c) (((c).cacheLast & 1) > 0)
#define TSDB_CACHE_LAST(c) (((c).cacheLast & 2) > 0)
struct STbUidStore {
tb_uid_t suid;
SArray* tbUids;

View File

@ -1195,159 +1195,6 @@ int32_t extractDelDataBlock(const void* pData, int32_t len, int64_t ver, SStream
return TSDB_CODE_SUCCESS;
}
int32_t tqProcessDeleteDataReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
bool failed = false;
SDecoder* pCoder = &(SDecoder){0};
SDeleteRes* pRes = &(SDeleteRes){0};
pRes->uidList = taosArrayInit(0, sizeof(tb_uid_t));
if (pRes->uidList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
failed = true;
}
tDecoderInit(pCoder, pReq, len);
tDecodeDeleteRes(pCoder, pRes);
tDecoderClear(pCoder);
int32_t sz = taosArrayGetSize(pRes->uidList);
if (sz == 0 || pRes->affectedRows == 0) {
taosArrayDestroy(pRes->uidList);
return 0;
}
SSDataBlock* pDelBlock = createSpecialDataBlock(STREAM_DELETE_DATA);
blockDataEnsureCapacity(pDelBlock, sz);
pDelBlock->info.rows = sz;
pDelBlock->info.version = ver;
for (int32_t i = 0; i < sz; i++) {
// start key column
SColumnInfoData* pStartCol = taosArrayGet(pDelBlock->pDataBlock, START_TS_COLUMN_INDEX);
colDataSetVal(pStartCol, i, (const char*)&pRes->skey, false); // end key column
SColumnInfoData* pEndCol = taosArrayGet(pDelBlock->pDataBlock, END_TS_COLUMN_INDEX);
colDataSetVal(pEndCol, i, (const char*)&pRes->ekey, false);
// uid column
SColumnInfoData* pUidCol = taosArrayGet(pDelBlock->pDataBlock, UID_COLUMN_INDEX);
int64_t* pUid = taosArrayGet(pRes->uidList, i);
colDataSetVal(pUidCol, i, (const char*)pUid, false);
colDataSetNULL(taosArrayGet(pDelBlock->pDataBlock, GROUPID_COLUMN_INDEX), i);
colDataSetNULL(taosArrayGet(pDelBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX), i);
colDataSetNULL(taosArrayGet(pDelBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX), i);
}
taosArrayDestroy(pRes->uidList);
int32_t* pRef = taosMemoryMalloc(sizeof(int32_t));
*pRef = 1;
taosWLockLatch(&pTq->pStreamMeta->lock);
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
if (pIter == NULL) {
break;
}
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->taskLevel != TASK_LEVEL__SOURCE) {
continue;
}
qDebug("s-task:%s delete req enqueue, ver: %" PRId64, pTask->id.idStr, ver);
if (!failed) {
SStreamRefDataBlock* pRefBlock = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);
pRefBlock->type = STREAM_INPUT__REF_DATA_BLOCK;
pRefBlock->pBlock = pDelBlock;
if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pRefBlock) < 0) {
atomic_sub_fetch_32(pRef, 1);
taosFreeQitem(pRefBlock);
continue;
}
if (streamSchedExec(pTask) < 0) {
qError("s-task:%s stream task launch failed", pTask->id.idStr);
continue;
}
} else {
streamTaskInputFail(pTask);
}
}
taosWUnLockLatch(&pTq->pStreamMeta->lock);
int32_t ref = atomic_sub_fetch_32(pRef, 1);
if (ref == 0) {
blockDataDestroy(pDelBlock);
taosMemoryFree(pRef);
}
#if 0
SStreamDataBlock* pStreamBlock = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
pStreamBlock->type = STREAM_INPUT__DATA_BLOCK;
pStreamBlock->blocks = taosArrayInit(0, sizeof(SSDataBlock));
SSDataBlock block = {0};
assignOneDataBlock(&block, pDelBlock);
block.info.type = STREAM_DELETE_DATA;
taosArrayPush(pStreamBlock->blocks, &block);
if (!failed) {
if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pStreamBlock) < 0) {
qError("stream task input del failed, task id %d", pTask->id.taskId);
continue;
}
if (streamSchedExec(pTask) < 0) {
qError("stream task launch failed, task id %d", pTask->id.taskId);
continue;
}
} else {
streamTaskInputFail(pTask);
}
}
blockDataDestroy(pDelBlock);
#endif
return 0;
}
int32_t tqProcessSubmitReqForSubscribe(STQ* pTq) {
int32_t vgId = TD_VID(pTq->pVnode);
taosWLockLatch(&pTq->lock);
if (taosHashGetSize(pTq->pPushMgr) > 0) {
void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
while (pIter) {
STqHandle* pHandle = *(STqHandle**)pIter;
tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);
if (ASSERT(pHandle->msg != NULL)) {
tqError("pHandle->msg should not be null");
break;
}else{
SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info};
tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg);
taosMemoryFree(pHandle->msg);
pHandle->msg = NULL;
}
pIter = taosHashIterate(pTq->pPushMgr, pIter);
}
taosHashClear(pTq->pPushMgr);
}
// unlock
taosWUnLockLatch(&pTq->lock);
return 0;
}
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRunReq* pReq = pMsg->pCont;
@ -1449,7 +1296,11 @@ int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
}
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
tqStartStreamTasks(pTq);
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
tqStartStreamTasks(pTq);
} else {
streamSchedExec(pTask);
}
}
return 0;

View File

@ -16,8 +16,40 @@
#include "tq.h"
#include "vnd.h"
int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
int32_t tqProcessSubmitReqForSubscribe(STQ* pTq) {
int32_t vgId = TD_VID(pTq->pVnode);
taosWLockLatch(&pTq->lock);
if (taosHashGetSize(pTq->pPushMgr) > 0) {
void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
while (pIter) {
STqHandle* pHandle = *(STqHandle**)pIter;
tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);
if (ASSERT(pHandle->msg != NULL)) {
tqError("pHandle->msg should not be null");
break;
}else{
SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info};
tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg);
taosMemoryFree(pHandle->msg);
pHandle->msg = NULL;
}
pIter = taosHashIterate(pTq->pPushMgr, pIter);
}
taosHashClear(pTq->pPushMgr);
}
// unlock
taosWUnLockLatch(&pTq->lock);
return 0;
}
int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
if (msgType == TDMT_VND_SUBMIT) {
tqProcessSubmitReqForSubscribe(pTq);
}
@ -37,10 +69,6 @@ int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t v
if (msgType == TDMT_VND_SUBMIT || msgType == TDMT_VND_DELETE) {
tqStartStreamTasks(pTq);
}
if (msgType == TDMT_VND_DELETE) {
// tqProcessDeleteDataReq(pTq, POINTER_SHIFT(msg, sizeof(SMsgHead)), msgLen - sizeof(SMsgHead), ver);
}
}
return 0;

View File

@ -325,7 +325,7 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, const char* id) {
memcpy(data, pBody, len);
SPackedData data1 = (SPackedData){.ver = ver, .msgLen = len, .msgStr = data};
*pItem = (SStreamQueueItem*)streamDataSubmitNew(data1, STREAM_INPUT__DATA_SUBMIT);
*pItem = (SStreamQueueItem*)streamDataSubmitNew(&data1, STREAM_INPUT__DATA_SUBMIT);
if (*pItem == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("%s failed to create data submit for stream since out of memory", id);
@ -344,7 +344,7 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, const char* id) {
}
// todo ignore the error in wal?
int32_t tqNextBlockInWal(STqReader* pReader) {
bool tqNextBlockInWal(STqReader* pReader, const char* id) {
SWalReader* pWalReader = pReader->pWalReader;
while (1) {
@ -353,7 +353,7 @@ int32_t tqNextBlockInWal(STqReader* pReader) {
// try next message in wal file
// todo always retry to avoid read failure caused by wal file deletion
if (walNextValidMsg(pWalReader) < 0) {
return FETCH_TYPE__NONE;
return false;
}
void* pBody = POINTER_SHIFT(pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
@ -379,24 +379,24 @@ int32_t tqNextBlockInWal(STqReader* pReader) {
if (tDecodeSubmitReq(&decoder, &pReader->submit) < 0) {
tDecoderClear(&decoder);
tqError("decode wal file error, msgLen:%d, ver:%" PRId64, bodyLen, ver);
return FETCH_TYPE__NONE;
return false;
}
tDecoderClear(&decoder);
pReader->nextBlk = 0;
}
size_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
while (pReader->nextBlk < numOfBlocks) {
tqDebug("tq reader next data block %p, %d %" PRId64 " %d", pReader->msg.msgStr, pReader->msg.msgLen,
pReader->msg.ver, pReader->nextBlk);
tqDebug("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk,
numOfBlocks, pReader->msg.msgLen, pReader->msg.ver, pReader->nextBlk);
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
if (pReader->tbIdHash == NULL) {
int32_t code = tqRetrieveDataBlock(pReader, NULL);
if (code == TSDB_CODE_SUCCESS && pReader->pResBlock->info.rows > 0) {
return FETCH_TYPE__DATA;
return true;
}
}
@ -406,7 +406,7 @@ int32_t tqNextBlockInWal(STqReader* pReader) {
int32_t code = tqRetrieveDataBlock(pReader, NULL);
if (code == TSDB_CODE_SUCCESS && pReader->pResBlock->info.rows > 0) {
return FETCH_TYPE__DATA;
return true;
}
} else {
pReader->nextBlk += 1;
@ -414,7 +414,9 @@ int32_t tqNextBlockInWal(STqReader* pReader) {
}
}
qDebug("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id);
tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
pReader->msg.msgStr = NULL;
}
}
@ -445,8 +447,8 @@ bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {
int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
while (pReader->nextBlk < numOfBlocks) {
tqDebug("tq reader next data block, len:%d ver:%" PRId64 " index:%d/%d, %s", pReader->msg.msgLen,
pReader->msg.ver, pReader->nextBlk, numOfBlocks, idstr);
tqDebug("tq reader next data block, len:%d ver:%" PRId64 " index:%d/%d, %s", pReader->msg.msgLen, pReader->msg.ver,
pReader->nextBlk, numOfBlocks, idstr);
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
if (pReader->tbIdHash == NULL) {
@ -703,8 +705,9 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, const char* id) {
SColVal colVal;
tRowGet(pRow, pTSchema, sourceIdx, &colVal);
if (colVal.cid < pColData->info.colId) {
// tqDebug("colIndex:%d column id:%d in row, ignore, the required colId:%d, total cols in schema:%d",
// sourceIdx, colVal.cid, pColData->info.colId, pTSchema->numOfCols);
// tqDebug("colIndex:%d column id:%d in row, ignore, the required colId:%d, total cols in
// schema:%d",
// sourceIdx, colVal.cid, pColData->info.colId, pTSchema->numOfCols);
sourceIdx++;
continue;
} else if (colVal.cid == pColData->info.colId) {
@ -729,13 +732,17 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, const char* id) {
return 0;
}
// todo refactor:
int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet) {
tqDebug("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk);
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
pReader->nextBlk++;
if (pSubmitTbDataRet) *pSubmitTbDataRet = pSubmitTbData;
if (pSubmitTbDataRet) {
*pSubmitTbDataRet = pSubmitTbData;
}
int32_t sversion = pSubmitTbData->sver;
int64_t suid = pSubmitTbData->suid;
int64_t uid = pSubmitTbData->uid;

View File

@ -15,7 +15,7 @@
#include "tq.h"
static int32_t createStreamRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle);
static int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle);
// this function should be executed by stream threads.
// extract submit block from WAL, and add them into the input queue for the sources tasks.
@ -30,7 +30,7 @@ int32_t tqStreamTasksScanWal(STQ* pTq) {
// check all restore tasks
bool shouldIdle = true;
createStreamRunReq(pTq->pStreamMeta, &shouldIdle);
createStreamTaskRunReq(pTq->pStreamMeta, &shouldIdle);
int32_t times = 0;
@ -57,7 +57,39 @@ int32_t tqStreamTasksScanWal(STQ* pTq) {
return 0;
}
int32_t createStreamRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
static int32_t doSetOffsetForWalReader(SStreamTask *pTask, int32_t vgId) {
// seek the stored version and extract data from WAL
int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);
if (pTask->chkInfo.currentVer < firstVer) {
pTask->chkInfo.currentVer = firstVer;
tqWarn("vgId:%d s-task:%s ver earlier than the first ver of wal range %" PRId64 ", forward to %" PRId64, vgId,
pTask->id.idStr, firstVer, pTask->chkInfo.currentVer);
// todo need retry if failed
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.currentVer);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// append the data for the stream
tqDebug("vgId:%d s-task:%s wal reader seek to ver:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.currentVer);
} else {
int64_t currentVer = walReaderGetCurrentVer(pTask->exec.pWalReader);
if (currentVer == -1) { // we only seek the read for the first time
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.currentVer);
if (code != TSDB_CODE_SUCCESS) { // no data in wal, quit
return code;
}
// append the data for the stream
tqDebug("vgId:%d s-task:%s wal reader initial seek to ver:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.currentVer);
}
}
return TSDB_CODE_SUCCESS;
}
int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
*pScanIdle = true;
bool noNewDataInWal = true;
int32_t vgId = pStreamMeta->vgId;
@ -67,6 +99,7 @@ int32_t createStreamRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
return TSDB_CODE_SUCCESS;
}
// clone the task list, to avoid the task update during scan wal files
SArray* pTaskList = NULL;
taosWLockLatch(&pStreamMeta->lock);
pTaskList = taosArrayDup(pStreamMeta->pTaskList, NULL);
@ -107,38 +140,15 @@ int32_t createStreamRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
*pScanIdle = false;
// seek the stored version and extract data from WAL
int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);
if (pTask->chkInfo.currentVer < firstVer) {
pTask->chkInfo.currentVer = firstVer;
tqWarn("vgId:%d s-task:%s ver earlier than the first ver of wal range %" PRId64 ", forward to %" PRId64, vgId,
pTask->id.idStr, firstVer, pTask->chkInfo.currentVer);
// todo need retry if failed
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.currentVer);
if (code != TSDB_CODE_SUCCESS) {
streamMetaReleaseTask(pStreamMeta, pTask);
continue;
}
// append the data for the stream
tqDebug("vgId:%d s-task:%s wal reader seek to ver:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.currentVer);
} else {
int64_t currentVer = walReaderGetCurrentVer(pTask->exec.pWalReader);
if (currentVer == -1) {
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.currentVer);
if (code != TSDB_CODE_SUCCESS) { // no data in wal, quit
streamMetaReleaseTask(pStreamMeta, pTask);
continue;
}
// append the data for the stream
tqDebug("vgId:%d s-task:%s wal reader initial seek to ver:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.currentVer);
}
int32_t code = doSetOffsetForWalReader(pTask, vgId);
if (code != TSDB_CODE_SUCCESS) {
streamMetaReleaseTask(pStreamMeta, pTask);
continue;
}
// append the data for the stream
SStreamQueueItem* pItem = NULL;
int32_t code = extractMsgFromWal(pTask->exec.pWalReader, (void**) &pItem, pTask->id.idStr);
code = extractMsgFromWal(pTask->exec.pWalReader, (void**) &pItem, pTask->id.idStr);
if (code != TSDB_CODE_SUCCESS) { // failed, continue
streamMetaReleaseTask(pStreamMeta, pTask);
continue;
@ -161,7 +171,6 @@ int32_t createStreamRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
tqError("s-task:%s append input queue failed, ver:%" PRId64, pTask->id.idStr, pTask->chkInfo.currentVer);
}
streamMetaReleaseTask(pStreamMeta, pTask);
}

View File

@ -17,12 +17,12 @@
#include "tmsg.h"
#include "tq.h"
#define MAX_CATCH_NUM 10240
#define MAX_CACHE_TABLE_INFO_NUM 10240
typedef struct STblInfo {
typedef struct STableSinkInfo {
uint64_t uid;
char tbName[TSDB_TABLE_NAME_LEN];
} STblInfo;
} STableSinkInfo;
int32_t tqBuildDeleteReq(const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
const char* pIdStr) {
@ -97,19 +97,21 @@ end:
return ret;
}
int32_t tqGetTableInfo(SSHashObj* tblInfo ,uint64_t groupId, STblInfo** pTbl) {
void* pVal = tSimpleHashGet(tblInfo, &groupId, sizeof(uint64_t));
static int32_t tqGetTableInfo(SSHashObj* pTableInfoMap,uint64_t groupId, STableSinkInfo** pInfo) {
void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t));
if (pVal) {
*pTbl = *(STblInfo**)pVal;
*pInfo = *(STableSinkInfo**)pVal;
return TSDB_CODE_SUCCESS;
}
return TSDB_CODE_FAILED;
}
int32_t tqPutTableInfo(SSHashObj* tblInfo ,uint64_t groupId, STblInfo* pTbl) {
if (tSimpleHashGetSize(tblInfo) > MAX_CATCH_NUM) {
return TSDB_CODE_SUCCESS;
int32_t tqPutTableInfo(SSHashObj* tblInfo ,uint64_t groupId, STableSinkInfo* pTbl) {
if (tSimpleHashGetSize(tblInfo) > MAX_CACHE_TABLE_INFO_NUM) {
return TSDB_CODE_FAILED;
}
return tSimpleHashPut(tblInfo, &groupId, sizeof(uint64_t), &pTbl, POINTER_BYTES);
}
@ -274,7 +276,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
crTblArray = NULL;
} else {
SSubmitTbData tbData = {0};
tqDebug("tq sink pipe, convert block1 %d, rows: %d", i, rows);
tqDebug("tq sink pipe, convert block:%d, rows:%d", i, rows);
if (!(tbData.aRowP = taosArrayInit(rows, sizeof(SRow*)))) {
goto _end;
@ -283,35 +285,35 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
tbData.suid = suid;
tbData.uid = 0; // uid is assigned by vnode
tbData.sver = pTSchema->version;
STblInfo* pTblMeta = NULL;
int32_t res = tqGetTableInfo(pTask->tbSink.pTblInfo, pDataBlock->info.id.groupId, &pTblMeta);
STableSinkInfo* pTableSinkInfo = NULL;
int32_t res = tqGetTableInfo(pTask->tbSink.pTblInfo, pDataBlock->info.id.groupId, &pTableSinkInfo);
if (res != TSDB_CODE_SUCCESS) {
pTblMeta = taosMemoryCalloc(1, sizeof(STblInfo));
pTableSinkInfo = taosMemoryCalloc(1, sizeof(STableSinkInfo));
}
char* ctbName = pDataBlock->info.parTbName;
if (!ctbName[0]) {
if (res == TSDB_CODE_SUCCESS) {
memcpy(ctbName, pTblMeta->tbName, strlen(pTblMeta->tbName));
memcpy(ctbName, pTableSinkInfo->tbName, strlen(pTableSinkInfo->tbName));
} else {
char* tmp = buildCtbNameByGroupId(stbFullName, pDataBlock->info.id.groupId);
memcpy(ctbName, tmp, strlen(tmp));
memcpy(pTblMeta->tbName, tmp, strlen(tmp));
memcpy(pTableSinkInfo->tbName, tmp, strlen(tmp));
taosMemoryFree(tmp);
tqDebug("vgId:%d, gropuid:%" PRIu64 " datablock tabel name is null", TD_VID(pVnode),
tqDebug("vgId:%d, gropuId:%" PRIu64 " datablock table name is null", TD_VID(pVnode),
pDataBlock->info.id.groupId);
}
}
if (res == TSDB_CODE_SUCCESS) {
tbData.uid = pTblMeta->uid;
tbData.uid = pTableSinkInfo->uid;
} else {
SMetaReader mr = {0};
metaReaderInit(&mr, pVnode->pMeta, 0);
if (metaGetTableEntryByName(&mr, ctbName) < 0) {
metaReaderClear(&mr);
taosMemoryFree(pTblMeta);
taosMemoryFree(pTableSinkInfo);
tqDebug("vgId:%d, stream write into %s, table auto created", TD_VID(pVnode), ctbName);
SVCreateTbReq* pCreateTbReq = NULL;
@ -371,7 +373,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
tqError("vgId:%d, failed to write into %s, since table type incorrect, type %d", TD_VID(pVnode), ctbName,
mr.me.type);
metaReaderClear(&mr);
taosMemoryFree(pTblMeta);
taosMemoryFree(pTableSinkInfo);
continue;
}
@ -380,13 +382,16 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
", actual suid %" PRId64 "",
TD_VID(pVnode), ctbName, suid, mr.me.ctbEntry.suid);
metaReaderClear(&mr);
taosMemoryFree(pTblMeta);
taosMemoryFree(pTableSinkInfo);
continue;
}
tbData.uid = mr.me.uid;
pTblMeta->uid = mr.me.uid;
tqPutTableInfo(pTask->tbSink.pTblInfo, pDataBlock->info.id.groupId, pTblMeta);
pTableSinkInfo->uid = mr.me.uid;
int32_t code = tqPutTableInfo(pTask->tbSink.pTblInfo, pDataBlock->info.id.groupId, pTableSinkInfo);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pTableSinkInfo);
}
metaReaderClear(&mr);
}
}

View File

@ -15,10 +15,11 @@
#include "tq.h"
#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
#define NO_POLL_CNT 5
#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
#define NO_POLL_CNT 5
static int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp, int32_t vgId);
static int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq,
const SMqMetaRsp* pRsp, int32_t vgId);
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId) {
char buf[128] = {0};
@ -103,7 +104,8 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
char formatBuf[80];
tFormatOffset(formatBuf, 80, pOffsetVal);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, existed offset found, offset reset to %s and continue. reqId:0x%"PRIx64,
tqDebug("tmq poll: consumer:0x%" PRIx64
", subkey %s, vgId:%d, existed offset found, offset reset to %s and continue. reqId:0x%" PRIx64,
consumerId, pHandle->subKey, vgId, formatBuf, pRequest->reqId);
return 0;
} else {
@ -152,7 +154,8 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
return code;
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey:%s, no offset committed for consumer:0x%" PRIx64 " in vg %d, subkey %s, reset none failed",
tqError("tmq poll: subkey:%s, no offset committed for consumer:0x%" PRIx64
" in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, vgId, pRequest->subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
return -1;
@ -196,24 +199,25 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
// NOTE: this pHandle->consumerId may have been changed already.
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP, vgId);
end:
{
char buf[80] = {0};
tFormatOffset(buf, 80, &dataRsp.rspOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, reqId:0x%" PRIx64 " code:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
tDeleteMqDataRsp(&dataRsp);
}
end : {
char buf[80] = {0};
tFormatOffset(buf, 80, &dataRsp.rspOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, reqId:0x%" PRIx64
" code:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
tDeleteMqDataRsp(&dataRsp);
}
return code;
}
static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, STqOffsetVal *offset) {
int code = 0;
int32_t vgId = TD_VID(pTq->pVnode);
SWalCkHead* pCkHead = NULL;
SMqMetaRsp metaRsp = {0};
STaosxRsp taosxRsp = {0};
static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, STqOffsetVal* offset) {
int code = 0;
int32_t vgId = TD_VID(pTq->pVnode);
SWalCkHead* pCkHead = NULL;
SMqMetaRsp metaRsp = {0};
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pRequest);
if (offset->type != TMQ_OFFSET__LOG) {
@ -245,7 +249,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
}
if (offset->type == TMQ_OFFSET__LOG) {
verifyOffset(pHandle->pWalReader, offset);
walReaderVerifyOffset(pHandle->pWalReader, offset);
int64_t fetchVer = offset->version + 1;
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) {
@ -272,12 +276,12 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
}
SWalCont* pHead = &pCkHead->head;
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d", pRequest->consumerId,
pRequest->epoch, vgId, fetchVer, pHead->msgType);
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d",
pRequest->consumerId, pRequest->epoch, vgId, fetchVer, pHead->msgType);
// process meta
if (pHead->msgType != TDMT_VND_SUBMIT) {
if(totalRows > 0) {
if (totalRows > 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer - 1);
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP, vgId);
goto end;
@ -301,7 +305,8 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
code = tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows);
if (code < 0) {
tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId, pRequest->subKey);
tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId,
pRequest->subKey);
goto end;
}
@ -340,7 +345,7 @@ int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequ
if (blockReturned) {
return 0;
}
} else { // use the consumer specified offset
} else { // use the consumer specified offset
// the offset value may not increase monotonically?
offset = reqOffset;
}
@ -348,7 +353,7 @@ int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequ
// this is a normal subscribe requirement
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
return extractDataAndRspForNormalSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
} else { // todo handle the case where re-balance occurs.
} else { // todo handle the case where re-balance occurs.
// for taosx
return extractDataAndRspForDbStbSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}
@ -363,7 +368,8 @@ static void initMqRspHead(SMqRspHead* pMsgHead, int32_t type, int32_t epoch, int
pMsgHead->walever = ever;
}
int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp, int32_t vgId) {
int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp,
int32_t vgId) {
int32_t len = 0;
int32_t code = 0;
tEncodeSize(tEncodeMqMetaRsp, pRsp, len, code);
@ -387,7 +393,7 @@ int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPoll
tEncodeMqMetaRsp(&encoder, pRsp);
tEncoderClear(&encoder);
SRpcMsg resp = { .info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0 };
SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0};
tmsgSendRsp(&resp);
tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) send rsp, res msg type %d, offset type:%d", vgId,
@ -397,7 +403,7 @@ int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPoll
}
int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* pRsp, int32_t epoch, int64_t consumerId,
int32_t type, int64_t sver, int64_t ever) {
int32_t type, int64_t sver, int64_t ever) {
int32_t len = 0;
int32_t code = 0;
@ -432,7 +438,7 @@ int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp*
}
tEncoderClear(&encoder);
SRpcMsg rsp = { .info = *pRpcHandleInfo, .pCont = buf, .contLen = tlen, .code = 0 };
SRpcMsg rsp = {.info = *pRpcHandleInfo, .pCont = buf, .contLen = tlen, .code = 0};
tmsgSendRsp(&rsp);
return 0;

View File

@ -46,7 +46,13 @@ static void tsdbCloseBICache(STsdb *pTsdb) {
}
}
#define ROCKS_KEY_LEN 64
#define ROCKS_KEY_LEN (sizeof(tb_uid_t) + sizeof(int16_t) + sizeof(int8_t))
typedef struct {
tb_uid_t uid;
int16_t cid;
int8_t ltype;
} SLastKey;
static void tsdbGetRocksPath(STsdb *pTsdb, char *path) {
SVnode *pVnode = pTsdb->pVnode;
@ -62,9 +68,56 @@ static void tsdbGetRocksPath(STsdb *pTsdb, char *path) {
}
}
static const char *myCmpName(void *state) {
(void)state;
return "myCmp";
}
static void myCmpDestroy(void *state) { (void)state; }
static int myCmp(void *state, const char *a, size_t alen, const char *b, size_t blen) {
(void)state;
(void)alen;
(void)blen;
SLastKey *lhs = (SLastKey *)a;
SLastKey *rhs = (SLastKey *)b;
if (lhs->uid < rhs->uid) {
return -1;
} else if (lhs->uid > rhs->uid) {
return 1;
}
if (lhs->cid < rhs->cid) {
return -1;
} else if (lhs->cid > rhs->cid) {
return 1;
}
if (lhs->ltype < rhs->ltype) {
return -1;
} else if (lhs->ltype > rhs->ltype) {
return 1;
}
return 0;
}
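// A note on the key layout: the comparator above orders the fixed-width binary SLastKey by
// (uid, cid, ltype). A custom comparator is required because RocksDB's default bytewise ordering
// would compare the raw struct bytes (native-endian integers plus padding) rather than the
// logical fields. A minimal standalone sketch of the same rocksdb C-API registration pattern,
// with purely hypothetical names (demoCmp, /tmp/demo-last-cache), might look like:
#include <rocksdb/c.h>
#include <string.h>
static const char *demoCmpName(void *state) { (void)state; return "demoCmp"; }
static void        demoCmpDestroy(void *state) { (void)state; }
static int demoCmp(void *state, const char *a, size_t alen, const char *b, size_t blen) {
  (void)state; (void)alen; (void)blen;
  return memcmp(a, b, sizeof(int64_t));  /* order by the leading 8-byte uid field only */
}
static int demoOpenWithComparator(void) {
  char                 *err = NULL;
  int                   rc = 0;
  rocksdb_comparator_t *cmp = rocksdb_comparator_create(NULL, demoCmpDestroy, demoCmp, demoCmpName);
  rocksdb_options_t    *opts = rocksdb_options_create();
  rocksdb_options_set_create_if_missing(opts, 1);
  rocksdb_options_set_comparator(opts, cmp);  /* keys are now ordered by demoCmp, not bytewise */
  rocksdb_t *db = rocksdb_open(opts, "/tmp/demo-last-cache", &err);
  if (err != NULL) {
    rocksdb_free(err);
    rc = -1;
  } else {
    rocksdb_close(db);
  }
  rocksdb_options_destroy(opts);
  rocksdb_comparator_destroy(cmp);  /* destroy the comparator only after the db is closed */
  return rc;
}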
static int32_t tsdbOpenRocksCache(STsdb *pTsdb) {
int32_t code = 0;
rocksdb_comparator_t *cmp = rocksdb_comparator_create(NULL, myCmpDestroy, myCmp, myCmpName);
if (NULL == cmp) {
code = TSDB_CODE_OUT_OF_MEMORY;
return code;
}
rocksdb_cache_t *cache = rocksdb_cache_create_lru(5 * 1024 * 1024);
pTsdb->rCache.blockcache = cache;
rocksdb_block_based_table_options_t *tableoptions = rocksdb_block_based_options_create();
pTsdb->rCache.tableoptions = tableoptions;
rocksdb_options_t *options = rocksdb_options_create();
if (NULL == options) {
code = TSDB_CODE_OUT_OF_MEMORY;
@ -72,6 +125,9 @@ static int32_t tsdbOpenRocksCache(STsdb *pTsdb) {
}
rocksdb_options_set_create_if_missing(options, 1);
rocksdb_options_set_comparator(options, cmp);
rocksdb_block_based_options_set_block_cache(tableoptions, cache);
rocksdb_options_set_block_based_table_factory(options, tableoptions);
// rocksdb_options_set_inplace_update_support(options, 1);
// rocksdb_options_set_allow_concurrent_memtable_write(options, 0);
@ -80,12 +136,12 @@ static int32_t tsdbOpenRocksCache(STsdb *pTsdb) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err2;
}
// rocksdb_writeoptions_disable_WAL(writeoptions, 1);
rocksdb_writeoptions_disable_WAL(writeoptions, 1);
rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
if (NULL == readoptions) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err2;
goto _err3;
}
char *err = NULL;
@ -94,19 +150,23 @@ static int32_t tsdbOpenRocksCache(STsdb *pTsdb) {
rocksdb_t *db = rocksdb_open(options, cachePath, &err);
if (NULL == db) {
code = -1;
goto _err3;
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err4;
}
rocksdb_flushoptions_t *flushoptions = rocksdb_flushoptions_create();
if (NULL == flushoptions) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err4;
goto _err5;
}
rocksdb_writebatch_t *writebatch = rocksdb_writebatch_create();
pTsdb->rCache.writebatch = writebatch;
pTsdb->rCache.my_comparator = cmp;
pTsdb->rCache.options = options;
pTsdb->rCache.writeoptions = writeoptions;
pTsdb->rCache.readoptions = readoptions;
@ -115,15 +175,22 @@ static int32_t tsdbOpenRocksCache(STsdb *pTsdb) {
taosThreadMutexInit(&pTsdb->rCache.rMutex, NULL);
pTsdb->rCache.pTSchema = NULL;
return code;
_err5:
rocksdb_close(pTsdb->rCache.db);
_err4:
rocksdb_readoptions_destroy(readoptions);
_err3:
rocksdb_writeoptions_destroy(writeoptions);
_err2:
rocksdb_options_destroy(options);
rocksdb_block_based_options_destroy(tableoptions);
rocksdb_cache_destroy(cache);
_err:
rocksdb_comparator_destroy(cmp);
return code;
}
@ -134,13 +201,33 @@ static void tsdbCloseRocksCache(STsdb *pTsdb) {
rocksdb_readoptions_destroy(pTsdb->rCache.readoptions);
rocksdb_writeoptions_destroy(pTsdb->rCache.writeoptions);
rocksdb_options_destroy(pTsdb->rCache.options);
rocksdb_block_based_options_destroy(pTsdb->rCache.tableoptions);
rocksdb_cache_destroy(pTsdb->rCache.blockcache);
rocksdb_comparator_destroy(pTsdb->rCache.my_comparator);
taosThreadMutexDestroy(&pTsdb->rCache.rMutex);
taosMemoryFree(pTsdb->rCache.pTSchema);
}
static void rocksMayWrite(STsdb *pTsdb, bool force) {
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
if (force || rocksdb_writebatch_count(wb) >= 1024) {
char *err = NULL;
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
}
rocksdb_writebatch_clear(wb);
}
}
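// rocksMayWrite above is the batching point for the rocks cache: puts and deletes accumulate in
// the shared write batch and are flushed to RocksDB only once the batch holds 1024 entries, or
// immediately when force is true (the commit and delete paths). A minimal standalone sketch of
// that write-batch pattern with the rocksdb C API, using hypothetical demo names:
#include <rocksdb/c.h>
static void demoMayWrite(rocksdb_t *db, rocksdb_writeoptions_t *wopts, rocksdb_writebatch_t *wb,
                         int force) {
  if (force || rocksdb_writebatch_count(wb) >= 1024) {
    char *err = NULL;
    rocksdb_write(db, wopts, wb, &err);  /* one write call flushes every buffered put/delete */
    if (err != NULL) {
      rocksdb_free(err);                 /* a real caller would log the error, as above */
    }
    rocksdb_writebatch_clear(wb);        /* the same batch object is reused afterwards */
  }
}
// Callers append entries with rocksdb_writebatch_put()/rocksdb_writebatch_delete() and then call
// the flush helper, which is how tsdbCacheUpdate and tsdbCacheDel below avoid issuing one
// rocksdb_write per column.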
int32_t tsdbCacheCommit(STsdb *pTsdb) {
int32_t code = 0;
char *err = NULL;
rocksMayWrite(pTsdb, true);
rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
@ -191,15 +278,15 @@ void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) {
*size = length;
}
static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, char const *lstring) {
static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t ltype) {
SLastCol *pLastCol = NULL;
char *err = NULL;
size_t vlen = 0;
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":%s", uid, cid, lstring);
char *value = NULL;
value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, key, klen, &vlen, &err);
char *err = NULL;
size_t vlen = 0;
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
size_t klen = ROCKS_KEY_LEN;
char *value = NULL;
value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, (char *)key, klen, &vlen, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
@ -210,12 +297,38 @@ static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, char c
return pLastCol;
}
static void reallocVarData(SColVal *pColVal) {
if (IS_VAR_DATA_TYPE(pColVal->type)) {
uint8_t *pVal = pColVal->value.pData;
pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData);
if (pColVal->value.nData) {
memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
}
}
}
static void tsdbCacheDeleter(const void *key, size_t keyLen, void *value) {
SLastCol *pLastCol = (SLastCol *)value;
// TODO: add dirty flag to SLastCol
if (pLastCol->dirty) {
// TODO: queue into dirty list, free it after save to backstore
} else {
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type) /* && pLastCol->colVal.value.nData > 0*/) {
taosMemoryFree(pLastCol->colVal.value.pData);
}
taosMemoryFree(value);
}
}
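// Ownership note for the LRU entries used throughout this file: values handed to
// taosLRUCacheInsert are heap-allocated copies of the SLastCol, tsdbCacheDeleter above is the
// eviction callback that frees such a copy (including its var-length payload), and the charge
// passed at insert time includes the var-data size so the cache's memory accounting matches what
// the deleter eventually releases.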
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow) {
int32_t code = 0;
// 1, fetch schema
STSchema *pTSchema = NULL;
int32_t sver = TSDBROW_SVERSION(pRow);
code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, sver, &pTSchema);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
@ -229,22 +342,6 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
tsdbRowIterOpen(&iter, pRow, pTSchema);
for (SColVal *pColVal = tsdbRowIterNext(&iter); pColVal; pColVal = tsdbRowIterNext(&iter)) {
/*
if (IS_VAR_DATA_TYPE(pColVal->type)) {
uint8_t *pVal = pColVal->value.pData;
pColVal->value.pData = NULL;
code = tRealloc(&pColVal->value.pData, pColVal->value.nData);
if (code) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
if (pColVal->value.nData) {
memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
}
}
*/
taosArrayPush(aColVal, pColVal);
}
@ -254,23 +351,18 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
int num_keys = TARRAY_SIZE(aColVal);
char **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
char *key_list = taosMemoryMalloc(num_keys * ROCKS_KEY_LEN * 2);
for (int i = 0; i < num_keys; ++i) {
SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i);
int16_t cid = pColVal->cid;
char *keys = taosMemoryCalloc(2, ROCKS_KEY_LEN);
int last_key_len = snprintf(keys, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, cid);
if (last_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
int lr_key_len = snprintf(keys + ROCKS_KEY_LEN, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, cid);
if (lr_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
keys_list[i] = keys;
keys_list[num_keys + i] = keys + ROCKS_KEY_LEN;
keys_list_sizes[i] = last_key_len;
keys_list_sizes[num_keys + i] = lr_key_len;
memcpy(key_list + i * ROCKS_KEY_LEN, &(SLastKey){.ltype = 1, .uid = uid, .cid = cid}, ROCKS_KEY_LEN);
memcpy(key_list + i * ROCKS_KEY_LEN + num_keys * ROCKS_KEY_LEN, &(SLastKey){.ltype = 0, .uid = uid, .cid = cid},
ROCKS_KEY_LEN);
keys_list[i] = key_list + i * ROCKS_KEY_LEN;
keys_list[num_keys + i] = key_list + i * ROCKS_KEY_LEN + num_keys * ROCKS_KEY_LEN;
keys_list_sizes[i] = ROCKS_KEY_LEN;
keys_list_sizes[num_keys + i] = ROCKS_KEY_LEN;
}
char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
@ -278,12 +370,10 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
taosThreadMutexLock(&pTsdb->rCache.rMutex);
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
for (int i = 0; i < num_keys; ++i) {
taosMemoryFree(keys_list[i]);
}
for (int i = 0; i < num_keys * 2; ++i) {
rocksdb_free(errs[i]);
}
taosMemoryFree(key_list);
taosMemoryFree(keys_list);
taosMemoryFree(keys_list_sizes);
taosMemoryFree(errs);
@ -292,19 +382,6 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
for (int i = 0; i < num_keys; ++i) {
SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i);
if (COL_VAL_IS_VALUE(pColVal)) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
if (NULL == pLastCol || pLastCol->ts <= keyTs) {
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen);
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, pColVal->cid);
rocksdb_writebatch_put(wb, key, klen, value, vlen);
taosMemoryFree(value);
}
}
if (!COL_VAL_IS_NONE(pColVal)) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]);
@ -313,11 +390,61 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen);
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, pColVal->cid);
rocksdb_writebatch_put(wb, key, klen, value, vlen);
SLastKey key = (SLastKey){.ltype = 0, .uid = uid, .cid = pColVal->cid};
size_t klen = ROCKS_KEY_LEN;
rocksdb_writebatch_put(wb, (char *)&key, klen, value, vlen);
pLastCol = (SLastCol *)value;
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
reallocVarData(&pLastCol->colVal);
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
}
LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter,
NULL, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
taosMemoryFree(value);
}
if (COL_VAL_IS_VALUE(pColVal)) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
if (NULL == pLastCol || pLastCol->ts <= keyTs) {
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen);
SLastKey key = (SLastKey){.ltype = 1, .uid = uid, .cid = pColVal->cid};
rocksdb_writebatch_put(wb, (char *)&key, ROCKS_KEY_LEN, value, vlen);
pLastCol = (SLastCol *)value;
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
reallocVarData(&pLastCol->colVal);
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
}
LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &key, ROCKS_KEY_LEN, pLastCol, charge,
tsdbCacheDeleter, NULL, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
taosMemoryFree(value);
}
}
}
rocksdb_free(values_list[i]);
@ -326,14 +453,8 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
taosMemoryFree(values_list);
taosMemoryFree(values_list_sizes);
char *err = NULL;
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
}
rocksMayWrite(pTsdb, false);
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
rocksdb_writebatch_clear(wb);
_exit:
taosArrayDestroy(aColVal);
@ -341,53 +462,41 @@ _exit:
return code;
}
static void reallocVarData(SColVal *pColVal) {
if (IS_VAR_DATA_TYPE(pColVal->type)) {
uint8_t *pVal = pColVal->value.pData;
pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData);
if (pColVal->value.nData) {
memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
}
}
}
static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols,
int nCols, int16_t *slotIds);
static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols,
int nCols, int16_t *slotIds);
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int32_t ltype) {
static char const *alstring[2] = {"last_row", "last"};
char const *lstring = alstring[ltype];
#if 1
int32_t tsdbCacheGetSlow(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype) {
rocksdb_writebatch_t *wb = NULL;
int32_t code = 0;
SArray *pCidList = pr->pCidList;
int num_keys = TARRAY_SIZE(pCidList);
char **keys_list = taosMemoryCalloc(num_keys, sizeof(char *));
size_t *keys_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t));
char **keys_list = taosMemoryMalloc(num_keys * sizeof(char *));
size_t *keys_list_sizes = taosMemoryMalloc(num_keys * sizeof(size_t));
char *key_list = taosMemoryMalloc(num_keys * ROCKS_KEY_LEN);
for (int i = 0; i < num_keys; ++i) {
int16_t cid = *(int16_t *)taosArrayGet(pCidList, i);
char *keys = taosMemoryCalloc(2, ROCKS_KEY_LEN);
int last_key_len = snprintf(keys, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":%s", uid, cid, lstring);
if (last_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
keys_list[i] = keys;
keys_list_sizes[i] = last_key_len;
memcpy(key_list + i * ROCKS_KEY_LEN, &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid}, ROCKS_KEY_LEN);
keys_list[i] = key_list + i * ROCKS_KEY_LEN;
keys_list_sizes[i] = ROCKS_KEY_LEN;
}
char **values_list = taosMemoryCalloc(num_keys, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t));
char **errs = taosMemoryCalloc(num_keys, sizeof(char *));
char **errs = taosMemoryMalloc(num_keys * sizeof(char *));
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
for (int i = 0; i < num_keys; ++i) {
taosMemoryFree(keys_list[i]);
rocksdb_free(errs[i]);
if (errs[i]) {
rocksdb_free(errs[i]);
}
}
taosMemoryFree(key_list);
taosMemoryFree(keys_list);
taosMemoryFree(keys_list_sizes);
taosMemoryFree(errs);
@ -403,7 +512,7 @@ int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsR
} else {
taosThreadMutexLock(&pTsdb->rCache.rMutex);
pLastCol = tsdbCacheLookup(pTsdb, uid, cid, lstring);
pLastCol = tsdbCacheLookup(pTsdb, uid, cid, ltype);
if (!pLastCol) {
// recalc: load from tsdb
int16_t aCols[1] = {cid};
@ -432,9 +541,10 @@ int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsR
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(pLastCol, &value, &vlen);
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":%s", uid, pLastCol->colVal.cid, lstring);
rocksdb_writebatch_put(wb, key, klen, value, vlen);
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = pLastCol->colVal.cid};
size_t klen = ROCKS_KEY_LEN;
rocksdb_writebatch_put(wb, (char *)key, klen, value, vlen);
taosMemoryFree(value);
} else {
@ -442,21 +552,13 @@ int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsR
}
if (wb) {
char *err = NULL;
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
}
rocksdb_writebatch_clear(wb);
rocksMayWrite(pTsdb, false);
}
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
}
taosArrayPush(pLastArray, pLastCol);
taosArrayDestroy(pTmpColArray);
if (freeCol) {
taosMemoryFree(pLastCol);
@ -467,43 +569,370 @@ int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsR
return code;
}
#endif
static SLastCol *tsdbCacheLoadCol(STsdb *pTsdb, SCacheRowsReader *pr, int16_t slotid, tb_uid_t uid, int16_t cid,
int8_t ltype) {
SLastCol *pLastCol = tsdbCacheLookup(pTsdb, uid, cid, ltype);
if (!pLastCol) {
rocksdb_writebatch_t *wb = NULL;
taosThreadMutexLock(&pTsdb->rCache.rMutex);
pLastCol = tsdbCacheLookup(pTsdb, uid, cid, ltype);
if (!pLastCol) {
// recalc: load from tsdb
int16_t aCols[1] = {cid};
int16_t slotIds[1] = {slotid};
SArray *pTmpColArray = NULL;
if (ltype) {
mergeLastCid(uid, pTsdb, &pTmpColArray, pr, aCols, 1, slotIds);
} else {
mergeLastRowCid(uid, pTsdb, &pTmpColArray, pr, aCols, 1, slotIds);
}
if (pTmpColArray && TARRAY_SIZE(pTmpColArray) >= 1) {
pLastCol = taosArrayGet(pTmpColArray, 0);
}
// still null, then make up a none col value
SLastCol noneCol = {.ts = TSKEY_MIN, .colVal = COL_VAL_NONE(cid, pr->pSchema->columns[slotid].type)};
if (!pLastCol) {
pLastCol = &noneCol;
}
// store result back to rocks cache
wb = pTsdb->rCache.writebatch;
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(pLastCol, &value, &vlen);
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = pLastCol->colVal.cid};
size_t klen = ROCKS_KEY_LEN;
rocksdb_writebatch_put(wb, (char *)key, klen, value, vlen);
taosMemoryFree(value);
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
taosArrayDestroy(pTmpColArray);
}
if (wb) {
rocksMayWrite(pTsdb, false);
}
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
}
return pLastCol;
}
typedef struct {
int idx;
SLastKey key;
} SIdxKey;
static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SArray *remainCols,
SCacheRowsReader *pr, int8_t ltype) {
int32_t code = 0;
rocksdb_writebatch_t *wb = NULL;
SArray *pTmpColArray = NULL;
int num_keys = TARRAY_SIZE(remainCols);
int16_t *aCols = taosMemoryMalloc(num_keys * sizeof(int16_t));
int16_t *slotIds = taosMemoryMalloc(num_keys * sizeof(int16_t));
for (int i = 0; i < num_keys; ++i) {
SIdxKey *idxKey = taosArrayGet(remainCols, i);
aCols[i] = idxKey->key.cid;
slotIds[i] = pr->pSlotIds[idxKey->idx];
}
if (ltype) {
mergeLastCid(uid, pTsdb, &pTmpColArray, pr, aCols, num_keys, slotIds);
} else {
mergeLastRowCid(uid, pTsdb, &pTmpColArray, pr, aCols, num_keys, slotIds);
}
SLRUCache *pCache = pTsdb->lruCache;
for (int i = 0; i < num_keys; ++i) {
SIdxKey *idxKey = taosArrayGet(remainCols, i);
SLastCol *pLastCol = NULL;
if (pTmpColArray && TARRAY_SIZE(pTmpColArray) >= i + 1) {
pLastCol = taosArrayGet(pTmpColArray, i);
}
// still null, then make up a none col value
SLastCol noneCol = {.ts = TSKEY_MIN,
.colVal = COL_VAL_NONE(idxKey->key.cid, pr->pSchema->columns[slotIds[i]].type)};
if (!pLastCol) {
pLastCol = &noneCol;
}
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
}
LRUStatus status = taosLRUCacheInsert(pCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, NULL,
TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
// store result back to rocks cache
wb = pTsdb->rCache.writebatch;
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(pLastCol, &value, &vlen);
SLastKey *key = &idxKey->key;
size_t klen = ROCKS_KEY_LEN;
rocksdb_writebatch_put(wb, (char *)key, klen, value, vlen);
taosMemoryFree(value);
taosArraySet(pLastArray, idxKey->idx, pLastCol);
// taosArrayRemove(remainCols, i);
}
if (wb) {
rocksMayWrite(pTsdb, false);
}
taosArrayDestroy(pTmpColArray);
taosMemoryFree(aCols);
taosMemoryFree(slotIds);
return code;
}
static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SArray *remainCols,
SCacheRowsReader *pr, int8_t ltype) {
int32_t code = 0;
int num_keys = TARRAY_SIZE(remainCols);
char **keys_list = taosMemoryMalloc(num_keys * sizeof(char *));
size_t *keys_list_sizes = taosMemoryMalloc(num_keys * sizeof(size_t));
char *key_list = taosMemoryMalloc(num_keys * ROCKS_KEY_LEN);
for (int i = 0; i < num_keys; ++i) {
int16_t cid = *(int16_t *)taosArrayGet(remainCols, i);
memcpy(key_list + i * ROCKS_KEY_LEN, &((SIdxKey *)taosArrayGet(remainCols, i))->key, ROCKS_KEY_LEN);
keys_list[i] = key_list + i * ROCKS_KEY_LEN;
keys_list_sizes[i] = ROCKS_KEY_LEN;
}
char **values_list = taosMemoryCalloc(num_keys, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t));
char **errs = taosMemoryMalloc(num_keys * sizeof(char *));
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
for (int i = 0; i < num_keys; ++i) {
if (errs[i]) {
rocksdb_free(errs[i]);
}
}
taosMemoryFree(key_list);
taosMemoryFree(keys_list);
taosMemoryFree(keys_list_sizes);
taosMemoryFree(errs);
SLRUCache *pCache = pTsdb->lruCache;
for (int i = 0, j = 0; i < num_keys && j < TARRAY_SIZE(remainCols); ++i) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[j];
if (pLastCol) {
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
reallocVarData(&pLastCol->colVal);
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
}
LRUStatus status = taosLRUCacheInsert(pCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter,
NULL, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
taosArraySet(pLastArray, idxKey->idx, pLastCol);
taosArrayRemove(remainCols, j);
taosMemoryFree(values_list[i]);
} else {
++j;
}
}
taosMemoryFree(values_list);
taosMemoryFree(values_list_sizes);
if (TARRAY_SIZE(remainCols) > 0) {
code = tsdbCacheLoadFromRaw(pTsdb, uid, pLastArray, remainCols, pr, ltype);
}
return code;
}
int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype) {
int32_t code = 0;
SArray *remainCols = NULL;
SLRUCache *pCache = pTsdb->lruCache;
SArray *pCidList = pr->pCidList;
int num_keys = TARRAY_SIZE(pCidList);
for (int i = 0; i < num_keys; ++i) {
int16_t cid = ((int16_t *)TARRAY_DATA(pCidList))[i];
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
LRUHandle *h = taosLRUCacheLookup(pCache, key, ROCKS_KEY_LEN);
if (h) {
SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h);
SLastCol lastCol = *pLastCol;
// reallocVarData(&lastCol.colVal);
taosArrayPush(pLastArray, &lastCol);
if (h) {
taosLRUCacheRelease(pCache, h, false);
}
} else {
SLastCol noneCol = {.ts = TSKEY_MIN, .colVal = COL_VAL_NONE(cid, pr->pSchema->columns[pr->pSlotIds[i]].type)};
taosArrayPush(pLastArray, &noneCol);
if (!remainCols) {
remainCols = taosArrayInit(num_keys, sizeof(SIdxKey));
}
taosArrayPush(remainCols, &(SIdxKey){i, *key});
}
}
if (remainCols && TARRAY_SIZE(remainCols) > 0) {
taosThreadMutexLock(&pTsdb->lruMutex);
for (int i = 0; i < TARRAY_SIZE(remainCols);) {
SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[i];
LRUHandle *h = taosLRUCacheLookup(pCache, &idxKey->key, ROCKS_KEY_LEN);
if (h) {
SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h);
SLastCol lastCol = *pLastCol;
reallocVarData(&lastCol.colVal);
taosArraySet(pLastArray, idxKey->idx, &lastCol);
if (h) {
taosLRUCacheRelease(pCache, h, false);
}
taosArrayRemove(remainCols, i);
} else {
++i;
}
}
code = tsdbCacheLoadFromRocks(pTsdb, uid, pLastArray, remainCols, pr, ltype);
taosThreadMutexUnlock(&pTsdb->lruMutex);
}
if (remainCols) {
taosArrayDestroy(remainCols);
}
return code;
}
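// Batch read path: tsdbCacheGetBatch serves each requested column from the in-memory LRU cache
// when possible; misses are collected in remainCols as SIdxKey (key plus output slot index),
// re-checked under lruMutex, then looked up in RocksDB via tsdbCacheLoadFromRocks, and whatever
// is still missing is recomputed from the tsdb files by tsdbCacheLoadFromRaw, which also
// back-fills both the LRU cache and the RocksDB store.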
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype) {
int32_t code = 0;
SLRUCache *pCache = pTsdb->lruCache;
SArray *pCidList = pr->pCidList;
int num_keys = TARRAY_SIZE(pCidList);
for (int i = 0; i < num_keys; ++i) {
SLastCol *pLastCol = NULL;
int16_t cid = *(int16_t *)taosArrayGet(pCidList, i);
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
LRUHandle *h = taosLRUCacheLookup(pCache, key, ROCKS_KEY_LEN);
if (!h) {
taosThreadMutexLock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, ROCKS_KEY_LEN);
if (!h) {
pLastCol = tsdbCacheLoadCol(pTsdb, pr, pr->pSlotIds[i], uid, cid, ltype);
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
}
LRUStatus status = taosLRUCacheInsert(pCache, key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, &h,
TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
}
taosThreadMutexUnlock(&pTsdb->lruMutex);
}
pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h);
SLastCol lastCol = *pLastCol;
reallocVarData(&lastCol.colVal);
if (h) {
taosLRUCacheRelease(pCache, h, false);
}
taosArrayPush(pLastArray, &lastCol);
}
return code;
}
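// The single-column tsdbCacheGet uses a double-checked lookup: a lock-free LRU probe first, the
// same probe again under lruMutex, and only a genuine miss falls through to tsdbCacheLoadCol,
// whose result is inserted into the LRU cache while the mutex is still held.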
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey) {
int32_t code = 0;
// 1, fetch schema
// fetch schema
STSchema *pTSchema = NULL;
int32_t sver = -1;
int sver = -1;
code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, sver, &pTSchema);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return -1;
}
// 3, build keys & multi get from rocks
// build keys & multi get from rocks
int num_keys = pTSchema->numOfCols;
char **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
for (int i = 0; i < num_keys; ++i) {
int16_t cid = pTSchema->columns[i].colId;
char *keys = taosMemoryCalloc(2, ROCKS_KEY_LEN);
int last_key_len = snprintf(keys, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, cid);
if (last_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
int lr_key_len = snprintf(keys + ROCKS_KEY_LEN, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, cid);
if (lr_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
size_t klen = ROCKS_KEY_LEN;
char *keys = taosMemoryCalloc(2, sizeof(SLastKey));
((SLastKey *)keys)[0] = (SLastKey){.ltype = 1, .uid = uid, .cid = cid};
((SLastKey *)keys)[1] = (SLastKey){.ltype = 0, .uid = uid, .cid = cid};
keys_list[i] = keys;
keys_list[num_keys + i] = keys + ROCKS_KEY_LEN;
keys_list_sizes[i] = last_key_len;
keys_list_sizes[num_keys + i] = lr_key_len;
keys_list[num_keys + i] = keys + sizeof(SLastKey);
keys_list_sizes[i] = klen;
keys_list_sizes[num_keys + i] = klen;
}
char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *));
taosThreadMutexLock(&pTsdb->rCache.rMutex);
rocksMayWrite(pTsdb, true);
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
for (int i = 0; i < num_keys; ++i) {
@ -520,16 +949,20 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
for (int i = 0; i < num_keys; ++i) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) {
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, pLastCol->colVal.cid);
rocksdb_writebatch_delete(wb, key, klen);
SLastKey *key = &(SLastKey){.ltype = 1, .uid = uid, .cid = pLastCol->colVal.cid};
size_t klen = ROCKS_KEY_LEN;
rocksdb_writebatch_delete(wb, (char *)key, klen);
taosLRUCacheErase(pTsdb->lruCache, key, klen);
}
pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]);
if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) {
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, pLastCol->colVal.cid);
rocksdb_writebatch_delete(wb, key, klen);
SLastKey *key = &(SLastKey){.ltype = 0, .uid = uid, .cid = pLastCol->colVal.cid};
size_t klen = ROCKS_KEY_LEN;
rocksdb_writebatch_delete(wb, (char *)key, klen);
taosLRUCacheErase(pTsdb->lruCache, key, klen);
}
rocksdb_free(values_list[i]);
@ -538,14 +971,8 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
taosMemoryFree(values_list);
taosMemoryFree(values_list_sizes);
char *err = NULL;
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
}
rocksMayWrite(pTsdb, true);
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
rocksdb_writebatch_clear(wb);
_exit:
taosMemoryFree(pTSchema);
@ -1111,7 +1538,7 @@ typedef struct {
SMergeTree mergeTree;
SMergeTree *pMergeTree;
SSttBlockLoadInfo *pLoadInfo;
SLDataIter* pDataIter;
SLDataIter *pDataIter;
int64_t lastTs;
} SFSLastNextRowIter;
@ -1152,14 +1579,21 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow, bool *pIgnoreEa
if (code) goto _err;
}
int nTmpCols = nCols;
bool hasTs = false;
if (aCols[0] == PRIMARYKEY_TIMESTAMP_COL_ID) {
--nTmpCols;
hasTs = true;
}
for (int i = 0; i < state->pLoadInfo->numOfStt; ++i) {
state->pLoadInfo[i].colIds = aCols;
state->pLoadInfo[i].numOfCols = nCols;
state->pLoadInfo[i].colIds = hasTs ? aCols + 1 : aCols;
state->pLoadInfo[i].numOfCols = nTmpCols;
state->pLoadInfo[i].isLast = isLast;
}
tMergeTreeOpen(&state->mergeTree, 1, *state->pDataFReader, state->suid, state->uid,
&(STimeWindow){.skey = state->lastTs, .ekey = TSKEY_MAX},
&(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, state->pLoadInfo, false, NULL, true, state->pDataIter);
&(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, state->pLoadInfo, false, NULL, true,
state->pDataIter);
state->pMergeTree = &state->mergeTree;
state->state = SFSLASTNEXTROW_BLOCKROW;
}
@ -1394,11 +1828,13 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
tBlockDataReset(state->pBlockData);
TABLEID tid = {.suid = state->suid, .uid = state->uid};
int nTmpCols = nCols;
if (aCols[0] == PRIMARYKEY_TIMESTAMP_COL_ID && nCols == 1) {
nTmpCols = 0;
bool hasTs = false;
if (aCols[0] == PRIMARYKEY_TIMESTAMP_COL_ID) {
--nTmpCols;
skipBlock = false;
hasTs = true;
}
code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, aCols, nTmpCols);
code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, hasTs ? aCols + 1 : aCols, nTmpCols);
if (code) goto _err;
code = tsdbReadDataBlock(*state->pDataFReader, &block, state->pBlockData);
@ -1730,8 +2166,8 @@ typedef struct {
} CacheNextRowIter;
static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema, tb_uid_t suid,
SSttBlockLoadInfo *pLoadInfo, SLDataIter* pLDataIter, STsdbReadSnap *pReadSnap, SDataFReader **pDataFReader,
SDataFReader **pDataFReaderLast, int64_t lastTs) {
SSttBlockLoadInfo *pLoadInfo, SLDataIter *pLDataIter, STsdbReadSnap *pReadSnap,
SDataFReader **pDataFReader, SDataFReader **pDataFReaderLast, int64_t lastTs) {
int code = 0;
STbData *pMem = NULL;

View File

@ -180,8 +180,7 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
}
SVnodeCfg* pCfg = &((SVnode*)pVnode)->config;
int32_t numOfStt = pCfg->sttTrigger;
int32_t numOfStt = pCfg->sttTrigger;
p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0, numOfStt);
if (p->pLoadInfo == NULL) {
tsdbCacherowsReaderClose(p);
@ -215,7 +214,7 @@ void* tsdbCacherowsReaderClose(void* pReader) {
taosMemoryFree(p->pSchema);
}
taosMemoryFreeClear(p->pDataIter);
taosMemoryFree(p->pDataIter);
taosMemoryFree(p->pCurrSchema);
destroyLastBlockLoadInfo(p->pLoadInfo);
@ -306,23 +305,27 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
pr->pDataFReader = NULL;
pr->pDataFReaderLast = NULL;
int32_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
int8_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
// retrieve the only one last row of all tables in the uid list.
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) {
int64_t st = taosGetTimestampUs();
int64_t totalLastTs = INT64_MAX;
for (int32_t i = 0; i < pr->numOfTables; ++i) {
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
tsdbCacheGetBatch(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
// tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
taosArrayClearEx(pRow, freeItem);
// taosArrayClearEx(pRow, freeItem);
taosArrayClear(pRow);
continue;
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
SLastCol* pColVal = taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
taosArrayClearEx(pRow, freeItem);
// taosArrayClearEx(pRow, freeItem);
taosArrayClear(pRow);
continue;
}
@ -373,13 +376,14 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
}
if (taosArrayGetSize(pTableUidList) == 0) {
if (TARRAY_SIZE(pTableUidList) == 0) {
taosArrayPush(pTableUidList, &pKeyInfo->uid);
} else {
taosArraySet(pTableUidList, 0, &pKeyInfo->uid);
}
taosArrayClearEx(pRow, freeItem);
// taosArrayClearEx(pRow, freeItem);
taosArrayClear(pRow);
}
if (hasRes) {
@ -387,25 +391,28 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
} else if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_ALL)) {
for (int32_t i = pr->tableIndex; i < pr->numOfTables; ++i) {
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
tb_uid_t uid = pr->pTableList[i].uid;
tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
tsdbCacheGetBatch(pr->pTsdb, uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
taosArrayClearEx(pRow, freeItem);
// taosArrayClearEx(pRow, freeItem);
taosArrayClear(pRow);
continue;
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
taosArrayClearEx(pRow, freeItem);
// taosArrayClearEx(pRow, freeItem);
taosArrayClear(pRow);
continue;
}
saveOneRow(pRow, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
taosArrayClearEx(pRow, freeItem);
// taosArrayClearEx(pRow, freeItem);
taosArrayClear(pRow);
taosArrayPush(pTableUidList, &pKeyInfo->uid);
taosArrayPush(pTableUidList, &uid);
pr->tableIndex += 1;
++pr->tableIndex;
if (pResBlock->info.rows >= pResBlock->info.capacity) {
goto _end;
}
@ -429,7 +436,9 @@ _end:
}
taosMemoryFree(pRes);
taosArrayDestroyEx(pRow, freeItem);
// taosArrayDestroyEx(pRow, freeItem);
taosArrayDestroy(pRow);
taosArrayDestroyEx(pLastCols, freeItem);
return code;
}

View File

@ -302,12 +302,12 @@ int64_t tsdbCountTbDataRows(STbData *pTbData) {
return rowsNum;
}
void tsdbMemTableCountRows(SMemTable *pMemTable, SSHashObj* pTableMap, int64_t *rowsNum) {
void tsdbMemTableCountRows(SMemTable *pMemTable, SSHashObj *pTableMap, int64_t *rowsNum) {
taosRLockLatch(&pMemTable->latch);
for (int32_t i = 0; i < pMemTable->nBucket; ++i) {
STbData *pTbData = pMemTable->aBucket[i];
while (pTbData) {
void* p = tSimpleHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
void *p = tSimpleHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
if (p == NULL) {
pTbData = pTbData->next;
continue;
@ -673,7 +673,10 @@ static int32_t tsdbInsertColDataToTable(SMemTable *pMemTable, STbData *pTbData,
if (key.ts >= pTbData->maxKey) {
pTbData->maxKey = key.ts;
}
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
if (!TSDB_CACHE_NO(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
}
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);
@ -734,7 +737,9 @@ static int32_t tsdbInsertRowDataToTable(SMemTable *pMemTable, STbData *pTbData,
if (key.ts >= pTbData->maxKey) {
pTbData->maxKey = key.ts;
}
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
if (!TSDB_CACHE_NO(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
}
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);

View File

@ -453,8 +453,10 @@ static int vnodeCommitImpl(SCommitInfo *pInfo) {
#endif
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbCacheCommit(pVnode->pTsdb);
TSDB_CHECK_CODE(code, lino, _exit);
if (!TSDB_CACHE_NO(pVnode->config)) {
code = tsdbCacheCommit(pVnode->pTsdb);
TSDB_CHECK_CODE(code, lino, _exit);
}
if (VND_IS_RSMA(pVnode)) {
code = smaCommit(pVnode->pSma, pInfo);

View File

@ -299,7 +299,6 @@ typedef struct SPartitionBySupporter {
typedef struct SPartitionDataInfo {
uint64_t groupId;
char* tbname;
SArray* tags;
SArray* rowIds;
} SPartitionDataInfo;

View File

@ -1058,17 +1058,6 @@ void qStreamSetOpen(qTaskInfo_t tinfo) {
pOperator->status = OP_NOT_OPENED;
}
void verifyOffset(void *pWalReader, STqOffsetVal* pOffset){
// if offset version is small than first version , let's seek to first version
taosThreadMutexLock(&((SWalReader*)pWalReader)->pWal->mutex);
int64_t firstVer = walGetFirstVer(((SWalReader*)pWalReader)->pWal);
taosThreadMutexUnlock(&((SWalReader*)pWalReader)->pWal->mutex);
if (pOffset->version + 1 < firstVer){
pOffset->version = firstVer - 1;
}
}
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
@ -1095,7 +1084,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
tsdbReaderClose(pScanBaseInfo->dataReader);
pScanBaseInfo->dataReader = NULL;
verifyOffset(pInfo->tqReader->pWalReader, pOffset);
walReaderVerifyOffset(pInfo->tqReader->pWalReader, pOffset);
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1, id) < 0) {
qError("tqSeekVer failed ver:%" PRId64 ", %s", pOffset->version + 1, id);
return -1;

View File

@ -1215,6 +1215,11 @@ SSDataBlock* buildCreateTableBlock(SExprSupp* tbName, SExprSupp* tag) {
return pBlock;
}
void freePartItem(void* ptr) {
SPartitionDataInfo* pPart = (SPartitionDataInfo*)ptr;
taosArrayDestroy(pPart->rowIds);
}
SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPartitionPhysiNode* pPartNode,
SExecTaskInfo* pTaskInfo) {
int32_t code = TSDB_CODE_SUCCESS;
@ -1293,6 +1298,7 @@ SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStr
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pPartitions = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
taosHashSetFreeFp(pInfo->pPartitions, freePartItem);
pInfo->tsColIndex = 0;
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);

View File

@ -1697,13 +1697,13 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__LOG) {
while (1) {
int32_t type = tqNextBlockInWal(pInfo->tqReader);
bool hasResult = tqNextBlockInWal(pInfo->tqReader, id);
SSDataBlock* pRes = pInfo->tqReader->pResBlock;
// curVersion move to next, so currentOffset = curVersion - 1
tqOffsetResetToLog(&pTaskInfo->streamInfo.currentOffset, pInfo->tqReader->pWalReader->curVersion - 1);
if (type == FETCH_TYPE__DATA) {
if (hasResult) {
qDebug("doQueueScan get data from log %" PRId64 " rows, version:%" PRId64, pRes->info.rows,
pTaskInfo->streamInfo.currentOffset.version);
blockDataCleanup(pInfo->pRes);
@ -1711,7 +1711,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
if (pInfo->pRes->info.rows > 0) {
return pInfo->pRes;
}
} else if (type == FETCH_TYPE__NONE) {
} else {
qDebug("doQueueScan get none from log, return, version:%" PRId64, pTaskInfo->streamInfo.currentOffset.version);
return NULL;
}
@ -2074,8 +2074,9 @@ FETCH_NEXT_BLOCK:
return pInfo->pUpdateRes;
}
const char* id = GET_TASKID(pTaskInfo);
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
const char* id = GET_TASKID(pTaskInfo);
SSDataBlock* pBlock = pInfo->pRes;
SDataBlockInfo* pBlockInfo = &pBlock->info;
int32_t totalBlocks = taosArrayGetSize(pInfo->pBlockLists);
NEXT_SUBMIT_BLK:
@ -2086,12 +2087,6 @@ FETCH_NEXT_BLOCK:
doClearBufferedBlocks(pInfo);
qDebug("stream scan return empty, all %d submit blocks consumed, %s", totalBlocks, id);
void* buff = NULL;
// int32_t len = streamScanOperatorEncode(pInfo, &buff);
// if (len > 0) {
// streamStateSaveInfo(pInfo->pState, STREAM_SCAN_OP_NAME, strlen(STREAM_SCAN_OP_NAME), buff, len);
// }
taosMemoryFreeClear(buff);
return NULL;
}
@ -2105,7 +2100,7 @@ FETCH_NEXT_BLOCK:
}
}
blockDataCleanup(pInfo->pRes);
blockDataCleanup(pBlock);
while (tqNextBlockImpl(pInfo->tqReader, id)) {
int32_t code = tqRetrieveDataBlock(pInfo->tqReader, id);
@ -2120,10 +2115,10 @@ FETCH_NEXT_BLOCK:
return pInfo->pCreateTbRes;
}
doCheckUpdate(pInfo, pBlockInfo->window.ekey, pInfo->pRes);
doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
pInfo->pRes->info.dataLoad = 1;
blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex);
doCheckUpdate(pInfo, pBlockInfo->window.ekey, pBlock);
doFilter(pBlock, pOperator->exprSupp.pFilterInfo, NULL);
pBlock->info.dataLoad = 1;
blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
if (pBlockInfo->rows > 0 || pInfo->pUpdateDataRes->info.rows > 0) {
break;
@ -2143,7 +2138,7 @@ FETCH_NEXT_BLOCK:
qDebug("stream scan get source rows:%" PRId64", %s", pBlockInfo->rows, id);
if (pBlockInfo->rows > 0) {
return pInfo->pRes;
return pBlock;
}
if (pInfo->pUpdateDataRes->info.rows > 0) {
@ -2151,10 +2146,9 @@ FETCH_NEXT_BLOCK:
}
goto NEXT_SUBMIT_BLK;
} else {
ASSERT(0);
return NULL;
}
return NULL;
}
static SArray* extractTableIdList(const STableListInfo* pTableListInfo) {
@ -2242,44 +2236,6 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
return NULL;
}
// else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
// int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1;
//
// while(1){
// if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) {
// qDebug("tmqsnap tmq poll: consumer log end. offset %" PRId64, fetchVer);
// pTaskInfo->streamInfo.lastStatus.version = fetchVer;
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
// return NULL;
// }
// SWalCont* pHead = &pInfo->pCkHead->head;
// qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType);
//
// if (pHead->msgType == TDMT_VND_SUBMIT) {
// SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
// tqReaderSetDataMsg(pInfo->tqReader, pCont, 0);
// SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid,
// &pInfo->pRes); if(block){
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
// pTaskInfo->streamInfo.lastStatus.version = fetchVer;
// qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
// return block;
// }else{
// fetchVer++;
// }
// } else{
// ASSERT(pInfo->sContext->withMeta);
// ASSERT(IS_META_MSG(pHead->msgType));
// qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
// pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer;
// pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG;
// pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType;
// pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen;
// pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen);
// memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen);
// return NULL;
// }
// }
return NULL;
}

View File

@ -520,6 +520,7 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey)
pFillInfo->end = endKey;
if (!FILL_IS_ASC_FILL(pFillInfo)) {
pFillInfo->end = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->interval.precision);
pFillInfo->end = taosTimeAdd(pFillInfo->end, pFillInfo->interval.interval, pFillInfo->interval.intervalUnit, pFillInfo->interval.precision);
}
pFillInfo->index = 0;

View File

@ -2674,6 +2674,29 @@ TSKEY compareTs(void* pKey) {
return pWinKey->ts;
}
int32_t getSelectivityBufSize(SqlFunctionCtx* pCtx) {
if (pCtx->subsidiaries.rowLen == 0) {
int32_t rowLen = 0;
for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
rowLen += pc->pExpr->base.resSchema.bytes;
}
return rowLen + pCtx->subsidiaries.num * sizeof(bool);
} else {
return pCtx->subsidiaries.rowLen;
}
}
int32_t getMaxFunResSize(SExprSupp* pSup, int32_t numOfCols) {
int32_t size = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t resSize = getSelectivityBufSize(pSup->pCtx + i);
size = TMAX(size, resSize);
}
return size;
}
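// getSelectivityBufSize sums the result bytes of a function's subsidiary (selectivity) outputs
// plus one bool flag per subsidiary, and getMaxFunResSize takes the maximum over all output
// columns; that maximum is the funResSize passed to streamFileStateInit below so each window's
// state buffer can hold the widest tuple.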
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, int32_t numOfChild) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
@ -2720,8 +2743,11 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc);
initBasicInfo(&pInfo->binfo, pResBlock);
pInfo->pState = taosMemoryCalloc(1, sizeof(SStreamState));
*(pInfo->pState) = *(pTaskInfo->streamInfo.pState);
streamStateSetNumber(pInfo->pState, -1);
int32_t code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str,
pTaskInfo->streamInfo.pState);
pInfo->pState);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@ -2730,10 +2756,6 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
pInfo->pState = taosMemoryCalloc(1, sizeof(SStreamState));
*(pInfo->pState) = *(pTaskInfo->streamInfo.pState);
streamStateSetNumber(pInfo->pState, -1);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
pInfo->numOfChild = numOfChild;
@ -2766,7 +2788,8 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->numOfDatapack = 0;
pInfo->pUpdated = NULL;
pInfo->pUpdatedMap = NULL;
pInfo->pState->pFileState = streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize,
int32_t funResSize= getMaxFunResSize(&pOperator->exprSupp, numOfCols);
pInfo->pState->pFileState = streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize,
compareTs, pInfo->pState, pInfo->twAggSup.deleteMark);
pInfo->dataVersion = 0;
@ -4889,9 +4912,13 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pState = taosMemoryCalloc(1, sizeof(SStreamState));
*(pInfo->pState) = *(pTaskInfo->streamInfo.pState);
streamStateSetNumber(pInfo->pState, -1);
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
code = initAggSup(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str,
pTaskInfo->streamInfo.pState);
pInfo->pState);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@ -4912,10 +4939,6 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
pInfo->pState = taosMemoryCalloc(1, sizeof(SStreamState));
*(pInfo->pState) = *(pTaskInfo->streamInfo.pState);
streamStateSetNumber(pInfo->pState, -1);
pInfo->pPhyNode = NULL; // create new child
pInfo->pPullDataMap = NULL;
pInfo->pPullWins = NULL; // SPullWindowInfo
@ -4928,7 +4951,8 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->numOfDatapack = 0;
pInfo->pUpdated = NULL;
pInfo->pUpdatedMap = NULL;
pInfo->pState->pFileState = streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize,
int32_t funResSize= getMaxFunResSize(pSup, numOfCols);
pInfo->pState->pFileState = streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize,
compareTs, pInfo->pState, pInfo->twAggSup.deleteMark);
setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED,
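A hedged, standalone model of how the funResSize handed to streamFileStateInit above is derived: per output column, getSelectivityBufSize sums the subsidiary column widths plus one bool null flag each, and getMaxFunResSize keeps the maximum across columns. The column widths below are illustrative values, not taken from the source.

#include <stdio.h>
#include <stdbool.h>

/* Toy model of getSelectivityBufSize: subsidiary result widths plus one
 * bool flag per subsidiary column. */
static int selectivity_buf_size(const int *subsidiaryBytes, int numOfSubsidiaries) {
  int rowLen = 0;
  for (int j = 0; j < numOfSubsidiaries; ++j) rowLen += subsidiaryBytes[j];
  return rowLen + numOfSubsidiaries * (int)sizeof(bool);
}

int main(void) {
  int col0[] = {8, 4};  /* e.g. a timestamp and an int carried along with one function */
  int col1[] = {8};     /* e.g. a timestamp carried along with another function        */
  int sizes[2] = { selectivity_buf_size(col0, 2), selectivity_buf_size(col1, 1) };
  int funResSize = sizes[0] > sizes[1] ? sizes[0] : sizes[1];
  printf("funResSize=%d\n", funResSize);  /* extra space reserved per state row */
  return 0;
}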

View File

@ -883,10 +883,6 @@ int32_t setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STu
}
pStart += pDstCol->info.bytes;
}
if (pCtx->saveHandle.pState) {
streamFreeVal((void*)p);
}
}
return TSDB_CODE_SUCCESS;
@ -3123,7 +3119,7 @@ void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsid
return buf;
}
static int32_t doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STupleKey* key,
static int32_t doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, SWinKey* key,
STuplePos* pPos) {
STuplePos p = {0};
if (pHandle->pBuf != NULL) {
@ -3171,7 +3167,7 @@ static int32_t doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf,
int32_t saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
prepareBuf(pCtx);
STupleKey key;
SWinKey key;
if (pCtx->saveHandle.pBuf == NULL) {
SColumnInfoData* pColInfo = taosArrayGet(pSrcBlock->pDataBlock, 0);
if (pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
@ -3179,7 +3175,6 @@ int32_t saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock*
key.groupId = pSrcBlock->info.id.groupId;
key.ts = skey;
key.exprIdx = pCtx->exprIdx;
}
}

View File

@ -5510,6 +5510,7 @@ static const char* jkDropDnodeStmtDnodeId = "DnodeId";
static const char* jkDropDnodeStmtFqdn = "Fqdn";
static const char* jkDropDnodeStmtPort = "Port";
static const char* jkDropDnodeStmtForce = "Force";
static const char* jkDropDnodeStmtUnsafe = "Unsafe";
static int32_t dropDnodeStmtToJson(const void* pObj, SJson* pJson) {
const SDropDnodeStmt* pNode = (const SDropDnodeStmt*)pObj;
@ -5524,6 +5525,9 @@ static int32_t dropDnodeStmtToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkDropDnodeStmtForce, pNode->force);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkDropDnodeStmtUnsafe, pNode->unsafe);
}
return code;
}
@ -5541,6 +5545,9 @@ static int32_t jsonToDropDnodeStmt(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkDropDnodeStmtForce, &pNode->force);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkDropDnodeStmtUnsafe, &pNode->unsafe);
}
return code;
}

View File

@ -192,7 +192,7 @@ SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const ST
SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, const SToken* pVal);
SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName);
SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const SToken* pPort);
SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, bool force);
SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, bool force, bool unsafe);
SNode* createAlterDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, const SToken* pConfig, const SToken* pValue);
SNode* createRealTableNodeForIndexName(SAstCreateContext* pCxt, SToken* pDbName, SToken* pIndexName);
SNode* createCreateIndexStmt(SAstCreateContext* pCxt, EIndexType type, bool ignoreExists, SNode* pIndexName,

View File

@ -126,8 +126,10 @@ with_opt(A) ::= WITH search_condition(B).
/************************************************ create/drop/alter/restore dnode *********************************************/
cmd ::= CREATE DNODE dnode_endpoint(A). { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &A, NULL); }
cmd ::= CREATE DNODE dnode_endpoint(A) PORT NK_INTEGER(B). { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &A, &B); }
cmd ::= DROP DNODE NK_INTEGER(A) force_opt(B). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A, B); }
cmd ::= DROP DNODE dnode_endpoint(A) force_opt(B). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A, B); }
cmd ::= DROP DNODE NK_INTEGER(A) force_opt(B). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A, B, false); }
cmd ::= DROP DNODE dnode_endpoint(A) force_opt(B). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A, B, false); }
cmd ::= DROP DNODE NK_INTEGER(A) unsafe_opt(B). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A, false, B); }
cmd ::= DROP DNODE dnode_endpoint(A) unsafe_opt(B). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A, false, B); }
cmd ::= ALTER DNODE NK_INTEGER(A) NK_STRING(B). { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &A, &B, NULL); }
cmd ::= ALTER DNODE NK_INTEGER(A) NK_STRING(B) NK_STRING(C). { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &A, &B, &C); }
cmd ::= ALTER ALL DNODES NK_STRING(A). { pCxt->pRootNode = createAlterDnodeStmt(pCxt, NULL, &A, NULL); }
@ -145,6 +147,10 @@ dnode_endpoint(A) ::= NK_IPTOKEN(B).
force_opt(A) ::= . { A = false; }
force_opt(A) ::= FORCE. { A = true; }
%type unsafe_opt { bool }
%destructor unsafe_opt { }
unsafe_opt(A) ::= UNSAFE. { A = true; }
/************************************************ alter local *********************************************************/
cmd ::= ALTER LOCAL NK_STRING(A). { pCxt->pRootNode = createAlterLocalStmt(pCxt, &A, NULL); }
cmd ::= ALTER LOCAL NK_STRING(A) NK_STRING(B). { pCxt->pRootNode = createAlterLocalStmt(pCxt, &A, &B); }

View File

@ -1576,7 +1576,7 @@ SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const
return (SNode*)pStmt;
}
SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, bool force) {
SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, bool force, bool unsafe) {
CHECK_PARSER_STATUS(pCxt);
SDropDnodeStmt* pStmt = (SDropDnodeStmt*)nodesMakeNode(QUERY_NODE_DROP_DNODE_STMT);
CHECK_OUT_OF_MEM(pStmt);
@ -1589,6 +1589,7 @@ SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, bool f
}
}
pStmt->force = force;
pStmt->unsafe = unsafe;
return (SNode*)pStmt;
}

View File

@ -241,6 +241,7 @@ static SKeyword keywordTable[] = {
{"TSERIES", TK_TSERIES},
{"TTL", TK_TTL},
{"UNION", TK_UNION},
{"UNSAFE", TK_UNSAFE},
{"UNSIGNED", TK_UNSIGNED},
{"UNTREATED", TK_UNTREATED},
{"UPDATE", TK_UPDATE},

View File

@ -5517,6 +5517,7 @@ static int32_t translateDropDnode(STranslateContext* pCxt, SDropDnodeStmt* pStmt
strcpy(dropReq.fqdn, pStmt->fqdn);
dropReq.port = pStmt->port;
dropReq.force = pStmt->force;
dropReq.unsafe = pStmt->unsafe;
return buildCmdMsg(pCxt, TDMT_MND_DROP_DNODE, (FSerializeFunc)tSerializeSDropDnodeReq, &dropReq);
}

File diff suppressed because it is too large

View File

@ -94,15 +94,17 @@ TEST_F(ParserInitialDTest, dropDnode) {
auto clearDropDnodeReq = [&]() { memset(&expect, 0, sizeof(SDropDnodeReq)); };
auto setDropDnodeReqById = [&](int32_t dnodeId, bool force = false) {
auto setDropDnodeReqById = [&](int32_t dnodeId, bool force = false, bool unsafe = false) {
expect.dnodeId = dnodeId;
expect.force = force;
expect.unsafe = unsafe;
};
auto setDropDnodeReqByEndpoint = [&](const char* pFqdn, int32_t port = tsServerPort, bool force = false) {
auto setDropDnodeReqByEndpoint = [&](const char* pFqdn, int32_t port = tsServerPort, bool force = false, bool unsafe = false) {
strcpy(expect.fqdn, pFqdn);
expect.port = port;
expect.force = force;
expect.unsafe = unsafe;
};
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
@ -114,6 +116,7 @@ TEST_F(ParserInitialDTest, dropDnode) {
ASSERT_EQ(std::string(req.fqdn), std::string(expect.fqdn));
ASSERT_EQ(req.port, expect.port);
ASSERT_EQ(req.force, expect.force);
ASSERT_EQ(req.unsafe, expect.unsafe);
});
setDropDnodeReqById(1);
@ -124,6 +127,10 @@ TEST_F(ParserInitialDTest, dropDnode) {
run("DROP DNODE 2 FORCE");
clearDropDnodeReq();
setDropDnodeReqById(2, false, true);
run("DROP DNODE 2 UNSAFE");
clearDropDnodeReq();
setDropDnodeReqByEndpoint("host1", 7030);
run("DROP DNODE 'host1:7030'");
clearDropDnodeReq();
@ -132,6 +139,10 @@ TEST_F(ParserInitialDTest, dropDnode) {
run("DROP DNODE 'host2:8030' FORCE");
clearDropDnodeReq();
setDropDnodeReqByEndpoint("host2", 8030, false, true);
run("DROP DNODE 'host2:8030' UNSAFE");
clearDropDnodeReq();
setDropDnodeReqByEndpoint("host1");
run("DROP DNODE host1");
clearDropDnodeReq();
@ -139,6 +150,10 @@ TEST_F(ParserInitialDTest, dropDnode) {
setDropDnodeReqByEndpoint("host2", tsServerPort, true);
run("DROP DNODE host2 FORCE");
clearDropDnodeReq();
setDropDnodeReqByEndpoint("host2", tsServerPort, false, true);
run("DROP DNODE host2 UNSAFE");
clearDropDnodeReq();
}
// todo DROP function

View File

@ -16,10 +16,10 @@
#include "streamInc.h"
#include "ttimer.h"
#define STREAM_TASK_INPUT_QUEUEU_CAPACITY 20480
#define STREAM_TASK_INPUT_QUEUEU_CAPACITY 20480
#define STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE (50)
#define ONE_MB_F (1048576.0)
#define QUEUE_MEM_SIZE_IN_MB(_q) (taosQueueMemorySize(_q)/ONE_MB_F)
#define QUEUE_MEM_SIZE_IN_MB(_q) (taosQueueMemorySize(_q) / ONE_MB_F)
int32_t streamInit() {
int8_t old;
@ -96,8 +96,8 @@ int32_t streamSetupTrigger(SStreamTask* pTask) {
}
int32_t streamSchedExec(SStreamTask* pTask) {
int8_t schedStatus =
atomic_val_compare_exchange_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE, TASK_SCHED_STATUS__WAITING);
int8_t schedStatus = atomic_val_compare_exchange_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE,
TASK_SCHED_STATUS__WAITING);
if (schedStatus == TASK_SCHED_STATUS__INACTIVE) {
SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
@ -111,7 +111,7 @@ int32_t streamSchedExec(SStreamTask* pTask) {
pRunReq->streamId = pTask->id.streamId;
pRunReq->taskId = pTask->id.taskId;
SRpcMsg msg = { .msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq) };
SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
tmsgPutToQueue(pTask->pMsgCb, STREAM_QUEUE, &msg);
qDebug("trigger to run s-task:%s", pTask->id.idStr);
}
@ -283,7 +283,7 @@ int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, S
}
bool tInputQueueIsFull(const SStreamTask* pTask) {
bool isFull = taosQueueItemSize((pTask->inputQueue->queue)) >= STREAM_TASK_INPUT_QUEUEU_CAPACITY;
bool isFull = taosQueueItemSize((pTask->inputQueue->queue)) >= STREAM_TASK_INPUT_QUEUEU_CAPACITY;
double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue);
return (isFull || size >= STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE);
}
@ -294,23 +294,26 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) {
double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue);
if (type == STREAM_INPUT__DATA_SUBMIT) {
SStreamDataSubmit2* pSubmitBlock = (SStreamDataSubmit2*)pItem;
qDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr,
pSubmitBlock->submit.msgLen, pSubmitBlock->submit.ver, total, size);
int32_t numOfBlocks = taosQueueItemSize(pTask->inputQueue->queue) + 1;
double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue);
if ((pTask->taskLevel == TASK_LEVEL__SOURCE) && (tInputQueueIsFull(pTask))) {
qError("s-task:%s input queue is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) abort", pTask->id.idStr,
STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE,
total, size);
streamDataSubmitDestroy(pSubmitBlock);
taosFreeQitem(pSubmitBlock);
SStreamDataSubmit* px = (SStreamDataSubmit*)pItem;
qDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr,
px->submit.msgLen, px->submit.ver, numOfBlocks, size);
if ((pTask->taskLevel == TASK_LEVEL__SOURCE) && tInputQueueIsFull(pTask)) {
qError("s-task:%s input queue is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) abort",
pTask->id.idStr, STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE, numOfBlocks,
size);
streamDataSubmitDestroy(px);
taosFreeQitem(pItem);
return -1;
}
taosWriteQitem(pTask->inputQueue->queue, pSubmitBlock);
taosWriteQitem(pTask->inputQueue->queue, pItem);
} else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
type == STREAM_INPUT__REF_DATA_BLOCK) {
int32_t numOfBlocks = taosQueueItemSize(pTask->inputQueue->queue) + 1;
double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue);
if ((pTask->taskLevel == TASK_LEVEL__SOURCE) && (tInputQueueIsFull(pTask))) {
qError("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
@ -331,10 +334,6 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) {
atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE);
}
#if 0
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__NORMAL);
#endif
return 0;
}
@ -356,6 +355,4 @@ void* streamQueueNextItem(SStreamQueue* queue) {
}
}
void streamTaskInputFail(SStreamTask* pTask) {
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
}
void streamTaskInputFail(SStreamTask* pTask) { atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED); }
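A self-contained sketch of the two-limit admission check used by tInputQueueIsFull in this file, assuming the same thresholds shown above (20480 queued items or roughly 50 MiB of queued memory); the real code reads both values from the task's input queue.

#include <stdio.h>
#include <stdbool.h>

#define QUEUE_CAPACITY_ITEMS   20480
#define QUEUE_CAPACITY_SIZE_MB 50.0
#define ONE_MB_F               1048576.0

/* The queue is treated as full when either the item count or the memory
 * footprint (in MiB) reaches its cap. */
static bool input_queue_is_full(int numOfItems, double memBytes) {
  double sizeInMiB = memBytes / ONE_MB_F;
  return numOfItems >= QUEUE_CAPACITY_ITEMS || sizeInMiB >= QUEUE_CAPACITY_SIZE_MB;
}

int main(void) {
  printf("%d\n", input_queue_is_full(100, 10.0 * 1048576.0));   /* 0: neither cap hit */
  printf("%d\n", input_queue_is_full(30000, 1.0 * 1048576.0));  /* 1: item cap hit    */
  printf("%d\n", input_queue_is_full(100, 60.0 * 1048576.0));   /* 1: memory cap hit  */
  return 0;
}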

View File

@ -67,8 +67,8 @@ int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock
return 0;
}
SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type) {
SStreamDataSubmit2* pDataSubmit = (SStreamDataSubmit2*)taosAllocateQitem(sizeof(SStreamDataSubmit2), DEF_QITEM, submit.msgLen);
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type) {
SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM, pData->msgLen);
if (pDataSubmit == NULL) {
return NULL;
}
@ -79,14 +79,14 @@ SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type) {
return NULL;
}
pDataSubmit->submit = submit;
pDataSubmit->submit = *pData;
*pDataSubmit->dataRef = 1; // initialize the reference count to be 1
pDataSubmit->type = type;
return pDataSubmit;
}
void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit) {
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit) {
int32_t ref = atomic_sub_fetch_32(pDataSubmit->dataRef, 1);
ASSERT(ref >= 0 && pDataSubmit->type == STREAM_INPUT__DATA_SUBMIT);
@ -96,8 +96,8 @@ void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit) {
}
}
SStreamMergedSubmit2* streamMergedSubmitNew() {
SStreamMergedSubmit2* pMerged = (SStreamMergedSubmit2*)taosAllocateQitem(sizeof(SStreamMergedSubmit2), DEF_QITEM, 0);
SStreamMergedSubmit* streamMergedSubmitNew() {
SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)taosAllocateQitem(sizeof(SStreamMergedSubmit), DEF_QITEM, 0);
if (pMerged == NULL) {
return NULL;
}
@ -116,30 +116,30 @@ SStreamMergedSubmit2* streamMergedSubmitNew() {
return pMerged;
}
int32_t streamMergeSubmit(SStreamMergedSubmit2* pMerged, SStreamDataSubmit2* pSubmit) {
int32_t streamMergeSubmit(SStreamMergedSubmit* pMerged, SStreamDataSubmit* pSubmit) {
taosArrayPush(pMerged->dataRefs, &pSubmit->dataRef);
taosArrayPush(pMerged->submits, &pSubmit->submit);
pMerged->ver = pSubmit->ver;
return 0;
}
static FORCE_INLINE void streamDataSubmitRefInc(SStreamDataSubmit2* pDataSubmit) {
static FORCE_INLINE void streamDataSubmitRefInc(SStreamDataSubmit* pDataSubmit) {
atomic_add_fetch_32(pDataSubmit->dataRef, 1);
}
SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit) {
SStreamDataSubmit* streamSubmitBlockClone(SStreamDataSubmit* pSubmit) {
int32_t len = 0;
if (pSubmit->type == STREAM_INPUT__DATA_SUBMIT) {
len = pSubmit->submit.msgLen;
}
SStreamDataSubmit2* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit2), DEF_QITEM, len);
SStreamDataSubmit* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM, len);
if (pSubmitClone == NULL) {
return NULL;
}
streamDataSubmitRefInc(pSubmit);
memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit2));
memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit));
return pSubmitClone;
}
@ -152,17 +152,17 @@ SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem*
taosFreeQitem(pElem);
return dst;
} else if (dst->type == STREAM_INPUT__MERGED_SUBMIT && pElem->type == STREAM_INPUT__DATA_SUBMIT) {
SStreamMergedSubmit2* pMerged = (SStreamMergedSubmit2*)dst;
SStreamDataSubmit2* pBlockSrc = (SStreamDataSubmit2*)pElem;
SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)dst;
SStreamDataSubmit* pBlockSrc = (SStreamDataSubmit*)pElem;
streamMergeSubmit(pMerged, pBlockSrc);
taosFreeQitem(pElem);
return dst;
} else if (dst->type == STREAM_INPUT__DATA_SUBMIT && pElem->type == STREAM_INPUT__DATA_SUBMIT) {
SStreamMergedSubmit2* pMerged = streamMergedSubmitNew();
SStreamMergedSubmit* pMerged = streamMergedSubmitNew();
// todo handle error
streamMergeSubmit(pMerged, (SStreamDataSubmit2*)dst);
streamMergeSubmit(pMerged, (SStreamDataSubmit2*)pElem);
streamMergeSubmit(pMerged, (SStreamDataSubmit*)dst);
streamMergeSubmit(pMerged, (SStreamDataSubmit*)pElem);
taosFreeQitem(dst);
taosFreeQitem(pElem);
return (SStreamQueueItem*)pMerged;
@ -180,10 +180,10 @@ void streamFreeQitem(SStreamQueueItem* data) {
taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)blockDataFreeRes);
taosFreeQitem(data);
} else if (type == STREAM_INPUT__DATA_SUBMIT) {
streamDataSubmitDestroy((SStreamDataSubmit2*)data);
streamDataSubmitDestroy((SStreamDataSubmit*)data);
taosFreeQitem(data);
} else if (type == STREAM_INPUT__MERGED_SUBMIT) {
SStreamMergedSubmit2* pMerge = (SStreamMergedSubmit2*)data;
SStreamMergedSubmit* pMerge = (SStreamMergedSubmit*)data;
int32_t sz = taosArrayGetSize(pMerge->submits);
for (int32_t i = 0; i < sz; i++) {
int32_t* pRef = taosArrayGetP(pMerge->dataRefs, i);

View File

@ -51,7 +51,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray*
qSetMultiStreamInput(pExecutor, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
} else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
const SStreamDataSubmit2* pSubmit = (const SStreamDataSubmit2*)data;
const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)data;
qSetMultiStreamInput(pExecutor, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT);
qDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, pTask->id.idStr, pSubmit,
pSubmit->submit.msgStr, pSubmit->submit.msgLen, pSubmit->submit.ver);
@ -63,7 +63,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray*
qDebug("s-task:%s set sdata blocks as input num:%d, ver:%" PRId64, pTask->id.idStr, numOfBlocks, pBlock->sourceVer);
qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__DATA_BLOCK);
} else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) {
const SStreamMergedSubmit2* pMerged = (const SStreamMergedSubmit2*)data;
const SStreamMergedSubmit* pMerged = (const SStreamMergedSubmit*)data;
SArray* pBlockList = pMerged->submits;
int32_t numOfBlocks = taosArrayGetSize(pBlockList);
@ -149,7 +149,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
int32_t batchCnt = 0;
while (1) {
if (streamTaskShouldStop(&pTask->status) || streamTaskShouldPause(&pTask->status)) {
taosArrayDestroy(pRes);
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
return 0;
}
@ -269,6 +269,9 @@ int32_t streamExecForAll(SStreamTask* pTask) {
qDebug("s-task:%s start to extract data block from inputQ", pTask->id.idStr);
while (1) {
if (streamTaskShouldPause(&pTask->status)) {
return 0;
}
SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue);
if (qItem == NULL) {
if (pTask->taskLevel == TASK_LEVEL__SOURCE && batchSize < MIN_STREAM_EXEC_BATCH_NUM && times < 5) {
@ -367,11 +370,11 @@ int32_t streamExecForAll(SStreamTask* pTask) {
qRes->blocks = pRes;
if (((SStreamQueueItem*)pInput)->type == STREAM_INPUT__DATA_SUBMIT) {
SStreamDataSubmit2* pSubmit = (SStreamDataSubmit2*)pInput;
SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)pInput;
qRes->childId = pTask->selfChildId;
qRes->sourceVer = pSubmit->ver;
} else if (((SStreamQueueItem*)pInput)->type == STREAM_INPUT__MERGED_SUBMIT) {
SStreamMergedSubmit2* pMerged = (SStreamMergedSubmit2*)pInput;
SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)pInput;
qRes->childId = pTask->selfChildId;
qRes->sourceVer = pMerged->ver;
}
@ -408,7 +411,8 @@ int32_t streamTryExec(SStreamTask* pTask) {
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
qDebug("s-task:%s exec completed", pTask->id.idStr);
if (!taosQueueEmpty(pTask->inputQueue->queue) && (!streamTaskShouldStop(&pTask->status)) && (!streamTaskShouldPause(&pTask->status))) {
if (!taosQueueEmpty(pTask->inputQueue->queue) && (!streamTaskShouldStop(&pTask->status)) &&
(!streamTaskShouldPause(&pTask->status))) {
streamSchedExec(pTask);
}
}

View File

@ -27,6 +27,7 @@ SStreamQueue* streamQueueOpen(int64_t cap) {
taosSetQueueCapacity(pQueue->queue, cap);
taosSetQueueMemoryCapacity(pQueue->queue, cap * 1024);
return pQueue;
FAIL:
if (pQueue->queue) taosCloseQueue(pQueue->queue);
if (pQueue->qall) taosFreeQall(pQueue->qall);
@ -105,3 +106,61 @@ SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue) {
return (SStreamQueueRes){0};
}
#endif
#define MAX_STREAM_EXEC_BATCH_NUM 128
#define MIN_STREAM_EXEC_BATCH_NUM 16
// todo refactor:
// read data from input queue
typedef struct SQueueReader {
SStreamQueue* pQueue;
int32_t taskLevel;
int32_t maxBlocks; // maximum number of blocks in one batch
int32_t waitDuration; // maximum wait time to form several blocks into a batch to process, unit: ms
} SQueueReader;
SStreamQueueItem* doReadMultiBlocksFromQueue(SQueueReader* pReader, const char* idstr) {
int32_t numOfBlocks = 0;
int32_t tryCount = 0;
SStreamQueueItem* pRet = NULL;
while (1) {
SStreamQueueItem* qItem = streamQueueNextItem(pReader->pQueue);
if (qItem == NULL) {
if (pReader->taskLevel == TASK_LEVEL__SOURCE && numOfBlocks < MIN_STREAM_EXEC_BATCH_NUM && tryCount < pReader->waitDuration) {
tryCount++;
taosMsleep(1);
qDebug("===stream===try again batchSize:%d", numOfBlocks);
continue;
}
qDebug("===stream===break batchSize:%d", numOfBlocks);
break;
}
if (pRet == NULL) {
pRet = qItem;
streamQueueProcessSuccess(pReader->pQueue);
if (pReader->taskLevel == TASK_LEVEL__SINK) {
break;
}
} else {
// todo we need to sort the data block, instead of just appending into the array list.
void* newRet = NULL;
if ((newRet = streamMergeQueueItem(pRet, qItem)) == NULL) {
streamQueueProcessFail(pReader->pQueue);
break;
} else {
numOfBlocks++;
pRet = newRet;
streamQueueProcessSuccess(pReader->pQueue);
if (numOfBlocks > pReader->maxBlocks) {
qDebug("maximum blocks limit:%d reached, processing, %s", pReader->maxBlocks, idstr);
break;
}
}
}
}
return pRet;
}
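A hedged, self-contained model of the batching policy sketched by SQueueReader above: keep draining the queue, retry for up to waitDuration empty polls when a source-level task has not yet reached the minimum batch, and stop once maxBlocks items have been collected. The toy queue below is a stand-in, not a TDengine API.

#include <stdio.h>

#define MIN_BATCH 16

typedef struct ToyQueue { int remaining; } ToyQueue;

/* Stand-in for streamQueueNextItem: returns 1 while items remain, else 0. */
static int toy_next_item(ToyQueue *q) { return q->remaining > 0 ? (q->remaining--, 1) : 0; }

/* Drain up to maxBlocks items, retrying up to waitDuration empty polls when a
 * source task has not yet collected MIN_BATCH items. */
static int read_batch(ToyQueue *q, int isSourceTask, int maxBlocks, int waitDuration) {
  int numOfBlocks = 0, tryCount = 0;
  while (1) {
    if (!toy_next_item(q)) {
      if (isSourceTask && numOfBlocks < MIN_BATCH && tryCount < waitDuration) {
        tryCount++;  /* the real code sleeps 1 ms here before retrying */
        continue;
      }
      break;
    }
    if (++numOfBlocks >= maxBlocks) break;  /* batch size limit reached */
  }
  return numOfBlocks;
}

int main(void) {
  ToyQueue q = { .remaining = 200 };
  printf("batched %d blocks\n", read_batch(&q, 1, 128, 5));
  return 0;
}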

View File

@ -272,26 +272,30 @@ int32_t streamStateCommit(SStreamState* pState) {
#endif
}
int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen) {
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
#ifdef USE_ROCKSDB
return streamStateFuncPut_rocksdb(pState, key, value, vLen);
void* pVal = NULL;
int32_t len = 0;
int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), &pVal, &len);
char* buf = ((SRowBuffPos*)pVal)->pRowBuff;
uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState);
memcpy(buf + len - rowSize, value, vLen);
return code;
#else
return tdbTbUpsert(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), value, vLen, pState->pTdbState->txn);
#endif
}
int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen) {
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen) {
#ifdef USE_ROCKSDB
return streamStateFuncGet_rocksdb(pState, key, pVal, pVLen);
void* pVal = NULL;
int32_t len = 0;
int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), (void**)(&pVal), &len);
char* buf = ((SRowBuffPos*)pVal)->pRowBuff;
uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState);
*ppVal = buf + len - rowSize;
return code;
#else
return tdbTbGet(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), pVal, pVLen);
#endif
}
int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key) {
#ifdef USE_ROCKSDB
return streamStateFuncDel_rocksdb(pState, key);
#else
return tdbTbDelete(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), pState->pTdbState->txn);
return tdbTbGet(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), ppVal, pVLen);
#endif
}

View File

@ -32,6 +32,7 @@ struct SStreamFileState {
SSHashObj* rowBuffMap;
void* pFileStore;
int32_t rowSize;
int32_t selectivityRowSize;
int32_t keyLen;
uint64_t preCheckPointVersion;
uint64_t checkPointVersion;
@ -45,8 +46,8 @@ struct SStreamFileState {
typedef SRowBuffPos SRowBuffInfo;
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, GetTsFun fp, void* pFile,
TSKEY delMark) {
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
GetTsFun fp, void* pFile, TSKEY delMark) {
if (memSize <= 0) {
memSize = DEFAULT_MAX_STREAM_BUFFER_SIZE;
}
@ -58,6 +59,7 @@ SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_
if (!pFileState) {
goto _error;
}
rowSize += selectRowSize;
pFileState->maxRowCount = TMAX((uint64_t)memSize / rowSize, FLUSH_NUM * 2);
pFileState->usedBuffs = tdListNew(POINTER_BYTES);
pFileState->freeBuffs = tdListNew(POINTER_BYTES);
@ -69,11 +71,11 @@ SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_
}
pFileState->keyLen = keySize;
pFileState->rowSize = rowSize;
pFileState->selectivityRowSize = selectRowSize;
pFileState->preCheckPointVersion = 0;
pFileState->checkPointVersion = 1;
pFileState->pFileStore = pFile;
pFileState->getTs = fp;
pFileState->maxRowCount = TMAX((uint64_t)memSize / rowSize, FLUSH_NUM * 2);
pFileState->curRowCount = 0;
pFileState->deleteMark = delMark;
pFileState->flushMark = INT64_MIN;
@ -441,9 +443,12 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
int32_t recoverSnapshot(SStreamFileState* pFileState) {
int32_t code = TSDB_CODE_SUCCESS;
int64_t mark = (INT64_MIN + pFileState->deleteMark >= pFileState->maxTs) ? INT64_MIN
: pFileState->maxTs - pFileState->deleteMark;
deleteExpiredCheckPoint(pFileState, mark);
if (pFileState->maxTs != INT64_MIN) {
int64_t mark = (INT64_MIN + pFileState->deleteMark >= pFileState->maxTs)
? INT64_MIN
: pFileState->maxTs - pFileState->deleteMark;
deleteExpiredCheckPoint(pFileState, mark);
}
void* pStVal = NULL;
int32_t len = 0;
@ -478,4 +483,6 @@ int32_t recoverSnapshot(SStreamFileState* pFileState) {
streamStateFreeCur(pCur);
return TSDB_CODE_SUCCESS;
}
}
int32_t streamFileStateGeSelectRowSize(SStreamFileState* pFileState) { return pFileState->selectivityRowSize; }
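A hedged sketch of the row-buffer layout the last two files now assume: streamFileStateInit grows every state row by selectRowSize, and streamStateFuncPut/Get address that reserved area from the end of the buffer (buf + len - selectivityRowSize). The sizes below are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  int aggRowSize = 64;  /* aggregation result produced by the operator     */
  int selRowSize = 24;  /* extra tail reserved for selectivity tuple data  */
  int totalLen   = aggRowSize + selRowSize;  /* rowSize += selectRowSize   */

  char *rowBuff = calloc(1, totalLen);
  if (rowBuff == NULL) return 1;
  const char payload[] = "tuple-pos";

  /* Put: the function-state value is copied into the reserved tail.       */
  memcpy(rowBuff + totalLen - selRowSize, payload, sizeof(payload));

  /* Get: the same offset arithmetic recovers a pointer to that tail.      */
  char *pVal = rowBuff + totalLen - selRowSize;
  printf("stored at tail: %s\n", pVal);

  free(rowBuff);
  return 0;
}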

View File

@ -2413,6 +2413,11 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
"vgId:%d, heartbeat msg from dnode:%d, cluster:%d, Msgterm:%" PRId64 " currentTerm:%" PRId64,
ths->vgId, DID(&(pMsg->srcId)), CID(&(pMsg->srcId)), pMsg->term, currentTerm);
if(pMsg->term > currentTerm && ths->state == TAOS_SYNC_STATE_LEARNER){
raftStoreSetTerm(ths, pMsg->term);
currentTerm = pMsg->term;
}
if (pMsg->term == currentTerm && ths->state != TAOS_SYNC_STATE_LEADER) {
syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), tsMs);
resetElect = true;

View File

@ -116,6 +116,17 @@ void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *eve
*ever = pReader->cond.scanUncommited ? lastVer : committedVer;
}
void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset){
// if the offset version is smaller than the first version, seek to the first version
taosThreadMutexLock(&pWalReader->pWal->mutex);
int64_t firstVer = walGetFirstVer((pWalReader)->pWal);
taosThreadMutexUnlock(&pWalReader->pWal->mutex);
if (pOffset->version + 1 < firstVer){
pOffset->version = firstVer - 1;
}
}
static int64_t walReadSeekFilePos(SWalReader *pReader, int64_t fileFirstVer, int64_t ver) {
int64_t ret = 0;

View File

@ -324,6 +324,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SNODE_NOT_DEPLOYED, "Snode not deployed")
TAOS_DEFINE_ERROR(TSDB_CODE_MNODE_NOT_CATCH_UP, "Mnode didn't catch the leader")
TAOS_DEFINE_ERROR(TSDB_CODE_MNODE_ALREADY_IS_VOTER, "Mnode already is a leader")
TAOS_DEFINE_ERROR(TSDB_CODE_MNODE_ONLY_TWO_MNODE, "Only two mnodes exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MNODE_NO_NEED_RESTORE, "No need to restore on this dnode")
// vnode
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VGROUP_ID, "Vnode is closed or removed")
@ -342,6 +343,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_QUERY_BUSY, "Query busy")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_CATCH_UP, "Vnode didn't catch up its leader")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_ALREADY_IS_VOTER, "Vnode already is a voter")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_DIR_ALREADY_EXIST, "Vnode directory already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_META_DATA_UNSAFE_DELETE, "Single-replica vnode data will be lost permanently after this operation. If you are sure, please use 'drop dnode <id> unsafe'")
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID")

View File

@ -589,6 +589,32 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/db_tb_name_check.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/InsertFuturets.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/insert_wide_column.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_benchmark.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_1.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_1.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_1.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_1.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_1.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_2.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_2.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_2.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_2.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_2.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_3.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_3.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_3.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_3.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_3.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/information_schema.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py

View File

@ -68,7 +68,7 @@ docker run \
-v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \
-v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \
-v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \
--rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=0;make -j || exit 1"
--rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=0;make -j 10|| exit 1"
# -v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \
if [[ -d ${WORKDIR}/debugNoSan ]] ;then
@ -97,7 +97,7 @@ docker run \
-v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \
-v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \
-v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \
--rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=0;make -j || exit 1 "
--rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 "
mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan

View File

@ -130,18 +130,6 @@ class TDDnode:
"locale": "en_US.UTF-8",
"charset": "UTF-8",
"asyncLog": "0",
"mDebugFlag": "143",
"dDebugFlag": "143",
"vDebugFlag": "143",
"tqDebugFlag": "143",
"cDebugFlag": "143",
"jniDebugFlag": "143",
"qDebugFlag": "143",
"rpcDebugFlag": "143",
"tmrDebugFlag": "131",
"uDebugFlag": "143",
"sDebugFlag": "143",
"wDebugFlag": "143",
"numOfLogLines": "100000000",
"statusInterval": "1",
"enableQueryHb": "1",

View File

@ -209,7 +209,8 @@ endi
print =============== step5a: drop dnode 3
sql_error drop dnode 3
sql drop dnode 3 force
sql_error drop dnode 3 force
sql drop dnode 3 unsafe
print select * from information_schema.ins_dnodes;
sql select * from information_schema.ins_dnodes;

View File

@ -307,4 +307,63 @@ sql resume stream IF EXISTS streams66666666;
print ===== step 4 over
print ===== step5
sql drop stream if exists streams6;
sql drop database if exists test6;
sql create database test6 vgroups 10;
sql use test6;
sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int);
sql create table ts1 using st tags(1,1,1);
sql create table ts2 using st tags(2,2,2);
sql create table ts3 using st tags(3,2,2);
sql create table ts4 using st tags(4,2,2);
sql create stream streams6 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 watermark 1d into streamt6 as select _wstart, count(*) c1 from st interval(10s);
sql insert into ts1 values(1648791213001,1,12,3,1.0);
sql insert into ts2 values(1648791213001,1,12,3,1.0);
sql insert into ts3 values(1648791213001,1,12,3,1.0);
sql insert into ts4 values(1648791213001,1,12,3,1.0);
sleep 1000
sql pause stream streams6;
sleep 1000
sql insert into ts1 values(1648791223001,1,12,3,1.0);
sql insert into ts2 values(1648791233001,1,12,3,1.0);
sql resume stream streams6;
sql insert into ts3 values(1648791243001,1,12,3,1.0);
sql insert into ts4 values(1648791253001,1,12,3,1.0);
$loop_count = 0
loop6:
$loop_count = $loop_count + 1
if $loop_count == 20 then
return -1
endi
sleep 500
print 2 select * from streamt6;
sql select * from streamt6;
if $rows != 5 then
print =====rows=$rows
print $data00 $data01 $data02
print $data10 $data11 $data12
print $data20 $data21 $data22
print $data30 $data31 $data32
print $data40 $data41 $data42
print $data50 $data51 $data52
goto loop6
endi
print ===== step5 over
system sh/stop_dnodes.sh

View File

@ -355,7 +355,7 @@ system sh/exec.sh -n dnode4 -s start
$loop_cnt = 0
check_dnode_ready_2:
$loop_cnt = $loop_cnt + 1
sleep 200
sleep 1500
if $loop_cnt == 10 then
print ====> dnode not ready!
return -1

View File

@ -0,0 +1,128 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1,
"max_sql_len": 102400000,
"databases": [{
"dbinfo": {
"name": "json_test",
"drop": "yes",
"replica": 1,
"precision": "ms",
"keep": 36500,
"minRows": 100,
"maxRows": 4096
},
"super_tables": [{
"name": "stb_old",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb_old_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "sample",
"sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv",
"tags_file": "",
"columns": [{"type": "INT","count":1000}, {"type": "BINARY", "len": 16, "count":20}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
},{
"name": "stb_new",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb_new_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":90}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":3}]
},{
"name": "stb_mix",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb_mix_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "TINYINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 20,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
"tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
},{
"name": "stb_excel",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb_excel_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "SMALLINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 30,"count":300},{"type": "BINARY","len": 47,"count":290},{"type": "BINARY","len": 211,"count":1}],
"tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
}]
}]
}

File diff suppressed because it is too large

View File

@ -0,0 +1,40 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.cases import tdCases
from .rowlength64k import *
class TDTestCase(TDTestCase):
def run(self):
tdSql.prepare()
startTime_all = time.time()
#self.run_1()
# self.run_2()
self.run_3()
#self.run_4()
endTime_all = time.time()
print("total time %ds" % (endTime_all - startTime_all))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,44 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.cases import tdCases
from .rowlength64k import *
class TDTestCase(TDTestCase):
def run(self):
tdSql.prepare()
startTime_all = time.time()
# self.run_1()
# self.run_2()
# self.run_3()
self.run_4()
#self.run_5()
# self.run_5()
# self.run_6()
# self.run_7()
endTime_all = time.time()
print("total time %ds" % (endTime_all - startTime_all))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,42 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.cases import tdCases
from .rowlength64k import *
class TDTestCase(TDTestCase):
def run(self):
tdSql.prepare()
startTime_all = time.time()
# self.run_1()
# self.run_2()
# self.run_3()
# self.run_4()
self.run_6()
self.run_7()
endTime_all = time.time()
print("total time %ds" % (endTime_all - startTime_all))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,44 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.cases import tdCases
from .rowlength64k import *
class TDTestCase(TDTestCase):
def run(self):
tdSql.prepare()
startTime_all = time.time()
# self.run_1()
# self.run_2()
# self.run_3()
#self.run_4()
self.run_5()
# self.run_5()
# self.run_6()
# self.run_7()
endTime_all = time.time()
print("total time %ds" % (endTime_all - startTime_all))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,192 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import random
import os
import time
import taos
import subprocess
import string
from faker import Faker
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
from util.dnodes import *
class TDTestCase:
updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"querySmaOptimize":1}
def init(self, conn, logSql, replicaVar):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.testcasePath = os.path.split(__file__)[0]
self.testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
now = time.time()
self.ts = int(round(now * 1000))
self.num = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
tdSql.prepare()
startTime_all = time.time()
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
#-N:regular table -d:database name -t:table num -n:rows num per table -l:col num -y:force
#regular old && new
startTime = time.time()
os.system("%staosBenchmark -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath)
tdSql.execute("use regular_old")
tdSql.query("show tables;")
tdSql.checkRows(1)
tdSql.query("select * from meters;")
tdSql.checkCols(1024)
tdSql.query("describe meters;")
tdSql.checkRows(1024)
os.system("%staosBenchmark -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath)
tdSql.execute("use regular_new")
tdSql.query("show tables;")
tdSql.checkRows(1)
tdSql.query("select * from meters;")
tdSql.checkCols(4096)
tdSql.query("describe meters;")
tdSql.checkRows(4096)
#super table -d:database name -t:table num -n:rows num per table -l:col num -y:force
os.system("%staosBenchmark -d super_old -t 1 -n 10 -l 1021 -y" % binPath)
tdSql.execute("use super_old")
tdSql.query("show tables;")
tdSql.checkRows(1)
tdSql.query("select * from meters;")
tdSql.checkCols(1024)
tdSql.query("select * from d0;")
tdSql.checkCols(1022)
tdSql.query("describe meters;")
tdSql.checkRows(1024)
tdSql.query("describe d0;")
tdSql.checkRows(1024)
os.system("%staosBenchmark -d super_new -t 1 -n 10 -l 4093 -y" % binPath)
tdSql.execute("use super_new")
tdSql.query("show tables;")
tdSql.checkRows(1)
tdSql.query("select * from meters;")
tdSql.checkCols(4096)
tdSql.query("select * from d0;")
tdSql.checkCols(4094)
tdSql.query("describe meters;")
tdSql.checkRows(4096)
tdSql.query("describe d0;")
tdSql.checkRows(4096)
tdSql.execute("create table stb_new1_1 using meters tags(1,2)")
tdSql.query("select * from stb_new1_1")
tdSql.checkCols(4094)
tdSql.query("describe stb_new1_1;")
tdSql.checkRows(4096)
# insert: create one or multiple tables per sql and insert multiple rows per sql
os.system("%staosBenchmark -f %s/rowlength64k.json -y " % (binPath,self.testcasePath))
tdSql.execute("use json_test")
tdSql.query("select count (tbname) from stb_old")
tdSql.checkData(0, 0, 10)
tdSql.query("select * from stb_old")
tdSql.checkRows(10)
tdSql.checkCols(1024)
tdSql.query("select count (tbname) from stb_new")
tdSql.checkData(0, 0, 10)
tdSql.query("select * from stb_new")
tdSql.checkRows(10)
tdSql.checkCols(4096)
tdSql.query("describe stb_new;")
tdSql.checkRows(4096)
tdSql.query("select * from stb_new_0")
tdSql.checkRows(10)
tdSql.checkCols(4091)
tdSql.query("describe stb_new_0;")
tdSql.checkRows(4096)
tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)")
tdSql.query("select * from stb_new1_1")
tdSql.checkCols(4091)
tdSql.query("describe stb_new1_1;")
tdSql.checkRows(4096)
tdSql.query("select count (tbname) from stb_mix")
tdSql.checkData(0, 0, 10)
tdSql.query("select * from stb_mix")
tdSql.checkRows(10)
tdSql.checkCols(4096)
tdSql.query("describe stb_mix;")
tdSql.checkRows(4096)
tdSql.query("select * from stb_mix_0")
tdSql.checkRows(10)
tdSql.checkCols(4092)
tdSql.query("describe stb_mix_0;")
tdSql.checkRows(4096)
tdSql.query("select count (tbname) from stb_excel")
tdSql.checkData(0, 0, 10)
tdSql.query("select * from stb_excel")
tdSql.checkRows(10)
tdSql.checkCols(4096)
tdSql.query("describe stb_excel;")
tdSql.checkRows(4096)
tdSql.query("select * from stb_excel_0")
tdSql.checkRows(10)
tdSql.checkCols(4092)
tdSql.query("describe stb_excel_0;")
tdSql.checkRows(4096)
endTime = time.time()
print("total time %ds" % (endTime - startTime))
endTime_all = time.time()
print("total time %ds" % (endTime_all - startTime_all))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

tests/system-test/forcedrop Symbolic link
View File

@ -0,0 +1 @@
/home/ubuntu/Documents/github/cadem/emptydebug/forcedrop/