Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/vnode_compact

Hongze Cheng 2022-12-21 14:36:36 +08:00
commit 5ab1cba45e
80 changed files with 8687 additions and 3694 deletions

View File

@ -173,7 +173,7 @@ def pre_test_build_mac() {
'''
sh '''
cd ${WK}/debug
cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false
cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false -DCMAKE_BUILD_TYPE=Release
make -j10
ctest -j10 || exit 7
'''

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 566540d
GIT_TAG f0c1753
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG b20c9d1
GIT_TAG 261fcca
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -876,7 +876,8 @@ INTERP(expr)
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. The parameter `EVERY` must be an integer, with no quotes, with a time unit of: b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on the `FILL` parameter.
- `INTERP` can only be used to interpolate within a single timeline, so it must be used with `partition by tbname` when it is applied to a STable.
- Pseudo column `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.1.4).
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.1.4).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether each result row is an original record or a data point generated by the interpolation algorithm (supported since version 3.0.2.1); a C sketch follows this list.
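A minimal sketch of how these pseudocolumns might be queried from the C client, assuming a running server at `localhost`, a database `power`, and a supertable `meters` with a `current` column (none of which are part of this commit):

```c
// Hedged sketch: select _irowts and _isfilled alongside INTERP through the
// native C client. Connection parameters and table/column names are placeholders.
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *pConn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (pConn == NULL) return 1;

  TAOS_RES *pRes = taos_query(pConn,
      "SELECT _irowts, _isfilled, INTERP(current) FROM meters "
      "PARTITION BY tbname "
      "RANGE('2022-12-21 00:00:00', '2022-12-21 00:10:00') "
      "EVERY(500a) FILL(LINEAR)");
  if (taos_errno(pRes) != 0) {
    printf("INTERP query failed, reason:%s\n", taos_errstr(pRes));
  } else {
    TAOS_ROW     row;
    char         buf[1024];
    TAOS_FIELD  *fields = taos_fetch_fields(pRes);
    int          numOfFields = taos_field_count(pRes);
    while ((row = taos_fetch_row(pRes)) != NULL) {
      taos_print_row(buf, row, fields, numOfFields);  // ts, filled flag, interpolated value
      printf("%s\n", buf);
    }
  }
  taos_free_result(pRes);
  taos_close(pConn);
  return 0;
}
```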
### LAST

View File

@ -108,7 +108,7 @@ SHOW STREAMS;
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering:
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering, with AT_ONCE as the default (an example follows below):
1. AT_ONCE: triggers on write
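As a hedged illustration of selecting the trigger mode, the stream below is created with `TRIGGER AT_ONCE` through the C client; the stream name, target table, and source supertable are assumptions, not part of this commit:

```c
// Hedged sketch: create a stream whose windowed results are emitted AT_ONCE
// (on every write). All object names below are illustrative placeholders.
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *pConn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (pConn == NULL) return 1;

  TAOS_RES *pRes = taos_query(pConn,
      "CREATE STREAM IF NOT EXISTS avg_stream TRIGGER AT_ONCE "
      "INTO avg_output AS "
      "SELECT _wstart, AVG(current) AS avg_current FROM meters INTERVAL(10s)");
  if (taos_errno(pRes) != 0) {
    printf("create stream failed, reason:%s\n", taos_errstr(pRes));
  }
  taos_free_result(pRes);
  taos_close(pConn);
  return 0;
}
```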

View File

@ -24,7 +24,7 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("create topic if not exists example_tmq_topic with meta as DATABASE example_tmq")
_, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq")
if err != nil {
panic(err)
}

View File

@ -19,6 +19,7 @@ TDengine provides Schemaless APIs compatible with the InfluxDB (v1) and OpenTSDB line protocols
- `precision` The time precision used by TDengine
- `u` TDengine username
- `p` TDengine password
- `ttl` The time-to-live of the automatically created subtable, determined by the TTL parameter carried with the first record written to that subtable; it cannot be updated afterwards. For details, see the TTL parameter in the [table creation documentation](taos-sql/table/#创建表). A native-client sketch follows below.
Note: InfluxDB token authentication is not currently supported; only Basic authentication and query-parameter authentication are supported.
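For reference, the native C client exposes the same per-subtable TTL behavior through `taos_schemaless_insert_ttl`, whose declaration appears later in this commit; a minimal sketch, assuming a local server and a database named `test`:

```c
// Hedged sketch: schemaless insert of one InfluxDB line-protocol record, with a
// TTL (in days) applied to the automatically created subtable. The connection
// parameters and database name are illustrative assumptions.
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *pConn = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (pConn == NULL) return 1;

  char *lines[] = {
      "meters,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219 1671603396000",
  };
  TAOS_RES *pRes = taos_schemaless_insert_ttl(pConn, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                              TSDB_SML_TIMESTAMP_MILLI_SECONDS, 100 /* ttl in days */);
  if (taos_errno(pRes) != 0) {
    printf("schemaless insert failed, reason:%s\n", taos_errstr(pRes));
  }
  taos_free_result(pRes);
  taos_close(pConn);
  return 0;
}
```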

View File

@ -139,10 +139,10 @@ alter_table_option: {
- ADD COLUMN: add a column.
- DROP COLUMN: drop a column.
- MODIFY COLUMN: modify the column definition. If the column is of a variable-length type, this command can be used to change its width; the width can only be increased, never decreased.
- MODIFY COLUMN: modify the column width. The column type must be nchar or binary; this command can be used to change its width, which can only be increased, never decreased.
- ADD TAG: add a tag to the supertable.
- DROP TAG: drop a tag from the supertable. After a tag is dropped from a supertable, it is automatically dropped from all subtables of that supertable as well.
- MODIFY TAG: modify the definition of a tag of the supertable. If the tag is of a variable-length type, this command can be used to change its width; the width can only be increased, never decreased.
- MODIFY TAG: modify the column width of a tag of the supertable. The tag type must be nchar or binary; this command can be used to change its width, which can only be increased, never decreased (see the sketch after this list).
- RENAME TAG: rename a tag of the supertable. After a tag is renamed on a supertable, the new name is automatically applied to all subtables of that supertable as well.
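A minimal sketch of widening a variable-length column and tag through the C client; the supertable `meters` and its column/tag names are placeholders used only for illustration:

```c
// Hedged sketch: widen a binary column and an nchar tag of a supertable.
// The supertable and its column/tag names are illustrative placeholders.
#include <stdio.h>
#include "taos.h"

static void execSql(TAOS *pConn, const char *sql) {
  TAOS_RES *pRes = taos_query(pConn, sql);
  if (taos_errno(pRes) != 0) {
    printf("failed to run '%s', reason:%s\n", sql, taos_errstr(pRes));
  }
  taos_free_result(pRes);
}

int main(void) {
  TAOS *pConn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (pConn == NULL) return 1;

  execSql(pConn, "ALTER STABLE meters MODIFY COLUMN remark BINARY(64)");  // width can only grow
  execSql(pConn, "ALTER STABLE meters MODIFY TAG location NCHAR(128)");   // same rule for tags

  taos_close(pConn);
  return 0;
}
```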
### Add a Column

View File

@ -880,6 +880,7 @@ INTERP(expr)
- INTERP uses the FILL clause to decide how to interpolate at each output time point that meets the output conditions.
- INTERP can only interpolate within a single time series, so it must be used together with partition by tbname when applied to a supertable.
- INTERP can be used together with the pseudocolumn _irowts to return the timestamp corresponding to each interpolation point (supported since version 3.0.1.4).
- INTERP can be used together with the pseudocolumn _isfilled to indicate whether each returned row is an original record or a data point generated by the interpolation algorithm (supported since version 3.0.2.1).
### LAST

View File

@ -114,7 +114,7 @@ SELECT * from information_schema.`ins_streams`;
When creating a stream, you can use the TRIGGER clause to specify the trigger mode of the stream computation.
For non-windowed computation, stream computation is triggered in real time; for windowed computation, three trigger modes are currently provided:
For non-windowed computation, stream computation is triggered in real time; for windowed computation, three trigger modes are currently provided, with AT_ONCE as the default:
1. AT_ONCE: triggered immediately upon write

View File

@ -35,7 +35,7 @@ int64_t st, et;
typedef struct {
int id;
TAOS *taos;
char name[16];
char name[32];
time_t timeStamp;
int value;
int rowsInserted;

View File

@ -20,7 +20,7 @@
#include <time.h>
#include "taos.h"
static int running = 1;
static int running = 1;
static int32_t msg_process(TAOS_RES* msg) {
char buf[1024];
@ -40,8 +40,8 @@ static int32_t msg_process(TAOS_RES* msg) {
TAOS_FIELD* fields = taos_fetch_fields(msg);
int32_t numOfFields = taos_field_count(msg);
//int32_t* length = taos_fetch_lengths(msg);
int32_t precision = taos_result_precision(msg);
// int32_t* length = taos_fetch_lengths(msg);
int32_t precision = taos_result_precision(msg);
rows++;
taos_print_row(buf, row, fields, numOfFields);
printf("precision: %d, row content: %s\n", precision, buf);
@ -62,14 +62,12 @@ static int32_t init_env() {
pRes = taos_query(pConn, "drop topic topicname");
if (taos_errno(pRes) != 0) {
printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop database if exists tmqdb");
if (taos_errno(pRes) != 0) {
printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
@ -77,7 +75,7 @@ static int32_t init_env() {
pRes = taos_query(pConn, "create database tmqdb precision 'ns'");
if (taos_errno(pRes) != 0) {
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
@ -87,7 +85,7 @@ static int32_t init_env() {
pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
if (taos_errno(pRes) != 0) {
printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
@ -96,28 +94,28 @@ static int32_t init_env() {
pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table ctb1, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table ctb2, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table ctb3, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
@ -126,33 +124,37 @@ static int32_t init_env() {
pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
goto END;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;
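// Shared error path: free the pending result set and close the connection
// before reporting failure.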
END:
taos_free_result(pRes);
taos_close(pConn);
return -1;
}
int32_t create_topic() {

View File

@ -59,6 +59,7 @@ typedef enum {
TSDB_OPTION_TIMEZONE,
TSDB_OPTION_CONFIGDIR,
TSDB_OPTION_SHELL_ACTIVITY_TIMER,
TSDB_OPTION_USE_ADAPTER,
TSDB_MAX_OPTIONS
} TSDB_OPTION;
@ -218,7 +219,7 @@ DLL_EXPORT const void *taos_get_raw_block(TAOS_RES *res);
DLL_EXPORT int taos_get_db_route_info(TAOS *taos, const char *db, TAOS_DB_ROUTE_INFO *dbInfo);
DLL_EXPORT int taos_get_table_vgId(TAOS *taos, const char *db, const char *table, int *vgId);
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
/* --------------------------schemaless INTERFACE------------------------------- */
@ -229,13 +230,14 @@ DLL_EXPORT TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len
int precision);
DLL_EXPORT TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows,
int protocol, int precision, int64_t reqid);
DLL_EXPORT TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl);
DLL_EXPORT TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision,
int32_t ttl);
DLL_EXPORT TAOS_RES *taos_schemaless_insert_ttl_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol,
int precision, int32_t ttl, int64_t reqid);
int precision, int32_t ttl, int64_t reqid);
DLL_EXPORT TAOS_RES *taos_schemaless_insert_raw_ttl(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol,
int precision, int32_t ttl);
int precision, int32_t ttl);
DLL_EXPORT TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows,
int protocol, int precision, int32_t ttl, int64_t reqid);
int protocol, int precision, int32_t ttl, int64_t reqid);
/* --------------------------TMQ INTERFACE------------------------------- */
@ -308,7 +310,8 @@ DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname);
DLL_EXPORT int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const char* tbname, TAOS_FIELD *fields, int numFields);
DLL_EXPORT int taos_write_raw_block_with_fields(TAOS *taos, int rows, char *pData, const char *tbname,
TAOS_FIELD *fields, int numFields);
DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
// Returning null means error. Returned result need to be freed by tmq_free_json_meta
DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res);

View File

@ -101,6 +101,7 @@ extern int32_t tsRedirectPeriod;
extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;
extern int32_t tsMaxRetryWaitTime;
extern bool tsUseAdapter;
// client
extern int32_t tsMinSlidingTime;

View File

@ -16,329 +16,330 @@
#ifndef _TD_COMMON_TOKEN_H_
#define _TD_COMMON_TOKEN_H_
#define TK_OR 1
#define TK_AND 2
#define TK_UNION 3
#define TK_ALL 4
#define TK_MINUS 5
#define TK_EXCEPT 6
#define TK_INTERSECT 7
#define TK_NK_BITAND 8
#define TK_NK_BITOR 9
#define TK_NK_LSHIFT 10
#define TK_NK_RSHIFT 11
#define TK_NK_PLUS 12
#define TK_NK_MINUS 13
#define TK_NK_STAR 14
#define TK_NK_SLASH 15
#define TK_NK_REM 16
#define TK_NK_CONCAT 17
#define TK_CREATE 18
#define TK_ACCOUNT 19
#define TK_NK_ID 20
#define TK_PASS 21
#define TK_NK_STRING 22
#define TK_ALTER 23
#define TK_PPS 24
#define TK_TSERIES 25
#define TK_STORAGE 26
#define TK_STREAMS 27
#define TK_QTIME 28
#define TK_DBS 29
#define TK_USERS 30
#define TK_CONNS 31
#define TK_STATE 32
#define TK_USER 33
#define TK_ENABLE 34
#define TK_NK_INTEGER 35
#define TK_SYSINFO 36
#define TK_DROP 37
#define TK_GRANT 38
#define TK_ON 39
#define TK_TO 40
#define TK_REVOKE 41
#define TK_FROM 42
#define TK_SUBSCRIBE 43
#define TK_NK_COMMA 44
#define TK_READ 45
#define TK_WRITE 46
#define TK_NK_DOT 47
#define TK_DNODE 48
#define TK_PORT 49
#define TK_DNODES 50
#define TK_NK_IPTOKEN 51
#define TK_FORCE 52
#define TK_LOCAL 53
#define TK_QNODE 54
#define TK_BNODE 55
#define TK_SNODE 56
#define TK_MNODE 57
#define TK_DATABASE 58
#define TK_USE 59
#define TK_FLUSH 60
#define TK_TRIM 61
#define TK_IF 62
#define TK_NOT 63
#define TK_EXISTS 64
#define TK_BUFFER 65
#define TK_CACHEMODEL 66
#define TK_CACHESIZE 67
#define TK_COMP 68
#define TK_DURATION 69
#define TK_NK_VARIABLE 70
#define TK_MAXROWS 71
#define TK_MINROWS 72
#define TK_KEEP 73
#define TK_PAGES 74
#define TK_PAGESIZE 75
#define TK_TSDB_PAGESIZE 76
#define TK_PRECISION 77
#define TK_REPLICA 78
#define TK_VGROUPS 79
#define TK_SINGLE_STABLE 80
#define TK_RETENTIONS 81
#define TK_SCHEMALESS 82
#define TK_WAL_LEVEL 83
#define TK_WAL_FSYNC_PERIOD 84
#define TK_WAL_RETENTION_PERIOD 85
#define TK_WAL_RETENTION_SIZE 86
#define TK_WAL_ROLL_PERIOD 87
#define TK_WAL_SEGMENT_SIZE 88
#define TK_STT_TRIGGER 89
#define TK_TABLE_PREFIX 90
#define TK_TABLE_SUFFIX 91
#define TK_NK_COLON 92
#define TK_MAX_SPEED 93
#define TK_TABLE 94
#define TK_NK_LP 95
#define TK_NK_RP 96
#define TK_STABLE 97
#define TK_ADD 98
#define TK_COLUMN 99
#define TK_MODIFY 100
#define TK_RENAME 101
#define TK_TAG 102
#define TK_SET 103
#define TK_NK_EQ 104
#define TK_USING 105
#define TK_TAGS 106
#define TK_COMMENT 107
#define TK_BOOL 108
#define TK_TINYINT 109
#define TK_SMALLINT 110
#define TK_INT 111
#define TK_INTEGER 112
#define TK_BIGINT 113
#define TK_FLOAT 114
#define TK_DOUBLE 115
#define TK_BINARY 116
#define TK_TIMESTAMP 117
#define TK_NCHAR 118
#define TK_UNSIGNED 119
#define TK_JSON 120
#define TK_VARCHAR 121
#define TK_MEDIUMBLOB 122
#define TK_BLOB 123
#define TK_VARBINARY 124
#define TK_DECIMAL 125
#define TK_MAX_DELAY 126
#define TK_WATERMARK 127
#define TK_ROLLUP 128
#define TK_TTL 129
#define TK_SMA 130
#define TK_DELETE_MARK 131
#define TK_FIRST 132
#define TK_LAST 133
#define TK_SHOW 134
#define TK_PRIVILEGES 135
#define TK_DATABASES 136
#define TK_TABLES 137
#define TK_STABLES 138
#define TK_MNODES 139
#define TK_QNODES 140
#define TK_FUNCTIONS 141
#define TK_INDEXES 142
#define TK_ACCOUNTS 143
#define TK_APPS 144
#define TK_CONNECTIONS 145
#define TK_LICENCES 146
#define TK_GRANTS 147
#define TK_QUERIES 148
#define TK_SCORES 149
#define TK_TOPICS 150
#define TK_VARIABLES 151
#define TK_CLUSTER 152
#define TK_BNODES 153
#define TK_SNODES 154
#define TK_TRANSACTIONS 155
#define TK_DISTRIBUTED 156
#define TK_CONSUMERS 157
#define TK_SUBSCRIPTIONS 158
#define TK_VNODES 159
#define TK_LIKE 160
#define TK_TBNAME 161
#define TK_QTAGS 162
#define TK_AS 163
#define TK_INDEX 164
#define TK_FUNCTION 165
#define TK_INTERVAL 166
#define TK_COUNT 167
#define TK_LAST_ROW 168
#define TK_TOPIC 169
#define TK_WITH 170
#define TK_META 171
#define TK_CONSUMER 172
#define TK_GROUP 173
#define TK_DESC 174
#define TK_DESCRIBE 175
#define TK_RESET 176
#define TK_QUERY 177
#define TK_CACHE 178
#define TK_EXPLAIN 179
#define TK_ANALYZE 180
#define TK_VERBOSE 181
#define TK_NK_BOOL 182
#define TK_RATIO 183
#define TK_NK_FLOAT 184
#define TK_OUTPUTTYPE 185
#define TK_AGGREGATE 186
#define TK_BUFSIZE 187
#define TK_STREAM 188
#define TK_INTO 189
#define TK_TRIGGER 190
#define TK_AT_ONCE 191
#define TK_WINDOW_CLOSE 192
#define TK_IGNORE 193
#define TK_EXPIRED 194
#define TK_FILL_HISTORY 195
#define TK_SUBTABLE 196
#define TK_KILL 197
#define TK_CONNECTION 198
#define TK_TRANSACTION 199
#define TK_BALANCE 200
#define TK_VGROUP 201
#define TK_MERGE 202
#define TK_REDISTRIBUTE 203
#define TK_SPLIT 204
#define TK_DELETE 205
#define TK_INSERT 206
#define TK_NULL 207
#define TK_NK_QUESTION 208
#define TK_NK_ARROW 209
#define TK_ROWTS 210
#define TK_QSTART 211
#define TK_QEND 212
#define TK_QDURATION 213
#define TK_WSTART 214
#define TK_WEND 215
#define TK_WDURATION 216
#define TK_IROWTS 217
#define TK_CAST 218
#define TK_NOW 219
#define TK_TODAY 220
#define TK_TIMEZONE 221
#define TK_CLIENT_VERSION 222
#define TK_SERVER_VERSION 223
#define TK_SERVER_STATUS 224
#define TK_CURRENT_USER 225
#define TK_CASE 226
#define TK_END 227
#define TK_WHEN 228
#define TK_THEN 229
#define TK_ELSE 230
#define TK_BETWEEN 231
#define TK_IS 232
#define TK_NK_LT 233
#define TK_NK_GT 234
#define TK_NK_LE 235
#define TK_NK_GE 236
#define TK_NK_NE 237
#define TK_MATCH 238
#define TK_NMATCH 239
#define TK_CONTAINS 240
#define TK_IN 241
#define TK_JOIN 242
#define TK_INNER 243
#define TK_SELECT 244
#define TK_DISTINCT 245
#define TK_WHERE 246
#define TK_PARTITION 247
#define TK_BY 248
#define TK_SESSION 249
#define TK_STATE_WINDOW 250
#define TK_EVENT_WINDOW 251
#define TK_START 252
#define TK_SLIDING 253
#define TK_FILL 254
#define TK_VALUE 255
#define TK_NONE 256
#define TK_PREV 257
#define TK_LINEAR 258
#define TK_NEXT 259
#define TK_HAVING 260
#define TK_RANGE 261
#define TK_EVERY 262
#define TK_ORDER 263
#define TK_SLIMIT 264
#define TK_SOFFSET 265
#define TK_LIMIT 266
#define TK_OFFSET 267
#define TK_ASC 268
#define TK_NULLS 269
#define TK_ABORT 270
#define TK_AFTER 271
#define TK_ATTACH 272
#define TK_BEFORE 273
#define TK_BEGIN 274
#define TK_BITAND 275
#define TK_BITNOT 276
#define TK_BITOR 277
#define TK_BLOCKS 278
#define TK_CHANGE 279
#define TK_COMMA 280
#define TK_COMPACT 281
#define TK_CONCAT 282
#define TK_CONFLICT 283
#define TK_COPY 284
#define TK_DEFERRED 285
#define TK_DELIMITERS 286
#define TK_DETACH 287
#define TK_DIVIDE 288
#define TK_DOT 289
#define TK_EACH 290
#define TK_FAIL 291
#define TK_FILE 292
#define TK_FOR 293
#define TK_GLOB 294
#define TK_ID 295
#define TK_IMMEDIATE 296
#define TK_IMPORT 297
#define TK_INITIALLY 298
#define TK_INSTEAD 299
#define TK_ISNULL 300
#define TK_KEY 301
#define TK_MODULES 302
#define TK_NK_BITNOT 303
#define TK_NK_SEMI 304
#define TK_NOTNULL 305
#define TK_OF 306
#define TK_PLUS 307
#define TK_PRIVILEGE 308
#define TK_RAISE 309
#define TK_REPLACE 310
#define TK_RESTRICT 311
#define TK_ROW 312
#define TK_SEMI 313
#define TK_STAR 314
#define TK_STATEMENT 315
#define TK_STRICT 316
#define TK_STRING 317
#define TK_TIMES 318
#define TK_UPDATE 319
#define TK_VALUES 320
#define TK_VARIABLE 321
#define TK_VIEW 322
#define TK_WAL 323
#define TK_OR 1
#define TK_AND 2
#define TK_UNION 3
#define TK_ALL 4
#define TK_MINUS 5
#define TK_EXCEPT 6
#define TK_INTERSECT 7
#define TK_NK_BITAND 8
#define TK_NK_BITOR 9
#define TK_NK_LSHIFT 10
#define TK_NK_RSHIFT 11
#define TK_NK_PLUS 12
#define TK_NK_MINUS 13
#define TK_NK_STAR 14
#define TK_NK_SLASH 15
#define TK_NK_REM 16
#define TK_NK_CONCAT 17
#define TK_CREATE 18
#define TK_ACCOUNT 19
#define TK_NK_ID 20
#define TK_PASS 21
#define TK_NK_STRING 22
#define TK_ALTER 23
#define TK_PPS 24
#define TK_TSERIES 25
#define TK_STORAGE 26
#define TK_STREAMS 27
#define TK_QTIME 28
#define TK_DBS 29
#define TK_USERS 30
#define TK_CONNS 31
#define TK_STATE 32
#define TK_USER 33
#define TK_ENABLE 34
#define TK_NK_INTEGER 35
#define TK_SYSINFO 36
#define TK_DROP 37
#define TK_GRANT 38
#define TK_ON 39
#define TK_TO 40
#define TK_REVOKE 41
#define TK_FROM 42
#define TK_SUBSCRIBE 43
#define TK_NK_COMMA 44
#define TK_READ 45
#define TK_WRITE 46
#define TK_NK_DOT 47
#define TK_DNODE 48
#define TK_PORT 49
#define TK_DNODES 50
#define TK_NK_IPTOKEN 51
#define TK_FORCE 52
#define TK_LOCAL 53
#define TK_QNODE 54
#define TK_BNODE 55
#define TK_SNODE 56
#define TK_MNODE 57
#define TK_DATABASE 58
#define TK_USE 59
#define TK_FLUSH 60
#define TK_TRIM 61
#define TK_IF 62
#define TK_NOT 63
#define TK_EXISTS 64
#define TK_BUFFER 65
#define TK_CACHEMODEL 66
#define TK_CACHESIZE 67
#define TK_COMP 68
#define TK_DURATION 69
#define TK_NK_VARIABLE 70
#define TK_MAXROWS 71
#define TK_MINROWS 72
#define TK_KEEP 73
#define TK_PAGES 74
#define TK_PAGESIZE 75
#define TK_TSDB_PAGESIZE 76
#define TK_PRECISION 77
#define TK_REPLICA 78
#define TK_VGROUPS 79
#define TK_SINGLE_STABLE 80
#define TK_RETENTIONS 81
#define TK_SCHEMALESS 82
#define TK_WAL_LEVEL 83
#define TK_WAL_FSYNC_PERIOD 84
#define TK_WAL_RETENTION_PERIOD 85
#define TK_WAL_RETENTION_SIZE 86
#define TK_WAL_ROLL_PERIOD 87
#define TK_WAL_SEGMENT_SIZE 88
#define TK_STT_TRIGGER 89
#define TK_TABLE_PREFIX 90
#define TK_TABLE_SUFFIX 91
#define TK_NK_COLON 92
#define TK_MAX_SPEED 93
#define TK_TABLE 94
#define TK_NK_LP 95
#define TK_NK_RP 96
#define TK_STABLE 97
#define TK_ADD 98
#define TK_COLUMN 99
#define TK_MODIFY 100
#define TK_RENAME 101
#define TK_TAG 102
#define TK_SET 103
#define TK_NK_EQ 104
#define TK_USING 105
#define TK_TAGS 106
#define TK_COMMENT 107
#define TK_BOOL 108
#define TK_TINYINT 109
#define TK_SMALLINT 110
#define TK_INT 111
#define TK_INTEGER 112
#define TK_BIGINT 113
#define TK_FLOAT 114
#define TK_DOUBLE 115
#define TK_BINARY 116
#define TK_TIMESTAMP 117
#define TK_NCHAR 118
#define TK_UNSIGNED 119
#define TK_JSON 120
#define TK_VARCHAR 121
#define TK_MEDIUMBLOB 122
#define TK_BLOB 123
#define TK_VARBINARY 124
#define TK_DECIMAL 125
#define TK_MAX_DELAY 126
#define TK_WATERMARK 127
#define TK_ROLLUP 128
#define TK_TTL 129
#define TK_SMA 130
#define TK_DELETE_MARK 131
#define TK_FIRST 132
#define TK_LAST 133
#define TK_SHOW 134
#define TK_PRIVILEGES 135
#define TK_DATABASES 136
#define TK_TABLES 137
#define TK_STABLES 138
#define TK_MNODES 139
#define TK_QNODES 140
#define TK_FUNCTIONS 141
#define TK_INDEXES 142
#define TK_ACCOUNTS 143
#define TK_APPS 144
#define TK_CONNECTIONS 145
#define TK_LICENCES 146
#define TK_GRANTS 147
#define TK_QUERIES 148
#define TK_SCORES 149
#define TK_TOPICS 150
#define TK_VARIABLES 151
#define TK_CLUSTER 152
#define TK_BNODES 153
#define TK_SNODES 154
#define TK_TRANSACTIONS 155
#define TK_DISTRIBUTED 156
#define TK_CONSUMERS 157
#define TK_SUBSCRIPTIONS 158
#define TK_VNODES 159
#define TK_LIKE 160
#define TK_TBNAME 161
#define TK_QTAGS 162
#define TK_AS 163
#define TK_INDEX 164
#define TK_FUNCTION 165
#define TK_INTERVAL 166
#define TK_COUNT 167
#define TK_LAST_ROW 168
#define TK_TOPIC 169
#define TK_WITH 170
#define TK_META 171
#define TK_CONSUMER 172
#define TK_GROUP 173
#define TK_DESC 174
#define TK_DESCRIBE 175
#define TK_RESET 176
#define TK_QUERY 177
#define TK_CACHE 178
#define TK_EXPLAIN 179
#define TK_ANALYZE 180
#define TK_VERBOSE 181
#define TK_NK_BOOL 182
#define TK_RATIO 183
#define TK_NK_FLOAT 184
#define TK_OUTPUTTYPE 185
#define TK_AGGREGATE 186
#define TK_BUFSIZE 187
#define TK_STREAM 188
#define TK_INTO 189
#define TK_TRIGGER 190
#define TK_AT_ONCE 191
#define TK_WINDOW_CLOSE 192
#define TK_IGNORE 193
#define TK_EXPIRED 194
#define TK_FILL_HISTORY 195
#define TK_SUBTABLE 196
#define TK_KILL 197
#define TK_CONNECTION 198
#define TK_TRANSACTION 199
#define TK_BALANCE 200
#define TK_VGROUP 201
#define TK_MERGE 202
#define TK_REDISTRIBUTE 203
#define TK_SPLIT 204
#define TK_DELETE 205
#define TK_INSERT 206
#define TK_NULL 207
#define TK_NK_QUESTION 208
#define TK_NK_ARROW 209
#define TK_ROWTS 210
#define TK_QSTART 211
#define TK_QEND 212
#define TK_QDURATION 213
#define TK_WSTART 214
#define TK_WEND 215
#define TK_WDURATION 216
#define TK_IROWTS 217
#define TK_ISFILLED 218
#define TK_CAST 219
#define TK_NOW 220
#define TK_TODAY 221
#define TK_TIMEZONE 222
#define TK_CLIENT_VERSION 223
#define TK_SERVER_VERSION 224
#define TK_SERVER_STATUS 225
#define TK_CURRENT_USER 226
#define TK_CASE 227
#define TK_END 228
#define TK_WHEN 229
#define TK_THEN 230
#define TK_ELSE 231
#define TK_BETWEEN 232
#define TK_IS 233
#define TK_NK_LT 234
#define TK_NK_GT 235
#define TK_NK_LE 236
#define TK_NK_GE 237
#define TK_NK_NE 238
#define TK_MATCH 239
#define TK_NMATCH 240
#define TK_CONTAINS 241
#define TK_IN 242
#define TK_JOIN 243
#define TK_INNER 244
#define TK_SELECT 245
#define TK_DISTINCT 246
#define TK_WHERE 247
#define TK_PARTITION 248
#define TK_BY 249
#define TK_SESSION 250
#define TK_STATE_WINDOW 251
#define TK_EVENT_WINDOW 252
#define TK_START 253
#define TK_SLIDING 254
#define TK_FILL 255
#define TK_VALUE 256
#define TK_NONE 257
#define TK_PREV 258
#define TK_LINEAR 259
#define TK_NEXT 260
#define TK_HAVING 261
#define TK_RANGE 262
#define TK_EVERY 263
#define TK_ORDER 264
#define TK_SLIMIT 265
#define TK_SOFFSET 266
#define TK_LIMIT 267
#define TK_OFFSET 268
#define TK_ASC 269
#define TK_NULLS 270
#define TK_ABORT 271
#define TK_AFTER 272
#define TK_ATTACH 273
#define TK_BEFORE 274
#define TK_BEGIN 275
#define TK_BITAND 276
#define TK_BITNOT 277
#define TK_BITOR 278
#define TK_BLOCKS 279
#define TK_CHANGE 280
#define TK_COMMA 281
#define TK_COMPACT 282
#define TK_CONCAT 283
#define TK_CONFLICT 284
#define TK_COPY 285
#define TK_DEFERRED 286
#define TK_DELIMITERS 287
#define TK_DETACH 288
#define TK_DIVIDE 289
#define TK_DOT 290
#define TK_EACH 291
#define TK_FAIL 292
#define TK_FILE 293
#define TK_FOR 294
#define TK_GLOB 295
#define TK_ID 296
#define TK_IMMEDIATE 297
#define TK_IMPORT 298
#define TK_INITIALLY 299
#define TK_INSTEAD 300
#define TK_ISNULL 301
#define TK_KEY 302
#define TK_MODULES 303
#define TK_NK_BITNOT 304
#define TK_NK_SEMI 305
#define TK_NOTNULL 306
#define TK_OF 307
#define TK_PLUS 308
#define TK_PRIVILEGE 309
#define TK_RAISE 310
#define TK_REPLACE 311
#define TK_RESTRICT 312
#define TK_ROW 313
#define TK_SEMI 314
#define TK_STAR 315
#define TK_STATEMENT 316
#define TK_STRICT 317
#define TK_STRING 318
#define TK_TIMES 319
#define TK_UPDATE 320
#define TK_VALUES 321
#define TK_VARIABLE 322
#define TK_VIEW 323
#define TK_WAL 324
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601

View File

@ -266,6 +266,7 @@ typedef struct {
#define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE)
#define IS_INTEGER_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)))
#define IS_TIMESTAMP_TYPE(_t) ((_t) == TSDB_DATA_TYPE_TIMESTAMP)
#define IS_BOOLEAN_TYPE(_t) ((_t) == TSDB_DATA_TYPE_BOOL)
#define IS_NUMERIC_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)) || (IS_FLOAT_TYPE(_t)))
#define IS_MATHABLE_TYPE(_t) \

View File

@ -120,6 +120,7 @@ typedef enum EFunctionType {
FUNCTION_TYPE_WEND,
FUNCTION_TYPE_WDURATION,
FUNCTION_TYPE_IROWTS,
FUNCTION_TYPE_ISFILLED,
FUNCTION_TYPE_TAGS,
// internal function

View File

@ -225,7 +225,7 @@ typedef struct SAlterUserStmt {
typedef struct SDropUserStmt {
ENodeType type;
char useName[TSDB_USER_LEN];
char userName[TSDB_USER_LEN];
} SDropUserStmt;
typedef struct SCreateDnodeStmt {

View File

@ -117,7 +117,7 @@ typedef enum ENodeType {
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIF_STMT,
QUERY_NODE_VNODE_MODIFY_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT,

View File

@ -364,7 +364,7 @@ typedef struct SVgDataBlocks {
typedef void (*FFreeDataBlockHash)(SHashObj*);
typedef void (*FFreeDataBlockArray)(SArray*);
typedef struct SVnodeModifOpStmt {
typedef struct SVnodeModifyOpStmt {
ENodeType nodeType;
ENodeType sqlNodeType;
SArray* pDataBlocks; // data block for each vgroup, SArray<SVgDataBlocks*>.
@ -388,7 +388,7 @@ typedef struct SVnodeModifOpStmt {
FFreeDataBlockArray freeArrayFunc;
bool usingTableProcessing;
bool fileProcessing;
} SVnodeModifOpStmt;
} SVnodeModifyOpStmt;
typedef struct SExplainOptions {
ENodeType type;

View File

@ -3,7 +3,7 @@
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
set -e
#set -x
# set -x
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 | loongarch64...]
@ -96,6 +96,8 @@ while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do
esac
done
osType=$(uname)
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}"
curr_dir=$(pwd)
@ -233,7 +235,12 @@ else
exit 1
fi
CORES=$(grep -c ^processor /proc/cpuinfo)
ostype=`uname`
if [ "${ostype}" == "Darwin" ]; then
CORES=$(sysctl -n hw.ncpu)
else
CORES=$(grep -c ^processor /proc/cpuinfo)
fi
if [[ "$allocator" == "jemalloc" ]]; then
# jemalloc need compile first, so disable parallel build
@ -306,7 +313,7 @@ if [ "$osType" != "Darwin" ]; then
${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
else
# only make client for Darwin
cd ${script_dir}/tools
./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
fi

View File

@ -2,7 +2,7 @@
#
# Generate tar.gz package for linux client in all os system
set -e
#set -x
# set -x
curr_dir=$(pwd)
compile_dir=$1
@ -249,9 +249,9 @@ if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
else
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
mv "$(basename ${pkg_name}).tar.gz" ..
rm -rf ./*
mv ../"$(basename ${pkg_name}).tar.gz" .
# mv "$(basename ${pkg_name}).tar.gz" ..
rm -rf ${install_dir} ||:
# mv ../"$(basename ${pkg_name}).tar.gz" .
fi
cd ${curr_dir}

View File

@ -3,7 +3,7 @@
# Generate tar.gz package for all os system
set -e
#set -x
# set -x
curr_dir=$(pwd)
compile_dir=$1
@ -74,14 +74,16 @@ else
tdinsight_caches=""
cd ${build_dir}/bin/ && \
chmod +x TDinsight.sh
tdinsight_caches=$(./TDinsight.sh --download-only | xargs -i printf "${build_dir}/bin/{} ")
./TDinsight.sh --download-only ||:
# tdinsight_caches=$(./TDinsight.sh --download-only | xargs -I printf "${build_dir}/bin/{} ")
cd $orig_pwd
echo "TDinsight caches: $tdinsight_caches"
taostools_bin_files=" ${build_dir}/bin/taosdump \
${build_dir}/bin/taosBenchmark \
${build_dir}/bin/TDinsight.sh \
$tdinsight_caches"
${build_dir}/bin/tdengine-datasource.zip \
${build_dir}/bin/tdengine-datasource.zip.md5sum"
[ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx"
bin_files="${build_dir}/bin/${serverName} \
@ -96,8 +98,13 @@ else
${script_dir}/taosd-dump-cfg.gdb"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
wslib_files="${build_dir}/lib/libtaosws.so"
if [ "$osType" == "Darwin" ]; then
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
wslib_files="${build_dir}/lib/libtaosws.dylib"
else
lib_files="${build_dir}/lib/libtaos.so.${version}"
wslib_files="${build_dir}/lib/libtaosws.so"
fi
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"
wsheader_files="${build_dir}/include/taosws.h"
@ -226,7 +233,12 @@ if [ "$verMode" == "cloud" ]; then
fi
cd ${install_dir}
tar -zcv -f ${tarName} * --remove-files || :
if [ "$osType" != "Darwin" ]; then
tar -zcv -f ${tarName} * --remove-files || :
else
tar -zcv -f ${tarName} * || :
fi
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${tarName} error !!!"
@ -288,7 +300,7 @@ if [[ $dbName == "taos" ]]; then
if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
if [ -d "${web_dir}/admin" ] ; then
mkdir -p ${install_dir}/share/
cp ${web_dir}/admin ${install_dir}/share/ -r
cp -Rfap ${web_dir}/admin ${install_dir}/share/
cp ${web_dir}/png/taos.png ${install_dir}/share/admin/images/taos.png
else
echo "directory not found for enterprise release: ${web_dir}/admin"
@ -362,7 +374,15 @@ if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
else
tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" || :
rm -rf ${install_dir} ||:
([ -d build-taoskeeper ] && rm -rf build-taoskeeper ) ||:
fi
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
@ -371,7 +391,12 @@ fi
if [ -n "${taostools_bin_files}" ]; then
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!"|| echo "failed to download TDinsight.sh"
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
else
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" || :
rm -rf ${taostools_install_dir} ||:
fi
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${taostools_pkg_name}.tar.gz error !!!"

View File

@ -260,6 +260,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JN
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInsertImp(JNIEnv *, jobject, jobjectArray,
jlong, jint, jint);
/**
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: getTableVgID
* Signature: (Ljava/lang/String;Ljava/lang/String)Lcom/taosdata/jdbc/VGroupIDResp
*/
JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTableVgID(JNIEnv *, jobject, jlong, jstring,
jstring, jobject);
#ifdef __cplusplus
}
#endif

View File

@ -76,7 +76,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
"current:%d, app current:%d",
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType) {
// tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
// "us, exec:%" PRId64 "us",
// duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
@ -469,6 +469,9 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
case TSDB_OPTION_TIMEZONE:
pItem = cfgGetItem(pCfg, "timezone");
break;
case TSDB_OPTION_USE_ADAPTER:
pItem = cfgGetItem(pCfg, "useAdapter");
break;
default:
break;
}
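The new `useAdapter` switch handled above pairs with the `TSDB_OPTION_USE_ADAPTER` enum added to taos.h earlier in this commit; a minimal, hedged sketch of enabling it from application code (connection parameters are placeholders):

```c
// Hedged sketch: enable the new useAdapter client option before connecting.
// taos_options() forwards the string value to the "useAdapter" config item.
#include "taos.h"

int main(void) {
  taos_options(TSDB_OPTION_USE_ADAPTER, "true");  // parsed as a boolean config value
  TAOS *pConn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (pConn == NULL) return 1;
  taos_close(pConn);
  return 0;
}
```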

View File

@ -739,6 +739,7 @@ int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog
SArray* pArray = NULL;
SSubmitRsp* pRsp = (SSubmitRsp*)res;
if (pRsp->nBlocks <= 0) {
taosMemoryFreeClear(pRsp->pBlocks);
return TSDB_CODE_SUCCESS;
}
@ -875,7 +876,7 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
}
static bool incompletaFileParsing(SNode* pStmt) {
return QUERY_NODE_VNODE_MODIF_STMT != nodeType(pStmt) ? false : ((SVnodeModifOpStmt*)pStmt)->fileProcessing;
return QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pStmt) ? false : ((SVnodeModifyOpStmt*)pStmt)->fileProcessing;
}
// todo refacto the error code mgmt
@ -954,7 +955,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
if (pQuery->pRoot && !pRequest->inRetry) {
STscObj* pTscObj = pRequest->pTscObj;
SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
if (QUERY_NODE_VNODE_MODIF_STMT == pQuery->pRoot->type) {
if (QUERY_NODE_VNODE_MODIFY_STMT == pQuery->pRoot->type) {
atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1);
} else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) {
atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1);
@ -1059,7 +1060,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
}
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
if (QUERY_NODE_VNODE_MODIF_STMT != nodeType(pQuery->pRoot)) {
if (QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pQuery->pRoot)) {
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
}

View File

@ -488,7 +488,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
numOfFields);
return JNI_FETCH_END;
} else {
jniDebug("jobj:%p, conn:%p, interrupted query. fetch row error code: %d, msg:%s", jobj, tscon, code, taos_errstr(result));
jniDebug("jobj:%p, conn:%p, interrupted query. fetch row error code: %d, msg:%s", jobj, tscon, code,
taos_errstr(result));
return JNI_RESULT_SET_NULL;
}
}
@ -583,7 +584,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNI
jniDebug("jobj:%p, conn:%p, resultset:%p, no data to retrieve", jobj, tscon, (void *)res);
return JNI_FETCH_END;
} else {
jniError("jobj:%p, conn:%p, query interrupted. fetch block error code:%d, msg:%s", jobj, tscon, error_code, taos_errstr(tres));
jniError("jobj:%p, conn:%p, query interrupted. fetch block error code:%d, msg:%s", jobj, tscon, error_code,
taos_errstr(tres));
return JNI_RESULT_SET_NULL;
}
}
@ -1028,3 +1030,62 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInsert
}
return (jlong)tres;
}
// TABLE_VG_ID_FID_CACHE cache resp object for getTableVgID
typedef struct TABLE_VG_ID_FIELD_CACHE {
int cached;
jclass clazz;
jfieldID codeField;
jfieldID vgIDField;
} TABLE_VG_ID_FIELD_CACHE;
TABLE_VG_ID_FIELD_CACHE tableVgIdFieldCache;
void cacheTableVgIDField(JNIEnv *env, jobject jobj) {
if (tableVgIdFieldCache.cached) {
return;
}
tableVgIdFieldCache.clazz = (*env)->GetObjectClass(env, jobj);
tableVgIdFieldCache.codeField = (*env)->GetFieldID(env, tableVgIdFieldCache.clazz, "code", "I");
tableVgIdFieldCache.vgIDField = (*env)->GetFieldID(env, tableVgIdFieldCache.clazz, "vgID", "I");
tableVgIdFieldCache.cached = 1;
}
JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTableVgID(JNIEnv *env, jobject jobj, jlong conn,
jstring jdb, jstring jtable,
jobject resp) {
if (!tableVgIdFieldCache.cached) {
cacheTableVgIDField(env, resp);
}
TAOS *taos = (TAOS *)conn;
if (taos == NULL) {
jniError("jobj:%p, connection already closed", jobj);
(*env)->SetIntField(env, resp, tableVgIdFieldCache.codeField, JNI_CONNECTION_NULL);
return resp;
}
const char *db = NULL;
const char *table = NULL;
int vgID = 0;
if (jdb != NULL) {
db = (*env)->GetStringUTFChars(env, jdb, NULL);
}
if (jtable != NULL) {
table = (*env)->GetStringUTFChars(env, jtable, NULL);
}
int code = taos_get_table_vgId(taos, db, table, &vgID);
if (db != NULL) {
(*env)->ReleaseStringUTFChars(env, jdb, db);
}
if (table != NULL) {
(*env)->ReleaseStringUTFChars(env, jtable, table);
}
(*env)->SetIntField(env, resp, tableVgIdFieldCache.codeField, code);
(*env)->SetIntField(env, resp, tableVgIdFieldCache.vgIDField, vgID);
return resp;
}

View File

@ -1211,7 +1211,8 @@ static void destroyVgHash(void* data) {
taosMemoryFreeClear(vgData->data);
}
int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const char* tbname, TAOS_FIELD *fields, int numFields){
int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const char* tbname, TAOS_FIELD* fields,
int numFields) {
int32_t code = TSDB_CODE_SUCCESS;
STableMeta* pTableMeta = NULL;
SQuery* pQuery = NULL;
@ -1267,14 +1268,14 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
uint16_t fLen = 0;
int32_t rowSize = 0;
int16_t nVar = 0;
for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
SSchema* schema = pTableMeta->schema + i;
fLen += TYPE_BYTES[schema->type];
rowSize += schema->bytes;
if (IS_VAR_DATA_TYPE(schema->type)) {
nVar++;
}
for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
SSchema* schema = pTableMeta->schema + i;
fLen += TYPE_BYTES[schema->type];
rowSize += schema->bytes;
if (IS_VAR_DATA_TYPE(schema->type)) {
nVar++;
}
}
fLen -= sizeof(TSKEY);
@ -1294,7 +1295,8 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
tdSRowSetTpInfo(&rb, numOfCols, fLen);
int32_t dataLen = 0;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
char* pStart = pData + getVersion1BlockMetaSize(pData, numFields);
int32_t* colLength = (int32_t*)pStart;
pStart += sizeof(int32_t) * numFields;
@ -1326,9 +1328,9 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
for (int32_t k = 0; k < numOfCols; k++) {
const SSchema* pColumn = &pTableMeta->schema[k];
int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
if (!index) { // add none
if (!index) { // add none
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, false, offset, k);
}else{
} else {
if (IS_VAR_DATA_TYPE(pColumn->type)) {
if (pCol[*index].offset[j] != -1) {
char* data = pCol[*index].pData + pCol[*index].offset[j];
@ -1377,13 +1379,13 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->haveResultSet = false;
pQuery->msgType = TDMT_VND_SUBMIT;
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
if (NULL == pQuery->pRoot) {
uError("create pQuery->pRoot error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
SVnodeModifyOpStmt* nodeStmt = (SVnodeModifyOpStmt*)(pQuery->pRoot);
nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
@ -1406,7 +1408,7 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
end:
end:
taosMemoryFreeClear(pTableMeta);
qDestroyQuery(pQuery);
taosMemoryFree(subReq);
@ -1495,7 +1497,8 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
tdSRowSetTpInfo(&rb, numOfCols, fLen);
int32_t dataLen = 0;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
int32_t* colLength = (int32_t*)pStart;
pStart += sizeof(int32_t) * numOfCols;
@ -1568,13 +1571,13 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->haveResultSet = false;
pQuery->msgType = TDMT_VND_SUBMIT;
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
if (NULL == pQuery->pRoot) {
uError("create pQuery->pRoot error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
SVnodeModifyOpStmt* nodeStmt = (SVnodeModifyOpStmt*)(pQuery->pRoot);
nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
@ -1825,13 +1828,13 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->haveResultSet = false;
pQuery->msgType = TDMT_VND_SUBMIT;
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
if (NULL == pQuery->pRoot) {
uError("create pQuery->pRoot error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
SVnodeModifyOpStmt* nodeStmt = (SVnodeModifyOpStmt*)(pQuery->pRoot);
int32_t numOfVg = taosHashGetSize(pVgHash);
nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
@ -1873,7 +1876,6 @@ end:
return code;
}
static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
int32_t code = TSDB_CODE_SUCCESS;
SHashObj* pVgHash = NULL;
@ -2131,13 +2133,13 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->haveResultSet = false;
pQuery->msgType = TDMT_VND_SUBMIT;
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
if (NULL == pQuery->pRoot) {
uError("create pQuery->pRoot error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
SVnodeModifyOpStmt* nodeStmt = (SVnodeModifyOpStmt*)(pQuery->pRoot);
int32_t numOfVg = taosHashGetSize(pVgHash);
nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
@ -2167,7 +2169,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
end:
end:
tDeleteSTaosxRsp(&rspObj.rsp);
rspObj.resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&rspObj.resInfo);

View File

@ -163,9 +163,9 @@ typedef struct {
SMLProtocolType protocol;
int8_t precision;
bool dataFormat; // true means that the name and order of keys in each line are the same(only for influx protocol)
bool isRawLine;
int32_t ttl;
bool dataFormat; // true means that the name and order of keys in each line are the same(only for influx protocol)
bool isRawLine;
int32_t ttl;
SHashObj *childTables;
SHashObj *superTables;
@ -183,20 +183,20 @@ typedef struct {
SHashObj *dumplicateKey; // for dumplicate key
SArray *colsContainer; // for cols parse, if dataFormat == false
cJSON *root; // for parse json
cJSON *root; // for parse json
} SSmlHandle;
//=================================================================================================
//=================================================================================================
static volatile int64_t linesSmlHandleId = 0;
static int64_t smlGenId() {
int64_t id;
int64_t id;
do {
id = atomic_add_fetch_64(&linesSmlHandleId, 1);
do {
id = atomic_add_fetch_64(&linesSmlHandleId, 1);
} while (id == 0);
return id;
return id;
}
static inline bool smlDoubleToInt64OverFlow(double num) {
@ -606,7 +606,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
end:
taosHashCleanup(hashTmp);
taosMemoryFreeClear(pTableMeta);
// catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1);
// catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1);
return code;
}
@ -1534,7 +1534,7 @@ static SSmlHandle *smlBuildSmlInfo(STscObj *pTscObj, SRequestObj *request, SMLPr
info->pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
info->pQuery->haveResultSet = false;
info->pQuery->msgType = TDMT_VND_SUBMIT;
info->pQuery->pRoot = (SNode *)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
info->pQuery->pRoot = (SNode *)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
if (NULL == info->pQuery->pRoot) {
uError("SML:0x%" PRIx64 " create info->pQuery->pRoot error", info->id);
goto cleanup;
@ -2079,7 +2079,8 @@ static int32_t smlParseJSONString(SSmlHandle *info, cJSON *root, SSmlTableInfo *
static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql, const int len) {
SSmlLineInfo elements = {0};
uDebug("SML:0x%" PRIx64 " smlParseInfluxLine raw:%d, len:%d, sql:%s", info->id, info->isRawLine, len, (info->isRawLine ? "rawdata" : sql));
uDebug("SML:0x%" PRIx64 " smlParseInfluxLine raw:%d, len:%d, sql:%s", info->id, info->isRawLine, len,
(info->isRawLine ? "rawdata" : sql));
int ret = smlParseInfluxString(sql, sql + len, &elements, &info->msgBuf);
if (ret != TSDB_CODE_SUCCESS) {
@ -2371,15 +2372,16 @@ static int32_t smlInsertData(SSmlHandle *info) {
}
static void smlPrintStatisticInfo(SSmlHandle *info) {
uError("SML:0x%" PRIx64
" smlInsertLines result, code:%d,lineNum:%d,stable num:%d,ctable num:%d,create stable num:%d,alter stable tag num:%d,alter stable col num:%d \
uError(
"SML:0x%" PRIx64
" smlInsertLines result, code:%d,lineNum:%d,stable num:%d,ctable num:%d,create stable num:%d,alter stable tag num:%d,alter stable col num:%d \
parse cost:%" PRId64 ",schema cost:%" PRId64 ",bind cost:%" PRId64 ",rpc cost:%" PRId64 ",total cost:%" PRId64
"",
info->id, info->cost.code, info->cost.lineNum, info->cost.numOfSTables, info->cost.numOfCTables,
info->cost.numOfCreateSTables, info->cost.numOfAlterTagSTables, info->cost.numOfAlterColSTables,
info->cost.schemaTime - info->cost.parseTime,
info->cost.insertBindTime - info->cost.schemaTime, info->cost.insertRpcTime - info->cost.insertBindTime,
info->cost.endTime - info->cost.insertRpcTime, info->cost.endTime - info->cost.parseTime);
"",
info->id, info->cost.code, info->cost.lineNum, info->cost.numOfSTables, info->cost.numOfCTables,
info->cost.numOfCreateSTables, info->cost.numOfAlterTagSTables, info->cost.numOfAlterColSTables,
info->cost.schemaTime - info->cost.parseTime, info->cost.insertBindTime - info->cost.schemaTime,
info->cost.insertRpcTime - info->cost.insertBindTime, info->cost.endTime - info->cost.insertRpcTime,
info->cost.endTime - info->cost.parseTime);
}
static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char *rawLineEnd, int numLines) {
@ -2593,7 +2595,7 @@ TAOS_RES *taos_schemaless_insert_inner(SRequestObj *request, char *lines[], char
}
info->isRawLine = (rawLine == NULL);
info->ttl = ttl;
info->ttl = ttl;
int32_t perBatch = tsSmlBatchSize;
@ -2684,16 +2686,19 @@ TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLines, int pr
return taos_schemaless_insert_ttl_with_reqid(taos, lines, numLines, protocol, precision, TSDB_DEFAULT_TABLE_TTL, 0);
}
TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl) {
TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision,
int32_t ttl) {
return taos_schemaless_insert_ttl_with_reqid(taos, lines, numLines, protocol, precision, ttl, 0);
}
TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid) {
return taos_schemaless_insert_ttl_with_reqid(taos, lines, numLines, protocol, precision, TSDB_DEFAULT_TABLE_TTL, reqid);
TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision,
int64_t reqid) {
return taos_schemaless_insert_ttl_with_reqid(taos, lines, numLines, protocol, precision, TSDB_DEFAULT_TABLE_TTL,
reqid);
}
TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol,
int precision, int32_t ttl, int64_t reqid) {
int precision, int32_t ttl, int64_t reqid) {
if (NULL == taos) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
@ -2727,13 +2732,18 @@ TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int
return taos_schemaless_insert_inner(request, NULL, lines, lines + len, numLines, protocol, precision, ttl);
}
TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid) {
return taos_schemaless_insert_raw_ttl_with_reqid(taos, lines, len, totalRows, protocol, precision, TSDB_DEFAULT_TABLE_TTL, reqid);
TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol,
int precision, int64_t reqid) {
return taos_schemaless_insert_raw_ttl_with_reqid(taos, lines, len, totalRows, protocol, precision,
TSDB_DEFAULT_TABLE_TTL, reqid);
}
TAOS_RES *taos_schemaless_insert_raw_ttl(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl) {
TAOS_RES *taos_schemaless_insert_raw_ttl(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol,
int precision, int32_t ttl) {
return taos_schemaless_insert_raw_ttl_with_reqid(taos, lines, len, totalRows, protocol, precision, ttl, 0);
}
TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision) {
return taos_schemaless_insert_raw_ttl_with_reqid(taos, lines, len, totalRows, protocol, precision, TSDB_DEFAULT_TABLE_TTL, 0);
TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol,
int precision) {
return taos_schemaless_insert_raw_ttl_with_reqid(taos, lines, len, totalRows, protocol, precision,
TSDB_DEFAULT_TABLE_TTL, 0);
}

View File

@ -438,6 +438,7 @@ int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
taosMemoryFree(pParam->pOffset);
taosMemoryFree(pBuf->pData);
taosMemoryFree(pBuf->pEpSet);
/*tscDebug("receive offset commit cb of %s on vgId:%d, offset is %" PRId64, pParam->pOffset->subKey, pParam->->vgId,
* pOffset->version);*/
@ -724,7 +725,10 @@ void tmqAssignDelayedReportTask(void* param, void* tmrId) {
}
int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
if (pMsg && pMsg->pData) taosMemoryFree(pMsg->pData);
if (pMsg) {
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
}
return 0;
}
@ -869,6 +873,8 @@ void tmqClearUnhandleMsg(tmq_t* tmq) {
int32_t tmqSubscribeCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqSubscribeCbParam* pParam = (SMqSubscribeCbParam*)param;
pParam->rspErr = code;
taosMemoryFree(pMsg->pEpSet);
tsem_post(&pParam->rspSem);
return 0;
}
@ -1166,6 +1172,8 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
if (code != 0) {
tscWarn("msg discard from vgId:%d, epoch %d, since %s", vgId, epoch, terrstr());
if (pMsg->pData) taosMemoryFree(pMsg->pData);
if (pMsg->pEpSet) taosMemoryFree(pMsg->pEpSet);
if (code == TSDB_CODE_TMQ_CONSUMER_MISMATCH) {
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__RECOVER);
goto CREATE_MSG_FAIL;
@ -1365,6 +1373,7 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pParam);
}
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
return -1;
}
@ -1416,6 +1425,8 @@ END:
} else {
taosMemoryFree(pParam);
}
taosMemoryFree(pMsg->pEpSet);
taosMemoryFree(pMsg->pData);
return code;
}

View File

@ -96,6 +96,7 @@ int32_t tsRedirectPeriod = 10;
int32_t tsRedirectFactor = 2;
int32_t tsRedirectMaxPeriod = 1000;
int32_t tsMaxRetryWaitTime = 10000;
bool tsUseAdapter = false;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@ -201,9 +202,7 @@ int32_t taosSetTfsCfg(SConfig *pCfg) {
int32_t taosSetTfsCfg(SConfig *pCfg);
#endif
struct SConfig *taosGetCfg() {
return tsCfg;
}
struct SConfig *taosGetCfg() { return tsCfg; }
static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
char *apolloUrl) {
@ -314,6 +313,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "smlBatchSize", tsSmlBatchSize, 1, INT32_MAX, true) != 0) return -1;
if (cfgAddInt32(pCfg, "maxMemUsedByInsert", tsMaxMemUsedByInsert, 1, INT32_MAX, true) != 0) return -1;
if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, 0) != 0) return -1;
if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1;
tsNumOfTaskQueueThreads = tsNumOfCores / 2;
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4);
@ -668,6 +668,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;
tsQueryUseNodeAllocator = cfgGetItem(pCfg, "queryUseNodeAllocator")->bval;
tsKeepColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
tsUseAdapter = cfgGetItem(pCfg, "useAdapter")->bval;
tsMaxRetryWaitTime = cfgGetItem(pCfg, "maxRetryWaitTime")->i32;
return 0;

View File

@ -174,6 +174,7 @@ typedef struct {
void* param;
char opername[TSDB_TRANS_OPER_LEN];
SArray* pRpcArray;
SRWLatch lockRpcArray;
} STrans;
typedef struct {

View File

@ -628,6 +628,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict,
pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->commitActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo));
taosInitRWLatch(&pTrans->lockRpcArray);
if (pTrans->redoActions == NULL || pTrans->undoActions == NULL || pTrans->commitActions == NULL ||
pTrans->pRpcArray == NULL) {
@ -737,12 +738,14 @@ int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, c
if (pTrans->oper == oper) {
if (strcasecmp(dbname, pTrans->dbname) == 0) {
mInfo("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper);
taosWLockLatch(&pTrans->lockRpcArray);
if (pTrans->pRpcArray == NULL) {
pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo));
pTrans->pRpcArray = taosArrayInit(4, sizeof(SRpcHandleInfo));
}
if (pTrans->pRpcArray != NULL && taosArrayPush(pTrans->pRpcArray, &pMsg->info) != NULL) {
code = 0;
}
taosWUnLockLatch(&pTrans->lockRpcArray);
sdbRelease(pMnode->pSdb, pTrans);
break;
@ -944,8 +947,12 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
pTrans->failedTimes, code);
}
taosWLockLatch(&pTrans->lockRpcArray);
int32_t size = taosArrayGetSize(pTrans->pRpcArray);
if (size <= 0) return;
if (size <= 0) {
taosWUnLockLatch(&pTrans->lockRpcArray);
return;
}
for (int32_t i = 0; i < size; ++i) {
SRpcHandleInfo *pInfo = taosArrayGet(pTrans->pRpcArray, i);
@ -997,6 +1004,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
}
}
taosArrayClear(pTrans->pRpcArray);
taosWUnLockLatch(&pTrans->lockRpcArray);
}
int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
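
The lockRpcArray changes above serialize access to pTrans->pRpcArray, and the early return for an empty array now releases the latch before leaving. A reduced sketch of the locking discipline, not actual mnode code (lock and pRpcArray are assumed to be initialized elsewhere with taosInitRWLatch and taosArrayInit):

    static void addHandle(const SRpcHandleInfo *pInfo) {
      taosWLockLatch(&lock);
      if (taosArrayPush(pRpcArray, pInfo) == NULL) {
        // allocation failure: the caller reports the error, but the latch is still released below
      }
      taosWUnLockLatch(&lock);
    }

    static void drainHandles(void) {
      taosWLockLatch(&lock);
      if (taosArrayGetSize(pRpcArray) <= 0) {
        taosWUnLockLatch(&lock);  // the early-return path must also drop the latch
        return;
      }
      // ... build and send the queued responses ...
      taosArrayClear(pRpcArray);
      taosWUnLockLatch(&lock);
    }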

View File

@ -838,9 +838,9 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
if (pUser->superUser) {
cols = 0;
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(userName, pUser->user, pShow->pMeta->pSchemas[cols].bytes);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)userName, false);
char privilege[20] = {0};
@ -859,9 +859,9 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
char *db = taosHashIterate(pUser->readDbs, NULL);
while (db != NULL) {
cols = 0;
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(userName, pUser->user, pShow->pMeta->pSchemas[cols].bytes);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)userName, false);
char privilege[20] = {0};

View File

@ -161,7 +161,10 @@ int32_t metaSnapWriterClose(SMetaSnapWriter** ppWriter, int8_t rollback) {
SMetaSnapWriter* pWriter = *ppWriter;
if (rollback) {
metaInfo("vgId:%d, meta snapshot writer close and rollback start ", TD_VID(pWriter->pMeta->pVnode));
code = metaAbort(pWriter->pMeta);
metaInfo("vgId:%d, meta snapshot writer close and rollback finished, code:0x%x", TD_VID(pWriter->pMeta->pVnode),
code);
if (code) goto _err;
} else {
code = metaCommit(pWriter->pMeta, pWriter->pMeta->txn);

View File

@ -710,6 +710,9 @@ int metaUpdateCtimeIdx(SMeta *pMeta, const SMetaEntry *pME) {
if (metaBuildCtimeIdxKey(&ctimeKey, pME) < 0) {
return 0;
}
metaDebug("vgId:%d, start to save ctime:%" PRId64 " uid:%" PRId64 " ct:%" PRId64, TD_VID(pMeta->pVnode), pME->version,
pME->uid, ctimeKey.ctime);
return tdbTbInsert(pMeta->pCtimeIdx, &ctimeKey, sizeof(ctimeKey), NULL, 0, pMeta->txn);
}

View File

@ -1109,7 +1109,7 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3
if (pWriter->dWriter.pWriter == NULL || pWriter->fid != fid) {
if (pWriter->dWriter.pWriter) {
ASSERT(fid > pWriter->fid);
// ASSERT(fid > pWriter->fid);
code = tsdbSnapWriteCloseFile(pWriter);
if (code) goto _err;

View File

@ -231,10 +231,6 @@ static const char* cacheModelStr(int8_t cacheModel) {
return TSDB_CACHE_MODEL_NONE_STR;
}
static const char* strictStr(int8_t strict) {
return TSDB_DB_STRICT_ON == strict ? TSDB_DB_STRICT_ON_STR : TSDB_DB_STRICT_OFF_STR;
}
static void setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbFName, SDbCfgInfo* pCfg) {
blockDataEnsureCapacity(pBlock, 1);
pBlock->info.rows = 1;
@ -269,11 +265,11 @@ static void setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbFName, S
buf2 + VARSTR_HEADER_SIZE,
"CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %dm "
"WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d KEEP %dm,%dm,%dm PAGES %d PAGESIZE %d PRECISION '%s' REPLICA %d "
"STRICT '%s' WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d",
"WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d",
dbFName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, pCfg->daysPerFile,
pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->daysToKeep0, pCfg->daysToKeep1, pCfg->daysToKeep2,
pCfg->pages, pCfg->pageSize, prec, pCfg->replications, strictStr(pCfg->strict), pCfg->walLevel,
pCfg->numOfVgroups, 1 == pCfg->numOfStables);
pCfg->pages, pCfg->pageSize, prec, pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups,
1 == pCfg->numOfStables);
if (retentions) {
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, " RETENTIONS %s", retentions);

View File

@ -163,6 +163,10 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
colDataAppend(pDst, rows, (char*)&pSliceInfo->current, false);
continue;
} else if (IS_BOOLEAN_TYPE(pExprInfo->base.resSchema.type)) {
bool isFilled = true;
colDataAppend(pDst, pResBlock->info.rows, (char*)&isFilled, false);
continue;
}
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
@ -274,6 +278,9 @@ static void addCurrentRowToResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp*
if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
colDataAppend(pDst, pResBlock->info.rows, (char*)&pSliceInfo->current, false);
} else if (IS_BOOLEAN_TYPE(pExprInfo->base.resSchema.type)) {
bool isFilled = false;
colDataAppend(pDst, pResBlock->info.rows, (char*)&isFilled, false);
} else {
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
SColumnInfoData* pSrc = taosArrayGet(pSrcBlock->pDataBlock, srcSlot);
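
Together with the new _isfilled builtin and the ISFILLED grammar rule further down, these executor changes append true for rows produced by interpolation and false for rows taken from the source block. A hedged client-side sketch of how the pseudocolumn surfaces through the C API (power.d1001 and its current column are made-up names; error handling is trimmed):

    #include <stdio.h>
    #include "taos.h"

    int main(void) {
      TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
      if (conn == NULL) return 1;
      TAOS_RES *res = taos_query(conn,
          "SELECT _irowts, _isfilled, INTERP(current) FROM power.d1001 "
          "RANGE('2022-12-01 00:00:00', '2022-12-01 00:01:00') EVERY(1s) FILL(LINEAR)");
      if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
      } else {
        TAOS_ROW row;
        while ((row = taos_fetch_row(res)) != NULL) {
          int8_t filled = *(int8_t *)row[1];  // _isfilled: 1 for FILL-generated rows, 0 for original rows
          printf("filled=%d\n", (int)filled);
        }
      }
      taos_free_result(res);
      taos_close(conn);
      return 0;
    }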

View File

@ -466,7 +466,7 @@ static int32_t translateStddevMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t
static int32_t translateWduration(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
// pseudo column do not need to check parameters
pFunc->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
return TSDB_CODE_SUCCESS;
}
@ -480,14 +480,21 @@ static int32_t translateNowToday(SFunctionNode* pFunc, char* pErrBuf, int32_t le
return code;
}
pFunc->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_TIMESTAMP};
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP};
return TSDB_CODE_SUCCESS;
}
static int32_t translateTimePseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
// pseudo column do not need to check parameters
pFunc->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_TIMESTAMP};
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP};
return TSDB_CODE_SUCCESS;
}
static int32_t translateIsFilledPseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
// pseudo column do not need to check parameters
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes, .type = TSDB_DATA_TYPE_BOOL};
return TSDB_CODE_SUCCESS;
}
@ -3254,6 +3261,16 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.sprocessFunc = NULL,
.finalizeFunc = NULL
},
{
.name = "_isfilled",
.type = FUNCTION_TYPE_ISFILLED,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC,
.translateFunc = translateIsFilledPseudoColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = NULL,
.finalizeFunc = NULL
},
{
.name = "_tags",
.type = FUNCTION_TYPE_TAGS,

View File

@ -91,7 +91,7 @@ const char* nodesNodeName(ENodeType type) {
return "SetOperator";
case QUERY_NODE_SELECT_STMT:
return "SelectStmt";
case QUERY_NODE_VNODE_MODIF_STMT:
case QUERY_NODE_VNODE_MODIFY_STMT:
return "VnodeModifStmt";
case QUERY_NODE_CREATE_DATABASE_STMT:
return "CreateDatabaseStmt";
@ -99,6 +99,10 @@ const char* nodesNodeName(ENodeType type) {
return "DropDatabaseStmt";
case QUERY_NODE_ALTER_DATABASE_STMT:
return "AlterDatabaseStmt";
case QUERY_NODE_FLUSH_DATABASE_STMT:
return "FlushDatabaseStmt";
case QUERY_NODE_TRIM_DATABASE_STMT:
return "TrimDatabaseStmt";
case QUERY_NODE_CREATE_TABLE_STMT:
return "CreateTableStmt";
case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
@ -137,12 +141,20 @@ const char* nodesNodeName(ENodeType type) {
return "CreateQnodeStmt";
case QUERY_NODE_DROP_QNODE_STMT:
return "DropQnodeStmt";
case QUERY_NODE_DROP_SNODE_STMT:
return "DropSnodeStmt";
case QUERY_NODE_DROP_MNODE_STMT:
return "DropMnodeStmt";
case QUERY_NODE_CREATE_TOPIC_STMT:
return "CreateTopicStmt";
case QUERY_NODE_DROP_TOPIC_STMT:
return "DropTopicStmt";
case QUERY_NODE_ALTER_LOCAL_STMT:
return "AlterLocalStmt";
case QUERY_NODE_DROP_STREAM_STMT:
return "DropStreamStmt";
case QUERY_NODE_SPLIT_VGROUP_STMT:
return "SplitVgroupStmt";
case QUERY_NODE_SHOW_DNODES_STMT:
return "ShowDnodesStmt";
case QUERY_NODE_SHOW_MNODES_STMT:
@ -155,6 +167,8 @@ const char* nodesNodeName(ENodeType type) {
return "ShowSnodesStmt";
case QUERY_NODE_SHOW_BNODES_STMT:
return "ShowBnodesStmt";
case QUERY_NODE_SHOW_CLUSTER_STMT:
return "ShowClusterStmt";
case QUERY_NODE_SHOW_DATABASES_STMT:
return "ShowDatabaseStmt";
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
@ -181,8 +195,30 @@ const char* nodesNodeName(ENodeType type) {
return "ShowConsumersStmt";
case QUERY_NODE_SHOW_QUERIES_STMT:
return "ShowQueriesStmt";
case QUERY_NODE_SHOW_VARIABLES_STMT:
return "ShowVariablesStmt";
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
return "ShowDnodeVariablesStmt";
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
return "ShowTransactionsStmt";
case QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT:
return "ShowSubscriptionsStmt";
case QUERY_NODE_SHOW_VNODES_STMT:
return "ShowVnodeStmt";
case QUERY_NODE_SHOW_USER_PRIVILEGES_STMT:
return "ShowUserPrivilegesStmt";
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
return "ShowCreateDatabasesStmt";
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
return "ShowCreateTablesStmt";
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
return "ShowCreateStablesStmt";
case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:
return "ShowTableDistributedStmt";
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
return "ShowLocalVariablesStmt";
case QUERY_NODE_SHOW_TABLE_TAGS_STMT:
return "ShowTableTagsStmt";
case QUERY_NODE_DELETE_STMT:
return "DeleteStmt";
case QUERY_NODE_INSERT_STMT:
@ -3569,6 +3605,51 @@ static int32_t jsonToTempTableNode(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkJoinTableJoinType = "JoinType";
static const char* jkJoinTableLeft = "Left";
static const char* jkJoinTableRight = "Right";
static const char* jkJoinTableOnCond = "OnCond";
static int32_t joinTableNodeToJson(const void* pObj, SJson* pJson) {
const SJoinTableNode* pNode = (const SJoinTableNode*)pObj;
int32_t code = tableNodeToJson(pObj, pJson);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkJoinTableJoinType, pNode->joinType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinTableLeft, nodeToJson, pNode->pLeft);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinTableRight, nodeToJson, pNode->pRight);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinTableOnCond, nodeToJson, pNode->pOnCond);
}
return code;
}
static int32_t jsonToJoinTableNode(const SJson* pJson, void* pObj) {
SJoinTableNode* pNode = (SJoinTableNode*)pObj;
int32_t code = jsonToTableNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkJoinTableJoinType, pNode->joinType, code);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinTableLeft, &pNode->pLeft);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinTableRight, &pNode->pRight);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinTableOnCond, &pNode->pOnCond);
}
return code;
}
static const char* jkGroupingSetType = "GroupingSetType";
static const char* jkGroupingSetParameter = "Parameters";
@ -4398,6 +4479,39 @@ static int32_t jsonToSelectStmt(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkVnodeModifyOpStmtSqlNodeType = "SqlNodeType";
static const char* jkVnodeModifyOpStmtTotalRowsNum = "TotalRowsNum";
static const char* jkVnodeModifyOpStmtTotalTbNum = "TotalTbNum";
static int32_t vnodeModifyStmtToJson(const void* pObj, SJson* pJson) {
const SVnodeModifyOpStmt* pNode = (const SVnodeModifyOpStmt*)pObj;
int32_t code = tjsonAddIntegerToObject(pJson, jkVnodeModifyOpStmtSqlNodeType, pNode->sqlNodeType);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkVnodeModifyOpStmtTotalRowsNum, pNode->totalRowsNum);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkVnodeModifyOpStmtTotalTbNum, pNode->totalTbNum);
}
return code;
}
static int32_t jsonToVnodeModifyStmt(const SJson* pJson, void* pObj) {
SVnodeModifyOpStmt* pNode = (SVnodeModifyOpStmt*)pObj;
int32_t code = TSDB_CODE_SUCCESS;
tjsonGetNumberValue(pJson, jkVnodeModifyOpStmtSqlNodeType, pNode->sqlNodeType, code);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkVnodeModifyOpStmtTotalRowsNum, &pNode->totalRowsNum);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkVnodeModifyOpStmtTotalTbNum, &pNode->totalTbNum);
}
return code;
}
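
The pair above is representative of what this file repeats for every serializable statement node: a writer and a reader that use the same jk... keys in the same order, plus matching entries in nodesNodeName, specificNodeToJson and jsonToSpecificNode further down. A skeletal sketch of the touch points a new node type needs (SFooStmt and QUERY_NODE_FOO_STMT are hypothetical):

    static const char* jkFooStmtDbName = "DbName";

    static int32_t fooStmtToJson(const void* pObj, SJson* pJson) {
      const SFooStmt* pNode = (const SFooStmt*)pObj;
      return tjsonAddStringToObject(pJson, jkFooStmtDbName, pNode->dbName);
    }

    static int32_t jsonToFooStmt(const SJson* pJson, void* pObj) {
      SFooStmt* pNode = (SFooStmt*)pObj;
      return tjsonGetStringValue(pJson, jkFooStmtDbName, pNode->dbName);
    }

    // plus one case in each dispatch switch:
    //   case QUERY_NODE_FOO_STMT: return fooStmtToJson(pObj, pJson);
    //   case QUERY_NODE_FOO_STMT: return jsonToFooStmt(pJson, pObj);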
static const char* jkAlterDatabaseStmtDbName = "DbName";
static const char* jkAlterDatabaseStmtOptions = "Options";
@ -4423,6 +4537,107 @@ static int32_t jsonToAlterDatabaseStmt(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkTrimDatabaseStmtDbName = "DbName";
static const char* jkTrimDatabaseStmtMaxSpeed = "MaxSpeed";
static int32_t trimDatabaseStmtToJson(const void* pObj, SJson* pJson) {
const STrimDatabaseStmt* pNode = (const STrimDatabaseStmt*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkTrimDatabaseStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkTrimDatabaseStmtMaxSpeed, pNode->maxSpeed);
}
return code;
}
static int32_t jsonToTrimDatabaseStmt(const SJson* pJson, void* pObj) {
STrimDatabaseStmt* pNode = (STrimDatabaseStmt*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkTrimDatabaseStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkTrimDatabaseStmtMaxSpeed, &pNode->maxSpeed);
}
return code;
}
static const char* jkDropTableClauseDbName = "DbName";
static const char* jkDropTableClauseTableName = "TableName";
static const char* jkDropTableClauseIgnoreNotExists = "IgnoreNotExists";
static int32_t dropTableClauseToJson(const void* pObj, SJson* pJson) {
const SDropTableClause* pNode = (const SDropTableClause*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkDropTableClauseDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkDropTableClauseTableName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkDropTableClauseIgnoreNotExists, pNode->ignoreNotExists);
}
return code;
}
static int32_t jsonToDropTableClause(const SJson* pJson, void* pObj) {
SDropTableClause* pNode = (SDropTableClause*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkDropTableClauseDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkDropTableClauseTableName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkDropTableClauseIgnoreNotExists, &pNode->ignoreNotExists);
}
return code;
}
static const char* jkDropTableStmtTables = "Tables";
static int32_t dropTableStmtToJson(const void* pObj, SJson* pJson) {
const SDropTableStmt* pNode = (const SDropTableStmt*)pObj;
return nodeListToJson(pJson, jkDropTableStmtTables, pNode->pTables);
}
static int32_t jsonToDropTableStmt(const SJson* pJson, void* pObj) {
SDropTableStmt* pNode = (SDropTableStmt*)pObj;
return jsonToNodeList(pJson, jkDropTableStmtTables, &pNode->pTables);
}
static const char* jkDropSuperTableStmtDbName = "DbName";
static const char* jkDropSuperTableStmtTableName = "TableName";
static const char* jkDropSuperTableStmtIgnoreNotExists = "IgnoreNotExists";
static int32_t dropStableStmtToJson(const void* pObj, SJson* pJson) {
const SDropSuperTableStmt* pNode = (const SDropSuperTableStmt*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkDropSuperTableStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkDropSuperTableStmtTableName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkDropSuperTableStmtIgnoreNotExists, pNode->ignoreNotExists);
}
return code;
}
static int32_t jsonToDropStableStmt(const SJson* pJson, void* pObj) {
SDropSuperTableStmt* pNode = (SDropSuperTableStmt*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkDropSuperTableStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkDropSuperTableStmtTableName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkDropSuperTableStmtIgnoreNotExists, &pNode->ignoreNotExists);
}
return code;
}
static const char* jkAlterTableStmtDbName = "DbName";
static const char* jkAlterTableStmtTableName = "TableName";
static const char* jkAlterTableStmtAlterType = "AlterType";
@ -4490,6 +4705,30 @@ static int32_t jsonToAlterTableStmt(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkDropUserStmtUserName = "UserName";
static int32_t dropUserStmtToJson(const void* pObj, SJson* pJson) {
const SDropUserStmt* pNode = (const SDropUserStmt*)pObj;
return tjsonAddStringToObject(pJson, jkDropUserStmtUserName, pNode->userName);
}
static int32_t jsonToDropUserStmt(const SJson* pJson, void* pObj) {
SDropUserStmt* pNode = (SDropUserStmt*)pObj;
return tjsonGetStringValue(pJson, jkDropUserStmtUserName, pNode->userName);
}
static const char* jkUseDatabaseStmtDbName = "DbName";
static int32_t useDatabaseStmtToJson(const void* pObj, SJson* pJson) {
const SUseDatabaseStmt* pNode = (const SUseDatabaseStmt*)pObj;
return tjsonAddStringToObject(pJson, jkUseDatabaseStmtDbName, pNode->dbName);
}
static int32_t jsonToUseDatabaseStmt(const SJson* pJson, void* pObj) {
SUseDatabaseStmt* pNode = (SUseDatabaseStmt*)pObj;
return tjsonGetStringValue(pJson, jkUseDatabaseStmtDbName, pNode->dbName);
}
static const char* jkAlterDnodeStmtDnodeId = "DnodeId";
static const char* jkAlterDnodeStmtConfig = "Config";
static const char* jkAlterDnodeStmtValue = "Value";
@ -4522,6 +4761,69 @@ static int32_t jsonToAlterDnodeStmt(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkDropComponentNodeStmtDnodeId = "DnodeId";
static int32_t dropComponentNodeStmtToJson(const void* pObj, SJson* pJson) {
const SDropComponentNodeStmt* pNode = (const SDropComponentNodeStmt*)pObj;
return tjsonAddIntegerToObject(pJson, jkDropComponentNodeStmtDnodeId, pNode->dnodeId);
}
static int32_t jsonToDropComponentNodeStmt(const SJson* pJson, void* pObj) {
SDropComponentNodeStmt* pNode = (SDropComponentNodeStmt*)pObj;
return tjsonGetIntValue(pJson, jkDropComponentNodeStmtDnodeId, &pNode->dnodeId);
}
static int32_t dropQnodeStmtToJson(const void* pObj, SJson* pJson) { return dropComponentNodeStmtToJson(pObj, pJson); }
static int32_t jsonToDropQnodeStmt(const SJson* pJson, void* pObj) { return jsonToDropComponentNodeStmt(pJson, pObj); }
static int32_t dropSnodeStmtToJson(const void* pObj, SJson* pJson) { return dropComponentNodeStmtToJson(pObj, pJson); }
static int32_t jsonToDropSnodeStmt(const SJson* pJson, void* pObj) { return jsonToDropComponentNodeStmt(pJson, pObj); }
static int32_t dropMnodeStmtToJson(const void* pObj, SJson* pJson) { return dropComponentNodeStmtToJson(pObj, pJson); }
static int32_t jsonToDropMnodeStmt(const SJson* pJson, void* pObj) { return jsonToDropComponentNodeStmt(pJson, pObj); }
static const char* jkDropDnodeStmtDnodeId = "DnodeId";
static const char* jkDropDnodeStmtFqdn = "Fqdn";
static const char* jkDropDnodeStmtPort = "Port";
static const char* jkDropDnodeStmtForce = "Force";
static int32_t dropDnodeStmtToJson(const void* pObj, SJson* pJson) {
const SDropDnodeStmt* pNode = (const SDropDnodeStmt*)pObj;
int32_t code = tjsonAddIntegerToObject(pJson, jkDropDnodeStmtDnodeId, pNode->dnodeId);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkDropDnodeStmtFqdn, pNode->fqdn);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDropDnodeStmtPort, pNode->port);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkDropDnodeStmtForce, pNode->force);
}
return code;
}
static int32_t jsonToDropDnodeStmt(const SJson* pJson, void* pObj) {
SDropDnodeStmt* pNode = (SDropDnodeStmt*)pObj;
int32_t code = tjsonGetIntValue(pJson, jkDropDnodeStmtDnodeId, &pNode->dnodeId);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkDropDnodeStmtFqdn, pNode->fqdn);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkDropDnodeStmtPort, &pNode->port);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkDropDnodeStmtForce, &pNode->force);
}
return code;
}
static const char* jkCreateTopicStmtTopicName = "TopicName";
static const char* jkCreateTopicStmtSubscribeDbName = "SubscribeDbName";
static const char* jkCreateTopicStmtIgnoreExists = "IgnoreExists";
@ -4561,6 +4863,328 @@ static int32_t jsonToCreateTopicStmt(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkDropTopicStmtTopicName = "TopicName";
static const char* jkDropTopicStmtIgnoreNotExists = "IgnoreNotExists";
static int32_t dropTopicStmtToJson(const void* pObj, SJson* pJson) {
const SDropTopicStmt* pNode = (const SDropTopicStmt*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkDropTopicStmtTopicName, pNode->topicName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkDropTopicStmtIgnoreNotExists, pNode->ignoreNotExists);
}
return code;
}
static int32_t jsonToDropTopicStmt(const SJson* pJson, void* pObj) {
SDropTopicStmt* pNode = (SDropTopicStmt*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkDropTopicStmtTopicName, pNode->topicName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkDropTopicStmtIgnoreNotExists, &pNode->ignoreNotExists);
}
return code;
}
static const char* jkDropStreamStmtStreamName = "StreamName";
static const char* jkDropStreamStmtIgnoreNotExists = "IgnoreNotExists";
static int32_t dropStreamStmtToJson(const void* pObj, SJson* pJson) {
const SDropStreamStmt* pNode = (const SDropStreamStmt*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkDropStreamStmtStreamName, pNode->streamName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkDropStreamStmtIgnoreNotExists, pNode->ignoreNotExists);
}
return code;
}
static int32_t jsonToDropStreamStmt(const SJson* pJson, void* pObj) {
SDropStreamStmt* pNode = (SDropStreamStmt*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkDropStreamStmtStreamName, pNode->streamName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkDropStreamStmtIgnoreNotExists, &pNode->ignoreNotExists);
}
return code;
}
static const char* jkSplitVgroupStmtVgroupId = "VgroupId";
static int32_t splitVgroupStmtToJson(const void* pObj, SJson* pJson) {
const SSplitVgroupStmt* pNode = (const SSplitVgroupStmt*)pObj;
return tjsonAddIntegerToObject(pJson, jkSplitVgroupStmtVgroupId, pNode->vgId);
}
static int32_t jsonToSplitVgroupStmt(const SJson* pJson, void* pObj) {
SSplitVgroupStmt* pNode = (SSplitVgroupStmt*)pObj;
return tjsonGetIntValue(pJson, jkSplitVgroupStmtVgroupId, &pNode->vgId);
}
static const char* jkShowStmtDbName = "DbName";
static const char* jkShowStmtTbName = "TbName";
static const char* jkShowStmtTableCondType = "TableCondType";
static int32_t showStmtToJson(const void* pObj, SJson* pJson) {
const SShowStmt* pNode = (const SShowStmt*)pObj;
int32_t code = tjsonAddObject(pJson, jkShowStmtDbName, nodeToJson, pNode->pDbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkShowStmtTbName, nodeToJson, pNode->pTbName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkShowStmtTableCondType, pNode->tableCondType);
}
return code;
}
static int32_t jsonToShowStmt(const SJson* pJson, void* pObj) {
SShowStmt* pNode = (SShowStmt*)pObj;
int32_t code = jsonToNodeObject(pJson, jkShowStmtDbName, &pNode->pDbName);
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkShowStmtTbName, &pNode->pTbName);
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkShowStmtTableCondType, pNode->tableCondType, code);
}
return code;
}
static int32_t showDnodesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowDnodesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showMnodesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowMnodesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showQnodesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowQnodesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showClusterStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowClusterStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showDatabasesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowDatabasesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showFunctionsStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowFunctionsStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showIndexesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowIndexesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showStablesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowStablesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showStreamsStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowStreamsStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showTablesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowTablesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showTagsStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowTagsStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showUsersStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowUsersStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showVgroupsStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowVgroupsStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showConsumersStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowConsumersStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showVariablesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowVariablesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static const char* jkShowDnodeVariablesStmtDnodeId = "DnodeId";
static const char* jkShowDnodeVariablesStmtLikePattern = "LikePattern";
static int32_t showDnodeVariablesStmtToJson(const void* pObj, SJson* pJson) {
const SShowDnodeVariablesStmt* pNode = (const SShowDnodeVariablesStmt*)pObj;
int32_t code = tjsonAddObject(pJson, jkShowDnodeVariablesStmtDnodeId, nodeToJson, pNode->pDnodeId);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkShowDnodeVariablesStmtLikePattern, nodeToJson, pNode->pLikePattern);
}
return code;
}
static int32_t jsonToShowDnodeVariablesStmt(const SJson* pJson, void* pObj) {
SShowDnodeVariablesStmt* pNode = (SShowDnodeVariablesStmt*)pObj;
int32_t code = jsonToNodeObject(pJson, jkShowDnodeVariablesStmtDnodeId, &pNode->pDnodeId);
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkShowDnodeVariablesStmtLikePattern, &pNode->pLikePattern);
}
return code;
}
static int32_t showTransactionsStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowTransactionsStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static int32_t showSubscriptionsStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowSubscriptionsStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static const char* jkShowVnodesStmtDnodeId = "DnodeId";
static const char* jkShowVnodesStmtDnodeEndpoint = "DnodeEndpoint";
static int32_t showVnodesStmtToJson(const void* pObj, SJson* pJson) {
const SShowVnodesStmt* pNode = (const SShowVnodesStmt*)pObj;
int32_t code = tjsonAddObject(pJson, jkShowVnodesStmtDnodeId, nodeToJson, pNode->pDnodeId);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkShowVnodesStmtDnodeEndpoint, nodeToJson, pNode->pDnodeEndpoint);
}
return code;
}
static int32_t jsonToShowVnodesStmt(const SJson* pJson, void* pObj) {
SShowVnodesStmt* pNode = (SShowVnodesStmt*)pObj;
int32_t code = jsonToNodeObject(pJson, jkShowVnodesStmtDnodeId, &pNode->pDnodeId);
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkShowVnodesStmtDnodeEndpoint, &pNode->pDnodeEndpoint);
}
return code;
}
static int32_t showUserPrivilegesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowUserPrivilegesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static const char* jkShowCreateDatabaseStmtDbName = "DbName";
static int32_t showCreateDatabaseStmtToJson(const void* pObj, SJson* pJson) {
const SShowCreateDatabaseStmt* pNode = (const SShowCreateDatabaseStmt*)pObj;
return tjsonAddStringToObject(pJson, jkShowCreateDatabaseStmtDbName, pNode->dbName);
}
static int32_t jsonToShowCreateDatabaseStmt(const SJson* pJson, void* pObj) {
SShowCreateDatabaseStmt* pNode = (SShowCreateDatabaseStmt*)pObj;
return tjsonGetStringValue(pJson, jkShowCreateDatabaseStmtDbName, pNode->dbName);
}
static const char* jkShowCreateTableStmtDbName = "DbName";
static const char* jkShowCreateTableStmtTableName = "TableName";
static int32_t showCreateTableStmtToJson(const void* pObj, SJson* pJson) {
const SShowCreateTableStmt* pNode = (const SShowCreateTableStmt*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkShowCreateTableStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkShowCreateTableStmtTableName, pNode->tableName);
}
return code;
}
static int32_t jsonToShowCreateTableStmt(const SJson* pJson, void* pObj) {
SShowCreateTableStmt* pNode = (SShowCreateTableStmt*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkShowCreateTableStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkShowCreateTableStmtTableName, pNode->tableName);
}
return code;
}
static int32_t showCreateStableStmtToJson(const void* pObj, SJson* pJson) {
return showCreateTableStmtToJson(pObj, pJson);
}
static int32_t jsonToShowCreateStableStmt(const SJson* pJson, void* pObj) {
return jsonToShowCreateTableStmt(pJson, pObj);
}
static const char* jkShowTableDistributedStmtDbName = "DbName";
static const char* jkShowTableDistributedStmtTableName = "TableName";
static int32_t showTableDistributedStmtToJson(const void* pObj, SJson* pJson) {
const SShowTableDistributedStmt* pNode = (const SShowTableDistributedStmt*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkShowTableDistributedStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkShowTableDistributedStmtTableName, pNode->tableName);
}
return code;
}
static int32_t jsonToShowTableDistributedStmt(const SJson* pJson, void* pObj) {
SShowTableDistributedStmt* pNode = (SShowTableDistributedStmt*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkShowTableDistributedStmtDbName, pNode->dbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkShowTableDistributedStmtTableName, pNode->tableName);
}
return code;
}
static int32_t showLocalVariablesStmtToJson(const void* pObj, SJson* pJson) { return showStmtToJson(pObj, pJson); }
static int32_t jsonToShowLocalVariablesStmt(const SJson* pJson, void* pObj) { return jsonToShowStmt(pJson, pObj); }
static const char* jkShowTableTagsStmtDbName = "DbName";
static const char* jkShowTableTagsStmtTbName = "TbName";
static const char* jkShowTableTagsStmtTags = "Tags";
static int32_t showTableTagsStmtToJson(const void* pObj, SJson* pJson) {
const SShowTableTagsStmt* pNode = (const SShowTableTagsStmt*)pObj;
int32_t code = tjsonAddObject(pJson, jkShowTableTagsStmtDbName, nodeToJson, pNode->pDbName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkShowTableTagsStmtTbName, nodeToJson, pNode->pTbName);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkShowTableTagsStmtTags, pNode->pTags);
}
return code;
}
static int32_t jsonToShowTableTagsStmt(const SJson* pJson, void* pObj) {
SShowTableTagsStmt* pNode = (SShowTableTagsStmt*)pObj;
int32_t code = jsonToNodeObject(pJson, jkShowTableTagsStmtDbName, &pNode->pDbName);
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkShowTableTagsStmtTbName, &pNode->pTbName);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkShowTableTagsStmtTags, &pNode->pTags);
}
return code;
}
static const char* jkDeleteStmtFromTable = "FromTable";
static const char* jkDeleteStmtWhere = "Where";
static const char* jkDeleteStmtCountFunc = "CountFunc";
@ -4645,7 +5269,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
case QUERY_NODE_TEMP_TABLE:
return tempTableNodeToJson(pObj, pJson);
case QUERY_NODE_JOIN_TABLE:
break;
return joinTableNodeToJson(pObj, pJson);
case QUERY_NODE_GROUPING_SET:
return groupingSetNodeToJson(pObj, pJson);
case QUERY_NODE_ORDER_BY_EXPR:
@ -4688,25 +5312,98 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return setOperatorToJson(pObj, pJson);
case QUERY_NODE_SELECT_STMT:
return selectStmtToJson(pObj, pJson);
case QUERY_NODE_VNODE_MODIF_STMT:
case QUERY_NODE_VNODE_MODIFY_STMT:
return vnodeModifyStmtToJson(pObj, pJson);
case QUERY_NODE_CREATE_DATABASE_STMT:
break;
case QUERY_NODE_ALTER_DATABASE_STMT:
return alterDatabaseStmtToJson(pObj, pJson);
case QUERY_NODE_TRIM_DATABASE_STMT:
return trimDatabaseStmtToJson(pObj, pJson);
case QUERY_NODE_CREATE_TABLE_STMT:
break;
case QUERY_NODE_DROP_TABLE_CLAUSE:
return dropTableClauseToJson(pObj, pJson);
case QUERY_NODE_DROP_TABLE_STMT:
return dropTableStmtToJson(pObj, pJson);
case QUERY_NODE_DROP_SUPER_TABLE_STMT:
return dropStableStmtToJson(pObj, pJson);
case QUERY_NODE_ALTER_TABLE_STMT:
return alterTableStmtToJson(pObj, pJson);
case QUERY_NODE_DROP_USER_STMT:
return dropUserStmtToJson(pObj, pJson);
case QUERY_NODE_USE_DATABASE_STMT:
break;
return useDatabaseStmtToJson(pObj, pJson);
case QUERY_NODE_DROP_DNODE_STMT:
return dropDnodeStmtToJson(pObj, pJson);
case QUERY_NODE_ALTER_DNODE_STMT:
return alterDnodeStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_DATABASES_STMT:
case QUERY_NODE_SHOW_TABLES_STMT:
case QUERY_NODE_SHOW_TAGS_STMT:
break;
case QUERY_NODE_DROP_QNODE_STMT:
return dropQnodeStmtToJson(pObj, pJson);
case QUERY_NODE_DROP_SNODE_STMT:
return dropSnodeStmtToJson(pObj, pJson);
case QUERY_NODE_DROP_MNODE_STMT:
return dropMnodeStmtToJson(pObj, pJson);
case QUERY_NODE_CREATE_TOPIC_STMT:
return createTopicStmtToJson(pObj, pJson);
case QUERY_NODE_DROP_TOPIC_STMT:
return dropTopicStmtToJson(pObj, pJson);
case QUERY_NODE_DROP_STREAM_STMT:
return dropStreamStmtToJson(pObj, pJson);
case QUERY_NODE_SPLIT_VGROUP_STMT:
return splitVgroupStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_DNODES_STMT:
return showDnodesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_MNODES_STMT:
return showMnodesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_QNODES_STMT:
return showQnodesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_CLUSTER_STMT:
return showClusterStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_DATABASES_STMT:
return showDatabasesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
return showFunctionsStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_INDEXES_STMT:
return showIndexesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_STABLES_STMT:
return showStablesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_STREAMS_STMT:
return showStreamsStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_TABLES_STMT:
return showTablesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_TAGS_STMT:
return showTagsStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_USERS_STMT:
return showUsersStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_VGROUPS_STMT:
return showVgroupsStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_CONSUMERS_STMT:
return showConsumersStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_VARIABLES_STMT:
return showVariablesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
return showDnodeVariablesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
return showTransactionsStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT:
return showSubscriptionsStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_VNODES_STMT:
return showVnodesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_USER_PRIVILEGES_STMT:
return showUserPrivilegesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
return showCreateDatabaseStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
return showCreateTableStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
return showCreateStableStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:
return showTableDistributedStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
return showLocalVariablesStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_TABLE_TAGS_STMT:
return showTableTagsStmtToJson(pObj, pJson);
case QUERY_NODE_DELETE_STMT:
return deleteStmtToJson(pObj, pJson);
case QUERY_NODE_LOGIC_PLAN_SCAN:
@ -4827,6 +5524,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToRealTableNode(pJson, pObj);
case QUERY_NODE_TEMP_TABLE:
return jsonToTempTableNode(pJson, pObj);
case QUERY_NODE_JOIN_TABLE:
return jsonToJoinTableNode(pJson, pObj);
case QUERY_NODE_GROUPING_SET:
return jsonToGroupingSetNode(pJson, pObj);
case QUERY_NODE_ORDER_BY_EXPR:
@ -4865,14 +5564,94 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToSetOperator(pJson, pObj);
case QUERY_NODE_SELECT_STMT:
return jsonToSelectStmt(pJson, pObj);
case QUERY_NODE_VNODE_MODIFY_STMT:
return jsonToVnodeModifyStmt(pJson, pObj);
case QUERY_NODE_ALTER_DATABASE_STMT:
return jsonToAlterDatabaseStmt(pJson, pObj);
case QUERY_NODE_TRIM_DATABASE_STMT:
return jsonToTrimDatabaseStmt(pJson, pObj);
case QUERY_NODE_DROP_TABLE_CLAUSE:
return jsonToDropTableClause(pJson, pObj);
case QUERY_NODE_DROP_TABLE_STMT:
return jsonToDropTableStmt(pJson, pObj);
case QUERY_NODE_DROP_SUPER_TABLE_STMT:
return jsonToDropStableStmt(pJson, pObj);
case QUERY_NODE_ALTER_TABLE_STMT:
return jsonToAlterTableStmt(pJson, pObj);
case QUERY_NODE_DROP_USER_STMT:
return jsonToDropUserStmt(pJson, pObj);
case QUERY_NODE_USE_DATABASE_STMT:
return jsonToUseDatabaseStmt(pJson, pObj);
case QUERY_NODE_DROP_DNODE_STMT:
return jsonToDropDnodeStmt(pJson, pObj);
case QUERY_NODE_ALTER_DNODE_STMT:
return jsonToAlterDnodeStmt(pJson, pObj);
case QUERY_NODE_DROP_QNODE_STMT:
return jsonToDropQnodeStmt(pJson, pObj);
case QUERY_NODE_DROP_SNODE_STMT:
return jsonToDropSnodeStmt(pJson, pObj);
case QUERY_NODE_DROP_MNODE_STMT:
return jsonToDropMnodeStmt(pJson, pObj);
case QUERY_NODE_CREATE_TOPIC_STMT:
return jsonToCreateTopicStmt(pJson, pObj);
case QUERY_NODE_DROP_TOPIC_STMT:
return jsonToDropTopicStmt(pJson, pObj);
case QUERY_NODE_DROP_STREAM_STMT:
return jsonToDropStreamStmt(pJson, pObj);
case QUERY_NODE_SPLIT_VGROUP_STMT:
return jsonToSplitVgroupStmt(pJson, pObj);
case QUERY_NODE_SHOW_DNODES_STMT:
return jsonToShowDnodesStmt(pJson, pObj);
case QUERY_NODE_SHOW_MNODES_STMT:
return jsonToShowMnodesStmt(pJson, pObj);
case QUERY_NODE_SHOW_QNODES_STMT:
return jsonToShowQnodesStmt(pJson, pObj);
case QUERY_NODE_SHOW_CLUSTER_STMT:
return jsonToShowClusterStmt(pJson, pObj);
case QUERY_NODE_SHOW_DATABASES_STMT:
return jsonToShowDatabasesStmt(pJson, pObj);
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
return jsonToShowFunctionsStmt(pJson, pObj);
case QUERY_NODE_SHOW_INDEXES_STMT:
return jsonToShowIndexesStmt(pJson, pObj);
case QUERY_NODE_SHOW_STABLES_STMT:
return jsonToShowStablesStmt(pJson, pObj);
case QUERY_NODE_SHOW_STREAMS_STMT:
return jsonToShowStreamsStmt(pJson, pObj);
case QUERY_NODE_SHOW_TABLES_STMT:
return jsonToShowTablesStmt(pJson, pObj);
case QUERY_NODE_SHOW_TAGS_STMT:
return jsonToShowTagsStmt(pJson, pObj);
case QUERY_NODE_SHOW_USERS_STMT:
return jsonToShowUsersStmt(pJson, pObj);
case QUERY_NODE_SHOW_VGROUPS_STMT:
return jsonToShowVgroupsStmt(pJson, pObj);
case QUERY_NODE_SHOW_CONSUMERS_STMT:
return jsonToShowConsumersStmt(pJson, pObj);
case QUERY_NODE_SHOW_VARIABLES_STMT:
return jsonToShowVariablesStmt(pJson, pObj);
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
return jsonToShowDnodeVariablesStmt(pJson, pObj);
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
return jsonToShowTransactionsStmt(pJson, pObj);
case QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT:
return jsonToShowSubscriptionsStmt(pJson, pObj);
case QUERY_NODE_SHOW_VNODES_STMT:
return jsonToShowVnodesStmt(pJson, pObj);
case QUERY_NODE_SHOW_USER_PRIVILEGES_STMT:
return jsonToShowUserPrivilegesStmt(pJson, pObj);
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
return jsonToShowCreateDatabaseStmt(pJson, pObj);
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
return jsonToShowCreateTableStmt(pJson, pObj);
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
return jsonToShowCreateStableStmt(pJson, pObj);
case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:
return jsonToShowTableDistributedStmt(pJson, pObj);
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
return jsonToShowLocalVariablesStmt(pJson, pObj);
case QUERY_NODE_SHOW_TABLE_TAGS_STMT:
return jsonToShowTableTagsStmt(pJson, pObj);
case QUERY_NODE_DELETE_STMT:
return jsonToDeleteStmt(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_SCAN:

View File

@ -305,8 +305,8 @@ SNode* nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SSetOperator));
case QUERY_NODE_SELECT_STMT:
return makeNode(type, sizeof(SSelectStmt));
case QUERY_NODE_VNODE_MODIF_STMT:
return makeNode(type, sizeof(SVnodeModifOpStmt));
case QUERY_NODE_VNODE_MODIFY_STMT:
return makeNode(type, sizeof(SVnodeModifyOpStmt));
case QUERY_NODE_CREATE_DATABASE_STMT:
return makeNode(type, sizeof(SCreateDatabaseStmt));
case QUERY_NODE_DROP_DATABASE_STMT:
@ -818,8 +818,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode((SNode*)pStmt->pSlimit);
break;
}
case QUERY_NODE_VNODE_MODIF_STMT: {
SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pNode;
case QUERY_NODE_VNODE_MODIFY_STMT: {
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pNode;
destroyVgDataBlockArray(pStmt->pDataBlocks);
taosMemoryFreeClear(pStmt->pTableMeta);
taosHashCleanup(pStmt->pVgroupsHashObj);

View File

@ -741,6 +741,7 @@ pseudo_column(A) ::= WSTART(B).
pseudo_column(A) ::= WEND(B). { A = createRawExprNode(pCxt, &B, createFunctionNode(pCxt, &B, NULL)); }
pseudo_column(A) ::= WDURATION(B). { A = createRawExprNode(pCxt, &B, createFunctionNode(pCxt, &B, NULL)); }
pseudo_column(A) ::= IROWTS(B). { A = createRawExprNode(pCxt, &B, createFunctionNode(pCxt, &B, NULL)); }
pseudo_column(A) ::= ISFILLED(B). { A = createRawExprNode(pCxt, &B, createFunctionNode(pCxt, &B, NULL)); }
pseudo_column(A) ::= QTAGS(B). { A = createRawExprNode(pCxt, &B, createFunctionNode(pCxt, &B, NULL)); }
function_expression(A) ::= function_name(B) NK_LP expression_list(C) NK_RP(D). { A = createRawExprNodeExt(pCxt, &B, &D, createFunctionNode(pCxt, &B, C)); }

View File

@ -1474,7 +1474,7 @@ SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName) {
}
SDropUserStmt* pStmt = (SDropUserStmt*)nodesMakeNode(QUERY_NODE_DROP_USER_STMT);
CHECK_OUT_OF_MEM(pStmt);
COPY_STRING_FORM_ID_TOKEN(pStmt->useName, pUserName);
COPY_STRING_FORM_ID_TOKEN(pStmt->userName, pUserName);
return (SNode*)pStmt;
}

View File

@ -245,6 +245,10 @@ static int32_t collectMetaKeyFromCreateTable(SCollectMetaKeyCxt* pCxt, SCreateTa
if (TSDB_CODE_SUCCESS == code && NULL == pStmt->pTags) {
code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
}
if (TSDB_CODE_SUCCESS == code) {
code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser, pStmt->dbName, AUTH_TYPE_WRITE,
pCxt->pMetaCache);
}
return code;
}
@ -261,6 +265,10 @@ static int32_t collectMetaKeyFromCreateMultiTable(SCollectMetaKeyCxt* pCxt, SCre
if (TSDB_CODE_SUCCESS == code) {
code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, pCxt->pMetaCache);
}
if (TSDB_CODE_SUCCESS == code) {
code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser, pClause->dbName, AUTH_TYPE_WRITE,
pCxt->pMetaCache);
}
if (TSDB_CODE_SUCCESS != code) {
break;
}
@ -351,38 +359,59 @@ static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateS
}
static int32_t collectMetaKeyFromShowDnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES,
pCxt->pMetaCache);
if (pCxt->pParseCxt->enableSysInfo) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES,
pCxt->pMetaCache);
}
return TSDB_CODE_SUCCESS;
}
static int32_t collectMetaKeyFromShowMnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES,
pCxt->pMetaCache);
if (pCxt->pParseCxt->enableSysInfo) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES,
pCxt->pMetaCache);
}
return TSDB_CODE_SUCCESS;
}
static int32_t collectMetaKeyFromShowModules(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES,
pCxt->pMetaCache);
if (pCxt->pParseCxt->enableSysInfo) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES,
pCxt->pMetaCache);
}
return TSDB_CODE_SUCCESS;
}
static int32_t collectMetaKeyFromShowQnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES,
pCxt->pMetaCache);
if (pCxt->pParseCxt->enableSysInfo) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES,
pCxt->pMetaCache);
}
return TSDB_CODE_SUCCESS;
}
static int32_t collectMetaKeyFromShowSnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SNODES,
pCxt->pMetaCache);
if (pCxt->pParseCxt->enableSysInfo) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SNODES,
pCxt->pMetaCache);
}
return TSDB_CODE_SUCCESS;
}
static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_BNODES,
pCxt->pMetaCache);
if (pCxt->pParseCxt->enableSysInfo) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_BNODES,
pCxt->pMetaCache);
}
return TSDB_CODE_SUCCESS;
}
static int32_t collectMetaKeyFromShowCluster(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER,
pCxt->pMetaCache);
if (pCxt->pParseCxt->enableSysInfo) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER,
pCxt->pMetaCache);
}
return TSDB_CODE_SUCCESS;
}
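
The enableSysInfo guard is repeated for each system-table SHOW above: when system info is disabled for the connecting user, no information_schema table meta is reserved for that statement. One way to express the shared shape, shown purely as a hypothetical refactor (reserveSysTableMeta is not a function in this commit):

    static int32_t reserveSysTableMeta(SCollectMetaKeyCxt* pCxt, const char* pTable) {
      if (!pCxt->pParseCxt->enableSysInfo) {
        return TSDB_CODE_SUCCESS;  // skip the reservation entirely
      }
      return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pTable,
                                     pCxt->pMetaCache);
    }

    // e.g. collectMetaKeyFromShowDnodes() would then reduce to:
    //   return reserveSysTableMeta(pCxt, TSDB_INS_TABLE_DNODES);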
static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {

View File

@ -78,7 +78,7 @@ static int32_t authSetOperator(SAuthCxt* pCxt, SSetOperator* pSetOper) {
}
static int32_t authDropUser(SAuthCxt* pCxt, SDropUserStmt* pStmt) {
if (!pCxt->pParseCxt->isSuperUser || 0 == strcmp(pStmt->useName, TSDB_DEFAULT_USER)) {
if (!pCxt->pParseCxt->isSuperUser || 0 == strcmp(pStmt->userName, TSDB_DEFAULT_USER)) {
return TSDB_CODE_PAR_PERMISSION_DENIED;
}
return TSDB_CODE_SUCCESS;
@ -104,6 +104,22 @@ static int32_t authShowCreateTable(SAuthCxt* pCxt, SShowCreateTableStmt* pStmt)
return checkAuth(pCxt, pStmt->dbName, AUTH_TYPE_READ);
}
static int32_t authCreateTable(SAuthCxt* pCxt, SCreateTableStmt* pStmt) {
return checkAuth(pCxt, pStmt->dbName, AUTH_TYPE_WRITE);
}
static int32_t authCreateMultiTable(SAuthCxt* pCxt, SCreateMultiTableStmt* pStmt) {
int32_t code = TSDB_CODE_SUCCESS;
SNode* pNode = NULL;
FOREACH(pNode, pStmt->pSubTables) {
code = checkAuth(pCxt, ((SCreateSubTableClause*)pNode)->dbName, AUTH_TYPE_WRITE);
if (TSDB_CODE_SUCCESS != code) {
break;
}
}
return code;
}
static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
switch (nodeType(pStmt)) {
case QUERY_NODE_SET_OPERATOR:
@ -116,6 +132,10 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
return authDelete(pCxt, (SDeleteStmt*)pStmt);
case QUERY_NODE_INSERT_STMT:
return authInsert(pCxt, (SInsertStmt*)pStmt);
case QUERY_NODE_CREATE_TABLE_STMT:
return authCreateTable(pCxt, (SCreateTableStmt*)pStmt);
case QUERY_NODE_CREATE_MULTI_TABLE_STMT:
return authCreateMultiTable(pCxt, (SCreateMultiTableStmt*)pStmt);
case QUERY_NODE_SHOW_DNODES_STMT:
case QUERY_NODE_SHOW_MNODES_STMT:
case QUERY_NODE_SHOW_MODULES_STMT:

View File

@ -155,7 +155,7 @@ static int32_t ignoreUsingClause(SInsertParseContext* pCxt, const char** pSql) {
return code;
}
static int32_t parseDuplicateUsingClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, bool* pDuplicate) {
static int32_t parseDuplicateUsingClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, bool* pDuplicate) {
*pDuplicate = false;
char tbFName[TSDB_TABLE_FNAME_LEN];
@ -518,7 +518,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema,
// input pStmt->pSql: [(tag1_name, ...)] TAGS (tag1_value, ...) ...
// output pStmt->pSql: TAGS (tag1_value, ...) ...
static int32_t parseBoundTagsClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseBoundTagsClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
SSchema* pTagsSchema = getTableTagSchema(pStmt->pTableMeta);
insSetBoundColumnInfo(&pCxt->tags, pTagsSchema, getNumOfTags(pStmt->pTableMeta));
@ -533,7 +533,7 @@ static int32_t parseBoundTagsClause(SInsertParseContext* pCxt, SVnodeModifOpStmt
return parseBoundColumns(pCxt, &pStmt->pSql, true, &pCxt->tags, pTagsSchema);
}
static int32_t parseTagValue(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SSchema* pTagSchema, SToken* pToken,
static int32_t parseTagValue(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SSchema* pTagSchema, SToken* pToken,
SArray* pTagName, SArray* pTagVals, STag** pTag) {
if (!isNullValue(pTagSchema->type, pToken)) {
taosArrayPush(pTagName, pTagSchema->name);
@ -561,7 +561,7 @@ static int32_t parseTagValue(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt
return code;
}
static void buildCreateTbReq(SVnodeModifOpStmt* pStmt, STag* pTag, SArray* pTagName) {
static void buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* pTagName) {
insBuildCreateTbReq(&pStmt->createTblReq, pStmt->targetTableName.tname, pTag, pStmt->pTableMeta->suid,
pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags,
TSDB_DEFAULT_TABLE_TTL);
@ -591,7 +591,7 @@ static int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMs
}
// pSql -> tag1_value, ...)
static int32_t parseTagsClauseImpl(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseTagsClauseImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
int32_t code = TSDB_CODE_SUCCESS;
SSchema* pSchema = getTableTagSchema(pStmt->pTableMeta);
SArray* pTagVals = taosArrayInit(pCxt->tags.numOfBound, sizeof(STagVal));
@ -649,7 +649,7 @@ static int32_t parseTagsClauseImpl(SInsertParseContext* pCxt, SVnodeModifOpStmt*
// input pStmt->pSql: TAGS (tag1_value, ...) [table_options] ...
// output pStmt->pSql: [table_options] ...
static int32_t parseTagsClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseTagsClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
SToken token;
NEXT_TOKEN(pStmt->pSql, token);
if (TK_TAGS != token.type) {
@ -673,7 +673,7 @@ static int32_t parseTagsClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pSt
return code;
}
static int32_t storeTableMeta(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t storeTableMeta(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
pStmt->pTableMeta->suid = pStmt->pTableMeta->uid;
pStmt->pTableMeta->uid = pStmt->totalTbNum;
pStmt->pTableMeta->tableType = TSDB_CHILD_TABLE;
@ -688,7 +688,7 @@ static int32_t storeTableMeta(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStm
return taosHashPut(pStmt->pSubTableHashObj, tbFName, strlen(tbFName), &pBackup, POINTER_BYTES);
}
static int32_t parseTableOptions(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseTableOptions(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
do {
int32_t index = 0;
SToken token;
@ -731,7 +731,7 @@ static int32_t parseTableOptions(SInsertParseContext* pCxt, SVnodeModifOpStmt* p
// output pStmt->pSql:
// 1. [(field1_name, ...)]
// 2. VALUES ... | FILE ...
static int32_t parseUsingClauseBottom(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseUsingClauseBottom(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
if (!pStmt->usingTableProcessing || pCxt->usingDuplicateTable) {
return TSDB_CODE_SUCCESS;
}
@ -805,7 +805,7 @@ static int32_t getTableMeta(SInsertParseContext* pCxt, SName* pTbName, bool isSt
return code;
}
static int32_t getTableVgroup(SParseContext* pCxt, SVnodeModifOpStmt* pStmt, bool isStb, bool* pMissCache) {
static int32_t getTableVgroup(SParseContext* pCxt, SVnodeModifyOpStmt* pStmt, bool isStb, bool* pMissCache) {
int32_t code = TSDB_CODE_SUCCESS;
SVgroupInfo vg;
bool exists = true;
@ -830,7 +830,7 @@ static int32_t getTableVgroup(SParseContext* pCxt, SVnodeModifOpStmt* pStmt, boo
return code;
}
static int32_t getTableMetaAndVgroupImpl(SParseContext* pCxt, SVnodeModifOpStmt* pStmt, bool* pMissCache) {
static int32_t getTableMetaAndVgroupImpl(SParseContext* pCxt, SVnodeModifyOpStmt* pStmt, bool* pMissCache) {
SVgroupInfo vg;
int32_t code = catalogGetCachedTableVgMeta(pCxt->pCatalog, &pStmt->targetTableName, &vg, &pStmt->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
@ -842,7 +842,7 @@ static int32_t getTableMetaAndVgroupImpl(SParseContext* pCxt, SVnodeModifOpStmt*
return code;
}
static int32_t getTableMetaAndVgroup(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, bool* pMissCache) {
static int32_t getTableMetaAndVgroup(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, bool* pMissCache) {
SParseContext* pComCxt = pCxt->pComCxt;
int32_t code = TSDB_CODE_SUCCESS;
if (pComCxt->async) {
@ -868,25 +868,16 @@ static int32_t collectUseDatabase(const SName* pName, SHashObj* pDbs) {
return taosHashPut(pDbs, dbFName, strlen(dbFName), dbFName, sizeof(dbFName));
}
static int32_t getTargetTableSchema(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t getTargetTableSchema(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
if (pCxt->forceUpdate) {
pCxt->missCache = true;
return TSDB_CODE_SUCCESS;
}
int32_t code = checkAuth(pCxt->pComCxt, &pStmt->targetTableName, &pCxt->missCache);
#if 0
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
code = getTableMeta(pCxt, &pStmt->targetTableName, false, &pStmt->pTableMeta, &pCxt->missCache);
}
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
code = getTableVgroup(pCxt->pComCxt, pStmt, false, &pCxt->missCache);
}
#else
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
code = getTableMetaAndVgroup(pCxt, pStmt, &pCxt->missCache);
}
#endif
if (TSDB_CODE_SUCCESS == code && !pCxt->pComCxt->async) {
code = collectUseDatabase(&pStmt->targetTableName, pStmt->pDbFNameHashObj);
if (TSDB_CODE_SUCCESS == code) {
@ -896,11 +887,11 @@ static int32_t getTargetTableSchema(SInsertParseContext* pCxt, SVnodeModifOpStmt
return code;
}
static int32_t preParseUsingTableName(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SToken* pTbName) {
static int32_t preParseUsingTableName(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SToken* pTbName) {
return insCreateSName(&pStmt->usingTableName, pTbName, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg);
}
static int32_t getUsingTableSchema(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t getUsingTableSchema(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
if (pCxt->forceUpdate) {
pCxt->missCache = true;
return TSDB_CODE_SUCCESS;
@ -922,7 +913,7 @@ static int32_t getUsingTableSchema(SInsertParseContext* pCxt, SVnodeModifOpStmt*
return code;
}
static int32_t parseUsingTableNameImpl(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseUsingTableNameImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
SToken token;
NEXT_TOKEN(pStmt->pSql, token);
int32_t code = preParseUsingTableName(pCxt, pStmt, &token);
@ -941,7 +932,7 @@ static int32_t parseUsingTableNameImpl(SInsertParseContext* pCxt, SVnodeModifOpS
// output pStmt->pSql:
// 1. [(tag1_name, ...)] TAGS (tag1_value, ...) [table_options]] ...
// 2. VALUES ... | FILE ...
static int32_t parseUsingTableName(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseUsingTableName(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
SToken token;
int32_t index = 0;
NEXT_TOKEN_KEEP_SQL(pStmt->pSql, token, index);
@ -959,7 +950,7 @@ static int32_t parseUsingTableName(SInsertParseContext* pCxt, SVnodeModifOpStmt*
return code;
}
static int32_t preParseTargetTableName(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SToken* pTbName) {
static int32_t preParseTargetTableName(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SToken* pTbName) {
return insCreateSName(&pStmt->targetTableName, pTbName, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg);
}
@ -970,7 +961,7 @@ static int32_t preParseTargetTableName(SInsertParseContext* pCxt, SVnodeModifOpS
// output pStmt->pSql:
// 1. [ USING ... ] ...
// 2. VALUES ... | FILE ...
static int32_t preParseBoundColumnsClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t preParseBoundColumnsClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
SToken token;
int32_t index = 0;
NEXT_TOKEN_KEEP_SQL(pStmt->pSql, token, index);
@ -984,7 +975,7 @@ static int32_t preParseBoundColumnsClause(SInsertParseContext* pCxt, SVnodeModif
return skipParentheses(pCxt, &pStmt->pSql);
}
static int32_t getTableDataBlocks(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, STableDataBlocks** pDataBuf) {
static int32_t getTableDataBlocks(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, STableDataBlocks** pDataBuf) {
if (pCxt->pComCxt->async) {
uint64_t uid = pStmt->pTableMeta->uid;
if (pStmt->usingTableProcessing) {
@ -1002,7 +993,7 @@ static int32_t getTableDataBlocks(SInsertParseContext* pCxt, SVnodeModifOpStmt*
pDataBuf, NULL, &pStmt->createTblReq);
}
static int32_t parseBoundColumnsClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt,
static int32_t parseBoundColumnsClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt,
STableDataBlocks* pDataBuf) {
SToken token;
int32_t index = 0;
@ -1029,7 +1020,7 @@ static int32_t parseBoundColumnsClause(SInsertParseContext* pCxt, SVnodeModifOpS
// 1. [(tag1_name, ...)] ...
// 2. VALUES ... | FILE ...
// output pStmt->pSql: VALUES ... | FILE ...
static int32_t parseSchemaClauseBottom(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt,
static int32_t parseSchemaClauseBottom(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt,
STableDataBlocks** pDataBuf) {
int32_t code = parseUsingClauseBottom(pCxt, pStmt);
if (TSDB_CODE_SUCCESS == code) {
@ -1045,7 +1036,7 @@ static int32_t parseSchemaClauseBottom(SInsertParseContext* pCxt, SVnodeModifOpS
// output pStmt->pSql:
// 1. [(tag1_name, ...)] ...
// 2. VALUES ... | FILE ...
static int32_t parseSchemaClauseTop(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SToken* pTbName) {
static int32_t parseSchemaClauseTop(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SToken* pTbName) {
int32_t code = preParseTargetTableName(pCxt, pStmt, pTbName);
if (TSDB_CODE_SUCCESS == code) {
// option: [(field1_name, ...)]
@ -1337,7 +1328,7 @@ static int32_t allocateMemIfNeed(STableDataBlocks* pDataBlock, int32_t rowSize,
}
// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
static int32_t parseValues(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, STableDataBlocks* pDataBuf,
static int32_t parseValues(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, STableDataBlocks* pDataBuf,
int32_t maxRows, int32_t* pNumOfRows, SToken* pToken) {
int32_t code = insInitRowBuilder(&pDataBuf->rowBuilder, pDataBuf->pTableMeta->sversion, &pDataBuf->boundColumnInfo);
@ -1383,7 +1374,7 @@ static int32_t parseValues(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt,
}
// VALUES (field1_value, ...) [(field1_value2, ...) ...]
static int32_t parseValuesClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, STableDataBlocks* pDataBuf,
static int32_t parseValuesClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, STableDataBlocks* pDataBuf,
SToken* pToken) {
int32_t maxNumOfRows = 0;
int32_t numOfRows = 0;
@ -1403,7 +1394,7 @@ static int32_t parseValuesClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* p
return code;
}
static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, STableDataBlocks* pDataBuf,
static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, STableDataBlocks* pDataBuf,
int maxRows, int32_t* pNumOfRows) {
int32_t code = insInitRowBuilder(&pDataBuf->rowBuilder, pDataBuf->pTableMeta->sversion, &pDataBuf->boundColumnInfo);
@ -1461,7 +1452,7 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt,
return code;
}
static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, STableDataBlocks* pDataBuf) {
static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, STableDataBlocks* pDataBuf) {
int32_t maxNumOfRows = 0;
int32_t numOfRows = 0;
int32_t code = allocateMemIfNeed(pDataBuf, insGetExtendedRowSize(pDataBuf), &maxNumOfRows);
@ -1485,7 +1476,7 @@ static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifOpStm
return code;
}
static int32_t parseDataFromFile(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SToken* pFilePath,
static int32_t parseDataFromFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SToken* pFilePath,
STableDataBlocks* pDataBuf) {
char filePathStr[TSDB_FILENAME_LEN] = {0};
if (TK_NK_STRING == pFilePath->type) {
@ -1501,8 +1492,12 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SVnodeModifOpStmt* p
return parseDataFromFileImpl(pCxt, pStmt, pDataBuf);
}
static int32_t parseFileClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, STableDataBlocks* pDataBuf,
static int32_t parseFileClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, STableDataBlocks* pDataBuf,
SToken* pToken) {
if (tsUseAdapter) {
return buildInvalidOperationMsg(&pCxt->msg, "proxy mode does not support csv loading");
}
NEXT_TOKEN(pStmt->pSql, *pToken);
if (0 == pToken->n || (TK_NK_STRING != pToken->type && TK_NK_ID != pToken->type)) {
return buildSyntaxErrMsg(&pCxt->msg, "file path is required following keyword FILE", pToken->z);
@ -1511,7 +1506,7 @@ static int32_t parseFileClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pSt
}
// VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
static int32_t parseDataClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, STableDataBlocks* pDataBuf) {
static int32_t parseDataClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, STableDataBlocks* pDataBuf) {
SToken token;
NEXT_TOKEN(pStmt->pSql, token);
switch (token.type) {
@ -1528,7 +1523,7 @@ static int32_t parseDataClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pSt
// input pStmt->pSql:
// 1. [(tag1_name, ...)] ...
// 2. VALUES ... | FILE ...
static int32_t parseInsertTableClauseBottom(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseInsertTableClauseBottom(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
STableDataBlocks* pDataBuf = NULL;
int32_t code = parseSchemaClauseBottom(pCxt, pStmt, &pDataBuf);
if (TSDB_CODE_SUCCESS == code) {
@ -1537,7 +1532,7 @@ static int32_t parseInsertTableClauseBottom(SInsertParseContext* pCxt, SVnodeMod
return code;
}
static void resetEnvPreTable(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static void resetEnvPreTable(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
destroyBoundColumnInfo(&pCxt->tags);
taosMemoryFreeClear(pStmt->pTableMeta);
tdDestroySVCreateTbReq(&pStmt->createTblReq);
@ -1549,7 +1544,7 @@ static void resetEnvPreTable(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt
}
// input pStmt->pSql: [(field1_name, ...)] [ USING ... ] VALUES ... | FILE ...
static int32_t parseInsertTableClause(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SToken* pTbName) {
static int32_t parseInsertTableClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SToken* pTbName) {
resetEnvPreTable(pCxt, pStmt);
int32_t code = parseSchemaClauseTop(pCxt, pStmt, pTbName);
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
@ -1558,7 +1553,7 @@ static int32_t parseInsertTableClause(SInsertParseContext* pCxt, SVnodeModifOpSt
return code;
}
static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SToken* pTbName,
static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SToken* pTbName,
bool* pHasData) {
// no data in the sql string anymore.
if (0 == pTbName->n) {
@ -1597,7 +1592,7 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif
return TSDB_CODE_SUCCESS;
}
static int32_t setStmtInfo(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t setStmtInfo(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
SParsedDataColInfo* tags = taosMemoryMalloc(sizeof(pCxt->tags));
if (NULL == tags) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -1615,7 +1610,7 @@ static int32_t setStmtInfo(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt)
return code;
}
static int32_t parseInsertBodyBottom(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseInsertBodyBottom(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
if (TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT)) {
return setStmtInfo(pCxt, pStmt);
}
@ -1636,7 +1631,7 @@ static int32_t parseInsertBodyBottom(SInsertParseContext* pCxt, SVnodeModifOpStm
// [(field1_name, ...)]
// VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
// [...];
static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
SToken token;
int32_t code = TSDB_CODE_SUCCESS;
bool hasData = true;
@ -1659,7 +1654,7 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifOpStmt* pSt
static void destroySubTableHashElem(void* p) { taosMemoryFree(*(STableMeta**)p); }
static int32_t createVnodeModifOpStmt(SInsertParseContext* pCxt, bool reentry, SNode** pOutput) {
SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
if (NULL == pStmt) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -1726,6 +1721,8 @@ static int32_t getTableMetaFromMetaData(const SArray* pTables, STableMeta** pMet
if (1 != taosArrayGetSize(pTables)) {
return TSDB_CODE_FAILED;
}
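  // free any table meta fetched earlier so the duplicate assigned below does not leak it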
taosMemoryFreeClear(*pMeta);
SMetaRes* pRes = taosArrayGet(pTables, 0);
if (TSDB_CODE_SUCCESS == pRes->code) {
*pMeta = tableMetaDup((const STableMeta*)pRes->pRes);
@ -1736,7 +1733,7 @@ static int32_t getTableMetaFromMetaData(const SArray* pTables, STableMeta** pMet
return pRes->code;
}
static int32_t getTableVgroupFromMetaData(const SArray* pTables, SVnodeModifOpStmt* pStmt, bool isStb) {
static int32_t getTableVgroupFromMetaData(const SArray* pTables, SVnodeModifyOpStmt* pStmt, bool isStb) {
if (1 != taosArrayGetSize(pTables)) {
return TSDB_CODE_FAILED;
}
@ -1755,7 +1752,7 @@ static int32_t getTableVgroupFromMetaData(const SArray* pTables, SVnodeModifOpSt
}
static int32_t getTableSchemaFromMetaData(SInsertParseContext* pCxt, const SMetaData* pMetaData,
SVnodeModifOpStmt* pStmt, bool isStb) {
SVnodeModifyOpStmt* pStmt, bool isStb) {
int32_t code = checkAuthFromMetaData(pMetaData->pUser);
if (TSDB_CODE_SUCCESS == code) {
code = getTableMetaFromMetaData(pMetaData->pTableMeta, &pStmt->pTableMeta);
@ -1788,7 +1785,7 @@ static void clearCatalogReq(SCatalogReq* pCatalogReq) {
}
static int32_t setVnodeModifOpStmt(SInsertParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
SVnodeModifOpStmt* pStmt) {
SVnodeModifyOpStmt* pStmt) {
clearCatalogReq(pCatalogReq);
if (pStmt->usingTableProcessing) {
@ -1802,7 +1799,7 @@ static int32_t resetVnodeModifOpStmt(SInsertParseContext* pCxt, SQuery* pQuery)
int32_t code = createVnodeModifOpStmt(pCxt, true, &pQuery->pRoot);
if (TSDB_CODE_SUCCESS == code) {
SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot;
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot;
(*pCxt->pComCxt->pStmtCb->getExecInfoFn)(pCxt->pComCxt->pStmtCb->pStmt, &pStmt->pVgroupsHashObj,
&pStmt->pTableBlockHashObj);
@ -1831,7 +1828,7 @@ static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogR
return resetVnodeModifOpStmt(pCxt, *pQuery);
}
SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)(*pQuery)->pRoot;
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)(*pQuery)->pRoot;
if (!pStmt->fileProcessing) {
return setVnodeModifOpStmt(pCxt, pCatalogReq, pMetaData, pStmt);
@ -1841,7 +1838,7 @@ static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogR
}
static int32_t setRefreshMate(SQuery* pQuery) {
SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot;
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot;
if (taosHashGetSize(pStmt->pTableNameHashObj) > 0) {
taosArrayDestroy(pQuery->pTableList);
@ -1872,7 +1869,7 @@ static int32_t setRefreshMate(SQuery* pQuery) {
// [(field1_name, ...)]
// VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
// [...];
static int32_t parseInsertSqlFromStart(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseInsertSqlFromStart(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
int32_t code = skipInsertInto(&pStmt->pSql, &pCxt->msg);
if (TSDB_CODE_SUCCESS == code) {
code = parseInsertBody(pCxt, pStmt);
@ -1880,7 +1877,7 @@ static int32_t parseInsertSqlFromStart(SInsertParseContext* pCxt, SVnodeModifOpS
return code;
}
static int32_t parseInsertSqlFromCsv(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseInsertSqlFromCsv(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
STableDataBlocks* pDataBuf = NULL;
int32_t code = getTableDataBlocks(pCxt, pStmt, &pDataBuf);
if (TSDB_CODE_SUCCESS == code) {
@ -1898,7 +1895,7 @@ static int32_t parseInsertSqlFromCsv(SInsertParseContext* pCxt, SVnodeModifOpStm
return code;
}
static int32_t parseInsertSqlFromTable(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseInsertSqlFromTable(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
int32_t code = parseInsertTableClauseBottom(pCxt, pStmt);
if (TSDB_CODE_SUCCESS == code) {
code = parseInsertBody(pCxt, pStmt);
@ -1906,7 +1903,7 @@ static int32_t parseInsertSqlFromTable(SInsertParseContext* pCxt, SVnodeModifOpS
return code;
}
static int32_t parseInsertSqlImpl(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
static int32_t parseInsertSqlImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
if (pStmt->pSql == pCxt->pComCxt->pSql || NULL != pCxt->pComCxt->pStmtCb) {
return parseInsertSqlFromStart(pCxt, pStmt);
}
@ -1958,7 +1955,7 @@ static int32_t buildInsertUserAuthReq(const char* pUser, SName* pName, SArray**
return TSDB_CODE_SUCCESS;
}
static int32_t buildInsertCatalogReq(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SCatalogReq* pCatalogReq) {
static int32_t buildInsertCatalogReq(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SCatalogReq* pCatalogReq) {
int32_t code = buildInsertUserAuthReq(pCxt->pComCxt->pUser, &pStmt->targetTableName, &pCatalogReq->pUser);
if (TSDB_CODE_SUCCESS == code) {
if (0 == pStmt->usingTableName.type) {
@ -1974,7 +1971,7 @@ static int32_t buildInsertCatalogReq(SInsertParseContext* pCxt, SVnodeModifOpStm
}
static int32_t setNextStageInfo(SInsertParseContext* pCxt, SQuery* pQuery, SCatalogReq* pCatalogReq) {
SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot;
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot;
if (pCxt->missCache) {
parserDebug("0x%" PRIx64 " %d rows of %d tables have been inserted before cache miss", pCxt->pComCxt->requestId,
pStmt->totalRowsNum, pStmt->totalTbNum);
@ -1999,7 +1996,7 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal
int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery);
if (TSDB_CODE_SUCCESS == code) {
code = parseInsertSqlImpl(&context, (SVnodeModifOpStmt*)(*pQuery)->pRoot);
code = parseInsertSqlImpl(&context, (SVnodeModifyOpStmt*)(*pQuery)->pRoot);
}
if (TSDB_CODE_SUCCESS == code) {
code = setNextStageInfo(&context, *pQuery, pCatalogReq);

View File

@ -37,7 +37,7 @@ int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash
code = insMergeTableDataBlocks(pBlockHash, &pVgDataBlocks);
}
if (TSDB_CODE_SUCCESS == code) {
code = insBuildOutput(pVgHash, pVgDataBlocks, &((SVnodeModifOpStmt*)pQuery->pRoot)->pDataBlocks);
code = insBuildOutput(pVgHash, pVgDataBlocks, &((SVnodeModifyOpStmt*)pQuery->pRoot)->pDataBlocks);
}
insDestroyBlockArrayList(pVgDataBlocks);
return code;
@ -47,7 +47,7 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch
TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen) {
STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock;
SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen};
int32_t code = TSDB_CODE_SUCCESS;
int32_t code = TSDB_CODE_SUCCESS;
SParsedDataColInfo* tags = (SParsedDataColInfo*)boundTags;
if (NULL == tags) {
return TSDB_CODE_APP_ERROR;
@ -137,7 +137,8 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch
}
SVCreateTbReq tbReq = {0};
insBuildCreateTbReq(&tbReq, tName, pTag, suid, sTableName, tagName, pDataBlock->pTableMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL);
insBuildCreateTbReq(&tbReq, tName, pTag, suid, sTableName, tagName, pDataBlock->pTableMeta->tableInfo.numOfTags,
TSDB_DEFAULT_TABLE_TTL);
code = insBuildCreateTbMsg(pDataBlock, &tbReq);
tdDestroySVCreateTbReq(&tbReq);

View File

@ -260,6 +260,7 @@ static SKeyword keywordTable[] = {
{"WRITE", TK_WRITE},
{"_C0", TK_ROWTS},
{"_IROWTS", TK_IROWTS},
{"_ISFILLED", TK_ISFILLED},
{"_QDURATION", TK_QDURATION},
{"_QEND", TK_QEND},
{"_QSTART", TK_QSTART},

View File

@ -352,7 +352,7 @@ static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STa
code = catalogGetTableMeta(pParCxt->pCatalog, &conn, pName, pMeta);
}
}
if (TSDB_CODE_SUCCESS != code) {
if (TSDB_CODE_SUCCESS != code && TSDB_CODE_TSC_INVALID_TABLE_NAME != code) {
parserError("0x%" PRIx64 " catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId,
tstrerror(code), pName->dbname, pName->tname);
}
@ -5110,7 +5110,7 @@ static int32_t translateAlterUser(STranslateContext* pCxt, SAlterUserStmt* pStmt
static int32_t translateDropUser(STranslateContext* pCxt, SDropUserStmt* pStmt) {
SDropUserReq dropReq = {0};
strcpy(dropReq.user, pStmt->useName);
strcpy(dropReq.user, pStmt->userName);
return buildCmdMsg(pCxt, TDMT_MND_DROP_USER, (FSerializeFunc)tSerializeSDropUserReq, &dropReq);
}
@ -6645,7 +6645,7 @@ static void destroyCreateTbReqBatch(void* data) {
}
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray) {
SVnodeModifOpStmt* pNewStmt = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
SVnodeModifyOpStmt* pNewStmt = (SVnodeModifyOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
if (pNewStmt == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -7734,9 +7734,9 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->msgType = TDMT_VND_SUBMIT;
break;
case QUERY_NODE_VNODE_MODIF_STMT:
case QUERY_NODE_VNODE_MODIFY_STMT:
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->msgType = toMsgType(((SVnodeModifOpStmt*)pQuery->pRoot)->sqlNodeType);
pQuery->msgType = toMsgType(((SVnodeModifyOpStmt*)pQuery->pRoot)->sqlNodeType);
break;
case QUERY_NODE_DESCRIBE_STMT:
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:

File diff suppressed because it is too large

View File

@ -504,8 +504,8 @@ TEST_F(ParserInitialATest, alterTable) {
};
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_VNODE_MODIF_STMT);
SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot;
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_VNODE_MODIFY_STMT);
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot;
ASSERT_EQ(pStmt->sqlNodeType, QUERY_NODE_ALTER_TABLE_STMT);
ASSERT_NE(pStmt->pDataBlocks, nullptr);

View File

@ -953,6 +953,10 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
code = TSDB_CODE_OUT_OF_MEMORY;
}
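  // make sure the fill node has at least one output target, falling back to the window start timestamp column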
if (TSDB_CODE_SUCCESS == code && 0 == LIST_LENGTH(pFill->node.pTargets)) {
code = createColumnByRewriteExpr(pFill->pWStartTs, &pFill->node.pTargets);
}
if (TSDB_CODE_SUCCESS == code) {
*pLogicNode = (SLogicNode*)pFill;
} else {
@ -1396,7 +1400,7 @@ static int32_t getMsgType(ENodeType sqlType) {
return TDMT_VND_SUBMIT;
}
static int32_t createVnodeModifLogicNode(SLogicPlanContext* pCxt, SVnodeModifOpStmt* pStmt, SLogicNode** pLogicNode) {
static int32_t createVnodeModifLogicNode(SLogicPlanContext* pCxt, SVnodeModifyOpStmt* pStmt, SLogicNode** pLogicNode) {
SVnodeModifyLogicNode* pModif = (SVnodeModifyLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY);
if (NULL == pModif) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -1580,8 +1584,8 @@ static int32_t createQueryLogicNode(SLogicPlanContext* pCxt, SNode* pStmt, SLogi
switch (nodeType(pStmt)) {
case QUERY_NODE_SELECT_STMT:
return createSelectLogicNode(pCxt, (SSelectStmt*)pStmt, pLogicNode);
case QUERY_NODE_VNODE_MODIF_STMT:
return createVnodeModifLogicNode(pCxt, (SVnodeModifOpStmt*)pStmt, pLogicNode);
case QUERY_NODE_VNODE_MODIFY_STMT:
return createVnodeModifLogicNode(pCxt, (SVnodeModifyOpStmt*)pStmt, pLogicNode);
case QUERY_NODE_EXPLAIN_STMT:
return createQueryLogicNode(pCxt, ((SExplainStmt*)pStmt)->pQuery, pLogicNode);
case QUERY_NODE_SET_OPERATOR:

View File

@ -104,6 +104,8 @@ TEST_F(PlanBasicTest, interpFunc) {
run("SELECT _IROWTS, INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
run("SELECT _IROWTS, INTERP(c1), _ISFILLED FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
run("SELECT TBNAME, _IROWTS, INTERP(c1) FROM t1 PARTITION BY TBNAME "
"RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
}

View File

@ -456,6 +456,7 @@ int32_t schHandleLinkBrokenCallback(void *param, SDataBuf *pMsg, int32_t code) {
if (head->isHbParam) {
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param;
SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL};

View File

@ -275,6 +275,8 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
SyncIndex index = pEntry->index;
SyncIndex prevIndex = pEntry->index - 1;
SyncTerm lastMatchTerm = syncLogBufferGetLastMatchTerm(pBuf);
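  // the matching entry may come from disk rather than the in-memory buffer; inBuf records which, so it can be freed correctly at _out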
SSyncRaftEntry* pExist = NULL;
bool inBuf = true;
if (index <= pBuf->commitIndex) {
sTrace("vgId:%d, already committed. index: %" PRId64 ", term: %" PRId64 ". log buffer: [%" PRId64 " %" PRId64
@ -306,10 +308,9 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
}
// check current in buffer
SSyncRaftEntry* pExist = pBuf->entries[index % pBuf->size].pItem;
pExist = syncLogBufferGetOneEntry(pBuf, pNode, index, &inBuf);
if (pExist != NULL) {
ASSERT(pEntry->index == pExist->index);
if (pEntry->term != pExist->term) {
(void)syncLogBufferRollback(pBuf, pNode, index);
} else {
@ -317,14 +318,15 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
" %" PRId64 " %" PRId64 ", %" PRId64 ")",
pNode->vgId, pEntry->index, pEntry->term, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex,
pBuf->endIndex);
SyncTerm existPrevTerm = pBuf->entries[index % pBuf->size].prevLogTerm;
ASSERT(pEntry->term == pExist->term && prevTerm == existPrevTerm);
SyncTerm existPrevTerm = syncLogReplMgrGetPrevLogTerm(NULL, pNode, index);
ASSERT(pEntry->term == pExist->term && (pEntry->index > pBuf->matchIndex || prevTerm == existPrevTerm));
ret = 0;
goto _out;
}
}
// update
ASSERT(pBuf->entries[index % pBuf->size].pItem == NULL);
SSyncLogBufEntry tmp = {.pItem = pEntry, .prevLogIndex = prevIndex, .prevLogTerm = prevTerm};
pEntry = NULL;
pBuf->entries[index % pBuf->size] = tmp;
@ -337,6 +339,10 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
_out:
syncEntryDestroy(pEntry);
if (!inBuf) {
syncEntryDestroy(pExist);
pExist = NULL;
}
syncLogBufferValidate(pBuf);
taosThreadMutexUnlock(&pBuf->mutex);
return ret;
@ -1008,6 +1014,16 @@ int32_t syncLogBufferRollback(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncIndex
lastVer = pNode->pLogStore->syncLogLastIndex(pNode->pLogStore);
ASSERT(toIndex == lastVer + 1);
  // refill the buffer if needed
if (toIndex <= pBuf->startIndex) {
int32_t ret = syncLogBufferInitWithoutLock(pBuf, pNode);
if (ret < 0) {
sError("vgId:%d, failed to refill sync log buffer since %s", pNode->vgId, terrstr());
return -1;
}
}
ASSERT(pBuf->endIndex == toIndex);
syncLogBufferValidate(pBuf);
return 0;
}

View File

@ -111,6 +111,9 @@ void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell
void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) {
tdbTrace("page/init: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize);
pPage->pPageHdr = pPage->pData + szAmHdr;
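  // a page that holds no cells yet is simply (re)initialized as an empty page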
if (TDB_PAGE_NCELLS(pPage) == 0) {
return tdbPageZero(pPage, szAmHdr, xCellSize);
}
pPage->pCellIdx = pPage->pPageHdr + TDB_PAGE_HDR_SIZE(pPage);
pPage->pFreeStart = pPage->pCellIdx + TDB_PAGE_OFFSET_SIZE(pPage) * TDB_PAGE_NCELLS(pPage);
pPage->pFreeEnd = pPage->pData + TDB_PAGE_CCELLS(pPage);

View File

@ -466,11 +466,19 @@ int tdbPagerAbort(SPager *pPager, TXN *pTxn) {
return -1;
}
if (tdbOsLSeek(jfd, 0L, SEEK_SET) < 0) {
tdbError("failed to lseek jfd due to %s. file:%s, offset:0", strerror(errno), pPager->dbFileName);
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
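  // scratch buffer used below to read pages back from the journal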
u8 *pageBuf = tdbOsCalloc(1, pPager->pageSize);
if (pageBuf == NULL) {
return -1;
}
tdbDebug("tdb/abort: pager:%p,", pPager);
for (int pgIndex = 0; pgIndex < journalSize; ++pgIndex) {
// read pgno & the page from journal
SPgno pgno;
@ -481,6 +489,8 @@ int tdbPagerAbort(SPager *pPager, TXN *pTxn) {
return -1;
}
tdbTrace("tdb/abort: pgno:%d,", pgno);
ret = tdbOsRead(jfd, pageBuf, pPager->pageSize);
if (ret < 0) {
tdbOsFree(pageBuf);
@ -578,12 +588,12 @@ int tdbPagerFlushPage(SPager *pPager, TXN *pTxn) {
return -1;
}
tdbTrace("tdb/flush:%p, %d/%d/%d", pPager, pPager->dbOrigSize, pPager->dbFileSize, maxPgno);
tdbTrace("tdb/flush:%p, pgno:%d, %d/%d/%d", pPager, pgno, pPager->dbOrigSize, pPager->dbFileSize, maxPgno);
pPager->dbOrigSize = maxPgno;
pPage->isDirty = 0;
tdbTrace("pager/flush drop page: %p %d from dirty tree: %p", pPage, TDB_PAGE_PGNO(pPage), &pPager->rbt);
tdbTrace("pager/flush drop page: %p, pgno:%d, from dirty tree: %p", pPage, TDB_PAGE_PGNO(pPage), &pPager->rbt);
tRBTreeDrop(&pPager->rbt, (SRBTreeNode *)pPage);
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
@ -830,7 +840,7 @@ static int tdbPagerPWritePageToDB(SPager *pPager, SPage *pPage) {
return 0;
}
static int tdbPagerRestore(SPager *pPager, SBTree *pBt, const char *jFileName) {
static int tdbPagerRestore(SPager *pPager, const char *jFileName) {
int ret = 0;
SPgno journalSize = 0;
u8 *pageBuf = NULL;
@ -908,7 +918,7 @@ static int tdbPagerRestore(SPager *pPager, SBTree *pBt, const char *jFileName) {
return 0;
}
int tdbPagerRestoreJournals(SPager *pPager, SBTree *pBt) {
int tdbPagerRestoreJournals(SPager *pPager) {
tdbDirEntryPtr pDirEntry;
tdbDirPtr pDir = taosOpenDir(pPager->pEnv->dbName);
if (pDir == NULL) {
@ -919,7 +929,7 @@ int tdbPagerRestoreJournals(SPager *pPager, SBTree *pBt) {
while ((pDirEntry = tdbReadDir(pDir)) != NULL) {
char *name = tdbDirEntryBaseName(tdbGetDirEntryName(pDirEntry));
if (strncmp(TDB_MAINDB_NAME "-journal", name, 16) == 0) {
if (tdbPagerRestore(pPager, pBt, name) < 0) {
if (tdbPagerRestore(pPager, name) < 0) {
tdbCloseDir(&pDir);
tdbError("failed to restore file due to %s. jFileName:%s", strerror(errno), name);

View File

@ -107,6 +107,16 @@ int tdbTbOpen(const char *tbname, int keyLen, int valLen, tdb_cmpr_fn_t keyCmprF
ASSERT(pPager != NULL);
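  // roll back or restore any leftover journals before the btree is opened on this pager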
if (rollback) {
tdbPagerRollback(pPager);
} else {
ret = tdbPagerRestoreJournals(pPager);
if (ret < 0) {
tdbOsFree(pTb);
return -1;
}
}
// pTb->pBt
ret = tdbBtreeOpen(keyLen, valLen, pPager, tbname, pgno, keyCmprFn, pEnv, &(pTb->pBt));
if (ret < 0) {
@ -114,16 +124,6 @@ int tdbTbOpen(const char *tbname, int keyLen, int valLen, tdb_cmpr_fn_t keyCmprF
return -1;
}
if (rollback) {
tdbPagerRollback(pPager);
} else {
ret = tdbPagerRestoreJournals(pPager, pTb->pBt);
if (ret < 0) {
tdbOsFree(pTb);
return -1;
}
}
*ppTb = pTb;
return 0;
}

View File

@ -197,7 +197,7 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initP
TXN *pTxn);
void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn);
int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno);
int tdbPagerRestoreJournals(SPager *pPager, SBTree *pBt);
int tdbPagerRestoreJournals(SPager *pPager);
int tdbPagerRollback(SPager *pPager);
// tdbPCache.c ====================================

View File

@ -599,6 +599,10 @@ static int32_t allocConnRef(SCliConn* conn, bool update) {
exh->pThrd = conn->hostThrd;
exh->refId = transAddExHandle(transGetRefMgt(), exh);
conn->refId = exh->refId;
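  // no valid ref id was obtained, so free the handle to avoid leaking it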
if (conn->refId == -1) {
taosMemoryFree(exh);
}
return 0;
}

View File

@ -648,6 +648,9 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 6 -M 3 -n 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 6 -M 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 6 -M 3 -n 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeModifyMeta.py -N 6 -M 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 6 -M 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 6 -M 3 -n 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 6 -M 3
@ -658,6 +661,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3 -n 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
@ -1018,6 +1022,11 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_select.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_select.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_select.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-20582.py
#develop test

View File

@ -53,7 +53,7 @@ class ConfigureyCluster:
# configure dnode of independent mnodes
if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True :
dnode.addExtraCfg("supportVnodes", 0)
dnode.addExtraCfg("supportVnodes", 1024)
# print(dnode)
self.dnodes.append(dnode)
return self.dnodes

View File

@ -3,6 +3,9 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/deploy.sh -n dnode5 -i 5
system sh/deploy.sh -n dnode6 -i 6
system sh/deploy.sh -n dnode7 -i 7
system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
@ -14,6 +17,9 @@ print =============== step1: create dnodes
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
sql create dnode $hostname port 7400
sql create dnode $hostname port 7500
sql create dnode $hostname port 7600
sql create dnode $hostname port 7700
$x = 0
step1:
@ -29,7 +35,7 @@ print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data24 $data35
if $rows != 4 then
if $rows != 7 then
return -1
endi
if $data(1)[4] != ready then

View File

@ -68,6 +68,7 @@ class TDTestCase:
my_file = Path(f"{packagePath}/{packageName}")
if not my_file.exists():
print(f"{packageName} is not exists")
tdLog.info(f"cd {packagePath} && wget https://www.tdengine.com/assets-download/3.0/{packageName}")
os.system(f"cd {packagePath} && wget https://www.tdengine.com/assets-download/3.0/{packageName}")
else:
print(f"{packageName} has been exists")

File diff suppressed because it is too large

View File

@ -362,12 +362,12 @@ class TDTestCase:
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(linear)")
tdSql.checkRows(0)
tdLog.printNoPrefix("==========step8:test _irowts with interp")
tdLog.printNoPrefix("==========step8:test _irowts,_isfilled with interp")
# fill null
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(null)")
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(null)")
tdSql.checkRows(9)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
@ -379,9 +379,19 @@ class TDTestCase:
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
tdSql.checkData(0, 1, True)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, False)
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
tdSql.checkRows(13)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:05.000')
@ -397,9 +407,23 @@ class TDTestCase:
tdSql.checkData(11, 0, '2020-02-01 00:00:15.000')
tdSql.checkData(12, 0, '2020-02-01 00:00:16.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(null)")
tdSql.checkData(0, 1, True)
tdSql.checkData(1, 1, False)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, False)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, True)
tdSql.checkData(11, 1, False)
tdSql.checkData(12, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(null)")
tdSql.checkRows(6)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
@ -408,10 +432,16 @@ class TDTestCase:
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, False)
# fill value
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(value, 1)")
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(value, 1)")
tdSql.checkRows(9)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
@ -423,9 +453,19 @@ class TDTestCase:
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)")
tdSql.checkData(0, 1, True)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, False)
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)")
tdSql.checkRows(13)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:05.000')
@ -441,9 +481,23 @@ class TDTestCase:
tdSql.checkData(11, 0, '2020-02-01 00:00:15.000')
tdSql.checkData(12, 0, '2020-02-01 00:00:16.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(value, 1)")
tdSql.checkData(0, 1, True)
tdSql.checkData(1, 1, False)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, False)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, True)
tdSql.checkData(11, 1, False)
tdSql.checkData(12, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(value, 1)")
tdSql.checkRows(6)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
@ -452,10 +506,17 @@ class TDTestCase:
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, False)
# fill prev
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(prev)")
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(prev)")
tdSql.checkRows(9)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
@ -467,9 +528,19 @@ class TDTestCase:
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(prev)")
tdSql.checkData(0, 1, True)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, False)
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(prev)")
tdSql.checkRows(12)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:06.000')
@ -484,9 +555,22 @@ class TDTestCase:
tdSql.checkData(10, 0, '2020-02-01 00:00:15.000')
tdSql.checkData(11, 0, '2020-02-01 00:00:16.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(prev)")
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, False)
tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, False)
tdSql.checkData(11, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(prev)")
tdSql.checkRows(6)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
@ -495,10 +579,16 @@ class TDTestCase:
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, False)
# fill next
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(next)")
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(next)")
tdSql.checkRows(9)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
@ -510,9 +600,19 @@ class TDTestCase:
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
tdSql.checkData(0, 1, True)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, False)
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
tdSql.checkRows(12)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:05.000')
@ -527,9 +627,22 @@ class TDTestCase:
tdSql.checkData(10, 0, '2020-02-01 00:00:14.000')
tdSql.checkData(11, 0, '2020-02-01 00:00:15.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(next)")
tdSql.checkData(0, 1, True)
tdSql.checkData(1, 1, False)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, False)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, True)
tdSql.checkData(11, 1, False)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(next)")
tdSql.checkRows(6)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
@ -538,10 +651,17 @@ class TDTestCase:
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, False)
# fill linear
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(linear)")
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(linear)")
tdSql.checkRows(9)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
@ -553,9 +673,19 @@ class TDTestCase:
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkData(0, 1, True)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, False)
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:06.000')
@ -569,9 +699,21 @@ class TDTestCase:
tdSql.checkData(9, 0, '2020-02-01 00:00:14.000')
tdSql.checkData(10, 0, '2020-02-01 00:00:15.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(linear)")
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, False)
tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, True)
tdSql.checkData(8, 1, True)
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, False)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(linear)")
tdSql.checkRows(6)
tdSql.checkCols(2)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
@ -580,28 +722,47 @@ class TDTestCase:
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
# multiple _irowts
tdSql.query(f"select interp(c0),_irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkCols(2)
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
tdSql.checkData(3, 1, True)
tdSql.checkData(4, 1, True)
tdSql.checkData(5, 1, False)
tdSql.checkData(0, 1, '2020-02-01 00:00:05.000')
tdSql.checkData(1, 1, '2020-02-01 00:00:06.000')
tdSql.checkData(2, 1, '2020-02-01 00:00:07.000')
tdSql.checkData(3, 1, '2020-02-01 00:00:08.000')
tdSql.checkData(4, 1, '2020-02-01 00:00:09.000')
tdSql.checkData(5, 1, '2020-02-01 00:00:10.000')
tdSql.checkData(6, 1, '2020-02-01 00:00:11.000')
tdSql.checkData(7, 1, '2020-02-01 00:00:12.000')
tdSql.checkData(8, 1, '2020-02-01 00:00:13.000')
tdSql.checkData(9, 1, '2020-02-01 00:00:14.000')
# multiple _irowts,_isfilled
tdSql.query(f"select interp(c0),_irowts,_isfilled from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkCols(3)
tdSql.checkData(0, 1, '2020-02-01 00:00:05.000')
tdSql.checkData(1, 1, '2020-02-01 00:00:06.000')
tdSql.checkData(2, 1, '2020-02-01 00:00:07.000')
tdSql.checkData(3, 1, '2020-02-01 00:00:08.000')
tdSql.checkData(4, 1, '2020-02-01 00:00:09.000')
tdSql.checkData(5, 1, '2020-02-01 00:00:10.000')
tdSql.checkData(6, 1, '2020-02-01 00:00:11.000')
tdSql.checkData(7, 1, '2020-02-01 00:00:12.000')
tdSql.checkData(8, 1, '2020-02-01 00:00:13.000')
tdSql.checkData(9, 1, '2020-02-01 00:00:14.000')
tdSql.checkData(10, 1, '2020-02-01 00:00:15.000')
tdSql.query(f"select _irowts, interp(c0), interp(c0), _irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkCols(4)
tdSql.checkData(0, 2, False)
tdSql.checkData(1, 2, True)
tdSql.checkData(2, 2, True)
tdSql.checkData(3, 2, True)
tdSql.checkData(4, 2, True)
tdSql.checkData(5, 2, False)
tdSql.checkData(6, 2, True)
tdSql.checkData(7, 2, True)
tdSql.checkData(8, 2, True)
tdSql.checkData(9, 2, True)
tdSql.checkData(10, 2, False)
cols = (0, 3)
tdSql.query(f"select _irowts, _isfilled, interp(c0), interp(c0), _isfilled, _irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkCols(6)
cols = (0, 5)
for i in cols:
tdSql.checkData(0, i, '2020-02-01 00:00:05.000')
tdSql.checkData(1, i, '2020-02-01 00:00:06.000')
@ -615,6 +776,20 @@ class TDTestCase:
tdSql.checkData(9, i, '2020-02-01 00:00:14.000')
tdSql.checkData(10, i, '2020-02-01 00:00:15.000')
cols = (1, 4)
for i in cols:
tdSql.checkData(0, i, False)
tdSql.checkData(1, i, True)
tdSql.checkData(2, i, True)
tdSql.checkData(3, i, True)
tdSql.checkData(4, i, True)
tdSql.checkData(5, i, False)
tdSql.checkData(6, i, True)
tdSql.checkData(7, i, True)
tdSql.checkData(8, i, True)
tdSql.checkData(9, i, True)
tdSql.checkData(10, i, False)
tdLog.printNoPrefix("==========step9:test intra block interpolation")
tdSql.execute(f"drop database {dbname}");

View File

@ -19,12 +19,17 @@ BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
UINT_COL = "c11"
UBINT_COL = "c12"
USINT_COL = "c13"
UTINT_COL = "c14"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, UINT_COL, UBINT_COL, USINT_COL, UTINT_COL]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL, UINT_COL, UBINT_COL, USINT_COL, UTINT_COL ]
DBNAME = "db"
class TDTestCase:
@ -208,6 +213,13 @@ class TDTestCase:
tdLog.info(f"sql: {current_sqls[i]}")
tdSql.query(current_sqls[i])
def check_result(self):
for col in NUM_COL:
tdSql.query("select leastsquares(%s, 1, 9) from %s.stb1" % (col, DBNAME))
tdSql.checkRows(1)
res = tdSql.getData(0, 0)
if res is None:
tdLog.exit("result is not correct")
def __test_current(self):
# tdSql.query("explain select c1 from {dbname}.ct1")
@ -236,6 +248,7 @@ class TDTestCase:
def all_test(self):
self.__test_error()
self.__test_current()
self.check_result()
def __create_tb(self, dbname=DBNAME):
@ -243,13 +256,15 @@ class TDTestCase:
create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp, {UINT_COL} int unsigned,
{UBINT_COL} bigint unsigned, {USINT_COL} smallint unsigned, {UTINT_COL} tinyint unsigned
) tags (t1 int)
'''
create_ntb_sql = f'''create table {dbname}.nt1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp, {UINT_COL} int unsigned,
{UBINT_COL} bigint unsigned, {USINT_COL} smallint unsigned, {UTINT_COL} tinyint unsigned
)
'''
tdSql.execute(create_stb_sql)
@ -262,49 +277,49 @@ class TDTestCase:
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127} )"
)
tdSql.execute(
f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127} )"
)
tdSql.execute(
f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127} )"
)
tdSql.execute(
f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 }, 0, 0, 0, 0)
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 }, 0, 0, 0, NULL )
'''
)
tdSql.execute(
f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}, NULL, NULL, NULL, NULL
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}, NULL, NULL, NULL, NULL
)
'''
)
tdSql.execute(
f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }, NULL, NULL, NULL, NULL
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }, NULL, NULL, NULL, NULL
)
'''
)
@ -312,22 +327,22 @@ class TDTestCase:
for i in range(rows):
insert_data = f'''insert into {dbname}.nt1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i }, NULL, NULL, NULL, NULL )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into {dbname}.nt1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }, NULL, NULL, NULL, NULL
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }, NULL, NULL, NULL, NULL
)
'''
)

View File

@ -161,6 +161,19 @@ class TDTestCase:
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
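# before the insert threads are started, exercise user management: create a user,
# reconnect as that user, change its password, then drop it (user100 is recycled five times in the second loop)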
for i in range(5):
clusterComCreate.createUser(newTdSql,f"user{i}",f"pass{i}")
userTdSql=tdCom.newTdSql(user=f"user{i}",password=f"pass{i}")
clusterComCreate.alterUser(userTdSql,f"user{i}",f"pass{i+1}")
clusterComCreate.deleteUser(newTdSql,f"user{i}")
for j in range(5):
i=100
clusterComCreate.createUser(newTdSql,f"user{i}",f"pass{i}")
userTdSql=tdCom.newTdSql(user=f"user{i}",password=f"pass{i}")
clusterComCreate.alterUser(userTdSql,f"user{i}",f"pass{i+1}")
clusterComCreate.deleteUser(newTdSql,f"user{i}")
for tr in threads:
tr.start()
for tr in threads:

View File

@ -67,29 +67,11 @@ class TDTestCase:
self._async_raise(thread.ident, SystemExit)
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
print("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def reCreateUser(self, tdsql, count, user, passwd):
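# create a user, reconnect as that user, change its password, then drop it; run as an extra thread next to the insert threads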
clusterComCreate.createUser(tdsql,f"{user}{count}",f"{passwd}{count}")
userTdSql=tdCom.newTdSql(user=f"{user}{count}",password=f"{passwd}{count}")
clusterComCreate.alterUser(userTdSql,f"{user}{count}",f"{passwd}{count+1}")
clusterComCreate.deleteUser(tdsql,f"{user}{count}")
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
@ -161,6 +143,8 @@ class TDTestCase:
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
threads.append(threading.Thread(target=self.reCreateUser,args=(newTdSql,i,"user","passwd")))
for tr in threads:
tr.start()

View File

@ -147,6 +147,9 @@ class TDTestCase:
# print(f"==================={dbNameIndex},{a11111}")
threads.append(threading.Thread(target=clusterComCreate.createDeltedatabases, args=(newTdSql, dbNameIndex,repeatNumber,paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])))
redbNameIndex = '%s%d'%(paraDict["dbName"],i+100)
threads.append(threading.Thread(target=clusterComCreate.createDeltedatabases, args=(newTdSql, redbNameIndex,1,paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])))
for tr in threads:
tr.start()
@ -199,7 +202,7 @@ class TDTestCase:
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=4,stopRole='dnode')
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=1,stopRole='dnode')
def stop(self):
tdSql.close()

View File

@ -97,7 +97,7 @@ class TDTestCase:
'dropFlag': 1,
'event': '',
'vgroups': 4,
'replica': 1,
'replica': 3,
'stbName': 'stb',
'stbNumbers': 2,
'colPrefix': 'c',
@ -105,9 +105,9 @@ class TDTestCase:
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 200,
'ctbNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
"rowsPerTbl": 10000,
"rowsPerTbl": 100000,
"batchNum": 5000
}
@ -198,16 +198,16 @@ class TDTestCase:
clusterComCheck.checkDbRows(dbNumbers)
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
tdSql.execute("use %s" %(paraDict["dbName"]))
tdSql.query("show stables")
# tdSql.execute("use %s" %(paraDict["dbName"]))
tdSql.query("show %s.stables"%(paraDict["dbName"]))
tdSql.checkRows(paraDict["stbNumbers"])
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
tdSql.query("select * from %s"%stableName)
tdSql.checkRows(rowsPerStb)
stableName= '%s.%s_%d'%(paraDict["dbName"],paraDict['stbName'],i)
tdSql.query("select count(*) from %s"%stableName)
tdSql.checkData(0,0,rowsPerStb)
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=1,stopRole='dnode')
def stop(self):
tdSql.close()

View File

@ -0,0 +1,202 @@
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
from numpy import row_stack
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
tdSql.init(conn.cursor())
self.host = socket.gethostname()
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _async_raise(self, tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stopThread(self,thread):
self._async_raise(thread.ident, SystemExit)
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db0_0',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'replica': 3,
'stbName': 'stb',
'stbNumbers': 2,
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
"rowsPerTbl": 1000,
"batchNum": 5000
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("select * from information_schema.ins_dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)
# issue an operation that is expected to fail, then confirm the dnode status again
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("select * from information_schema.ins_dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
# create stable:stb_0
stableName= paraDict['stbName']
newTdSql=tdCom.newTdSql()
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
#create child table:ctb_0
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
#insert data
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
for tr in threads:
tr.start()
for tr in threads:
tr.join()
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
for i in range(mnodeNums):
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
# sleep(10)
tdDnodes[i+mnodeNums].starttaosd()
# sleep(10)
elif stopRole == "dnode":
for i in range(dnodeNumbers):
tdDnodes[i].stoptaosd()
clusterComCheck.checkDbRows(dbNumbers)
if i == 0 :
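# while the first dnode is down, alter the stable schema and re-insert the rows;
# stb_0 is therefore expected to hold rowsPerStb*2 rows once the cluster recovers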
stableName= '%s_%d'%(paraDict['stbName'],0)
newTdSql=tdCom.newTdSql()
clusterComCreate.alterStbMetaData(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
# sleep(10)
tdDnodes[i].starttaosd()
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
# tdSql.execute("use %s" %(paraDict["dbName"]))
tdSql.query("show %s.stables"%(paraDict["dbName"]))
tdSql.checkRows(paraDict["stbNumbers"])
for i in range(paraDict['stbNumbers']):
stableName= '%s.%s_%d'%(paraDict["dbName"],paraDict['stbName'],i)
tdSql.query("select count(*) from %s"%stableName)
if i == 0 :
tdSql.checkData(0,0,rowsPerStb*2)
else:
tdSql.checkData(0,0,rowsPerStb)
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=1,stopRole='dnode')
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,209 @@
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.replicaVar=int(replicaVar)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _async_raise(self, tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stopThread(self,thread):
self._async_raise(thread.ident, SystemExit)
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
print("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db',
'dbNumbers': 4,
'dropFlag': 1,
'event': '',
'vgroups': 4,
'replica': 3,
'stbName': 'stb',
'stbNumbers': 100,
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
dnodeNumbers=int(dnodeNumbers)
dbNumbers=paraDict['dbNumbers']
mnodeNums=int(mnodeNums)
repeatNumber = 2
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=dbNumbers
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
paraDict['replica'] = self.replicaVar
tdLog.info("first check dnode and mnode")
tdSql.query("select * from information_schema.ins_dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)
# issue an operation that is expected to fail, then confirm the dnode status again
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("select * from information_schema.ins_dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
# create database and stable
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
for i in range(dbNumbers):
dbNameIndex = '%s%d'%(paraDict["dbName"],0)
newTdSql=tdCom.newTdSql()
# a11111=paraDict["dbNumbers"]
# print(f"==================={dbNameIndex},{a11111}")
clusterComCreate.createDeltedatabases(newTdSql, dbNameIndex,repeatNumber,paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
redbNameIndex = '%s%d'%(paraDict["dbName"],100)
clusterComCreate.createDeltedatabases(newTdSql, redbNameIndex,1,paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
for i in range(mnodeNums):
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
# sleep(10)
tdDnodes[i+mnodeNums].starttaosd()
# sleep(10)
elif stopRole == "dnode":
for i in range(dnodeNumbers):
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("check dnodes status is ready")
else:
tdLog.info("check dnodes status is not ready")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
tdLog.info("check dnode number:")
clusterComCheck.checkDnodes(dnodeNumbers)
tdSql.query("select * from information_schema.ins_databases")
tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers))
# tdLog.info("check DB Rows:")
# clusterComCheck.checkDbRows(allDbNumbers)
# tdLog.info("check DB Status on by on")
# for i in range(restartNumbers):
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=2,stopRole='dnode')
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,207 @@
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
from numpy import row_stack
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
tdSql.init(conn.cursor())
self.host = socket.gethostname()
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _async_raise(self, tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stopThread(self,thread):
self._async_raise(thread.ident, SystemExit)
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db0_0',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'replica': 3,
'stbName': 'stb',
'stbNumbers': 2,
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
"rowsPerTbl": 1000,
"batchNum": 5000
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("select * from information_schema.ins_dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)
# issue an operation that is expected to fail, then confirm the dnode status again
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("select * from information_schema.ins_dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
# create stable:stb_0
stableName= paraDict['stbName']
newTdSql=tdCom.newTdSql()
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
#create child table:ctb_0
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
#insert data
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
for tr in threads:
tr.start()
for tr in threads:
tr.join()
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
for i in range(mnodeNums):
tdDnodes[i].stoptaosd()
if i == 0 :
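# while the first mnode is down, alter the stable schema and re-insert the rows;
# while the second mnode is down, update a tag value (both are verified after the loop)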
stableName= '%s_%d'%(paraDict['stbName'],0)
newTdSql=tdCom.newTdSql()
clusterComCreate.alterStbMetaData(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
elif i == 1 :
tdSql.execute("ALTER TABLE db0_0.stb_0_0 SET TAG t1r=10000;")
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
# sleep(10)
tdDnodes[i+mnodeNums].starttaosd()
# sleep(10)
elif stopRole == "dnode":
for i in range(dnodeNumbers):
tdDnodes[i].stoptaosd()
clusterComCheck.checkDbRows(dbNumbers)
# sleep(10)
tdDnodes[i].starttaosd()
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
# tdSql.execute("use %s" %(paraDict["dbName"]))
tdSql.query("select t1r from db0_0.stb_0_0 limit 1;")
tdSql.checkData(0,0,10000)
tdSql.query("show %s.stables"%(paraDict["dbName"]))
tdSql.checkRows(paraDict["stbNumbers"])
for i in range(paraDict['stbNumbers']):
stableName= '%s.%s_%d'%(paraDict["dbName"],paraDict['stbName'],i)
tdSql.query("select count(*) from %s"%stableName)
if i == 0 :
tdSql.checkData(0,0,rowsPerStb*2)
else:
tdSql.checkData(0,0,rowsPerStb)
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=1,stopRole='mnode')
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,224 @@
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
from numpy import row_stack
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.replicaVar = int(replicaVar)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _async_raise(self, tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stopThread(self,thread):
self._async_raise(thread.ident, SystemExit)
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
print("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db0_0',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'replica': 1,
'stbName': 'stb',
'stbNumbers': 2,
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
"rowsPerTbl": 100,
"batchNum": 5000
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
paraDict['replica'] = self.replicaVar
tdLog.info("first check dnode and mnode")
tdSql.query("select * from information_schema.ins_dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)
# issue an operation that is expected to fail, then confirm the dnode status again
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("select * from information_schema.ins_dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
# create stable:stb_0
stableName= paraDict['stbName']
newTdSql=tdCom.newTdSql()
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
#create child table:ctb_0
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
#insert data
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
for tr in threads:
tr.start()
for tr in threads:
tr.join()
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
for i in range(mnodeNums):
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
# sleep(10)
tdDnodes[i+mnodeNums].starttaosd()
# sleep(10)
elif stopRole == "dnode":
for i in range(dnodeNumbers):
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("dnode is ready")
else:
print("dnodes is not ready")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
newTdSql=tdCom.newTdSql()
newTdSql.execute("reset query cache")
newTdSql.execute("use %s" %(paraDict["dbName"]))
newTdSql.query("show %s.stables"%(paraDict["dbName"]))
newTdSql.checkRows(paraDict["stbNumbers"])
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql.query("select * from %s"%stableName)
newTdSql.checkRows(rowsPerStb)
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=2,stopRole='dnode')
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -1,324 +0,0 @@
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to excute {__file__}")
# tdSql.init(conn.cursor())
# self.host = socket.gethostname()
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _async_raise(self, tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(self,thread):
self._async_raise(thread.ident, SystemExit)
def insert_data(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
print("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def checkData(self,dbname,stbname,stableCount,CtableCount,rowsPerSTable,):
tdSql.execute("use %s"%dbname)
tdSql.query("show stables")
tdSql.checkRows(stableCount)
tdSql.query("show tables")
tdSql.checkRows(CtableCount)
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
return
def checkdnodes(self,dnodenumber):
count=0
while count < 100:
time.sleep(1)
statusReadyBumber=0
tdSql.query("select * from information_schema.ins_dnodes;")
if tdSql.checkRows(dnodenumber) :
print("dnode is %d nodes"%dnodenumber)
for i in range(dnodenumber):
if tdSql.queryResult[i][4] !='ready' :
status=tdSql.queryResult[i][4]
print("dnode:%d status is %s "%(i,status))
break
else:
statusReadyBumber+=1
print(statusReadyBumber)
if statusReadyBumber == dnodenumber :
print("all of %d mnodes is ready in 10s "%dnodenumber)
return True
break
count+=1
else:
print("%d mnodes is not ready in 10s "%dnodenumber)
return False
def check3mnode(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("select * from information_schema.ins_mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("three mnodes is ready in 10s")
break
count+=1
else:
print("three mnodes is not ready in 10s ")
return -1
tdSql.query("select * from information_schema.ins_mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,3,'ready')
def check3mnode1off(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("select * from information_schema.ins_mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='offline' :
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("stop mnodes on dnode 2 successfully in 10s")
break
elif tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("stop mnodes on dnode 2 successfully in 10s")
break
count+=1
else:
print("stop mnodes on dnode 2 failed in 10s ")
return -1
tdSql.error("drop mnode on dnode 1;")
tdSql.query("select * from information_schema.ins_mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,3,'ready')
def check3mnode2off(self):
count=0
while count < 40:
time.sleep(1)
tdSql.query("select * from information_schema.ins_mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='offline':
if tdSql.queryResult[2][2]=='follower':
print("stop mnodes on dnode 2 successfully in 10s")
break
count+=1
else:
print("stop mnodes on dnode 2 failed in 10s ")
return -1
tdSql.error("drop mnode on dnode 2;")
tdSql.query("select * from information_schema.ins_mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,2,'offline')
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,2,'follower')
tdSql.checkData(2,3,'ready')
def check3mnode3off(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("select * from information_schema.ins_mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[2][2]=='offline':
if tdSql.queryResult[1][2]=='follower':
print("stop mnodes on dnode 3 successfully in 10s")
break
count+=1
else:
print("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
tdSql.query("select * from information_schema.ins_mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,2,'follower')
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,2,'offline')
tdSql.checkData(2,3,'ready')
def check5dnode(self):
tdSql.query("select * from information_schema.ins_dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
def five_dnode_three_mnode(self,dnodenumber):
tdSql.query("select * from information_schema.ins_dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("select * from information_schema.ins_mnodes;")
tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
# first add three mnodes;
tdSql.execute("create mnode on dnode 2")
tdSql.execute("create mnode on dnode 3")
# first check that the status is ready
self.check3mnode()
tdSql.error("create mnode on dnode 2")
tdSql.query("select * from information_schema.ins_dnodes;")
print(tdSql.queryResult)
tdLog.debug("stop all of mnode ")
# separate vnodes and mnodes onto different dnodes.
# create database and stable
stopcount =0
while stopcount < 2:
for i in range(dnodenumber):
# threads=[]
# threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
threads=threading.Thread(target=self.insert_data, args=(i,i+1))
threads.start()
self.TDDnodes.stoptaosd(i+1)
self.TDDnodes.starttaosd(i+1)
if self.checkdnodes(5):
print("123")
threads.join()
else:
print("456")
threads.join()
self.stop_thread(threads)
assert 1 == 2 ,"some dnode started failed"
return False
# self.check3mnode()
self.check3mnode()
stopcount+=1
self.check3mnode()
def run(self):
# print(self.master_dnode.cfgDict)
self.five_dnode_three_mnode(5)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -64,7 +64,7 @@ class ClusterComCheck:
dbNumbers=int(dbNumbers)
count=0
while count < 5:
tdSql.query("select * from information_schema.ins_databases;")
tdSql.query("select * from information_schema.ins_databases where name!='collectd' ;")
count+=1
if tdSql.checkRows(dbNumbers+2):
tdLog.success("we find %d databases and expect %d in clusters! " %(tdSql.queryRows,dbNumbers+2))

View File

@ -137,12 +137,24 @@ class ClusterComCreate:
# for i in range(dbNumbers):
for i in range(dbNumbers):
if dropFlag == 1:
tsql.execute("drop database if exists %s_%d"%(dbNameIndex,1))
tdLog.debug("create database if not exists %s_%d vgroups %d replica %d"%(dbNameIndex,1, vgroups, replica))
tsql.execute("create database if not exists %s_%d vgroups %d replica %d"%(dbNameIndex,1, vgroups, replica))
tdLog.debug("complete to create database %s_%d"%(dbNameIndex,1))
tsql.execute("drop database if exists %s_%d"%(dbNameIndex,i))
tdLog.debug("create database if not exists %s_%d vgroups %d replica %d"%(dbNameIndex,i, vgroups, replica))
tsql.execute("create database if not exists %s_%d vgroups %d replica %d"%(dbNameIndex,i, vgroups, replica))
tdLog.debug("complete to create database %s_%d"%(dbNameIndex,i))
def createUser(self,tsql,user,password):
tdLog.info(f"create new user f{user}")
tsql.execute(f"CREATE USER {user} PASS '{password}';")
def alterUser(self,tsql,user,password):
tdLog.info(f"alter user {user} pass '{password}'")
tsql.execute(f"alter USER {user} pass '{password}' ;")
def deleteUser(self,tsql,user):
tdLog.info(f"drop user f{user}")
tsql.execute(f"DROP USER {user} ;")
def create_stable(self,tsql, dbName,stbName):
tsql.execute("create table if not exists %s.%s (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbName, stbName))
tdLog.debug("complete to create %s.%s" %(dbName, stbName))
@ -202,6 +214,52 @@ class ClusterComCreate:
tdLog.debug("insert data ............ [OK]")
return
def alterStbMetaData(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=None):
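# run a series of schema changes on a live stable (modify/add/drop columns, rename/drop/add/modify tags,
# plus one ADD TAG that must fail), then insert another rowsPerTbl rows per child table against the altered schema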
tdLog.debug("alter Stb column ............")
tdLog.debug(f"ALTER STABLE {dbName}.{stbName} MODIFY COLUMN c3 binary(20);")
tsql.execute(f" ALTER STABLE {dbName}.{stbName} MODIFY COLUMN c3 binary(20);")
tdLog.debug(f"ALTER STABLE {dbName}.{stbName} ADD COLUMN c4 DOUBLE;")
tsql.execute(f" ALTER STABLE {dbName}.{stbName} ADD COLUMN c4 DOUBLE;")
tdLog.debug(f"ALTER STABLE {dbName}.{stbName} DROP COLUMN c2;")
tsql.execute(f" ALTER STABLE {dbName}.{stbName} DROP COLUMN c2;")
tdLog.debug(f"ALTER STABLE {dbName}.{stbName} RENAME TAG t1 t1r;")
tsql.execute(f" ALTER STABLE {dbName}.{stbName} RENAME TAG t1 t1r;")
tdLog.debug(f"ALTER STABLE {dbName}.{stbName} DROP TAG t2;")
tsql.execute(f" ALTER STABLE {dbName}.{stbName} DROP TAG t2;")
tdLog.debug(f"ALTER STABLE {dbName}.{stbName} ADD TAG t2 binary(32) ;")
tsql.execute(f" ALTER STABLE {dbName}.{stbName} ADD TAG t2 binary(32);")
tdLog.debug(f"ALTER STABLE {dbName}.{stbName} MODIFY TAG t2 binary(34) ;")
tsql.execute(f" ALTER STABLE {dbName}.{stbName} MODIFY TAG t2 binary(34);")
tdLog.debug(f"ALTER STABLE {dbName}.{stbName} ADD TAG t3 double ;")
tsql.execute(f" ALTER STABLE {dbName}.{stbName} ADD TAG t3 double;")
tsql.error(f" ALTER STABLE {dbName}.{stbName} ADD TAG t2 double;")
tdLog.debug("start to insert data ............")
# tsql.execute("use %s" %dbName)
pre_insert = "insert into "
sql = pre_insert
if startTs is None:
t = time.time()
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
for i in range(ctbNum):
sql += " %s.%s_%d values "%(dbName,stbName,i)
for j in range(rowsPerTbl):
sql += "(%d, %d,'mnode_%d', %d ) "%(startTs + j, j, j,j)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
tsql.execute(sql)
if j < rowsPerTbl - 1:
sql = "insert into %s.%s_%d values " %(dbName,stbName,i)
else:
sql = "insert into "
#end sql
if sql != pre_insert:
#print("insert sql:%s"%sql)
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
def insert_data_1(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs):
tdLog.debug("start to insert data ............")
tsql.execute("use %s" %dbName)

View File

@ -969,38 +969,43 @@ bool matchVarWord(SWord* word1, SWord* word2) {
// ------------------- match words --------------------------
//
// compare command cmd1 come from shellCommands , cmd2 come from user input
int32_t compareCommand(SWords* cmd1, SWords* cmd2) {
SWord* word1 = cmd1->head;
SWord* word2 = cmd2->head;
// compare commands: cmdPattern comes from shellCommands, cmdInput comes from the user input
int32_t compareCommand(SWords* cmdPattern, SWords* cmdInput) {
SWord* wordPattern = cmdPattern->head;
SWord* wordInput = cmdInput->head;
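// walk the pattern words and the input words in lock-step: a text word must match
// case-insensitively, and a prefix match is only accepted for the user's last
// (still partially typed) input word; a WT_VAR word matches any last input word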
if (word1 == NULL || word2 == NULL) {
if (wordPattern == NULL || wordInput == NULL) {
return -1;
}
for (int32_t i = 0; i < cmd1->count; i++) {
if (word1->type == WT_TEXT) {
for (int32_t i = 0; i < cmdPattern->count; i++) {
if (wordPattern->type == WT_TEXT) {
// WT_TEXT match
if (word1->len == word2->len) {
if (strncasecmp(word1->word, word2->word, word1->len) != 0) return -1;
} else if (word1->len < word2->len) {
if (wordPattern->len == wordInput->len) {
if (strncasecmp(wordPattern->word, wordInput->word, wordPattern->len) != 0) return -1;
} else if (wordPattern->len < wordInput->len) {
return -1;
} else {
// word1->len > word2->len
if (strncasecmp(word1->word, word2->word, word2->len) == 0) {
cmd1->matchIndex = i;
cmd1->matchLen = word2->len;
return i;
// wordPattern->len > wordInput->len
if (strncasecmp(wordPattern->word, wordInput->word, wordInput->len) == 0) {
if (i + 1 == cmdInput->count) {
// last word return match
cmdPattern->matchIndex = i;
cmdPattern->matchLen = wordInput->len;
return i;
} else {
return -1;
}
} else {
return -1;
}
}
} else {
// WT_VAR auto match any one word
if (word2->next == NULL) { // input words last one
if (matchVarWord(word1, word2)) {
cmd1->matchIndex = i;
cmd1->matchLen = word2->len;
if (wordInput->next == NULL) { // input words last one
if (matchVarWord(wordPattern, wordInput)) {
cmdPattern->matchIndex = i;
cmdPattern->matchLen = wordInput->len;
varMode = true;
return i;
}
@ -1009,9 +1014,9 @@ int32_t compareCommand(SWords* cmd1, SWords* cmd2) {
}
// move next
word1 = word1->next;
word2 = word2->next;
if (word1 == NULL || word2 == NULL) {
wordPattern = wordPattern->next;
wordInput = wordInput->next;
if (wordPattern == NULL || wordInput == NULL) {
return -1;
}
}