Merge branch '3.0' of github.com:taosdata/TDengine into 3.0
commit 16435c904b
@@ -55,7 +55,7 @@ enum {
enum {
  STREAM_INPUT__DATA_SUBMIT = 1,
  STREAM_INPUT__DATA_BLOCK,
  STREAM_INPUT__TABLE_SCAN,
  // STREAM_INPUT__TABLE_SCAN,
  STREAM_INPUT__TQ_SCAN,
  STREAM_INPUT__DATA_RETRIEVE,
  STREAM_INPUT__TRIGGER,
@@ -55,11 +55,11 @@ extern int32_t tMsgDict[];

#define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
#define TMSG_SEG_SEQ(TYPE)  ((TYPE)&0xff)
#define TMSG_INFO(TYPE) \
  ((TYPE) >= 0 && \
   ((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || (TYPE) < TDMT_SCH_MAX_MSG || \
    (TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || (TYPE) < TDMT_SYNC_MAX_MSG)) \
      ? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
#define TMSG_INFO(TYPE) \
  ((TYPE) >= 0 && ((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || \
                   (TYPE) < TDMT_SCH_MAX_MSG || (TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || \
                   (TYPE) < TDMT_SYNC_MAX_MSG)) \
      ? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
      : 0
#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
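The reformatted TMSG_INFO macro above still maps a message-type code to its registered name through the tMsgDict/tMsgInfo tables, falling back to 0 when the type is out of range. A minimal sketch of how such a lookup is typically used in a log line; the helper function and output format are illustrative assumptions, not part of this commit:

#include <stdio.h>

// Hypothetical helper: resolve a message type to a readable name for logging.
static void logUnhandledMsg(int32_t msgType) {
  const char *name = TMSG_INFO(msgType);  // NULL/0 when msgType is out of range
  printf("unhandled message type %d (%s)\n", msgType, name ? name : "unknown");
}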
@@ -815,6 +815,13 @@ typedef struct {
int32_t tSerializeSDbCfgReq(void* buf, int32_t bufLen, SDbCfgReq* pReq);
int32_t tDeserializeSDbCfgReq(void* buf, int32_t bufLen, SDbCfgReq* pReq);

typedef struct {
  char db[TSDB_DB_FNAME_LEN];
} STrimDbReq;

int32_t tSerializeSTrimDbReq(void* buf, int32_t bufLen, STrimDbReq* pReq);
int32_t tDeserializeSTrimDbReq(void* buf, int32_t bufLen, STrimDbReq* pReq);

typedef struct {
  int32_t numOfVgroups;
  int32_t numOfStables;
@@ -116,6 +116,7 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_MND_ALTER_DB, "alter-db", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_SYNC_DB, "sync-db", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_COMPACT_DB, "compact-db", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB, "trim-db", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_GET_DB_CFG, "get-db-cfg", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_VGROUP_LIST, "vgroup-list", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_CREATE_FUNC, "create-func", NULL, NULL)
@@ -74,201 +74,202 @@
#define TK_DATABASE 56
#define TK_USE 57
#define TK_FLUSH 58
#define TK_IF 59
#define TK_NOT 60
#define TK_EXISTS 61
#define TK_BUFFER 62
#define TK_CACHELAST 63
#define TK_CACHELASTSIZE 64
#define TK_COMP 65
#define TK_DURATION 66
#define TK_NK_VARIABLE 67
#define TK_FSYNC 68
#define TK_MAXROWS 69
#define TK_MINROWS 70
#define TK_KEEP 71
#define TK_PAGES 72
#define TK_PAGESIZE 73
#define TK_PRECISION 74
#define TK_REPLICA 75
#define TK_STRICT 76
#define TK_WAL 77
#define TK_VGROUPS 78
#define TK_SINGLE_STABLE 79
#define TK_RETENTIONS 80
#define TK_SCHEMALESS 81
#define TK_NK_COLON 82
#define TK_TABLE 83
#define TK_NK_LP 84
#define TK_NK_RP 85
#define TK_STABLE 86
#define TK_ADD 87
#define TK_COLUMN 88
#define TK_MODIFY 89
#define TK_RENAME 90
#define TK_TAG 91
#define TK_SET 92
#define TK_NK_EQ 93
#define TK_USING 94
#define TK_TAGS 95
#define TK_COMMENT 96
#define TK_BOOL 97
#define TK_TINYINT 98
#define TK_SMALLINT 99
#define TK_INT 100
#define TK_INTEGER 101
#define TK_BIGINT 102
#define TK_FLOAT 103
#define TK_DOUBLE 104
#define TK_BINARY 105
#define TK_TIMESTAMP 106
#define TK_NCHAR 107
#define TK_UNSIGNED 108
#define TK_JSON 109
#define TK_VARCHAR 110
#define TK_MEDIUMBLOB 111
#define TK_BLOB 112
#define TK_VARBINARY 113
#define TK_DECIMAL 114
#define TK_MAX_DELAY 115
#define TK_WATERMARK 116
#define TK_ROLLUP 117
#define TK_TTL 118
#define TK_SMA 119
#define TK_FIRST 120
#define TK_LAST 121
#define TK_SHOW 122
#define TK_DATABASES 123
#define TK_TABLES 124
#define TK_STABLES 125
#define TK_MNODES 126
#define TK_MODULES 127
#define TK_QNODES 128
#define TK_FUNCTIONS 129
#define TK_INDEXES 130
#define TK_ACCOUNTS 131
#define TK_APPS 132
#define TK_CONNECTIONS 133
#define TK_LICENCE 134
#define TK_GRANTS 135
#define TK_QUERIES 136
#define TK_SCORES 137
#define TK_TOPICS 138
#define TK_VARIABLES 139
#define TK_BNODES 140
#define TK_SNODES 141
#define TK_CLUSTER 142
#define TK_TRANSACTIONS 143
#define TK_DISTRIBUTED 144
#define TK_CONSUMERS 145
#define TK_SUBSCRIPTIONS 146
#define TK_LIKE 147
#define TK_INDEX 148
#define TK_FUNCTION 149
#define TK_INTERVAL 150
#define TK_TOPIC 151
#define TK_AS 152
#define TK_WITH 153
#define TK_META 154
#define TK_CONSUMER 155
#define TK_GROUP 156
#define TK_DESC 157
#define TK_DESCRIBE 158
#define TK_RESET 159
#define TK_QUERY 160
#define TK_CACHE 161
#define TK_EXPLAIN 162
#define TK_ANALYZE 163
#define TK_VERBOSE 164
#define TK_NK_BOOL 165
#define TK_RATIO 166
#define TK_NK_FLOAT 167
#define TK_COMPACT 168
#define TK_VNODES 169
#define TK_IN 170
#define TK_OUTPUTTYPE 171
#define TK_AGGREGATE 172
#define TK_BUFSIZE 173
#define TK_STREAM 174
#define TK_INTO 175
#define TK_TRIGGER 176
#define TK_AT_ONCE 177
#define TK_WINDOW_CLOSE 178
#define TK_IGNORE 179
#define TK_EXPIRED 180
#define TK_KILL 181
#define TK_CONNECTION 182
#define TK_TRANSACTION 183
#define TK_BALANCE 184
#define TK_VGROUP 185
#define TK_MERGE 186
#define TK_REDISTRIBUTE 187
#define TK_SPLIT 188
#define TK_SYNCDB 189
#define TK_DELETE 190
#define TK_INSERT 191
#define TK_NULL 192
#define TK_NK_QUESTION 193
#define TK_NK_ARROW 194
#define TK_ROWTS 195
#define TK_TBNAME 196
#define TK_QSTARTTS 197
#define TK_QENDTS 198
#define TK_WSTARTTS 199
#define TK_WENDTS 200
#define TK_WDURATION 201
#define TK_CAST 202
#define TK_NOW 203
#define TK_TODAY 204
#define TK_TIMEZONE 205
#define TK_CLIENT_VERSION 206
#define TK_SERVER_VERSION 207
#define TK_SERVER_STATUS 208
#define TK_CURRENT_USER 209
#define TK_COUNT 210
#define TK_LAST_ROW 211
#define TK_BETWEEN 212
#define TK_IS 213
#define TK_NK_LT 214
#define TK_NK_GT 215
#define TK_NK_LE 216
#define TK_NK_GE 217
#define TK_NK_NE 218
#define TK_MATCH 219
#define TK_NMATCH 220
#define TK_CONTAINS 221
#define TK_JOIN 222
#define TK_INNER 223
#define TK_SELECT 224
#define TK_DISTINCT 225
#define TK_WHERE 226
#define TK_PARTITION 227
#define TK_BY 228
#define TK_SESSION 229
#define TK_STATE_WINDOW 230
#define TK_SLIDING 231
#define TK_FILL 232
#define TK_VALUE 233
#define TK_NONE 234
#define TK_PREV 235
#define TK_LINEAR 236
#define TK_NEXT 237
#define TK_HAVING 238
#define TK_RANGE 239
#define TK_EVERY 240
#define TK_ORDER 241
#define TK_SLIMIT 242
#define TK_SOFFSET 243
#define TK_LIMIT 244
#define TK_OFFSET 245
#define TK_ASC 246
#define TK_NULLS 247
#define TK_ID 248
#define TK_NK_BITNOT 249
#define TK_VALUES 250
#define TK_IMPORT 251
#define TK_NK_SEMI 252
#define TK_FILE 253
#define TK_TRIM 59
#define TK_IF 60
#define TK_NOT 61
#define TK_EXISTS 62
#define TK_BUFFER 63
#define TK_CACHELAST 64
#define TK_CACHELASTSIZE 65
#define TK_COMP 66
#define TK_DURATION 67
#define TK_NK_VARIABLE 68
#define TK_FSYNC 69
#define TK_MAXROWS 70
#define TK_MINROWS 71
#define TK_KEEP 72
#define TK_PAGES 73
#define TK_PAGESIZE 74
#define TK_PRECISION 75
#define TK_REPLICA 76
#define TK_STRICT 77
#define TK_WAL 78
#define TK_VGROUPS 79
#define TK_SINGLE_STABLE 80
#define TK_RETENTIONS 81
#define TK_SCHEMALESS 82
#define TK_NK_COLON 83
#define TK_TABLE 84
#define TK_NK_LP 85
#define TK_NK_RP 86
#define TK_STABLE 87
#define TK_ADD 88
#define TK_COLUMN 89
#define TK_MODIFY 90
#define TK_RENAME 91
#define TK_TAG 92
#define TK_SET 93
#define TK_NK_EQ 94
#define TK_USING 95
#define TK_TAGS 96
#define TK_COMMENT 97
#define TK_BOOL 98
#define TK_TINYINT 99
#define TK_SMALLINT 100
#define TK_INT 101
#define TK_INTEGER 102
#define TK_BIGINT 103
#define TK_FLOAT 104
#define TK_DOUBLE 105
#define TK_BINARY 106
#define TK_TIMESTAMP 107
#define TK_NCHAR 108
#define TK_UNSIGNED 109
#define TK_JSON 110
#define TK_VARCHAR 111
#define TK_MEDIUMBLOB 112
#define TK_BLOB 113
#define TK_VARBINARY 114
#define TK_DECIMAL 115
#define TK_MAX_DELAY 116
#define TK_WATERMARK 117
#define TK_ROLLUP 118
#define TK_TTL 119
#define TK_SMA 120
#define TK_FIRST 121
#define TK_LAST 122
#define TK_SHOW 123
#define TK_DATABASES 124
#define TK_TABLES 125
#define TK_STABLES 126
#define TK_MNODES 127
#define TK_MODULES 128
#define TK_QNODES 129
#define TK_FUNCTIONS 130
#define TK_INDEXES 131
#define TK_ACCOUNTS 132
#define TK_APPS 133
#define TK_CONNECTIONS 134
#define TK_LICENCE 135
#define TK_GRANTS 136
#define TK_QUERIES 137
#define TK_SCORES 138
#define TK_TOPICS 139
#define TK_VARIABLES 140
#define TK_BNODES 141
#define TK_SNODES 142
#define TK_CLUSTER 143
#define TK_TRANSACTIONS 144
#define TK_DISTRIBUTED 145
#define TK_CONSUMERS 146
#define TK_SUBSCRIPTIONS 147
#define TK_LIKE 148
#define TK_INDEX 149
#define TK_FUNCTION 150
#define TK_INTERVAL 151
#define TK_TOPIC 152
#define TK_AS 153
#define TK_WITH 154
#define TK_META 155
#define TK_CONSUMER 156
#define TK_GROUP 157
#define TK_DESC 158
#define TK_DESCRIBE 159
#define TK_RESET 160
#define TK_QUERY 161
#define TK_CACHE 162
#define TK_EXPLAIN 163
#define TK_ANALYZE 164
#define TK_VERBOSE 165
#define TK_NK_BOOL 166
#define TK_RATIO 167
#define TK_NK_FLOAT 168
#define TK_COMPACT 169
#define TK_VNODES 170
#define TK_IN 171
#define TK_OUTPUTTYPE 172
#define TK_AGGREGATE 173
#define TK_BUFSIZE 174
#define TK_STREAM 175
#define TK_INTO 176
#define TK_TRIGGER 177
#define TK_AT_ONCE 178
#define TK_WINDOW_CLOSE 179
#define TK_IGNORE 180
#define TK_EXPIRED 181
#define TK_KILL 182
#define TK_CONNECTION 183
#define TK_TRANSACTION 184
#define TK_BALANCE 185
#define TK_VGROUP 186
#define TK_MERGE 187
#define TK_REDISTRIBUTE 188
#define TK_SPLIT 189
#define TK_SYNCDB 190
#define TK_DELETE 191
#define TK_INSERT 192
#define TK_NULL 193
#define TK_NK_QUESTION 194
#define TK_NK_ARROW 195
#define TK_ROWTS 196
#define TK_TBNAME 197
#define TK_QSTARTTS 198
#define TK_QENDTS 199
#define TK_WSTARTTS 200
#define TK_WENDTS 201
#define TK_WDURATION 202
#define TK_CAST 203
#define TK_NOW 204
#define TK_TODAY 205
#define TK_TIMEZONE 206
#define TK_CLIENT_VERSION 207
#define TK_SERVER_VERSION 208
#define TK_SERVER_STATUS 209
#define TK_CURRENT_USER 210
#define TK_COUNT 211
#define TK_LAST_ROW 212
#define TK_BETWEEN 213
#define TK_IS 214
#define TK_NK_LT 215
#define TK_NK_GT 216
#define TK_NK_LE 217
#define TK_NK_GE 218
#define TK_NK_NE 219
#define TK_MATCH 220
#define TK_NMATCH 221
#define TK_CONTAINS 222
#define TK_JOIN 223
#define TK_INNER 224
#define TK_SELECT 225
#define TK_DISTINCT 226
#define TK_WHERE 227
#define TK_PARTITION 228
#define TK_BY 229
#define TK_SESSION 230
#define TK_STATE_WINDOW 231
#define TK_SLIDING 232
#define TK_FILL 233
#define TK_VALUE 234
#define TK_NONE 235
#define TK_PREV 236
#define TK_LINEAR 237
#define TK_NEXT 238
#define TK_HAVING 239
#define TK_RANGE 240
#define TK_EVERY 241
#define TK_ORDER 242
#define TK_SLIMIT 243
#define TK_SOFFSET 244
#define TK_LIMIT 245
#define TK_OFFSET 246
#define TK_ASC 247
#define TK_NULLS 248
#define TK_ID 249
#define TK_NK_BITNOT 250
#define TK_VALUES 251
#define TK_IMPORT 252
#define TK_NK_SEMI 253
#define TK_FILE 254

#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301
@@ -103,6 +103,11 @@ typedef struct SFlushDatabaseStmt {
  char dbName[TSDB_DB_NAME_LEN];
} SFlushDatabaseStmt;

typedef struct STrimDatabaseStmt {
  ENodeType type;
  char dbName[TSDB_DB_NAME_LEN];
} STrimDatabaseStmt;

typedef struct STableOptions {
  ENodeType type;
  bool commentNull;

@@ -112,6 +112,7 @@ typedef enum ENodeType {
  QUERY_NODE_DROP_DATABASE_STMT,
  QUERY_NODE_ALTER_DATABASE_STMT,
  QUERY_NODE_FLUSH_DATABASE_STMT,
  QUERY_NODE_TRIM_DATABASE_STMT,
  QUERY_NODE_CREATE_TABLE_STMT,
  QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
  QUERY_NODE_CREATE_MULTI_TABLE_STMT,

@@ -124,6 +124,7 @@ typedef struct SWal {
typedef struct {
  int8_t scanUncommited;
  int8_t scanMeta;
  int8_t enableRef;
} SWalFilterCond;

typedef struct {

@@ -69,13 +69,14 @@ typedef void (*_ref_fn_t)(const void *pObj);
#define T_REF_VAL_GET(x) (x)->_ref.val

// single writer multiple reader lock
typedef volatile int32_t SRWLatch;
typedef volatile int64_t SRWLatch;

void taosInitRWLatch(SRWLatch *pLatch);
void taosWLockLatch(SRWLatch *pLatch);
void taosWUnLockLatch(SRWLatch *pLatch);
void taosRLockLatch(SRWLatch *pLatch);
void taosRUnLockLatch(SRWLatch *pLatch);
void taosInitRWLatch(SRWLatch *pLatch);
void taosInitReentrantRWLatch(SRWLatch *pLatch);
void taosWLockLatch(SRWLatch *pLatch);
void taosWUnLockLatch(SRWLatch *pLatch);
void taosRLockLatch(SRWLatch *pLatch);
void taosRUnLockLatch(SRWLatch *pLatch);
int32_t taosWTryLockLatch(SRWLatch *pLatch);

// copy on read
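The hunk above widens SRWLatch from int32_t to int64_t and adds a reentrant initializer plus a write try-lock. A minimal sketch of the usual single-writer/multiple-reader usage pattern built only on the declarations shown above; the SCounter struct and helper functions are illustrative assumptions, not part of this commit:

// Hypothetical shared structure protected by an SRWLatch.
typedef struct {
  SRWLatch lock;  // initialize once with taosInitRWLatch(&c.lock)
  int64_t  cnt;
} SCounter;

static void counterInc(SCounter *pC) {
  taosWLockLatch(&pC->lock);  // exclusive writer
  pC->cnt += 1;
  taosWUnLockLatch(&pC->lock);
}

static int64_t counterRead(SCounter *pC) {
  taosRLockLatch(&pC->lock);  // shared readers
  int64_t v = pC->cnt;
  taosRUnLockLatch(&pC->lock);
  return v;
}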
@@ -305,18 +305,23 @@ function install_lib() {
      ${install_main_dir}/driver &&
      ${csudo}chmod 777 ${install_main_dir}/driver/libtaos.so.${verNumber}

    ${csudo}cp ${binary_dir}/build/lib/libtaosws.so \
      ${install_main_dir}/driver &&
      ${csudo}chmod 777 ${install_main_dir}/driver/libtaosws.so

    ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
    ${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
    ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :

    if [ -d "${lib64_link_dir}" ]; then
      ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1
      ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
      ${csudo}ln -sf ${lib64_link_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
      ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1
      ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
    fi

    if [ -f ${binary_dir}/build/lib/libtaosws.so ]; then
      ${csudo}cp ${binary_dir}/build/lib/libtaosws.so \
        ${install_main_dir}/driver &&
        ${csudo}chmod 777 ${install_main_dir}/driver/libtaosws.so ||:

      ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :

      if [ -d "${lib64_link_dir}" ]; then
        ${csudo}ln -sf ${lib64_link_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
      fi
    fi
  else
    ${csudo}cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib \

@@ -357,26 +362,26 @@ function install_header() {

  if [ "$osType" != "Darwin" ]; then
    ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
    ${csudo}rm -f ${inc_link_dir}/taosws.h ||:
    ${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
    ${csudo}rm -f ${inc_link_dir}/taosws.h || :

    ${csudo}cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \
      ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*

    ${csudo}cp -f ${binary_dir}/build/include/taosws.h ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/taosws.h
    if [ -f ${binary_dir}/build/include/taosws.h ]; then
      ${csudo}cp -f ${binary_dir}/build/include/taosws.h ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/taosws.h ||:
      ${csudo}ln -s ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h ||:
    fi

    ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
    ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
    ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
    ${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h

    ${csudo}ln -s ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h || :
  else
    ${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
      ${install_main_dir}/include ||
    ${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
      ${install_main_2_dir}/include &&
    ${csudo}chmod 644 ${install_main_dir}/include/* ||
    ${csudo}chmod 644 ${install_main_dir}/include/* ||:
    ${csudo}chmod 644 ${install_main_2_dir}/include/*
  fi
}
@@ -901,6 +901,8 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {

  tmq_t* pTmq = taosMemoryCalloc(1, sizeof(tmq_t));
  if (pTmq == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    tscError("consumer %ld setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId);
    return NULL;
  }

@@ -917,6 +919,8 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
  pTmq->delayedTask = taosOpenQueue();

  if (pTmq->clientTopics == NULL || pTmq->mqueue == NULL || pTmq->qall == NULL || pTmq->delayedTask == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    tscError("consumer %ld setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId);
    goto FAIL;
  }

@@ -943,12 +947,14 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {

  // init semaphore
  if (tsem_init(&pTmq->rspSem, 0, 0) != 0) {
    tscError("consumer %ld setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId);
    goto FAIL;
  }

  // init connection
  pTmq->pTscObj = taos_connect_internal(conf->ip, user, pass, NULL, NULL, conf->port, CONN_TYPE__TMQ);
  if (pTmq->pTscObj == NULL) {
    tscError("consumer %ld setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId);
    tsem_destroy(&pTmq->rspSem);
    goto FAIL;
  }
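The hunks above harden the failure paths inside tmq_consumer_new. A minimal caller-side sketch of how that function is typically reached through the TMQ client API; the configuration keys and values shown are illustrative assumptions and may differ by version:

// Hypothetical client-side consumer creation exercising the error paths above.
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "group.id", "grp1");            // consumer group (illustrative)
tmq_conf_set(conf, "td.connect.user", "root");     // connection settings (illustrative)
tmq_conf_set(conf, "td.connect.pass", "taosdata");

char   errstr[256] = {0};
tmq_t* tmq = tmq_consumer_new(conf, errstr, sizeof(errstr));
if (tmq == NULL) {
  // creation failed: terrno/errstr carry the reason logged by tscError above
  printf("tmq_consumer_new failed: %s\n", errstr);
}
tmq_conf_destroy(conf);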
@@ -463,6 +463,7 @@ SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int3

  pDst->info = pBlock->info;
  pDst->info.rows = 0;
  pDst->info.capacity = 0;
  size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
  for (int32_t i = 0; i < numOfCols; ++i) {
    SColumnInfoData colInfo = {0};

@@ -55,7 +55,7 @@ int32_t tsNumOfMnodeQueryThreads = 2;
int32_t tsNumOfMnodeFetchThreads = 1;
int32_t tsNumOfMnodeReadThreads = 1;
int32_t tsNumOfVnodeQueryThreads = 2;
int32_t tsNumOfVnodeFetchThreads = 1;
int32_t tsNumOfVnodeFetchThreads = 4;
int32_t tsNumOfVnodeWriteThreads = 2;
int32_t tsNumOfVnodeSyncThreads = 2;
int32_t tsNumOfVnodeMergeThreads = 2;

@@ -190,7 +190,6 @@ int32_t tsMqRebalanceInterval = 2;
int32_t tsTtlUnit = 86400;
int32_t tsTtlPushInterval = 60;

void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) {
  tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN);
  tsDiskCfg[index].level = level;

@@ -292,15 +291,14 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) {
  if (cfgAddInt32(pCfg, "numOfLogLines", tsNumOfLogLines, 1000, 2000000000, 1) != 0) return -1;
  if (cfgAddBool(pCfg, "asyncLog", tsAsyncLog, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "logKeepDays", 0, -365000, 365000, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "cDebugFlag", cDebugFlag, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "tmrDebugFlag", tmrDebugFlag, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "uDebugFlag", uDebugFlag, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "rpcDebugFlag", rpcDebugFlag, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "qDebugFlag", qDebugFlag, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "tmrDebugFlag", tmrDebugFlag, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "qDebugFlag", qDebugFlag, 0, 255, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "cDebugFlag", cDebugFlag, 0, 255, 1) != 0) return -1;
  return 0;
}

@@ -308,7 +306,6 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
  if (cfgAddInt32(pCfg, "dDebugFlag", dDebugFlag, 0, 255, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "vDebugFlag", vDebugFlag, 0, 255, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "mDebugFlag", mDebugFlag, 0, 255, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "qDebugFlag", qDebugFlag, 0, 255, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "wDebugFlag", wDebugFlag, 0, 255, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, 0) != 0) return -1;

@@ -470,7 +467,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {

  if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400*365, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 10000, 1) != 0) return -1;

  if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1;

@@ -485,20 +482,18 @@ static void taosSetClientLogCfg(SConfig *pCfg) {
  tsNumOfLogLines = cfgGetItem(pCfg, "numOfLogLines")->i32;
  tsAsyncLog = cfgGetItem(pCfg, "asyncLog")->bval;
  tsLogKeepDays = cfgGetItem(pCfg, "logKeepDays")->i32;
  cDebugFlag = cfgGetItem(pCfg, "cDebugFlag")->i32;
  uDebugFlag = cfgGetItem(pCfg, "uDebugFlag")->i32;
  qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32;
  rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32;
  tmrDebugFlag = cfgGetItem(pCfg, "tmrDebugFlag")->i32;
  uDebugFlag = cfgGetItem(pCfg, "uDebugFlag")->i32;
  jniDebugFlag = cfgGetItem(pCfg, "jniDebugFlag")->i32;
  idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32;
  rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32;
  qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32;
  cDebugFlag = cfgGetItem(pCfg, "cDebugFlag")->i32;
}

static void taosSetServerLogCfg(SConfig *pCfg) {
  dDebugFlag = cfgGetItem(pCfg, "dDebugFlag")->i32;
  vDebugFlag = cfgGetItem(pCfg, "vDebugFlag")->i32;
  mDebugFlag = cfgGetItem(pCfg, "mDebugFlag")->i32;
  qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32;
  wDebugFlag = cfgGetItem(pCfg, "wDebugFlag")->i32;
  sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32;
  tsdbDebugFlag = cfgGetItem(pCfg, "tsdbDebugFlag")->i32;

@@ -636,7 +631,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
  return 0;
}

int32_t taosSetCfg(SConfig *pCfg, char* name) {
int32_t taosSetCfg(SConfig *pCfg, char *name) {
  int32_t len = strlen(name);
  char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0};
  strntolower(lowcaseName, name, TMIN(CFG_NAME_MAX_LEN, len));

@@ -666,7 +661,7 @@ int32_t taosSetCfg(SConfig *pCfg, char* name) {
        tsCompressColData = cfgGetItem(pCfg, "compressColData")->i32;
      } else if (strcasecmp("countAlwaysReturnValue", name) == 0) {
        tsCountAlwaysReturnValue = cfgGetItem(pCfg, "countAlwaysReturnValue")->i32;
      } else if (strcasecmp("cDebugFlag", name) == 0) {
      } else if (strcasecmp("cDebugFlag", name) == 0) {
        cDebugFlag = cfgGetItem(pCfg, "cDebugFlag")->i32;
      }
      break;

@@ -691,10 +686,10 @@ int32_t taosSetCfg(SConfig *pCfg, char* name) {
      tstrncpy(tsLocalFqdn, cfgGetItem(pCfg, "fqdn")->str, TSDB_FQDN_LEN);
      tsServerPort = (uint16_t)cfgGetItem(pCfg, "serverPort")->i32;
      snprintf(tsLocalEp, sizeof(tsLocalEp), "%s:%u", tsLocalFqdn, tsServerPort);

      char defaultFirstEp[TSDB_EP_LEN] = {0};
      snprintf(defaultFirstEp, TSDB_EP_LEN, "%s:%u", tsLocalFqdn, tsServerPort);

      SConfigItem *pFirstEpItem = cfgGetItem(pCfg, "firstEp");
      SEp firstEp = {0};
      taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);

@@ -704,10 +699,10 @@ int32_t taosSetCfg(SConfig *pCfg, char* name) {
      tstrncpy(tsLocalFqdn, cfgGetItem(pCfg, "fqdn")->str, TSDB_FQDN_LEN);
      tsServerPort = (uint16_t)cfgGetItem(pCfg, "serverPort")->i32;
      snprintf(tsLocalEp, sizeof(tsLocalEp), "%s:%u", tsLocalFqdn, tsServerPort);

      char defaultFirstEp[TSDB_EP_LEN] = {0};
      snprintf(defaultFirstEp, TSDB_EP_LEN, "%s:%u", tsLocalFqdn, tsServerPort);

      SConfigItem *pFirstEpItem = cfgGetItem(pCfg, "firstEp");
      SEp firstEp = {0};
      taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);

@@ -778,7 +773,7 @@ int32_t taosSetCfg(SConfig *pCfg, char* name) {
      } else if (strcasecmp("minSlidingTime", name) == 0) {
        tsMinSlidingTime = cfgGetItem(pCfg, "minSlidingTime")->i32;
      } else if (strcasecmp("minIntervalTime", name) == 0) {
        tsMinIntervalTime = cfgGetItem(pCfg, "minIntervalTime")->i32;
        tsMinIntervalTime = cfgGetItem(pCfg, "minIntervalTime")->i32;
      } else if (strcasecmp("minimalLogDirGB", name) == 0) {
        tsLogSpace.reserved = cfgGetItem(pCfg, "minimalLogDirGB")->fval;
      }

@@ -924,10 +919,10 @@ int32_t taosSetCfg(SConfig *pCfg, char* name) {
      tstrncpy(tsLocalFqdn, cfgGetItem(pCfg, "fqdn")->str, TSDB_FQDN_LEN);
      tsServerPort = (uint16_t)cfgGetItem(pCfg, "serverPort")->i32;
      snprintf(tsLocalEp, sizeof(tsLocalEp), "%s:%u", tsLocalFqdn, tsServerPort);

      char defaultFirstEp[TSDB_EP_LEN] = {0};
      snprintf(defaultFirstEp, TSDB_EP_LEN, "%s:%u", tsLocalFqdn, tsServerPort);

      SConfigItem *pFirstEpItem = cfgGetItem(pCfg, "firstEp");
      SEp firstEp = {0};
      taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);

@@ -999,14 +994,13 @@ int32_t taosSetCfg(SConfig *pCfg, char* name) {
      break;
    }
    default:
      terrno = TSDB_CODE_CFG_NOT_FOUND;
      terrno = TSDB_CODE_CFG_NOT_FOUND;
      return -1;
  }

  return 0;
}

int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd,
                      const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc) {
  if (tsCfg == NULL) osDefaultInit();
@@ -2647,6 +2647,31 @@ int32_t tDeserializeSDbCfgReq(void *buf, int32_t bufLen, SDbCfgReq *pReq) {
  return 0;
}

int32_t tSerializeSTrimDbReq(void *buf, int32_t bufLen, STrimDbReq *pReq) {
  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, bufLen);

  if (tStartEncode(&encoder) < 0) return -1;
  if (tEncodeCStr(&encoder, pReq->db) < 0) return -1;
  tEndEncode(&encoder);

  int32_t tlen = encoder.pos;
  tEncoderClear(&encoder);
  return tlen;
}

int32_t tDeserializeSTrimDbReq(void *buf, int32_t bufLen, STrimDbReq *pReq) {
  SDecoder decoder = {0};
  tDecoderInit(&decoder, buf, bufLen);

  if (tStartDecode(&decoder) < 0) return -1;
  if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1;
  tEndDecode(&decoder);

  tDecoderClear(&decoder);
  return 0;
}

int32_t tSerializeSDbCfgRsp(void *buf, int32_t bufLen, const SDbCfgRsp *pRsp) {
  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, bufLen);
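The new tSerializeSTrimDbReq/tDeserializeSTrimDbReq pair above follows the usual request-codec pattern: encode into a caller-provided buffer and return the encoded length, then decode from that buffer on the receiving side. A minimal round-trip sketch under that assumption; the buffer size and database name are illustrative only:

// Hypothetical round trip for the trim-db request using the codecs above.
STrimDbReq req = {0};
tstrncpy(req.db, "1.testdb", TSDB_DB_FNAME_LEN);  // fully-qualified db name (illustrative)

char    buf[256] = {0};
int32_t len = tSerializeSTrimDbReq(buf, sizeof(buf), &req);  // encoded length, or -1 on error

STrimDbReq out = {0};
if (len < 0 || tDeserializeSTrimDbReq(buf, len, &out) < 0) {
  // handle encode/decode failure
}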
@@ -15,11 +15,11 @@

#define _DEFAULT_SOURCE
#include "mndConsumer.h"
#include "mndPrivilege.h"
#include "mndDb.h"
#include "mndDnode.h"
#include "mndMnode.h"
#include "mndOffset.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndStb.h"
#include "mndSubscribe.h"

@@ -435,17 +435,6 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
        goto SUBSCRIBE_OVER;
      }

#if 0
      // ref topic to prevent drop
      // TODO make topic complete
      SMqTopicObj topicObj = {0};
      memcpy(&topicObj, pTopic, sizeof(SMqTopicObj));
      topicObj.refConsumerCnt = pTopic->refConsumerCnt + 1;
      mInfo("subscribe topic %s by consumer:%" PRId64 ",cgroup %s, refcnt %d", pTopic->name, consumerId, cgroup,
            topicObj.refConsumerCnt);
      if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto SUBSCRIBE_OVER;
#endif

      mndReleaseTopic(pMnode, pTopic);
    }

@@ -472,8 +461,8 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {

    int32_t status = atomic_load_32(&pConsumerOld->status);

    mInfo("receive subscribe request from old consumer:%" PRId64 ", current status: %s", consumerId,
          mndConsumerStatusName(status));
    mInfo("receive subscribe request from existing consumer:%" PRId64 ", current status: %s, subscribe topic num: %d",
          consumerId, mndConsumerStatusName(status), newTopicNum);

    if (status != MQ_CONSUMER_STATUS__READY) {
      terrno = TSDB_CODE_MND_CONSUMER_NOT_READY;

@@ -849,12 +838,15 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
    pShow->pIter = sdbFetch(pSdb, SDB_CONSUMER, pShow->pIter, (void **)&pConsumer);
    if (pShow->pIter == NULL) break;
    if (taosArrayGetSize(pConsumer->assignedTopics) == 0) {
      mDebug("showing consumer %ld no assigned topic, skip", pConsumer->consumerId);
      sdbRelease(pSdb, pConsumer);
      continue;
    }

    taosRLockLatch(&pConsumer->lock);

    mDebug("showing consumer %ld", pConsumer->consumerId);

    int32_t topicSz = taosArrayGetSize(pConsumer->assignedTopics);
    bool hasTopic = true;
    if (topicSz == 0) {
@@ -512,9 +512,6 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
    pHandle->execHandle.execDb.pFilterOutTbUid =
        taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
  } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
    for (int32_t i = 0; i < 5; i++) {
      pHandle->execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode);
    }
    pHandle->execHandle.execTb.suid = req.suid;
    SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
    vnodeGetCtbIdList(pTq->pVnode, req.suid, tbUidList);

@@ -524,6 +521,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
      tqDebug("vgId:%d, idx %d, uid:%" PRId64, TD_VID(pTq->pVnode), i, tbUid);
    }
    for (int32_t i = 0; i < 5; i++) {
      pHandle->execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode);
      tqReaderSetTbUidList(pHandle->execHandle.pExecReader[i], tbUidList);
    }
    taosArrayDestroy(tbUidList);

@@ -174,28 +174,9 @@ int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, S
#endif

int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp, int32_t workerId) {
  if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) {
    qTaskInfo_t task = pExec->execCol.task[workerId];
    ASSERT(task);
    qSetStreamInput(task, pReq, STREAM_INPUT__DATA_SUBMIT, false);
    while (1) {
      SSDataBlock* pDataBlock = NULL;
      uint64_t ts = 0;
      if (qExecTask(task, &pDataBlock, &ts) < 0) {
        ASSERT(0);
      }
      if (pDataBlock == NULL) break;
  ASSERT(pExec->subType != TOPIC_SUB_TYPE__COLUMN);

      ASSERT(pDataBlock->info.rows != 0);

      tqAddBlockDataToRsp(pDataBlock, pRsp);
      if (pRsp->withTbName) {
        int64_t uid = pExec->pExecReader[workerId]->msgIter.uid;
        tqAddTbNameToRsp(pTq, uid, pRsp);
      }
      pRsp->blockNum++;
    }
  } else if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
  if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
    pRsp->withSchema = 1;
    STqReader* pReader = pExec->pExecReader[workerId];
    tqReaderSetDataMsg(pReader, pReq, 0);

@@ -232,9 +213,11 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
      pRsp->blockNum++;
    }
  }

  if (pRsp->blockNum == 0) {
    pRsp->skipLogNum++;
    return -1;
  }

  return 0;
}

@@ -323,6 +323,9 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {

    if ((asc && (win.ekey < pReader->window.skey)) || ((!asc) && (win.skey > pReader->window.ekey))) {
      pIter->index += step;
      if ((asc && pIter->index >= pIter->numOfFiles) || ((!asc) && pIter->index < 0)) {
        return false;
      }
      continue;
    }

@@ -548,8 +548,8 @@ static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {

int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
  SSyncInfo syncInfo = {
      //.snapshotStrategy = SYNC_STRATEGY_WAL_FIRST,
      .snapshotStrategy = SYNC_STRATEGY_NO_SNAPSHOT,
      .snapshotStrategy = SYNC_STRATEGY_WAL_FIRST,
      //.snapshotStrategy = SYNC_STRATEGY_NO_SNAPSHOT,
      .batchSize = 10,
      .vgId = pVnode->config.vgId,
      .isStandBy = pVnode->config.standby,
@@ -480,37 +480,35 @@ typedef struct SCtgOperation {

#define TD_RWLATCH_WRITE_FLAG_COPY 0x40000000

#define CTG_IS_LOCKED(_lock) atomic_load_32((_lock))

#define CTG_LOCK(type, _lock) do { \
  if (CTG_READ == (type)) { \
    assert(atomic_load_32((_lock)) >= 0); \
    CTG_LOCK_DEBUG("CTG RLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
    assert(atomic_load_64((_lock)) >= 0); \
    CTG_LOCK_DEBUG("CTG RLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
    taosRLockLatch(_lock); \
    CTG_LOCK_DEBUG("CTG RLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
    assert(atomic_load_32((_lock)) > 0); \
    CTG_LOCK_DEBUG("CTG RLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
    assert(atomic_load_64((_lock)) > 0); \
  } else { \
    assert(atomic_load_32((_lock)) >= 0); \
    CTG_LOCK_DEBUG("CTG WLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
    assert(atomic_load_64((_lock)) >= 0); \
    CTG_LOCK_DEBUG("CTG WLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
    taosWLockLatch(_lock); \
    CTG_LOCK_DEBUG("CTG WLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
    assert(atomic_load_32((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
    CTG_LOCK_DEBUG("CTG WLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
    assert(atomic_load_64((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
  } \
} while (0)

#define CTG_UNLOCK(type, _lock) do { \
  if (CTG_READ == (type)) { \
    assert(atomic_load_32((_lock)) > 0); \
    CTG_LOCK_DEBUG("CTG RULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
    assert(atomic_load_64((_lock)) > 0); \
    CTG_LOCK_DEBUG("CTG RULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
    taosRUnLockLatch(_lock); \
    CTG_LOCK_DEBUG("CTG RULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
    assert(atomic_load_32((_lock)) >= 0); \
    CTG_LOCK_DEBUG("CTG RULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
    assert(atomic_load_64((_lock)) >= 0); \
  } else { \
    assert(atomic_load_32((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
    CTG_LOCK_DEBUG("CTG WULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
    assert(atomic_load_64((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
    CTG_LOCK_DEBUG("CTG WULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
    taosWUnLockLatch(_lock); \
    CTG_LOCK_DEBUG("CTG WULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
    assert(atomic_load_32((_lock)) >= 0); \
    CTG_LOCK_DEBUG("CTG WULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
    assert(atomic_load_64((_lock)) >= 0); \
  } \
} while (0)
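The macro changes above switch the catalog's latch assertions and debug prints from atomic_load_32 to atomic_load_64, matching the widened SRWLatch earlier in this commit. A minimal sketch of the intended call pattern around a field protected by these macros; the wrapper function and parameters are illustrative assumptions:

// Hypothetical catalog read path guarded by the macros above.
static int32_t ctgReadProtectedValue(SRWLatch* pLock, const int32_t* pSrc, int32_t* pDst) {
  CTG_LOCK(CTG_READ, pLock);    // shared lock; internal checks now use 64-bit loads
  *pDst = *pSrc;                // read the protected field
  CTG_UNLOCK(CTG_READ, pLock);
  return TSDB_CODE_SUCCESS;
}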
@@ -789,9 +789,13 @@ _return:

int32_t ctgCallUserCb(void* param) {
  SCtgJob* pJob = (SCtgJob*)param;

  qDebug("QID:0x%" PRIx64 " ctg start to call user cb with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));

  (*pJob->userFp)(&pJob->jobRes, pJob->userParam, pJob->jobResCode);

  qDebug("QID:0x%" PRIx64 " ctg end to call user cb", pJob->queryId);

  taosRemoveRef(gCtgMgmt.jobPool, pJob->refId);

  return TSDB_CODE_SUCCESS;

@@ -822,8 +826,6 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) {

_return:

  qDebug("QID:0x%" PRIx64 " ctg call user callback with rsp %s", pJob->queryId, tstrerror(code));

  pJob->jobResCode = code;

  //taosSsleep(2);
@@ -540,6 +540,9 @@ typedef struct SIndefOperatorInfo {
  SArray*      pPseudoColInfo;
  SExprSupp    scalarSup;
  SNode*       pCondition;
  uint64_t     groupId;

  SSDataBlock* pNextGroupRes;
} SIndefOperatorInfo;

typedef struct SFillOperatorInfo {

@@ -551,6 +554,8 @@ typedef struct SFillOperatorInfo {
  bool         multigroupResult;
  STimeWindow  win;
  SNode*       pCondition;
  SArray*      pColMatchColInfo;
  int32_t      primaryTsCol;
} SFillOperatorInfo;

typedef struct SGroupbyOperatorInfo {
@@ -60,9 +60,8 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
      taosArrayAddAll(p->pDataBlock, pDataBlock->pDataBlock);
      taosArrayPush(pInfo->pBlockLists, &p);
    }
  } else if (type == STREAM_INPUT__TABLE_SCAN) {
    // do nothing
    ASSERT(pInfo->blockType == STREAM_INPUT__TABLE_SCAN);
    /*} else if (type == STREAM_INPUT__TABLE_SCAN) {*/
    /*ASSERT(pInfo->blockType == STREAM_INPUT__TABLE_SCAN);*/
  } else {
    ASSERT(0);
  }

@@ -71,6 +70,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
  }
}

#if 0
int32_t qStreamScanSnapshot(qTaskInfo_t tinfo) {
  if (tinfo == NULL) {
    return TSDB_CODE_QRY_APP_ERROR;

@@ -78,6 +78,7 @@ int32_t qStreamScanSnapshot(qTaskInfo_t tinfo) {
  SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
  return doSetStreamBlock(pTaskInfo->pRoot, NULL, 0, STREAM_INPUT__TABLE_SCAN, 0, NULL);
}
#endif

int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) {
  return qSetMultiStreamInput(tinfo, input, 1, type, assignUid);

@@ -299,7 +299,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
    }
    ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version);
  } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
    pInfo->blockType = STREAM_INPUT__TABLE_SCAN;
    /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
    int64_t uid = pOffset->uid;
    int64_t ts = pOffset->ts;
@@ -571,8 +571,8 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
  setPseudoOutputColInfo(pResult, pCtx, pPseudoList);
  pResult->info.groupId = pSrcBlock->info.groupId;

  // if the source equals to the destination, it is to create a new column as the result of scalar function or some
  // operators.
  // if the source equals to the destination, it is to create a new column as the result of scalar
  // function or some operators.
  bool createNewColModel = (pResult == pSrcBlock);

  int32_t numOfRows = 0;

@@ -580,17 +580,17 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
  for (int32_t k = 0; k < numOfOutput; ++k) {
    int32_t outputSlotId = pExpr[k].base.resSchema.slotId;
    SqlFunctionCtx* pfCtx = &pCtx[k];
    SInputColumnInfoData* pInputData = &pfCtx->input;

    if (pExpr[k].pExpr->nodeType == QUERY_NODE_COLUMN) {  // it is a project query
      SColumnInfoData* pColInfoData = taosArrayGet(pResult->pDataBlock, outputSlotId);
      if (pResult->info.rows > 0 && !createNewColModel) {
        colDataMergeCol(pColInfoData, pResult->info.rows, &pResult->info.capacity, pfCtx->input.pData[0],
                        pfCtx->input.numOfRows);
        colDataMergeCol(pColInfoData, pResult->info.rows, &pResult->info.capacity, pInputData->pData[0], pInputData->numOfRows);
      } else {
        colDataAssign(pColInfoData, pfCtx->input.pData[0], pfCtx->input.numOfRows, &pResult->info);
        colDataAssign(pColInfoData, pInputData->pData[0], pInputData->numOfRows, &pResult->info);
      }

      numOfRows = pfCtx->input.numOfRows;
      numOfRows = pInputData->numOfRows;
    } else if (pExpr[k].pExpr->nodeType == QUERY_NODE_VALUE) {
      SColumnInfoData* pColInfoData = taosArrayGet(pResult->pDataBlock, outputSlotId);

@@ -623,14 +623,12 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
      numOfRows = dest.numOfRows;
      taosArrayDestroy(pBlockList);
    } else if (pExpr[k].pExpr->nodeType == QUERY_NODE_FUNCTION) {
      ASSERT(!fmIsAggFunc(pfCtx->functionId));

      // _rowts/_c0, not tbname column
      if (fmIsPseudoColumnFunc(pfCtx->functionId) && (!fmIsScanPseudoColumnFunc(pfCtx->functionId))) {
        // do nothing
      } else if (fmIsIndefiniteRowsFunc(pfCtx->functionId)) {
        SResultRowEntryInfo* pResInfo = GET_RES_INFO(&pCtx[k]);
        pfCtx->fpSet.init(&pCtx[k], pResInfo);
        SResultRowEntryInfo* pResInfo = GET_RES_INFO(pfCtx);
        pfCtx->fpSet.init(pfCtx, pResInfo);

        pfCtx->pOutput = taosArrayGet(pResult->pDataBlock, outputSlotId);
        pfCtx->offset = createNewColModel ? 0 : pResult->info.rows;  // set the start offset

@@ -642,6 +640,23 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
        }

        numOfRows = pfCtx->fpSet.process(pfCtx);
      } else if (fmIsAggFunc(pfCtx->functionId)) {
        // _group_key function for "partition by tbname" + csum(col_name) query
        SColumnInfoData* pOutput = taosArrayGet(pResult->pDataBlock, outputSlotId);
        int32_t slotId = pfCtx->param[0].pCol->slotId;

        // todo handle the json tag
        SColumnInfoData* pInput = taosArrayGet(pSrcBlock->pDataBlock, slotId);
        for (int32_t f = 0; f < pSrcBlock->info.rows; ++f) {
          bool isNull = colDataIsNull_s(pInput, f);
          if (isNull) {
            colDataAppendNULL(pOutput, pResult->info.rows + f);
          } else {
            char* data = colDataGetData(pInput, f);
            colDataAppend(pOutput, pResult->info.rows + f, data, isNull);
          }
        }

      } else {
        SArray* pBlockList = taosArrayInit(4, POINTER_BYTES);
        taosArrayPush(pBlockList, &pSrcBlock);

@@ -675,25 +690,6 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
  return TSDB_CODE_SUCCESS;
}

static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
  if (IS_VAR_DATA_TYPE(type)) {
    // todo disable this

    // if (pResultRow->key == NULL) {
    //   pResultRow->key = taosMemoryMalloc(varDataTLen(pData));
    //   varDataCopy(pResultRow->key, pData);
    // } else {
    //   ASSERT(memcmp(pResultRow->key, pData, varDataTLen(pData)) == 0);
    // }
  } else {
    int64_t v = -1;
    GET_TYPED_DATA(v, int64_t, type, pData);

    pResultRow->win.skey = v;
    pResultRow->win.ekey = v;
  }
}

bool functionNeedToExecute(SqlFunctionCtx* pCtx) {
  struct SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
@@ -3825,6 +3821,40 @@ _error:
  return NULL;
}

static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOperatorInfo* downstream, SExecTaskInfo* pTaskInfo) {
  int32_t order = 0;
  int32_t scanFlag = 0;

  SIndefOperatorInfo* pIndefInfo = pOperator->info;
  SOptrBasicInfo*     pInfo = &pIndefInfo->binfo;
  SExprSupp*          pSup = &pOperator->exprSupp;

  // the pDataBlock is always the same one, no need to call this again
  int32_t code = getTableScanInfo(downstream, &order, &scanFlag);
  if (code != TSDB_CODE_SUCCESS) {
    longjmp(pTaskInfo->env, code);
  }

  // there is a scalar expression that needs to be calculated before applying the group aggregation.
  SExprSupp* pScalarSup = &pIndefInfo->scalarSup;
  if (pScalarSup->pExprInfo != NULL) {
    code = projectApplyFunctions(pScalarSup->pExprInfo, pBlock, pBlock, pScalarSup->pCtx, pScalarSup->numOfExprs,
                                 pIndefInfo->pPseudoColInfo);
    if (code != TSDB_CODE_SUCCESS) {
      longjmp(pTaskInfo->env, code);
    }
  }

  setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false);
  blockDataEnsureCapacity(pInfo->pRes, pInfo->pRes->info.rows + pBlock->info.rows);

  code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs,
                               pIndefInfo->pPseudoColInfo);
  if (code != TSDB_CODE_SUCCESS) {
    longjmp(pTaskInfo->env, code);
  }
}

static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) {
  SIndefOperatorInfo* pIndefInfo = pOperator->info;
  SOptrBasicInfo*     pInfo = &pIndefInfo->binfo;

@@ -3839,8 +3869,6 @@ static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) {
  }

  int64_t st = 0;
  int32_t order = 0;
  int32_t scanFlag = 0;

  if (pOperator->cost.openCost == 0) {
    st = taosGetTimestampUs();

@@ -3848,42 +3876,54 @@ static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) {

  SOperatorInfo* downstream = pOperator->pDownstream[0];

  while (1) {
    // The downstream exec may change the value of the newgroup, so use a local variable instead.
    SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
    if (pBlock == NULL) {
      doSetOperatorCompleted(pOperator);
      break;
  while (1) {
    // here we need to handle the existing group results
    if (pIndefInfo->pNextGroupRes != NULL) {  // todo extract method
      for (int32_t k = 0; k < pSup->numOfExprs; ++k) {
        SqlFunctionCtx* pCtx = &pSup->pCtx[k];

        SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
        pResInfo->initialized = false;
        pCtx->pOutput = NULL;
      }

      doHandleDataBlock(pOperator, pIndefInfo->pNextGroupRes, downstream, pTaskInfo);
      pIndefInfo->pNextGroupRes = NULL;
    }

    // the pDataBlock is always the same one, no need to call this again
    int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag);
    if (code != TSDB_CODE_SUCCESS) {
      longjmp(pTaskInfo->env, code);
    }
    if (pInfo->pRes->info.rows < pOperator->resultInfo.threshold) {
      while (1) {
        // The downstream exec may change the value of the newgroup, so use a local variable instead.
        SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
        if (pBlock == NULL) {
          doSetOperatorCompleted(pOperator);
          break;
        }

    // there is a scalar expression that needs to be calculated before applying the group aggregation.
    SExprSupp* pScalarSup = &pIndefInfo->scalarSup;
    if (pScalarSup->pExprInfo != NULL) {
      code = projectApplyFunctions(pScalarSup->pExprInfo, pBlock, pBlock, pScalarSup->pCtx, pScalarSup->numOfExprs,
                                   pIndefInfo->pPseudoColInfo);
      if (code != TSDB_CODE_SUCCESS) {
        longjmp(pTaskInfo->env, code);
        if (pIndefInfo->groupId == 0 && pBlock->info.groupId != 0) {
          pIndefInfo->groupId = pBlock->info.groupId;  // this is the initial group result
        } else {
          if (pIndefInfo->groupId != pBlock->info.groupId) {  // reset output buffer and computing status
            pIndefInfo->groupId = pBlock->info.groupId;
            pIndefInfo->pNextGroupRes = pBlock;
            break;
          }
        }

        doHandleDataBlock(pOperator, pBlock, downstream, pTaskInfo);
        if (pInfo->pRes->info.rows >= pOperator->resultInfo.threshold) {
          break;
        }
      }
    }

    setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false);
    blockDataEnsureCapacity(pInfo->pRes, pInfo->pRes->info.rows + pBlock->info.rows);

    code = projectApplyFunctions(pOperator->exprSupp.pExprInfo, pInfo->pRes, pBlock, pSup->pCtx,
                                 pOperator->exprSupp.numOfExprs, pIndefInfo->pPseudoColInfo);
    if (code != TSDB_CODE_SUCCESS) {
      longjmp(pTaskInfo->env, code);
    doFilter(pIndefInfo->pCondition, pInfo->pRes);
    size_t rows = pInfo->pRes->info.rows;
    if (rows >= 0) {
      break;
    }
  }

  doFilter(pIndefInfo->pCondition, pInfo->pRes);

  size_t rows = pInfo->pRes->info.rows;
  pOperator->resultInfo.totalRows += rows;

@@ -3928,24 +3968,23 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
  if (numOfRows * pResBlock->info.rowSize > TWOMB) {
    numOfRows = TWOMB / pResBlock->info.rowSize;
  }

  initResultSizeInfo(pOperator, numOfRows);

  initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str);
  initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str);
  initBasicInfo(&pInfo->binfo, pResBlock);

  setFunctionResultOutput(pOperator, &pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfExpr);

  pInfo->binfo.pRes = pResBlock;
  pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pSup->pCtx, numOfExpr);
  pInfo->pCondition = pPhyNode->node.pConditions;
  pInfo->binfo.pRes = pResBlock;
  pInfo->pCondition = pPhyNode->node.pConditions;
  pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pSup->pCtx, numOfExpr);

  pOperator->name = "IndefinitOperator";
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_PROJECT;
  pOperator->blocking = false;
  pOperator->status = OP_NOT_OPENED;
  pOperator->info = pInfo;
  pOperator->exprSupp.pExprInfo = pExprInfo;
  pOperator->exprSupp.numOfExprs = numOfExpr;
  pOperator->name = "IndefinitOperator";
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC;
  pOperator->blocking = false;
  pOperator->status = OP_NOT_OPENED;
  pOperator->info = pInfo;
  pOperator->pTaskInfo = pTaskInfo;

  pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doApplyIndefinitFunction, NULL, NULL,

@@ -4001,10 +4040,16 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
          QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType
              ? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval
              : &((SIntervalAggOperatorInfo*)downstream->info)->interval;

  int32_t type = convertFillType(pPhyFillNode->mode);

  SResultInfo* pResultInfo = &pOperator->resultInfo;
  initResultSizeInfo(pOperator, 4096);
  pInfo->primaryTsCol = ((SColumnNode*)pPhyFillNode->pWStartTs)->slotId;

  int32_t numOfOutputCols = 0;
  SArray* pColMatchColInfo =
      extractColMatchInfo(pPhyFillNode->pTargets, pPhyFillNode->node.pOutputDataBlockDesc, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID);

  int32_t code = initFillInfo(pInfo, pExprInfo, num, (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange,
                              pResultInfo->capacity, pTaskInfo->id.str, pInterval, type);

@@ -4012,17 +4057,18 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
    goto _error;
  }

  pInfo->pRes = pResBlock;
  pInfo->multigroupResult = multigroupResult;
  pInfo->pCondition = pPhyFillNode->node.pConditions;
  pOperator->name = "FillOperator";
  pOperator->blocking = false;
  pOperator->status = OP_NOT_OPENED;
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_FILL;
  pOperator->exprSupp.pExprInfo = pExprInfo;
  pInfo->pRes = pResBlock;
  pInfo->multigroupResult = multigroupResult;
  pInfo->pCondition = pPhyFillNode->node.pConditions;
  pInfo->pColMatchColInfo = pColMatchColInfo;
  pOperator->name = "FillOperator";
  pOperator->blocking = false;
  pOperator->status = OP_NOT_OPENED;
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_FILL;
  pOperator->exprSupp.pExprInfo = pExprInfo;
  pOperator->exprSupp.numOfExprs = num;
  pOperator->info = pInfo;
  pOperator->pTaskInfo = pTaskInfo;
  pOperator->info = pInfo;
  pOperator->pTaskInfo = pTaskInfo;

  pOperator->fpSet =
      createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroySFillOperatorInfo, NULL, NULL, NULL);
@ -637,6 +637,7 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
|
|||
int32_t* pageId = taosArrayGet(pGroupInfo->pPageList, pInfo->pageIndex);
|
||||
void* page = getBufPage(pInfo->pBuf, *pageId);
|
||||
|
||||
blockDataEnsureCapacity(pInfo->binfo.pRes, pInfo->rowCapacity);
|
||||
blockDataFromBuf1(pInfo->binfo.pRes, page, pInfo->rowCapacity);
|
||||
|
||||
pInfo->pageIndex += 1;
|
||||
|
|
|
@ -1123,6 +1123,7 @@ static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32
|
|||
uidCol[i] = getGroupId(pOperator, uidCol[i]);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock) {
|
||||
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
|
||||
SOperatorInfo* pOperator = pInfo->pStreamScanOp;
|
||||
|
@ -1216,13 +1217,9 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
|
|||
if (setBlockIntoRes(pInfo, &ret.data) < 0) {
|
||||
ASSERT(0);
|
||||
}
|
||||
/*pTaskInfo->streamInfo.lastStatus = ret.offset;*/
|
||||
// TODO clean data block
|
||||
if (pInfo->pRes->info.rows > 0) {
|
||||
return pInfo->pRes;
|
||||
} else {
|
||||
// data is filtered out, do clean
|
||||
|
||||
/*tDeleteSSDataBlock(&ret.data);*/
|
||||
}
|
||||
} else if (ret.fetchType == FETCH_TYPE__META) {
|
||||
ASSERT(0);
|
||||
|
@ -1240,6 +1237,10 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
|
|||
} else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
|
||||
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
|
||||
return pResult && pResult->info.rows > 0 ? pResult : NULL;
|
||||
} else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
|
||||
// TODO scan meta
|
||||
ASSERT(0);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
size_t total = taosArrayGetSize(pInfo->pBlockLists);
|
||||
|
@ -1397,6 +1398,11 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
|
|||
}
|
||||
}
|
||||
|
||||
static SSDataBlock* doRawScan(SOperatorInfo* pInfo) {
|
||||
//
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
|
||||
SArray* tableIdList = taosArrayInit(4, sizeof(uint64_t));
|
||||
|
||||
|
@ -1409,6 +1415,19 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
|
|||
return tableIdList;
|
||||
}
|
||||
|
||||
// for subscribing db or stb (not including column),
|
||||
// if this scan is used, meta data can be return
|
||||
// and schemas are decided when scanning
|
||||
SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode,
|
||||
SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup) {
|
||||
// create operator
|
||||
// create tb reader
|
||||
// create meta reader
|
||||
// create tq reader
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
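createRawScanOperatorInfo above is still a stub; its comments list the pieces it will need (operator shell, tb/meta/tq readers). The shell itself is the same open/next/close function-pointer wiring the other operators use. A self-contained toy version of that pattern, with illustrative names rather than the executor's API:

#include <stdbool.h>
#include <stdlib.h>

typedef struct ToyOperator ToyOperator;

typedef struct {
  int   (*open)(ToyOperator *op);   // prepare readers, allocate buffers
  void *(*next)(ToyOperator *op);   // produce the next block, NULL when done
  void  (*close)(ToyOperator *op);  // release readers and the operator itself
} ToyOperatorFpSet;

struct ToyOperator {
  const char      *name;
  bool             blocking;
  void            *info;            // operator-specific state (readers, schema, ...)
  ToyOperatorFpSet fpSet;
};

static int   rawScanOpen(ToyOperator *op)  { (void)op; return 0; }
static void *rawScanNext(ToyOperator *op)  { (void)op; return NULL; }  // TODO: scan data or meta
static void  rawScanClose(ToyOperator *op) { free(op); }

static ToyOperator *createRawScanToy(void *readers) {
  ToyOperator *op = calloc(1, sizeof(*op));
  if (op == NULL) return NULL;
  op->name = "RawScan";
  op->blocking = false;
  op->info = readers;
  op->fpSet = (ToyOperatorFpSet){rawScanOpen, rawScanNext, rawScanClose};
  return op;
}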
SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode,
|
||||
SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup, uint64_t queryId,
|
||||
uint64_t taskId) {
|
||||
|
@ -1452,16 +1471,16 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
|
|||
|
||||
if (pHandle) {
|
||||
SOperatorInfo* pTableScanOp = createTableScanOperatorInfo(pTableScanNode, pHandle, pTaskInfo);
|
||||
STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanOp->info;
|
||||
STableScanInfo* pTSInfo = (STableScanInfo*)pTableScanOp->info;
|
||||
if (pHandle->version > 0) {
|
||||
pSTInfo->cond.endVersion = pHandle->version;
|
||||
pTSInfo->cond.endVersion = pHandle->version;
|
||||
}
|
||||
|
||||
SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, 0);
|
||||
if (pHandle->initTableReader) {
|
||||
pSTInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
|
||||
pSTInfo->dataReader = NULL;
|
||||
if (tsdbReaderOpen(pHandle->vnode, &pSTInfo->cond, tableList, &pSTInfo->dataReader, NULL) < 0) {
|
||||
pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
|
||||
pTSInfo->dataReader = NULL;
|
||||
if (tsdbReaderOpen(pHandle->vnode, &pTSInfo->cond, tableList, &pTSInfo->dataReader, NULL) < 0) {
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
@ -1475,14 +1494,14 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
|
|||
pInfo->tqReader = pHandle->tqReader;
|
||||
}
|
||||
|
||||
if (pSTInfo->interval.interval > 0) {
|
||||
pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, pTwSup->waterMark);
|
||||
if (pTSInfo->interval.interval > 0) {
|
||||
pInfo->pUpdateInfo = updateInfoInitP(&pTSInfo->interval, pTwSup->waterMark);
|
||||
} else {
|
||||
pInfo->pUpdateInfo = NULL;
|
||||
}
|
||||
|
||||
pInfo->pTableScanOp = pTableScanOp;
|
||||
pInfo->interval = pSTInfo->interval;
|
||||
pInfo->interval = pTSInfo->interval;
|
||||
|
||||
pInfo->readHandle = *pHandle;
|
||||
pInfo->tableUid = pScanPhyNode->uid;
|
||||
|
|
|
@ -217,6 +217,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
|
|||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
|
||||
if (blockDataGetNumOfRows(pBlock) > 0) {
|
||||
break;
|
||||
}
|
||||
|
@ -601,8 +602,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
|
|||
break;
|
||||
}
|
||||
|
||||
if (pInfo->groupSort)
|
||||
{
|
||||
if (pInfo->groupSort) {
|
||||
uint64_t tupleGroupId = tsortGetGroupId(pTupleHandle);
|
||||
if (!pInfo->hasGroupId) {
|
||||
pInfo->groupId = tupleGroupId;
|
||||
|
|
|
@@ -50,8 +50,11 @@ static void setTagsValue(SFillInfo* pFillInfo, void** data, int32_t genRows) {

static void setNullRow(SSDataBlock* pBlock, int32_t numOfCol, int32_t rowIndex) {
  // the first are always the timestamp column, so start from the second column.
  for (int32_t i = 1; i < taosArrayGetSize(pBlock->pDataBlock); ++i) {
  for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); ++i) {
    SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i);
    if (p->info.type == TSDB_DATA_TYPE_TIMESTAMP && i == 0) {
      continue;
    }
    colDataAppendNULL(p, rowIndex);
  }
}
|
|
@ -4502,6 +4502,7 @@ static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
size_t rows = pRes->info.rows;
|
||||
blockDataUpdateTsWindow(pRes, iaInfo->primaryTsIndex);
|
||||
pOperator->resultInfo.totalRows += rows;
|
||||
return (rows == 0) ? NULL : pRes;
|
||||
}
|
||||
|
|
|
@ -139,7 +139,7 @@ bool fmIsAggFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MG
|
|||
|
||||
bool fmIsScalarFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_SCALAR_FUNC); }
|
||||
|
||||
bool fmIsVectorFunc(int32_t funcId) { return !fmIsScalarFunc(funcId) && !fmIsScanPseudoColumnFunc(funcId); }
|
||||
bool fmIsVectorFunc(int32_t funcId) { return !fmIsScalarFunc(funcId) && !fmIsPseudoColumnFunc(funcId); }
|
||||
|
||||
bool fmIsSelectFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_SELECT_FUNC); }
|
||||
|
||||
|
|
|
@ -51,7 +51,7 @@ FstDfa *dfaBuilderBuild(FstDfaBuilder *builder);
|
|||
bool dfaBuilderRunState(FstDfaBuilder *builder, FstSparseSet *cur, FstSparseSet *next, uint32_t state, uint8_t bytes,
|
||||
uint32_t *result);
|
||||
|
||||
bool dfaBuilderCachedState(FstDfaBuilder *builder, FstSparseSet *set, uint32_t *result);
|
||||
bool dfaBuilderCacheState(FstDfaBuilder *builder, FstSparseSet *set, uint32_t *result);
|
||||
|
||||
/*
|
||||
* dfa related func
|
||||
|
|
|
@ -23,17 +23,18 @@ extern "C" {
|
|||
#endif
|
||||
|
||||
typedef struct FstSparseSet {
|
||||
uint32_t *dense;
|
||||
uint32_t *sparse;
|
||||
int32_t size;
|
||||
int32_t *dense;
|
||||
int32_t *sparse;
|
||||
int32_t size;
|
||||
int32_t cap;
|
||||
} FstSparseSet;
|
||||
|
||||
FstSparseSet *sparSetCreate(int32_t sz);
|
||||
void sparSetDestroy(FstSparseSet *s);
|
||||
uint32_t sparSetLen(FstSparseSet *ss);
|
||||
uint32_t sparSetAdd(FstSparseSet *ss, uint32_t ip);
|
||||
uint32_t sparSetGet(FstSparseSet *ss, uint32_t i);
|
||||
bool sparSetContains(FstSparseSet *ss, uint32_t ip);
|
||||
bool sparSetAdd(FstSparseSet *ss, int32_t ip, int32_t *val);
|
||||
bool sparSetGet(FstSparseSet *ss, int32_t i, int32_t *val);
|
||||
bool sparSetContains(FstSparseSet *ss, int32_t ip);
|
||||
void sparSetClear(FstSparseSet *ss);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -64,7 +64,7 @@ void dfaBuilderDestroy(FstDfaBuilder *builder) {
|
|||
taosMemoryFree(builder);
|
||||
}
|
||||
|
||||
FstDfa *dfaBuilder(FstDfaBuilder *builder) {
|
||||
FstDfa *dfaBuilderBuild(FstDfaBuilder *builder) {
|
||||
uint32_t sz = taosArrayGetSize(builder->dfa->insts);
|
||||
FstSparseSet *cur = sparSetCreate(sz);
|
||||
FstSparseSet *nxt = sparSetCreate(sz);
|
||||
|
@ -73,7 +73,7 @@ FstDfa *dfaBuilder(FstDfaBuilder *builder) {
|
|||
|
||||
SArray *states = taosArrayInit(0, sizeof(uint32_t));
|
||||
uint32_t result;
|
||||
if (dfaBuilderCachedState(builder, cur, &result)) {
|
||||
if (dfaBuilderCacheState(builder, cur, &result)) {
|
||||
taosArrayPush(states, &result);
|
||||
}
|
||||
SHashObj *seen = taosHashInit(12, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
|
||||
|
@ -98,22 +98,21 @@ FstDfa *dfaBuilder(FstDfaBuilder *builder) {
|
|||
return builder->dfa;
|
||||
}
|
||||
|
||||
FstDfa *dfaBuilderBuild(FstDfaBuilder *builer) { return NULL; }
|
||||
|
||||
bool dfaBuilderRunState(FstDfaBuilder *builder, FstSparseSet *cur, FstSparseSet *next, uint32_t state, uint8_t byte,
|
||||
uint32_t *result) {
|
||||
sparSetClear(cur);
|
||||
DfaState *t = taosArrayGet(builder->dfa->states, state);
|
||||
for (int i = 0; i < taosArrayGetSize(t->insts); i++) {
|
||||
uint32_t ip = *(int32_t *)taosArrayGet(t->insts, i);
|
||||
sparSetAdd(cur, ip);
|
||||
int32_t ip = *(int32_t *)taosArrayGet(t->insts, i);
|
||||
bool succ = sparSetAdd(cur, ip, NULL);
|
||||
assert(succ == true);
|
||||
}
|
||||
dfaRun(builder->dfa, cur, next, byte);
|
||||
|
||||
t = taosArrayGet(builder->dfa->states, state);
|
||||
|
||||
uint32_t nxtState;
|
||||
if (dfaBuilderCachedState(builder, next, &nxtState)) {
|
||||
if (dfaBuilderCacheState(builder, next, &nxtState)) {
|
||||
t->next[byte] = nxtState;
|
||||
*result = nxtState;
|
||||
return true;
|
||||
|
@ -121,12 +120,13 @@ bool dfaBuilderRunState(FstDfaBuilder *builder, FstSparseSet *cur, FstSparseSet
|
|||
return false;
|
||||
}
|
||||
|
||||
bool dfaBuilderCachedState(FstDfaBuilder *builder, FstSparseSet *set, uint32_t *result) {
|
||||
bool dfaBuilderCacheState(FstDfaBuilder *builder, FstSparseSet *set, uint32_t *result) {
|
||||
SArray *tinsts = taosArrayInit(4, sizeof(uint32_t));
|
||||
bool isMatch = false;
|
||||
|
||||
for (int i = 0; i < sparSetLen(set); i++) {
|
||||
uint32_t ip = sparSetGet(set, i);
|
||||
int32_t ip;
|
||||
if (false == sparSetGet(set, i, &ip)) continue;
|
||||
|
||||
Inst *inst = taosArrayGet(builder->dfa->insts, ip);
|
||||
if (inst->ty == JUMP || inst->ty == SPLIT) {
|
||||
|
@ -186,7 +186,8 @@ void dfaAdd(FstDfa *dfa, FstSparseSet *set, uint32_t ip) {
|
|||
if (sparSetContains(set, ip)) {
|
||||
return;
|
||||
}
|
||||
sparSetAdd(set, ip);
|
||||
bool succ = sparSetAdd(set, ip, NULL);
|
||||
// assert(succ == true);
|
||||
Inst *inst = taosArrayGet(dfa->insts, ip);
|
||||
if (inst->ty == MATCH || inst->ty == RANGE) {
|
||||
// do nothing
|
||||
|
@ -203,7 +204,8 @@ bool dfaRun(FstDfa *dfa, FstSparseSet *from, FstSparseSet *to, uint8_t byte) {
|
|||
bool isMatch = false;
|
||||
sparSetClear(to);
|
||||
for (int i = 0; i < sparSetLen(from); i++) {
|
||||
uint32_t ip = sparSetGet(from, i);
|
||||
int32_t ip;
|
||||
if (false == sparSetGet(from, i, &ip)) continue;
|
||||
|
||||
Inst *inst = taosArrayGet(dfa->insts, ip);
|
||||
if (inst->ty == JUMP || inst->ty == SPLIT) {
|
||||
|
|
|
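dfaRun and dfaAdd above advance the regex NFA one input byte at a time: every instruction in the current sparse set that accepts the byte pushes its successor into the next set, and reaching a MATCH instruction flips isMatch. A self-contained sketch of that single-byte step, with a hypothetical instruction layout and plain arrays standing in for FstSparseSet:

#include <stdbool.h>

typedef enum { INST_MATCH, INST_RANGE, INST_JUMP } InstTy;

typedef struct {
  InstTy        ty;
  unsigned char lo, hi;  // byte range accepted by INST_RANGE
  int           next;    // successor instruction for RANGE/JUMP
} Inst;

// Move every instruction in cur[] that accepts `byte` into nxt[]; return true if a
// MATCH instruction was reached, mirroring the isMatch flag in dfaRun above.
static bool run_byte(const Inst *insts, const int *cur, int curLen, int *nxt, int *nxtLen,
                     unsigned char byte) {
  bool isMatch = false;
  *nxtLen = 0;
  for (int i = 0; i < curLen; i++) {
    const Inst *in = &insts[cur[i]];
    if (in->ty == INST_MATCH) {
      isMatch = true;
    } else if (in->ty == INST_RANGE && byte >= in->lo && byte <= in->hi) {
      nxt[(*nxtLen)++] = in->next;  // successor becomes part of the next state set
    }
  }
  return isMatch;
}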
@ -22,15 +22,15 @@ FstRegex *regexCreate(const char *str) {
|
|||
if (regex == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
int32_t sz = (int32_t)strlen(str);
|
||||
char *orig = taosMemoryCalloc(1, sz);
|
||||
memcpy(orig, str, sz);
|
||||
|
||||
regex->orig = orig;
|
||||
regex->orig = tstrdup(str);
|
||||
|
||||
// construct insts based on str
|
||||
SArray *insts = NULL;
|
||||
|
||||
SArray *insts = taosArrayInit(256, sizeof(uint8_t));
|
||||
for (int i = 0; i < strlen(str); i++) {
|
||||
uint8_t v = str[i];
|
||||
taosArrayPush(insts, &v);
|
||||
}
|
||||
FstDfaBuilder *builder = dfaBuilderCreate(insts);
|
||||
regex->dfa = dfaBuilderBuild(builder);
|
||||
return regex;
|
||||
|
|
|
@ -15,14 +15,24 @@
|
|||
|
||||
#include "indexFstSparse.h"
|
||||
|
||||
static void sparSetUtil(int32_t *buf, int32_t cap) {
|
||||
for (int32_t i = 0; i < cap; i++) {
|
||||
buf[i] = -1;
|
||||
}
|
||||
}
|
||||
FstSparseSet *sparSetCreate(int32_t sz) {
|
||||
FstSparseSet *ss = taosMemoryCalloc(1, sizeof(FstSparseSet));
|
||||
if (ss == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ss->dense = (uint32_t *)taosMemoryCalloc(sz, sizeof(uint32_t));
|
||||
ss->sparse = (uint32_t *)taosMemoryCalloc(sz, sizeof(uint32_t));
|
||||
ss->dense = (int32_t *)taosMemoryMalloc(sz * sizeof(int32_t));
|
||||
ss->sparse = (int32_t *)taosMemoryMalloc(sz * sizeof(int32_t));
|
||||
sparSetUtil(ss->dense, sz);
|
||||
sparSetUtil(ss->sparse, sz);
|
||||
|
||||
ss->cap = sz;
|
||||
|
||||
ss->size = 0;
|
||||
return ss;
|
||||
}
|
||||
|
@@ -38,23 +48,39 @@ uint32_t sparSetLen(FstSparseSet *ss) {
  // Get occupied size
  return ss == NULL ? 0 : ss->size;
}
uint32_t sparSetAdd(FstSparseSet *ss, uint32_t ip) {
bool sparSetAdd(FstSparseSet *ss, int32_t ip, int32_t *idx) {
  if (ss == NULL) {
    return 0;
    return false;
  }
  if (ip >= ss->cap || ip < 0) {
    return false;
  }
  uint32_t i = ss->size;
  ss->dense[i] = ip;
  ss->sparse[ip] = i;
  ss->size += 1;
  return i;

  if (idx != NULL) *idx = i;

  return true;
}
uint32_t sparSetGet(FstSparseSet *ss, uint32_t i) {
  // check later
  return ss->dense[i];
bool sparSetGet(FstSparseSet *ss, int32_t idx, int32_t *ip) {
  if (idx >= ss->cap || idx >= ss->size || idx < 0) {
    return false;
  }
  int32_t val = ss->dense[idx];
  if (ip != NULL) {
    *ip = val;
  }
  return val == -1 ? false : true;
}
bool sparSetContains(FstSparseSet *ss, uint32_t ip) {
  uint32_t i = ss->sparse[ip];
  if (i < ss->size && ss->dense[i] == ip) {
bool sparSetContains(FstSparseSet *ss, int32_t ip) {
  if (ip >= ss->cap || ip < 0) {
    return false;
  }
  int32_t i = ss->sparse[ip];

  if (i >= 0 && i < ss->cap && i < ss->size && ss->dense[i] == ip) {
    return true;
  } else {
    return false;

@@ -64,5 +90,7 @@ void sparSetClear(FstSparseSet *ss) {
  if (ss == NULL) {
    return;
  }
  sparSetUtil(ss->dense, ss->cap);
  sparSetUtil(ss->sparse, ss->cap);
  ss->size = 0;
}
|
|
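With this change the sparse-set API is bounds-checked and reports success through a bool, returning values through optional out-parameters. A short usage sketch against the new signatures, mirroring what the updated unit test below exercises:

#include <assert.h>
#include "indexFstSparse.h"

static void spar_set_demo(void) {
  FstSparseSet *set = sparSetCreate(256);       // capacity fixes the valid key range [0, 256)
  assert(set != NULL);

  int32_t idx = -1;
  assert(sparSetAdd(set, 'a', &idx) == true);   // idx receives the dense slot of 'a'
  assert(sparSetContains(set, 'a') == true);
  assert(sparSetAdd(set, 1000, NULL) == false); // out-of-range keys are rejected now

  int32_t val = -1;
  if (sparSetGet(set, 0, &val)) {               // dense slot 0 holds the first key added
    assert(val == 'a');
  }

  sparSetClear(set);                            // resets the dense/sparse arrays back to -1
  assert(sparSetContains(set, 'a') == false);
  sparSetDestroy(set);
}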
|
@ -51,20 +51,62 @@ class FstSparseSetEnv : public ::testing::Test {
|
|||
};
|
||||
|
||||
// test FstDfaBuilder
|
||||
TEST_F(FstUtilEnv, test1) {}
|
||||
TEST_F(FstUtilEnv, test2) {}
|
||||
TEST_F(FstUtilEnv, test3) {}
|
||||
TEST_F(FstUtilEnv, test4) {}
|
||||
TEST_F(FstUtilEnv, test1) {
|
||||
// test
|
||||
}
|
||||
TEST_F(FstUtilEnv, test2) {
|
||||
// test
|
||||
}
|
||||
TEST_F(FstUtilEnv, test3) {
|
||||
// test
|
||||
}
|
||||
TEST_F(FstUtilEnv, test4) {
|
||||
// test
|
||||
}
|
||||
|
||||
// test FstRegex
|
||||
|
||||
TEST_F(FstRegexEnv, test1) {}
|
||||
TEST_F(FstRegexEnv, test1) {
|
||||
//
|
||||
EXPECT_EQ(regex != NULL, true);
|
||||
}
|
||||
TEST_F(FstRegexEnv, test2) {}
|
||||
TEST_F(FstRegexEnv, test3) {}
|
||||
TEST_F(FstRegexEnv, test4) {}
|
||||
|
||||
// test FstSparseSet
|
||||
TEST_F(FstSparseSetEnv, test1) {}
|
||||
TEST_F(FstSparseSetEnv, test2) {}
|
||||
TEST_F(FstSparseSetEnv, test3) {}
|
||||
TEST_F(FstSparseSetEnv, test4) {}
|
||||
TEST_F(FstSparseSetEnv, test1) {
|
||||
for (int8_t i = 0; i < 20; i++) {
|
||||
int32_t val = -1;
|
||||
bool succ = sparSetAdd(set, 'a' + i, &val);
|
||||
}
|
||||
EXPECT_EQ(sparSetLen(set), 20);
|
||||
for (int8_t i = 0; i < 20; i++) {
|
||||
int val = -1;
|
||||
bool find = sparSetGet(set, i, &val);
|
||||
EXPECT_EQ(find, true);
|
||||
EXPECT_EQ(val, i + 'a');
|
||||
}
|
||||
for (int8_t i = 'a'; i < 'a' + 20; i++) {
|
||||
EXPECT_EQ(sparSetContains(set, i), true);
|
||||
}
|
||||
|
||||
for (int8_t i = 'A'; i < 20; i++) {
|
||||
EXPECT_EQ(sparSetContains(set, 'A'), false);
|
||||
}
|
||||
|
||||
for (int i = 512; i < 1000; i++) {
|
||||
EXPECT_EQ(sparSetAdd(set, i, NULL), false);
|
||||
|
||||
EXPECT_EQ(sparSetGet(set, i, NULL), false);
|
||||
EXPECT_EQ(sparSetContains(set, i), false);
|
||||
}
|
||||
sparSetClear(set);
|
||||
|
||||
for (int i = 'a'; i < 'a' + 20; i++) {
|
||||
EXPECT_EQ(sparSetGet(set, i, NULL), false);
|
||||
}
|
||||
for (int i = 1000; i < 2000; i++) {
|
||||
EXPECT_EQ(sparSetGet(set, i, NULL), false);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -105,6 +105,8 @@ SNode* nodesMakeNode(ENodeType type) {
|
|||
return makeNode(type, sizeof(SAlterDatabaseStmt));
|
||||
case QUERY_NODE_FLUSH_DATABASE_STMT:
|
||||
return makeNode(type, sizeof(SFlushDatabaseStmt));
|
||||
case QUERY_NODE_TRIM_DATABASE_STMT:
|
||||
return makeNode(type, sizeof(STrimDatabaseStmt));
|
||||
case QUERY_NODE_CREATE_TABLE_STMT:
|
||||
return makeNode(type, sizeof(SCreateTableStmt));
|
||||
case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
|
||||
|
@ -548,6 +550,7 @@ void nodesDestroyNode(SNode* pNode) {
|
|||
nodesDestroyNode((SNode*)((SAlterDatabaseStmt*)pNode)->pOptions);
|
||||
break;
|
||||
case QUERY_NODE_FLUSH_DATABASE_STMT: // no pointer field
|
||||
case QUERY_NODE_TRIM_DATABASE_STMT: // no pointer field
|
||||
break;
|
||||
case QUERY_NODE_CREATE_TABLE_STMT: {
|
||||
SCreateTableStmt* pStmt = (SCreateTableStmt*)pNode;
|
||||
|
|
|
@ -137,6 +137,7 @@ SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, STok
|
|||
SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pDbName);
|
||||
SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pOptions);
|
||||
SNode* createFlushDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
|
||||
SNode* createTrimDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
|
||||
SNode* createDefaultTableOptions(SAstCreateContext* pCxt);
|
||||
SNode* createAlterTableOptions(SAstCreateContext* pCxt);
|
||||
SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType type, void* pVal);
|
||||
|
|
|
@ -158,6 +158,7 @@ cmd ::= DROP DATABASE exists_opt(A) db_name(B).
|
|||
cmd ::= USE db_name(A). { pCxt->pRootNode = createUseDatabaseStmt(pCxt, &A); }
|
||||
cmd ::= ALTER DATABASE db_name(A) alter_db_options(B). { pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &A, B); }
|
||||
cmd ::= FLUSH DATABASE db_name(A). { pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &A); }
|
||||
cmd ::= TRIM DATABASE db_name(A). { pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &A); }
|
||||
|
||||
%type not_exists_opt { bool }
|
||||
%destructor not_exists_opt { }
|
||||
|
@ -934,7 +935,11 @@ query_expression_body(A) ::=
|
|||
query_primary(A) ::= query_specification(B). { A = B; }
|
||||
query_primary(A) ::=
|
||||
NK_LP query_expression_body(B)
|
||||
order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP. { A = B; }
|
||||
order_by_clause_opt(C) slimit_clause_opt(D) limit_clause_opt(E) NK_RP. {
|
||||
A = addOrderByClause(pCxt, B, C);
|
||||
A = addSlimitClause(pCxt, A, D);
|
||||
A = addLimitClause(pCxt, A, E);
|
||||
}
|
||||
|
||||
%type order_by_clause_opt { SNodeList* }
|
||||
%destructor order_by_clause_opt { nodesDestroyList($$); }
|
||||
|
|
|
@ -665,6 +665,9 @@ SNode* addHavingClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pHaving) {
|
|||
|
||||
SNode* addOrderByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pOrderByList) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
if (NULL == pOrderByList) {
|
||||
return pStmt;
|
||||
}
|
||||
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
|
||||
((SSelectStmt*)pStmt)->pOrderByList = pOrderByList;
|
||||
} else {
|
||||
|
@ -675,6 +678,9 @@ SNode* addOrderByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pOrder
|
|||
|
||||
SNode* addSlimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pSlimit) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
if (NULL == pSlimit) {
|
||||
return pStmt;
|
||||
}
|
||||
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
|
||||
((SSelectStmt*)pStmt)->pSlimit = (SLimitNode*)pSlimit;
|
||||
}
|
||||
|
@ -683,6 +689,9 @@ SNode* addSlimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pSlimit) {
|
|||
|
||||
SNode* addLimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pLimit) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
if (NULL == pLimit) {
|
||||
return pStmt;
|
||||
}
|
||||
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
|
||||
((SSelectStmt*)pStmt)->pLimit = (SLimitNode*)pLimit;
|
||||
} else {
|
||||
|
@ -922,7 +931,18 @@ SNode* createFlushDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
|
|||
if (!checkDbName(pCxt, pDbName, false)) {
|
||||
return NULL;
|
||||
}
|
||||
SAlterDatabaseStmt* pStmt = (SAlterDatabaseStmt*)nodesMakeNode(QUERY_NODE_FLUSH_DATABASE_STMT);
|
||||
SFlushDatabaseStmt* pStmt = (SFlushDatabaseStmt*)nodesMakeNode(QUERY_NODE_FLUSH_DATABASE_STMT);
|
||||
CHECK_OUT_OF_MEM(pStmt);
|
||||
COPY_STRING_FORM_ID_TOKEN(pStmt->dbName, pDbName);
|
||||
return (SNode*)pStmt;
|
||||
}
|
||||
|
||||
SNode* createTrimDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
if (!checkDbName(pCxt, pDbName, false)) {
|
||||
return NULL;
|
||||
}
|
||||
STrimDatabaseStmt* pStmt = (STrimDatabaseStmt*)nodesMakeNode(QUERY_NODE_TRIM_DATABASE_STMT);
|
||||
CHECK_OUT_OF_MEM(pStmt);
|
||||
COPY_STRING_FORM_ID_TOKEN(pStmt->dbName, pDbName);
|
||||
return (SNode*)pStmt;
|
||||
|
|
|
@ -216,6 +216,7 @@ static SKeyword keywordTable[] = {
|
|||
{"TRANSACTION", TK_TRANSACTION},
|
||||
{"TRANSACTIONS", TK_TRANSACTIONS},
|
||||
{"TRIGGER", TK_TRIGGER},
|
||||
{"TRIM", TK_TRIM},
|
||||
{"TSERIES", TK_TSERIES},
|
||||
{"TTL", TK_TTL},
|
||||
{"UNION", TK_UNION},
|
||||
|
|
|
@ -1189,7 +1189,7 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
|
|||
if (fmIsSelectFunc(pFunc->funcId)) {
|
||||
pSelect->hasSelectFunc = true;
|
||||
++(pSelect->selectFuncNum);
|
||||
} else if (fmIsAggFunc(pFunc->funcId) || fmIsIndefiniteRowsFunc(pFunc->funcId)) {
|
||||
} else if (fmIsVectorFunc(pFunc->funcId)) {
|
||||
pSelect->hasOtherVectorFunc = true;
|
||||
}
|
||||
pSelect->hasUniqueFunc = pSelect->hasUniqueFunc ? true : (FUNCTION_TYPE_UNIQUE == pFunc->funcType);
|
||||
|
@ -1514,18 +1514,11 @@ static int32_t rewriteColsToSelectValFunc(STranslateContext* pCxt, SSelectStmt*
|
|||
typedef struct CheckAggColCoexistCxt {
|
||||
STranslateContext* pTranslateCxt;
|
||||
bool existCol;
|
||||
int32_t selectFuncNum;
|
||||
bool existOtherVectorFunc;
|
||||
} CheckAggColCoexistCxt;
|
||||
|
||||
static EDealRes doCheckAggColCoexist(SNode** pNode, void* pContext) {
|
||||
CheckAggColCoexistCxt* pCxt = (CheckAggColCoexistCxt*)pContext;
|
||||
if (isVectorFunc(*pNode)) {
|
||||
if (isSelectFunc(*pNode)) {
|
||||
++(pCxt->selectFuncNum);
|
||||
} else {
|
||||
pCxt->existOtherVectorFunc = true;
|
||||
}
|
||||
return DEAL_RES_IGNORE_CHILD;
|
||||
}
|
||||
SNode* pPartKey = NULL;
|
||||
|
@ -1542,16 +1535,15 @@ static EDealRes doCheckAggColCoexist(SNode** pNode, void* pContext) {
|
|||
|
||||
static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||
if (NULL != pSelect->pGroupByList || NULL != pSelect->pWindow ||
|
||||
(!pSelect->hasAggFuncs && !pSelect->hasIndefiniteRowsFunc)) {
|
||||
(!pSelect->hasAggFuncs && !pSelect->hasIndefiniteRowsFunc && !pSelect->hasInterpFunc)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
CheckAggColCoexistCxt cxt = {
|
||||
.pTranslateCxt = pCxt, .existCol = false, .selectFuncNum = 0, .existOtherVectorFunc = false};
|
||||
CheckAggColCoexistCxt cxt = {.pTranslateCxt = pCxt, .existCol = false};
|
||||
nodesRewriteExprs(pSelect->pProjectionList, doCheckAggColCoexist, &cxt);
|
||||
if (!pSelect->isDistinct) {
|
||||
nodesRewriteExprs(pSelect->pOrderByList, doCheckAggColCoexist, &cxt);
|
||||
}
|
||||
if (1 == cxt.selectFuncNum && !cxt.existOtherVectorFunc) {
|
||||
if (1 == pSelect->selectFuncNum && !pSelect->hasOtherVectorFunc) {
|
||||
return rewriteColsToSelectValFunc(pCxt, pSelect);
|
||||
}
|
||||
if (cxt.existCol) {
|
||||
|
@@ -3152,6 +3144,14 @@ static int32_t translateAlterDatabase(STranslateContext* pCxt, SAlterDatabaseStm
  return buildCmdMsg(pCxt, TDMT_MND_ALTER_DB, (FSerializeFunc)tSerializeSAlterDbReq, &alterReq);
}

static int32_t translateTrimDatabase(STranslateContext* pCxt, STrimDatabaseStmt* pStmt) {
  STrimDbReq req = {0};
  SName name = {0};
  tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName));
  tNameGetFullDbName(&name, req.db);
  return buildCmdMsg(pCxt, TDMT_MND_TRIM_DB, (FSerializeFunc)tSerializeSTrimDbReq, &req);
}

static int32_t columnDefNodeToField(SNodeList* pList, SArray** pArray) {
  *pArray = taosArrayInit(LIST_LENGTH(pList), sizeof(SField));
  SNode* pNode;
|
@ -4584,6 +4584,9 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) {
|
|||
case QUERY_NODE_ALTER_DATABASE_STMT:
|
||||
code = translateAlterDatabase(pCxt, (SAlterDatabaseStmt*)pNode);
|
||||
break;
|
||||
case QUERY_NODE_TRIM_DATABASE_STMT:
|
||||
code = translateTrimDatabase(pCxt, (STrimDatabaseStmt*)pNode);
|
||||
break;
|
||||
case QUERY_NODE_CREATE_TABLE_STMT:
|
||||
code = translateCreateSuperTable(pCxt, (SCreateTableStmt*)pNode);
|
||||
break;
|
||||
|
|
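TRIM DATABASE follows the same path as FLUSH: the parser builds an STrimDatabaseStmt, translateTrimDatabase fills an STrimDbReq with the full database name, and buildCmdMsg serializes it as a TDMT_MND_TRIM_DB message. A hedged sketch of the serialize/deserialize round trip, assuming the usual (buf, bufLen, pReq) serializer signature implied by the FSerializeFunc cast, that the serializer returns the encoded length, and that the deserializer returns 0 on success:

// Sketch only: buffer size and error handling are simplified; the header location of
// STrimDbReq and the return conventions are assumptions, not confirmed by this diff.
#include <stdio.h>
#include <string.h>
#include "tmsg.h"

static int32_t trimDbReqRoundTrip(const char *fullDbName) {
  STrimDbReq req = {0};
  snprintf(req.db, sizeof(req.db), "%s", fullDbName);

  char    buf[256] = {0};                                    // assumed large enough here
  int32_t len = tSerializeSTrimDbReq(buf, sizeof(buf), &req);
  if (len < 0) return -1;

  STrimDbReq decoded = {0};
  if (tDeserializeSTrimDbReq(buf, len, &decoded) != 0) return -1;
  return strcmp(req.db, decoded.db);                         // 0 when the round trip is lossless
}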
File diff suppressed because it is too large
|
@ -245,13 +245,21 @@ TEST_F(ParserSelectTest, orderBy) {
|
|||
TEST_F(ParserSelectTest, distinct) {
|
||||
useDb("root", "test");
|
||||
|
||||
// run("SELECT distinct c1, c2 FROM t1 WHERE c1 > 0 order by c1");
|
||||
run("SELECT distinct c1, c2 FROM t1 WHERE c1 > 0 order by c1");
|
||||
|
||||
// run("SELECT distinct c1 + 10, c2 FROM t1 WHERE c1 > 0 order by c1 + 10, c2");
|
||||
run("SELECT distinct c1 + 10, c2 FROM t1 WHERE c1 > 0 order by c1 + 10, c2");
|
||||
|
||||
// run("SELECT distinct c1 + 10 cc1, c2 cc2 FROM t1 WHERE c1 > 0 order by cc1, c2");
|
||||
run("SELECT distinct c1 + 10 cc1, c2 cc2 FROM t1 WHERE c1 > 0 order by cc1, c2");
|
||||
|
||||
// run("SELECT distinct COUNT(c2) FROM t1 WHERE c1 > 0 GROUP BY c1 order by COUNT(c2)");
|
||||
run("SELECT distinct COUNT(c2) FROM t1 WHERE c1 > 0 GROUP BY c1 order by COUNT(c2)");
|
||||
}
|
||||
|
||||
TEST_F(ParserSelectTest, limit) {
|
||||
useDb("root", "test");
|
||||
|
||||
run("SELECT c1, c2 FROM t1 LIMIT 10");
|
||||
|
||||
run("(SELECT c1, c2 FROM t1 LIMIT 10)");
|
||||
}
|
||||
|
||||
// INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [FILL(fill_mod_and_val)]
|
||||
|
|
|
@ -222,6 +222,25 @@ TEST_F(ParserShowToUseTest, splitVgroup) {
|
|||
run("SPLIT VGROUP 15");
|
||||
}
|
||||
|
||||
TEST_F(ParserShowToUseTest, trimDatabase) {
|
||||
useDb("root", "test");
|
||||
|
||||
STrimDbReq expect = {0};
|
||||
|
||||
auto setTrimDbReq = [&](const char* pDb) { snprintf(expect.db, sizeof(expect.db), "0.%s", pDb); };
|
||||
|
||||
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
|
||||
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_TRIM_DATABASE_STMT);
|
||||
ASSERT_EQ(pQuery->pCmdMsg->msgType, TDMT_MND_TRIM_DB);
|
||||
STrimDbReq req = {0};
|
||||
ASSERT_EQ(tDeserializeSTrimDbReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req), TSDB_CODE_SUCCESS);
|
||||
ASSERT_EQ(std::string(req.db), std::string(expect.db));
|
||||
});
|
||||
|
||||
setTrimDbReq("wxy_db");
|
||||
run("TRIM DATABASE wxy_db");
|
||||
}
|
||||
|
||||
TEST_F(ParserShowToUseTest, useDatabase) {
|
||||
useDb("root", "test");
|
||||
|
||||
|
|
|
@ -316,34 +316,34 @@ typedef struct SQWorkerMgmt {
|
|||
#define QW_LOCK(type, _lock) \
|
||||
do { \
|
||||
if (QW_READ == (type)) { \
|
||||
assert(atomic_load_32((_lock)) >= 0); \
|
||||
QW_LOCK_DEBUG("QW RLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) >= 0); \
|
||||
QW_LOCK_DEBUG("QW RLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
taosRLockLatch(_lock); \
|
||||
QW_LOCK_DEBUG("QW RLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_32((_lock)) > 0); \
|
||||
QW_LOCK_DEBUG("QW RLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) > 0); \
|
||||
} else { \
|
||||
assert(atomic_load_32((_lock)) >= 0); \
|
||||
QW_LOCK_DEBUG("QW WLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) >= 0); \
|
||||
QW_LOCK_DEBUG("QW WLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
taosWLockLatch(_lock); \
|
||||
QW_LOCK_DEBUG("QW WLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_32((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
|
||||
QW_LOCK_DEBUG("QW WLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define QW_UNLOCK(type, _lock) \
|
||||
do { \
|
||||
if (QW_READ == (type)) { \
|
||||
assert(atomic_load_32((_lock)) > 0); \
|
||||
QW_LOCK_DEBUG("QW RULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) > 0); \
|
||||
QW_LOCK_DEBUG("QW RULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
taosRUnLockLatch(_lock); \
|
||||
QW_LOCK_DEBUG("QW RULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_32((_lock)) >= 0); \
|
||||
QW_LOCK_DEBUG("QW RULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) >= 0); \
|
||||
} else { \
|
||||
assert(atomic_load_32((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
|
||||
QW_LOCK_DEBUG("QW WULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
|
||||
QW_LOCK_DEBUG("QW WULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
taosWUnLockLatch(_lock); \
|
||||
QW_LOCK_DEBUG("QW WULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_32((_lock)) >= 0); \
|
||||
QW_LOCK_DEBUG("QW WULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) >= 0); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
|
|
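The QW_LOCK/QW_UNLOCK rewrite above is mostly mechanical: the latch is now a 64-bit word, so every assert and debug trace moves from atomic_load_32 and %d to atomic_load_64 and PRIx64. Reduced to a self-contained sketch with C11 atomics instead of the taos latch API, the trace-around-acquire pattern looks like this:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef _Atomic int64_t Latch;

// Trace before and after the acquire so lock-ordering problems show up in the log,
// which is what the QW_LOCK_DEBUG calls above are for (the real macro is gated by a
// debug flag).
#define DBG_TRACE(tag, pLatch) \
  fprintf(stderr, "%s %p:0x%" PRIx64 " %s:%d\n", (tag), (void *)(pLatch), (uint64_t)atomic_load(pLatch), __FILE__, __LINE__)

static void dbg_wlock(Latch *pLatch) {
  DBG_TRACE("WLOCK B", pLatch);
  int64_t expected = 0;
  while (!atomic_compare_exchange_weak(pLatch, &expected, INT64_C(0x40000000))) {
    expected = 0;  // spin until the latch is free, then set the write flag
  }
  DBG_TRACE("WLOCK E", pLatch);
}

static void dbg_wunlock(Latch *pLatch) {
  DBG_TRACE("WULOCK B", pLatch);
  atomic_store(pLatch, 0);
  DBG_TRACE("WULOCK E", pLatch);
}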
@ -1041,7 +1041,7 @@ int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qW
|
|||
|
||||
*qWorkerMgmt = mgmt;
|
||||
|
||||
qDebug("qworker initialized for node, type:%d, id:%d, handle:%p", mgmt->nodeType, mgmt->nodeId, mgmt);
|
||||
qDebug("qworker initialized, type:%d, id:%d, handle:%p", mgmt->nodeType, mgmt->nodeId, mgmt);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
|
|
|
@ -3623,7 +3623,8 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) {
|
|||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
||||
if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) && node->opType >= OP_TYPE_NOT_EQUAL) {
|
||||
if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) &&
|
||||
(node->opType >= OP_TYPE_NOT_EQUAL) && (node->opType != OP_TYPE_IS_NULL && node->opType != OP_TYPE_IS_NOT_NULL)) {
|
||||
stat->scalarMode = true;
|
||||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
|
|
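The revised check above only forces a timestamp filter into scalar mode when the operator falls outside the indexable comparison range and is not an IS NULL or IS NOT NULL test. Written as a standalone predicate, with illustrative operator codes where only the ordering matters:

#include <stdbool.h>

typedef enum {
  OP_EQUAL = 1,
  OP_LOWER_THAN,
  OP_GREATER_THAN,
  OP_NOT_EQUAL,   // first operator that the range-based timestamp filter cannot index
  OP_LIKE,
  OP_IS_NULL,
  OP_IS_NOT_NULL,
} OpType;

// true when a timestamp-only filter has to fall back to scalar evaluation
static bool needsScalarMode(bool tsOption, OpType op) {
  return tsOption && op >= OP_NOT_EQUAL && op != OP_IS_NULL && op != OP_IS_NOT_NULL;
}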
@ -54,6 +54,11 @@ typedef enum {
|
|||
SCH_OP_GET_STATUS,
|
||||
} SCH_OP_TYPE;
|
||||
|
||||
typedef struct SSchDebug {
|
||||
bool lockEnable;
|
||||
bool apiEnable;
|
||||
} SSchDebug;
|
||||
|
||||
typedef struct SSchTrans {
|
||||
void *pTrans;
|
||||
void *pHandle;
|
||||
|
@ -186,7 +191,7 @@ typedef struct SSchTaskProfile {
|
|||
|
||||
typedef struct SSchTask {
|
||||
uint64_t taskId; // task id
|
||||
SRWLatch lock; // task lock
|
||||
SRWLatch lock; // task reentrant lock
|
||||
int32_t maxExecTimes; // task may exec times
|
||||
int32_t execId; // task current execute try index
|
||||
SSchLevel *level; // level
|
||||
|
@ -356,8 +361,41 @@ extern SSchedulerMgmt schMgmt;
|
|||
#define SCH_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { SCH_SET_ERRNO(_code); } return _code; } while (0)
|
||||
#define SCH_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { SCH_SET_ERRNO(code); goto _return; } } while (0)
|
||||
|
||||
#define SCH_LOCK(type, _lock) (SCH_READ == (type) ? taosRLockLatch(_lock) : taosWLockLatch(_lock))
|
||||
#define SCH_UNLOCK(type, _lock) (SCH_READ == (type) ? taosRUnLockLatch(_lock) : taosWUnLockLatch(_lock))
|
||||
#define SCH_LOCK_DEBUG(...) do { if (gSCHDebug.lockEnable) { qDebug(__VA_ARGS__); } } while (0)
|
||||
|
||||
#define TD_RWLATCH_WRITE_FLAG_COPY 0x40000000
|
||||
|
||||
#define SCH_LOCK(type, _lock) do { \
|
||||
if (SCH_READ == (type)) { \
|
||||
assert(atomic_load_64(_lock) >= 0); \
|
||||
SCH_LOCK_DEBUG("SCH RLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
taosRLockLatch(_lock); \
|
||||
SCH_LOCK_DEBUG("SCH RLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64(_lock) > 0); \
|
||||
} else { \
|
||||
assert(atomic_load_64(_lock) >= 0); \
|
||||
SCH_LOCK_DEBUG("SCH WLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
taosWLockLatch(_lock); \
|
||||
SCH_LOCK_DEBUG("SCH WLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64(_lock) & TD_RWLATCH_WRITE_FLAG_COPY); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define SCH_UNLOCK(type, _lock) do { \
|
||||
if (SCH_READ == (type)) { \
|
||||
assert(atomic_load_64((_lock)) > 0); \
|
||||
SCH_LOCK_DEBUG("SCH RULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
taosRUnLockLatch(_lock); \
|
||||
SCH_LOCK_DEBUG("SCH RULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) >= 0); \
|
||||
} else { \
|
||||
assert(atomic_load_64((_lock)) & TD_RWLATCH_WRITE_FLAG_COPY); \
|
||||
SCH_LOCK_DEBUG("SCH WULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
taosWUnLockLatch(_lock); \
|
||||
SCH_LOCK_DEBUG("SCH WULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
|
||||
assert(atomic_load_64((_lock)) >= 0); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
||||
void schDeregisterTaskHb(SSchJob *pJob, SSchTask *pTask);
|
||||
|
@ -435,6 +473,8 @@ int32_t schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTas
|
|||
int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel, int32_t levelNum);
|
||||
int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask);
|
||||
|
||||
extern SSchDebug gSCHDebug;
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include "schInt.h"
|
||||
|
||||
tsem_t schdRspSem;
|
||||
SSchDebug gSCHDebug = {0};
|
||||
|
||||
void schdExecCallback(SExecResult* pResult, void* param, int32_t code) {
|
||||
if (code) {
|
||||
|
|
|
@ -337,14 +337,14 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) {
|
|||
SCH_SET_JOB_TYPE(pJob, plan->subplanType);
|
||||
|
||||
SSchTask task = {0};
|
||||
SCH_ERR_JRET(schInitTask(pJob, &task, plan, pLevel, levelNum));
|
||||
|
||||
SSchTask *pTask = taosArrayPush(pLevel->subTasks, &task);
|
||||
if (NULL == pTask) {
|
||||
SCH_TASK_ELOG("taosArrayPush task to level failed, level:%d, taskIdx:%d", pLevel->level, n);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
SCH_ERR_JRET(schInitTask(pJob, pTask, plan, pLevel, levelNum));
|
||||
|
||||
SCH_ERR_JRET(schAppendJobDataSrc(pJob, pTask));
|
||||
|
||||
if (0 != taosHashPut(planToTask, &plan, POINTER_BYTES, &pTask, POINTER_BYTES)) {
|
||||
|
@ -543,9 +543,12 @@ int32_t schLaunchJobLowerLevel(SSchJob *pJob, SSchTask *pTask) {
|
|||
|
||||
int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp) {
|
||||
if (rsp->tbFName[0]) {
|
||||
SCH_LOCK(SCH_WRITE, &pJob->resLock);
|
||||
|
||||
if (NULL == pJob->execRes.res) {
|
||||
pJob->execRes.res = taosArrayInit(pJob->taskNum, sizeof(STbVerInfo));
|
||||
if (NULL == pJob->execRes.res) {
|
||||
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
|
||||
SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
|
@ -557,6 +560,8 @@ int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp) {
|
|||
|
||||
taosArrayPush((SArray *)pJob->execRes.res, &tbInfo);
|
||||
pJob->execRes.msgType = TDMT_SCH_QUERY;
|
||||
|
||||
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
|
|
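schSaveJobQueryRes now holds resLock across the whole read-modify-write, creating the result array lazily on first use and releasing the lock on the error path as well. The shape of that pattern as a self-contained sketch, with a plain mutex standing in for SCH_LOCK(SCH_WRITE, &pJob->resLock):

#include <pthread.h>
#include <stdlib.h>

typedef struct {
  pthread_mutex_t lock;
  int            *items;  // lazily allocated result storage
  int             len;
  int             cap;
} ResultHolder;

// Append one item; allocate the backing array on first use while holding the lock,
// and make sure the lock is released on every failure path as well.
static int result_append(ResultHolder *h, int value) {
  pthread_mutex_lock(&h->lock);
  if (h->items == NULL) {
    h->items = malloc(sizeof(int) * (size_t)h->cap);
    if (h->items == NULL) {
      pthread_mutex_unlock(&h->lock);  // mirror of the added SCH_UNLOCK before SCH_ERR_RET
      return -1;
    }
  }
  if (h->len >= h->cap) {
    pthread_mutex_unlock(&h->lock);
    return -1;
  }
  h->items[h->len++] = value;
  pthread_mutex_unlock(&h->lock);
  return 0;
}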
@ -60,6 +60,7 @@ int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *
|
|||
if (NULL == pTask->execNodes || NULL == pTask->profile.execTime) {
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
taosInitReentrantRWLatch(&pTask->lock);
|
||||
|
||||
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_INIT);
|
||||
|
||||
|
@ -263,7 +264,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
|
|||
SSchTask *parent = *(SSchTask **)taosArrayGet(pTask->parents, i);
|
||||
int32_t readyNum = atomic_add_fetch_32(&parent->childReady, 1);
|
||||
|
||||
SCH_LOCK(SCH_WRITE, &parent->lock);
|
||||
SCH_LOCK_TASK(parent);
|
||||
SDownstreamSourceNode source = {.type = QUERY_NODE_DOWNSTREAM_SOURCE,
|
||||
.taskId = pTask->taskId,
|
||||
.schedId = schMgmt.sId,
|
||||
|
@ -272,7 +273,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
|
|||
.fetchMsgType = SCH_FETCH_TYPE(pTask),
|
||||
};
|
||||
qSetSubplanExecutionNode(parent->plan, pTask->plan->id.groupId, &source);
|
||||
SCH_UNLOCK(SCH_WRITE, &parent->lock);
|
||||
SCH_UNLOCK_TASK(parent);
|
||||
|
||||
if (SCH_TASK_READY_FOR_LAUNCH(readyNum, parent)) {
|
||||
SCH_TASK_DLOG("all %d children task done, start to launch parent task 0x%" PRIx64, readyNum, parent->taskId);
|
||||
|
|
|
@ -30,19 +30,24 @@ SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond) {
|
|||
pRead->pWal = pWal;
|
||||
pRead->pIdxFile = NULL;
|
||||
pRead->pLogFile = NULL;
|
||||
pRead->curVersion = -5;
|
||||
pRead->curVersion = -1;
|
||||
pRead->curFileFirstVer = -1;
|
||||
pRead->curInvalid = 1;
|
||||
pRead->capacity = 0;
|
||||
if (cond)
|
||||
if (cond) {
|
||||
pRead->cond = *cond;
|
||||
else {
|
||||
} else {
|
||||
pRead->cond.scanMeta = 0;
|
||||
pRead->cond.scanUncommited = 0;
|
||||
pRead->cond.enableRef = 0;
|
||||
}
|
||||
|
||||
taosThreadMutexInit(&pRead->mutex, NULL);
|
||||
|
||||
/*if (pRead->cond.enableRef) {*/
|
||||
/*walOpenRef(pWal);*/
|
||||
/*}*/
|
||||
|
||||
pRead->pHead = taosMemoryMalloc(sizeof(SWalCkHead));
|
||||
if (pRead->pHead == NULL) {
|
||||
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
|
||||
|
@ -151,24 +156,8 @@ static int32_t walReadChangeFile(SWalReader *pRead, int64_t fileFirstVer) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t walReadSeekVer(SWalReader *pRead, int64_t ver) {
|
||||
int32_t walReadSeekVerImpl(SWalReader *pRead, int64_t ver) {
|
||||
SWal *pWal = pRead->pWal;
|
||||
if (!pRead->curInvalid && ver == pRead->curVersion) {
|
||||
wDebug("wal version %ld match, no need to reset", ver);
|
||||
return 0;
|
||||
}
|
||||
|
||||
pRead->curInvalid = 1;
|
||||
pRead->curVersion = ver;
|
||||
|
||||
if (ver > pWal->vers.lastVer || ver < pWal->vers.firstVer) {
|
||||
wDebug("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pRead->pWal->cfg.vgId,
|
||||
ver, pWal->vers.firstVer, pWal->vers.lastVer);
|
||||
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
if (ver < pWal->vers.snapshotVer) {
|
||||
}
|
||||
|
||||
SWalFileInfo tmpInfo;
|
||||
tmpInfo.firstVer = ver;
|
||||
|
@ -190,6 +179,31 @@ int32_t walReadSeekVer(SWalReader *pRead, int64_t ver) {
|
|||
wDebug("wal version reset from %ld to %ld", pRead->curVersion, ver);
|
||||
|
||||
pRead->curVersion = ver;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t walReadSeekVer(SWalReader *pRead, int64_t ver) {
|
||||
SWal *pWal = pRead->pWal;
|
||||
if (!pRead->curInvalid && ver == pRead->curVersion) {
|
||||
wDebug("wal version %ld match, no need to reset", ver);
|
||||
return 0;
|
||||
}
|
||||
|
||||
pRead->curInvalid = 1;
|
||||
pRead->curVersion = ver;
|
||||
|
||||
if (ver > pWal->vers.lastVer || ver < pWal->vers.firstVer) {
|
||||
wDebug("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pRead->pWal->cfg.vgId,
|
||||
ver, pWal->vers.firstVer, pWal->vers.lastVer);
|
||||
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
if (ver < pWal->vers.snapshotVer) {
|
||||
}
|
||||
|
||||
if (walReadSeekVerImpl(pRead, ver) < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -198,6 +212,8 @@ void walSetReaderCapacity(SWalReader *pRead, int32_t capacity) { pRead->capacity
|
|||
|
||||
static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) {
|
||||
int64_t contLen;
|
||||
bool seeked = false;
|
||||
|
||||
if (pRead->curInvalid || pRead->curVersion != fetchVer) {
|
||||
if (walReadSeekVer(pRead, fetchVer) < 0) {
|
||||
ASSERT(0);
|
||||
|
@ -205,17 +221,26 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) {
|
|||
pRead->curInvalid = 1;
|
||||
return -1;
|
||||
}
|
||||
seeked = true;
|
||||
}
|
||||
contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead));
|
||||
if (contLen != sizeof(SWalCkHead)) {
|
||||
if (contLen < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
while (1) {
|
||||
contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead));
|
||||
if (contLen == sizeof(SWalCkHead)) {
|
||||
break;
|
||||
} else if (contLen == 0 && !seeked) {
|
||||
walReadSeekVerImpl(pRead, fetchVer);
|
||||
seeked = true;
|
||||
continue;
|
||||
} else {
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
if (contLen < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
} else {
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
}
|
||||
ASSERT(0);
|
||||
pRead->curInvalid = 1;
|
||||
return -1;
|
||||
}
|
||||
ASSERT(0);
|
||||
pRead->curInvalid = 1;
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
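Both walFetchHeadNew and walReadVer now retry a short header read once: if the read returns zero bytes and no re-seek has happened yet, the reader seeks back to the expected position and tries again; anything else is reported as corruption or a system error. A self-contained sketch of that retry loop over a plain FILE handle:

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned char bytes[64]; } RecordHead;  // stand-in for SWalCkHead

// Read a fixed-size record header; if the first read comes back empty and we have not
// re-seeked yet, seek to the expected offset once and retry, mirroring the
// walReadSeekVerImpl retry added above.
static int read_head_with_reseek(FILE *fp, long offset, RecordHead *head) {
  bool seeked = false;
  while (1) {
    size_t n = fread(head, 1, sizeof(*head), fp);
    if (n == sizeof(*head)) {
      return 0;                      // full header read
    }
    if (n == 0 && !seeked) {
      if (fseek(fp, offset, SEEK_SET) != 0) return -1;
      seeked = true;                 // only one re-seek attempt, as in walFetchHeadNew
      continue;
    }
    return -1;                       // short read or I/O error: treat as corruption
  }
}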
@ -379,20 +404,14 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) {
|
|||
}
|
||||
|
||||
int32_t walReadVer(SWalReader *pRead, int64_t ver) {
|
||||
int64_t code;
|
||||
int64_t contLen;
|
||||
bool seeked = false;
|
||||
|
||||
if (pRead->pWal->vers.firstVer == -1) {
|
||||
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pRead->curInvalid || pRead->curVersion != ver) {
|
||||
if (walReadSeekVer(pRead, ver) < 0) {
|
||||
wError("vgId:%d, unexpected wal log index:%" PRId64 ", since %s", pRead->pWal->cfg.vgId, ver, terrstr());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (ver > pRead->pWal->vers.lastVer || ver < pRead->pWal->vers.firstVer) {
|
||||
wError("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pRead->pWal->cfg.vgId,
|
||||
ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.lastVer);
|
||||
|
@ -400,21 +419,35 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
ASSERT(taosValidFile(pRead->pLogFile) == true);
|
||||
|
||||
code = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead));
|
||||
if (code != sizeof(SWalCkHead)) {
|
||||
if (code < 0)
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
else {
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
ASSERT(0);
|
||||
if (pRead->curInvalid || pRead->curVersion != ver) {
|
||||
if (walReadSeekVer(pRead, ver) < 0) {
|
||||
wError("vgId:%d, unexpected wal log index:%" PRId64 ", since %s", pRead->pWal->cfg.vgId, ver, terrstr());
|
||||
return -1;
|
||||
}
|
||||
return -1;
|
||||
seeked = true;
|
||||
}
|
||||
|
||||
code = walValidHeadCksum(pRead->pHead);
|
||||
if (code != 0) {
|
||||
while (1) {
|
||||
contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead));
|
||||
if (contLen == sizeof(SWalCkHead)) {
|
||||
break;
|
||||
} else if (contLen == 0 && !seeked) {
|
||||
walReadSeekVerImpl(pRead, ver);
|
||||
seeked = true;
|
||||
continue;
|
||||
} else {
|
||||
if (contLen < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
} else {
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
}
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
contLen = walValidHeadCksum(pRead->pHead);
|
||||
if (contLen != 0) {
|
||||
wError("vgId:%d, unexpected wal log index:%" PRId64 ", since head checksum not passed", pRead->pWal->cfg.vgId, ver);
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
return -1;
|
||||
|
@ -430,9 +463,9 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
|
|||
pRead->capacity = pRead->pHead->head.bodyLen;
|
||||
}
|
||||
|
||||
if ((code = taosReadFile(pRead->pLogFile, pRead->pHead->head.body, pRead->pHead->head.bodyLen)) !=
|
||||
if ((contLen = taosReadFile(pRead->pLogFile, pRead->pHead->head.body, pRead->pHead->head.bodyLen)) !=
|
||||
pRead->pHead->head.bodyLen) {
|
||||
if (code < 0)
|
||||
if (contLen < 0)
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
else {
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
|
@ -449,8 +482,8 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
code = walValidBodyCksum(pRead->pHead);
|
||||
if (code != 0) {
|
||||
contLen = walValidBodyCksum(pRead->pHead);
|
||||
if (contLen != 0) {
|
||||
wError("vgId:%d, unexpected wal log index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver);
|
||||
pRead->curInvalid = 1;
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
|
|
|
@@ -17,8 +17,10 @@
#include "tlockfree.h"

#define TD_RWLATCH_WRITE_FLAG 0x40000000
#define TD_RWLATCH_REENTRANT_FLAG 0x4000000000000000

void taosInitRWLatch(SRWLatch *pLatch) { *pLatch = 0; }
void taosInitReentrantRWLatch(SRWLatch *pLatch) { *pLatch = TD_RWLATCH_REENTRANT_FLAG; }

void taosWLockLatch(SRWLatch *pLatch) {
  SRWLatch oLatch, nLatch;
|
@ -26,8 +28,14 @@ void taosWLockLatch(SRWLatch *pLatch) {
|
|||
|
||||
// Set write flag
|
||||
while (1) {
|
||||
oLatch = atomic_load_32(pLatch);
|
||||
oLatch = atomic_load_64(pLatch);
|
||||
if (oLatch & TD_RWLATCH_WRITE_FLAG) {
|
||||
if (oLatch & TD_RWLATCH_REENTRANT_FLAG) {
|
||||
nLatch = (((oLatch >> 32) + 1) << 32) | (oLatch & 0xFFFFFFFF);
|
||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) break;
|
||||
|
||||
continue;
|
||||
}
|
||||
nLoops++;
|
||||
if (nLoops > 1000) {
|
||||
sched_yield();
|
||||
|
@ -37,14 +45,14 @@ void taosWLockLatch(SRWLatch *pLatch) {
|
|||
}
|
||||
|
||||
nLatch = oLatch | TD_RWLATCH_WRITE_FLAG;
|
||||
if (atomic_val_compare_exchange_32(pLatch, oLatch, nLatch) == oLatch) break;
|
||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) break;
|
||||
}
|
||||
|
||||
// wait for all reads end
|
||||
nLoops = 0;
|
||||
while (1) {
|
||||
oLatch = atomic_load_32(pLatch);
|
||||
if (oLatch == TD_RWLATCH_WRITE_FLAG) break;
|
||||
oLatch = atomic_load_64(pLatch);
|
||||
if (0 == (oLatch & 0xFFFFFFF)) break;
|
||||
nLoops++;
|
||||
if (nLoops > 1000) {
|
||||
sched_yield();
|
||||
|
@ -53,29 +61,50 @@ void taosWLockLatch(SRWLatch *pLatch) {
|
|||
}
|
||||
}
|
||||
|
||||
// no reentrant
|
||||
int32_t taosWTryLockLatch(SRWLatch *pLatch) {
|
||||
SRWLatch oLatch, nLatch;
|
||||
oLatch = atomic_load_32(pLatch);
|
||||
if (oLatch) {
|
||||
oLatch = atomic_load_64(pLatch);
|
||||
if (oLatch << 2) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
nLatch = oLatch | TD_RWLATCH_WRITE_FLAG;
|
||||
if (atomic_val_compare_exchange_32(pLatch, oLatch, nLatch) == oLatch) {
|
||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
void taosWUnLockLatch(SRWLatch *pLatch) { atomic_store_32(pLatch, 0); }
|
||||
void taosWUnLockLatch(SRWLatch *pLatch) {
|
||||
SRWLatch oLatch, nLatch, wLatch;
|
||||
|
||||
while (1) {
|
||||
oLatch = atomic_load_64(pLatch);
|
||||
|
||||
if (0 == (oLatch & TD_RWLATCH_REENTRANT_FLAG)) {
|
||||
atomic_store_64(pLatch, 0);
|
||||
break;
|
||||
}
|
||||
|
||||
wLatch = ((oLatch << 2) >> 34);
|
||||
if (wLatch) {
|
||||
nLatch = ((--wLatch) << 32) | TD_RWLATCH_REENTRANT_FLAG | TD_RWLATCH_WRITE_FLAG;
|
||||
} else {
|
||||
nLatch = TD_RWLATCH_REENTRANT_FLAG;
|
||||
}
|
||||
|
||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) break;
|
||||
}
|
||||
}
|
||||
|
||||
void taosRLockLatch(SRWLatch *pLatch) {
|
||||
SRWLatch oLatch, nLatch;
|
||||
int32_t nLoops = 0;
|
||||
|
||||
while (1) {
|
||||
oLatch = atomic_load_32(pLatch);
|
||||
oLatch = atomic_load_64(pLatch);
|
||||
if (oLatch & TD_RWLATCH_WRITE_FLAG) {
|
||||
nLoops++;
|
||||
if (nLoops > 1000) {
|
||||
|
@ -86,8 +115,8 @@ void taosRLockLatch(SRWLatch *pLatch) {
|
|||
}
|
||||
|
||||
nLatch = oLatch + 1;
|
||||
if (atomic_val_compare_exchange_32(pLatch, oLatch, nLatch) == oLatch) break;
|
||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) break;
|
||||
}
|
||||
}
|
||||
|
||||
void taosRUnLockLatch(SRWLatch *pLatch) { atomic_fetch_sub_32(pLatch, 1); }
|
||||
void taosRUnLockLatch(SRWLatch *pLatch) { atomic_fetch_sub_64(pLatch, 1); }
|
||||
|
|
|
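After this change the latch is a single 64-bit word: bit 62 marks it as reentrant, bits 32..61 count nested write acquisitions, bit 30 is the write flag, and the low bits hold the reader count. A small self-contained sketch of that packing; the shifts mirror the ((oLatch << 2) >> 34) extraction in taosWUnLockLatch, with unsigned casts so they stay well defined:

#include <stdint.h>

#define RW_REENTRANT_FLAG 0x4000000000000000LL  // bit 62: latch supports nested write locks
#define RW_WRITE_FLAG     0x40000000LL          // bit 30: a writer holds the latch
#define RW_READER_MASK    0xFFFFFFFLL           // low bits: active reader count

static int64_t rw_nested_writes(int64_t latch) {
  // cast to unsigned so both shifts are well defined; extracts bits 32..61
  return (int64_t)(((uint64_t)latch << 2) >> 34);
}

// One reentrant unlock step: drop one nesting level, clear the write flag only when the
// outermost write unlock happens, and always keep the reentrant marker bit.
static int64_t rw_wunlock_once(int64_t latch) {
  int64_t nested = rw_nested_writes(latch);
  if (nested > 0) {
    return ((nested - 1) << 32) | RW_REENTRANT_FLAG | RW_WRITE_FLAG;
  }
  return RW_REENTRANT_FLAG;
}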
@ -745,14 +745,14 @@ cmp_end:
|
|||
void taosSetAllDebugFlag(int32_t flag) {
|
||||
if (flag <= 0) return;
|
||||
|
||||
uDebugFlag = flag;
|
||||
rpcDebugFlag = flag;
|
||||
jniDebugFlag = flag;
|
||||
qDebugFlag = flag;
|
||||
cDebugFlag = flag;
|
||||
dDebugFlag = flag;
|
||||
vDebugFlag = flag;
|
||||
mDebugFlag = flag;
|
||||
cDebugFlag = flag;
|
||||
jniDebugFlag = flag;
|
||||
uDebugFlag = flag;
|
||||
rpcDebugFlag = flag;
|
||||
qDebugFlag = flag;
|
||||
wDebugFlag = flag;
|
||||
sDebugFlag = flag;
|
||||
tsdbDebugFlag = flag;
|
||||
|
@ -761,6 +761,5 @@ void taosSetAllDebugFlag(int32_t flag) {
|
|||
udfDebugFlag = flag;
|
||||
smaDebugFlag = flag;
|
||||
idxDebugFlag = flag;
|
||||
|
||||
uInfo("all debug flag are set to %d", flag);
|
||||
}
|
||||
|
|
|
@@ -58,9 +58,10 @@ class ConfigureyCluster:
            self.dnodes.append(dnode)
        return self.dnodes

    def create_dnode(self,conn):
    def create_dnode(self,conn,dnodeNum):
        tdSql.init(conn.cursor())
        for dnode in self.dnodes[1:]:
        dnodeNum=int(dnodeNum)
        for dnode in self.dnodes[1:dnodeNum]:
            # print(dnode.cfgDict)
            dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
            tdSql.execute(" create dnode '%s';"%dnode_id)
|
@ -122,28 +122,29 @@ echo "secondEp ${HOSTNAME}:7200" >> $TAOS_CFG
|
|||
echo "fqdn ${HOSTNAME}" >> $TAOS_CFG
|
||||
echo "serverPort ${NODE}" >> $TAOS_CFG
|
||||
echo "supportVnodes 1024" >> $TAOS_CFG
|
||||
echo "statusInterval 1" >> $TAOS_CFG
|
||||
echo "dataDir $DATA_DIR" >> $TAOS_CFG
|
||||
echo "logDir $LOG_DIR" >> $TAOS_CFG
|
||||
echo "debugFlag 0" >> $TAOS_CFG
|
||||
echo "mDebugFlag 143" >> $TAOS_CFG
|
||||
echo "dDebugFlag 143" >> $TAOS_CFG
|
||||
echo "vDebugFlag 143" >> $TAOS_CFG
|
||||
echo "tqDebugFlag 143" >> $TAOS_CFG
|
||||
echo "tsdbDebugFlag 143" >> $TAOS_CFG
|
||||
echo "cDebugFlag 143" >> $TAOS_CFG
|
||||
echo "jniDebugFlag 143" >> $TAOS_CFG
|
||||
echo "qDebugFlag 143" >> $TAOS_CFG
|
||||
echo "rpcDebugFlag 143" >> $TAOS_CFG
|
||||
echo "sDebugFlag 143" >> $TAOS_CFG
|
||||
echo "wDebugFlag 143" >> $TAOS_CFG
|
||||
echo "idxDebugFlag 143" >> $TAOS_CFG
|
||||
echo "fsDebugFlag 143" >> $TAOS_CFG
|
||||
echo "udfDebugFlag 143" >> $TAOS_CFG
|
||||
echo "smaDebugFlag 143" >> $TAOS_CFG
|
||||
echo "tmrDebugFlag 131" >> $TAOS_CFG
|
||||
echo "uDebugFlag 131" >> $TAOS_CFG
|
||||
echo "rpcDebugFlag 131" >> $TAOS_CFG
|
||||
echo "jniDebugFlag 143" >> $TAOS_CFG
|
||||
echo "qDebugFlag 143" >> $TAOS_CFG
|
||||
echo "cDebugFlag 143" >> $TAOS_CFG
|
||||
echo "dDebugFlag 143" >> $TAOS_CFG
|
||||
echo "vDebugFlag 143" >> $TAOS_CFG
|
||||
echo "mDebugFlag 143" >> $TAOS_CFG
|
||||
echo "wDebugFlag 143" >> $TAOS_CFG
|
||||
echo "sDebugFlag 143" >> $TAOS_CFG
|
||||
echo "tsdbDebugFlag 143" >> $TAOS_CFG
|
||||
echo "tqDebugFlag 143" >> $TAOS_CFG
|
||||
echo "fsDebugFlag 143" >> $TAOS_CFG
|
||||
echo "idxDebugFlag 143" >> $TAOS_CFG
|
||||
echo "udfDebugFlag 143" >> $TAOS_CFG
|
||||
echo "smaDebugFlag 143" >> $TAOS_CFG
|
||||
echo "idxDebugFlag 143" >> $TAOS_CFG
|
||||
echo "numOfLogLines 20000000" >> $TAOS_CFG
|
||||
echo "statusInterval 1" >> $TAOS_CFG
|
||||
echo "asyncLog 0" >> $TAOS_CFG
|
||||
echo "locale en_US.UTF-8" >> $TAOS_CFG
|
||||
echo "telemetryReporting 0" >> $TAOS_CFG
|
||||
|
|
|
@ -48,7 +48,7 @@ sql insert into ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s
|
|||
|
||||
print =============== step6: select data
|
||||
sql select * from ct1
|
||||
#sql select * from stb
|
||||
sql select * from stb
|
||||
|
||||
_OVER:
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
|
|
|
@ -47,7 +47,7 @@ sql insert into ct1 values(now+0s, 10, 2.0, 3.0)
|
|||
sql insert into ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
|
||||
|
||||
print =============== step6: select data
|
||||
#sql select * from ct1
|
||||
sql select * from ct1
|
||||
#sql select * from stb
|
||||
|
||||
_OVER:
|
||||
|
@ -58,7 +58,7 @@ print ----> start to check if there are ERRORS in vagrind log file for each dnod
|
|||
system_content sh/checkValgrind.sh -n dnode1
|
||||
|
||||
print cmd return result ----> [ $system_content ]
|
||||
if $system_content <= 0 then
|
||||
if $system_content <= 2 then
|
||||
return 0
|
||||
endi
|
||||
|
||||
|
|
|
@ -1,262 +0,0 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import threading as thd
|
||||
import multiprocessing as mp
|
||||
from numpy.lib.function_base import insert
|
||||
import taos
|
||||
from taos import *
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
import datetime as dt
|
||||
from datetime import datetime
|
||||
from ctypes import *
|
||||
import time
|
||||
# constant define
|
||||
WAITS = 5 # wait seconds
|
||||
|
||||
class TDTestCase:
|
||||
#
|
||||
# --------------- main frame -------------------
|
||||
def caseDescription(self):
|
||||
'''
|
||||
limit and offset keyword function test cases;
|
||||
case1: limit offset base function test
|
||||
case2: offset return valid
|
||||
'''
|
||||
return
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files or "taosd.exe" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root)-len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
# init
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
# tdSql.prepare()
|
||||
# self.create_tables();
|
||||
self.ts = 1500000000000
|
||||
|
||||
# stop
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
# --------------- case -------------------
|
||||
|
||||
|
||||
def newcon(self,host,cfg):
|
||||
user = "root"
|
||||
password = "taosdata"
|
||||
port =6030
|
||||
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
|
||||
print(con)
|
||||
return con
|
||||
|
||||
def test_stmt_set_tbname_tag(self,conn):
|
||||
dbname = "stmt_tag"
|
||||
|
||||
try:
|
||||
conn.execute("drop database if exists %s" % dbname)
|
||||
conn.execute("create database if not exists %s PRECISION 'us' " % dbname)
|
||||
conn.select_db(dbname)
|
||||
conn.execute("create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
|
||||
bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
|
||||
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , vc varchar(100)) tags (t1 timestamp, t2 bool,\
|
||||
t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \
|
||||
t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)")
|
||||
|
||||
stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) \
|
||||
values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
|
||||
tags = new_bind_params(16)
|
||||
tags[0].timestamp(1626861392589123, PrecisionEnum.Microseconds)
|
||||
tags[1].bool(True)
|
||||
tags[2].bool(False)
|
||||
tags[3].tinyint(2)
|
||||
tags[4].smallint(3)
|
||||
tags[5].int(4)
|
||||
tags[6].bigint(5)
|
||||
tags[7].tinyint_unsigned(6)
|
||||
tags[8].smallint_unsigned(7)
|
||||
tags[9].int_unsigned(8)
|
||||
tags[10].bigint_unsigned(9)
|
||||
tags[11].float(10.1)
|
||||
tags[12].double(10.11)
|
||||
tags[13].binary("hello")
|
||||
tags[14].nchar("stmt")
|
||||
tags[15].timestamp(1626861392589, PrecisionEnum.Milliseconds)
|
||||
stmt.set_tbname_tags("tb1", tags)
|
||||
params = new_multi_binds(17)
|
||||
params[0].timestamp((1626861392589111, 1626861392590111, 1626861392591111))
|
||||
params[1].bool((True, None, False))
|
||||
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
|
||||
params[3].tinyint([0, 127, None])
|
||||
params[4].smallint([3, None, 2])
|
||||
params[5].int([3, 4, None])
|
||||
params[6].bigint([3, 4, None])
|
||||
params[7].tinyint_unsigned([3, 4, None])
|
||||
params[8].smallint_unsigned([3, 4, None])
|
||||
params[9].int_unsigned([3, 4, None])
|
||||
params[10].bigint_unsigned([3, 4, 5])
|
||||
params[11].float([3, None, 1])
|
||||
params[12].double([3, None, 1.2])
|
||||
params[13].binary(["abc", "dddafadfadfadfadfa", None])
|
||||
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
|
||||
params[15].timestamp([None, None, 1626861392591])
|
||||
params[16].binary(["涛思数据16", None, "a long string with 中文-字符"])
|
||||
|
||||
stmt.bind_param_batch(params)
|
||||
stmt.execute()
|
||||
|
||||
assert stmt.affected_rows == 3
|
||||
|
||||
#query all
|
||||
querystmt1=conn.statement("select * from log where bu < ?")
|
||||
queryparam1=new_bind_params(1)
|
||||
print(type(queryparam1))
|
||||
queryparam1[0].int(10)
|
||||
querystmt1.bind_param(queryparam1)
|
||||
querystmt1.execute()
|
||||
result1=querystmt1.use_result()
|
||||
rows1=result1.fetch_all()
|
||||
print(rows1[0])
|
||||
print(rows1[1])
|
||||
print(rows1[2])
|
||||
assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111"
|
||||
assert rows1[0][10] == 3
|
||||
assert rows1[1][10] == 4
|
||||
|
||||
#query: Numeric Functions
|
||||
querystmt2=conn.statement("select abs(?) from log where bu < ?")
|
||||
queryparam2=new_bind_params(2)
|
||||
print(type(queryparam2))
|
||||
queryparam2[0].int(5)
|
||||
queryparam2[1].int(5)
|
||||
querystmt2.bind_param(queryparam2)
|
||||
querystmt2.execute()
|
||||
result2=querystmt2.use_result()
|
||||
rows2=result2.fetch_all()
|
||||
print("2",rows2)
|
||||
assert rows2[0][0] == 5
|
||||
assert rows2[1][0] == 5
|
||||
|
||||
|
||||
#query: Numeric Functions and escapes
|
||||
|
||||
querystmt3=conn.statement("select abs(?) from log where nn= 'a? long string with 中文字符' ")
|
||||
queryparam3=new_bind_params(1)
|
||||
print(type(queryparam3))
|
||||
queryparam3[0].int(5)
|
||||
querystmt3.bind_param(queryparam3)
|
||||
querystmt3.execute()
|
||||
result3=querystmt3.use_result()
|
||||
rows3=result3.fetch_all()
|
||||
print("3",rows3)
|
||||
assert rows3 == []
|
||||
|
||||
#query: string Functions
|
||||
|
||||
querystmt9=conn.statement("select CHAR_LENGTH(?) from log ")
|
||||
queryparam9=new_bind_params(1)
|
||||
print(type(queryparam9))
|
||||
queryparam9[0].binary('中文字符')
|
||||
querystmt9.bind_param(queryparam9)
|
||||
querystmt9.execute()
|
||||
result9=querystmt9.use_result()
|
||||
rows9=result9.fetch_all()
|
||||
print("9",rows9)
|
||||
assert rows9[0][0] == 12, 'fourth case is failed'
|
||||
assert rows9[1][0] == 12, 'fourth case is failed'
|
||||
|
||||
#query: conversion Functions
|
||||
|
||||
querystmt4=conn.statement("select cast( ? as bigint) from log ")
|
||||
queryparam4=new_bind_params(1)
|
||||
print(type(queryparam4))
|
||||
queryparam4[0].binary('1232a')
|
||||
querystmt4.bind_param(queryparam4)
|
||||
querystmt4.execute()
|
||||
result4=querystmt4.use_result()
|
||||
rows4=result4.fetch_all()
|
||||
print("5",rows4)
|
||||
assert rows4[0][0] == 1232
|
||||
assert rows4[1][0] == 1232
|
||||
|
||||
querystmt4=conn.statement("select cast( ? as binary(10)) from log ")
|
||||
queryparam4=new_bind_params(1)
|
||||
print(type(queryparam4))
|
||||
queryparam4[0].int(123)
|
||||
querystmt4.bind_param(queryparam4)
|
||||
querystmt4.execute()
|
||||
result4=querystmt4.use_result()
|
||||
rows4=result4.fetch_all()
|
||||
print("6",rows4)
|
||||
assert rows4[0][0] == '123'
|
||||
assert rows4[1][0] == '123'
|
||||
|
||||
# #query: datatime Functions
|
||||
|
||||
# querystmt4=conn.statement(" select timediff('2021-07-21 17:56:32.590111',?,1s) from log ")
|
||||
# queryparam4=new_bind_params(1)
|
||||
# print(type(queryparam4))
|
||||
# queryparam4[0].timestamp(1626861392591111)
|
||||
# querystmt4.bind_param(queryparam4)
|
||||
# querystmt4.execute()
|
||||
# result4=querystmt4.use_result()
|
||||
# rows4=result4.fetch_all()
|
||||
# print("7",rows4)
|
||||
# assert rows4[0][0] == 1, 'seventh case is failed'
|
||||
# assert rows4[1][0] == 1, 'seventh case is failed'
|
||||
|
||||
|
||||
|
||||
# conn.execute("drop database if exists %s" % dbname)
|
||||
conn.close()
|
||||
|
||||
except Exception as err:
|
||||
# conn.execute("drop database if exists %s" % dbname)
|
||||
conn.close()
|
||||
raise err
|
||||
|
||||
def run(self):
|
||||
buildPath = self.getBuildPath()
|
||||
config = buildPath+ "../sim/dnode1/cfg/"
|
||||
host="localhost"
|
||||
connectstmt=self.newcon(host,config)
|
||||
self.test_stmt_set_tbname_tag(connectstmt)
|
||||
|
||||
return
|
||||
|
||||
|
||||
# add case with filename
|
||||
#
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
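The deleted test above exercised the taos Python connector's parameter-binding API (conn.statement, new_bind_params, new_multi_binds, bind_param_batch). A trimmed-down sketch of that flow is kept here for reference; the database name and the single-column schema are simplified assumptions, not the schema the test used.

import taos
from taos import new_bind_params, new_multi_binds

conn = taos.connect()                                   # connection options omitted
conn.execute("drop database if exists stmt_demo")
conn.execute("create database if not exists stmt_demo")
conn.select_db("stmt_demo")
conn.execute("create table if not exists s1 (ts timestamp, v int) tags (t1 int)")

stmt = conn.statement("insert into ? using s1 tags (?) values (?, ?)")
tags = new_bind_params(1)
tags[0].int(1)
stmt.set_tbname_tags("tb_demo", tags)

params = new_multi_binds(2)
params[0].timestamp([1626861392589, 1626861392590])     # millisecond-precision database
params[1].int([10, None])                               # None is inserted as NULL
stmt.bind_param_batch(params)
stmt.execute()
assert stmt.affected_rows == 2
conn.close()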
@ -360,15 +360,15 @@ class TDTestCase:
|
|||
tdSql.checkRows(229)
|
||||
tdSql.checkData(0,0,0)
|
||||
tdSql.query("select diff(c1) from stb1 partition by tbname ")
|
||||
tdSql.checkRows(199)
|
||||
tdSql.checkRows(190)
|
||||
# tdSql.query("select diff(st1) from stb1 partition by tbname")
|
||||
# tdSql.checkRows(229)
|
||||
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
|
||||
tdSql.checkRows(199)
|
||||
tdSql.checkRows(190)
|
||||
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
|
||||
tdSql.checkRows(199)
|
||||
tdSql.checkRows(190)
|
||||
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
|
||||
tdSql.checkRows(199)
|
||||
tdSql.checkRows(190)
|
||||
|
||||
# # bug need fix
|
||||
# tdSql.query("select diff(st1+c1) from stb1 partition by tbname slimit 1 ")
|
||||
|
@ -378,7 +378,7 @@ class TDTestCase:
|
|||
|
||||
# bug need fix
|
||||
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
|
||||
tdSql.checkRows(199)
|
||||
tdSql.checkRows(190)
|
||||
|
||||
# bug need fix
|
||||
# tdSql.query("select tbname , diff(c1) from stb1 partition by tbname")
|
||||
|
|
|
@ -678,15 +678,15 @@ class TDTestCase:
|
|||
tdSql.checkRows(68)
|
||||
tdSql.checkData(0,0,1.000000000)
|
||||
tdSql.query("select mavg(c1,3) from stb1 partition by tbname ")
|
||||
tdSql.checkRows(38)
|
||||
tdSql.checkRows(20)
|
||||
# tdSql.query("select mavg(st1,3) from stb1 partition by tbname")
|
||||
# tdSql.checkRows(38)
|
||||
tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
|
||||
tdSql.checkRows(38)
|
||||
tdSql.checkRows(20)
|
||||
tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
|
||||
tdSql.checkRows(38)
|
||||
tdSql.checkRows(20)
|
||||
tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
|
||||
tdSql.checkRows(38)
|
||||
tdSql.checkRows(20)
|
||||
|
||||
# # bug need fix
|
||||
# tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname slimit 1 ")
|
||||
|
@ -696,7 +696,7 @@ class TDTestCase:
|
|||
|
||||
# bug need fix
|
||||
tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
|
||||
tdSql.checkRows(38)
|
||||
tdSql.checkRows(20)
|
||||
|
||||
# bug need fix
|
||||
# tdSql.query("select tbname , mavg(c1,3) from stb1 partition by tbname")
|
||||
|
|
|
@ -32,9 +32,9 @@ class TDTestCase:
#
# --------------- main frame -------------------
#
clientCfgDict = {'queryPolicy': '1','debugFlag': 135}
clientCfgDict = {'queryPolicy': '1','debugFlag': 143}
clientCfgDict["queryPolicy"] = '1'
clientCfgDict["debugFlag"] = 131
clientCfgDict["debugFlag"] = 143

updatecfgDict = {'clientCfg': {}}
updatecfgDict = {'debugFlag': 143}
@ -278,22 +278,7 @@ class TDTestCase:
|
|||
tdSql.checkData(0,0,rowsPerSTable)
|
||||
return
|
||||
|
||||
# test case1 base
|
||||
def test_case1(self):
|
||||
#stableCount=threadNumbersCtb
|
||||
parameterDict = {'vgroups': 1, \
|
||||
'threadNumbersCtb': 5, \
|
||||
'threadNumbersIda': 5, \
|
||||
'stableCount': 5, \
|
||||
'tablesPerStb': 50, \
|
||||
'rowsPerTable': 10, \
|
||||
'dbname': 'db', \
|
||||
'stbname': 'stb', \
|
||||
'host': 'localhost', \
|
||||
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
|
||||
|
||||
tdLog.debug("-----create database and muti-thread create tables test------- ")
|
||||
|
||||
# test case : Switch back and forth among the three queryPolicy(1\2\3)
|
||||
def test_case1(self):
|
||||
self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 2, 1*10)
|
||||
tdSql.execute("use db1;")
|
||||
|
@ -407,6 +392,7 @@ class TDTestCase:
|
|||
tdSql.query("select c0,c1 from stb11_1 where (c0>1000) union all select c0,c1 from stb11_1 where c0>2000;")
|
||||
assert unionallQnode==tdSql.queryResult
|
||||
|
||||
# test case : queryPolicy = 2
|
||||
def test_case2(self):
|
||||
self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 10, 2, 1*10)
|
||||
tdSql.query("show qnodes")
|
||||
|
@ -438,8 +424,9 @@ class TDTestCase:
|
|||
tdSql.query("select max(c1) from stb10_0;")
|
||||
tdSql.query("select min(c1) from stb11_0;")
|
||||
|
||||
def test_case3(self):
|
||||
# test case : queryPolicy = 3
|
||||
|
||||
def test_case3(self):
|
||||
tdSql.execute('alter local "queryPolicy" "3"')
|
||||
tdLog.debug("create qnode on dnode 1")
|
||||
tdSql.execute("create qnode on dnode 1")
|
||||
|
@ -472,10 +459,16 @@ class TDTestCase:
|
|||
# run case
|
||||
def run(self):
|
||||
# test qnode
|
||||
tdLog.debug(" test_case1 ............ [start]")
|
||||
self.test_case1()
|
||||
tdLog.debug(" test_case1 ............ [OK]")
|
||||
tdLog.debug(" test_case2 ............ [start]")
|
||||
self.test_case2()
|
||||
|
||||
tdLog.debug(" test_case2 ............ [OK]")
|
||||
tdLog.debug(" test_case3 ............ [start]")
|
||||
self.test_case3()
|
||||
tdLog.debug(" test_case3 ............ [OK]")
|
||||
|
||||
# tdLog.debug(" LIMIT test_case3 ............ [OK]")
|
||||
|
||||
def stop(self):
|
||||
|
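The cases above switch queryPolicy between 1, 2 and 3 and then run queries to confirm how they are routed. A condensed sketch of the switch performed in test_case3, assuming the test framework has already initialized the tdSql wrapper from util.sql; the row-count check is a simplification.

from util.sql import tdSql   # assumes the framework has set up tdSql

tdSql.execute('alter local "queryPolicy" "3"')   # route queries through qnodes
tdSql.execute("create qnode on dnode 1")
tdSql.query("show qnodes")
tdSql.checkRows(1)                                # expect the single qnode just created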
@ -487,4 +480,4 @@ class TDTestCase:
|
|||
# add case with filename
|
||||
#
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
|
|
|
@ -0,0 +1,74 @@
|
|||
import taos
|
||||
import sys
|
||||
import datetime
|
||||
import inspect
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), True)
|
||||
|
||||
def prepareData(self):
|
||||
database="db_tsbs"
|
||||
ts=1451606400000
|
||||
tdSql.execute(f"create database {database};")
|
||||
tdSql.execute(f"use {database} ")
|
||||
tdSql.execute('''
|
||||
create table readings (ts timestamp,latitude double,longitude double,elevation double,velocity double,heading double,grade double,fuel_consumption double,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30));
|
||||
''')
|
||||
tdSql.execute('''
|
||||
create table diagnostics (ts timestamp,fuel_state double,current_load double,status bigint,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)) ;
|
||||
''')
|
||||
|
||||
for i in range(10):
|
||||
tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
|
||||
tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
|
||||
for j in range(10):
|
||||
for i in range(10):
|
||||
tdSql.execute(
|
||||
f"insert into rct{j} values ( {ts+i*10000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into dct{j} values ( {ts+i*10000}, {1+i*0.1},{1400+i*15}, {1+i},{1500+i*20}, {150+i*2},{5+i} )"
|
||||
)
|
||||
|
||||
# def check_avg(self ,origin_query , check_query):
|
||||
# avg_result = tdSql.getResult(origin_query)
|
||||
# origin_result = tdSql.getResult(check_query)
|
||||
|
||||
# check_status = True
|
||||
# for row_index , row in enumerate(avg_result):
|
||||
# for col_index , elem in enumerate(row):
|
||||
# if avg_result[row_index][col_index] != origin_result[row_index][col_index]:
|
||||
# check_status = False
|
||||
# if not check_status:
|
||||
# tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query )
|
||||
# sys.exit(1)
|
||||
# else:
|
||||
# tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
|
||||
|
||||
|
||||
def tsbsIotQuery(self):
|
||||
tdSql.execute("use db_tsbs")
|
||||
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
|
||||
|
||||
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
|
||||
tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
|
||||
self.tsbsIotQuery()
|
||||
self.tsbsIotQuery()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -124,9 +124,9 @@ class TDTestCase:
tdSql.query('show databases;')
tdSql.checkData(2,5,'no_strict')
tdSql.error('alter database db strict 0')
tdSql.execute('alter database db strict 1')
tdSql.query('show databases;')
tdSql.checkData(2,5,'strict')
# tdSql.execute('alter database db strict 1')
# tdSql.query('show databases;')
# tdSql.checkData(2,5,'strict')

def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
|
@ -0,0 +1,229 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
from numpy import row_stack
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# fisrt add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db0_0',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'stbNumbers': 2,
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 200,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
"rowsPerTbl": 1000,
|
||||
"batchNum": 5000
|
||||
}
|
||||
hostname = socket.gethostname()
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
|
||||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
# create database and stable
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
tdDnodes=cluster.dnodes
|
||||
# dnode6=cluster.addDnode(6)
|
||||
# tdDnodes.append(dnode6)
|
||||
# tdDnodes = ClusterDnodes(tdDnodes)
|
||||
stopcount =0
|
||||
threads=[]
|
||||
|
||||
# create stable:stb_0
|
||||
stableName= paraDict['stbName']
|
||||
newTdSql=tdCom.newTdSql()
|
||||
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
|
||||
#create child table:ctb_0
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
newTdSql=tdCom.newTdSql()
|
||||
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
|
||||
#insert date
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
newTdSql=tdCom.newTdSql()
|
||||
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||
for tr in threads:
|
||||
tr.start()
|
||||
dnode6Port=int(6030+5*100)
|
||||
tdSql.execute("create dnode '%s:%d'"%(hostname,dnode6Port))
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
while stopcount < restartNumbers:
|
||||
tdLog.info(" restart loop: %d"%stopcount )
|
||||
if stopRole == "mnode":
|
||||
for i in range(mnodeNums):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i+mnodeNums].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "dnode":
|
||||
for i in range(dnodeNumbers):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
tdLog.info("123")
|
||||
else:
|
||||
print("456")
|
||||
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
for tr in threads:
|
||||
tr.join()
|
||||
|
||||
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||
|
||||
tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||
tdSql.query("show stables")
|
||||
tdSql.checkRows(paraDict["stbNumbers"])
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
tdSql.query("select * from %s"%stableName)
|
||||
tdSql.checkRows(rowsPerStb)
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
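The cluster case above stops worker threads with _async_raise/stopThread, which asks CPython to raise SystemExit inside a running thread through ctypes. A standalone sketch of that mechanism with a made-up worker follows; wrapping the thread id in ctypes.c_long is a defensive addition, and the exception only lands once the target thread re-enters Python bytecode.

import ctypes
import inspect
import threading
import time

def async_raise(tid, exctype):
    """Raise exctype inside the thread identified by tid (CPython only)."""
    if not inspect.isclass(exctype):
        exctype = type(exctype)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
    elif res != 1:
        # more than one thread state was touched: undo and fail loudly
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
        raise SystemError("PyThreadState_SetAsyncExc failed")

def worker():
    while True:          # stands in for a long-running insert thread
        time.sleep(0.1)

t = threading.Thread(target=worker)
t.start()
async_raise(t.ident, SystemExit)   # equivalent of stopThread(t) in the test above
t.join(timeout=1)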
@ -0,0 +1,223 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
from numpy import row_stack
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# fisrt add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db0_0',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'stbNumbers': 2,
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 200,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
"rowsPerTbl": 1000,
|
||||
"batchNum": 5000
|
||||
}
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
|
||||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
# create database and stable
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
threads=[]
|
||||
|
||||
# create stable:stb_0
|
||||
stableName= paraDict['stbName']
|
||||
newTdSql=tdCom.newTdSql()
|
||||
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
|
||||
#create child table:ctb_0
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
newTdSql=tdCom.newTdSql()
|
||||
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
|
||||
#insert date
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
newTdSql=tdCom.newTdSql()
|
||||
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||
for tr in threads:
|
||||
tr.start()
|
||||
for tr in threads:
|
||||
tr.join()
|
||||
|
||||
while stopcount < restartNumbers:
|
||||
tdLog.info(" restart loop: %d"%stopcount )
|
||||
if stopRole == "mnode":
|
||||
for i in range(mnodeNums):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i+mnodeNums].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "dnode":
|
||||
for i in range(dnodeNumbers):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
tdLog.info("123")
|
||||
else:
|
||||
print("456")
|
||||
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
|
||||
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||
|
||||
tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||
tdSql.query("show stables")
|
||||
tdSql.checkRows(paraDict["stbNumbers"])
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
tdSql.query("select * from %s"%stableName)
|
||||
tdSql.checkRows(rowsPerStb)
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -143,42 +143,43 @@ class TDTestCase:
|
|||
threads=[]
|
||||
for i in range(restartNumbers):
|
||||
dbNameIndex = '%s%d'%(paraDict["dbName"],i)
|
||||
threads.append(threading.Thread(target=clusterComCreate.create_databases, args=(tdSql, dbNameIndex,paraDict["dbNumbers"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])))
|
||||
newTdSql=tdCom.newTdSql()
|
||||
threads.append(threading.Thread(target=clusterComCreate.create_databases, args=(newTdSql, dbNameIndex,paraDict["dbNumbers"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])))
|
||||
|
||||
for tr in threads:
|
||||
tr.start()
|
||||
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
# while stopcount < restartNumbers:
|
||||
# tdLog.info(" restart loop: %d"%stopcount )
|
||||
# if stopRole == "mnode":
|
||||
# for i in range(mnodeNums):
|
||||
# tdDnodes[i].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i].starttaosd()
|
||||
# # sleep(10)
|
||||
# elif stopRole == "vnode":
|
||||
# for i in range(vnodeNumbers):
|
||||
# tdDnodes[i+mnodeNums].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i+mnodeNums].starttaosd()
|
||||
# # sleep(10)
|
||||
# elif stopRole == "dnode":
|
||||
# for i in range(dnodeNumbers):
|
||||
# tdDnodes[i].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i].starttaosd()
|
||||
# # sleep(10)
|
||||
while stopcount < restartNumbers:
|
||||
tdLog.info(" restart loop: %d"%stopcount )
|
||||
if stopRole == "mnode":
|
||||
for i in range(mnodeNums):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i+mnodeNums].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "dnode":
|
||||
for i in range(dnodeNumbers):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
|
||||
# # dnodeNumbers don't include database of schema
|
||||
# if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
# tdLog.info("check dnodes status is ready")
|
||||
# else:
|
||||
# tdLog.info("check dnodes status is not ready")
|
||||
# self.stopThread(threads)
|
||||
# tdLog.exit("one or more of dnodes failed to start ")
|
||||
# # self.check3mnode()
|
||||
# stopcount+=1
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
tdLog.info("check dnodes status is ready")
|
||||
else:
|
||||
tdLog.info("check dnodes status is not ready")
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
|
||||
for tr in threads:
|
||||
tr.join()
|
||||
|
|
|
@ -92,7 +92,7 @@ class TDTestCase:

def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db0_0',
paraDict = {'dbName': 'db',
'dropFlag': 1,
'event': '',
'vgroups': 4,
@ -143,7 +143,8 @@ class TDTestCase:
threads=[]
for i in range(restartNumbers):
stableName= '%s%d'%(paraDict['stbName'],i)
threads.append(threading.Thread(target=clusterComCreate.create_stables, args=(tdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])))
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.create_stables, args=(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])))

for tr in threads:
tr.start()
|
@ -0,0 +1,221 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
from numpy import row_stack
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# fisrt add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db0_0',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'stbNumbers': 2,
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 200,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
"rowsPerTbl": 10000,
|
||||
"batchNum": 5000
|
||||
}
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
|
||||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
# create database and stable
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
threads=[]
|
||||
|
||||
# create stable:stb_0
|
||||
stableName= paraDict['stbName']
|
||||
newTdSql=tdCom.newTdSql()
|
||||
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
|
||||
#create child table:ctb_0
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
newTdSql=tdCom.newTdSql()
|
||||
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
|
||||
#insert date
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
newTdSql=tdCom.newTdSql()
|
||||
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||
for tr in threads:
|
||||
tr.start()
|
||||
while stopcount < restartNumbers:
|
||||
tdLog.info(" restart loop: %d"%stopcount )
|
||||
if stopRole == "mnode":
|
||||
for i in range(mnodeNums):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i+mnodeNums].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "dnode":
|
||||
for i in range(dnodeNumbers):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
tdLog.info("123")
|
||||
else:
|
||||
print("456")
|
||||
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
|
||||
for tr in threads:
|
||||
tr.join()
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||
|
||||
tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||
tdSql.query("show stables")
|
||||
tdSql.checkRows(paraDict["stbNumbers"])
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
tdSql.query("select * from %s"%stableName)
|
||||
tdSql.checkRows(rowsPerStb)
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -116,7 +116,8 @@ class TDTestCase:
threads=[]
for i in range(restartNumbers):
dbNameIndex = '%s%d'%(paraDict["dbName"],i)
threads.append(threading.Thread(target=clusterComCreate.create_databases, args=(tdSql, dbNameIndex,paraDict["dbNumbers"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])))
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.create_databases, args=(newTdSql, dbNameIndex,paraDict["dbNumbers"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])))

for tr in threads:
tr.start()
|
@ -30,8 +30,8 @@ class TDTestCase:
self.TDDnodes = None
tdSql.init(conn.cursor())
self.host = socket.gethostname()


print(tdSql)

def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -113,6 +113,7 @@ class TDTestCase:
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
dbNumbers = 1

print(tdSql)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -141,9 +142,11 @@ class TDTestCase:
tdDnodes=cluster.dnodes
stopcount =0
threads=[]

for i in range(restartNumbers):
stableName= '%s%d'%(paraDict['stbName'],i)
threads.append(threading.Thread(target=clusterComCreate.create_stables, args=(tdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])))
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.create_stables, args=(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])))

for tr in threads:
tr.start()
|
@ -152,7 +152,7 @@ class ClusterComCreate:
if (i % 2 == 0):
tagValue = 'shanghai'

sql += " %s%d using %s tags(%d, '%s')"%(ctbPrefix,i,stbName,i+1, tagValue)
sql += " %s_%d using %s tags(%d, '%s')"%(ctbPrefix,i,stbName,i+1, tagValue)
if (i > 0) and (i%100 == 0):
tsql.execute(sql)
sql = pre_create

@ -173,13 +173,13 @@ class ClusterComCreate:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
for i in range(ctbNum):
sql += " %s%d values "%(stbName,i)
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
sql += "(%d, %d, %d, 'mnode_%d') "%(startTs + j, j, j,j)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
tsql.execute(sql)
if j < rowsPerTbl - 1:
sql = "insert into %s%d values " %(stbName,i)
sql = "insert into %s_%d values " %(stbName,i)
else:
sql = "insert into "
#end sql
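The two hunks above rename generated child tables from <prefix><i> to <prefix>_<i> and add an extra integer column to each generated row. A quick check of the strings those format expressions now produce, with made-up values:

ctbPrefix, stbName, i, tagValue = "ctb", "stb_0", 3, "shanghai"
print(" %s_%d using %s tags(%d, '%s')" % (ctbPrefix, i, stbName, i + 1, tagValue))
# ->  ctb_3 using stb_0 tags(4, 'shanghai')

startTs, j = 1640966400000, 7
print("(%d, %d, %d, 'mnode_%d') " % (startTs + j, j, j, j))
# -> (1640966400007, 7, 7, 'mnode_7')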
|
@ -122,10 +122,10 @@ python3 ./test.py -f 2-query/and_or_for_byte.py
python3 ./test.py -f 2-query/function_null.py
#python3 ./test.py -f 2-query/queryQnode.py

#python3 ./test.py -f 6-cluster/5dnode1mnode.py
#python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode1mnode.py
#BUG python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
#python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3
#python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3

@ -135,7 +135,8 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py
# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3

# BUG Redict python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 5 -M 3

python3 ./test.py -f 7-tmq/basic5.py
python3 ./test.py -f 7-tmq/subscribeDb.py
@ -64,8 +64,9 @@ if __name__ == "__main__":
updateCfgDict = {}
execCmd = ""
queryPolicy = 1
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:', [
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy'])
createDnodeNums = 1
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:', [
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(

@ -81,9 +82,11 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-d update cfg dict, base64 json str')
tdLog.printNoPrefix('-k not kill valgrind processer')
tdLog.printNoPrefix('-e eval str to run')
tdLog.printNoPrefix('-N create dnodes numbers in clusters')
tdLog.printNoPrefix('-N start dnodes numbers in clusters')
tdLog.printNoPrefix('-M create mnode numbers in clusters')
tdLog.printNoPrefix('-Q set queryPolicy in one dnode')
tdLog.printNoPrefix('-C create Dnode Numbers in one cluster')


sys.exit(0)

@ -143,6 +146,9 @@ if __name__ == "__main__":
if key in ['-Q', '--queryPolicy']:
queryPolicy = value

if key in ['-C', '--createDnodeNums']:
createDnodeNums = value

if not execCmd == "":
tdDnodes.init(deployPath)
print(execCmd)

@ -239,7 +245,11 @@ if __name__ == "__main__":
host,
config=tdDnodes.getSimCfgPath())
print(tdDnodes.getSimCfgPath(),host)
cluster.create_dnode(conn)
if createDnodeNums == 1:
createDnodeNums=dnodeNums
else:
createDnodeNums=createDnodeNums
cluster.create_dnode(conn,createDnodeNums)
try:
if cluster.check_dnode(conn) :
print("check dnode ready")

@ -314,7 +324,11 @@ if __name__ == "__main__":
host,
config=tdDnodes.getSimCfgPath())
print(tdDnodes.getSimCfgPath(),host)
cluster.create_dnode(conn)
if createDnodeNums == 1:
createDnodeNums=dnodeNums
else:
createDnodeNums=createDnodeNums
cluster.create_dnode(conn,createDnodeNums)
try:
if cluster.check_dnode(conn) :
print("check dnode ready")
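The new -C/--createDnodeNums option above defaults to 1, in which case the number of dnodes to create falls back to the -N value before cluster.create_dnode is called. The fallback as a standalone function, for illustration only:

def resolve_create_dnodes(createDnodeNums, dnodeNums):
    # default -C of 1 means "create one dnode per started dnode (-N)"
    return dnodeNums if createDnodeNums == 1 else createDnodeNums

assert resolve_create_dnodes(1, 5) == 5   # -C left at default
assert resolve_create_dnodes(3, 6) == 3   # explicit -C wins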
|
@ -1,25 +1,46 @@
IF (TD_WEBSOCKET)
MESSAGE("${Green} use libtaos-ws${ColourReset}")
IF (TD_LINUX)
include(ExternalProject)
ExternalProject_Add(taosws-rs
PREFIX "taosws-rs"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs
BUILD_ALWAYS off
DEPENDS taos
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config"
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
COMMAND cargo build --release -p taos-ws-sys
COMMAND ./taos-ws-sys/ci/package.sh
INSTALL_COMMAND
COMMAND cmake -E copy target/libtaosws/libtaosws.so ${CMAKE_BINARY_DIR}/build/lib
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/build/include
COMMAND cmake -E copy target/libtaosws/taosws.h ${CMAKE_BINARY_DIR}/build/include
)
ENDIF()
MESSAGE("${Green} use libtaos-ws${ColourReset}")
IF (TD_LINUX)
IF (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs/target/release/libtaosws.so" OR "${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs/target/release/libtaosws.so" IS_NEWER_THAN "${CMAKE_SOURCE_DIR}/.git/modules/tools/taosws-rs/FETCH_HEAD")
include(ExternalProject)
ExternalProject_Add(taosws-rs
PREFIX "taosws-rs"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs
BUILD_ALWAYS off
DEPENDS taos
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config"
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
COMMAND cargo build --release -p taos-ws-sys
COMMAND ./taos-ws-sys/ci/package.sh
INSTALL_COMMAND
COMMAND cmake -E copy target/libtaosws/libtaosws.so ${CMAKE_BINARY_DIR}/build/lib
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/build/include
COMMAND cmake -E copy target/libtaosws/taosws.h ${CMAKE_BINARY_DIR}/build/include
)
ELSE()
include(ExternalProject)
ExternalProject_Add(taosws-rs
PREFIX "taosws-rs"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs
BUILD_ALWAYS on
DEPENDS taos
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config"
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
COMMAND cargo build --release -p taos-ws-sys
COMMAND ./taos-ws-sys/ci/package.sh
INSTALL_COMMAND
COMMAND cmake -E copy target/libtaosws/libtaosws.so ${CMAKE_BINARY_DIR}/build/lib
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/build/include
COMMAND cmake -E copy target/libtaosws/taosws.h ${CMAKE_BINARY_DIR}/build/include
)
ENDIF ()
ENDIF()
ENDIF ()

IF (TD_TAOS_TOOLS)
|
@ -1 +1 @@
Subproject commit 9cb71e3c4c0474553aa961cbe19795541c29b5c7
Subproject commit 3f42d428eb6b90dea2651f4ccea66e44705c831b

@ -1 +1 @@
Subproject commit 430982a0c2c29a819ffc414d11f49f2d424ca3fe
Subproject commit 7a94ffab45f08e16f09b3f430fe75d717054adb6