Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/tsdb_refact

Hongze Cheng 2022-06-18 01:53:01 +00:00
commit 2e590c3036
75 changed files with 5294 additions and 2476 deletions

View File

@@ -32,6 +32,13 @@ int32_t init_env() {
   }
   taos_free_result(pRes);

+  pRes = taos_query(pConn, "create database if not exists abc2 vgroups 20");
+  if (taos_errno(pRes) != 0) {
+    printf("error in create db, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
   pRes = taos_query(pConn, "use abc1");
   if (taos_errno(pRes) != 0) {
     printf("error in use db, reason:%s\n", taos_errstr(pRes));
@@ -81,9 +88,9 @@ int32_t create_stream() {
   /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
   /*const char* sql = "select sum(k) from tu1 interval(10m)";*/
   /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
-  pRes = taos_query(
-      pConn,
-      "create stream stream1 trigger max_delay 10s into outstb as select _wstartts, sum(k) from st1 interval(10m)");
+  pRes = taos_query(pConn,
+                    "create stream stream1 trigger at_once into abc2.outstb as select _wstartts, sum(k) from st1 "
+                    "partition by tbname interval(10m) ");
   if (taos_errno(pRes) != 0) {
     printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
     return -1;

View File

@@ -209,15 +209,6 @@ DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLi

 /* --------------------------TMQ INTERFACE------------------------------- */

-#if 0
-enum {
-  TMQ_RESP_ERR__FAIL = -1,
-  TMQ_RESP_ERR__SUCCESS = 0,
-};
-typedef int32_t tmq_resp_err_t;
-#endif
-
 typedef struct tmq_t      tmq_t;
 typedef struct tmq_conf_t tmq_conf_t;
 typedef struct tmq_list_t tmq_list_t;
@@ -236,15 +227,13 @@ DLL_EXPORT const char *tmq_err2str(int32_t code);

 /* ------------------------TMQ CONSUMER INTERFACE------------------------ */

 DLL_EXPORT int32_t   tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
 DLL_EXPORT int32_t   tmq_unsubscribe(tmq_t *tmq);
 DLL_EXPORT int32_t   tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
-// timeout: -1 means infinitely waiting
 DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
 DLL_EXPORT int32_t   tmq_consumer_close(tmq_t *tmq);
-
 DLL_EXPORT int32_t   tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
 DLL_EXPORT void      tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);

 /* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
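Editor's note: the consumer surface above is now plain int32_t / TAOS_RES* based. As a quick orientation — not part of the commit — a poll-and-commit loop against these declarations could look like the sketch below. It assumes the consumer was already created and subscribed, and that (per the removed header comment) a timeout of -1 blocks indefinitely.

#include <stdio.h>
#include "taos.h"

// Sketch only: drain a subscribed consumer, committing after each message.
static void consume_loop(tmq_t *tmq) {
  while (1) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000);  // wait up to 1000 ms
    if (msg == NULL) continue;                     // timed out, poll again
    /* ... read rows from msg with the usual taos_fetch_* calls ... */
    int32_t code = tmq_commit_sync(tmq, msg);      // 0 on success
    taos_free_result(msg);
    if (code != 0) {
      printf("commit failed, reason:%s\n", tmq_err2str(code));
      break;
    }
  }
  tmq_consumer_close(tmq);
}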

View File

@@ -159,11 +159,14 @@ int64_t qGetQueriedTableUid(qTaskInfo_t tinfo);
  */
 int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t tagCondLen, SArray* pTableIdList);
 void    qProcessFetchRsp(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
 int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes);

+int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
+int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len);
+
 #ifdef __cplusplus
 }
 #endif

View File

@@ -62,6 +62,7 @@ typedef struct SSyncCfg {
 typedef struct SFsmCbMeta {
   SyncIndex  index;
+  SyncIndex  lastConfigIndex;
   bool       isWeak;
   int32_t    code;
   ESyncState state;

@@ -75,6 +76,7 @@ typedef struct SReConfigCbMeta {
   int32_t   code;
   SyncIndex index;
   SyncTerm  term;
+  SyncIndex lastConfigIndex;
   SyncTerm  currentTerm;
   SSyncCfg  oldCfg;
   SSyncCfg  newCfg;
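Editor's note: both callback metas now carry the index of the last applied configuration change. A hypothetical FSM commit callback consuming the new field might look like the sketch below; saveApplyInfo() is an illustrative sink, not part of the source.

// Sketch: index, term and lastConfigIndex now arrive together, so the state
// machine can persist the applied position and the last membership change
// in one step (this is how mndSyncCommitMsg uses them later in this commit).
void myFsmCommitCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
  if (cbMeta.code == 0) {
    saveApplyInfo(cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex);  // hypothetical
  }
}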

View File

@@ -23,22 +23,22 @@ extern "C" {
 // If the error is in a third-party library, place this header file under the third-party library header file.
 // When you want to use this feature, you should find or add the same function in the following section.
 #ifndef ALLOW_FORBID_FUNC
 #define strptime     STRPTIME_FUNC_TAOS_FORBID
 #define gettimeofday GETTIMEOFDAY_FUNC_TAOS_FORBID
 #define localtime    LOCALTIME_FUNC_TAOS_FORBID
 #define localtime_s  LOCALTIMES_FUNC_TAOS_FORBID
 #define localtime_r  LOCALTIMER_FUNC_TAOS_FORBID
 #define time         TIME_FUNC_TAOS_FORBID
 #define mktime       MKTIME_FUNC_TAOS_FORBID
 #endif

 #ifdef WINDOWS
 #define CLOCK_REALTIME 0
 #define MILLISECOND_PER_SECOND (1000i64)
 #else
 #define MILLISECOND_PER_SECOND ((int64_t)1000L)
 #endif
 #define MILLISECOND_PER_MINUTE (MILLISECOND_PER_SECOND * 60)

@@ -82,13 +82,13 @@ static FORCE_INLINE int64_t taosGetTimestampNs() {
   return (int64_t)systemTime.tv_sec * 1000000000L + (int64_t)systemTime.tv_nsec;
 }

 char      *taosStrpTime(const char *buf, const char *fmt, struct tm *tm);
 struct tm *taosLocalTime(const time_t *timep, struct tm *result);
 time_t     taosTime(time_t *t);
 time_t     taosMktime(struct tm *timep);

 #ifdef __cplusplus
 }
 #endif

 #endif /*_TD_OS_TIME_H_*/

View File

@@ -442,108 +442,6 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1004)
 #define TSDB_CODE_WAL_LOG_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x1005)

-// http
-#define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not online"
-#define TSDB_CODE_HTTP_UNSUPPORT_URL TAOS_DEF_ERROR_CODE(0, 0x1101) //"url is not support"
-#define TSDB_CODE_HTTP_INVALID_URL TAOS_DEF_ERROR_CODE(0, 0x1102) //"invalid url format"
-#define TSDB_CODE_HTTP_NO_ENOUGH_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1103) //"no enough memory"
-#define TSDB_CODE_HTTP_REQUSET_TOO_BIG TAOS_DEF_ERROR_CODE(0, 0x1104) //"request size is too big"
-#define TSDB_CODE_HTTP_NO_AUTH_INFO TAOS_DEF_ERROR_CODE(0, 0x1105) //"no auth info input"
-#define TSDB_CODE_HTTP_NO_MSG_INPUT TAOS_DEF_ERROR_CODE(0, 0x1106) //"request is empty"
-#define TSDB_CODE_HTTP_NO_SQL_INPUT TAOS_DEF_ERROR_CODE(0, 0x1107) //"no sql input"
-#define TSDB_CODE_HTTP_NO_EXEC_USEDB TAOS_DEF_ERROR_CODE(0, 0x1108) //"no need to execute use db cmd"
-#define TSDB_CODE_HTTP_SESSION_FULL TAOS_DEF_ERROR_CODE(0, 0x1109) //"session list was full"
-#define TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR TAOS_DEF_ERROR_CODE(0, 0x110A) //"generate taosd token error"
-#define TSDB_CODE_HTTP_INVALID_MULTI_REQUEST TAOS_DEF_ERROR_CODE(0, 0x110B) //"size of multi request is 0"
-#define TSDB_CODE_HTTP_CREATE_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110C) //"failed to create gzip"
-#define TSDB_CODE_HTTP_FINISH_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110D) //"failed to finish gzip"
-#define TSDB_CODE_HTTP_LOGIN_FAILED TAOS_DEF_ERROR_CODE(0, 0x110E) //"failed to login"
-#define TSDB_CODE_HTTP_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x1120) //"invalid http version"
-#define TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH TAOS_DEF_ERROR_CODE(0, 0x1121) //"invalid content length"
-#define TSDB_CODE_HTTP_INVALID_AUTH_TYPE TAOS_DEF_ERROR_CODE(0, 0x1122) //"invalid type of Authorization"
-#define TSDB_CODE_HTTP_INVALID_AUTH_FORMAT TAOS_DEF_ERROR_CODE(0, 0x1123) //"invalid format of Authorization"
-#define TSDB_CODE_HTTP_INVALID_BASIC_AUTH TAOS_DEF_ERROR_CODE(0, 0x1124) //"invalid basic Authorization"
-#define TSDB_CODE_HTTP_INVALID_TAOSD_AUTH TAOS_DEF_ERROR_CODE(0, 0x1125) //"invalid taosd Authorization"
-#define TSDB_CODE_HTTP_PARSE_METHOD_FAILED TAOS_DEF_ERROR_CODE(0, 0x1126) //"failed to parse method"
-#define TSDB_CODE_HTTP_PARSE_TARGET_FAILED TAOS_DEF_ERROR_CODE(0, 0x1127) //"failed to parse target"
-#define TSDB_CODE_HTTP_PARSE_VERSION_FAILED TAOS_DEF_ERROR_CODE(0, 0x1128) //"failed to parse http version"
-#define TSDB_CODE_HTTP_PARSE_SP_FAILED TAOS_DEF_ERROR_CODE(0, 0x1129) //"failed to parse sp"
-#define TSDB_CODE_HTTP_PARSE_STATUS_FAILED TAOS_DEF_ERROR_CODE(0, 0x112A) //"failed to parse status"
-#define TSDB_CODE_HTTP_PARSE_PHRASE_FAILED TAOS_DEF_ERROR_CODE(0, 0x112B) //"failed to parse phrase"
-#define TSDB_CODE_HTTP_PARSE_CRLF_FAILED TAOS_DEF_ERROR_CODE(0, 0x112C) //"failed to parse crlf"
-#define TSDB_CODE_HTTP_PARSE_HEADER_FAILED TAOS_DEF_ERROR_CODE(0, 0x112D) //"failed to parse header"
-#define TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED TAOS_DEF_ERROR_CODE(0, 0x112E) //"failed to parse header key"
-#define TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED TAOS_DEF_ERROR_CODE(0, 0x112F) //"failed to parse header val"
-#define TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED TAOS_DEF_ERROR_CODE(0, 0x1130) //"failed to parse chunk size"
-#define TSDB_CODE_HTTP_PARSE_CHUNK_FAILED TAOS_DEF_ERROR_CODE(0, 0x1131) //"failed to parse chunk"
-#define TSDB_CODE_HTTP_PARSE_END_FAILED TAOS_DEF_ERROR_CODE(0, 0x1132) //"failed to parse end section"
-#define TSDB_CODE_HTTP_PARSE_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x1134) //"invalid parse state"
-#define TSDB_CODE_HTTP_PARSE_ERROR_STATE TAOS_DEF_ERROR_CODE(0, 0x1135) //"failed to parse error section"
-#define TSDB_CODE_HTTP_GC_QUERY_NULL TAOS_DEF_ERROR_CODE(0, 0x1150) //"query size is 0"
-#define TSDB_CODE_HTTP_GC_QUERY_SIZE TAOS_DEF_ERROR_CODE(0, 0x1151) //"query size can not more than 100"
-#define TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR TAOS_DEF_ERROR_CODE(0, 0x1152) //"parse grafana json error"
-#define TSDB_CODE_HTTP_TG_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1160) //"database name can not be null"
-#define TSDB_CODE_HTTP_TG_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1161) //"database name too long"
-#define TSDB_CODE_HTTP_TG_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1162) //"invalid telegraf json fromat"
-#define TSDB_CODE_HTTP_TG_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1163) //"metrics size is 0"
-#define TSDB_CODE_HTTP_TG_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1164) //"metrics size can not more than 1K"
-#define TSDB_CODE_HTTP_TG_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1165) //"metric name not find"
-#define TSDB_CODE_HTTP_TG_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1166) //"metric name type should be string"
-#define TSDB_CODE_HTTP_TG_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1167) //"metric name length is 0"
-#define TSDB_CODE_HTTP_TG_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1168) //"metric name length too long"
-#define TSDB_CODE_HTTP_TG_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1169) //"timestamp not find"
-#define TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x116A) //"timestamp type should be integer"
-#define TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x116B) //"timestamp value smaller than 0"
-#define TSDB_CODE_HTTP_TG_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x116C) //"tags not find"
-#define TSDB_CODE_HTTP_TG_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x116D) //"tags size is 0"
-#define TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x116E) //"tags size too long"
-#define TSDB_CODE_HTTP_TG_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x116F) //"tag is null"
-#define TSDB_CODE_HTTP_TG_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1170) //"tag name is null"
-#define TSDB_CODE_HTTP_TG_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x1171) //"tag name length too long"
-#define TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x1172) //"tag value type should be number or string"
-#define TSDB_CODE_HTTP_TG_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x1173) //"tag value is null"
-#define TSDB_CODE_HTTP_TG_TABLE_NULL TAOS_DEF_ERROR_CODE(0, 0x1174) //"table is null"
-#define TSDB_CODE_HTTP_TG_TABLE_SIZE TAOS_DEF_ERROR_CODE(0, 0x1175) //"table name length too long"
-#define TSDB_CODE_HTTP_TG_FIELDS_NULL TAOS_DEF_ERROR_CODE(0, 0x1176) //"fields not find"
-#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x1177) //"fields size is 0"
-#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x1178) //"fields size too long"
-#define TSDB_CODE_HTTP_TG_FIELD_NULL TAOS_DEF_ERROR_CODE(0, 0x1179) //"field is null"
-#define TSDB_CODE_HTTP_TG_FIELD_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x117A) //"field name is null"
-#define TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x117B) //"field name length too long"
-#define TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x117C) //"field value type should be number or string"
-#define TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x117D) //"field value is null"
-#define TSDB_CODE_HTTP_TG_HOST_NOT_STRING TAOS_DEF_ERROR_CODE(0, 0x117E) //"host type should be string"
-#define TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x117F) //"stable not exist"
-#define TSDB_CODE_HTTP_OP_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1190) //"database name can not be null"
-#define TSDB_CODE_HTTP_OP_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1191) //"database name too long"
-#define TSDB_CODE_HTTP_OP_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1192) //"invalid opentsdb json fromat"
-#define TSDB_CODE_HTTP_OP_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1193) //"metrics size is 0"
-#define TSDB_CODE_HTTP_OP_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1194) //"metrics size can not more than 10K"
-#define TSDB_CODE_HTTP_OP_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1195) //"metric name not find"
-#define TSDB_CODE_HTTP_OP_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1196) //"metric name type should be string"
-#define TSDB_CODE_HTTP_OP_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1197) //"metric name length is 0"
-#define TSDB_CODE_HTTP_OP_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1198) //"metric name length can not more than 22"
-#define TSDB_CODE_HTTP_OP_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1199) //"timestamp not find"
-#define TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x119A) //"timestamp type should be integer"
-#define TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x119B) //"timestamp value smaller than 0"
-#define TSDB_CODE_HTTP_OP_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x119C) //"tags not find"
-#define TSDB_CODE_HTTP_OP_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x119D) //"tags size is 0"
-#define TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x119E) //"tags size too long"
-#define TSDB_CODE_HTTP_OP_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x119F) //"tag is null"
-#define TSDB_CODE_HTTP_OP_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x11A0) //"tag name is null"
-#define TSDB_CODE_HTTP_OP_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x11A1) //"tag name length too long"
-#define TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A2) //"tag value type should be boolean number or string"
-#define TSDB_CODE_HTTP_OP_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A3) //"tag value is null"
-#define TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x11A4) //"tag value can not more than 64"
-#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find"
-#define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string"
-#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error"

 // tfs
 #define TSDB_CODE_FS_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x2200)
 #define TSDB_CODE_FS_INVLD_CFG TAOS_DEF_ERROR_CODE(0, 0x2201)

View File

@@ -220,7 +220,8 @@ static const SSysDbTableSchema transSchema[] = {
     {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
     {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
     {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
-    {.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
     {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
     {.name = "last_action_info",

View File

@@ -1713,6 +1713,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
 }

 char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId) {
+  ASSERT(stbName[0] != 0);
   SArray* tags = taosArrayInit(0, sizeof(void*));
   SSmlKv* pTag = taosMemoryCalloc(1, sizeof(SSmlKv));
   pTag->key = "group_id";

View File

@@ -124,7 +124,8 @@ typedef struct {
   int32_t lastErrorNo;
   tmsg_t  lastMsgType;
   SEpSet  lastEpset;
-  char    dbname[TSDB_DB_FNAME_LEN];
+  char    dbname1[TSDB_DB_FNAME_LEN];
+  char    dbname2[TSDB_DB_FNAME_LEN];
   int32_t startFunc;
   int32_t stopFunc;
   int32_t paramLen;

View File

@@ -68,7 +68,7 @@ int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
 int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
 void    mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
 void    mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
-void    mndTransSetDbName(STrans *pTrans, const char *dbname);
+void    mndTransSetDbName(STrans *pTrans, const char *dbname1, const char *dbname2);
 void    mndTransSetSerial(STrans *pTrans);
 int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
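Editor's note: the second parameter lets a transaction register two databases for conflict checking (for example a stream's source and target); passing NULL leaves that slot empty. A hedged sketch of the two call shapes — the wrapper and pSrcDb/pDstDb are hypothetical, while the NULL form matches the call sites updated below:

static void setTransDbs(STrans *pTrans, SDbObj *pSrcDb, SDbObj *pDstDb) {
  if (pDstDb != NULL) {
    mndTransSetDbName(pTrans, pSrcDb->name, pDstDb->name);  // conflict on both dbs
  } else {
    mndTransSetDbName(pTrans, pSrcDb->name, NULL);          // single-db transaction
  }
}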

View File

@@ -477,7 +477,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
   mDebug("trans:%d, used to create db:%s", pTrans->id, pCreate->db);

-  mndTransSetDbName(pTrans, dbObj.name);
+  mndTransSetDbName(pTrans, dbObj.name, NULL);
   if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
   if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
   if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;

@@ -668,7 +668,7 @@ static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *p
   mDebug("trans:%d, used to alter db:%s", pTrans->id, pOld->name);

   int32_t code = -1;
-  mndTransSetDbName(pTrans, pOld->name);
+  mndTransSetDbName(pTrans, pOld->name, NULL);
   if (mndSetAlterDbRedoLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
   if (mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
   if (mndSetAlterDbRedoActions(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;

@@ -921,7 +921,7 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
   if (pTrans == NULL) goto _OVER;
   mDebug("trans:%d, used to drop db:%s", pTrans->id, pDb->name);

-  mndTransSetDbName(pTrans, pDb->name);
+  mndTransSetDbName(pTrans, pDb->name, NULL);
   if (mndSetDropDbRedoLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
   if (mndSetDropDbCommitLogs(pMnode, pTrans, pDb) != 0) goto _OVER;

View File

@@ -105,7 +105,7 @@ int32_t mndPersistTaskDeployReq(STrans* pTrans, SStreamTask* pTask, const SEpSet
   int32_t size = encoder.pos;
   int32_t tlen = sizeof(SMsgHead) + size;
   tEncoderClear(&encoder);
-  void* buf = taosMemoryMalloc(tlen);
+  void* buf = taosMemoryCalloc(1, tlen);
   if (buf == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     return -1;

@@ -157,6 +157,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, STrans* pTrans, SStreamObj*
     }
     sdbRelease(pMnode->pSdb, pDb);

+    memcpy(pTask->shuffleDispatcher.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
     SArray* pVgs = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
     int32_t sz = taosArrayGetSize(pVgs);
     SArray* sinkLv = taosArrayGetP(pStream->tasks, 0);

@@ -166,6 +167,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, STrans* pTrans, SStreamObj*
       for (int32_t j = 0; j < sinkLvSize; j++) {
         SStreamTask* pLastLevelTask = taosArrayGetP(sinkLv, j);
         if (pLastLevelTask->nodeId == pVgInfo->vgId) {
+          ASSERT(pVgInfo->vgId > 0);
           pVgInfo->taskId = pLastLevelTask->taskId;
           ASSERT(pVgInfo->taskId != 0);
           break;

View File

@@ -609,7 +609,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
   int32_t code = -1;
   STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
   if (pTrans == NULL) goto _OVER;
-  mndTransSetDbName(pTrans, pDb->name);
+  mndTransSetDbName(pTrans, pDb->name, NULL);
   mndTransSetSerial(pTrans);
   mDebug("trans:%d, used to create sma:%s", pTrans->id, pCreate->name);

@@ -852,7 +852,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
   if (pTrans == NULL) goto _OVER;
   mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name);
-  mndTransSetDbName(pTrans, pDb->name);
+  mndTransSetDbName(pTrans, pDb->name, NULL);
   if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
   if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;

View File

@@ -758,7 +758,7 @@ _OVER:
 }

 int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
-  mndTransSetDbName(pTrans, pDb->name);
+  mndTransSetDbName(pTrans, pDb->name, NULL);
   if (mndSetCreateStbRedoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
   if (mndSetCreateStbUndoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
   if (mndSetCreateStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;

@@ -1396,7 +1396,7 @@ static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *p
   if (pTrans == NULL) goto _OVER;
   mDebug("trans:%d, used to alter stb:%s", pTrans->id, pAlter->name);
-  mndTransSetDbName(pTrans, pDb->name);
+  mndTransSetDbName(pTrans, pDb->name, NULL);

   if (needRsp) {
     void *pCont = NULL;

@@ -1537,7 +1537,7 @@ static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *p
   if (pTrans == NULL) goto _OVER;
   mDebug("trans:%d, used to drop stb:%s", pTrans->id, pStb->name);
-  mndTransSetDbName(pTrans, pDb->name);
+  mndTransSetDbName(pTrans, pDb->name, NULL);
   if (mndSetDropStbRedoLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
   if (mndSetDropStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;

View File

@@ -613,9 +613,9 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
     goto _OVER;
   }

-  mndTransSetDbName(pTrans, createStreamReq.sourceDB);
+  mndTransSetDbName(pTrans, createStreamReq.sourceDB, NULL);
   // TODO
-  /*mndTransSetDbName(pTrans, streamObj.targetDb);*/
+  /*mndTransSetDbName(pTrans, streamObj.targetDb, NULL);*/
   mDebug("trans:%d, used to create stream:%s", pTrans->id, createStreamReq.name);

   // build stream obj from request

View File

@@ -403,7 +403,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR

 static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOutputObj *pOutput) {
   STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pMsg);
-  mndTransSetDbName(pTrans, pOutput->pSub->dbName);
+  mndTransSetDbName(pTrans, pOutput->pSub->dbName, NULL);
   if (pTrans == NULL) return -1;

   // make txn:

View File

@@ -46,13 +46,14 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
   int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
   pMgmt->errCode = cbMeta.code;
-  mDebug("trans:%d, is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " role:%s raw:%p", transId,
-         pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, syncStr(cbMeta.state), pRaw);
+  mDebug("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
+         " role:%s raw:%p",
+         transId, pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex, syncStr(cbMeta.state),
+         pRaw);

   if (pMgmt->errCode == 0) {
     sdbWriteWithoutFree(pMnode->pSdb, pRaw);
-    sdbSetApplyIndex(pMnode->pSdb, cbMeta.index);
-    sdbSetApplyTerm(pMnode->pSdb, cbMeta.term);
+    sdbSetApplyInfo(pMnode->pSdb, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex);
   }

   if (pMgmt->transId == transId) {

@@ -68,36 +69,19 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
       mndReleaseTrans(pMnode, pTrans);
     }

-    if (cbMeta.index - sdbGetApplyIndex(pMnode->pSdb) > 100) {
-      SSnapshotMeta sMeta = {0};
-      // if (syncGetSnapshotMeta(pMnode->syncMgmt.sync, &sMeta) == 0) {
-      if (syncGetSnapshotMetaByIndex(pMnode->syncMgmt.sync, cbMeta.index, &sMeta) == 0) {
-        sdbSetCurConfig(pMnode->pSdb, sMeta.lastConfigIndex);
-      }
-      sdbWriteFile(pMnode->pSdb);
-    }
+    sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA);
   }
 }

 int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {
   SMnode *pMnode = pFsm->data;
-  pSnapshot->lastApplyIndex = sdbGetCommitIndex(pMnode->pSdb);
-  pSnapshot->lastApplyTerm = sdbGetCommitTerm(pMnode->pSdb);
-  pSnapshot->lastConfigIndex = sdbGetCurConfig(pMnode->pSdb);
+  sdbGetCommitInfo(pMnode->pSdb, &pSnapshot->lastApplyIndex, &pSnapshot->lastApplyTerm, &pSnapshot->lastConfigIndex);
   return 0;
 }

 void mndRestoreFinish(struct SSyncFSM *pFsm) {
   SMnode *pMnode = pFsm->data;
-  SSnapshotMeta sMeta = {0};
-  // if (syncGetSnapshotMeta(pMnode->syncMgmt.sync, &sMeta) == 0) {
-  SyncIndex snapshotIndex = sdbGetApplyIndex(pMnode->pSdb);
-  if (syncGetSnapshotMetaByIndex(pMnode->syncMgmt.sync, snapshotIndex, &sMeta) == 0) {
-    sdbSetCurConfig(pMnode->pSdb, sMeta.lastConfigIndex);
-  }
+
   if (!pMnode->deploy) {
     mInfo("mnode sync restore finished, and will handle outstanding transactions");
     mndTransPullup(pMnode);

View File

@@ -566,7 +566,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
 #endif

   STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq);
-  mndTransSetDbName(pTrans, pTopic->db);
+  mndTransSetDbName(pTrans, pTopic->db, NULL);
   if (pTrans == NULL) {
     mError("topic:%s, failed to drop since %s", pTopic->name, terrstr());
     return -1;

View File

@@ -22,8 +22,8 @@
 #include "mndSync.h"
 #include "mndUser.h"

 #define TRANS_VER_NUMBER   1
 #define TRANS_ARRAY_SIZE   8
 #define TRANS_RESERVE_SIZE 64

 static SSdbRaw *mndTransActionEncode(STrans *pTrans);

@@ -52,7 +52,7 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans);
 static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans);
 static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans);
 static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans);
-static bool mndCantExecuteTransAction(SMnode *pMnode) { return !pMnode->deploy && !mndIsMaster(pMnode); }
+static bool mndCannotExecuteTransAction(SMnode *pMnode) { return !pMnode->deploy && !mndIsMaster(pMnode); }

 static void    mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans);
 static int32_t mndProcessTransReq(SRpcMsg *pReq);

@@ -122,7 +122,8 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
   SDB_SET_INT8(pRaw, dataPos, pTrans->conflict, _OVER)
   SDB_SET_INT8(pRaw, dataPos, pTrans->exec, _OVER)
   SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER)
-  SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER)
+  SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_DB_FNAME_LEN, _OVER)
+  SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_DB_FNAME_LEN, _OVER)
   SDB_SET_INT32(pRaw, dataPos, pTrans->redoActionPos, _OVER)

   int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions);

@@ -270,7 +271,8 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
   pTrans->conflict = conflict;
   pTrans->exec = exec;
   SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER)
-  SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER)
+  SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_DB_FNAME_LEN, _OVER)
+  SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_DB_FNAME_LEN, _OVER)
   SDB_GET_INT32(pRaw, dataPos, &pTrans->redoActionPos, _OVER)
   SDB_GET_INT32(pRaw, dataPos, &redoActionNum, _OVER)
   SDB_GET_INT32(pRaw, dataPos, &undoActionNum, _OVER)

@@ -521,9 +523,10 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
   }

   if (pOld->stage == TRN_STAGE_ROLLBACK) {
-    pOld->stage = TRN_STAGE_FINISHED;
-    mTrace("trans:%d, stage from rollback to finished since perform update action", pNew->id);
+    pOld->stage = TRN_STAGE_REDO_ACTION;
+    mTrace("trans:%d, stage from rollback to undoAction since perform update action", pNew->id);
   }

   return 0;
 }

@@ -649,7 +652,14 @@ void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *
   pTrans->paramLen = paramLen;
 }

-void mndTransSetDbName(STrans *pTrans, const char *dbname) { memcpy(pTrans->dbname, dbname, TSDB_DB_FNAME_LEN); }
+void mndTransSetDbName(STrans *pTrans, const char *dbname1, const char *dbname2) {
+  if (dbname1 != NULL) {
+    memcpy(pTrans->dbname1, dbname1, TSDB_DB_FNAME_LEN);
+  }
+  if (dbname2 != NULL) {
+    memcpy(pTrans->dbname2, dbname2, TSDB_DB_FNAME_LEN);
+  }
+}

 void mndTransSetSerial(STrans *pTrans) { pTrans->exec = TRN_EXEC_SERIAL; }
@@ -674,6 +684,12 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
   return 0;
 }

+static bool mndCheckDbConflict(const char *db, STrans *pTrans) {
+  if (db[0] == 0) return false;
+  if (strcmp(db, pTrans->dbname1) == 0 || strcmp(db, pTrans->dbname2) == 0) return true;
+  return false;
+}
+
 static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
   STrans *pTrans = NULL;
   void   *pIter = NULL;

@@ -688,14 +704,21 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
     if (pNew->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
     if (pNew->conflict == TRN_CONFLICT_DB) {
       if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
-      if (pTrans->conflict == TRN_CONFLICT_DB && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true;
-      if (pTrans->conflict == TRN_CONFLICT_DB_INSIDE && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true;
+      if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
+        if (mndCheckDbConflict(pNew->dbname1, pTrans)) conflict = true;
+        if (mndCheckDbConflict(pNew->dbname2, pTrans)) conflict = true;
+      }
     }
     if (pNew->conflict == TRN_CONFLICT_DB_INSIDE) {
       if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
-      if (pTrans->conflict == TRN_CONFLICT_DB && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true;
+      if (pTrans->conflict == TRN_CONFLICT_DB) {
+        if (mndCheckDbConflict(pNew->dbname1, pTrans)) conflict = true;
+        if (mndCheckDbConflict(pNew->dbname2, pTrans)) conflict = true;
+      }
     }

-    mError("trans:%d, can't execute since conflict with trans:%d, db:%s", pNew->id, pTrans->id, pTrans->dbname);
+    mError("trans:%d, can't execute since conflict with trans:%d, db1:%s db2:%s", pNew->id, pTrans->id, pTrans->dbname1,
+           pTrans->dbname2);
     sdbRelease(pMnode->pSdb, pTrans);
   }

@@ -704,7 +727,7 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
 int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
   if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
-    if (strlen(pTrans->dbname) == 0) {
+    if (strlen(pTrans->dbname1) == 0 && strlen(pTrans->dbname2) == 0) {
       terrno = TSDB_CODE_MND_TRANS_CONFLICT;
       mError("trans:%d, failed to prepare conflict db not set", pTrans->id);
       return -1;
@@ -915,7 +938,7 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi
 static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
   if (pAction->msgSent) return 0;
-  if (mndCantExecuteTransAction(pMnode)) return -1;
+  if (mndCannotExecuteTransAction(pMnode)) return -1;

   int64_t signature = pTrans->id;
   signature = (signature << 32);

@@ -1115,7 +1138,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
       pTrans->lastEpset = pAction->epSet;
     }

-    if (mndCantExecuteTransAction(pMnode)) break;
+    if (mndCannotExecuteTransAction(pMnode)) break;

     if (code == 0) {
       pTrans->code = 0;

@@ -1158,7 +1181,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
     code = mndTransExecuteRedoActions(pMnode, pTrans);
   }

-  if (mndCantExecuteTransAction(pMnode)) return false;
+  if (mndCannotExecuteTransAction(pMnode)) return false;

   if (code == 0) {
     pTrans->code = 0;

@@ -1171,8 +1194,8 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
   } else {
     pTrans->code = terrno;
     if (pTrans->policy == TRN_POLICY_ROLLBACK) {
-      pTrans->stage = TRN_STAGE_UNDO_ACTION;
-      mError("trans:%d, stage from redoAction to undoAction since %s", pTrans->id, terrstr());
+      pTrans->stage = TRN_STAGE_ROLLBACK;
+      mError("trans:%d, stage from redoAction to rollback since %s", pTrans->id, terrstr());
       continueExec = true;
     } else {
       pTrans->failedTimes++;

@@ -1185,7 +1208,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
 }

 static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) {
-  if (mndCantExecuteTransAction(pMnode)) return false;
+  if (mndCannotExecuteTransAction(pMnode)) return false;

   bool    continueExec = true;
   int32_t code = mndTransCommit(pMnode, pTrans);
@@ -1197,16 +1220,9 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) {
     continueExec = true;
   } else {
     pTrans->code = terrno;
-    if (pTrans->policy == TRN_POLICY_ROLLBACK) {
-      pTrans->stage = TRN_STAGE_UNDO_ACTION;
-      mError("trans:%d, stage from commit to undoAction since %s, failedTimes:%d", pTrans->id, terrstr(),
-             pTrans->failedTimes);
-      continueExec = true;
-    } else {
-      pTrans->failedTimes++;
-      mError("trans:%d, stage keep on commit since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes);
-      continueExec = false;
-    }
+    pTrans->failedTimes++;
+    mError("trans:%d, stage keep on commit since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes);
+    continueExec = false;
   }

   return continueExec;

@@ -1235,11 +1251,9 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) {
   bool    continueExec = true;
   int32_t code = mndTransExecuteUndoActions(pMnode, pTrans);

-  if (mndCantExecuteTransAction(pMnode)) return false;
-
   if (code == 0) {
-    pTrans->stage = TRN_STAGE_ROLLBACK;
-    mDebug("trans:%d, stage from undoAction to rollback", pTrans->id);
+    pTrans->stage = TRN_STAGE_FINISHED;
+    mDebug("trans:%d, stage from undoAction to finished", pTrans->id);
     continueExec = true;
   } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
     mDebug("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code));

@@ -1254,14 +1268,14 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) {
 }

 static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) {
-  if (mndCantExecuteTransAction(pMnode)) return false;
+  if (mndCannotExecuteTransAction(pMnode)) return false;

   bool    continueExec = true;
   int32_t code = mndTransRollback(pMnode, pTrans);

   if (code == 0) {
-    pTrans->stage = TRN_STAGE_FINISHED;
-    mDebug("trans:%d, stage from rollback to finished", pTrans->id);
+    pTrans->stage = TRN_STAGE_UNDO_ACTION;
+    mDebug("trans:%d, stage from rollback to undoAction", pTrans->id);
     continueExec = true;
   } else {
     pTrans->failedTimes++;
@@ -1309,12 +1323,12 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
       case TRN_STAGE_COMMIT_ACTION:
         continueExec = mndTransPerformCommitActionStage(pMnode, pTrans);
         break;
-      case TRN_STAGE_UNDO_ACTION:
-        continueExec = mndTransPerformUndoActionStage(pMnode, pTrans);
-        break;
       case TRN_STAGE_ROLLBACK:
         continueExec = mndTransPerformRollbackStage(pMnode, pTrans);
         break;
+      case TRN_STAGE_UNDO_ACTION:
+        continueExec = mndTransPerformUndoActionStage(pMnode, pTrans);
+        break;
       case TRN_STAGE_FINISHED:
         continueExec = mndTransPerfromFinishedStage(pMnode, pTrans);
         break;

@@ -1416,13 +1430,8 @@ void mndTransPullup(SMnode *pMnode) {
     mndReleaseTrans(pMnode, pTrans);
   }

-  SSnapshotMeta sMeta = {0};
-  // if (syncGetSnapshotMeta(pMnode->syncMgmt.sync, &sMeta) == 0) {
-  SyncIndex snapshotIndex = sdbGetApplyIndex(pMnode->pSdb);
-  if (syncGetSnapshotMetaByIndex(pMnode->syncMgmt.sync, snapshotIndex, &sMeta) == 0) {
-    sdbSetCurConfig(pMnode->pSdb, sMeta.lastConfigIndex);
-  }
-  sdbWriteFile(pMnode->pSdb);
+  // todo, set to SDB_WRITE_DELTA
+  sdbWriteFile(pMnode->pSdb, 0);
   taosArrayDestroy(pArray);
 }

@@ -1451,10 +1460,15 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
   colDataAppend(pColInfo, numOfRows, (const char *)stage, false);

-  char dbname[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
-  STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndGetDbStr(pTrans->dbname), pShow->pMeta->pSchemas[cols].bytes);
+  char dbname1[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
+  STR_WITH_MAXSIZE_TO_VARSTR(dbname1, mndGetDbStr(pTrans->dbname1), pShow->pMeta->pSchemas[cols].bytes);
   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
-  colDataAppend(pColInfo, numOfRows, (const char *)dbname, false);
+  colDataAppend(pColInfo, numOfRows, (const char *)dbname1, false);
+
+  char dbname2[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
+  STR_WITH_MAXSIZE_TO_VARSTR(dbname2, mndGetDbStr(pTrans->dbname2), pShow->pMeta->pSchemas[cols].bytes);
+  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+  colDataAppend(pColInfo, numOfRows, (const char *)dbname2, false);

   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
   colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false);
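Editor's note: the conflict rule introduced by mndCheckDbConflict can be restated on plain strings — a standalone sketch, not code from the commit — as: a database name conflicts with a running transaction when it is non-empty and matches either of that transaction's two db slots.

#include <stdbool.h>
#include <string.h>

// Standalone restatement of the rule in mndCheckDbConflict above.
static bool dbConflicts(const char *db, const char *dbname1, const char *dbname2) {
  if (db[0] == 0) return false;  // an unset slot never conflicts
  return strcmp(db, dbname1) == 0 || strcmp(db, dbname2) == 0;
}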

View File

@@ -493,8 +493,11 @@ TEST_F(MndTestSdb, 01_Write_Str) {
   ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2);
   ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1);
   ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2);
-  sdbSetApplyIndex(pSdb, -1);
-  ASSERT_EQ(sdbGetApplyIndex(pSdb), -1);
+  sdbSetApplyInfo(pSdb, -1, -1, -1);
+  // int64_t index, config;
+  // int64_t term;
+  // sdbGetCommitInfo(pSdb, &index, &term, &config);
+  // ASSERT_EQ(index, -1);
   ASSERT_EQ(mnode.insertTimes, 2);
   ASSERT_EQ(mnode.deleteTimes, 0);

@@ -700,11 +703,12 @@ TEST_F(MndTestSdb, 01_Write_Str) {
   }

   // write version
-  sdbSetApplyIndex(pSdb, 0);
-  sdbSetApplyIndex(pSdb, 1);
-  ASSERT_EQ(sdbGetApplyIndex(pSdb), 1);
-  ASSERT_EQ(sdbWriteFile(pSdb), 0);
-  ASSERT_EQ(sdbWriteFile(pSdb), 0);
+  sdbSetApplyInfo(pSdb, 0, 0, 0);
+  sdbSetApplyInfo(pSdb, 1, 0, 0);
+  // sdbGetApplyInfo(pSdb, &index, &term, &config);
+  // ASSERT_EQ(index, 1);
+  ASSERT_EQ(sdbWriteFile(pSdb, 0), 0);
+  ASSERT_EQ(sdbWriteFile(pSdb, 0), 0);

   sdbCleanup(pSdb);
   ASSERT_EQ(mnode.insertTimes, 7);

@@ -772,7 +776,11 @@ TEST_F(MndTestSdb, 01_Read_Str) {
   ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2);
   ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1);
   ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 5);
-  ASSERT_EQ(sdbGetApplyIndex(pSdb), 1);
+
+  int64_t index, config;
+  int64_t term;
+  sdbGetCommitInfo(pSdb, &index, &term, &config);
+  ASSERT_EQ(index, 1);

   ASSERT_EQ(mnode.insertTimes, 4);
   ASSERT_EQ(mnode.deleteTimes, 0);

View File

@@ -128,7 +128,7 @@ class MndTestTrans2 : public ::testing::Test {
     mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);

     if (pDb != NULL) {
-      mndTransSetDbName(pTrans, pDb->name);
+      mndTransSetDbName(pTrans, pDb->name, NULL);
     }

     int32_t code = mndTransPrepare(pMnode, pTrans);

@@ -201,7 +201,7 @@ class MndTestTrans2 : public ::testing::Test {
     }

     if (pDb != NULL) {
-      mndTransSetDbName(pTrans, pDb->name);
+      mndTransSetDbName(pTrans, pDb->name, NULL);
     }

     int32_t code = mndTransPrepare(pMnode, pTrans);

View File

@@ -37,6 +37,8 @@ extern "C" {
 #define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }}
 // clang-format on

+#define SDB_WRITE_DELTA 100
+
 #define SDB_GET_VAL(pData, dataPos, val, pos, func, type) \
   {                                                       \
     if (func(pRaw, dataPos, val) != 0) {                  \

@@ -169,11 +171,12 @@ typedef struct SSdb {
   SWal    *pWal;
   char    *currDir;
   char    *tmpDir;
-  int64_t  lastCommitVer;
-  int64_t  lastCommitTerm;
-  int64_t  curVer;
-  int64_t  curTerm;
-  int64_t  curConfig;
+  int64_t  commitIndex;
+  int64_t  commitTerm;
+  int64_t  commitConfig;
+  int64_t  applyIndex;
+  int64_t  applyTerm;
+  int64_t  applyConfig;
   int64_t  tableVer[SDB_MAX];
   int64_t  maxId[SDB_MAX];
   EKeyType keyTypes[SDB_MAX];

@@ -257,7 +260,7 @@ int32_t sdbReadFile(SSdb *pSdb);
 * @param pSdb The sdb object.
 * @return int32_t 0 for success, -1 for failure.
 */
-int32_t sdbWriteFile(SSdb *pSdb);
+int32_t sdbWriteFile(SSdb *pSdb, int32_t delta);

 /**
 * @brief Parse and write raw data to sdb, then free the pRaw object

@@ -361,14 +364,8 @@ int64_t sdbGetTableVer(SSdb *pSdb, ESdbType type);
 * @param index The update value of the apply index.
 * @return int32_t The current index of sdb
 */
-void    sdbSetApplyIndex(SSdb *pSdb, int64_t index);
-void    sdbSetApplyTerm(SSdb *pSdb, int64_t term);
-void    sdbSetCurConfig(SSdb *pSdb, int64_t config);
-int64_t sdbGetApplyIndex(SSdb *pSdb);
-int64_t sdbGetApplyTerm(SSdb *pSdb);
-int64_t sdbGetCommitIndex(SSdb *pSdb);
-int64_t sdbGetCommitTerm(SSdb *pSdb);
-int64_t sdbGetCurConfig(SSdb *pSdb);
+void sdbSetApplyInfo(SSdb *pSdb, int64_t index, int64_t term, int64_t config);
+void sdbGetCommitInfo(SSdb *pSdb, int64_t *index, int64_t *term, int64_t *config);

 SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen);
 void     sdbFreeRaw(SSdbRaw *pRaw);

View File

@@ -53,11 +53,12 @@ SSdb *sdbInit(SSdbOpt *pOption) {
   }

   pSdb->pWal = pOption->pWal;
-  pSdb->curVer = -1;
-  pSdb->curTerm = -1;
-  pSdb->lastCommitVer = -1;
-  pSdb->lastCommitTerm = -1;
-  pSdb->curConfig = -1;
+  pSdb->applyIndex = -1;
+  pSdb->applyTerm = -1;
+  pSdb->applyConfig = -1;
+  pSdb->commitIndex = -1;
+  pSdb->commitTerm = -1;
+  pSdb->commitConfig = -1;
   pSdb->pMnode = pOption->pMnode;
   taosThreadMutexInit(&pSdb->filelock, NULL);
   mDebug("sdb init successfully");

@@ -67,7 +68,7 @@ SSdb *sdbInit(SSdbOpt *pOption) {
 void sdbCleanup(SSdb *pSdb) {
   mDebug("start to cleanup sdb");

-  sdbWriteFile(pSdb);
+  sdbWriteFile(pSdb, 0);

   if (pSdb->currDir != NULL) {
     taosMemoryFreeClear(pSdb->currDir);

@@ -159,23 +160,20 @@ static int32_t sdbCreateDir(SSdb *pSdb) {
   return 0;
 }

-void sdbSetApplyIndex(SSdb *pSdb, int64_t index) { pSdb->curVer = index; }
-
-void sdbSetApplyTerm(SSdb *pSdb, int64_t term) { pSdb->curTerm = term; }
-
-void sdbSetCurConfig(SSdb *pSdb, int64_t config) {
-  if (pSdb->curConfig != config) {
-    mDebug("mnode sync config set from %" PRId64 " to %" PRId64, pSdb->curConfig, config);
-    pSdb->curConfig = config;
-  }
+void sdbSetApplyInfo(SSdb *pSdb, int64_t index, int64_t term, int64_t config) {
+  mTrace("mnode apply info changed, from index:%" PRId64 " term:%" PRId64 " config:%" PRId64 ", to index:%" PRId64
+         " term:%" PRId64 " config:%" PRId64,
+         pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig, index, term, config);
+  pSdb->applyIndex = index;
+  pSdb->applyTerm = term;
+  pSdb->applyConfig = config;
 }

-int64_t sdbGetApplyIndex(SSdb *pSdb) { return pSdb->curVer; }
-
-int64_t sdbGetApplyTerm(SSdb *pSdb) { return pSdb->curTerm; }
-
-int64_t sdbGetCommitIndex(SSdb *pSdb) { return pSdb->lastCommitVer; }
-
-int64_t sdbGetCommitTerm(SSdb *pSdb) { return pSdb->lastCommitTerm; }
-
-int64_t sdbGetCurConfig(SSdb *pSdb) { return pSdb->curConfig; }
+void sdbGetCommitInfo(SSdb *pSdb, int64_t *index, int64_t *term, int64_t *config) {
+  *index = pSdb->commitIndex;
+  *term = pSdb->commitTerm;
+  *config = pSdb->commitConfig;
+  mTrace("mnode current info, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64 ", commit index:%" PRId64
+         " term:%" PRId64 " config:%" PRId64,
+         pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig, *index, *term, *config);
+}
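Editor's note: together with sdbWriteFile's new delta argument (see sdbFile.c below) and SDB_WRITE_DELTA (100) from sdb.h, the checkpoint flow reduces to a helper like the following — a sketch mirroring mndSyncCommitMsg, not code from the commit:

// Hypothetical helper: record what was just applied, then let sdbWriteFile
// decide whether the gap since the last on-disk commit justifies a rewrite.
static void checkpointAfterApply(SSdb *pSdb, int64_t index, int64_t term, int64_t config) {
  sdbSetApplyInfo(pSdb, index, term, config);
  // Rewrites sdb.data only once applyIndex - commitIndex >= SDB_WRITE_DELTA;
  // passing 0 instead forces a write (the cleanup/deploy paths do this).
  sdbWriteFile(pSdb, SDB_WRITE_DELTA);
}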

View File

@@ -67,10 +67,12 @@ static void sdbResetData(SSdb *pSdb) {
    mDebug("sdb:%s is reset", sdbTableName(i));
  }

-  pSdb->curVer = -1;
-  pSdb->curTerm = -1;
-  pSdb->lastCommitVer = -1;
-  pSdb->lastCommitTerm = -1;
  pSdb->applyIndex = -1;
  pSdb->applyTerm = -1;
  pSdb->applyConfig = -1;
  pSdb->commitIndex = -1;
  pSdb->commitTerm = -1;
  pSdb->commitConfig = -1;
  mDebug("sdb reset successfully");
}

@@ -90,7 +92,7 @@ static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) {
    return -1;
  }

-  ret = taosReadFile(pFile, &pSdb->curVer, sizeof(int64_t));
  ret = taosReadFile(pFile, &pSdb->applyIndex, sizeof(int64_t));
  if (ret < 0) {
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;

@@ -100,7 +102,7 @@ static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) {
    return -1;
  }

-  ret = taosReadFile(pFile, &pSdb->curTerm, sizeof(int64_t));
  ret = taosReadFile(pFile, &pSdb->applyTerm, sizeof(int64_t));
  if (ret < 0) {
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;

@@ -110,7 +112,7 @@ static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) {
    return -1;
  }

-  ret = taosReadFile(pFile, &pSdb->curConfig, sizeof(int64_t));
  ret = taosReadFile(pFile, &pSdb->applyConfig, sizeof(int64_t));
  if (ret < 0) {
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;

@@ -173,17 +175,17 @@ static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) {
    return -1;
  }

-  if (taosWriteFile(pFile, &pSdb->curVer, sizeof(int64_t)) != sizeof(int64_t)) {
  if (taosWriteFile(pFile, &pSdb->applyIndex, sizeof(int64_t)) != sizeof(int64_t)) {
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;
  }

-  if (taosWriteFile(pFile, &pSdb->curTerm, sizeof(int64_t)) != sizeof(int64_t)) {
  if (taosWriteFile(pFile, &pSdb->applyTerm, sizeof(int64_t)) != sizeof(int64_t)) {
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;
  }

-  if (taosWriteFile(pFile, &pSdb->curConfig, sizeof(int64_t)) != sizeof(int64_t)) {
  if (taosWriteFile(pFile, &pSdb->applyConfig, sizeof(int64_t)) != sizeof(int64_t)) {
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;
  }

@@ -300,11 +302,12 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
  }

  code = 0;
-  pSdb->lastCommitVer = pSdb->curVer;
-  pSdb->lastCommitTerm = pSdb->curTerm;
  pSdb->commitIndex = pSdb->applyIndex;
  pSdb->commitTerm = pSdb->applyTerm;
  pSdb->commitConfig = pSdb->applyConfig;
  memcpy(pSdb->tableVer, tableVer, sizeof(tableVer));
-  mDebug("read sdb file:%s successfully, index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, pSdb->lastCommitVer,
-         pSdb->lastCommitTerm, pSdb->curConfig);
  mDebug("read sdb file:%s successfully, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file,
         pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig);

_OVER:
  taosCloseFile(&pFile);

@@ -336,9 +339,10 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
  char curfile[PATH_MAX] = {0};
  snprintf(curfile, sizeof(curfile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);

-  mDebug("start to write sdb file, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64 " term:%" PRId64
-         " file:%s",
-         pSdb->curVer, pSdb->curTerm, pSdb->lastCommitVer, pSdb->lastCommitTerm, curfile);
  mDebug("start to write sdb file, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64 ", commit index:%" PRId64
         " term:%" PRId64 " config:%" PRId64 ", file:%s",
         pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig, pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig,
         curfile);

  TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
  if (pFile == NULL) {

@@ -430,25 +434,30 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
  if (code != 0) {
    mError("failed to write sdb file:%s since %s", curfile, tstrerror(code));
  } else {
-    pSdb->lastCommitVer = pSdb->curVer;
-    pSdb->lastCommitTerm = pSdb->curTerm;
    pSdb->commitIndex = pSdb->applyIndex;
    pSdb->commitTerm = pSdb->applyTerm;
    pSdb->commitConfig = pSdb->applyConfig;
-    mDebug("write sdb file successfully, index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s",
-           pSdb->lastCommitVer, pSdb->lastCommitTerm, pSdb->curConfig, curfile);
    mDebug("write sdb file successfully, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s",
           pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig, curfile);
  }

  terrno = code;
  return code;
}

-int32_t sdbWriteFile(SSdb *pSdb) {
int32_t sdbWriteFile(SSdb *pSdb, int32_t delta) {
  int32_t code = 0;
-  if (pSdb->curVer == pSdb->lastCommitVer) {
  if (pSdb->applyIndex == pSdb->commitIndex) {
    return 0;
  }

  if (pSdb->applyIndex - pSdb->commitIndex < delta) {
    return 0;
  }

  taosThreadMutexLock(&pSdb->filelock);
  if (pSdb->pWal != NULL) {
-    code = walBeginSnapshot(pSdb->pWal, pSdb->curVer);
    code = walBeginSnapshot(pSdb->pWal, pSdb->applyIndex);
  }
  if (code == 0) {
    code = sdbWriteFileImp(pSdb);

@@ -470,7 +479,7 @@ int32_t sdbDeploy(SSdb *pSdb) {
    return -1;
  }

-  if (sdbWriteFile(pSdb) != 0) {
  if (sdbWriteFile(pSdb, 0) != 0) {
    return -1;
  }

@@ -522,9 +531,9 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) {
  snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);

  taosThreadMutexLock(&pSdb->filelock);
-  int64_t commitIndex = pSdb->lastCommitVer;
-  int64_t commitTerm = pSdb->lastCommitTerm;
-  int64_t curConfig = pSdb->curConfig;
  int64_t commitIndex = pSdb->commitIndex;
  int64_t commitTerm = pSdb->commitTerm;
  int64_t commitConfig = pSdb->commitConfig;
  if (taosCopyFile(datafile, pIter->name) < 0) {
    taosThreadMutexUnlock(&pSdb->filelock);
    terrno = TAOS_SYSTEM_ERROR(errno);

@@ -543,8 +552,8 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) {
  }

  *ppIter = pIter;
-  mInfo("sdbiter:%p, is created to read snapshot, index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s", pIter,
-        commitIndex, commitTerm, curConfig, pIter->name);
  mInfo("sdbiter:%p, is created to read snapshot, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s",
        pIter, commitIndex, commitTerm, commitConfig, pIter->name);
  return 0;
}
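
The new delta parameter turns sdbWriteFile into a threshold write: nothing is flushed until the applied index has run at least delta entries ahead of the committed index, and sdbDeploy passes 0 to force an immediate snapshot. A hedged sketch of a periodic caller; the threshold constant is illustrative and not from this diff:

// Hypothetical caller: flush an sdb snapshot only after enough new entries.
#define SDB_WRITE_DELTA 100  // assumed tuning knob, not defined in this commit

int32_t sdbMaybeSnapshot(SSdb *pSdb) {
  // Returns 0 early while applyIndex == commitIndex, or while fewer than
  // SDB_WRITE_DELTA entries have been applied since the last commit.
  return sdbWriteFile(pSdb, SDB_WRITE_DELTA);
}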

View File

@@ -180,8 +180,8 @@ void vnodeApplyMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;

-    vTrace("vgId:%d, msg:%p get from vnode-apply queue, type:%s handle:%p", vgId, pMsg, TMSG_INFO(pMsg->msgType),
-           pMsg->info.handle);
    vTrace("vgId:%d, msg:%p get from vnode-apply queue, index:%" PRId64 " type:%s handle:%p", vgId, pMsg,
           pMsg->info.conn.applyIndex, TMSG_INFO(pMsg->msgType), pMsg->info.handle);

    SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
    if (rsp.code == 0) {

@@ -334,8 +334,8 @@ static void vnodeSyncReconfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReCon
  syncGetAndDelRespRpc(pVnode->sync, cbMeta.seqNum, &rpcMsg.info);
  rpcMsg.info.conn.applyIndex = cbMeta.index;

-  vInfo("vgId:%d, alter vnode replica is confirmed, type:%s contLen:%d seq:%" PRIu64 " handle:%p", TD_VID(pVnode),
-        TMSG_INFO(pMsg->msgType), pMsg->contLen, cbMeta.seqNum, rpcMsg.info.handle);
  vInfo("vgId:%d, alter vnode replica is confirmed, type:%s contLen:%d seq:%" PRIu64 " index:%" PRId64 " handle:%p",
        TD_VID(pVnode), TMSG_INFO(pMsg->msgType), pMsg->contLen, cbMeta.seqNum, cbMeta.index, rpcMsg.info.handle);

  if (rpcMsg.info.handle != NULL) {
    tmsgSendRsp(&rpcMsg);
  }
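
Both log changes surface the raft apply index that now rides along with each message. The underlying pattern is the one visible in the reconfig hunk: stamp cbMeta.index into the response's connection info before replying. A condensed sketch, with the handler name and meta type as assumptions:

// Assumed shape of a sync-FSM confirmation callback; mirrors the hunk above.
static void respondWithApplyIndex(SVnode *pVnode, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
  SRpcMsg rpcMsg = {.msgType = pMsg->msgType, .contLen = pMsg->contLen};
  syncGetAndDelRespRpc(pVnode->sync, cbMeta.seqNum, &rpcMsg.info);
  rpcMsg.info.conn.applyIndex = cbMeta.index;  // index travels back to the client
  if (rpcMsg.info.handle != NULL) {
    tmsgSendRsp(&rpcMsg);
  }
}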

View File

@@ -15,7 +15,9 @@
#ifndef TDENGINE_QUERYUTIL_H
#define TDENGINE_QUERYUTIL_H

-#include <libs/function/function.h>
#include "function.h"
#include "nodes.h"
#include "plannodes.h"
#include "tbuffer.h"
#include "tcommon.h"
#include "tpagedbuf.h"

@@ -77,7 +79,7 @@ typedef struct SResultRowInfo {
struct SqlFunctionCtx;

size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
-int32_t initResultRowInfo(SResultRowInfo* pResultRowInfo, int32_t size);
void initResultRowInfo(SResultRowInfo* pResultRowInfo);
void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo);

void closeAllResultRows(SResultRowInfo* pResultRowInfo);

@@ -86,7 +88,7 @@ void initResultRow(SResultRow *pResultRow);
void closeResultRow(SResultRow* pResultRow);
bool isResultRowClosed(SResultRow* pResultRow);

-struct SResultRowEntryInfo* getResultCell(const SResultRow* pRow, int32_t index, const int32_t* offset);
struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset);

static FORCE_INLINE SResultRow *getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos) {
  SFilePage* bufPage = (SFilePage*) getBufPage(pBuf, pos->pageId);

@@ -98,9 +100,27 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap,
void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList);
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo);
-bool hashRemainDataInGroupInfo(SGroupResInfo* pGroupResInfo);
bool hasDataInGroupInfo(SGroupResInfo* pGroupResInfo);
bool incNextGroup(SGroupResInfo* pGroupResInfo);
int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo);

SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode);
int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo, SNode* pTagCond);
SArray* createSortInfo(SNodeList* pNodeList);
SArray* extractPartitionColInfo(SNodeList* pNodeList);
SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols, int32_t type);
SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs);
SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowCellInfoOffset);
void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols);
void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow);
SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode);
SColumn extractColumnFromColumnNode(SColumnNode* pColNode);
int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode);
void cleanupQueryTableDataCond(SQueryTableDataCond* pCond);

#endif  // TDENGINE_QUERYUTIL_H
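
The block of declarations added above gathers the plan-node-to-executor helpers in one header. A hedged sketch of how an operator constructor is expected to wire them together; the function name is hypothetical and error handling is simplified:

// Illustrative wiring, not a real operator constructor.
static int32_t buildScanOutputs(STableScanPhysiNode* pScanNode) {
  SDataBlockDescNode* pDescNode = pScanNode->scan.node.pOutputDataBlockDesc;

  SSDataBlock* pResBlock = createResDataBlock(pDescNode);  // result-block layout
  int32_t numOfOutputCols = 0;
  SArray* pColMatch = extractColMatchInfo(pScanNode->scan.pScanCols, pDescNode,
                                          &numOfOutputCols, COL_MATCH_FROM_COL_ID);

  SQueryTableDataCond cond = {0};
  int32_t code = initQueryTableDataCond(&cond, pScanNode);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }

  cleanupQueryTableDataCond(&cond);  // pairs with init, frees colList/twindows
  return TSDB_CODE_SUCCESS;
}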

View File

@@ -747,43 +747,27 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo,
void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset,
                      int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput, int32_t order);

-int32_t setGroupResultOutputBuf(SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t type, int16_t bytes,
-                                int32_t groupId, SDiskbasedBuf* pBuf, SExecTaskInfo* pTaskInfo, SAggSupporter* pAggSup);
void doDestroyBasicInfo(SOptrBasicInfo* pInfo, int32_t numOfOutput);

-int32_t setDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
-                                 int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
-                                 SArray* pColList);
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
                                     int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
                                     SArray* pColList);
void getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key, STimeWindow* win);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);

-SArray* extractPartitionColInfo(SNodeList* pNodeList);
void doSetOperatorCompleted(SOperatorInfo* pOperator);
void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock);
-SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowCellInfoOffset);
-void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols);
-void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow);
void cleanupAggSup(SAggSupporter* pAggSup);
void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId);
-SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode);
-SColumn extractColumnFromColumnNode(SColumnNode* pColNode);
-SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity, SArray* pColMatchInfo, SSortOperatorInfo* pInfo);
SSDataBlock* loadNextDataBlock(void* param);
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset);
-SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols,
-                            SExecTaskInfo* pTaskInfo, int32_t type);
-SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs);
-SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode);
-int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode);
-void clearupQueryTableDataCond(SQueryTableDataCond* pCond);
SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo,
                                   char* pData, int16_t bytes, bool masterscan, uint64_t groupId,
                                   SExecTaskInfo* pTaskInfo, bool isIntervalQuery, SAggSupporter* pSup);

@@ -799,9 +783,9 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
                                           int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode *pNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t num, SSDataBlock* pResBlock, SLimit* pLimit, SLimit* pSlimit, SNode* pCondition, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo *createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols,
-                                      SArray* pIndexMap, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams, SSDataBlock* pInputBlock,
                                                   SSDataBlock* pResBlock, SArray* pSortInfo, SArray* pColMatchColInfo,
                                                   SExecTaskInfo* pTaskInfo);

@@ -831,10 +815,9 @@ SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo*
SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
                                            STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup);

-SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
-                                      SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal,
-                                      bool multigroupResult, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, bool multigroupResult,
                                      SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
                                             SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId, SColumn* pStateKeyCol, SExecTaskInfo* pTaskInfo);

@@ -843,7 +826,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                           SSDataBlock* pResultBlock, const SNodeListNode* pValNode, SExecTaskInfo* pTaskInfo);

-SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SJoinPhysiNode* pJoinNode,
                                           SExecTaskInfo* pTaskInfo);

SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream,
                                                  SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);

@@ -864,8 +848,8 @@ void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlo
bool isTaskKilled(SExecTaskInfo* pTaskInfo);
int32_t checkForQueryBuf(size_t numOfTables);
void setTaskKilled(SExecTaskInfo* pTaskInfo);
void queryCostStatis(SExecTaskInfo* pTaskInfo);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
int32_t getMaximumIdleDurationSec();

@@ -884,7 +868,7 @@ int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t *length);
 * length: the length of data
 * return: result code, 0 means success
 */
-int32_t decodeOperator(SOperatorInfo* ops, char* data, int32_t length);
int32_t decodeOperator(SOperatorInfo* ops, const char* data, int32_t length);

void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,

@@ -914,8 +898,6 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
                                             SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, const int32_t* rowCellOffset,
                                             SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);

-int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo,
-                     SNode* pTagCond);
int32_t createMultipleDataReaders(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
                                  STableListInfo* pTableListInfo, SArray* arrayReader, uint64_t queryId,
                                  uint64_t taskId, SNode* pTagCond);

View File

@@ -63,7 +63,7 @@ typedef int32_t (*_sort_merge_compar_fn_t)(const void* p1, const void* p2, void*
 * @param type
 * @return
 */
-SSortHandle* tsortCreateSortHandle(SArray* pOrderInfo, SArray* pIndexMap, int32_t type, int32_t pageSize, int32_t numOfPages, SSDataBlock* pBlock, const char* idstr);
SSortHandle* tsortCreateSortHandle(SArray* pOrderInfo, int32_t type, int32_t pageSize, int32_t numOfPages, SSDataBlock* pBlock, const char* idstr);

/**
 *
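
With the pIndexMap parameter gone, a sort handle is built from the order info and input block alone. A hedged call-site sketch for the new signature; the page size and page count are illustrative values:

static SSortHandle* openSortHandle(SArray* pSortInfo, SSDataBlock* pInputBlock) {
  // 4096-byte pages and 256 in-memory pages are arbitrary example values.
  return tsortCreateSortHandle(pSortInfo, SORT_SINGLESOURCE_SORT, 4096, 256,
                               pInputBlock, "example-sort-id");
}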

View File

@@ -14,24 +14,20 @@
 */
#include "os.h"
-#include "tmsg.h"
#include "index.h"
#include "function.h"
#include "functionMgt.h"
#include "tdatablock.h"
#include "thash.h"
#include "tmsg.h"

#include "executil.h"
#include "executorimpl.h"
#include "tcompression.h"
-#include "tlosertree.h"

-typedef struct SCompSupporter {
-  STableQueryInfo **pTableQueryInfo;
-  int32_t          *rowIndex;
-  int32_t           order;
-} SCompSupporter;

-int32_t initResultRowInfo(SResultRowInfo *pResultRowInfo, int32_t size) {
void initResultRowInfo(SResultRowInfo *pResultRowInfo) {
  pResultRowInfo->size = 0;
  pResultRowInfo->cur.pageId = -1;
-  return TSDB_CODE_SUCCESS;
}

void cleanupResultRowInfo(SResultRowInfo *pResultRowInfo) {
@@ -74,7 +70,7 @@ void closeResultRow(SResultRow* pResultRow) {
}

// TODO refactor: use macro
-SResultRowEntryInfo* getResultCell(const SResultRow* pRow, int32_t index, const int32_t* offset) {
SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset) {
  assert(index >= 0 && offset != NULL);
  return (SResultRowEntryInfo*)((char*) pRow->pEntryInfo + offset[index]);
}
@@ -160,7 +156,7 @@ void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL
  ASSERT(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo));
}

-bool hashRemainDataInGroupInfo(SGroupResInfo* pGroupResInfo) {
bool hasDataInGroupInfo(SGroupResInfo* pGroupResInfo) {
  if (pGroupResInfo->pRows == NULL) {
    return false;
  }
@@ -177,86 +173,532 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) {
  return (int32_t) taosArrayGetSize(pGroupResInfo->pRows);
}

-static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) {
-  int32_t left = *(int32_t *)pLeft;
-  int32_t right = *(int32_t *)pRight;
-
-  SCompSupporter * supporter = (SCompSupporter *)param;
-
-  int32_t leftPos = supporter->rowIndex[left];
-  int32_t rightPos = supporter->rowIndex[right];
-
-  /* left source is exhausted */
-  if (leftPos == -1) {
-    return 1;
-  }
-
-  /* right source is exhausted*/
-  if (rightPos == -1) {
-    return -1;
-  }
-
-  ASSERT(0);
-  STableQueryInfo** pList = supporter->pTableQueryInfo;
-  //  SResultRow* pWindowRes1 = pList[left]->resInfo.pResult[leftPos];
-  //  SResultRow * pWindowRes1 = getResultRow(&(pList[left]->resInfo), leftPos);
-  //  TSKEY leftTimestamp = pWindowRes1->win.skey;
-  //  SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo);
-  //  SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos);
-  //  SResultRow* pWindowRes2 = pList[right]->resInfo.pResult[rightPos];
-  //  TSKEY rightTimestamp = pWindowRes2->win.skey;
-  //  if (leftTimestamp == rightTimestamp) {
-  return 0;
-  //  }
-  //  if (supporter->order == TSDB_ORDER_ASC) {
-  //    return (leftTimestamp > rightTimestamp)? 1:-1;
-  //  } else {
-  //    return (leftTimestamp < rightTimestamp)? 1:-1;
-  //  }
-}

-int32_t tsAscOrder(const void* p1, const void* p2) {
-  SResultRowCell* pc1 = (SResultRowCell*) p1;
-  SResultRowCell* pc2 = (SResultRowCell*) p2;
-
-  if (pc1->groupId == pc2->groupId) {
-    ASSERT(0);
-    // if (pc1->pRow->win.skey == pc2->pRow->win.skey) {
-    //   return 0;
-    // } else {
-    //   return (pc1->pRow->win.skey < pc2->pRow->win.skey)? -1:1;
-    // }
-  } else {
-    return (pc1->groupId < pc2->groupId)? -1:1;
-  }
-}

SArray* createSortInfo(SNodeList* pNodeList) {
  size_t  numOfCols = LIST_LENGTH(pNodeList);
  SArray* pList = taosArrayInit(numOfCols, sizeof(SBlockOrderInfo));
  if (pList == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return pList;
  }

  for (int32_t i = 0; i < numOfCols; ++i) {
    SOrderByExprNode* pSortKey = (SOrderByExprNode*)nodesListGetNode(pNodeList, i);
    SBlockOrderInfo   bi = {0};
    bi.order = (pSortKey->order == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
    bi.nullFirst = (pSortKey->nullOrder == NULL_ORDER_FIRST);

    SColumnNode* pColNode = (SColumnNode*)pSortKey->pExpr;
    bi.slotId = pColNode->slotId;
    taosArrayPush(pList, &bi);
  }

  return pList;
}

SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) {
  int32_t numOfCols = LIST_LENGTH(pNode->pSlots);

  SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
  pBlock->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData));

  pBlock->info.blockId = pNode->dataBlockId;
  pBlock->info.rowSize = pNode->totalRowSize;  // todo ??
  pBlock->info.type = STREAM_INVALID;

  for (int32_t i = 0; i < numOfCols; ++i) {
    SColumnInfoData idata = {{0}};
    SSlotDescNode*  pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);
    // if (!pDescNode->output) {  // todo disable it temporarily
    //   continue;
    // }

    idata.info.type = pDescNode->dataType.type;
    idata.info.bytes = pDescNode->dataType.bytes;
    idata.info.scale = pDescNode->dataType.scale;
    idata.info.slotId = pDescNode->slotId;
    idata.info.precision = pDescNode->dataType.precision;

    if (IS_VAR_DATA_TYPE(idata.info.type)) {
      pBlock->info.hasVarCol = true;
    }

    taosArrayPush(pBlock->pDataBlock, &idata);
  }

  pBlock->info.numOfCols = taosArrayGetSize(pBlock->pDataBlock);
  return pBlock;
}
int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo, SNode* pTagCond) {
int32_t code = TSDB_CODE_SUCCESS;
pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
uint64_t tableUid = pScanNode->uid;
if (pScanNode->tableType == TSDB_SUPER_TABLE) {
if (pTagCond) {
SIndexMetaArg metaArg = {
.metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};
SArray* res = taosArrayInit(8, sizeof(uint64_t));
code = doFilterTag(pTagCond, &metaArg, res);
if (code == TSDB_CODE_INDEX_REBUILDING) { // todo
// doFilter();
} else if (code != TSDB_CODE_SUCCESS) {
qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
taosArrayDestroy(res);
terrno = code;
return code;
} else {
qDebug("sucess to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid);
}
for (int i = 0; i < taosArrayGetSize(res); i++) {
STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)};
taosArrayPush(pListInfo->pTableList, &info);
}
taosArrayDestroy(res);
} else {
code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
}
} else { // Create one table group.
STableKeyInfo info = {.lastKey = 0, .uid = tableUid};
taosArrayPush(pListInfo->pTableList, &info);
}
return code;
}
SArray* extractPartitionColInfo(SNodeList* pNodeList) {
if(!pNodeList) {
return NULL;
}
size_t numOfCols = LIST_LENGTH(pNodeList);
SArray* pList = taosArrayInit(numOfCols, sizeof(SColumn));
if (pList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnNode* pColNode = (SColumnNode*)nodesListGetNode(pNodeList, i);
// todo extract method
SColumn c = {0};
c.slotId = pColNode->slotId;
c.colId = pColNode->colId;
c.type = pColNode->node.resType.type;
c.bytes = pColNode->node.resType.bytes;
c.precision = pColNode->node.resType.precision;
c.scale = pColNode->node.resType.scale;
taosArrayPush(pList, &c);
}
return pList;
}
SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols,
int32_t type) {
size_t numOfCols = LIST_LENGTH(pNodeList);
SArray* pList = taosArrayInit(numOfCols, sizeof(SColMatchInfo));
if (pList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
for (int32_t i = 0; i < numOfCols; ++i) {
STargetNode* pNode = (STargetNode*)nodesListGetNode(pNodeList, i);
SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
SColMatchInfo c = {0};
c.output = true;
c.colId = pColNode->colId;
c.srcSlotId = pColNode->slotId;
c.matchType = type;
c.targetSlotId = pNode->slotId;
taosArrayPush(pList, &c);
}
*numOfOutputCols = 0;
int32_t num = LIST_LENGTH(pOutputNodeList->pSlots);
for (int32_t i = 0; i < num; ++i) {
SSlotDescNode* pNode = (SSlotDescNode*)nodesListGetNode(pOutputNodeList->pSlots, i);
// todo: add reserve flag check
// it is a column reserved for the arithmetic expression calculation
if (pNode->slotId >= numOfCols) {
(*numOfOutputCols) += 1;
continue;
}
SColMatchInfo* info = taosArrayGet(pList, pNode->slotId);
if (pNode->output) {
(*numOfOutputCols) += 1;
} else {
info->output = false;
}
}
return pList;
}
static SResSchema createResSchema(int32_t type, int32_t bytes, int32_t slotId, int32_t scale, int32_t precision,
const char* name) {
SResSchema s = {0};
s.scale = scale;
s.type = type;
s.bytes = bytes;
s.slotId = slotId;
s.precision = precision;
strncpy(s.name, name, tListLen(s.name));
return s;
}
static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType) {
SColumn* pCol = taosMemoryCalloc(1, sizeof(SColumn));
if (pCol == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pCol->slotId = slotId;
pCol->colId = colId;
pCol->bytes = pType->bytes;
pCol->type = pType->type;
pCol->scale = pType->scale;
pCol->precision = pType->precision;
pCol->dataBlockId = blockId;
return pCol;
}
SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs) {
int32_t numOfFuncs = LIST_LENGTH(pNodeList);
int32_t numOfGroupKeys = 0;
if (pGroupKeys != NULL) {
numOfGroupKeys = LIST_LENGTH(pGroupKeys);
}
*numOfExprs = numOfFuncs + numOfGroupKeys;
SExprInfo* pExprs = taosMemoryCalloc(*numOfExprs, sizeof(SExprInfo));
for (int32_t i = 0; i < (*numOfExprs); ++i) {
STargetNode* pTargetNode = NULL;
if (i < numOfFuncs) {
pTargetNode = (STargetNode*)nodesListGetNode(pNodeList, i);
} else {
pTargetNode = (STargetNode*)nodesListGetNode(pGroupKeys, i - numOfFuncs);
}
SExprInfo* pExp = &pExprs[i];
pExp->pExpr = taosMemoryCalloc(1, sizeof(tExprNode));
pExp->pExpr->_function.num = 1;
pExp->pExpr->_function.functionId = -1;
int32_t type = nodeType(pTargetNode->pExpr);
// it is a project query, or group by column
if (type == QUERY_NODE_COLUMN) {
pExp->pExpr->nodeType = QUERY_NODE_COLUMN;
SColumnNode* pColNode = (SColumnNode*)pTargetNode->pExpr;
pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
pExp->base.numOfParams = 1;
SDataType* pType = &pColNode->node.resType;
pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
pType->precision, pColNode->colName);
pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType);
pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN;
} else if (type == QUERY_NODE_VALUE) {
pExp->pExpr->nodeType = QUERY_NODE_VALUE;
SValueNode* pValNode = (SValueNode*)pTargetNode->pExpr;
pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
pExp->base.numOfParams = 1;
SDataType* pType = &pValNode->node.resType;
pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
pType->precision, pValNode->node.aliasName);
pExp->base.pParam[0].type = FUNC_PARAM_TYPE_VALUE;
nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param);
} else if (type == QUERY_NODE_FUNCTION) {
pExp->pExpr->nodeType = QUERY_NODE_FUNCTION;
SFunctionNode* pFuncNode = (SFunctionNode*)pTargetNode->pExpr;
SDataType* pType = &pFuncNode->node.resType;
pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
pType->precision, pFuncNode->node.aliasName);
pExp->pExpr->_function.functionId = pFuncNode->funcId;
pExp->pExpr->_function.pFunctNode = pFuncNode;
strncpy(pExp->pExpr->_function.functionName, pFuncNode->functionName,
tListLen(pExp->pExpr->_function.functionName));
#if 1
// todo refactor: add the parameter for tbname function
if (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0) {
pFuncNode->pParameterList = nodesMakeList();
ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0);
SValueNode* res = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
if (NULL == res) { // todo handle error
} else {
res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
nodesListAppend(pFuncNode->pParameterList, (SNode*)res);
}
}
#endif
int32_t numOfParam = LIST_LENGTH(pFuncNode->pParameterList);
pExp->base.pParam = taosMemoryCalloc(numOfParam, sizeof(SFunctParam));
pExp->base.numOfParams = numOfParam;
for (int32_t j = 0; j < numOfParam; ++j) {
SNode* p1 = nodesListGetNode(pFuncNode->pParameterList, j);
if (p1->type == QUERY_NODE_COLUMN) {
SColumnNode* pcn = (SColumnNode*)p1;
pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN;
pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType);
} else if (p1->type == QUERY_NODE_VALUE) {
SValueNode* pvn = (SValueNode*)p1;
pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
nodesValueNodeToVariant(pvn, &pExp->base.pParam[j].param);
}
}
} else if (type == QUERY_NODE_OPERATOR) {
pExp->pExpr->nodeType = QUERY_NODE_OPERATOR;
SOperatorNode* pNode = (SOperatorNode*)pTargetNode->pExpr;
pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
pExp->base.numOfParams = 1;
SDataType* pType = &pNode->node.resType;
pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
pType->precision, pNode->node.aliasName);
pExp->pExpr->_optrRoot.pRootNode = pTargetNode->pExpr;
} else {
ASSERT(0);
}
}
return pExprs;
}
// set the output buffer for the selectivity + tag query
static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
  int32_t num = 0;

  SqlFunctionCtx*  p = NULL;
  SqlFunctionCtx** pValCtx = taosMemoryCalloc(numOfOutput, POINTER_BYTES);
  if (pValCtx == NULL) {
    return TSDB_CODE_QRY_OUT_OF_MEMORY;
  }

  for (int32_t i = 0; i < numOfOutput; ++i) {
    if (strcmp(pCtx[i].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
      pValCtx[num++] = &pCtx[i];
    } else if (fmIsSelectFunc(pCtx[i].functionId)) {
      p = &pCtx[i];
    }
  }

  if (p != NULL) {
    p->subsidiaries.pCtx = pValCtx;
    p->subsidiaries.num = num;
  } else {
    taosMemoryFreeClear(pValCtx);
  }

  return TSDB_CODE_SUCCESS;
}
SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowCellInfoOffset) {
SqlFunctionCtx* pFuncCtx = (SqlFunctionCtx*)taosMemoryCalloc(numOfOutput, sizeof(SqlFunctionCtx));
if (pFuncCtx == NULL) {
return NULL;
}
*rowCellInfoOffset = taosMemoryCalloc(numOfOutput, sizeof(int32_t));
if (*rowCellInfoOffset == 0) {
taosMemoryFreeClear(pFuncCtx);
return NULL;
}
for (int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pExpr = &pExprInfo[i];
SExprBasicInfo* pFunct = &pExpr->base;
SqlFunctionCtx* pCtx = &pFuncCtx[i];
pCtx->functionId = -1;
pCtx->curBufPage = -1;
pCtx->pExpr = pExpr;
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
SFuncExecEnv env = {0};
pCtx->functionId = pExpr->pExpr->_function.pFunctNode->funcId;
if (fmIsAggFunc(pCtx->functionId) || fmIsIndefiniteRowsFunc(pCtx->functionId)) {
bool isUdaf = fmIsUserDefinedFunc(pCtx->functionId);
if (!isUdaf) {
fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
} else {
char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
strncpy(pCtx->udfName, udfName, strlen(udfName));
fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
}
pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
} else {
fmGetScalarFuncExecFuncs(pCtx->functionId, &pCtx->sfp);
if (pCtx->sfp.getEnv != NULL) {
pCtx->sfp.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
}
}
pCtx->resDataInfo.interBufSize = env.calcMemSize;
} else if (pExpr->pExpr->nodeType == QUERY_NODE_COLUMN || pExpr->pExpr->nodeType == QUERY_NODE_OPERATOR ||
pExpr->pExpr->nodeType == QUERY_NODE_VALUE) {
// for simple column, the result buffer needs to hold at least one element.
pCtx->resDataInfo.interBufSize = pFunct->resSchema.bytes;
}
pCtx->input.numOfInputCols = pFunct->numOfParams;
pCtx->input.pData = taosMemoryCalloc(pFunct->numOfParams, POINTER_BYTES);
pCtx->input.pColumnDataAgg = taosMemoryCalloc(pFunct->numOfParams, POINTER_BYTES);
pCtx->pTsOutput = NULL;
pCtx->resDataInfo.bytes = pFunct->resSchema.bytes;
pCtx->resDataInfo.type = pFunct->resSchema.type;
pCtx->order = TSDB_ORDER_ASC;
pCtx->start.key = INT64_MIN;
pCtx->end.key = INT64_MIN;
pCtx->numOfParams = pExpr->base.numOfParams;
pCtx->increase = false;
pCtx->param = pFunct->pParam;
}
for (int32_t i = 1; i < numOfOutput; ++i) {
(*rowCellInfoOffset)[i] =
(int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowEntryInfo) + pFuncCtx[i - 1].resDataInfo.interBufSize);
}
setSelectValueColumnInfo(pFuncCtx, numOfOutput);
return pFuncCtx;
}
// NOTE: sources columns are more than the destination SSDatablock columns.
void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols) {
size_t numOfSrcCols = taosArrayGetSize(pCols);
int32_t i = 0, j = 0;
while (i < numOfSrcCols && j < taosArrayGetSize(pColMatchInfo)) {
SColumnInfoData* p = taosArrayGet(pCols, i);
SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, j);
if (!pmInfo->output) {
j++;
continue;
}
if (p->info.colId == pmInfo->colId) {
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, pmInfo->targetSlotId);
colDataAssign(pDst, p, pBlock->info.rows);
i++;
j++;
} else if (p->info.colId < pmInfo->colId) {
i++;
} else {
ASSERT(0);
}
} }
} }
-int32_t tsDescOrder(const void* p1, const void* p2) {
-  SResultRowCell* pc1 = (SResultRowCell*) p1;
-  SResultRowCell* pc2 = (SResultRowCell*) p2;
-
-  if (pc1->groupId == pc2->groupId) {
-    ASSERT(0);
-    // if (pc1->pRow->win.skey == pc2->pRow->win.skey) {
-    //   return 0;
-    // } else {
-    //   return (pc1->pRow->win.skey < pc2->pRow->win.skey)? 1:-1;
-    // }
-  } else {
-    return (pc1->groupId < pc2->groupId)? -1:1;
-  }
-}

SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
  SInterval interval = {
      .interval = pTableScanNode->interval,
      .sliding = pTableScanNode->sliding,
      .intervalUnit = pTableScanNode->intervalUnit,
      .slidingUnit = pTableScanNode->slidingUnit,
      .offset = pTableScanNode->offset,
  };

  return interval;
}

SColumn extractColumnFromColumnNode(SColumnNode* pColNode) {
  SColumn c = {0};
  c.slotId = pColNode->slotId;
  c.colId = pColNode->colId;
  c.type = pColNode->node.resType.type;
  c.bytes = pColNode->node.resType.bytes;
  c.scale = pColNode->node.resType.scale;
  c.precision = pColNode->node.resType.precision;
  return c;
}
int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) {
pCond->loadExternalRows = false;
pCond->order = pTableScanNode->scanSeq[0] > 0 ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
pCond->numOfCols = LIST_LENGTH(pTableScanNode->scan.pScanCols);
pCond->colList = taosMemoryCalloc(pCond->numOfCols, sizeof(SColumnInfo));
if (pCond->colList == NULL) {
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
return terrno;
} }
// pCond->twindow = pTableScanNode->scanRange;
// TODO: get it from stable scan node
pCond->numOfTWindows = 1;
pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow));
pCond->twindows[0] = pTableScanNode->scanRange;
pCond->suid = pTableScanNode->scan.suid;
#if 1
// todo work around a problem, remove it later
for (int32_t i = 0; i < pCond->numOfTWindows; ++i) {
if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) ||
(pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) {
TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
}
}
#endif
for (int32_t i = 0; i < pCond->numOfTWindows; ++i) {
if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) ||
(pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) {
TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
}
}
taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow);
pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER;
// pCond->type = pTableScanNode->scanFlag;
int32_t j = 0;
for (int32_t i = 0; i < pCond->numOfCols; ++i) {
STargetNode* pNode = (STargetNode*)nodesListGetNode(pTableScanNode->scan.pScanCols, i);
SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
if (pColNode->colType == COLUMN_TYPE_TAG) {
continue;
}
pCond->colList[j].type = pColNode->node.resType.type;
pCond->colList[j].bytes = pColNode->node.resType.bytes;
pCond->colList[j].colId = pColNode->colId;
j += 1;
}
pCond->numOfCols = j;
return TSDB_CODE_SUCCESS;
} }
-void orderTheResultRows(STaskRuntimeEnv* pRuntimeEnv) {
-  __compar_fn_t fn = NULL;
-  // if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) {
-  //   fn = tsAscOrder;
-  // } else {
-  //   fn = tsDescOrder;
-  // }
-
-  taosArraySort(pRuntimeEnv->pResultRowArrayList, fn);
-}

void cleanupQueryTableDataCond(SQueryTableDataCond* pCond) {
  taosMemoryFree(pCond->twindows);
  taosMemoryFree(pCond->colList);
}
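
Among the helpers moved into this file, getTableList now receives the whole SScanPhysiNode, so the super-table branch can consult the uid, table type, and tag-index condition together. A hedged sketch of the call; the wrapper name is hypothetical and reader creation is elided:

// Sketch: resolving the candidate table list for a scan, per the new API.
static int32_t resolveTables(void* metaHandle, SScanPhysiNode* pScanNode,
                             SNode* pTagCond, STableListInfo* pListInfo) {
  // For a super table this walks the tag index (or lists all children); for
  // a normal table it yields a single STableKeyInfo entry.
  int32_t code = getTableList(metaHandle, pScanNode, pListInfo, pTagCond);
  if (code != TSDB_CODE_SUCCESS) {
    taosArrayDestroy(pListInfo->pTableList);
  }
  return code;
}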

View File

@@ -219,4 +219,23 @@ int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t *resNum, SExplainExecInfo
  return getOperatorExplainExecInfo(pTaskInfo->pRoot, pRes, &capacity, resNum);
}
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len) {
SExecTaskInfo* pTaskInfo = (struct SExecTaskInfo*)tinfo;
if (pTaskInfo->pRoot == NULL) {
return TSDB_CODE_INVALID_PARA;
}
return encodeOperator(pTaskInfo->pRoot, pOutput, len);
}
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len) {
SExecTaskInfo* pTaskInfo = (struct SExecTaskInfo*) tinfo;
if (pTaskInfo == NULL || pInput == NULL || len == 0) {
return TSDB_CODE_INVALID_PARA;
}
return decodeOperator(pTaskInfo->pRoot, pInput, len);
}
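
The two entry points above pass an operator tree's status through encodeOperator and decodeOperator. A hedged round-trip sketch; buffer ownership by the caller is an assumption:

// Illustrative round trip between two task handles.
static int32_t copyTaskStatus(qTaskInfo_t src, qTaskInfo_t dst) {
  char*   buf = NULL;
  int32_t len = 0;
  int32_t code = qSerializeTaskStatus(src, &buf, &len);
  if (code == 0) {
    code = qDeserializeTaskStatus(dst, buf, len);
  }
  taosMemoryFree(buf);  // assumed: the serialized buffer is caller-owned
  return code;
}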

File diff suppressed because it is too large

View File

@@ -26,8 +26,10 @@
#include "ttypes.h"
#include "executorInt.h"

static void*    getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity);
-static void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
static int32_t  setGroupResultOutputBuf(SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t type, int16_t bytes,
                                        int32_t groupId, SDiskbasedBuf* pBuf, SExecTaskInfo* pTaskInfo, SAggSupporter* pAggSup);

static void destroyGroupOperatorInfo(void* param, int32_t numOfOutput) {
  SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*)param;

@@ -291,7 +293,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);

    size_t rows = pRes->info.rows;
-    if (rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) {
    if (rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
      doSetOperatorCompleted(pOperator);
    }

@@ -355,7 +357,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
    doFilter(pInfo->pCondition, pRes);

-    bool hasRemain = hashRemainDataInGroupInfo(&pInfo->groupResInfo);
    bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
    if (!hasRemain) {
      doSetOperatorCompleted(pOperator);
      break;

@@ -395,7 +397,7 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
  initResultSizeInfo(pOperator, 4096);
  initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResultBlock, pInfo->groupKeyLen, pTaskInfo->id.str);
-  initResultRowInfo(&pInfo->binfo.resultRowInfo, 8);
  initResultRowInfo(&pInfo->binfo.resultRowInfo);

  pOperator->name = "GroupbyAggOperator";
  pOperator->blocking = true;

@@ -738,4 +740,18 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
  taosMemoryFreeClear(pInfo);
  taosMemoryFreeClear(pOperator);
  return NULL;
}

int32_t setGroupResultOutputBuf(SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t type, int16_t bytes,
                                int32_t groupId, SDiskbasedBuf* pBuf, SExecTaskInfo* pTaskInfo,
                                SAggSupporter* pAggSup) {
  SResultRowInfo* pResultRowInfo = &binfo->resultRowInfo;
  SqlFunctionCtx* pCtx = binfo->pCtx;

  SResultRow* pResultRow =
      doSetResultOutBufByKey(pBuf, pResultRowInfo, (char*)pData, bytes, true, groupId, pTaskInfo, false, pAggSup);
  assert(pResultRow != NULL);

  setResultRowInitCtx(pResultRow, pCtx, numOfCols, binfo->rowCellInfoOffset);
  return TSDB_CODE_SUCCESS;
}
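
The rename to hasDataInGroupInfo reads as a plain predicate in the drain loops above. A condensed sketch of that loop shape, mirroring hashGroupbyAggregate with the surrounding state abbreviated:

// Sketch of the emit-until-empty pattern used by the group-by operator.
static void drainGroups(SOperatorInfo* pOperator, SGroupbyOperatorInfo* pInfo) {
  while (1) {
    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
    doFilter(pInfo->pCondition, pInfo->binfo.pRes);
    if (!hasDataInGroupInfo(&pInfo->groupResInfo)) {
      doSetOperatorCompleted(pOperator);  // no buffered group results remain
      break;
    }
  }
}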

View File

@@ -28,27 +28,32 @@ static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator);
static void destroyMergeJoinOperator(void* param, int32_t numOfOutput);
static void extractTimeCondition(SJoinOperatorInfo* Info, SLogicConditionNode* pLogicConditionNode);

-SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo,
-                                           int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition,
-                                           SExecTaskInfo* pTaskInfo) {
SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SJoinPhysiNode* pJoinNode,
                                           SExecTaskInfo* pTaskInfo) {
  SJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SJoinOperatorInfo));
  SOperatorInfo*     pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
  if (pOperator == NULL || pInfo == NULL) {
    goto _error;
  }

  SSDataBlock* pResBlock = createResDataBlock(pJoinNode->node.pOutputDataBlockDesc);

  int32_t    numOfCols = 0;
  SExprInfo* pExprInfo = createExprInfo(pJoinNode->pTargets, NULL, &numOfCols);

  initResultSizeInfo(pOperator, 4096);

  pInfo->pRes = pResBlock;
  pOperator->name = "MergeJoinOperator";
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN;
  pOperator->blocking = false;
  pOperator->status = OP_NOT_OPENED;
  pOperator->pExpr = pExprInfo;
  pOperator->numOfExprs = numOfCols;
  pOperator->info = pInfo;
  pOperator->pTaskInfo = pTaskInfo;

  SNode* pOnCondition = pJoinNode->pOnConditions;
  if (nodeType(pOnCondition) == QUERY_NODE_OPERATOR) {
    SOperatorNode* pNode = (SOperatorNode*)pOnCondition;
    setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft);

View File

@ -496,18 +496,6 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
return NULL; return NULL;
} }
SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
SInterval interval = {
.interval = pTableScanNode->interval,
.sliding = pTableScanNode->sliding,
.intervalUnit = pTableScanNode->intervalUnit,
.slidingUnit = pTableScanNode->slidingUnit,
.offset = pTableScanNode->offset,
};
return interval;
}
static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) { static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) {
SFileBlockLoadRecorder* pRecorder = taosMemoryCalloc(1, sizeof(SFileBlockLoadRecorder)); SFileBlockLoadRecorder* pRecorder = taosMemoryCalloc(1, sizeof(SFileBlockLoadRecorder));
STableScanInfo* pTableScanInfo = pOptr->info; STableScanInfo* pTableScanInfo = pOptr->info;
@ -520,7 +508,7 @@ static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptr
static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) { static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) {
STableScanInfo* pTableScanInfo = (STableScanInfo*)param; STableScanInfo* pTableScanInfo = (STableScanInfo*)param;
blockDataDestroy(pTableScanInfo->pResBlock); blockDataDestroy(pTableScanInfo->pResBlock);
clearupQueryTableDataCond(&pTableScanInfo->cond); cleanupQueryTableDataCond(&pTableScanInfo->cond);
tsdbReaderClose(pTableScanInfo->dataReader); tsdbReaderClose(pTableScanInfo->dataReader);
@ -542,8 +530,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc; SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc;
int32_t numOfCols = 0; int32_t numOfCols = 0;
SArray* pColList = SArray* pColList = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
int32_t code = initQueryTableDataCond(&pInfo->cond, pTableScanNode); int32_t code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
@ -1066,8 +1053,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan
STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info; STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info;
int32_t numOfCols = 0; int32_t numOfCols = 0;
pInfo->pColMatchInfo = pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo); int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo);
SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t)); SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t));
@ -1525,7 +1511,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
} }
} }
setDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pRsp->numOfRows, pRsp->data, pRsp->compLen, extractDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pRsp->numOfRows, pRsp->data, pRsp->compLen,
pOperator->numOfExprs, startTs, NULL, pInfo->scanCols); pOperator->numOfExprs, startTs, NULL, pInfo->scanCols);
// todo log the filter info // todo log the filter info
@ -1614,7 +1600,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
SSDataBlock* pResBlock = createResDataBlock(pDescNode); SSDataBlock* pResBlock = createResDataBlock(pDescNode);
int32_t num = 0; int32_t num = 0;
SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, pTaskInfo, COL_MATCH_FROM_COL_ID); SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID);
pInfo->accountId = pScanPhyNode->accountId; pInfo->accountId = pScanPhyNode->accountId;
pInfo->showRewrite = pScanPhyNode->showRewrite; pInfo->showRewrite = pScanPhyNode->showRewrite;
@ -1820,27 +1806,26 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
SDataBlockDescNode* pDescNode = pPhyNode->node.pOutputDataBlockDesc; SDataBlockDescNode* pDescNode = pPhyNode->node.pOutputDataBlockDesc;
int32_t num = 0;
int32_t numOfExprs = 0; int32_t numOfExprs = 0;
SExprInfo* pExprInfo = createExprInfo(pPhyNode->pScanPseudoCols, NULL, &numOfExprs); SExprInfo* pExprInfo = createExprInfo(pPhyNode->pScanPseudoCols, NULL, &numOfExprs);
SArray* colList = extractColMatchInfo(pPhyNode->pScanPseudoCols, pDescNode, &num, COL_MATCH_FROM_COL_ID);
int32_t num = 0; pInfo->pTableList = pTableListInfo;
SArray* colList = extractColMatchInfo(pPhyNode->pScanPseudoCols, pDescNode, &num, pTaskInfo, COL_MATCH_FROM_COL_ID); pInfo->pColMatchInfo = colList;
pInfo->pRes = createResDataBlock(pDescNode);
pInfo->readHandle = *pReadHandle;
pInfo->curPos = 0;
pInfo->pFilterNode = pPhyNode->node.pConditions;
pInfo->pTableList = pTableListInfo; pOperator->name = "TagScanOperator";
pInfo->pColMatchInfo = colList;
pInfo->pRes = createResDataBlock(pDescNode);
;
pInfo->readHandle = *pReadHandle;
pInfo->curPos = 0;
pInfo->pFilterNode = pPhyNode->node.pConditions;
pOperator->name = "TagScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN;
pOperator->blocking = false; pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED; pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo; pOperator->info = pInfo;
pOperator->pExpr = pExprInfo; pOperator->pExpr = pExprInfo;
pOperator->numOfExprs = numOfExprs; pOperator->numOfExprs = numOfExprs;
pOperator->pTaskInfo = pTaskInfo; pOperator->pTaskInfo = pTaskInfo;
initResultSizeInfo(pOperator, 4096); initResultSizeInfo(pOperator, 4096);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
@ -1910,7 +1895,7 @@ int32_t createMultipleDataReaders(STableScanPhysiNode* pTableScanNode, SReadHand
STableListInfo* pTableListInfo, SArray* arrayReader, uint64_t queryId, STableListInfo* pTableListInfo, SArray* arrayReader, uint64_t queryId,
uint64_t taskId, SNode* pTagCond) { uint64_t taskId, SNode* pTagCond) {
int32_t code = int32_t code =
getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo, pTagCond);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
goto _error; goto _error;
} }
@ -1938,7 +1923,7 @@ int32_t createMultipleDataReaders(STableScanPhysiNode* pTableScanNode, SReadHand
taosArrayDestroy(subListInfo->pTableList); taosArrayDestroy(subListInfo->pTableList);
taosMemoryFree(subListInfo); taosMemoryFree(subListInfo);
} }
clearupQueryTableDataCond(&cond); cleanupQueryTableDataCond(&cond);
return 0; return 0;
@ -2136,7 +2121,7 @@ int32_t doOpenTableMergeScanOperator(SOperatorInfo* pOperator) {
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
pInfo->pSortHandle = pInfo->pSortHandle =
tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize,
numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str); numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str);
tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlock, NULL, NULL); tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlock, NULL, NULL);
@ -2213,7 +2198,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) {
  STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param;
  cleanupQueryTableDataCond(&pTableScanInfo->cond);

  for (int32_t i = 0; i < taosArrayGetSize(pTableScanInfo->dataReaders); ++i) {
    STsdbReader* reader = taosArrayGetP(pTableScanInfo->dataReaders, i);
@ -2263,7 +2248,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
  int32_t numOfCols = 0;
  SArray* pColList =
      extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);

  int32_t code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
  if (code != TSDB_CODE_SUCCESS) {

View File

@ -22,41 +22,52 @@ static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain
static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);

SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo) {
  SSortOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortOperatorInfo));
  SOperatorInfo*     pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
  if (pInfo == NULL || pOperator == NULL /* || rowSize > 100 * 1024 * 1024*/) {
    goto _error;
  }

  SDataBlockDescNode* pDescNode = pSortPhyNode->node.pOutputDataBlockDesc;

  int32_t      numOfCols = 0;
  SSDataBlock* pResBlock = createResDataBlock(pDescNode);
  SExprInfo*   pExprInfo = createExprInfo(pSortPhyNode->pExprs, NULL, &numOfCols);

  int32_t numOfOutputCols = 0;
  SArray* pColMatchColInfo =
      extractColMatchInfo(pSortPhyNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID);

  pInfo->binfo.pCtx = createSqlFunctionCtx(pExprInfo, numOfCols, &pInfo->binfo.rowCellInfoOffset);
  pInfo->binfo.pRes = pResBlock;

  initResultSizeInfo(pOperator, 1024);

  pInfo->pSortInfo = createSortInfo(pSortPhyNode->pSortKeys);
  pInfo->pColMatchInfo = pColMatchColInfo;
  pOperator->name = "SortOperator";
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SORT;
  pOperator->blocking = true;
  pOperator->status = OP_NOT_OPENED;
  pOperator->info = pInfo;
  pOperator->pExpr = pExprInfo;
  pOperator->numOfExprs = numOfCols;
  pOperator->pTaskInfo = pTaskInfo;

  // lazy evaluation for the following parameters since the input datablock is not known till now.
  // pInfo->bufPageSize = rowSize < 1024 ? 1024 * 2 : rowSize * 2;  // there are headers, so pageSize = rowSize + header
  // pInfo->sortBufSize = pInfo->bufPageSize * 16;                  // TODO dynamically set the available sort buffer

  pOperator->fpSet = createOperatorFpSet(doOpenSortOperator, doSort, NULL, NULL, destroyOrderOperatorInfo, NULL, NULL,
                                         getExplainExecInfo);

  int32_t code = appendDownstream(pOperator, &downstream, 1);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  return pOperator;

_error:
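Aside: operators are driven through the function-pointer table built by createOperatorFpSet (open, next, destroy, explain), so the executor can run any operator uniformly. A stripped-down sketch of that pattern, using hypothetical names rather than the real SOperatorInfo layout:

#include <stdio.h>

// Hypothetical, minimal version of the open/next function-pointer
// pattern used by executor operators.
typedef struct ToyOperator {
  int (*open)(struct ToyOperator *op);
  int (*next)(struct ToyOperator *op, int *row);  // 0 = got row, -1 = done
  int cursor;
} ToyOperator;

static int toyOpen(ToyOperator *op) { op->cursor = 0; return 0; }
static int toyNext(ToyOperator *op, int *row) {
  if (op->cursor >= 3) return -1;
  *row = op->cursor++;
  return 0;
}

int main() {
  ToyOperator op = {.open = toyOpen, .next = toyNext};
  op.open(&op);
  int row;
  while (op.next(&op, &row) == 0) printf("row %d\n", row);
  return 0;
}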
@ -154,7 +165,7 @@ int32_t doOpenSortOperator(SOperatorInfo* pOperator) {
  pInfo->startTs = taosGetTimestampUs();

  //  pInfo->binfo.pRes is not equalled to the input datablock.
  pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_SINGLESOURCE_SORT, -1, -1,
                                             NULL, pTaskInfo->id.str);

  tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, applyScalarFunction, pOperator);
@ -248,7 +259,7 @@ int32_t doOpenMultiwaySortMergeOperator(SOperatorInfo* pOperator) {
  int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
  pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE,
                                             pInfo->bufPageSize, numOfBufPage, pInfo->pInputBlock, pTaskInfo->id.str);

  tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL);

View File

@ -1090,7 +1090,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
  if (pOperator->status == OP_RES_TO_RETURN) {
    doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
    if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
      doSetOperatorCompleted(pOperator);
      return NULL;
    }
@ -1122,7 +1122,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
  initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC);
  blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity);
  doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
  if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
    doSetOperatorCompleted(pOperator);
  }
@ -1153,7 +1153,7 @@ static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) {
    blockDataEnsureCapacity(pBlock, pOperator->resultInfo.capacity);
    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
    if (pBlock->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
      doSetOperatorCompleted(pOperator);
    }
@ -1176,7 +1176,7 @@ static void finalizeUpdatedResult(int32_t numOfOutput, SDiskbasedBuf* pBuf, SArr
    SResultRow* pRow = (SResultRow*)((char*)bufPage + pPos->pos.offset);

    for (int32_t j = 0; j < numOfOutput; ++j) {
      SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, j, rowCellInfoOffset);
      if (pRow->numOfRows < pEntry->numOfRes) {
        pRow->numOfRows = pEntry->numOfRes;
      }
@ -1199,7 +1199,7 @@ void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SOptrB
  SResultRow*     pResult = getResultRowByPos(pResultBuf, p1);
  SqlFunctionCtx* pCtx = pBinfo->pCtx;
  for (int32_t i = 0; i < numOfOutput; ++i) {
    pCtx[i].resultInfo = getResultEntryInfo(pResult, i, pBinfo->rowCellInfoOffset);
    struct SResultRowEntryInfo* pResInfo = pCtx[i].resultInfo;
    if (fmIsWindowPseudoColumnFunc(pCtx[i].functionId)) {
      continue;
@ -1301,7 +1301,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
  if (pOperator->status == OP_RES_TO_RETURN) {
    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
    if (pInfo->binfo.pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
      pOperator->status = OP_EXEC_DONE;
    }
    return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
@ -1476,7 +1476,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
    }
  }

  initResultRowInfo(&pInfo->binfo.resultRowInfo);

  pOperator->name = "TimeIntervalAggOperator";
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL;
@ -1533,7 +1533,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr
    goto _error;
  }

  initResultRowInfo(&pInfo->binfo.resultRowInfo);

  pOperator->name = "StreamTimeIntervalAggOperator";
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL;
@ -1643,7 +1643,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
  if (pOperator->status == OP_RES_TO_RETURN) {
    doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
    if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
      doSetOperatorCompleted(pOperator);
      return NULL;
    }
@ -1678,7 +1678,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
  initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC);
  blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity);
  doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
  if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
    doSetOperatorCompleted(pOperator);
  }
@ -1714,7 +1714,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
  //  if (pOperator->status == OP_RES_TO_RETURN) {
  //    //    doBuildResultDatablock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
  //    if (pResBlock->info.rows == 0 || !hasDataInGroupInfo(&pSliceInfo->groupResInfo)) {
  //      doSetOperatorCompleted(pOperator);
  //    }
  //
@ -1908,7 +1908,7 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo*
    goto _error;
  }

  initResultRowInfo(&pInfo->binfo.resultRowInfo);

  pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfCols, pValNode);
  pInfo->binfo.pRes = pResultBlock;
@ -1956,7 +1956,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
  initResultSizeInfo(pOperator, 4096);
  initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExpr, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str);
  initResultRowInfo(&pInfo->binfo.resultRowInfo);

  pInfo->twAggSup = *pTwAggSup;
  initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
@ -2006,7 +2006,7 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo
  }

  pInfo->twAggSup = *pTwAggSupp;
  initResultRowInfo(&pInfo->binfo.resultRowInfo);
  initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);

  pInfo->tsSlotId = tsSlotId;
@ -2153,7 +2153,7 @@ static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo)
  taosHashClear(pInfo->aggSup.pResultRowHashTable);
  clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
  cleanupResultRowInfo(&pInfo->binfo.resultRowInfo);
  initResultRowInfo(&pInfo->binfo.resultRowInfo);
}

static void clearUpdateDataBlock(SSDataBlock* pBlock) {
@ -2319,7 +2319,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  initResultRowInfo(&pInfo->binfo.resultRowInfo);

  pInfo->pChildren = NULL;
  if (numOfChild > 0) {
    pInfo->pChildren = taosArrayInit(numOfChild, sizeof(SOperatorInfo));
@ -2456,10 +2456,12 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
  }

  initDummyFunction(pInfo->pDummyCtx, pInfo->binfo.pCtx, numOfCols);

  pInfo->twAggSup = (STimeWindowAggSupp) {
      .waterMark = pSessionNode->window.watermark,
      .calTrigger = pSessionNode->window.triggerType,
      .maxTs = INT64_MIN};

  initResultRowInfo(&pInfo->binfo.resultRowInfo);
  initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);

  pInfo->primaryTsIndex = tsSlotId;
@ -2901,7 +2903,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
    return pInfo->pDelRes;
  }
  doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
  if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
    doSetOperatorCompleted(pOperator);
  }
  return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes;
@ -3269,7 +3271,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
    return pInfo->pDelRes;
  }
  doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
  if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
    doSetOperatorCompleted(pOperator);
  }
  return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes;
@ -3342,7 +3344,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
  pInfo->stateCol = extractColumnFromColumnNode(pColNode);
  initResultSizeInfo(pOperator, 4096);
  initResultRowInfo(&pInfo->binfo.resultRowInfo);

  pInfo->twAggSup = (STimeWindowAggSupp){
      .waterMark = pStateNode->window.watermark,
      .calTrigger = pStateNode->window.triggerType,
@ -3590,7 +3592,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
    goto _error;
  }

  initResultRowInfo(&iaInfo->binfo.resultRowInfo);

  pOperator->name = "TimeMergeIntervalAggOperator";
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL;

View File

@ -71,7 +71,7 @@ SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle) {
* @param type * @param type
* @return * @return
*/ */
SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, SArray* pIndexMap, int32_t type, int32_t pageSize, int32_t numOfPages, SSDataBlock* pBlock, const char* idstr) { SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t pageSize, int32_t numOfPages, SSDataBlock* pBlock, const char* idstr) {
SSortHandle* pSortHandle = taosMemoryCalloc(1, sizeof(SSortHandle)); SSortHandle* pSortHandle = taosMemoryCalloc(1, sizeof(SSortHandle));
pSortHandle->type = type; pSortHandle->type = type;

View File

@ -209,7 +209,7 @@ TEST(testCase, inMem_sort_Test) {
  SArray* orderInfo = taosArrayInit(1, sizeof(SBlockOrderInfo));
  taosArrayPush(orderInfo, &oi);

  SSortHandle* phandle = tsortCreateSortHandle(orderInfo, SORT_SINGLESOURCE_SORT, 1024, 5, NULL, "test_abc");
  tsortSetFetchRawDataFp(phandle, getSingleColDummyBlock, NULL, NULL);

  _info* pInfo = (_info*) taosMemoryCalloc(1, sizeof(_info));
@ -298,7 +298,7 @@ TEST(testCase, external_mem_sort_Test) {
  SArray* orderInfo = taosArrayInit(1, sizeof(SBlockOrderInfo));
  taosArrayPush(orderInfo, &oi);

  SSortHandle* phandle = tsortCreateSortHandle(orderInfo, SORT_SINGLESOURCE_SORT, 128, 3, NULL, "test_abc");
  tsortSetFetchRawDataFp(phandle, getSingleColDummyBlock, NULL, NULL);

  SSortSource* ps = static_cast<SSortSource*>(taosMemoryCalloc(1, sizeof(SSortSource)));
@ -365,7 +365,7 @@ TEST(testCase, ordered_merge_sort_Test) {
    taosArrayPush(pBlock->pDataBlock, &colInfo);
  }

  SSortHandle* phandle = tsortCreateSortHandle(orderInfo, SORT_MULTISOURCE_MERGE, 1024, 5, pBlock, "test_abc");
  tsortSetFetchRawDataFp(phandle, getSingleColDummyBlock, NULL, NULL);
  tsortSetComparFp(phandle, docomp);

View File

@ -134,6 +134,7 @@ int32_t streamBuildDispatchMsg(SStreamTask* pTask, SStreamDataBlock* data, SRpcM
  int32_t sz = taosArrayGetSize(vgInfo);
  for (int32_t i = 0; i < sz; i++) {
    SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
    ASSERT(pVgInfo->vgId > 0);
    if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) {
      vgId = pVgInfo->vgId;
      downstreamTaskId = pVgInfo->taskId;
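Aside: the new ASSERT guards the shuffle-dispatch lookup, where a row's hash value must fall inside exactly one vgroup's [hashBegin, hashEnd] range. A self-contained sketch of that routing logic (simplified structs, not the real SVgroupInfo):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t vgId; uint32_t hashBegin, hashEnd; } ToyVgInfo;

// Route a hash value to the vgroup whose range contains it.
static int32_t routeToVgroup(const ToyVgInfo *vg, int n, uint32_t hashValue) {
  for (int i = 0; i < n; i++) {
    assert(vg[i].vgId > 0);  // same invariant the new ASSERT enforces
    if (hashValue >= vg[i].hashBegin && hashValue <= vg[i].hashEnd) {
      return vg[i].vgId;
    }
  }
  return -1;  // no covering range: a routing bug
}

int main() {
  ToyVgInfo vg[] = {{2, 0, 0x7fffffff}, {3, 0x80000000, 0xffffffff}};
  printf("vgId=%d\n", routeToVgroup(vg, 2, 0x90000000u));  // vgId=3
  return 0;
}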

View File

@ -70,7 +70,7 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
    if (tEncodeSEpSet(pEncoder, &pTask->fixedEpDispatcher.epSet) < 0) return -1;
  } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
    if (tSerializeSUseDbRspImp(pEncoder, &pTask->shuffleDispatcher.dbInfo) < 0) return -1;
    if (tEncodeCStr(pEncoder, pTask->shuffleDispatcher.stbFullName) < 0) return -1;
  }
  if (tEncodeI64(pEncoder, pTask->triggerParam) < 0) return -1;
@ -119,8 +119,8 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
    if (tDecodeI32(pDecoder, &pTask->fixedEpDispatcher.nodeId) < 0) return -1;
    if (tDecodeSEpSet(pDecoder, &pTask->fixedEpDispatcher.epSet) < 0) return -1;
  } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
    if (tDeserializeSUseDbRspImp(pDecoder, &pTask->shuffleDispatcher.dbInfo) < 0) return -1;
    if (tDecodeCStrTo(pDecoder, pTask->shuffleDispatcher.stbFullName) < 0) return -1;
  }
  if (tDecodeI64(pDecoder, &pTask->triggerParam) < 0) return -1;
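Aside: the encoder gains tEncodeCStr for stbFullName and the decoder gains the matching tDecodeCStrTo, keeping both sides in lockstep. A toy pair illustrating the invariant that fields must be read back in exactly the order they were written (hypothetical helpers, not the real tEncoder API):

#include <stdio.h>
#include <string.h>

// Toy fixed-order serializer: reads must mirror the writes, which is the
// invariant the real tEncodeSStreamTask/tDecodeSStreamTask pair maintains.
static size_t putI32(char *buf, size_t off, int v)         { memcpy(buf + off, &v, 4); return off + 4; }
static size_t putStr(char *buf, size_t off, const char *s) { size_t n = strlen(s) + 1; memcpy(buf + off, s, n); return off + n; }
static size_t getI32(const char *buf, size_t off, int *v)  { memcpy(v, buf + off, 4); return off + 4; }
static size_t getStr(const char *buf, size_t off, char *s) { size_t n = strlen(buf + off) + 1; memcpy(s, buf + off, n); return off + n; }

int main() {
  char   buf[64], name[32];
  int    dispatchType;
  size_t off = 0;
  off = putI32(buf, off, 7);             // e.g. a dispatch type
  off = putStr(buf, off, "1.db.stb");    // e.g. an stbFullName
  off = 0;
  off = getI32(buf, off, &dispatchType); // same order on the way back
  off = getStr(buf, off, name);
  printf("%d %s\n", dispatchType, name); // 7 1.db.stb
  return 0;
}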

View File

@ -171,7 +171,8 @@ void syncNodeClose(SSyncNode* pSyncNode);
int32_t syncNodePropose(SSyncNode* pSyncNode, const SRpcMsg* pMsg, bool isWeak);

// option
bool      syncNodeSnapshotEnable(SSyncNode* pSyncNode);
SyncIndex syncNodeGetSnapshotConfigIndex(SSyncNode* pSyncNode, SyncIndex snapshotLastApplyIndex);

// ping --------------
int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg);

View File

@ -47,14 +47,15 @@ typedef struct SRaftCfg {
SRaftCfg *raftCfgOpen(const char *path);
int32_t   raftCfgClose(SRaftCfg *pRaftCfg);
int32_t   raftCfgPersist(SRaftCfg *pRaftCfg);
int32_t   raftCfgAddConfigIndex(SRaftCfg *pRaftCfg, SyncIndex configIndex);

cJSON  *syncCfg2Json(SSyncCfg *pSyncCfg);
char   *syncCfg2Str(SSyncCfg *pSyncCfg);
int32_t syncCfgFromJson(const cJSON *pRoot, SSyncCfg *pSyncCfg);
int32_t syncCfgFromStr(const char *s, SSyncCfg *pSyncCfg);

cJSON  *raftCfg2Json(SRaftCfg *pRaftCfg);
char   *raftCfg2Str(SRaftCfg *pRaftCfg);
int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg);
int32_t raftCfgFromStr(const char *s, SRaftCfg *pRaftCfg);

View File

@ -208,8 +208,9 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
      SRpcMsg rpcMsg;
      syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg);

      SFsmCbMeta cbMeta = {0};
      cbMeta.index = pRollBackEntry->index;
      cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
      cbMeta.isWeak = pRollBackEntry->isWeak;
      cbMeta.code = 0;
      cbMeta.state = ths->state;
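Aside: SFsmCbMeta gains a lastConfigIndex field in this change, and every stack declaration switches to = {0}, so any member a call site forgets to assign reads as zero rather than stack garbage. A minimal illustration (toy struct, not the real SFsmCbMeta):

#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t index;
  int64_t lastConfigIndex;  // newly added field
  int32_t code;
} ToyCbMeta;

int main() {
  ToyCbMeta m = {0};  // every member starts at a defined zero
  m.index = 42;       // a caller that predates lastConfigIndex
  // m.lastConfigIndex was never touched, yet it is 0 here,
  // not whatever happened to be on the stack.
  printf("%lld %lld %d\n", (long long)m.index, (long long)m.lastConfigIndex, m.code);
  return 0;
}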
@ -234,8 +235,9 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
  if (ths->pFsm != NULL) {
    //    if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_SYNC_NOOP) {
    if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pAppendEntry->originalRpcType)) {
      SFsmCbMeta cbMeta = {0};
      cbMeta.index = pAppendEntry->index;
      cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
      cbMeta.isWeak = pAppendEntry->isWeak;
      cbMeta.code = 2;
      cbMeta.state = ths->state;
@ -266,8 +268,9 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
  if (ths->pFsm != NULL) {
    //    if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_SYNC_NOOP) {
    if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pAppendEntry->originalRpcType)) {
      SFsmCbMeta cbMeta = {0};
      cbMeta.index = pAppendEntry->index;
      cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
      cbMeta.isWeak = pAppendEntry->isWeak;
      cbMeta.code = 3;
      cbMeta.state = ths->state;
@ -696,8 +699,9 @@ static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) {
      SRpcMsg rpcMsg;
      syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg);

      SFsmCbMeta cbMeta = {0};
      cbMeta.index = pRollBackEntry->index;
      cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
      cbMeta.isWeak = pRollBackEntry->isWeak;
      cbMeta.code = 0;
      cbMeta.state = ths->state;
@ -725,8 +729,9 @@ static int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry) {
  syncEntry2OriginalRpc(pEntry, &rpcMsg);

  if (ths->pFsm != NULL) {
    if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) {
      SFsmCbMeta cbMeta = {0};
      cbMeta.index = pEntry->index;
      cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
      cbMeta.isWeak = pEntry->isWeak;
      cbMeta.code = 2;
      cbMeta.state = ths->state;

View File

@ -444,6 +444,21 @@ int32_t syncGetSnapshotMetaByIndex(int64_t rid, SyncIndex snapshotIndex, struct
  return 0;
}
SyncIndex syncNodeGetSnapshotConfigIndex(SSyncNode* pSyncNode, SyncIndex snapshotLastApplyIndex) {
ASSERT(pSyncNode->pRaftCfg->configIndexCount >= 1);
SyncIndex lastIndex = (pSyncNode->pRaftCfg->configIndexArr)[0];
for (int i = 0; i < pSyncNode->pRaftCfg->configIndexCount; ++i) {
if ((pSyncNode->pRaftCfg->configIndexArr)[i] > lastIndex &&
(pSyncNode->pRaftCfg->configIndexArr)[i] <= snapshotLastApplyIndex) {
lastIndex = (pSyncNode->pRaftCfg->configIndexArr)[i];
}
}
sTrace("sync syncNodeGetSnapshotConfigIndex index:%ld lastConfigIndex:%ld", snapshotLastApplyIndex, lastIndex);
return lastIndex;
}
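In other words, the function returns the largest recorded config-change index that does not exceed the snapshot's last applied index. A standalone harness around the same loop (toy array in place of SRaftCfg):

#include <stdint.h>
#include <stdio.h>

typedef int64_t SyncIndexT;  // stand-in for SyncIndex

// Same selection loop as syncNodeGetSnapshotConfigIndex, over a toy array.
static SyncIndexT lastConfigAtOrBefore(const SyncIndexT *arr, int n, SyncIndexT snapshotIdx) {
  SyncIndexT last = arr[0];
  for (int i = 0; i < n; ++i) {
    if (arr[i] > last && arr[i] <= snapshotIdx) last = arr[i];
  }
  return last;
}

int main() {
  // config changes were committed at indexes 0, 5, 12 and 20
  SyncIndexT arr[] = {0, 5, 12, 20};
  // a snapshot whose last applied index is 15 was taken under the
  // configuration recorded at index 12
  printf("%lld\n", (long long)lastConfigAtOrBefore(arr, 4, 15));  // 12
  return 0;
}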
const char* syncGetMyRoleStr(int64_t rid) {
  const char* s = syncUtilState2String(syncGetMyRole(rid));
  return s;
@ -2068,8 +2083,9 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg) {
    if (ths->pFsm != NULL) {
      //      if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_SYNC_NOOP) {
      if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) {
        SFsmCbMeta cbMeta = {0};
        cbMeta.index = pEntry->index;
        cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
        cbMeta.isWeak = pEntry->isWeak;
        cbMeta.code = 0;
        cbMeta.state = ths->state;
@ -2090,8 +2106,9 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg) {
    if (ths->pFsm != NULL) {
      //      if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_SYNC_NOOP) {
      if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) {
        SFsmCbMeta cbMeta = {0};
        cbMeta.index = pEntry->index;
        cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
        cbMeta.isWeak = pEntry->isWeak;
        cbMeta.code = 1;
        cbMeta.state = ths->state;
@ -2155,11 +2172,12 @@ static int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftE
  }
  */

  if (ths->pFsm->FpLeaderTransferCb != NULL) {
    SFsmCbMeta cbMeta = {0};
    cbMeta.code = 0;
    cbMeta.currentTerm = ths->pRaftStore->currentTerm;
    cbMeta.flag = 0;
    cbMeta.index = pEntry->index;
    cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
    cbMeta.isWeak = pEntry->isWeak;
    cbMeta.seqNum = pEntry->seqNum;
    cbMeta.state = ths->state;
@ -2261,6 +2279,7 @@ static int32_t syncNodeConfigChange(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftE
    cbMeta.code = 0;
    cbMeta.currentTerm = ths->pRaftStore->currentTerm;
    cbMeta.index = pEntry->index;
    cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, pEntry->index);
    cbMeta.term = pEntry->term;
    cbMeta.newCfg = newSyncCfg;
    cbMeta.oldCfg = oldSyncCfg;
@ -2295,8 +2314,9 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex,
      // user commit
      if (ths->pFsm->FpCommitCb != NULL && syncUtilUserCommit(pEntry->originalRpcType)) {
        SFsmCbMeta cbMeta = {0};
        cbMeta.index = pEntry->index;
        cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
        cbMeta.isWeak = pEntry->isWeak;
        cbMeta.code = 0;
        cbMeta.state = ths->state;
@ -2310,6 +2330,8 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex,
      // config change
      if (pEntry->originalRpcType == TDMT_SYNC_CONFIG_CHANGE) {
        raftCfgAddConfigIndex(ths->pRaftCfg, pEntry->index);
        raftCfgPersist(ths->pRaftCfg);
        code = syncNodeConfigChange(ths, &rpcMsg, pEntry);
        ASSERT(code == 0);
      }

View File

@ -66,6 +66,13 @@ int32_t raftCfgPersist(SRaftCfg *pRaftCfg) {
  return 0;
}
int32_t raftCfgAddConfigIndex(SRaftCfg *pRaftCfg, SyncIndex configIndex) {
ASSERT(pRaftCfg->configIndexCount < MAX_CONFIG_INDEX_COUNT);  // leave room for the new entry
(pRaftCfg->configIndexArr)[pRaftCfg->configIndexCount] = configIndex;
++(pRaftCfg->configIndexCount);
return 0;
}
cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
  char   u64buf[128] = {0};
  cJSON *pRoot = cJSON_CreateObject();

View File

@ -50,6 +50,7 @@ SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaI
  } else {
    sError("snapshotSenderCreate cannot create sender");
  }

  return pSender;
}
@ -84,6 +85,10 @@ void snapshotSenderStart(SSyncSnapshotSender *pSender) {
  // get current snapshot info
  pSender->pSyncNode->pFsm->FpGetSnapshot(pSender->pSyncNode->pFsm, &(pSender->snapshot));

  sTrace("snapshotSenderStart lastApplyIndex:%ld, lastApplyTerm:%lu, lastConfigIndex:%ld",
         pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex);

  if (pSender->snapshot.lastConfigIndex != SYNC_INDEX_INVALID) {
    /*
    SSyncRaftEntry *pEntry = NULL;
@ -421,7 +426,7 @@ cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) {
char *snapshotSender2Str(SSyncSnapshotSender *pSender) {
  cJSON *pJson = snapshotSender2Json(pSender);
  char  *serialized = cJSON_Print(pJson);
  cJSON_Delete(pJson);
  return serialized;
}
@ -542,7 +547,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {
  cJSON_AddStringToObject(pFromId, "addr", u64buf);
  {
    uint64_t u64 = pReceiver->fromId.addr;
    cJSON   *pTmp = pFromId;
    char     host[128] = {0};
    uint16_t port;
    syncUtilU642Addr(u64, host, sizeof(host), &port);
@ -566,7 +571,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {
char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) {
  cJSON *pJson = snapshotReceiver2Json(pReceiver);
  char  *serialized = cJSON_Print(pJson);
  cJSON_Delete(pJson);
  return serialized;
}

View File

@ -376,17 +376,19 @@ static void transDQTimeout(uv_timer_t* timer) {
  SDelayQueue* queue = timer->data;
  tTrace("timer %p timeout", timer);
  uint64_t timeout = 0;
  int64_t  current = taosGetTimestampMs();
  do {
    HeapNode* minNode = heapMin(queue->heap);
    if (minNode == NULL) break;
    SDelayTask* task = container_of(minNode, SDelayTask, node);

    if (task->execTime <= current) {
      heapRemove(queue->heap, minNode);
      task->func(task->arg);
      taosMemoryFree(task);
      timeout = 0;
    } else {
      timeout = task->execTime - current;
      break;
    }
  } while (1);
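Aside: the fix snapshots the clock once per tick. Reading taosGetTimestampMs() twice can straddle a task's due time, so a task judged "not yet due" by the first read could compute execTime minus a later now, yielding zero or, stored unsigned, a wrapped-around huge timeout. A sketch of the single-read pattern (POSIX clock, hypothetical names):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t nowMs(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

int main() {
  int64_t execTime = nowMs();  // a task due "right now"

  // BAD: two separate reads can straddle the due time, and
  // execTime - nowMs() then underflows when stored unsigned.
  // uint64_t timeout = (execTime <= nowMs()) ? 0 : execTime - nowMs();

  // GOOD: snapshot the clock once, compare and subtract consistently.
  int64_t  current = nowMs();
  uint64_t timeout = (execTime <= current) ? 0 : (uint64_t)(execTime - current);
  printf("rearm in %llu ms\n", (unsigned long long)timeout);
  return 0;
}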

View File

@ -77,6 +77,7 @@ class TDTestCase:
        tdSql.error(f'alter stable {stbname} modify column c9 double')
        tdSql.error(f'alter stable {stbname} modify column c10 float')
        tdSql.error(f'alter stable {stbname} modify column c11 int')
        tdSql.error(f'alter stable {stbname} drop tag t0')
        tdSql.execute(f'drop database {dbname}')

    def alter_stable_tag_check(self,dbname,stbname,tbname):
@ -126,6 +127,11 @@ class TDTestCase:
        for i in ['int','unsigned int','float','binary(10)','nchar(10)']:
            tdSql.error(f'alter stable {stbname} modify tag t8 {i}')
        tdSql.error(f'alter stable {stbname} modify tag t4 int')
        tdSql.error(f'alter stable {stbname} drop column t0')
        #!bug TD-16410
        # tdSql.error(f'alter stable {tbname} set tag t1=100 ')
        # tdSql.execute(f'create table ntb (ts timestamp,c0 int)')
        tdSql.error(f'alter stable ntb add column c2 ')
        tdSql.execute(f'drop database {dbname}')

    def run(self):

View File

@ -52,8 +52,6 @@ class TDTestCase:
        tdSql.execute(f'create database if not exists {dbname}')
        stbname = self.get_long_name(length=3, mode="letters")
        tbname = self.get_long_name(length=3, mode="letters")
        tdSql.execute(f'create stable if not exists {dbname}.{stbname} (col_ts timestamp, c1 int) tags (tag_ts timestamp, t1 tinyint, t2 smallint, t3 int, \
                t4 bigint, t5 tinyint unsigned, t6 smallint unsigned, t7 int unsigned, t8 bigint unsigned, t9 float, t10 double, t11 bool,t12 binary(20),t13 nchar(20))')
        tdSql.execute(f'create table if not exists {dbname}.{tbname} using {dbname}.{stbname} tags(now, 1, 2, 3, 4, 5, 6, 7, 8, 9.9, 10.1, True,"abc123","涛思数据")')
@ -90,13 +88,16 @@ class TDTestCase:
        tdSql.checkData(0,15,tag_nchar)

        # bug TD-16211 insert length more than setting binary and nchar
        # error_tag_binary = self.get_long_name(length=21, mode="letters")
        # error_tag_nchar = self.get_long_name(length=21, mode="letters")
        # tdSql.error(f'alter table {dbname}.{tbname} set tag t12 = "{error_tag_binary}"')
        # tdSql.error(f'alter table {dbname}.{tbname} set tag t13 = "{error_tag_nchar}"')
        error_tag_binary = self.get_long_name(length=25, mode="letters")
        error_tag_nchar = self.get_long_name(length=25, mode="letters")
        tdSql.error(f'alter table {dbname}.{tbname} set tag t12 = "{error_tag_binary}"')
        tdSql.error(f'alter table {dbname}.{tbname} set tag t13 = "{error_tag_nchar}"')

        # bug TD-16210 modify binary to nchar
        tdSql.error(f'alter table {dbname}.{tbname} modify tag t12 nchar(10)')
        tdSql.execute(f"drop database {dbname}")

    def alter_ntb_column_check(self):
        '''
@ -125,6 +126,20 @@ class TDTestCase:
        tdSql.execute(f'alter table {dbname}.{tbname} drop column `c15`')
        tdSql.query(f'describe {dbname}.{tbname}')
        tdSql.checkRows(14)
        #! TD-16422
        # tdSql.execute(f'alter table {dbname}.{tbname} add column c16 binary(10)')
        # tdSql.query(f'describe {dbname}.{tbname}')
        # tdSql.checkRows(15)
        # tdSql.checkEqual(tdSql.queryResult[14][2],10)
        # tdSql.execute(f'alter table {dbname}.{tbname} drop column c16')
        # tdSql.execute(f'alter table {dbname}.{tbname} add column c16 nchar(10)')
        # tdSql.query(f'describe {dbname}.{tbname}')
        # tdSql.checkRows(15)
        # tdSql.checkEqual(tdSql.queryResult[14][2],10)
        # tdSql.execute(f'alter table {dbname}.{tbname} drop column c16')
        tdSql.execute(f'alter table {dbname}.{tbname} modify column c12 binary(30)')
        tdSql.query(f'describe {dbname}.{tbname}')
        tdSql.checkData(12,2,30)

View File

@ -119,7 +119,7 @@ class TDTestCase:
# tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate)) # tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate))
return return
def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childrowcount): def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childcount):
buildPath = self.getBuildPath() buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/" config = buildPath+ "../sim/dnode1/cfg/"
@ -128,7 +128,7 @@ class TDTestCase:
tsql.execute("drop database if exists %s"%dbname) tsql.execute("drop database if exists %s"%dbname)
tsql.execute("create database %s vgroups %d"%(dbname,vgroups)) tsql.execute("create database %s vgroups %d"%(dbname,vgroups))
tsql.execute("use %s" %dbname) tsql.execute("use %s" %dbname)
count=int(childrowcount) count=int(childcount)
threads = [] threads = []
for i in range(threadNumbers): for i in range(threadNumbers):
tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i)) tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
@ -264,19 +264,88 @@ class TDTestCase:
        speedCreate=count/spendTime
        tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate))
        return

    def checkData(self,dbname,stbname,stableCount,CtableCount,rowsPerSTable):
        tdSql.execute("use %s"%dbname)
        tdSql.query("show stables")
        tdSql.checkRows(stableCount)
        tdSql.query("show tables")
        tdSql.checkRows(CtableCount)
        for i in range(stableCount):
            tdSql.query("select count(*) from %s%d"%(stbname,i))
            tdSql.checkData(0,0,rowsPerSTable)
        return

    # test case1 base
    def test_case1(self):
        #stableCount=threadNumbersCtb
        parameterDict = {'vgroups': 1, \
                         'threadNumbersCtb': 5, \
                         'threadNumbersIda': 5, \
                         'stableCount': 5, \
                         'tablesPerStb': 50, \
                         'rowsPerTable': 10, \
                         'dbname': 'db', \
                         'stbname': 'stb', \
                         'host': 'localhost', \
                         'startTs': 1640966400000}  # 2022-01-01 00:00:00.000

        tdLog.debug("-----create database and muti-thread create tables test------- ")
        #host,dbname,stbname,vgroups,threadNumbers,tcountStart,tcountStop
        #host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount
        self.mutiThread_create_tables(
            host=parameterDict['host'],
            dbname=parameterDict['dbname'],
            stbname=parameterDict['stbname'],
            vgroups=parameterDict['vgroups'],
            threadNumbers=parameterDict['threadNumbersCtb'],
            childcount=parameterDict['tablesPerStb'])

        self.mutiThread_insert_data(
            host=parameterDict['host'],
            dbname=parameterDict['dbname'],
            stbname=parameterDict['stbname'],
            threadNumbers=parameterDict['threadNumbersIda'],
            chilCount=parameterDict['tablesPerStb'],
            ts_start=parameterDict['startTs'],
            childrowcount=parameterDict['rowsPerTable'])

        tableCount=parameterDict['threadNumbersCtb']*parameterDict['tablesPerStb']
        rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb']

        self.checkData(dbname=parameterDict['dbname'],stbname=parameterDict['stbname'],stableCount=parameterDict['threadNumbersCtb'],CtableCount=tableCount,rowsPerSTable=rowsPerStable)

    def test_case3(self):
        #stableCount=threadNumbersCtb
        parameterDict = {'vgroups': 1, \
                         'threadNumbersCtb': 8, \
                         'stableCount': 5, \
                         'tablesPerStb': 10, \
                         'rowsPerTable': 100, \
                         'dbname': 'db1', \
                         'stbname': 'stb1', \
                         'host': 'localhost', \
                         'startTs': 1640966400000}  # 2022-01-01 00:00:00.000

        self.taosBenchCreate(
            parameterDict['host'],
            "no",
            parameterDict['dbname'],
            parameterDict['stbname'],
            parameterDict['vgroups'],
            parameterDict['threadNumbersCtb'],
            parameterDict['tablesPerStb'])

        tableCount=parameterDict['threadNumbersCtb']*parameterDict['tablesPerStb']
        rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb']

        self.checkData(
            dbname=parameterDict['dbname'],
            stbname=parameterDict['stbname'],
            stableCount=parameterDict['threadNumbersCtb'],
            CtableCount=tableCount,
            rowsPerSTable=rowsPerStable)

        # self.taosBenchCreate("test209","no","db2", "stb2", 1, 8, 1*10000)
        # self.taosBenchCreate("chenhaoran02","no","db1", "stb1", 1, 8, 1*10000)
@ -320,14 +389,6 @@ class TDTestCase:
# tdSql.execute("create qnode on dnode %s"%dnodeId) # tdSql.execute("create qnode on dnode %s"%dnodeId)
# self.taosBenchCreate("test209","no","db2", "stb2", 1, 8, 1*10000)
# self.taosBenchCreate("chenhaoran02","no","db1", "stb1", 1, 8, 1*10000)
# self.taosBenchCreate("db1", "stb1", 4, 5, 100*10000)
# self.taosBenchCreate("db1", "stb1", 1, 5, 100*10000)
# run case # run case
def run(self): def run(self):

View File

@ -11,7 +11,7 @@
"confirm_parameter_prompt": "no", "confirm_parameter_prompt": "no",
"insert_interval": 0, "insert_interval": 0,
"interlace_rows": 0, "interlace_rows": 0,
"num_of_records_per_req": 100, "num_of_records_per_req": 100000,
"databases": [ "databases": [
{ {
"dbinfo": { "dbinfo": {
@ -29,7 +29,7 @@
"batch_create_tbl_num": 50000, "batch_create_tbl_num": 50000,
"data_source": "rand", "data_source": "rand",
"insert_mode": "taosc", "insert_mode": "taosc",
"insert_rows": 1, "insert_rows": 100,
"interlace_rows": 0, "interlace_rows": 0,
"insert_interval": 0, "insert_interval": 0,
"max_sql_len": 10000000, "max_sql_len": 10000000,
@ -45,28 +45,28 @@
        },
        {
          "type": "DOUBLE",
          "count": 1
        },
        {
          "type": "BINARY",
          "len": 40,
          "count": 1
        },
        {
          "type": "nchar",
          "len": 20,
          "count": 1
        }
      ],
      "tags": [
        {
          "type": "TINYINT",
          "count": 1
        },
        {
          "type": "BINARY",
          "len": 16,
          "count": 1
        }
      ]
    }

View File

@ -11,9 +11,12 @@
# -*- coding: utf-8 -*-

import random
import string

from util.log import *
from util.cases import *
from util.sql import *
from util.common import *
@ -23,79 +26,89 @@ class TDTestCase:
tdSql.init(conn.cursor()) tdSql.init(conn.cursor())
self.rowNum = 10 self.rowNum = 10
self.tbnum = 20
self.ts = 1537146000000 self.ts = 1537146000000
self.binary_str = 'taosdata'
def run(self): self.nchar_str = '涛思数据'
def bottom_check_base(self):
tdSql.prepare() tdSql.prepare()
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table stb_1 using stb tags('beijing')")
tdSql.execute("create table test1 using test tags('beijing')") column_list = ['col1','col2','col3','col4','col5','col6','col7','col8']
error_column_list = ['col11','col12','col13']
error_param_list = [0,101]
for i in range(self.rowNum): for i in range(self.rowNum):
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
# bottom verifacation for i in column_list:
tdSql.error("select bottom(ts, 10) from test") tdSql.query(f'select bottom({i},2) from stb_1')
tdSql.error("select bottom(col1, 0) from test") tdSql.checkRows(2)
tdSql.error("select bottom(col1, 101) from test") tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
tdSql.error("select bottom(col2, 0) from test") for j in error_param_list:
tdSql.error("select bottom(col2, 101) from test") tdSql.error(f'select bottom({i},{j}) from stb_1')
tdSql.error("select bottom(col3, 0) from test") for i in error_column_list:
tdSql.error("select bottom(col3, 101) from test") tdSql.error(f'select bottom({i},10) from stb_1')
tdSql.error("select bottom(col4, 0) from test") tdSql.query("select ts,bottom(col1, 2),ts from stb_1 group by tbname")
tdSql.error("select bottom(col4, 101) from test")
tdSql.error("select bottom(col5, 0) from test")
tdSql.error("select bottom(col5, 101) from test")
tdSql.error("select bottom(col6, 0) from test")
tdSql.error("select bottom(col6, 101) from test")
tdSql.error("select bottom(col7, 10) from test")
tdSql.error("select bottom(col8, 10) from test")
tdSql.error("select bottom(col9, 10) from test")
tdSql.query("select bottom(col1, 2) from test")
tdSql.checkRows(2) tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) tdSql.query('select bottom(col2,1) from stb_1 interval(1y) order by col2')
tdSql.query("select bottom(col2, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
tdSql.query("select bottom(col3, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
tdSql.query("select bottom(col4, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
tdSql.query("select bottom(col11, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
tdSql.query("select bottom(col12, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
tdSql.query("select bottom(col13, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
tdSql.query("select bottom(col13,50) from test")
tdSql.checkRows(10)
tdSql.query("select bottom(col14, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
tdSql.query("select ts,bottom(col1, 2) from test1")
tdSql.checkRows(2)
tdSql.query("select ts,bottom(col1, 2),ts from test group by tbname")
tdSql.checkRows(2)
tdSql.query('select bottom(col2,1) from test interval(1y) order by col2')
tdSql.checkData(0,0,1) tdSql.checkData(0,0,1)
tdSql.error('select * from stb_1 where bottom(col2,1)=1')
tdSql.execute('drop database db')
def bottom_check_distribute(self):
# prepare data for vgroup 4
dbname = tdCom.getLongName(5, "letters")
stbname = tdCom.getLongName(5, "letters")
vgroup_num = 2
child_table_num = 20
tdSql.execute(f"create database if not exists {dbname} vgroups {vgroup_num}")
tdSql.execute(f'use {dbname}')
# build 20 child tables,every table insert 10 rows
tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
for i in range(child_table_num):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')")
tdSql.execute(f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i))
column_list = ['col1','col2','col3','col4','col5','col6','col7','col8']
error_column_list = ['col11','col12','col13']
error_param_list = [0,101]
for i in [f'{stbname}', f'{dbname}.{stbname}']:
for j in column_list:
tdSql.query(f"select bottom({j},1) from {i}")
tdSql.checkRows(0)
tdSql.query('show tables')
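# column index 6 of the "show tables" result holds the vgroup_id hosting each child table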
vgroup_list = []
for i in range(len(tdSql.queryResult)):
vgroup_list.append(tdSql.queryResult[i][6])
vgroup_list_set = set(vgroup_list)
for i in vgroup_list_set:
vgroups_num = vgroup_list.count(i)
if vgroups_num >= 2:
tdLog.info(f'This scene with {vgroups_num} child tables in one vgroup is ok!')
continue
else:
tdLog.exit(f'This scene does not meet the requirements: only {vgroups_num} child table in one vgroup!\n')
for i in range(self.rowNum):
for j in range(child_table_num):
tdSql.execute(f"insert into {stbname}_{j} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
for i in column_list:
tdSql.query(f'select bottom({i},2) from {stbname}')
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(1,),(1,)])
for j in error_param_list:
tdSql.error(f'select bottom({i},{j}) from {stbname}')
for i in error_column_list:
tdSql.error(f'select bottom({i},10) from {stbname}')
tdSql.execute(f'drop database {dbname}')
def run(self):
self.bottom_check_base()
self.bottom_check_distribute()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)


@ -0,0 +1,198 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random
class TDTestCase:
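# all module debug flags at the most verbose level; the tiny per-vnode table limits are meant to spread the child tables across vgroups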
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
self.ts = 1537146000000
def prepare_datas_of_distribute(self):
# prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
# get all vgroup_ids
tdSql.query("show vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check the sub_tables of each vnode to make sure the sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
vnode_tables[table_name[6]].append(table_name[0])
self.vnode_disbutes = vnode_tables
count = 0
for k ,v in vnode_tables.items():
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
def distribute_agg_query(self):
# basic filter
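# apercentile(col, p) returns an approximate p-th percentile; unlike percentile(), it can run directly on a super table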
tdSql.query("select apercentile(c1 , 20) from stb1 where c1 is null")
tdSql.checkRows(0)
tdSql.query("select apercentile(c1 , 20) from stb1 where t1=1")
tdSql.checkData(0,0,2.800000000)
tdSql.query("select apercentile(c1+c2 ,100) from stb1 where c1 =1 ")
tdSql.checkData(0,0,11112.000000000)
tdSql.query("select apercentile(c1 ,10 ) from stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,2.000000000)
tdSql.query("select apercentile(c1,20) from stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select apercentile(c1,20) from stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select apercentile(c1,20) from stb1 union all select apercentile(c1,20) from stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,7.389181281)
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.query("select apercentile(tb1.c1,100), apercentile(tb2.c2,100) from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,9.000000000)
tdSql.checkData(0,1,9.000000000)
# group by
tdSql.execute(" use testdb ")
tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
tdSql.checkRows(20)
tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
tdSql.checkRows(30)
tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
tdSql.checkRows(31)
# partition by tbname or partition by tag
tdSql.query("select apercentile(c1 ,10)from stb1 partition by tbname")
query_data = tdSql.queryResult
# nested query support for apercentile
tdSql.query("select apercentile(c2+2,10)+1 from (select max(c1) c2 from stb1)")
tdSql.checkData(0,0,31.000000000)
tdSql.query("select apercentile(c1+2,10)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.checkData(0,0,7.560701700)
tdSql.query("select apercentile(a+2,10)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.checkData(0,0,7.560701700)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1), apercentile(c1,10) from stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
tdSql.checkData(0,3,-999)
tdSql.checkData(0,4,28.000000000)
tdSql.checkData(0,5,4.560701700)
def run(self):
self.prepare_datas_of_distribute()
self.check_distribute_datas()
self.distribute_agg_query()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,296 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
self.ts = 1537146000000
def check_count_functions(self, tbname , col_name):
max_sql = f"select count({col_name}) from {tbname};"
same_sql = f"select sum(c) from (select {col_name} ,1 as c from {tbname} where {col_name} is not null) "
tdSql.query(max_sql)
max_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
if max_result !=same_result:
tdLog.exit(" count function work not as expected, sql : %s "% max_sql)
else:
tdLog.info(" count function work as expected, sql : %s "% max_sql)
def prepare_datas_of_distribute(self):
# prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
# get all vgroup_ids
tdSql.query("show vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check the sub_tables of each vnode to make sure the sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
vnode_tables[table_name[6]].append(table_name[0])
self.vnode_disbutes = vnode_tables
count = 0
for k ,v in vnode_tables.items():
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
def check_count_distribute_diff_vnode(self,col_name):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
if len(v)>=2:
vgroup_ids.append(k)
distribute_tbnames = []
for vgroup_id in vgroup_ids:
vnode_tables = self.vnode_disbutes[vgroup_id]
distribute_tbnames.append(random.sample(vnode_tables,1)[0])
tbname_ins = ""
for tbname in distribute_tbnames:
tbname_ins += "'%s' ,"%tbname
tbname_filters = tbname_ins[:-1]
max_sql = f"select count({col_name}) from stb1 where tbname in ({tbname_filters});"
same_sql = f"select sum(c) from (select {col_name} ,1 as c from stb1 where tbname in ({tbname_filters}) and {col_name} is not null) "
tdSql.query(max_sql)
max_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
if max_result !=same_result:
tdLog.exit(" count function work not as expected, sql : %s "% max_sql)
else:
tdLog.info(" count function work as expected, sql : %s "% max_sql)
def check_count_status(self):
# check count function work status
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tdSql.query("desc stb1")
col_names = tdSql.queryResult
colnames = []
for col_name in col_names:
if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]:
colnames.append(col_name[0])
for tablename in tablenames:
for colname in colnames:
self.check_count_functions(tablename,colname)
# check count function for different vnodes
for colname in colnames:
if colname.startswith("c"):
self.check_count_distribute_diff_vnode(colname)
else:
# self.check_count_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
# basic filter
tdSql.query("select count(c1) from stb1 ")
tdSql.checkData(0,0,184)
tdSql.query("select count(c1) from stb1 where t1=1")
tdSql.checkData(0,0,9)
tdSql.query("select count(c1+c2) from stb1 where c1 =1 ")
tdSql.checkData(0,0,2)
tdSql.query("select count(c1) from stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,9)
tdSql.query("select count(c1) from stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select count(c1) from stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select count(c1) from stb1 union all select count(c1) from stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,184)
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.query("select count(tb1.c1), count(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,10)
tdSql.checkData(0,1,10)
# group by
tdSql.execute(" use testdb ")
tdSql.query(" select count(*) from stb1 ")
tdSql.checkData(0,0,187)
tdSql.query(" select count(*) from stb1 group by t1 ")
tdSql.checkRows(20)
tdSql.query(" select count(*) from stb1 group by c1 ")
tdSql.checkRows(30)
tdSql.query(" select count(*) from stb1 group by c2 ")
tdSql.checkRows(31)
# partition by tbname or partition by tag
tdSql.query("select max(c1),tbname from stb1 partition by tbname")
query_data = tdSql.queryResult
for row in query_data:
tbname = row[1]
tdSql.query(" select max(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
tdSql.query("select max(c1),tbname from stb1 partition by t1")
query_data = tdSql.queryResult
for row in query_data:
tbname = row[1]
tdSql.query(" select max(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
# nested query support for count
tdSql.query("select abs(c2+2)+1 from (select count(c1) c2 from stb1)")
tdSql.checkData(0,0,187.000000000)
tdSql.query("select count(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.checkData(0,0,184)
tdSql.query("select count(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.checkData(0,0,184)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
tdSql.checkData(0,3,-999)
def run(self):
self.prepare_datas_of_distribute()
self.check_distribute_datas()
self.check_count_status()
self.distribute_agg_query()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,293 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
self.ts = 1537146000000
def check_max_functions(self, tbname , col_name):
max_sql = f"select max({col_name}) from {tbname};"
same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1"
tdSql.query(max_sql)
max_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
if max_result !=same_result:
tdLog.exit(" max function work not as expected, sql : %s "% max_sql)
else:
tdLog.info(" max function work as expected, sql : %s "% max_sql)
def prepare_datas_of_distribute(self):
# prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
# get all vgroup_ids
tdSql.query("show vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check the sub_tables of each vnode to make sure the sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
vnode_tables[table_name[6]].append(table_name[0])
self.vnode_disbutes = vnode_tables
count = 0
for k ,v in vnode_tables.items():
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
def check_max_distribute_diff_vnode(self,col_name):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
if len(v)>=2:
vgroup_ids.append(k)
distribute_tbnames = []
for vgroup_id in vgroup_ids:
vnode_tables = self.vnode_disbutes[vgroup_id]
distribute_tbnames.append(random.sample(vnode_tables,1)[0])
tbname_ins = ""
for tbname in distribute_tbnames:
tbname_ins += "'%s' ,"%tbname
tbname_filters = tbname_ins[:-1]
max_sql = f"select max({col_name}) from stb1 where tbname in ({tbname_filters});"
same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) order by {col_name} desc limit 1"
tdSql.query(max_sql)
max_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
if max_result !=same_result:
tdLog.exit(" max function work not as expected, sql : %s "% max_sql)
else:
tdLog.info(" max function work as expected, sql : %s "% max_sql)
def check_max_status(self):
# check max function work status
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tdSql.query("desc stb1")
col_names = tdSql.queryResult
colnames = []
for col_name in col_names:
if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]:
colnames.append(col_name[0])
for tablename in tablenames:
for colname in colnames:
self.check_max_functions(tablename,colname)
# check max function for different vnodes
for colname in colnames:
if colname.startswith("c"):
self.check_max_distribute_diff_vnode(colname)
else:
# self.check_max_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
# basic filter
tdSql.query("select max(c1) from stb1 where c1 is null")
tdSql.checkRows(0)
tdSql.query("select max(c1) from stb1 where t1=1")
tdSql.checkData(0,0,10)
tdSql.query("select max(c1+c2) from stb1 where c1 =1 ")
tdSql.checkData(0,0,11112.000000000)
tdSql.query("select max(c1) from stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,10)
tdSql.query("select max(c1) from stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select max(c1) from stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select max(c1) from stb1 union all select max(c1) from stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,28)
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.query("select max(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,9.00000)
# group by
tdSql.execute(" use testdb ")
tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
tdSql.checkRows(20)
tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
tdSql.checkRows(30)
tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
tdSql.checkRows(31)
# partition by tbname or partition by tag
tdSql.query("select max(c1),tbname from stb1 partition by tbname")
query_data = tdSql.queryResult
for row in query_data:
tbname = row[1]
tdSql.query(" select max(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
tdSql.query("select max(c1),tbname from stb1 partition by t1")
query_data = tdSql.queryResult
for row in query_data:
tbname = row[1]
tdSql.query(" select max(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
# nested query support for max
tdSql.query("select abs(c2+2)+1 from (select max(c1) c2 from stb1)")
tdSql.checkData(0,0,31.000000000)
tdSql.query("select max(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.checkData(0,0,31.000000000)
tdSql.query("select max(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.checkData(0,0,31.000000000)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
tdSql.checkData(0,3,-999)
def run(self):
self.prepare_datas_of_distribute()
self.check_distribute_datas()
self.check_max_status()
self.distribute_agg_query()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,294 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
self.ts = 1537146000000
def check_min_functions(self, tbname , col_name):
min_sql = f"select min({col_name}) from {tbname};"
same_sql = f"select {col_name} from {tbname} where {col_name} is not null order by {col_name} asc limit 1"
tdSql.query(min_sql)
min_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
if min_result !=same_result:
tdLog.exit(" min function work not as expected, sql : %s "% min_sql)
else:
tdLog.info(" min function work as expected, sql : %s "% min_sql)
def prepare_datas_of_distribute(self):
# prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
# get all vgroup_ids
tdSql.query("show vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check the sub_tables of each vnode to make sure the sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
vnode_tables[table_name[6]].append(table_name[0])
self.vnode_disbutes = vnode_tables
count = 0
for k ,v in vnode_tables.items():
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
def check_min_distribute_diff_vnode(self,col_name):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
if len(v)>=2:
vgroup_ids.append(k)
distribute_tbnames = []
for vgroup_id in vgroup_ids:
vnode_tables = self.vnode_disbutes[vgroup_id]
distribute_tbnames.append(random.sample(vnode_tables,1)[0])
tbname_ins = ""
for tbname in distribute_tbnames:
tbname_ins += "'%s' ,"%tbname
tbname_filters = tbname_ins[:-1]
min_sql = f"select min({col_name}) from stb1 where tbname in ({tbname_filters});"
same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null order by {col_name} asc limit 1"
tdSql.query(min_sql)
min_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
if min_result !=same_result:
tdLog.exit(" min function work not as expected, sql : %s "% min_sql)
else:
tdLog.info(" min function work as expected, sql : %s "% min_sql)
def check_min_status(self):
# check min function work status
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tdSql.query("desc stb1")
col_names = tdSql.queryResult
colnames = []
for col_name in col_names:
if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]:
colnames.append(col_name[0])
for tablename in tablenames:
for colname in colnames:
self.check_min_functions(tablename,colname)
# check min function for different vnodes
for colname in colnames:
if colname.startswith("c"):
self.check_min_distribute_diff_vnode(colname)
else:
# self.check_min_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
# basic filter
tdSql.query("select min(c1) from stb1 where c1 is null")
tdSql.checkRows(0)
tdSql.query("select min(c1) from stb1 where t1=1")
tdSql.checkData(0,0,2)
tdSql.query("select min(c1+c2) from stb1 where c1 =1 ")
tdSql.checkData(0,0,11112.000000000)
tdSql.query("select min(c1) from stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,2)
tdSql.query("select min(c1) from stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select min(c1) from stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select min(c1) from stb1 union all select min(c1) from stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,0)
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.query("select min(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,0.00000)
# group by
tdSql.execute(" use testdb ")
tdSql.query(" select min(c1),c1 from stb1 group by t1 ")
tdSql.checkRows(20)
tdSql.query(" select min(c1),c1 from stb1 group by c1 ")
tdSql.checkRows(30)
tdSql.query(" select min(c1),c2 from stb1 group by c2 ")
tdSql.checkRows(31)
# partition by tbname or partition by tag
tdSql.query("select min(c1),tbname from stb1 partition by tbname")
query_data = tdSql.queryResult
for row in query_data:
tbname = row[1]
tdSql.query(" select min(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
tdSql.query("select min(c1),tbname from stb1 partition by t1")
query_data = tdSql.queryResult
for row in query_data:
tbname = row[1]
tdSql.query(" select min(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
# nested query support for min
tdSql.query("select abs(c2+2)+1 from (select min(c1) c2 from stb1)")
tdSql.checkData(0,0,3.000000000)
tdSql.query("select min(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.checkData(0,0,3.000000000)
tdSql.query("select min(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.checkData(0,0,3.000000000)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3),min(c1) from stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
tdSql.checkData(0,3,-999)
tdSql.checkData(0,4,0)
def run(self):
self.prepare_datas_of_distribute()
self.check_distribute_datas()
self.check_min_status()
self.distribute_agg_query()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,281 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
self.ts = 1537146000000
def check_spread_functions(self, tbname , col_name):
spread_sql = f"select spread({col_name}) from {tbname};"
same_sql = f"select max({col_name})-min({col_name}) from {tbname}"
tdSql.query(spread_sql)
spread_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
if spread_result !=same_result:
tdLog.exit(" spread function work not as expected, sql : %s "% spread_sql)
else:
tdLog.info(" spread function work as expected, sql : %s "% spread_sql)
def prepare_datas_of_distribute(self):
# prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
# get all vgroup_ids
tdSql.query("show vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check the sub_tables of each vnode to make sure the sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
vnode_tables[table_name[6]].append(table_name[0])
self.vnode_disbutes = vnode_tables
count = 0
for k ,v in vnode_tables.items():
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
def check_spread_distribute_diff_vnode(self,col_name):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
if len(v)>=2:
vgroup_ids.append(k)
distribute_tbnames = []
for vgroup_id in vgroup_ids:
vnode_tables = self.vnode_disbutes[vgroup_id]
distribute_tbnames.append(random.sample(vnode_tables,1)[0])
tbname_ins = ""
for tbname in distribute_tbnames:
tbname_ins += "'%s' ,"%tbname
tbname_filters = tbname_ins[:-1]
spread_sql = f"select spread({col_name}) from stb1 where tbname in ({tbname_filters})"
same_sql = f"select max({col_name}) - min({col_name}) from stb1 where tbname in ({tbname_filters})"
tdSql.query(spread_sql)
spread_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
if spread_result !=same_result:
tdLog.exit(" spread function work not as expected, sql : %s "% spread_sql)
else:
tdLog.info(" spread function work as expected, sql : %s "% spread_sql)
def check_spread_status(self):
# check max function work status
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tdSql.query("desc stb1")
col_names = tdSql.queryResult
colnames = []
for col_name in col_names:
if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]:
colnames.append(col_name[0])
for tablename in tablenames:
for colname in colnames:
self.check_spread_functions(tablename,colname)
# check spread function for different vnodes
for colname in colnames:
if colname.startswith("c"):
self.check_spread_distribute_diff_vnode(colname)
else:
# self.check_spread_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
# basic filter
tdSql.query("select spread(c1) from stb1 where c1 is null")
tdSql.checkRows(0)
tdSql.query("select spread(c1) from stb1 where t1=1")
tdSql.checkData(0,0,8.000000000)
tdSql.query("select spread(c1+c2) from stb1 where c1 =1 ")
tdSql.checkData(0,0,0.000000000)
tdSql.query("select spread(c1) from stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,8.000000000)
tdSql.query("select spread(c1) from stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select spread(c1) from stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select spread(c1) from stb1 union all select max(c1)-min(c1) from stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,28.000000000)
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.query("select spread(tb1.c1), spread(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,9.000000000)
tdSql.checkData(0,1,9.00000)
# group by
tdSql.execute(" use testdb ")
tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
tdSql.checkRows(20)
tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
tdSql.checkRows(30)
tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
tdSql.checkRows(31)
# partition by tbname or partition by tag
tdSql.query("select spread(c1) from stb1 partition by tbname")
query_data = tdSql.queryResult
# nested query support for spread
tdSql.query("select spread(c2+2)+1 from (select max(c1) c2 from stb1)")
tdSql.checkData(0,0,1.000000000)
tdSql.query("select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.checkData(0,0,29.000000000)
tdSql.query("select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.checkData(0,0,29.000000000)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1) from stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
tdSql.checkData(0,3,-999)
tdSql.checkData(0,4,28.000000000)
def run(self):
self.prepare_datas_of_distribute()
self.check_distribute_datas()
self.check_spread_status()
self.distribute_agg_query()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,278 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random ,os ,sys
import platform
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
self.ts = 1537146000000
def check_sum_functions(self, tbname , col_name):
sum_sql = f"select sum({col_name}) from {tbname};"
same_sql = f"select {col_name} from {tbname} where {col_name} is not null "
tdSql.query(same_sql)
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
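# numpy's default integer is 32-bit on Windows, which can overflow when summing; widen to int64 first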
if (platform.system().lower() == 'windows' and pre_data.dtype == 'int32'):
pre_data = np.array(pre_data, dtype = 'int64')
pre_sum = np.sum(pre_data)
tdSql.query(sum_sql)
tdSql.checkData(0,0,pre_sum)
def prepare_datas_of_distribute(self):
# prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
# get all vgroup_ids
tdSql.query("show vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check the sub_tables of each vnode to make sure the sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
vnode_tables[table_name[6]].append(table_name[0])
self.vnode_disbutes = vnode_tables
count = 0
for k ,v in vnode_tables.items():
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
def check_sum_distribute_diff_vnode(self,col_name):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
if len(v)>=2:
vgroup_ids.append(k)
distribute_tbnames = []
for vgroup_id in vgroup_ids:
vnode_tables = self.vnode_disbutes[vgroup_id]
distribute_tbnames.append(random.sample(vnode_tables,1)[0])
tbname_ins = ""
for tbname in distribute_tbnames:
tbname_ins += "'%s' ,"%tbname
tbname_filters = tbname_ins[:-1]
sum_sql = f"select sum({col_name}) from stb1 where tbname in ({tbname_filters});"
same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
tdSql.query(same_sql)
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
if (platform.system().lower() == 'windows' and pre_data.dtype == 'int32'):
pre_data = np.array(pre_data, dtype = 'int64')
pre_sum = np.sum(pre_data)
tdSql.query(sum_sql)
tdSql.checkData(0,0,pre_sum)
def check_sum_status(self):
# check sum function work status
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tdSql.query("desc stb1")
col_names = tdSql.queryResult
colnames = []
for col_name in col_names:
if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]:
colnames.append(col_name[0])
for tablename in tablenames:
for colname in colnames:
self.check_sum_functions(tablename,colname)
# check sum function for different vnodes
for colname in colnames:
if colname.startswith("c"):
self.check_sum_distribute_diff_vnode(colname)
else:
# self.check_sum_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
# basic filter
tdSql.query(" select sum(c1) from stb1 ")
tdSql.checkData(0,0,2592)
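# re-deriving the total from per-table partial sums must give the same result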
tdSql.query(" select sum(a) from (select sum(c1) a from stb1 partition by tbname) ")
tdSql.checkData(0,0,2592)
tdSql.query(" select sum(c1) from stb1 where t1=1")
tdSql.checkData(0,0,54)
tdSql.query("select sum(c1+c2) from stb1 where c1 =1 ")
tdSql.checkData(0,0,22224.000000000)
tdSql.query("select sum(c1) from stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,54)
tdSql.query("select sum(c1) from stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select sum(c1) from stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
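        # Sanity arithmetic: assuming the prepare step tagged the 20 sub_tables
        # with t1 = 0..19, the filter t1 > 4 keeps 15 of them, and partition by
        # tbname emits one row per remaining sub_table.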
# union all
tdSql.query("select sum(c1) from stb1 union all select sum(c1) from stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,2592)
tdSql.query("select sum(a) from (select sum(c1) a from stb1 union all select sum(c1) a from stb1)")
tdSql.checkRows(1)
tdSql.checkData(0,0,5184)
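        # Sanity arithmetic for the union checks: each branch returns the full
        # supertable sum 2592, so union all yields two rows of 2592 and summing
        # over the subquery gives 2 * 2592 = 5184.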
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.query("select sum(tb1.c1), sum(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,45)
tdSql.checkData(0,1,45.000000000)
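        # Sanity arithmetic for the join: c1 and c2 both hold 0..9 in each table
        # and all 10 timestamps match, so both sums equal 0 + 1 + ... + 9 = 45.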
# group by
tdSql.execute(" use testdb ")
# partition by tbname or partition by tag
tdSql.query("select sum(c1) from stb1 partition by tbname")
tdSql.checkRows(20)
        # nested queries on top of sum
tdSql.query("select abs(c2+2)+1 from (select sum(c1) c2 from stb1)")
tdSql.checkData(0,0,2595.000000000)
tdSql.query("select sum(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.checkData(0,0,2960.000000000)
tdSql.query("select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.checkData(0,0,2960.000000000)
        # mix sum with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2) from stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
tdSql.checkData(0,3,-999)
tdSql.checkData(0,4,28202310.000000000)
def run(self):
self.prepare_datas_of_distribute()
self.check_distribute_datas()
self.check_sum_status()
self.distribute_agg_query()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
View File
@ -11,8 +11,11 @@
# -*- coding: utf-8 -*-
import random
import string
import sys
import taos
from util.common import *
from util.log import *
from util.cases import *
from util.sql import *
@ -25,124 +28,159 @@ class TDTestCase:
        tdSql.init(conn.cursor())
        self.rowNum = 10
        self.tbnum = 20
        self.ts = 1537146000000
        self.binary_str = 'taosdata'
        self.nchar_str = '涛思数据'

    def first_check_base(self):
        tdSql.prepare()
        column_dict = {
            'col1': 'tinyint',
            'col2': 'smallint',
            'col3': 'int',
            'col4': 'bigint',
            'col5': 'tinyint unsigned',
            'col6': 'smallint unsigned',
            'col7': 'int unsigned',
            'col8': 'bigint unsigned',
            'col9': 'float',
            'col10': 'double',
            'col11': 'bool',
            'col12': 'binary(20)',
            'col13': 'nchar(20)'
        }
        tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
                    col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
        tdSql.execute("create table stb_1 using stb tags('beijing')")
        tdSql.execute("insert into stb_1(ts) values(%d)" % (self.ts - 1))
        column_list = ['col1','col2','col3','col4','col5','col6','col7','col8','col9','col10','col11','col12','col13']
        for i in ['stb_1','db.stb_1','stb_1','db.stb_1']:
            tdSql.query(f"select first(*) from {i}")
            tdSql.checkRows(1)
            tdSql.checkData(0, 1, None)
        #!bug TD-16561
        # for i in ['stb','db.stb']:
        #     tdSql.query(f"select first(*) from {i}")
        #     tdSql.checkRows(1)
        #     tdSql.checkData(0, 1, None)
        for i in column_list:
            for j in ['stb_1','db.stb_1','stb_1','db.stb_1']:
                tdSql.query(f"select first({i}) from {j}")
                tdSql.checkRows(0)
        for i in range(self.rowNum):
            tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
        for k, v in column_dict.items():
            for j in ['stb_1', 'db.stb_1', 'stb', 'db.stb']:
                tdSql.query(f"select first({k}) from {j}")
                tdSql.checkRows(1)
                # tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned
                if v in ['tinyint', 'smallint', 'int', 'bigint', 'tinyint unsigned', 'smallint unsigned', 'int unsigned', 'bigint unsigned']:
                    tdSql.checkData(0, 0, 1)
                # float,double
                elif v in ['float', 'double']:
                    tdSql.checkData(0, 0, 0.1)
                # bool
                elif v == 'bool':
                    tdSql.checkData(0, 0, False)
                # binary
                elif 'binary' in v:
                    tdSql.checkData(0, 0, f'{self.binary_str}1')
                # nchar
                elif 'nchar' in v:
                    tdSql.checkData(0, 0, f'{self.nchar_str}1')
        #!bug TD-16569
        tdSql.query("select first(*),last(*) from stb where ts < 23 interval(1s)")
        tdSql.checkRows(0)
tdSql.execute('drop database db')
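        # Note on the checks above: with only a ts-only row inserted, first(*)
        # returns one row (ts is set, every other column is NULL), while
        # first(col) skips NULL values and therefore returns zero rows.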
def first_check_stb_distribute(self):
        # prepare data distributed across multiple vgroups
dbname = tdCom.getLongName(10, "letters")
stbname = tdCom.getLongName(5, "letters")
child_table_num = 20
vgroup = 2
column_dict = {
'col1': 'tinyint',
'col2': 'smallint',
'col3': 'int',
'col4': 'bigint',
'col5': 'tinyint unsigned',
'col6': 'smallint unsigned',
'col7': 'int unsigned',
'col8': 'bigint unsigned',
'col9': 'float',
'col10': 'double',
'col11': 'bool',
'col12': 'binary(20)',
'col13': 'nchar(20)'
}
tdSql.execute(f"create database if not exists {dbname} vgroups {vgroup}")
tdSql.execute(f'use {dbname}')
        # build 20 child tables and insert 10 rows into each
tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
for i in range(child_table_num):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')")
tdSql.execute(f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i))
#!bug TD-16561
# for i in [f'{stbname}', f'{dbname}.{stbname}']:
# tdSql.query(f"select first(*) from {i}")
# tdSql.checkRows(1)
# tdSql.checkData(0, 1, None)
tdSql.query('show tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):
vgroup_list.append(tdSql.queryResult[i][6])
vgroup_list_set = set(vgroup_list)
# print(vgroup_list_set)
# print(vgroup_list)
for i in vgroup_list_set:
vgroups_num = vgroup_list.count(i)
if vgroups_num >=2:
tdLog.info(f'This scene with {vgroups_num} vgroups is ok!')
continue
else:
                tdLog.exit(f'This scene does not meet the requirements with {vgroups_num} vgroup!\n')
for i in range(child_table_num):
for j in range(self.rowNum):
tdSql.execute(f"insert into {stbname}_{i} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + j + i, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 0.1, j + 0.1, j % 2, j + 1, j + 1))
for k, v in column_dict.items():
for j in [f'{stbname}_{i}', f'{dbname}.{stbname}_{i}', f'{stbname}', f'{dbname}.{stbname}']:
tdSql.query(f"select first({k}) from {j}")
tdSql.checkRows(1)
# tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned
if v == 'tinyint' or v == 'smallint' or v == 'int' or v == 'bigint' or v == 'tinyint unsigned' or v == 'smallint unsigned'\
or v == 'int unsigned' or v == 'bigint unsigned':
tdSql.checkData(0, 0, 1)
# float,double
elif v == 'float' or v == 'double':
tdSql.checkData(0, 0, 0.1)
# bool
elif v == 'bool':
tdSql.checkData(0, 0, False)
# binary
elif 'binary' in v:
tdSql.checkData(0, 0, f'{self.binary_str}1')
# nchar
elif 'nchar' in v:
tdSql.checkData(0, 0, f'{self.nchar_str}1')
#!bug TD-16569
# tdSql.query(f"select first(*),last(*) from {stbname} where ts < 23 interval(1s)")
# tdSql.checkRows(0)
tdSql.execute(f'drop database {dbname}')
def run(self):
self.first_check_base()
self.first_check_stb_distribute()
    def stop(self):
        tdSql.close()
View File
@ -44,8 +44,7 @@
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def histogram_check_base(self):
        print("running {}".format(__file__))
        tdSql.execute("drop database if exists db")
        tdSql.execute("create database if not exists db")
@ -3183,6 +3182,72 @@ class TDTestCase:
        tdSql.execute('drop database db')
def histogram_check_distribute(self):
dbname = "db"
stbname = "stb"
row_num = 10
child_table_num = 20
vgroups = 2
user_input_json = "[1,3,5,7]"
ts = 1537146000000
binary_str = 'taosdata'
nchar_str = '涛思数据'
column_dict = {
'ts' : 'timestamp',
'col1' : 'tinyint',
'col2' : 'smallint',
'col3' : 'int',
'col4' : 'bigint',
'col5' : 'tinyint unsigned',
'col6' : 'smallint unsigned',
'col7' : 'int unsigned',
'col8' : 'bigint unsigned',
'col9' : 'float',
'col10': 'double',
'col11': 'bool',
'col12': 'binary(20)',
'col13': 'nchar(20)'
}
tdSql.execute(f"create database if not exists {dbname} vgroups {vgroups}")
tdSql.execute(f'use {dbname}')
        # build 20 child tables and insert 10 rows into each
tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
for i in range(child_table_num):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')")
tdSql.query('show tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):
vgroup_list.append(tdSql.queryResult[i][6])
vgroup_list_set = set(vgroup_list)
for i in vgroup_list_set:
vgroups_num = vgroup_list.count(i)
if vgroups_num >=2:
tdLog.info(f'This scene with {vgroups_num} vgroups is ok!')
continue
else:
tdLog.exit(f'This scene does not meet the requirements with {vgroups_num} vgroup!\n')
for i in range(child_table_num):
for j in range(row_num):
tdSql.execute(f"insert into {stbname}_{i} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{binary_str}%d', '{nchar_str}%d')"
% (ts + j + i, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 0.1, j + 0.1, j % 2, j + 1, j + 1))
# user_input
for k,v in column_dict.items():
if v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or v.lower() =='float' or v.lower() =='double'\
or v.lower() =='tinyint unsigned' or v.lower() =='smallint unsigned' or v.lower() =='int unsigned' or v.lower() =='bigint unsigned':
tdSql.query(f'select histogram({k}, "user_input", "{user_input_json}", 0) from {stbname}')
tdSql.checkRows(len(user_input_json[1:-1].split(','))-1)
elif 'binary' in v.lower() or 'nchar' in v.lower() or 'bool' == v.lower():
tdSql.error(f'select histogram({k}, "user_input", "{user_input_json}", 0) from {stbname}')
tdSql.execute(f'drop database {dbname}')
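        # Sanity arithmetic for the user_input case above: the boundaries
        # [1,3,5,7] define len(bins) - 1 = 3 buckets, (1,3], (3,5] and (5,7],
        # hence one result row per bucket.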
def run(self):
self.histogram_check_base()
self.histogram_check_distribute()
    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
View File
@ -214,6 +214,79 @@ class TDTestCase:
        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
                { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
def __create_stable(self,stbname='stb',column_dict={'ts':'timestamp','col1': 'tinyint','col2': 'smallint','col3': 'int',
'col4': 'bigint','col5': 'tinyint unsigned','col6': 'smallint unsigned','col7': 'int unsigned',
'col8': 'bigint unsigned','col9': 'float','col10': 'double','col11': 'bool','col12': 'binary(20)','col13': 'nchar(20)'},
tag_dict={'ts_tag':'timestamp','t1': 'tinyint','t2': 'smallint','t3': 'int',
't4': 'bigint','t5': 'tinyint unsigned','t6': 'smallint unsigned','t7': 'int unsigned',
't8': 'bigint unsigned','t9': 'float','t10': 'double','t11': 'bool','t12': 'binary(20)','t13': 'nchar(20)'}):
column_sql = ''
tag_sql = ''
for k,v in column_dict.items():
column_sql += f"{k} {v},"
for k,v in tag_dict.items():
tag_sql += f"{k} {v},"
tdSql.execute(f'create table if not exists {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})')
def __insert_data(self):
pass
def __hyperloglog_check_distribute(self):
dbname = "dbtest"
stbname = "stb"
childtable_num = 20
vgroups_num = 4
row_num = 10
ts = 1537146000000
binary_str = 'taosdata'
nchar_str = '涛思数据'
column_dict = {
'ts':'timestamp',
'col1': 'tinyint',
'col2': 'smallint',
'col3': 'int',
'col4': 'bigint',
'col5': 'tinyint unsigned',
'col6': 'smallint unsigned',
'col7': 'int unsigned',
'col8': 'bigint unsigned',
'col9': 'float',
'col10': 'double',
'col11': 'bool',
'col12': 'binary(20)',
'col13': 'nchar(20)'
}
tag_dict = {
'loc':'nchar(20)'
}
tdSql.execute(f"create database if not exists {dbname} vgroups {vgroups_num}")
tdSql.execute(f'use {dbname}')
self.__create_stable(stbname,column_dict,tag_dict)
for i in range(childtable_num):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')")
tdSql.query('show tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):
vgroup_list.append(tdSql.queryResult[i][6])
vgroup_list_set = set(vgroup_list)
for i in vgroup_list_set:
vgroups_num = vgroup_list.count(i)
if vgroups_num >=2:
tdLog.info(f'This scene with {vgroups_num} vgroups is ok!')
continue
else:
                tdLog.exit(f'This scene does not meet the requirements with {vgroups_num} vgroup!\n')
for i in range(row_num):
tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{binary_str}%d', '{nchar_str}%d')"
% (ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
for k in column_dict.keys():
tdSql.query(f"select hyperloglog({k}) from {stbname}")
tdSql.checkRows(1)
tdSql.query(f"select hyperloglog({k}) from {stbname} group by {k}")
tdSql.execute(f'drop database {dbname}')
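        # Note on the checks above: hyperloglog() returns one approximate
        # distinct count per group, so the ungrouped query always yields a
        # single row; at this tiny cardinality (10 distinct values per column)
        # the estimate is expected to match the exact count.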
    def __insert_data(self, rows):
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
@ -311,6 +384,10 @@ class TDTestCase:
        tdLog.printNoPrefix("==========step4:after wal, all check again ")
        self.all_test()
tdLog.printNoPrefix("==========step5: distribute scene check")
self.__hyperloglog_check_distribute()
    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
View File
@ -1,6 +1,9 @@
import random
import string
from util.log import *
from util.cases import *
from util.sql import *
from util.common import *
import numpy as np
@ -10,416 +13,258 @@ class TDTestCase:
        tdSql.init(conn.cursor())
        self.rowNum = 10
        self.tbnum = 20
        self.ts = 1537146000000
        self.binary_str = 'taosdata'
        self.nchar_str = '涛思数据'

    def set_create_normaltable_sql(self, ntbname, column_dict):
        column_sql = ''
        for k, v in column_dict.items():
            column_sql += f"{k} {v},"
        create_ntb_sql = f'create table {ntbname} (ts timestamp,{column_sql[:-1]})'
        return create_ntb_sql

    def set_create_stable_sql(self,stbname,column_dict,tag_dict):
        column_sql = ''
        tag_sql = ''
        for k,v in column_dict.items():
            column_sql += f"{k} {v},"
        for k,v in tag_dict.items():
            tag_sql += f"{k} {v},"
        create_stb_sql = f'create table {stbname} (ts timestamp,{column_sql[:-1]}) tags({tag_sql[:-1]})'
        return create_stb_sql

    def last_check_stb_tb_base(self):
        tdSql.prepare()
        stbname = tdCom.getLongName(5, "letters")
        column_dict = {
            'col1': 'tinyint',
            'col2': 'smallint',
            'col3': 'int',
            'col4': 'bigint',
            'col5': 'tinyint unsigned',
            'col6': 'smallint unsigned',
            'col7': 'int unsigned',
            'col8': 'bigint unsigned',
            'col9': 'float',
            'col10': 'double',
            'col11': 'bool',
            'col12': 'binary(20)',
            'col13': 'nchar(20)'
        }
        tag_dict = {
            'loc':'nchar(20)'
        }
        tdSql.execute(self.set_create_stable_sql(stbname,column_dict,tag_dict))
        tdSql.execute(f"create table {stbname}_1 using {stbname} tags('beijing')")
        tdSql.execute(f"insert into {stbname}_1(ts) values(%d)" % (self.ts - 1))
        for i in [f'{stbname}_1', f'db.{stbname}_1']:
            tdSql.query(f"select last(*) from {i}")
            tdSql.checkRows(1)
            tdSql.checkData(0, 1, None)
        #!bug TD-16561
        # for i in ['stb','db.stb','stb','db.stb']:
        #     tdSql.query(f"select last(*) from {i}")
        #     tdSql.checkRows(1)
        #     tdSql.checkData(0, 1, None)
        for i in column_dict.keys():
            for j in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']:
                tdSql.query(f"select last({i}) from {j}")
                tdSql.checkRows(0)
        tdSql.query(f"select last({list(column_dict.keys())[0]}) from {stbname}_1 group by {list(column_dict.keys())[-1]}")
        tdSql.checkRows(1)
        for i in range(self.rowNum):
            tdSql.execute(f"insert into {stbname}_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
        for i in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']:
            tdSql.query(f"select last(*) from {i}")
            tdSql.checkRows(1)
            tdSql.checkData(0, 1, 10)
        for k, v in column_dict.items():
            for j in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']:
                tdSql.query(f"select last({k}) from {j}")
                tdSql.checkRows(1)
                # tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned
                if v.lower() in ['tinyint', 'smallint', 'int', 'bigint', 'tinyint unsigned', 'smallint unsigned', 'int unsigned', 'bigint unsigned']:
                    tdSql.checkData(0, 0, 10)
                # float,double
                elif v.lower() in ['float', 'double']:
                    tdSql.checkData(0, 0, 9.1)
                # bool
                elif v.lower() == 'bool':
                    tdSql.checkData(0, 0, True)
                # binary
                elif 'binary' in v.lower():
                    tdSql.checkData(0, 0, f'{self.binary_str}{self.rowNum}')
                # nchar
                elif 'nchar' in v.lower():
                    tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}')
        for i in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']:
            tdSql.query(f"select last({list(column_dict.keys())[0]},{list(column_dict.keys())[1]},{list(column_dict.keys())[2]}) from {stbname}_1")
            tdSql.checkData(0, 2, 10)
        tdSql.error(f"select {list(column_dict.keys())[0]} from {stbname} where last({list(column_dict.keys())[12]})='涛思数据10'")
        tdSql.error(f"select {list(column_dict.keys())[0]} from {stbname}_1 where last({list(column_dict.keys())[12]})='涛思数据10'")
        tdSql.execute('drop database db')

    def last_check_ntb_base(self):
        tdSql.prepare()
        ntbname = tdCom.getLongName(5, "letters")
        column_dict = {
            'col1': 'tinyint',
            'col2': 'smallint',
            'col3': 'int',
            'col4': 'bigint',
            'col5': 'tinyint unsigned',
            'col6': 'smallint unsigned',
            'col7': 'int unsigned',
            'col8': 'bigint unsigned',
            'col9': 'float',
            'col10': 'double',
            'col11': 'bool',
            'col12': 'binary(20)',
            'col13': 'nchar(20)'
        }
        create_ntb_sql = self.set_create_normaltable_sql(ntbname, column_dict)
        tdSql.execute(create_ntb_sql)
        tdSql.execute(f"insert into {ntbname}(ts) values(%d)" % (self.ts - 1))
        tdSql.query(f"select last(*) from {ntbname}")
        tdSql.checkRows(1)
        tdSql.checkData(0, 1, None)
        tdSql.query(f"select last(*) from db.{ntbname}")
        tdSql.checkRows(1)
        tdSql.checkData(0, 1, None)
        for i in column_dict.keys():
            for j in [f'{ntbname}', f'db.{ntbname}']:
                tdSql.query(f"select last({i}) from {j}")
                tdSql.checkRows(0)
        for i in range(self.rowNum):
            tdSql.execute(f"insert into {ntbname} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
        tdSql.query(f"select last(*) from {ntbname}")
        tdSql.checkRows(1)
        tdSql.checkData(0, 1, 10)
        tdSql.query(f"select last(*) from db.{ntbname}")
        tdSql.checkRows(1)
        tdSql.checkData(0, 1, 10)
        for k, v in column_dict.items():
            for j in [f'{ntbname}', f'db.{ntbname}']:
                tdSql.query(f"select last({k}) from {j}")
                tdSql.checkRows(1)
                # tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned
                if v.lower() in ['tinyint', 'smallint', 'int', 'bigint', 'tinyint unsigned', 'smallint unsigned', 'int unsigned', 'bigint unsigned']:
                    tdSql.checkData(0, 0, 10)
                # float,double
                elif v.lower() in ['float', 'double']:
                    tdSql.checkData(0, 0, 9.1)
                # bool
                elif v.lower() == 'bool':
                    tdSql.checkData(0, 0, True)
                # binary
                elif 'binary' in v.lower():
                    tdSql.checkData(0, 0, f'{self.binary_str}{self.rowNum}')
                # nchar
                elif 'nchar' in v.lower():
                    tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}')
        tdSql.error(
            f"select {list(column_dict.keys())[0]} from {ntbname} where last({list(column_dict.keys())[9]})='涛思数据10'")

    def last_check_stb_distribute(self):
        # prepare data distributed across 4 vgroups
        dbname = tdCom.getLongName(10, "letters")
        stbname = tdCom.getLongName(5, "letters")
        vgroup_num = 4
        column_dict = {
            'col1': 'tinyint',
            'col2': 'smallint',
            'col3': 'int',
            'col4': 'bigint',
            'col5': 'tinyint unsigned',
            'col6': 'smallint unsigned',
            'col7': 'int unsigned',
            'col8': 'bigint unsigned',
            'col9': 'float',
            'col10': 'double',
            'col11': 'bool',
            'col12': 'binary(20)',
            'col13': 'nchar(20)'
        }
        tdSql.execute(
            f"create database if not exists {dbname} vgroups {vgroup_num}")
        tdSql.execute(f'use {dbname}')
        # build 20 child tables and insert 10 rows into each
        tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
                    col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
        for i in range(self.tbnum):
            tdSql.execute(
                f"create table {stbname}_{i} using {stbname} tags('beijing')")
            tdSql.execute(
                f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i))
        # for i in [f'{stbname}', f'{dbname}.{stbname}']:
        #     tdSql.query(f"select last(*) from {i}")
        #     tdSql.checkRows(1)
        #     tdSql.checkData(0, 1, None)
        tdSql.query('show tables')
        vgroup_list = []
        for i in range(len(tdSql.queryResult)):
            vgroup_list.append(tdSql.queryResult[i][6])
        vgroup_list_set = set(vgroup_list)
        for i in vgroup_list_set:
            vgroups_num = vgroup_list.count(i)
            if vgroups_num >= 2:
                tdLog.info(f'This scene with {vgroups_num} vgroups is ok!')
                continue
            else:
                tdLog.exit(
                    f'This scene does not meet the requirements with {vgroups_num} vgroup!\n')
        for i in range(self.tbnum):
            for j in range(self.rowNum):
                tdSql.execute(f"insert into {stbname}_{i} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                              % (self.ts + j + i, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 0.1, j + 0.1, j % 2, j + 1, j + 1))
        for i in [f'{stbname}', f'{dbname}.{stbname}']:
            tdSql.query(f"select last(*) from {i}")
            tdSql.checkRows(1)
            tdSql.checkData(0, 1, 10)
        for k, v in column_dict.items():
            for j in [f'{stbname}', f'{dbname}.{stbname}']:
                tdSql.query(f"select last({k}) from {j}")
                tdSql.checkRows(1)
                # tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned
                if v.lower() in ['tinyint', 'smallint', 'int', 'bigint', 'tinyint unsigned', 'smallint unsigned', 'int unsigned', 'bigint unsigned']:
                    tdSql.checkData(0, 0, 10)
                # float,double
                elif v.lower() in ['float', 'double']:
                    tdSql.checkData(0, 0, 9.1)
                # bool
                elif v.lower() == 'bool':
                    tdSql.checkData(0, 0, True)
                # binary
                elif 'binary' in v.lower():
                    tdSql.checkData(0, 0, f'{self.binary_str}{self.rowNum}')
                # nchar
                elif 'nchar' in v.lower():
                    tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}')
        tdSql.execute(f'drop database {dbname}')

    def run(self):
        self.last_check_stb_tb_base()
        self.last_check_ntb_base()
        self.last_check_stb_distribute()
    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
View File
@ -5,198 +5,213 @@ import numpy as np
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.rowNum = 10
        self.ts = 1537146000000
self.binary_str = 'taosdata'
        self.nchar_str = '涛思数据'

    def max_check_stb_and_tb_base(self):
        tdSql.prepare()
        intData = []
        floatData = []
        tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
                    col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
        tdSql.execute("create table stb_1 using stb tags('beijing')")
        for i in range(self.rowNum):
            tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
            intData.append(i + 1)
            floatData.append(i + 0.1)
        # max is not supported on timestamp, bool, binary or nchar columns
        for i in ['ts','col11','col12','col13']:
            for j in ['db.stb','stb','db.stb_1','stb_1']:
                tdSql.error(f'select max({i}) from {j}')
        for i in range(1,11):
            for j in ['db.stb','stb','db.stb_1','stb_1']:
                tdSql.query(f"select max(col{i}) from {j}")
                if i<9:
                    tdSql.checkData(0, 0, np.max(intData))
                elif i>=9:
                    tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col1) from stb_1 where col2<=5")
        tdSql.checkData(0,0,5)
        tdSql.query("select max(col1) from stb where col2<=5")
        tdSql.checkData(0,0,5)
        tdSql.execute('drop database db')

    def max_check_ntb_base(self):
        tdSql.prepare()
        intData = []
        floatData = []
        tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
                    col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''')
        for i in range(self.rowNum):
            tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
            intData.append(i + 1)
            floatData.append(i + 0.1)
        # max is not supported on timestamp, bool, binary or nchar columns
        for i in ['ts','col11','col12','col13']:
            for j in ['db.ntb','ntb']:
                tdSql.error(f'select max({i}) from {j}')
        for i in range(1,11):
            for j in ['db.ntb','ntb']:
                tdSql.query(f"select max(col{i}) from {j}")
                if i<9:
                    tdSql.checkData(0, 0, np.max(intData))
                elif i>=9:
                    tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col1) from ntb where col2<=5")
        tdSql.checkData(0,0,5)
tdSql.execute('drop database db')
def check_max_functions(self, tbname , col_name):
max_sql = f"select max({col_name}) from {tbname};"
same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1"
tdSql.query(max_sql)
max_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
if max_result !=same_result:
tdLog.exit(" max function work not as expected, sql : %s "% max_sql)
else:
tdLog.info(" max function work as expected, sql : %s "% max_sql)
def support_distributed_aggregate(self):
        # prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
# get vgroup_ids of all
tdSql.query("show vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
        # check the sub_tables of each vnode to make sure the sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
vnode_tables[table_name[6]].append(table_name[0])
count = 0
for k ,v in vnode_tables.items():
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
        # check that the max function works as expected
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tdSql.query("desc stb1")
col_names = tdSql.queryResult
colnames = []
for col_name in col_names:
if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]:
colnames.append(col_name[0])
for tablename in tablenames:
for colname in colnames:
self.check_max_functions(tablename,colname)
# max function with basic filter
print(vnode_tables)
def run(self):
        # max verification
self.max_check_stb_and_tb_base()
self.max_check_ntb_base()
self.support_distributed_aggregate()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -11,6 +11,9 @@
# -*- coding: utf-8 -*-
import random
import string
from util.common import *
from util.log import *
from util.cases import *
from util.sql import *
@ -22,82 +25,89 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.rowNum = 10
self.tbnum = 20
self.ts = 1537146000000
self.binary_str = 'taosdata'
self.nchar_str = '涛思数据'
def top_check_base(self):
tdSql.prepare()
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
tdSql.execute("create table stb_1 using stb tags('beijing')")
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute("create table test1 using test tags('beijing')")
for i in range(self.rowNum):
tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
column_list = ['col1','col2','col3','col4','col5','col6','col7','col8']
error_column_list = ['col11','col12','col13']
error_param_list = [0,101]
for i in column_list:
tdSql.query(f'select top({i},2) from stb_1')
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)])
for j in error_param_list:
tdSql.error(f'select top({i},{j}) from stb_1')
for i in error_column_list:
tdSql.error(f'select top({i},10) from stb_1')
tdSql.query("select ts,top(col1, 2),ts from stb_1 group by tbname")
tdSql.error("select top(col5, 0) from test")
tdSql.error("select top(col5, 101) from test")
tdSql.error("select top(col6, 0) from test")
tdSql.error("select top(col6, 101) from test")
tdSql.error("select top(col7, 10) from test")
tdSql.error("select top(col8, 10) from test")
tdSql.error("select top(col9, 10) from test")
tdSql.error("select top(col11, 0) from test")
tdSql.error("select top(col11, 101) from test")
tdSql.error("select top(col12, 0) from test")
tdSql.error("select top(col12, 101) from test")
tdSql.error("select top(col13, 0) from test")
tdSql.error("select top(col13, 101) from test")
tdSql.error("select top(col14, 0) from test")
tdSql.error("select top(col14, 101) from test")
tdSql.query("select top(col1, 2) from test")
tdSql.checkRows(2)
tdSql.query('select top(col2,1) from stb_1 interval(1y) order by col2')
tdSql.query("select top(col2, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)])
tdSql.query("select top(col3, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)])
tdSql.query("select top(col4, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)])
tdSql.query("select top(col11, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)])
tdSql.query("select top(col12, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)])
tdSql.query("select top(col13, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)])
tdSql.query("select top(col14, 2) from test")
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)])
tdSql.query("select ts,top(col1, 2),ts from test1")
tdSql.checkRows(2)
tdSql.query("select top(col14, 100) from test")
tdSql.checkRows(10)
tdSql.query("select ts,top(col1, 2),ts from test group by tbname")
tdSql.checkRows(2)
tdSql.query('select top(col2,1) from test interval(1y) order by col2')
tdSql.checkData(0,0,10)
tdSql.error("select * from stb_1 where top(col2,1)=1")
tdSql.execute('drop database db')
def top_check_stb_distribute(self):
# prepare data distributed across multiple vgroups
dbname = tdCom.getLongName(10, "letters")
stbname = tdCom.getLongName(5, "letters")
tdSql.execute(f"create database if not exists {dbname} vgroups 2")
tdSql.execute(f'use {dbname}')
# build 20 child tables and insert 10 rows into every table
tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
for i in range(self.tbnum):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')")
tdSql.execute(f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i))
column_list = ['col1','col2','col3','col4','col5','col6','col7','col8']
for i in [f'{stbname}', f'{dbname}.{stbname}']:
for j in column_list:
tdSql.query(f"select top({j},1) from {i}")
tdSql.checkRows(0)
tdSql.query('show tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):
vgroup_list.append(tdSql.queryResult[i][6])
vgroup_list_set = set(vgroup_list)
tdSql.error("select * from test where bottom(col2,1)=1") for i in vgroup_list_set:
tdSql.error("select top(col14, 0) from test;") vgroups_num = vgroup_list.count(i)
if vgroups_num >=2:
tdLog.info(f'This scene with {vgroups_num} vgroups is ok!')
continue
else:
tdLog.exit(f'This scene does not meet the requirements with {vgroups_num} vgroup!\n')
for i in range(self.rowNum):
for j in range(self.tbnum):
tdSql.execute(f"insert into {stbname}_{j} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
error_column_list = ['col11','col12','col13']
error_param_list = [0,101]
for i in column_list:
tdSql.query(f'select top({i},2) from {stbname}')
tdSql.checkRows(2)
tdSql.checkEqual(tdSql.queryResult,[(10,),(10,)])
for j in error_param_list:
tdSql.error(f'select top({i},{j}) from {stbname}')
for i in error_column_list:
tdSql.error(f'select top({i},10) from {stbname}')
tdSql.query(f"select ts,top(col1, 2),ts from {stbname} group by tbname")
tdSql.checkRows(2*self.tbnum)
tdSql.query(f'select top(col2,1) from {stbname} interval(1y) order by col2')
tdSql.checkData(0,0,10)
tdSql.error(f"select * from {stbname} where top(col2,1)=1")
def run(self):
self.top_check_base()
self.top_check_stb_distribute()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -269,7 +269,8 @@ class TDTestCase:
tdSql.query("show dnodes;") tdSql.query("show dnodes;")
print(tdSql.queryResult) print(tdSql.queryResult)
# drop and follower of mnode
# drop follower of mnode
dropcount =0 dropcount =0
while dropcount <= 10: while dropcount <= 10:
for i in range(1,3): for i in range(1,3):

View File

@ -0,0 +1,428 @@
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes
class MyDnodes(TDDnodes):
def __init__(self ,dnodes_lists):
super(MyDnodes,self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
self.ts = 1500000000000
def buildcluster(self,dnodenumber):
self.depoly_cluster(dnodenumber)
self.master_dnode = self.TDDnodes.dnodes[0]
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _async_raise(self, tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(self,thread):
self._async_raise(thread.ident, SystemExit)
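The two helpers above rely on CPython's ctypes-exposed PyThreadState_SetAsyncExc: the exception is only scheduled, and the target thread raises it the next time it executes Python bytecode, so a thread blocked inside a long C call will not die immediately. A minimal standalone sketch of the same technique, with a hypothetical worker function purely for illustration:

import ctypes
import threading
import time

def worker():
    # spins until the asynchronously raised SystemExit arrives
    while True:
        time.sleep(0.1)

t = threading.Thread(target=worker, daemon=True)
t.start()
# schedule SystemExit inside the worker thread (CPython-specific)
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(t.ident), ctypes.py_object(SystemExit))
t.join(timeout=2)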
def createDbTbale(self,dbcountStart,dbcountStop,stbname,chilCount):
# first add data: db, stable, child table, general table
for couti in range(dbcountStart,dbcountStop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
print("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table %s
(ts timestamp, c1 int, c2 bigint,c3 binary(16), c4 timestamp)
tags (t1 int)
'''%stbname
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(chilCount):
tdSql.execute(f'create table {stbname}_{i+1} using {stbname} tags ( {i+1} )')
def insertTabaleData(self,dbcountStart,dbcountStop,stbname,chilCount,ts_start,rowCount):
# insert data into the child tables
for couti in range(dbcountStart,dbcountStop):
tdSql.execute("use db%d" %couti)
pre_insert = "insert into "
sql = pre_insert
chilCount=int(chilCount)
allRows=chilCount*rowCount
tdLog.debug("doing insert data into stable-index:%s rows:%d ..."%(stbname, allRows))
exeStartTime=time.time()
for i in range(0,chilCount):
sql += " %s_%d values "%(stbname,i)
for j in range(rowCount):
sql += "(%d, %d, %d,'taos_%d',%d) "%(ts_start + j*1000, j, j, j, ts_start + j*1000)
if j >0 and j%4000 == 0:
# print(sql)
tdSql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
# end sql
if sql != pre_insert:
# print(sql)
print(len(sql))
tdSql.execute(sql)
exeEndTime=time.time()
spendTime=exeEndTime-exeStartTime
speedInsert=allRows/spendTime
tdLog.debug("spent %.2fs to INSERT %d rows into %s , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,stbname,speedInsert))
def checkData(self,dbname,stbname,stableCount,CtableCount,rowsPerSTable,):
tdSql.execute("use %s"%dbname)
tdSql.query("show stables")
tdSql.checkRows(stableCount)
tdSql.query("show tables")
tdSql.checkRows(CtableCount)
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
return
def depoly_cluster(self ,dnodes_nums):
testCluster = False
valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
start_port_sec = 6130
for num in range(1, dnodes_nums+1):
dnode = TDDnode(num)
dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
dnode.addExtraCfg("fqdn", f"{hostname}")
dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
dnode.addExtraCfg("monitorFqdn", hostname)
dnode.addExtraCfg("monitorPort", 7043)
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
dnodes.append(dnode)
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
self.TDDnodes.setValgrind(valgrind)
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
# create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# print(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
print(cmd)
os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
def checkdnodes(self,dnodenumber):
count=0
while count < 10:
time.sleep(1)
statusReadyBumber=0
tdSql.query("show dnodes;")
if tdSql.checkRows(dnodenumber) :
print("dnode is %d nodes"%dnodenumber)
for i in range(dnodenumber):
if tdSql.queryResult[i][4] !='ready' :
status=tdSql.queryResult[i][4]
print("dnode:%d status is %s "%(i,status))
break
else:
statusReadyBumber+=1
print(statusReadyBumber)
if statusReadyBumber == dnodenumber :
print("all of %d mnodes is ready in 10s "%dnodenumber)
return True
break
count+=1
else:
print("%d mnodes is not ready in 10s "%dnodenumber)
return False
def check3mnode(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("three mnodes is ready in 10s")
break
count+=1
else:
print("three mnodes is not ready in 10s ")
return -1
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,3,'ready')
def check3mnode1off(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='offline' :
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("stop mnodes on dnode 2 successfully in 10s")
break
elif tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("stop mnodes on dnode 2 successfully in 10s")
break
count+=1
else:
print("stop mnodes on dnode 2 failed in 10s ")
return -1
tdSql.error("drop mnode on dnode 1;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,3,'ready')
def check3mnode2off(self):
count=0
while count < 40:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='offline':
if tdSql.queryResult[2][2]=='follower':
print("stop mnodes on dnode 2 successfully in 10s")
break
count+=1
else:
print("stop mnodes on dnode 2 failed in 10s ")
return -1
tdSql.error("drop mnode on dnode 2;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,2,'offline')
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,2,'follower')
tdSql.checkData(2,3,'ready')
def check3mnode3off(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[2][2]=='offline':
if tdSql.queryResult[1][2]=='follower':
print("stop mnodes on dnode 3 successfully in 10s")
break
count+=1
else:
print("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,2,'follower')
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,2,'offline')
tdSql.checkData(2,3,'ready')
def five_dnode_three_mnode(self,dnodenumber):
# testcase parameters
vgroups=1
dbcountStart=0
dbcountStop=1
dbname="db"
stbname="stb"
tablesPerStb=1000
rowsPerTable=100
startTs=1640966400000 # 2022-01-01 00:00:00.000
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
# first add three mnodes;
tdSql.execute("create mnode on dnode 2")
tdSql.execute("create mnode on dnode 3")
# first check that the status is ready
self.check3mnode()
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
tdLog.debug("stop all of mnode ")
# drop follower of mnode and insert data
self.createDbTbale(dbcountStart, dbcountStop,stbname,tablesPerStb)
# insertTabaleData(dbcountStart, dbcountStop, stbname, chilCount, ts_start, rowCount)
threads=threading.Thread(target=self.insertTabaleData, args=(
dbcountStart,
dbcountStop,
stbname,
tablesPerStb,
startTs,
rowsPerTable))
threads.start()
dropcount =0
while dropcount <= 10:
for i in range(1,3):
tdLog.debug("drop mnode on dnode %d"%(i+1))
tdSql.execute("drop mnode on dnode %d"%(i+1))
tdSql.query("show mnodes;")
count=0
while count<10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(2):
print("drop mnode %d successfully"%(i+1))
break
count+=1
tdLog.debug("create mnode on dnode %d"%(i+1))
tdSql.execute("create mnode on dnode %d"%(i+1))
count=0
while count<10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3):
print("drop mnode %d successfully"%(i+1))
break
count+=1
dropcount+=1
threads.join()
self.check3mnode()
def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
port = dnode.cfgDict["serverPort"]
config_dir = dnode.cfgDir
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
# print(self.master_dnode.cfgDict)
self.buildcluster(5)
self.five_dnode_three_mnode(5)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,377 @@
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes
class MyDnodes(TDDnodes):
def __init__(self ,dnodes_lists):
super(MyDnodes,self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
def buildcluster(self,dnodenumber):
self.depoly_cluster(dnodenumber)
self.master_dnode = self.TDDnodes.dnodes[0]
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _async_raise(self, tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(self,thread):
self._async_raise(thread.ident, SystemExit)
def insert_data(self,countstart,countstop):
# first add data: db, stable, child table, general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
print("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def checkData(self,dbname,stbname,stableCount,CtableCount,rowsPerSTable,):
tdSql.execute("use %s"%dbname)
tdSql.query("show stables")
tdSql.checkRows(stableCount)
tdSql.query("show tables")
tdSql.checkRows(CtableCount)
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
return
def depoly_cluster(self ,dnodes_nums=5,independent=True):
testCluster = False
valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
start_port_sec = 6130
for num in range(1, dnodes_nums+1):
dnode = TDDnode(num)
dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
dnode.addExtraCfg("fqdn", f"{hostname}")
dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
dnode.addExtraCfg("monitorFqdn", hostname)
dnode.addExtraCfg("monitorPort", 7043)
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
# configure the first three dnodes to not support vnodes
if independent and (num < 4):
dnode.addExtraCfg("supportVnodes", 0)
dnodes.append(dnode)
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
self.TDDnodes.setValgrind(valgrind)
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
# create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# print(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
print(cmd)
os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
def checkdnodes(self,dnodenumber):
count=0
while count < 100:
time.sleep(1)
statusReadyBumber=0
tdSql.query("show dnodes;")
if tdSql.checkRows(dnodenumber) :
print("dnode is %d nodes"%dnodenumber)
for i in range(dnodenumber):
if tdSql.queryResult[i][4] !='ready' :
status=tdSql.queryResult[i][4]
print("dnode:%d status is %s "%(i,status))
break
else:
statusReadyBumber+=1
print(statusReadyBumber)
if statusReadyBumber == dnodenumber :
print("all of %d mnodes is ready in 10s "%dnodenumber)
return True
break
count+=1
else:
print("%d mnodes is not ready in 10s "%dnodenumber)
return False
def check3mnode(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("three mnodes is ready in 10s")
break
count+=1
else:
print("three mnodes is not ready in 10s ")
return -1
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,3,'ready')
def check3mnode1off(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='offline' :
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("stop mnodes on dnode 2 successfully in 10s")
break
elif tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("stop mnodes on dnode 2 successfully in 10s")
break
count+=1
else:
print("stop mnodes on dnode 2 failed in 10s ")
return -1
tdSql.error("drop mnode on dnode 1;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,3,'ready')
def check3mnode2off(self):
count=0
while count < 40:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='offline':
if tdSql.queryResult[2][2]=='follower':
print("stop mnodes on dnode 2 successfully in 10s")
break
count+=1
else:
print("stop mnodes on dnode 2 failed in 10s ")
return -1
tdSql.error("drop mnode on dnode 2;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,2,'offline')
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,2,'follower')
tdSql.checkData(2,3,'ready')
def check3mnode3off(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[2][2]=='offline':
if tdSql.queryResult[1][2]=='follower':
print("stop mnodes on dnode 3 successfully in 10s")
break
count+=1
else:
print("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
tdSql.checkData(1,2,'follower')
tdSql.checkData(1,3,'ready')
tdSql.checkData(2,1,'%s:6230'%self.host)
tdSql.checkData(2,2,'offline')
tdSql.checkData(2,3,'ready')
def five_dnode_three_mnode(self,dnodenumber):
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
# first add three mnodes;
tdSql.execute("create mnode on dnode 2")
tdSql.execute("create mnode on dnode 3")
# first check that the status is ready
self.check3mnode()
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
tdLog.debug("stop all of mnode ")
# separate vnodes and mnodes onto different dnodes.
# create database and stable
stopcount =0
while stopcount < 2:
for i in range(dnodenumber):
# threads=[]
# threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
threads=threading.Thread(target=self.insert_data, args=(i,i+1))
threads.start()
self.TDDnodes.stoptaosd(i+1)
self.TDDnodes.starttaosd(i+1)
if self.checkdnodes(5):
print("all dnodes are ready")
threads.join()
else:
print("some dnodes are not ready")
self.stop_thread(threads)
assert 1 == 2 ,"some dnode started failed"
return False
# self.check3mnode()
self.check3mnode()
stopcount+=1
self.check3mnode()
def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
port = dnode.cfgDict["serverPort"]
config_dir = dnode.cfgDir
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
# print(self.master_dnode.cfgDict)
self.buildcluster(5)
self.five_dnode_three_mnode(5)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -192,7 +192,7 @@ class TDTestCase:
time.sleep(1)
tdLog.info("start consume processor")
pollDelay = 20
showMsg = 1
showRow = 1
@ -208,7 +208,7 @@ class TDTestCase:
os.system(shellCmd)
# wait for data ready
# prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
while 1:

View File

@ -322,176 +322,6 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 5 end ...... ") tdLog.printNoPrefix("======== test case 5 end ...... ")
def tmqCase6(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db60', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 5000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
self.initConsumerTable()
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
parameterDict2 = {'cfg': '', \
'dbName': 'db61', \
'vgroups': 4, \
'stbName': 'stb2', \
'ctbNum': 10, \
'rowsPerTbl': 5000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))
prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
prepareEnvThread2.start()
tdLog.info("create topics from db")
topicName1 = 'topic_db60'
topicName2 = 'topic_db61'
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
topicList = topicName1 + ',' + topicName2
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
#consumerId = 1
#self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
event.wait()
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
prepareEnvThread.join()
prepareEnvThread2.join()
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicName1)
tdSql.query("drop topic %s"%topicName2)
tdLog.printNoPrefix("======== test case 6 end ...... ")
def tmqCase7(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db70', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 5000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
self.initConsumerTable()
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
parameterDict2 = {'cfg': '', \
'dbName': 'db71', \
'vgroups': 4, \
'stbName': 'stb2', \
'ctbNum': 10, \
'rowsPerTbl': 5000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))
prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
prepareEnvThread2.start()
tdLog.info("create topics from db")
topicName1 = 'topic_db60'
topicName2 = 'topic_db61'
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
topicList = topicName1 + ',' + topicName2
ifcheckdata = 0
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 1
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
event.wait()
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
prepareEnvThread.join()
prepareEnvThread2.join()
tdLog.info("insert process end, and start to check consume result")
expectRows = 2
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicName1)
tdSql.query("drop topic %s"%topicName2)
tdLog.printNoPrefix("======== test case 7 end ...... ")
def run(self):
tdSql.prepare()
@ -505,8 +335,6 @@ class TDTestCase:
self.tmqCase4(cfgPath, buildPath)
self.tmqCase5(cfgPath, buildPath)
self.tmqCase6(cfgPath, buildPath)
self.tmqCase7(cfgPath, buildPath)
def stop(self):

View File

@ -72,10 +72,10 @@ class TDTestCase:
if tdSql.getRows() == expectRows:
break
else:
time.sleep(5)
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
return resultList
@ -85,7 +85,7 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
@ -97,7 +97,7 @@ class TDTestCase:
tdLog.info(shellCmd)
os.system(shellCmd)
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum):
tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
@ -151,8 +151,7 @@ class TDTestCase:
parameterDict["dbName"],\ parameterDict["dbName"],\
parameterDict["vgroups"],\ parameterDict["vgroups"],\
parameterDict["stbName"],\ parameterDict["stbName"],\
parameterDict["ctbNum"],\ parameterDict["ctbNum"])
parameterDict["rowsPerTbl"])
self.insert_data(tsql,\ self.insert_data(tsql,\
parameterDict["dbName"],\ parameterDict["dbName"],\
@ -163,16 +162,16 @@ class TDTestCase:
parameterDict["startTs"]) parameterDict["startTs"])
return return
def tmqCase6(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 6: Produce while one consumer subscribes to two topics, each containing one db")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db60', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 5000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
@ -183,14 +182,32 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
parameterDict2 = {'cfg': '', \
'dbName': 'db61', \
'vgroups': 4, \
'stbName': 'stb2', \
'ctbNum': 10, \
'rowsPerTbl': 5000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))
prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
prepareEnvThread2.start()
tdLog.info("create topics from db") tdLog.info("create topics from db")
topicName1 = 'topic_db1' topicName1 = 'topic_db60'
topicName2 = 'topic_db61'
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0 consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
topicList = topicName1 topicList = topicName1 + ',' + topicName2
ifcheckdata = 0 ifcheckdata = 0
ifManualCommit = 0 ifManualCommit = 0
keyList = 'group.id:cgrp1,\ keyList = 'group.id:cgrp1,\
@ -199,6 +216,9 @@ class TDTestCase:
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
#consumerId = 1
#self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
event.wait()
tdLog.info("start consume processor")
@ -208,7 +228,8 @@ class TDTestCase:
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
prepareEnvThread.join()
prepareEnvThread2.join()
tdLog.info("insert process end, and start to check consume result") tdLog.info("insert process end, and start to check consume result")
expectRows = 1 expectRows = 1
@ -221,36 +242,21 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!") tdLog.exit("tmq consume rows error!")
tdLog.info("again start consume processer")
self.initConsumerTable()
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicName1) tdSql.query("drop topic %s"%topicName1)
tdSql.query("drop topic %s"%topicName2)
tdLog.printNoPrefix("======== test case 8 end ...... ") tdLog.printNoPrefix("======== test case 6 end ...... ")
def tmqCase7(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 7: Produce while two consumers subscribe to two topics, each containing one db")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db70', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 5000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
@ -261,14 +267,32 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
parameterDict2 = {'cfg': '', \
'dbName': 'db71', \
'vgroups': 4, \
'stbName': 'stb2', \
'ctbNum': 10, \
'rowsPerTbl': 5000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))
prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
prepareEnvThread2.start()
tdLog.info("create topics from db") tdLog.info("create topics from db")
topicName1 = 'topic_db1' topicName1 = 'topic_db60'
topicName2 = 'topic_db61'
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0 consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
topicList = topicName1 topicList = topicName1 + ',' + topicName2
ifcheckdata = 0 ifcheckdata = 0
ifManualCommit = 1 ifManualCommit = 1
keyList = 'group.id:cgrp1,\ keyList = 'group.id:cgrp1,\
@ -277,86 +301,7 @@ class TDTestCase:
auto.offset.reset:earliest' auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 1
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query("select count(*) from %s.%s" %(parameterDict['dbName'], parameterDict['stbName']))
countOfStb = tdSql.getData(0,0)
print ("====total rows of stb: %d"%countOfStb)
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
if totalConsumeRows < expectrowcnt:
tdLog.exit("tmq consume rows error!")
tdLog.info("again start consume processer")
self.initConsumerTable()
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows2 = 0
for i in range(expectRows):
totalConsumeRows2 += resultList[i]
tdLog.info("firstly act consume rows: %d"%(totalConsumeRows))
tdLog.info("secondly act consume rows: %d, expect consume rows: %d"%(totalConsumeRows2, expectrowcnt))
if totalConsumeRows + totalConsumeRows2 != expectrowcnt:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicName1)
tdLog.printNoPrefix("======== test case 9 end ...... ")
def tmqCase10(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 10: Produce while one consume to subscribe one db, inclue 1 stb")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db10', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
self.initConsumerTable()
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
topicList = topicName1
ifcheckdata = 0
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
event.wait()
@ -367,23 +312,12 @@ class TDTestCase:
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
time.sleep(2)
tdLog.info("pkill consume processor")
if (platform.system().lower() == 'windows'):
os.system("TASKKILL /F /IM tmq_sim.exe")
else:
os.system('pkill tmq_sim')
expectRows = 0
resultList = self.selectConsumeResult(expectRows)
# wait for data ready
prepareEnvThread.join()
prepareEnvThread2.join()
tdLog.info("insert process end, and start to check consume result")
expectRows = 2
tdLog.info("again start consume processer")
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
@ -393,85 +327,10 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!") tdLog.exit("tmq consume rows error!")
time.sleep(15)
tdSql.query("drop topic %s"%topicName1) tdSql.query("drop topic %s"%topicName1)
tdSql.query("drop topic %s"%topicName2)
tdLog.printNoPrefix("======== test case 10 end ...... ") tdLog.printNoPrefix("======== test case 7 end ...... ")
def tmqCase11(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 11: Produce while one consume to subscribe one db, inclue 1 stb")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db11', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
self.initConsumerTable()
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
topicList = topicName1
ifcheckdata = 0
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
event.wait()
tdLog.info("start consume processor")
pollDelay = 20
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
time.sleep(6)
tdLog.info("pkill consume processor")
if (platform.system().lower() == 'windows'):
os.system("TASKKILL /F /IM tmq_sim.exe")
else:
os.system('pkill tmq_sim')
expectRows = 0
resultList = self.selectConsumeResult(expectRows)
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
tdLog.info("again start consume processer")
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows >= expectrowcnt or totalConsumeRows <= 0:
tdLog.info("act consume rows: %d, expect consume rows between %d and 0"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
time.sleep(15)
tdSql.query("drop topic %s"%topicName1)
tdLog.printNoPrefix("======== test case 11 end ...... ")
def run(self):
tdSql.prepare()
@ -484,10 +343,9 @@ class TDTestCase:
cfgPath = buildPath + "/../sim/psim/cfg"
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase6(cfgPath, buildPath)
self.tmqCase7(cfgPath, buildPath)
self.tmqCase10(cfgPath, buildPath)
self.tmqCase11(cfgPath, buildPath)
def stop(self):
tdSql.close()

View File

@ -0,0 +1,347 @@
import taos
import sys
import time
import socket
import os
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
class TDTestCase:
hostname = socket.gethostname()
#rpcDebugFlagVal = '143'
#clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def newcur(self,cfg,host,port):
user = "root"
password = "taosdata"
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor()
print(cur)
return cur
def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
tdSql.query(sql)
def selectConsumeResult(self,expectRows,cdbName='cdb'):
resultList=[]
while 1:
tdSql.query("select * from %s.consumeresult"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == expectRows:
break
else:
time.sleep(5)
for i in range(expectRows):
tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
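# Launch the tmq_sim consumer binary in the background. The flag mapping is
# inferred from the variable names: -y poll delay, -d database, -g show-message
# flag, -r show-row flag, -w result (control) database.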
if valgrind == 1:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
sql = pre_create
#tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
for i in range(ctbNum):
sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
if (i > 0) and (i%100 == 0):
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
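# Signal the main thread that all tables exist; inserts continue in this thread
# while the consumer is started in parallel.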
event.set()
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs):
tdLog.debug("start to insert data ............")
tsql.execute("use %s" %dbName)
pre_insert = "insert into "
sql = pre_insert
t = time.time()
startTs = int(round(t * 1000))
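# Note: the startTs argument is effectively ignored; rows are stamped starting
# from the current wall-clock time in milliseconds instead.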
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
tsql.execute(sql)
if j < rowsPerTbl - 1:
sql = "insert into %s_%d values " %(stbName,i)
else:
sql = "insert into "
#end sql
if sql != pre_insert:
#print("insert sql:%s"%sql)
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
self.create_tables(tsql,\
parameterDict["dbName"],\
parameterDict["vgroups"],\
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"])
self.insert_data(tsql,\
parameterDict["dbName"],\
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
parameterDict["startTs"])
return
def tmqCase8(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 8: Produce while one consume to subscribe one db, inclue 1 stb")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db8', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
self.initConsumerTable()
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
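# Database-level topic: subscribing to it delivers rows from every table in the
# db, including all child tables of the stb.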
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] // 2
topicList = topicName1
ifcheckdata = 0
ifManualCommit = 0
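# Consumer config handed to tmq_sim: auto-commit disabled, start from the
# earliest offset; the consumer is expected to stop after expectrowcnt rows
# (half of what the producer writes).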
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
event.wait()
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
tdLog.info("again start consume processer")
self.initConsumerTable()
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicName1)
tdLog.printNoPrefix("======== test case 8 end ...... ")
def tmqCase9(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 9: Produce while one consume to subscribe one db, inclue 1 stb")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db9', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
self.initConsumerTable()
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] // 2
topicList = topicName1
ifcheckdata = 0
ifManualCommit = 1
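# Manual commit is enabled here: the first consumer commits its offsets, so a
# second consumer in the same group should receive only the remaining rows
# (the test checks totalConsumeRows + totalConsumeRows2 == expectrowcnt).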
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
event.wait()
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query("select count(*) from %s.%s" %(parameterDict['dbName'], parameterDict['stbName']))
countOfStb = tdSql.getData(0,0)
print ("====total rows of stb: %d"%countOfStb)
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
if totalConsumeRows < expectrowcnt:
tdLog.exit("tmq consume rows error!")
tdLog.info("again start consume processer")
self.initConsumerTable()
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows2 = 0
for i in range(expectRows):
totalConsumeRows2 += resultList[i]
tdLog.info("firstly act consume rows: %d"%(totalConsumeRows))
tdLog.info("secondly act consume rows: %d, expect consume rows: %d"%(totalConsumeRows2, expectrowcnt))
if totalConsumeRows + totalConsumeRows2 != expectrowcnt:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicName1)
tdLog.printNoPrefix("======== test case 9 end ...... ")
def run(self):
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
cfgPath = buildPath + "/../sim/psim/cfg"
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase8(cfgPath, buildPath)
self.tmqCase9(cfgPath, buildPath)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,337 @@
import taos
import sys
import time
import socket
import os
import threading
import platform
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
class TDTestCase:
hostname = socket.gethostname()
#rpcDebugFlagVal = '143'
#clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def newcur(self,cfg,host,port):
user = "root"
password = "taosdata"
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor()
print(cur)
return cur
def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
tdSql.query(sql)
def selectConsumeResult(self,expectRows,cdbName='cdb'):
resultList=[]
while 1:
tdSql.query("select * from %s.consumeresult"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == expectRows:
break
else:
time.sleep(5)
for i in range(expectRows):
tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
if valgrind == 1:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
sql = pre_create
#tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
for i in range(ctbNum):
sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
if (i > 0) and (i%100 == 0):
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
event.set()
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs):
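# Rows are generated per child table and flushed every batchNum rows so that
# each INSERT statement stays bounded in size.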
tdLog.debug("start to insert data ............")
tsql.execute("use %s" %dbName)
pre_insert = "insert into "
sql = pre_insert
t = time.time()
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
tsql.execute(sql)
if j < rowsPerTbl - 1:
sql = "insert into %s_%d values " %(stbName,i)
else:
sql = "insert into "
#end sql
if sql != pre_insert:
#print("insert sql:%s"%sql)
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
self.create_tables(tsql,\
parameterDict["dbName"],\
parameterDict["vgroups"],\
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"])
self.insert_data(tsql,\
parameterDict["dbName"],\
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
parameterDict["startTs"])
return
def tmqCase10(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 10: Produce while one consume to subscribe one db, inclue 1 stb")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db10', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
self.initConsumerTable()
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
topicList = topicName1
ifcheckdata = 0
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
event.wait()
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
time.sleep(2)
tdLog.info("pkill consume processor")
if (platform.system().lower() == 'windows'):
os.system("TASKKILL /F /IM tmq_sim.exe")
else:
os.system('pkill tmq_sim')
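# The first consumer was killed after ~2s without committing any offsets
# (auto-commit disabled, no manual commit issued), so the restarted consumer
# is expected to re-deliver all expectrowcnt rows.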
expectRows = 0
resultList = self.selectConsumeResult(expectRows)
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
tdLog.info("again start consume processer")
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
time.sleep(15)
tdSql.query("drop topic %s"%topicName1)
tdLog.printNoPrefix("======== test case 10 end ...... ")
def tmqCase11(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 11: Produce while one consume to subscribe one db, inclue 1 stb")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db11', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
self.initConsumerTable()
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
topicList = topicName1
ifcheckdata = 0
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
event.wait()
tdLog.info("start consume processor")
pollDelay = 20
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
time.sleep(6)
tdLog.info("pkill consume processor")
if (platform.system().lower() == 'windows'):
os.system("TASKKILL /F /IM tmq_sim.exe")
else:
os.system('pkill tmq_sim')
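# Auto-commit (1s interval) was on while the first consumer ran for ~6s, so part
# of its progress was committed before the kill; the restarted consumer should
# therefore consume more than 0 but fewer than expectrowcnt rows.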
expectRows = 0
resultList = self.selectConsumeResult(expectRows)
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
tdLog.info("again start consume processer")
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows >= expectrowcnt or totalConsumeRows <= 0:
tdLog.info("act consume rows: %d, expect consume rows between %d and 0"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
time.sleep(15)
tdSql.query("drop topic %s"%topicName1)
tdLog.printNoPrefix("======== test case 11 end ...... ")
def run(self):
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
cfgPath = buildPath + "/../sim/psim/cfg"
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase10(cfgPath, buildPath)
self.tmqCase11(cfgPath, buildPath)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -98,17 +98,25 @@ python3 ./test.py -f 2-query/stateduration.py
python3 ./test.py -f 2-query/function_stateduration.py
python3 ./test.py -f 2-query/statecount.py
python3 ./test.py -f 2-query/tail.py
+python3 ./test.py -f 2-query/distribute_agg_count.py
+python3 ./test.py -f 2-query/distribute_agg_max.py
+python3 ./test.py -f 2-query/distribute_agg_min.py
+python3 ./test.py -f 2-query/distribute_agg_sum.py
+python3 ./test.py -f 2-query/distribute_agg_spread.py
+python3 ./test.py -f 2-query/distribute_agg_apercentile.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py
#python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py
-python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py
+#python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py
python3 ./test.py -f 7-tmq/basic5.py
python3 ./test.py -f 7-tmq/subscribeDb.py
python3 ./test.py -f 7-tmq/subscribeDb0.py
python3 ./test.py -f 7-tmq/subscribeDb1.py
+python3 ./test.py -f 7-tmq/subscribeDb2.py
+python3 ./test.py -f 7-tmq/subscribeDb3.py
python3 ./test.py -f 7-tmq/subscribeStb.py
python3 ./test.py -f 7-tmq/subscribeStb0.py
python3 ./test.py -f 7-tmq/subscribeStb1.py

View File

@ -283,7 +283,8 @@ void dumpTrans(SSdb *pSdb, SJson *json) {
   tjsonAddIntegerToObject(item, "conflict", pObj->conflict);
   tjsonAddIntegerToObject(item, "exec", pObj->exec);
   tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
-  tjsonAddStringToObject(item, "dbname", pObj->dbname);
+  tjsonAddStringToObject(item, "dbname1", pObj->dbname1);
+  tjsonAddStringToObject(item, "dbname2", pObj->dbname2);
   tjsonAddIntegerToObject(item, "commitLogNum", taosArrayGetSize(pObj->commitActions));
   tjsonAddIntegerToObject(item, "redoActionNum", taosArrayGetSize(pObj->redoActions));
   tjsonAddIntegerToObject(item, "undoActionNum", taosArrayGetSize(pObj->undoActions));
@ -294,8 +295,9 @@ void dumpTrans(SSdb *pSdb, SJson *json) {
 void dumpHeader(SSdb *pSdb, SJson *json) {
   tjsonAddIntegerToObject(json, "sver", 1);
-  tjsonAddStringToObject(json, "curVer", i642str(pSdb->curVer));
-  tjsonAddStringToObject(json, "curTerm", i642str(pSdb->curTerm));
+  tjsonAddStringToObject(json, "applyIndex", i642str(pSdb->applyIndex));
+  tjsonAddStringToObject(json, "applyTerm", i642str(pSdb->applyTerm));
+  tjsonAddStringToObject(json, "applyConfig", i642str(pSdb->applyConfig));
   SJson *maxIdsJson = tjsonCreateObject();
   tjsonAddItemToObject(json, "maxIds", maxIdsJson);

tools/taosadapter Submodule

@ -0,0 +1 @@
Subproject commit 9ce3f5c98ef95d9c7c596c4ed7302b0ed69a92b2