Merge remote-tracking branch 'origin/3.0' into feature/qnode
This commit is contained in:
commit d0aa9436e4
@@ -46,11 +46,17 @@ ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/W3 /D_WIN32")
SET(COMMON_FLAGS "/w /D_WIN32")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
#   SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
# ENDIF ()
IF (CMAKE_DEPFILE_FLAGS_C)
SET(CMAKE_DEPFILE_FLAGS_C "")
ENDIF ()
IF (CMAKE_DEPFILE_FLAGS_CXX)
SET(CMAKE_DEPFILE_FLAGS_CXX "")
ENDIF ()

SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")

@@ -46,6 +46,18 @@ IF(${TD_WINDOWS})
ON
)

option(
BUILD_TEST
"If build unit tests using googletest"
OFF
)
ELSE ()

option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
ENDIF ()

option(

@@ -54,12 +66,6 @@ option(
OFF
)

option(
BUILD_TEST
"If build unit tests using googletest"
ON
)

option(
BUILD_WITH_LEVELDB
"If build with leveldb"
@@ -2592,18 +2592,6 @@ static FORCE_INLINE void tDeleteSMqAskEpRsp(SMqAskEpRsp* pRsp) {
taosArrayDestroyEx(pRsp->topics, (void (*)(void*))tDeleteSMqSubTopicEp);
}

typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceVg;
int64_t sourceVer;
SArray* data;  // SArray<SSDataBlock>
} SStreamDispatchReq;

typedef struct {
int8_t inputStatus;
} SStreamDispatchRsp;

#define TD_AUTO_CREATE_TABLE 0x1
typedef struct {
int64_t suid;

@@ -200,6 +200,10 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_TASK_WRITE_EXEC, "vnode-task-write-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)

TD_DEF_MSG_TYPE(TDMT_VND_TASK_RUN, "vnode-stream-task-run", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_DISPATCH, "vnode-stream-task-dispatch", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_RECOVER, "vnode-stream-task-recover", NULL, NULL)

TD_DEF_MSG_TYPE(TDMT_VND_CREATE_SMA, "vnode-create-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)

@@ -189,7 +189,9 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define SET_META_TYPE_BOTH_TABLE(t) (t) = META_TYPE_BOTH_TABLE

#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \
((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST)
((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST || \
(_code) == TSDB_CODE_PAR_INVALID_COLUMNS_NUM || (_code) == TSDB_CODE_PAR_INVALID_COLUMN || \
(_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED)
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED)

@@ -200,7 +202,7 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define NEED_SCHEDULER_RETRY_ERROR(_code) \
((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)

#define REQUEST_MAX_TRY_TIMES 5
#define REQUEST_MAX_TRY_TIMES 1

#define qFatal(...) \
do { \
@@ -107,7 +107,7 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit)
if (ref == 0) {
taosMemoryFree(pDataSubmit->data);
taosMemoryFree(pDataSubmit->dataRef);
taosFreeQitem(pDataSubmit);
// taosFreeQitem(pDataSubmit);
}
}

@@ -286,6 +286,36 @@ typedef struct {
int32_t taskId;
} SStreamTaskRunReq;

typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceTaskId;
int32_t sourceVg;
#if 0
int64_t sourceVer;
#endif
SArray* data;  // SArray<SSDataBlock>
} SStreamDispatchReq;

typedef struct {
int64_t streamId;
int32_t taskId;
int8_t inputStatus;
} SStreamDispatchRsp;

typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceTaskId;
int32_t sourceVg;
} SStreamTaskRecoverReq;

typedef struct {
int64_t streamId;
int32_t taskId;
int8_t inputStatus;
} SStreamTaskRecoverRsp;

int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input);
int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input);
int32_t streamDequeueOutput(SStreamTask* pTask, void** output);

@@ -296,6 +326,12 @@ int32_t streamTaskRun(SStreamTask* pTask);

int32_t streamTaskHandleInput(SStreamTask* pTask, void* data);

int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamTaskProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
int32_t streamTaskProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp);
int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg);
int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp);

#ifdef __cplusplus
}
#endif
@@ -13,18 +13,18 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "cJSON.h"
#include "clientInt.h"
#include "clientLog.h"
#include "command.h"
#include "scheduler.h"
#include "tdatablock.h"
#include "tdataformat.h"
#include "tdef.h"
#include "tglobal.h"
#include "tmsgtype.h"
#include "tpagedbuf.h"
#include "tref.h"
#include "cJSON.h"
#include "tdataformat.h"

static int32_t initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet);
static SMsgSendInfo* buildConnectMsg(SRequestObj* pRequest);

@@ -189,7 +189,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
setResSchemaInfo(&pRequest->body.resInfo, (*pQuery)->pResSchema, (*pQuery)->numOfResCols);
setResPrecision(&pRequest->body.resInfo, (*pQuery)->precision);
}

}
if (TSDB_CODE_SUCCESS == code || NEED_CLIENT_HANDLE_ERROR(code)) {
TSWAP(pRequest->dbList, (*pQuery)->pDbList);
TSWAP(pRequest->tableList, (*pQuery)->pTableList);
}

@@ -503,7 +504,8 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
int32_t retryNum = 0;
int32_t code = 0;

while (retryNum++ < REQUEST_MAX_TRY_TIMES) {
do {
destroyRequest(pRequest);
pRequest = launchQuery(pTscObj, sql, sqlLen);
if (pRequest == NULL || TSDB_CODE_SUCCESS == pRequest->code || !NEED_CLIENT_HANDLE_ERROR(pRequest->code)) {
break;

@@ -514,9 +516,7 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
pRequest->code = code;
break;
}

destroyRequest(pRequest);
}
} while (retryNum++ < REQUEST_MAX_TRY_TIMES);

return pRequest;
}

@@ -828,8 +828,7 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
static char* parseTagDatatoJson(void* p) {
char* string = NULL;
cJSON* json = cJSON_CreateObject();
if (json == NULL)
{
if (json == NULL) {
goto end;
}

@@ -857,8 +856,7 @@ static char* parseTagDatatoJson(void *p){
char type = *val;
if (type == TSDB_DATA_TYPE_NULL) {
cJSON* value = cJSON_CreateNull();
if (value == NULL)
{
if (value == NULL) {
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);

@@ -874,8 +872,7 @@ static char* parseTagDatatoJson(void *p){
}
value = cJSON_CreateString(tagJsonValue);
taosMemoryFree(tagJsonValue);
if (value == NULL)
{
if (value == NULL) {
goto end;
}
} else if (varDataLen(realData) == 0) {

@@ -888,8 +885,7 @@ static char* parseTagDatatoJson(void *p){
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
double jsonVd = *(double*)(realData);
cJSON* value = cJSON_CreateNumber(jsonVd);
if (value == NULL)
{
if (value == NULL) {
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);

@@ -904,15 +900,13 @@ static char* parseTagDatatoJson(void *p){
} else if (type == TSDB_DATA_TYPE_BOOL) {
char jsonVd = *(char*)(realData);
cJSON* value = cJSON_CreateBool(jsonVd);
if (value == NULL)
{
if (value == NULL) {
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
} else {
ASSERT(0);
}

}
string = cJSON_PrintUnformatted(json);
end:

@@ -963,7 +957,6 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
if (pCol->offset[j] != -1) {
char* pStart = pCol->offset[j] + pCol->pData;

int32_t jsonInnerType = *pStart;
char* jsonInnerData = pStart + CHAR_BYTES;
char dst[TSDB_MAX_JSON_TAG_LEN] = {0};
@@ -58,13 +58,9 @@ for (int i = 1; i < keyLen; ++i) { \
#define IS_INVALID_COL_LEN(len) ((len) <= 0 || (len) >= TSDB_COL_NAME_LEN)
#define IS_INVALID_TABLE_LEN(len) ((len) <= 0 || (len) >= TSDB_TABLE_NAME_LEN)

#define OTD_MAX_FIELDS_NUM 2
#define OTD_JSON_SUB_FIELDS_NUM 2
#define OTD_JSON_FIELDS_NUM 4

#define OTD_TIMESTAMP_COLUMN_NAME "ts"
#define OTD_METRIC_VALUE_COLUMN_NAME "value"

#define TS "_ts"
#define TS_LEN 3
#define TAG "_tag"

@@ -728,57 +724,43 @@ static int64_t smlGetTimeValue(const char *value, int32_t len, int8_t type) {
if(value + len != endPtr){
return -1;
}
if(tsInt64 == 0){
return taosGetTimestampNs();
}
double ts = tsInt64;
switch (type) {
case TSDB_TIME_PRECISION_HOURS:
ts *= (3600 * 1e9);
tsInt64 *= (3600 * 1e9);
ts *= NANOSECOND_PER_HOUR;
tsInt64 *= NANOSECOND_PER_HOUR;
break;
case TSDB_TIME_PRECISION_MINUTES:
ts *= (60 * 1e9);
tsInt64 *= (60 * 1e9);
ts *= NANOSECOND_PER_MINUTE;
tsInt64 *= NANOSECOND_PER_MINUTE;
break;
case TSDB_TIME_PRECISION_SECONDS:
ts *= (1e9);
tsInt64 *= (1e9);
ts *= NANOSECOND_PER_SEC;
tsInt64 *= NANOSECOND_PER_SEC;
break;
case TSDB_TIME_PRECISION_MILLI:
ts *= (1e6);
tsInt64 *= (1e6);
ts *= NANOSECOND_PER_MSEC;
tsInt64 *= NANOSECOND_PER_MSEC;
break;
case TSDB_TIME_PRECISION_MICRO:
ts *= (1e3);
tsInt64 *= (1e3);
ts *= NANOSECOND_PER_USEC;
tsInt64 *= NANOSECOND_PER_USEC;
break;
case TSDB_TIME_PRECISION_NANO:
break;
default:
ASSERT(0);
}
if(ts >= (double)INT64_MAX || ts <= 0){
if(ts >= (double)INT64_MAX || ts < 0){
return -1;
}

return tsInt64;
}

static int64_t smlGetTimeNow(int8_t precision) {
switch (precision) {
case TSDB_TIME_PRECISION_HOURS:
return taosGetTimestampMs()/1000/3600;
case TSDB_TIME_PRECISION_MINUTES:
return taosGetTimestampMs()/1000/60;
case TSDB_TIME_PRECISION_SECONDS:
return taosGetTimestampMs()/1000;
case TSDB_TIME_PRECISION_MILLI:
case TSDB_TIME_PRECISION_MICRO:
case TSDB_TIME_PRECISION_NANO:
return taosGetTimestamp(precision);
default:
ASSERT(0);
}
}

static int8_t smlGetTsTypeByLen(int32_t len) {
if (len == TSDB_TIME_PRECISION_SEC_DIGITS) {
return TSDB_TIME_PRECISION_SECONDS;

@@ -810,14 +792,15 @@ static int8_t smlGetTsTypeByPrecision(int8_t precision) {
}

static int64_t smlParseInfluxTime(SSmlHandle* info, const char* data, int32_t len){
if(len == 0){
return taosGetTimestamp(TSDB_TIME_PRECISION_NANO);
}

int8_t tsType = smlGetTsTypeByPrecision(info->precision);
if (tsType == -1) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp precision", NULL);
return -1;
}
if(len == 0){
return smlGetTimeNow(tsType);
}

int64_t ts = smlGetTimeValue(data, len, tsType);
if(ts == -1){

@@ -1619,7 +1602,8 @@ static int32_t smlParseTSFromJSON(SSmlHandle *info, cJSON *root, SArray *cols) {
smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
if(timeDouble <= 0){

if(timeDouble < 0){
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
uint8_t tsLen = smlGetTimestampLen((int64_t)timeDouble);

@@ -1637,6 +1621,8 @@ static int32_t smlParseTSFromJSON(SSmlHandle *info, cJSON *root, SArray *cols) {
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
tsVal = timeDouble;
} else if(timeDouble == 0){
tsVal = taosGetTimestampNs();
}else {
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
@@ -567,6 +567,7 @@ TEST(testCase, insert_test) {
taos_free_result(pRes);
taos_close(pConn);
}
#endif

TEST(testCase, projection_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);

@@ -605,7 +606,7 @@ TEST(testCase, projection_query_tables) {
}
taos_free_result(pRes);

for(int32_t i = 0; i < 10000000; i += 20) {
for(int32_t i = 0; i < 100000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"

@@ -625,7 +626,7 @@ TEST(testCase, projection_query_tables) {

printf("start to insert next table\n");

for(int32_t i = 0; i < 10000000; i += 20) {
for(int32_t i = 0; i < 100000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"

@@ -692,8 +693,6 @@ TEST(testCase, projection_query_stables) {
taos_close(pConn);
}

#endif

TEST(testCase, agg_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@@ -1203,24 +1203,17 @@ TEST(testCase, sml_TD15662_Test) {
SRequestObj *request = (SRequestObj *)createRequest((STscObj *)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);

SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
ASSERT_NE(info, nullptr);

const char *sql[] = {
"iyyyje,id=iyyyje_41943_1303,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
"hetrey,id=sub_table_0123456,t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64",
};
int ret = smlProcess(info, (char **)sql, sizeof(sql) / sizeof(sql[0]));
ASSERT_EQ(ret, 0);

// case 1
TAOS_RES *res = taos_query(taos, "select * from t_a5615048edae55218a22a149edebdc82");
ASSERT_NE(res, nullptr);

TAOS_ROW row = taos_fetch_row(res);
int64_t ts = *(int64_t*)row[0];
ASSERT_EQ(ts, 1626006833639000000);

taos_free_result(res);
destroyRequest(request);
smlDestroyInfo(info);
}

TEST(testCase, sml_TD15735_Test) {

@@ -1262,11 +1255,11 @@ TEST(testCase, sml_TD15742_Test) {
SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);

SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
ASSERT_NE(info, nullptr);

const char *sql[] = {
"zgzbix 1626006833641 False id=zgzbix_992_38861 t0=t t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7=\"binaryTagValue\" t8=L\"ncharTagValue\"",
"test_ms,t0=t c0=f 1626006833641",
};
int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
ASSERT_EQ(ret, 0);
@@ -126,7 +126,9 @@ int32_t mmPutRpcMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return mmPutRpcMsgToWorker(&pMgmt->readWorker, pMsg);
}

int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { return mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg); }
int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg);
}

int32_t mmStartWorker(SMnodeMgmt *pMgmt) {
SSingleWorkerCfg qCfg = {

@@ -314,6 +314,10 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_MERGE_EXEC, vmPutNodeMsgToMergeQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_WRITE_EXEC, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RUN, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DISPATCH, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RECOVER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;

if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_VNODE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_VNODE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
@@ -22,21 +22,19 @@
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
SRpcMsg rsp = {
.code = code,
.info = pMsg->info,
.pCont = pMsg->info.rsp,
.contLen = pMsg->info.rspLen,
.info = pMsg->info,
};
tmsgSendRsp(&rsp);
}

static void vmProcessMgmtMonitorQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
SVnodeMgmt *pMgmt = pInfo->ahandle;

int32_t code = -1;
tmsg_t msgType = pMsg->msgType;
dTrace("msg:%p, get from vnode queue, type:%s", pMsg, TMSG_INFO(msgType));
dTrace("msg:%p, get from vnode queue, type:%s", pMsg, TMSG_INFO(pMsg->msgType));

switch (msgType) {
switch (pMsg->msgType) {
case TDMT_MON_VM_INFO:
code = vmProcessGetMonitorInfoReq(pMgmt, pMsg);
break;

@@ -54,7 +52,7 @@ static void vmProcessMgmtMonitorQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
dError("msg:%p, not processed in vnode queue", pMsg);
}

if (msgType & 1u) {
if (IsReq(pMsg)) {
if (code != 0 && terrno != 0) code = terrno;
vmSendRsp(pMsg, code);
}

@@ -72,12 +70,11 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
if (code != 0) {
if (terrno != 0) code = terrno;
vmSendRsp(pMsg, code);

}
dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
}
}

static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
SVnodeObj *pVnode = pInfo->ahandle;

@@ -87,16 +84,14 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
if (code != 0) {
if (terrno != 0) code = terrno;
vmSendRsp(pMsg, code);

}
dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
}
}

static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;

SArray * pArray = taosArrayInit(numOfMsgs, sizeof(SRpcMsg *));
if (pArray == NULL) {
dError("failed to process %d msgs in write-queue since %s", numOfMsgs, terrstr());

@@ -116,7 +111,7 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO

for (int i = 0; i < taosArrayGetSize(pArray); i++) {
SRpcMsg *pMsg = *(SRpcMsg **)taosArrayGet(pArray, i);
SRpcMsg rsp = {.info = pMsg->info, .pCont = NULL, .contLen = 0};
SRpcMsg rsp = {.info = pMsg->info};

int32_t ret = syncPropose(vnodeGetSyncHandle(pVnode->pImpl), pMsg, false);
if (ret == TAOS_SYNC_PROPOSE_NOT_LEADER) {

@@ -130,7 +125,6 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
rsp.code = TSDB_CODE_SYN_INTERNAL_ERROR;
tmsgSendRsp(&rsp);
} else if (ret == TAOS_SYNC_PROPOSE_SUCCESS) {
// ok
// send response in applyQ
} else {
assert(0);

@@ -149,16 +143,13 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO

static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;
SRpcMsg *pMsg = NULL;
SRpcMsg rsp;

for (int32_t i = 0; i < numOfMsgs; ++i) {
SRpcMsg *pMsg = NULL;
taosGetQitem(qall, (void **)&pMsg);

// init response rpc msg
rsp.code = 0;
rsp.pCont = NULL;
rsp.contLen = 0;
SRpcMsg rsp = {0};

// get original rpc msg
assert(pMsg->msgType == TDMT_VND_SYNC_APPLY_MSG);

@@ -177,7 +168,6 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
rpcFreeCont(originalRpcMsg.pCont);

// if leader, send response
// if (pMsg->rpcMsg.handle != NULL && pMsg->rpcMsg.ahandle != NULL) {
if (pMsg->info.handle != NULL) {
rsp.info = pMsg->info;
tmsgSendRsp(&rsp);

@@ -190,21 +180,19 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO

static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;
SRpcMsg *pMsg = NULL;

for (int32_t i = 0; i < numOfMsgs; ++i) {
SRpcMsg *pMsg = NULL;
taosGetQitem(qall, (void **)&pMsg);

// todo
SRpcMsg *pRsp = NULL;
int32_t ret = vnodeProcessSyncReq(pVnode->pImpl, pMsg, &pRsp);
if (ret != 0) {
// if leader, send response
int32_t code = vnodeProcessSyncReq(pVnode->pImpl, pMsg, NULL);
if (code != 0) {
if (pMsg->info.handle != NULL) {
SRpcMsg rsp = {0};
rsp.code = terrno;
rsp.info = pMsg->info;
dTrace("msg:%p, process sync queue error since code:%s", pMsg, terrstr());
SRpcMsg rsp = {
.code = (terrno < 0) ? terrno : code,
.info = pMsg->info,
};
dTrace("msg:%p, failed to process sync queue since %s", pMsg, terrstr());
tmsgSendRsp(&rsp);
}
}

@@ -216,9 +204,9 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf

static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;
SRpcMsg *pMsg = NULL;

for (int32_t i = 0; i < numOfMsgs; ++i) {
SRpcMsg *pMsg = NULL;
taosGetQitem(qall, (void **)&pMsg);

dTrace("msg:%p, get from vnode-merge queue", pMsg);

@@ -226,13 +214,12 @@ static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
if (code != 0) {
if (terrno != 0) code = terrno;
vmSendRsp(pMsg, code);

}
dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
}
}
}

static int32_t vmPutNodeMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
SRpcMsg * pRpc = pMsg;

@@ -308,7 +295,6 @@ int32_t vmPutNodeMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {

int32_t vmPutNodeMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SSingleWorker *pWorker = &pMgmt->monitorWorker;

dTrace("msg:%p, put into vnode-monitor worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
taosWriteQitem(pWorker->queue, pMsg);
return 0;

@@ -316,14 +302,17 @@ int32_t vmPutNodeMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {

static int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc, EQueueType qtype) {
SMsgHead * pHead = pRpc->pCont;

SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
if (pVnode == NULL) return -1;

SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
int32_t code = 0;

if (pMsg != NULL) {
if (pMsg == NULL) {
rpcFreeCont(pRpc->pCont);
pRpc->pCont = NULL;
code = -1;
} else {
memcpy(pMsg, pRpc, sizeof(SRpcMsg));
switch (qtype) {
case WRITE_QUEUE:

@@ -428,7 +417,7 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
return -1;
}

dDebug("vgId:%d, vnode queue is alloced", pVnode->vgId);
dDebug("vgId:%d, queue is alloced", pVnode->vgId);
return 0;
}

@@ -445,7 +434,7 @@ void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
pVnode->pQueryQ = NULL;
pVnode->pFetchQ = NULL;
pVnode->pMergeQ = NULL;
dDebug("vgId:%d, vnode queue is freed", pVnode->vgId);
dDebug("vgId:%d, queue is freed", pVnode->vgId);
}

int32_t vmStartWorker(SVnodeMgmt *pMgmt) {

@@ -496,7 +485,7 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
dError("failed to start mnode vnode-monitor worker since %s", terrstr());
dError("failed to start vnode-monitor worker since %s", terrstr());
return -1;
}
@@ -161,6 +161,7 @@ void dmGetVnodeLoads(SMonVloadInfo *pInfo) {
void dmGetMnodeLoads(SMonMloadInfo *pInfo) {
SDnode *pDnode = dmInstance();
SMgmtWrapper *pWrapper = &pDnode->wrappers[MNODE];
if (dmMarkWrapper(pWrapper) == 0) {
if (tsMultiProcess) {
dmSendLocalRecv(pDnode, TDMT_MON_MM_LOAD, tDeserializeSMonMloadInfo, pInfo);
} else if (pWrapper->pMgmt != NULL) {

@@ -168,3 +169,4 @@ void dmGetMnodeLoads(SMonMloadInfo *pInfo) {
}
dmReleaseWrapper(pWrapper);
}
}
@@ -126,6 +126,8 @@ STqReadHandle *tqInitSubmitMsgScanner(SMeta *pMeta);
void tqReadHandleSetColIdList(STqReadHandle *pReadHandle, SArray *pColIdList);
int32_t tqReadHandleSetTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int32_t tqReadHandleAddTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int32_t tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList);

int32_t tqReadHandleSetMsg(STqReadHandle *pHandle, SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock(STqReadHandle *pHandle);
bool tqNextDataBlockFilterOut(STqReadHandle *pHandle, SHashObj *filterOutUids);

@@ -121,10 +121,18 @@ int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId);
int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t workerId);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId);
int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen);
#if 0
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId);
int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t workerId);
#endif
int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* data);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg);

// sma
int32_t smaOpen(SVnode* pVnode);
@@ -106,11 +106,10 @@ static void tdSRowDemo() {

int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
void* pIter = NULL;
STqExec* pExec = NULL;
while (1) {
pIter = taosHashIterate(pTq->execs, pIter);
if (pIter == NULL) break;
pExec = (STqExec*)pIter;
STqExec* pExec = (STqExec*)pIter;
if (pExec->subType == TOPIC_SUB_TYPE__DB) {
if (!isAdd) {
int32_t sz = taosArrayGetSize(tbUidList);

@@ -129,7 +128,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
return 0;
}

int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver, SRpcHandleInfo handleInfo) {
if (msgType != TDMT_VND_SUBMIT) return 0;
void* pIter = NULL;
STqExec* pExec = NULL;

@@ -239,10 +238,9 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_

void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqDataBlkRsp(&abuf, &rsp);
pMsg->pCont = buf;
pMsg->contLen = tlen;
pMsg->code = 0;
tmsgSendRsp(pMsg);

SRpcMsg resp = {.info = handleInfo, .pCont = buf, .contLen = tlen, .code = 0};
tmsgSendRsp(&resp);

atomic_store_ptr(&pExec->pushHandle.handle, NULL);
taosWUnLockLatch(&pExec->pushHandle.lock);

@@ -275,6 +273,9 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
}
memcpy(data, msg, msgLen);

tqProcessStreamTriggerNew(pTq, data);

#if 0
SRpcMsg req = {
.msgType = TDMT_VND_STREAM_TRIGGER,
.pCont = data,

@@ -282,6 +283,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
};

tmsgPutToQueue(&pTq->pVnode->msgCb, FETCH_QUEUE, &req);
#endif

return 0;
}

@@ -663,10 +665,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {

void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqDataBlkRsp(&abuf, &rsp);
pMsg->pCont = buf;
pMsg->contLen = tlen;
pMsg->code = 0;
tmsgSendRsp(pMsg);

SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0};
tmsgSendRsp(&resp);

tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
TD_VID(pTq->pVnode), fetchOffset, consumerId, pReq->epoch, rsp.blockNum, rsp.reqOffset, rsp.rspOffset);

@@ -845,12 +846,10 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
/*rsp.pBlockData = pRes;*/

/*taosArrayDestroyEx(rsp.pBlockData, (void (*)(void*))tDeleteSSDataBlock);*/
pMsg->pCont = buf;
pMsg->contLen = msgLen;
pMsg->code = 0;
SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = msgLen, .code = 0};
tqDebug("vg %d offset %ld msgType %d from consumer %ld (epoch %d) actual rsp", TD_VID(pTq->pVnode), fetchOffset,
pHead->msgType, consumerId, pReq->epoch);
tmsgSendRsp(pMsg);
tmsgSendRsp(&resp);
taosMemoryFree(pHead);
return 0;
} else {

@@ -878,10 +877,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {

void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqPollRspV2(&abuf, &rspV2);
pMsg->pCont = buf;
pMsg->contLen = tlen;
pMsg->code = 0;
tmsgSendRsp(pMsg);

SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0};
tmsgSendRsp(&resp);
tqDebug("vg %d offset %ld from consumer %ld (epoch %d) not rsp", TD_VID(pTq->pVnode), fetchOffset, consumerId,
pReq->epoch);
/*}*/

@@ -980,12 +978,24 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
}

int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) {
pTask->status = TASK_STATUS__IDLE;
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;

pTask->inputQ = taosOpenQueue();
pTask->outputQ = taosOpenQueue();
pTask->inputQAll = taosAllocateQall();
pTask->outputQAll = taosAllocateQall();

if (pTask->inputQ == NULL || pTask->outputQ == NULL || pTask->inputQAll == NULL || pTask->outputQAll == NULL)
goto FAIL;

if (pTask->execType != TASK_EXEC__NONE) {
// expand runners
pTask->exec.numOfRunners = parallel;
pTask->exec.runners = taosMemoryCalloc(parallel, sizeof(SStreamRunner));
if (pTask->exec.runners == NULL) {
return -1;
goto FAIL;
}
for (int32_t i = 0; i < parallel; i++) {
STqReadHandle* pStreamReader = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);

@@ -1007,6 +1017,13 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) {
}

return 0;
FAIL:
if (pTask->inputQ) taosCloseQueue(pTask->inputQ);
if (pTask->outputQ) taosCloseQueue(pTask->outputQ);
if (pTask->inputQAll) taosFreeQall(pTask->inputQAll);
if (pTask->outputQAll) taosFreeQall(pTask->outputQAll);
if (pTask) taosMemoryFree(pTask);
return -1;
}

int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) {

@@ -1058,6 +1075,7 @@ int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t wo
return 0;
}

#if 0
int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* data) {
SStreamDataSubmit* pSubmit = NULL;

@@ -1108,6 +1126,7 @@ FAIL:
}
return -1;
}
#endif

int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId) {
SStreamTaskExecReq req;

@@ -1125,25 +1144,28 @@ int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId)
return 0;
}

int32_t tqProcessStreamTrigger2(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* pReq) {
void* pIter = NULL;
bool failed = false;

SStreamDataSubmit* pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
if (pSubmit == NULL) {
failed = true;
goto SET_TASK_FAIL;
}
pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t));
if (pSubmit->dataRef == NULL) {
failed = true;
goto SET_TASK_FAIL;
}

pSubmit->type = STREAM_DATA_TYPE_SUBMIT_BLOCK;
pSubmit->sourceVer = ver;
pSubmit->sourceVg = pTq->pVnode->config.vgId;
pSubmit->type = STREAM_INPUT__DATA_SUBMIT;
/*pSubmit->sourceVer = ver;*/
/*pSubmit->sourceVg = pTq->pVnode->config.vgId;*/
pSubmit->data = pReq;
*pSubmit->dataRef = 1;

SET_TASK_FAIL:
while (1) {
pIter = taosHashIterate(pTq->pStreamTasks, pIter);
if (pIter == NULL) break;

@@ -1162,7 +1184,18 @@ int32_t tqProcessStreamTrigger2(STQ* pTq, SSubmitReq* pReq, int64_t ver) {

int8_t execStatus = atomic_load_8(&pTask->status);
if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) {
// TODO dispatch task launch msg to fetch queue
SStreamTaskRunReq* pRunReq = taosMemoryMalloc(sizeof(SStreamTaskRunReq));
if (pRunReq == NULL) continue;
// TODO: do we need htonl?
pRunReq->head.vgId = pTq->pVnode->config.vgId;
pRunReq->streamId = pTask->streamId;
pRunReq->taskId = pTask->taskId;
SRpcMsg msg = {
.msgType = TDMT_VND_TASK_RUN,
.pCont = pRunReq,
.contLen = sizeof(SStreamTaskRunReq),
};
tmsgPutToQueue(&pTq->pVnode->msgCb, FETCH_QUEUE, &msg);
}

} else {

@@ -1174,11 +1207,53 @@ int32_t tqProcessStreamTrigger2(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
streamDataSubmitRefDec(pSubmit);
return 0;
} else {
if (pSubmit) {
if (pSubmit->dataRef) {
taosMemoryFree(pSubmit->dataRef);
}
taosFreeQitem(pSubmit);
}
return -1;
}
}

int32_t tqProcessTaskExec2(STQ* pTq, char* msg, int32_t msgLen) {
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
//
SStreamTaskRunReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessRunReq(pTask, &pTq->pVnode->msgCb);
return 0;
}

int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg) {
SStreamDispatchReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessDispatchReq(pTask, &pTq->pVnode->msgCb, pReq, pMsg);
return 0;
}

int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRecoverReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessRecoverReq(pTask, &pTq->pVnode->msgCb, pReq, pMsg);
return 0;
}

int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamDispatchRsp* pRsp = pMsg->pCont;
int32_t taskId = pRsp->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessDispatchRsp(pTask, &pTq->pVnode->msgCb, pRsp);
return 0;
}

int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRecoverRsp* pRsp = pMsg->pCont;
int32_t taskId = pRsp->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessRecoverRsp(pTask, pRsp);
return 0;
}
@@ -231,3 +231,14 @@ int tqReadHandleAddTbUidList(STqReadHandle* pHandle, const SArray* tbUidList) {

return 0;
}

int tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList) {
ASSERT(pHandle->tbIdHash != NULL);

for(int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
int64_t* pKey = (int64_t*) taosArrayGet(tbUidList, i);
taosHashRemove(pHandle->tbIdHash, pKey, sizeof(int64_t));
}

return 0;
}

@@ -2076,8 +2076,14 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
#endif
if (TD_SUPPORT_UPDATE(pCfg->update)) {
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
if (lastKeyAppend != key) {
if (lastKeyAppend != TSKEY_INITIAL_VAL) {
++curRow;
}
lastKeyAppend = key;
}
// load data from file firstly
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);

if (rv1 != TD_ROW_SVER(row1)) {
rv1 = TD_ROW_SVER(row1);

@@ -2087,7 +2093,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}

// still assign data into current row
mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);

if (cur->win.skey == TSKEY_INITIAL_VAL) {

@@ -2099,7 +2105,6 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
cur->mixBlock = true;

moveToNextRowInMem(pCheckInfo);
++curRow;

pos += step;
} else {
@@ -106,11 +106,13 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg
pMsg->contLen - sizeof(SMsgHead)) < 0) {
}
} break;
#if 0
case TDMT_VND_TASK_WRITE_EXEC: {
if (tqProcessTaskExec(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), pMsg->contLen - sizeof(SMsgHead),
0) < 0) {
}
} break;
#endif
case TDMT_VND_ALTER_VNODE:
break;
default:

@@ -181,11 +183,32 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return vnodeGetTableMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg, pInfo->workerId);

case TDMT_VND_TASK_RUN: {
int32_t code = tqProcessTaskRunReq(pVnode->pTq, pMsg);
pMsg->pCont = NULL;
return code;
}
case TDMT_VND_TASK_DISPATCH:
return tqProcessTaskDispatchReq(pVnode->pTq, pMsg);
case TDMT_VND_TASK_RECOVER:
return tqProcessTaskRecoverReq(pVnode->pTq, pMsg);
case TDMT_VND_TASK_DISPATCH_RSP:
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
case TDMT_VND_TASK_RECOVER_RSP:
return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg);

#if 0
case TDMT_VND_TASK_PIPE_EXEC:
case TDMT_VND_TASK_MERGE_EXEC:
return tqProcessTaskExec(pVnode->pTq, msgstr, msgLen, 0);
case TDMT_VND_STREAM_TRIGGER:
return tqProcessStreamTrigger(pVnode->pTq, pMsg->pCont, pMsg->contLen, 0);
case TDMT_VND_STREAM_TRIGGER:{
// refactor, avoid double free
int code = tqProcessStreamTrigger(pVnode->pTq, pMsg->pCont, pMsg->contLen, 0);
pMsg->pCont = NULL;
return code;
}
#endif
case TDMT_VND_QUERY_HEARTBEAT:
return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg);
default:
@@ -125,19 +125,10 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle) {
return pTaskInfo;
}

int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;

// traverse to the stream scanner node to add this table id
SOperatorInfo* pInfo = pTaskInfo->pRoot;
while (pInfo->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
pInfo = pInfo->pDownstream[0];
}

SStreamBlockScanInfo* pScanInfo = pInfo->info;
if (isAdd) {
static SArray* filterQualifiedChildTables(const SStreamBlockScanInfo* pScanInfo, const SArray* tableIdList) {
SArray* qa = taosArrayInit(4, sizeof(tb_uid_t));

// let's discard the tables those are not created according to the queried super table.
SMetaReader mr = {0};
metaReaderInit(&mr, pScanInfo->readHandle.meta, 0);
for (int32_t i = 0; i < taosArrayGetSize(tableIdList); ++i) {

@@ -158,17 +149,36 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}

metaReaderClear(&mr);
return qa;
}

int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;

// traverse to the stream scanner node to add this table id
SOperatorInfo* pInfo = pTaskInfo->pRoot;
while (pInfo->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
pInfo = pInfo->pDownstream[0];
}

int32_t code = 0;
SStreamBlockScanInfo* pScanInfo = pInfo->info;
if (isAdd) {  // add new table id
SArray* qa = filterQualifiedChildTables(pScanInfo, tableIdList);

qDebug(" %d qualified child tables added into stream scanner", (int32_t)taosArrayGetSize(qa));
int32_t code = tqReadHandleAddTbUidList(pScanInfo->streamBlockReader, qa);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} else {
assert(0);
code = tqReadHandleAddTbUidList(pScanInfo->streamBlockReader, qa);
taosArrayDestroy(qa);

} else {  // remove the table id in current list
SArray* qa = filterQualifiedChildTables(pScanInfo, tableIdList);

qDebug(" %d remove child tables from the stream scanner", (int32_t)taosArrayGetSize(tableIdList));
code = tqReadHandleAddTbUidList(pScanInfo->streamBlockReader, tableIdList);
taosArrayDestroy(qa);
}

return TSDB_CODE_SUCCESS;
return code;
}

int32_t qGetQueriedTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, int32_t* tversion) {
@@ -2062,15 +2062,7 @@ void setExecutionContext(int32_t numOfOutput, uint64_t groupId, SExecTaskInfo* p
pAggInfo->groupId = groupId;
}

/**
* For interval query of both super table and table, copy the data in ascending order, since the output results are
* ordered in SWindowResutl already. While handling the group by query for both table and super table,
* all group result are completed already.
*
* @param pQInfo
* @param result
*/
int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo,
int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo,
int32_t* rowCellOffset, SqlFunctionCtx* pCtx, int32_t numOfExprs) {
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t start = pGroupResInfo->index;

@@ -2087,6 +2079,15 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn
continue;
}

if (pBlock->info.groupId == 0) {
pBlock->info.groupId = pPos->groupId;
} else {
// current value belongs to different group, it can't be packed into one datablock
if (pBlock->info.groupId != pPos->groupId) {
break;
}
}

if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
break;
}

@@ -2100,9 +2101,8 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn
if (pCtx[j].fpSet.finalize) {
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(taskInfo), tstrerror(code));
taskInfo->code = code;
longjmp(taskInfo->env, code);
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
longjmp(pTaskInfo->env, code);
}
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
// do nothing, todo refactor

@@ -2124,7 +2124,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn
}
}

// qDebug("QInfo:0x%"PRIx64" copy data to query buf completed", GET_TASKID(pRuntimeEnv));
qDebug("%s result generated, rows:%d, groupId:%"PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows, pBlock->info.groupId);
blockDataUpdateTsWindow(pBlock);
return 0;
}

@@ -2145,10 +2145,9 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
return;
}

// clear the existed group id
pBlock->info.groupId = 0;
doCopyToSDataBlock(pTaskInfo, pBlock, pExprInfo, pBuf, pGroupResInfo, rowCellOffset, pCtx, numOfExprs);

// add condition (pBlock->info.rows >= 1) just to runtime happy
blockDataUpdateTsWindow(pBlock);
}

static void updateNumOfRowsInResultRows(SqlFunctionCtx* pCtx, int32_t numOfOutput, SResultRowInfo* pResultRowInfo,

@@ -3656,7 +3655,6 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
doSetOperatorCompleted(pOperator);
}

doSetOperatorCompleted(pOperator);
return (blockDataGetNumOfRows(pInfo->pRes) != 0) ? pInfo->pRes : NULL;
}
@@ -41,6 +41,13 @@
sToken = tStrGetToken(pSql, &index, false); \
} while (0)

#define NEXT_VALID_TOKEN(pSql, sToken) \
do { \
sToken.n = tGetToken(pSql, &sToken.type); \
sToken.z = pSql; \
pSql += sToken.n; \
} while (TK_NK_SPACE == sToken.type)

typedef struct SInsertParseContext {
SParseContext* pComCxt;  // input
char* pSql;  // input

@@ -482,9 +489,11 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int
return buildSyntaxErrMsg(pMsgBuf, "invalid bool data", pToken->z);
}
} else if (pToken->type == TK_NK_INTEGER) {
return func(pMsgBuf, ((taosStr2Int64(pToken->z, NULL, 10) == 0) ? &FALSE_VALUE : &TRUE_VALUE), pSchema->bytes, param);
return func(pMsgBuf, ((taosStr2Int64(pToken->z, NULL, 10) == 0) ? &FALSE_VALUE : &TRUE_VALUE), pSchema->bytes,
param);
} else if (pToken->type == TK_NK_FLOAT) {
return func(pMsgBuf, ((taosStr2Double(pToken->z, NULL) == 0) ? &FALSE_VALUE : &TRUE_VALUE), pSchema->bytes, param);
return func(pMsgBuf, ((taosStr2Double(pToken->z, NULL) == 0) ? &FALSE_VALUE : &TRUE_VALUE), pSchema->bytes,
param);
} else {
return buildSyntaxErrMsg(pMsgBuf, "invalid bool data", pToken->z);
}

@@ -685,7 +694,7 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, SParsedDataColInfo*
isOrdered = false;
}
if (index < 0) {
return buildSyntaxErrMsg(&pCxt->msg, "invalid column/tag name", sToken.z);
return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMN, sToken.z);
}
if (pColList->cols[index].valStat == VAL_STAT_HAS) {
return buildSyntaxErrMsg(&pCxt->msg, "duplicated column name", sToken.z);

@@ -895,8 +904,10 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tb
return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
}
CHECK_CODE(parseTagsClause(pCxt, pCxt->pTableMeta->schema, getTableInfo(pCxt->pTableMeta).precision, name->tname));
NEXT_TOKEN(pCxt->pSql, sToken);
if (TK_NK_RP != sToken.type) {
NEXT_VALID_TOKEN(pCxt->pSql, sToken);
if (TK_NK_COMMA == sToken.type) {
return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_TAGS_NOT_MATCHED);
} else if (TK_NK_RP != sToken.type) {
return buildSyntaxErrMsg(&pCxt->msg, ") is expected", sToken.z);
}

@@ -996,8 +1007,10 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo
pDataBlock->size += extendedRowSize;  // len;
}

NEXT_TOKEN(pCxt->pSql, sToken);
if (TK_NK_RP != sToken.type) {
NEXT_VALID_TOKEN(pCxt->pSql, sToken);
if (TK_NK_COMMA == sToken.type) {
return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMNS_NUM);
} else if (TK_NK_RP != sToken.type) {
return buildSyntaxErrMsg(&pCxt->msg, ") expected", sToken.z);
}

@@ -1160,7 +1173,8 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memcpy(tags, &pCxt->tags, sizeof(pCxt->tags));
(*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj, pCxt->pTableBlockHashObj);
(*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj,
pCxt->pTableBlockHashObj);

memset(&pCxt->tags, 0, sizeof(pCxt->tags));
pCxt->pVgroupsHashObj = NULL;

@@ -1238,7 +1252,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
if (TSDB_CODE_SUCCESS == code) {
code = parseInsertBody(&context);
}
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_CODE_SUCCESS == code || NEED_CLIENT_HANDLE_ERROR(code)) {
SName* pTable = taosHashIterate(context.pTableNameHashObj, NULL);
while (NULL != pTable) {
taosArrayPush((*pQuery)->pTableList, pTable);

@@ -1688,8 +1702,8 @@ static int32_t smlBuildTagRow(SArray* cols, SKVRowBuilder* tagsBuilder, SParsedD
return TSDB_CODE_SUCCESS;
}

int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols, bool format,
STableMeta *pTableMeta, char *tableName, char *msgBuf, int16_t msgBufLen) {
int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format, STableMeta* pTableMeta,
char* tableName, char* msgBuf, int16_t msgBufLen) {
SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen};

SSmlExecHandle* smlHandle = (SSmlExecHandle*)handle;

@@ -1702,7 +1716,8 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols
return ret;
}
SKVRow row = NULL;
ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tagsBuilder, &smlHandle->tableExecHandle.tags, pTagsSchema, &row, &pBuf);
ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tagsBuilder, &smlHandle->tableExecHandle.tags, pTagsSchema,
&row, &pBuf);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -120,6 +120,20 @@ static int32_t getTableMeta(STranslateContext* pCxt, const char* pDbName, const
return getTableMetaImpl(pCxt, toName(pCxt->pParseCxt->acctId, pDbName, pTableName, &name), pMeta);
}

static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName, const char* pTableName,
STableMeta** pMeta) {
SParseContext* pParCxt = pCxt->pParseCxt;
SName name;
toName(pCxt->pParseCxt->acctId, pDbName, pTableName, &name);
int32_t code =
catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false);
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pDbName,
pTableName);
}
return code;
}

static int32_t getTableDistVgInfo(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) {
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = collectUseDatabase(pName, pCxt->pDbs);

@@ -3201,7 +3215,7 @@ static int32_t translateExplain(STranslateContext* pCxt, SExplainStmt* pStmt) {
}

static int32_t translateDescribe(STranslateContext* pCxt, SDescribeStmt* pStmt) {
return getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pStmt->pMeta);
return refreshGetTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pStmt->pMeta);
}

static int32_t translateKillConnection(STranslateContext* pCxt, SKillStmt* pStmt) {
@@ -68,7 +68,7 @@ static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMs

// get groupId, compute hash value
uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));
//

// get node
// TODO: optimize search process
SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
@@ -152,13 +152,13 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)

// exec
while (1) {
SSDataBlock* output;
SSDataBlock* output = NULL;
uint64_t ts = 0;
if (qExecTask(exec, &output, &ts) < 0) {
ASSERT(false);
}
if (output == NULL) break;
taosArrayPush(pRes, &output);
taosArrayPush(pRes, output);
}

// destroy
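The loop in the hunk above drains a pull-style executor: reset the out-pointer before each call, stop as soon as nothing is produced, and append the produced block itself rather than the address of the local pointer. A self-contained sketch of the same drain pattern with stand-in names (not the real qExecTask API):

#include <stddef.h>
#include <stdio.h>

/* Stand-in executor: yields a fixed set of items, then NULL when exhausted. */
static const char *demo_next(void) {
  static const char *items[] = {"block-1", "block-2", "block-3"};
  static size_t i = 0;
  return (i < 3) ? items[i++] : NULL;
}

int main(void) {
  for (;;) {
    const char *output = NULL;        /* mirrors `SSDataBlock* output = NULL;` */
    output = demo_next();
    if (output == NULL) break;        /* executor exhausted */
    printf("collected %s\n", output); /* the real code appends to an SArray */
  }
  return 0;
}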
@@ -189,7 +189,7 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
taosFreeQitem(data);

if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
@@ -209,7 +209,7 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
taosFreeQitem(data);

if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
@@ -231,7 +231,7 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
taosFreeQitem(data);

if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
@@ -253,7 +253,7 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
taosFreeQitem(data);

if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
@@ -392,12 +392,14 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg*
// 1.2 enqueue
pBlock->type = STREAM_DATA_TYPE_SSDATA_BLOCK;
pBlock->sourceVg = pReq->sourceVg;
pBlock->sourceVer = pReq->sourceVer;
/*pBlock->sourceVer = pReq->sourceVer;*/
taosWriteQitem(pTask->inputQ, pBlock);

// 1.3 rsp by input status
SStreamDispatchRsp* pCont = rpcMallocCont(sizeof(SStreamDispatchRsp));
pCont->inputStatus = status;
pCont->streamId = pReq->streamId;
pCont->taskId = pReq->sourceTaskId;
pRsp->pCont = pCont;
pRsp->contLen = sizeof(SStreamDispatchRsp);
tmsgSendRsp(pRsp);
@@ -439,12 +441,12 @@ int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb) {
return 0;
}

int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, char* msg) {
int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg) {
//
return 0;
}

int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, char* msg) {
int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) {
//
return 0;
}

@@ -14,6 +14,9 @@
*/
#include "tdbInt.h"

// #include <sys/types.h>
// #include <unistd.h>

struct SPCache {
int szPage;
int nPages;
@@ -32,7 +35,6 @@ static inline uint32_t tdbPCachePageHash(const SPgid *pPgid) {
uint32_t *t = (uint32_t *)((pPgid)->fileid);
return (uint32_t)(t[0] + t[1] + t[2] + t[3] + t[4] + t[5] + (pPgid)->pgno);
}
#define PAGE_IS_PINNED(pPage) ((pPage)->pLruNext == NULL)

static int tdbPCacheOpenImpl(SPCache *pCache);
static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn);
@@ -80,16 +82,22 @@ int tdbPCacheClose(SPCache *pCache) {

SPage *tdbPCacheFetch(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) {
SPage *pPage;
i32 nRef;

tdbPCacheLock(pCache);

pPage = tdbPCacheFetchImpl(pCache, pPgid, pTxn);
if (pPage) {
tdbRefPage(pPage);
nRef = tdbRefPage(pPage);
}

ASSERT(pPage);

tdbPCacheUnlock(pCache);

// printf("thread %" PRId64 " fetch page %d pgno %d pPage %p nRef %d\n", taosGetSelfPthreadId(), pPage->id,
// TDB_PAGE_PGNO(pPage), pPage, nRef);

return pPage;
}

@@ -98,16 +106,16 @@ void tdbPCacheRelease(SPCache *pCache, SPage *pPage, TXN *pTxn) {

ASSERT(pTxn);

nRef = tdbUnrefPage(pPage);
ASSERT(nRef >= 0);
// nRef = tdbUnrefPage(pPage);
// ASSERT(nRef >= 0);

if (nRef == 0) {
tdbPCacheLock(pCache);

nRef = tdbUnrefPage(pPage);
if (nRef == 0) {
// test the nRef again to make sure
// it is safe th handle the page
nRef = tdbGetPageRef(pPage);
if (nRef == 0) {
// nRef = tdbGetPageRef(pPage);
// if (nRef == 0) {
if (pPage->isLocal) {
tdbPCacheUnpinPage(pCache, pPage);
} else {
@@ -118,10 +126,11 @@ void tdbPCacheRelease(SPCache *pCache, SPage *pPage, TXN *pTxn) {

tdbPageDestroy(pPage, pTxn->xFree, pTxn->xArg);
}
// }
}

tdbPCacheUnlock(pCache);
}
// printf("thread %" PRId64 " relas page %d pgno %d pPage %p nRef %d\n", taosGetSelfPthreadId(), pPage->id,
// TDB_PAGE_PGNO(pPage), pPage, nRef);
}

int tdbPCacheGetPageSize(SPCache *pCache) { return pCache->szPage; }
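In the release hunk above, the reference is now dropped only while the cache lock is held, so hitting zero and recycling the page cannot race with another thread fetching it from the cache. A minimal sketch of that shape, assuming a pthread mutex and a C11 atomic counter (much simplified relative to the real SPCache):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
  atomic_int ref;                 /* reference count */
} Page;

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static void recycle(Page *p) { free(p); }   /* stand-in for unpin/destroy */

/* Drop one reference under the cache lock; only the caller that takes the
 * count to zero may unpin or destroy the page. */
static void page_release(Page *p) {
  pthread_mutex_lock(&cache_lock);
  if (atomic_fetch_sub(&p->ref, 1) == 1) {
    recycle(p);
  }
  pthread_mutex_unlock(&cache_lock);
}

int main(void) {
  Page *p = malloc(sizeof(Page));
  if (p == NULL) return 1;
  atomic_init(&p->ref, 1);
  page_release(p);                /* last reference: the page is recycled */
  return 0;
}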
@@ -223,6 +232,7 @@ static void tdbPCachePinPage(SPCache *pCache, SPage *pPage) {

pCache->nRecyclable--;

// printf("pin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
tdbTrace("pin page %d", pPage->id);
}
}
@@ -243,6 +253,7 @@ static void tdbPCacheUnpinPage(SPCache *pCache, SPage *pPage) {

pCache->nRecyclable++;

// printf("unpin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
tdbTrace("unpin page %d", pPage->id);
}

@@ -253,10 +264,12 @@ static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) {
h = tdbPCachePageHash(&(pPage->pgid));
for (ppPage = &(pCache->pgHash[h % pCache->nHash]); (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext))
;
ASSERT(*ppPage == pPage);
*ppPage = pPage->pHashNext;

if (*ppPage) {
*ppPage = pPage->pHashNext;
pCache->nPage--;
// printf("rmv page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
}

tdbTrace("remove page %d to hash", pPage->id);
}
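The removal hunk walks the hash chain with a pointer-to-pointer, so unlinking the head and unlinking an interior node are the same assignment, and a page that is not on the chain is now tolerated instead of asserted. A generic, self-contained sketch of the idiom:

#include <stdio.h>

typedef struct Node {
  int          key;
  struct Node *next;
} Node;

/* Unlink `target` from a singly linked chain. If it is not on the chain,
 * *pp ends up NULL and nothing is modified. */
static int chain_remove(Node **head, Node *target) {
  Node **pp = head;
  while (*pp && *pp != target) pp = &(*pp)->next;
  if (*pp == NULL) return 0;   /* not found: leave the chain untouched */
  *pp = target->next;
  return 1;
}

int main(void) {
  Node c = {3, NULL}, b = {2, &c}, a = {1, &b};
  Node *head = &a;
  printf("removed: %d\n", chain_remove(&head, &b));   /* 1 */
  printf("head->next->key: %d\n", head->next->key);   /* 3 */
  return 0;
}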
@@ -271,6 +284,7 @@ static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) {

pCache->nPage++;

// printf("add page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
tdbTrace("add page %d to hash", pPage->id);
}

@@ -265,6 +265,7 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPa
pgid.pgno = pgno;
pPage = tdbPCacheFetch(pPager->pCache, &pgid, pTxn);
if (pPage == NULL) {
ASSERT(0);
return -1;
}

@@ -272,10 +273,14 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPa
if (!TDB_PAGE_INITIALIZED(pPage)) {
ret = tdbPagerInitPage(pPager, pPage, initPage, arg, loadPage);
if (ret < 0) {
ASSERT(0);
return -1;
}
}

// printf("thread %" PRId64 " pager fetch page %d pgno %d ppage %p\n", taosGetSelfPthreadId(), pPage->id,
// TDB_PAGE_PGNO(pPage), pPage);

ASSERT(TDB_PAGE_INITIALIZED(pPage));
ASSERT(pPage->pPager == pPager);

@@ -284,7 +289,11 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPa
return 0;
}

void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn) { tdbPCacheRelease(pPager->pCache, pPage, pTxn); }
void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn) {
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
// printf("thread %" PRId64 " pager retun page %d pgno %d ppage %p\n", taosGetSelfPthreadId(), pPage->id,
// TDB_PAGE_PGNO(pPage), pPage);
}

static int tdbPagerAllocFreePage(SPager *pPager, SPgno *ppgno) {
// TODO: Allocate a page from the free list
@@ -352,6 +361,7 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage

ret = (*initPage)(pPage, arg, init);
if (ret < 0) {
ASSERT(0);
TDB_UNLOCK_PAGE(pPage);
return -1;
}
@@ -370,6 +380,7 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage
}
}
} else {
ASSERT(0);
return -1;
}

@@ -275,15 +275,15 @@ static inline i32 tdbUnrefPage(SPage *pPage) {
#define P_LOCK_FAIL -1

static inline int tdbTryLockPage(tdb_spinlock_t *pLock) {
int ret;
if (tdbSpinlockTrylock(pLock) == 0) {
ret = P_LOCK_SUCC;
} else if (errno == EBUSY) {
ret = P_LOCK_BUSY;
int ret = tdbSpinlockTrylock(pLock);
if (ret == 0) {
return P_LOCK_SUCC;
} else if (ret == EBUSY) {
return P_LOCK_BUSY;
} else {
ret = P_LOCK_FAIL;
ASSERT(0);
return P_LOCK_FAIL;
}
return ret;
}

#define TDB_INIT_PAGE_LOCK(pPage) tdbSpinlockInit(&((pPage)->lock), 0)
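The rewritten tdbTryLockPage classifies the outcome from the trylock return value instead of errno, which is how pthread-style trylock calls report contention (assuming tdbSpinlockTrylock wraps pthread_spin_trylock; those calls return an error number and do not set errno). A small standalone illustration:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum { LOCK_SUCC = 0, LOCK_BUSY = 1, LOCK_FAIL = -1 };

/* pthread_spin_trylock() returns 0 on success and EBUSY when the lock is
 * already held, so the return value is what must be inspected. */
static int try_lock(pthread_spinlock_t *lock) {
  int ret = pthread_spin_trylock(lock);
  if (ret == 0) return LOCK_SUCC;
  if (ret == EBUSY) return LOCK_BUSY;
  return LOCK_FAIL;
}

int main(void) {
  pthread_spinlock_t lock;
  pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
  printf("first:  %d\n", try_lock(&lock));   /* LOCK_SUCC */
  printf("second: %d\n", try_lock(&lock));   /* LOCK_BUSY */
  pthread_spin_unlock(&lock);
  pthread_spin_destroy(&lock);
  return 0;
}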
@@ -486,18 +486,18 @@ TEST(tdb_test, DISABLED_simple_upsert1) {
tdbClose(pEnv);
}

TEST(tdb_test, DISABLED_multi_thread_query) {
TEST(tdb_test, multi_thread_query) {
int ret;
TDB *pEnv;
TTB *pDb;
tdb_cmpr_fn_t compFunc;
int nData = 100000;
int nData = 1000000;
TXN txn;

taosRemoveDir("tdb");

// Open Env
ret = tdbOpen("tdb", 512, 1, &pEnv);
ret = tdbOpen("tdb", 4096, 10, &pEnv);
GTEST_ASSERT_EQ(ret, 0);

// Create a database
@@ -507,7 +507,7 @@ TEST(tdb_test, DISABLED_multi_thread_query) {

char key[64];
char val[64];
int64_t poolLimit = 4096; // 1M pool limit
int64_t poolLimit = 4096 * 20; // 1M pool limit
int64_t txnid = 0;
SPoolMem *pPool;

@@ -600,7 +600,7 @@ TEST(tdb_test, DISABLED_multi_thread_query) {
GTEST_ASSERT_EQ(ret, 0);
}

TEST(tdb_test, multi_thread1) {
TEST(tdb_test, DISABLED_multi_thread1) {
#if 0
int ret;
TDB *pDb;

@@ -94,7 +94,9 @@ void rpcFreeCont(void* cont) {
if (cont == NULL) {
return;
}

taosMemoryFree((char*)cont - TRANS_MSG_OVERHEAD);
tTrace("free mem: %p", (char*)cont - TRANS_MSG_OVERHEAD);
}
void* rpcReallocCont(void* ptr, int contLen) {
if (ptr == NULL) {

@@ -133,6 +133,7 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) {
} else {
p->cap = p->total;
p->buf = taosMemoryRealloc(p->buf, p->cap);
tTrace("internal malloc mem: %p, size: %d", p->buf, p->cap);

uvBuf->base = p->buf + p->len;
uvBuf->len = p->cap - p->len;
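transAllocBuffer grows the connection buffer to the expected total and then points the libuv buffer at the unread tail (base = buf + len, len = cap - len). A generic sketch of that reserve-the-tail pattern without libuv, using hypothetical names:

#include <stdlib.h>

typedef struct {
  char  *buf;   /* backing storage */
  size_t len;   /* bytes already filled */
  size_t cap;   /* current capacity */
} ReadBuf;

typedef struct {
  char  *base;  /* where the next read may write */
  size_t avail; /* how many bytes fit there */
} WriteSlice;

/* Grow the buffer to `total` bytes if needed and return the writable tail. */
static int reserve_tail(ReadBuf *rb, size_t total, WriteSlice *out) {
  if (total > rb->cap) {
    char *p = realloc(rb->buf, total);
    if (p == NULL) return -1;   /* keep the old buffer valid on failure */
    rb->buf = p;
    rb->cap = total;
  }
  out->base  = rb->buf + rb->len;
  out->avail = rb->cap - rb->len;
  return 0;
}

int main(void) {
  ReadBuf rb = {0};
  WriteSlice ws;
  int rc = reserve_tail(&rb, 4096, &ws);
  free(rb.buf);
  return rc;
}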
@@ -469,6 +469,8 @@ static void uvStartSendResp(SSrvMsg* smsg) {

if (pConn->broken == true) {
// persist by
transFreeMsg(smsg->msg.pCont);
taosMemoryFree(smsg);
transUnrefSrvHandle(pConn);
return;
}

@@ -162,7 +162,7 @@ void *taosAllocateQitem(int32_t size, EQItype itype) {
uTrace("item:%p, node:%p is allocated", pNode->item, pNode);
}

return (void *)pNode->item;
return pNode->item;
}

void taosFreeQitem(void *pItem) {
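taosAllocateQitem returns the item that sits right behind a hidden queue-node header, which is why the matching free can step back from the item pointer to release the whole node. A minimal sketch of that header-plus-payload layout, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

typedef struct QNode {
  struct QNode *next;   /* queue linkage; the payload follows this header */
} QNode;

/* Allocate header + payload in one block and hand back the payload. */
static void *qitem_alloc(size_t size) {
  QNode *node = calloc(1, sizeof(QNode) + size);
  return node ? (void *)(node + 1) : NULL;
}

/* Recover the header from the payload pointer and free the whole block. */
static void qitem_free(void *item) {
  if (item == NULL) return;
  free((QNode *)item - 1);
}

int main(void) {
  int *p = qitem_alloc(sizeof(int));
  if (p == NULL) return 1;
  *p = 42;
  printf("%d\n", *p);
  qitem_free(p);
  return 0;
}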
@@ -264,7 +264,7 @@ class TDDnode:
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
valgrindCmdline = "valgrind --log-file=\"valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"

cmd = "nohup %s %s -c %s 2>&1 & " % (
valgrindCmdline, binPath, self.cfgDir)
@@ -325,7 +325,7 @@ class TDDnode:
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
valgrindCmdline = "valgrind --log-file=\"valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"

cmd = "nohup %s %s -c %s 2>&1 & " % (
valgrindCmdline, binPath, self.cfgDir)

@@ -0,0 +1,230 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sleep 50
sql connect

print =============== create database
sql create database d0 keep 365000d,365000d,365000d
sql use d0

print =============== create super table and register rsma
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2;

sql show stables
if $rows != 1 then
return -1
endi

print =============== create child table
sql create table ct1 using stb tags("BeiJing", "ChaoYang")
sql create table ct2 using stb tags("BeiJing", "HaiDian")

sql show tables
if $rows != 2 then
return -1
endi

print =============== step3-1 insert records into ct1
sql insert into ct1 values('2022-05-03 16:59:00.010', 10);
sql insert into ct1 values('2022-05-03 16:59:00.011', 11);
sql insert into ct1 values('2022-05-03 16:59:00.016', 16);
sql insert into ct1 values('2022-05-03 16:59:00.016', 17);
sql insert into ct1 values('2022-05-03 16:59:00.020', 20);
sql insert into ct1 values('2022-05-03 16:59:00.016', 18);
sql insert into ct1 values('2022-05-03 16:59:00.021', 21);
sql insert into ct1 values('2022-05-03 16:59:00.022', 22);

print =============== step3-1 query records of ct1 from memory
sql select * from ct1;
print $data00 $data01
print $data10 $data11
print $data20 $data21
print $data30 $data31
print $data40 $data41
print $data50 $data51

if $rows != 6 then
print rows $rows != 6
return -1
endi

if $data01 != 10 then
print data01 $data01 != 10
return -1
endi

if $data21 != 18 then
print data21 $data21 != 18
return -1
endi

if $data51 != 22 then
print data51 $data51 != 22
return -1
endi

print =============== step3-1 insert records into ct2
sql insert into ct2 values('2022-03-02 16:59:00.010', 1),('2022-03-02 16:59:00.010',11),('2022-04-01 16:59:00.011',2),('2022-04-01 16:59:00.011',5),('2022-03-06 16:59:00.013',7);
sql insert into ct2 values('2022-03-02 16:59:00.010', 3),('2022-03-02 16:59:00.010',33),('2022-04-01 16:59:00.011',4),('2022-04-01 16:59:00.011',6),('2022-03-06 16:59:00.013',8);
sql insert into ct2 values('2022-03-02 16:59:00.010', 103),('2022-03-02 16:59:00.010',303),('2022-04-01 16:59:00.011',40),('2022-04-01 16:59:00.011',60),('2022-03-06 16:59:00.013',80);

print =============== step3-1 query records of ct2 from memory
sql select * from ct2;
print $data00 $data01
print $data10 $data11
print $data20 $data21

if $rows != 3 then
print rows $rows != 3
return -1
endi

if $data01 != 103 then
print data01 $data01 != 103
return -1
endi

if $data11 != 80 then
print data11 $data11 != 80
return -1
endi

if $data21 != 40 then
print data21 $data21 != 40
return -1
endi

#==================== reboot to trigger commit data to file
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start

print =============== step3-2 query records of ct1 from file
sql select * from ct1;
print $data00 $data01
print $data10 $data11
print $data20 $data21
print $data30 $data31
print $data40 $data41
print $data50 $data51

if $rows != 6 then
print rows $rows != 6
return -1
endi

if $data01 != 10 then
print data01 $data01 != 10
return -1
endi

if $data21 != 18 then
print data21 $data21 != 18
return -1
endi

if $data51 != 22 then
print data51 $data51 != 22
return -1
endi

print =============== step3-2 query records of ct2 from file
sql select * from ct2;
print $data00 $data01
print $data10 $data11
print $data20 $data21

if $rows != 3 then
print rows $rows != 3
return -1
endi

if $data01 != 103 then
print data01 $data01 != 103
return -1
endi

if $data11 != 80 then
print data11 $data11 != 80
return -1
endi

if $data21 != 40 then
print data21 $data21 != 40
return -1
endi

print =============== step3-3 query records of ct1 from memory and file(merge)
sql insert into ct1 values('2022-05-03 16:59:00.010', 100);
sql insert into ct1 values('2022-05-03 16:59:00.022', 200);
sql insert into ct1 values('2022-05-03 16:59:00.016', 160);

sql select * from ct1;
print $data00 $data01
print $data10 $data11
print $data20 $data21
print $data30 $data31
print $data40 $data41
print $data50 $data51

if $rows != 6 then
print rows $rows != 6
return -1
endi

if $data01 != 100 then
print data01 $data01 != 100
return -1
endi

if $data21 != 160 then
print data21 $data21 != 160
return -1
endi

if $data51 != 200 then
print data51 $data51 != 200
return -1
endi

print =============== step3-3 query records of ct2 from memory and file(merge)
sql insert into ct2(ts) values('2022-04-02 16:59:00.016');
sql insert into ct2 values('2022-03-06 16:59:00.013', NULL);
sql insert into ct2 values('2022-03-01 16:59:00.016', 10);
sql insert into ct2(ts) values('2022-04-01 16:59:00.011');
sql select * from ct2;
print $data00 $data01
print $data10 $data11
print $data20 $data21
print $data30 $data31
print $data40 $data41

if $rows != 5 then
print rows $rows != 5
return -1
endi

if $data01 != 10 then
print data01 $data01 != 10
return -1
endi

if $data11 != 103 then
print data11 $data11 != 103
return -1
endi

if $data21 != NULL then
print data21 $data21 != NULL
return -1
endi

if $data31 != 40 then
print data31 $data31 != 40
return -1
endi

if $data41 != NULL then
print data41 $data41 != NULL
return -1
endi

@@ -71,7 +71,7 @@ sql create database db replica $replica vgroups $vgroups
$loop_cnt = 0
check_db_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
sleep 20
if $loop_cnt == 10 then
print ====> db not ready!
return -1
@@ -83,7 +83,7 @@ print $data(db)[13] $data(db)[14] $data(db)[15] $data(db)[16] $data(db)[17] $dat
if $rows != 3 then
return -1
endi
if $data(db)[19] != nostrict then
if $data(db)[19] != ready then
goto check_db_ready
endi

@@ -93,7 +93,7 @@ $loop_cnt = 0
check_vg_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 40 then
if $loop_cnt == 300 then
print ====> vgroups not ready!
return -1
endi
@@ -175,6 +175,7 @@ while $i < $tbNum
endw

$totalTblNum = $tbNum * 2
print ====>totalTblNum:$totalTblNum
sql show tables
if $rows != $totalTblNum then
return -1
@@ -226,7 +227,7 @@ $loop_cnt = 0
check_vg_ready_2:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
if $loop_cnt == 300 then
print ====> vgroups switch fail!!!
return -1
endi

@@ -30,9 +30,12 @@ class TDTestCase:
#
# --------------- main frame -------------------
#
clientCfgDict = {'queryproxy': '1'}
clientCfgDict = {'queryproxy': '1','debugFlag': 135}
clientCfgDict["queryproxy"] = '2'
clientCfgDict["debugFlag"] = 143

updatecfgDict = {'clientCfg': {}}
updatecfgDict = {'debugFlag': 143}
updatecfgDict["clientCfg"] = clientCfgDict
def caseDescription(self):
'''
@@ -116,7 +119,7 @@ class TDTestCase:
# tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate))
return

def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,count):
def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childrowcount):
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"

@@ -125,7 +128,7 @@ class TDTestCase:
tsql.execute("drop database if exists %s"%dbname)
tsql.execute("create database %s vgroups %d"%(dbname,vgroups))
tsql.execute("use %s" %dbname)
count=int(count)
count=int(childrowcount)
threads = []
for i in range(threadNumbers):
tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
@@ -137,7 +140,7 @@ class TDTestCase:
tr.join()
end_time = time.time()
spendTime=end_time-start_time
speedCreate=count/spendTime
speedCreate=threadNumbers*count/spendTime
tdLog.debug("spent %.2fs to create %d stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,threadNumbers,threadNumbers*count,speedCreate))

return
@@ -146,41 +149,39 @@ class TDTestCase:


# insert data
def insert_data(self, host, dbname, stbname, ts_start,rowCount):
def insert_data(self, host, dbname, stbname, chilCount, ts_start, rowCount):
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"

tsql=self.newcur(host,config)
tdLog.debug("ready to inser data")

tsql.execute("use %s" %dbname)
pre_insert = "insert into "
sql = pre_insert
tcount=int(tcount)
allRows=tcount*rowCount
chilCount=int(chilCount)
allRows=chilCount*rowCount
tdLog.debug("doing insert data into stable-index:%s rows:%d ..."%(stbname, allRows))
exeStartTime=time.time()
for i in range(0,tcount):
for i in range(0,chilCount):
sql += " %s_%d values "%(stbname,i)
for j in range(rowCount):
sql += "(%d, %d, 'taos_%d') "%(ts_start + j*1000, j, j)
if j >0 and j%5000 == 0:
if j >0 and j%4000 == 0:
# print(sql)
tdSql.execute(sql)
tsql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
# end sql
if sql != pre_insert:
# print(sql)
tdSql.execute(sql)
print(len(sql))
tsql.execute(sql)
exeEndTime=time.time()
spendTime=exeEndTime-exeStartTime
speedInsert=allRows/spendTime
# tdLog.debug("spent %.2fs to INSERT %d rows , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,speedInsert))

tdLog.debug("INSERT TABLE DATA ............ [OK]")
tdLog.debug("spent %.2fs to INSERT %d rows into %s , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,stbname,speedInsert))
# tdLog.debug("INSERT TABLE DATA ............ [OK]")
return

def mutiThread_insert_data(self, host, dbname, stbname, threadNumbers, ts_start, tcountStart,tcountStop,rowCount):
def mutiThread_insert_data(self, host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount):
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"

@@ -188,42 +189,11 @@ class TDTestCase:
tdLog.debug("ready to inser data")

tsql.execute("use %s" %dbname)
pre_insert = "insert into "
sql = pre_insert
tcount=tcountStop-tcountStart
allRows=tcount*rowCount
tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbname, allRows))
exeStartTime=time.time()
for i in range(tcountStart,tcountStop):
sql += " %s_%d values "%(stbname,i)
for j in range(rowCount):
sql += "(%d, %d, 'taos_%d') "%(ts_start + j*1000, j, j)
if j >0 and j%5000 == 0:
# print(sql)
tdSql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
# end sql
if sql != pre_insert:
# print(sql)
tdSql.execute(sql)
exeEndTime=time.time()
spendTime=exeEndTime-exeStartTime
speedInsert=allRows/spendTime
# tdLog.debug("spent %.2fs to INSERT %d rows , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,speedInsert))

tdLog.debug("INSERT TABLE DATA ............ [OK]")


buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"

tsql=self.newcur(host,config)
tsql.execute("use %s" %dbname)
count=int(count)
chilCount=int(chilCount)
threads = []
for i in range(threadNumbers):
tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
threads.append(thd.Thread(target=self.create_tables, args=(host, dbname, stbname+"%d"%i, count,)))
# tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
threads.append(thd.Thread(target=self.insert_data, args=(host, dbname, stbname+"%d"%i, chilCount, ts_start, childrowcount,)))
start_time = time.time()
for tr in threads:
tr.start()
@@ -231,8 +201,18 @@ class TDTestCase:
tr.join()
end_time = time.time()
spendTime=end_time-start_time
speedCreate=count/spendTime
tdLog.debug("spent %.2fs to create %d stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,threadNumbers,threadNumbers*count,speedCreate))
tableCounts=threadNumbers*chilCount
stableRows=chilCount*childrowcount
allRows=stableRows*threadNumbers
speedInsert=allRows/spendTime

for i in range(threadNumbers):
tdSql.execute("use %s" %dbname)
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,stableRows)
tdLog.debug("spent %.2fs to insert %d rows into %d stable and %d table, speed is %.2f table/s... [OK]"% (spendTime,allRows,threadNumbers,tableCounts,speedInsert))
tdLog.debug("INSERT TABLE DATA ............ [OK]")

return


@@ -288,7 +268,10 @@ class TDTestCase:
def test_case1(self):
tdLog.debug("-----create database and muti-thread create tables test------- ")
#host,dbname,stbname,vgroups,threadNumbers,tcountStart,tcountStop
self.mutiThread_create_tables(host="localhost",dbname="db2",stbname="stb2", vgroups=1, threadNumbers=5, count=10000)
#host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount
self.mutiThread_create_tables(host="localhost",dbname="db",stbname="stb", vgroups=1, threadNumbers=5, childrowcount=50)
self.mutiThread_insert_data(host="localhost",dbname="db",stbname="stb", threadNumbers=5,chilCount=50,ts_start=self.ts,childrowcount=10)

return

# test case2 base:insert data
@@ -366,17 +349,17 @@ class TDTestCase:
# run case
def run(self):

# # test base case
# self.test_case1()
# tdLog.debug(" LIMIT test_case1 ............ [OK]")
# create database and tables。
self.test_case1()
tdLog.debug(" LIMIT test_case1 ............ [OK]")

# test case
# # taosBenchmark : create database and table
# self.test_case2()
# tdLog.debug(" LIMIT test_case2 ............ [OK]")

# test case
self.test_case3()
tdLog.debug(" LIMIT test_case3 ............ [OK]")
# # taosBenchmark:create database/table and insert data
# self.test_case3()
# tdLog.debug(" LIMIT test_case3 ............ [OK]")


# # test qnode
File diff suppressed because it is too large