Merge branch '3.0' of https://github.com/taosdata/TDengine into fix/hzcheng_3.0

Hongze Cheng 2022-05-21 01:43:10 +00:00
commit 84d0044b0d
51 changed files with 1942 additions and 671 deletions

View File

@ -46,6 +46,18 @@ IF(${TD_WINDOWS})
ON
)
option(
BUILD_TEST
"If build unit tests using googletest"
OFF
)
ELSE ()
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
ENDIF ()
option(
@ -54,12 +66,6 @@ option(
OFF
)
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
option(
BUILD_WITH_LEVELDB
"If build with leveldb"

View File

@ -1352,6 +1352,9 @@ typedef struct {
typedef struct {
int32_t code;
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t sversion;
int32_t tversion;
} SResReadyRsp;
typedef struct {
@ -2589,18 +2592,6 @@ static FORCE_INLINE void tDeleteSMqAskEpRsp(SMqAskEpRsp* pRsp) {
taosArrayDestroyEx(pRsp->topics, (void (*)(void*))tDeleteSMqSubTopicEp);
}
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceVg;
int64_t sourceVer;
SArray* data; // SArray<SSDataBlock>
} SStreamDispatchReq;
typedef struct {
int8_t inputStatus;
} SStreamDispatchRsp;
#define TD_AUTO_CREATE_TABLE 0x1
typedef struct {
int64_t suid;

View File

@ -200,6 +200,10 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_TASK_WRITE_EXEC, "vnode-task-write-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_RUN, "vnode-stream-task-run", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_DISPATCH, "vnode-stream-task-dispatch", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_RECOVER, "vnode-stream-task-recover", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_SMA, "vnode-create-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)

View File

@ -57,6 +57,12 @@ typedef struct SIndexMeta {
} SIndexMeta;
typedef struct STbVerInfo {
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t sversion;
int32_t tversion;
} STbVerInfo;
/*
* ASSERT(sizeof(SCTableMeta) == 24)
* ASSERT(tableType == TSDB_CHILD_TABLE)
@ -183,7 +189,9 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define SET_META_TYPE_BOTH_TABLE(t) (t) = META_TYPE_BOTH_TABLE
#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \
((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST)
((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST || \
(_code) == TSDB_CODE_PAR_INVALID_COLUMNS_NUM || (_code) == TSDB_CODE_PAR_INVALID_COLUMN || \
(_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED)
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED)
@ -194,7 +202,7 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define NEED_SCHEDULER_RETRY_ERROR(_code) \
((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
#define REQUEST_MAX_TRY_TIMES 5
#define REQUEST_MAX_TRY_TIMES 1
#define qFatal(...) \
do { \

View File

@ -107,7 +107,7 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit)
if (ref == 0) {
taosMemoryFree(pDataSubmit->data);
taosMemoryFree(pDataSubmit->dataRef);
taosFreeQitem(pDataSubmit);
// taosFreeQitem(pDataSubmit);
}
}
@ -286,6 +286,36 @@ typedef struct {
int32_t taskId;
} SStreamTaskRunReq;
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceTaskId;
int32_t sourceVg;
#if 0
int64_t sourceVer;
#endif
SArray* data; // SArray<SSDataBlock>
} SStreamDispatchReq;
typedef struct {
int64_t streamId;
int32_t taskId;
int8_t inputStatus;
} SStreamDispatchRsp;
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceTaskId;
int32_t sourceVg;
} SStreamTaskRecoverReq;
typedef struct {
int64_t streamId;
int32_t taskId;
int8_t inputStatus;
} SStreamTaskRecoverRsp;
int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input);
int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input);
int32_t streamDequeueOutput(SStreamTask* pTask, void** output);
@ -296,6 +326,12 @@ int32_t streamTaskRun(SStreamTask* pTask);
int32_t streamTaskHandleInput(SStreamTask* pTask, void* data);
int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamTaskProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
int32_t streamTaskProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp);
int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg);
int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp);
#ifdef __cplusplus
}
#endif

View File

@ -45,7 +45,7 @@ static int32_t hbProcessUserAuthInfoRsp(void *value, int32_t valueLen, struct SC
catalogUpdateUserAuthInfo(pCatalog, rsp);
}
tFreeSUserAuthBatchRsp(&batchRsp);
taosArrayDestroy(batchRsp.pArray);
return TSDB_CODE_SUCCESS;
}
@ -285,6 +285,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
int64_t *rid = pIter;
SRequestObj *pRequest = acquireRequest(*rid);
if (NULL == pRequest) {
pIter = taosHashIterate(pObj->pRequests, pIter);
continue;
}
@ -544,7 +545,7 @@ SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
}
taosArrayPush(pBatchReq->reqs, pOneReq);
hbClearClientHbReq(pOneReq);
//hbClearClientHbReq(pOneReq);
pIter = taosHashIterate(pAppHbMgr->activeInfo, pIter);
}
@ -565,6 +566,11 @@ void hbClearReqInfo(SAppHbMgr *pAppHbMgr) {
tFreeReqKvHash(pOneReq->info);
taosHashClear(pOneReq->info);
if (pOneReq->query) {
taosArrayDestroy(pOneReq->query->queryDesc);
taosMemoryFreeClear(pOneReq->query);
}
pIter = taosHashIterate(pAppHbMgr->activeInfo, pIter);
}
}
@ -649,6 +655,9 @@ static int32_t hbCreateThread() {
}
static void hbStopThread() {
if (0 == atomic_load_8(&clientHbMgr.inited)) {
return;
}
if (atomic_val_compare_exchange_8(&clientHbMgr.threadStop, 0, 1)) {
tscDebug("hb thread already stopped");
return;
@ -745,13 +754,13 @@ int hbMgrInit() {
hbMgrInitHandle();
// init background thread
/*hbCreateThread();*/
hbCreateThread();
return 0;
}
void hbMgrCleanUp() {
// hbStopThread();
hbStopThread();
// destroy all appHbMgr
int8_t old = atomic_val_compare_exchange_8(&clientHbMgr.inited, 1, 0);

View File

@ -13,18 +13,18 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cJSON.h"
#include "clientInt.h"
#include "clientLog.h"
#include "command.h"
#include "scheduler.h"
#include "tdatablock.h"
#include "tdataformat.h"
#include "tdef.h"
#include "tglobal.h"
#include "tmsgtype.h"
#include "tpagedbuf.h"
#include "tref.h"
#include "cJSON.h"
#include "tdataformat.h"
static int32_t initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet);
static SMsgSendInfo* buildConnectMsg(SRequestObj* pRequest);
@ -189,7 +189,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
setResSchemaInfo(&pRequest->body.resInfo, (*pQuery)->pResSchema, (*pQuery)->numOfResCols);
setResPrecision(&pRequest->body.resInfo, (*pQuery)->precision);
}
}
if (TSDB_CODE_SUCCESS == code || NEED_CLIENT_HANDLE_ERROR(code)) {
TSWAP(pRequest->dbList, (*pQuery)->pDbList);
TSWAP(pRequest->tableList, (*pQuery)->pTableList);
}
@ -299,6 +300,8 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
schedulerFreeJob(pRequest->body.queryJob);
}
*pRes = res.res;
pRequest->code = code;
terrno = code;
return pRequest->code;
@ -346,6 +349,23 @@ int32_t validateSversion(SRequestObj* pRequest, void* res) {
taosArrayPush(pArray, &tbSver);
}
} else if (TDMT_VND_QUERY == pRequest->type) {
SArray* pTbArray = (SArray*)res;
int32_t tbNum = taosArrayGetSize(pTbArray);
if (tbNum <= 0) {
return TSDB_CODE_SUCCESS;
}
pArray = taosArrayInit(tbNum, sizeof(STbSVersion));
if (NULL == pArray) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < tbNum; ++i) {
STbVerInfo* tbInfo = taosArrayGet(pTbArray, i);
STbSVersion tbSver = {.tbFName = tbInfo->tbFName, .sver = tbInfo->sversion};
taosArrayPush(pArray, &tbSver);
}
}
SCatalog* pCatalog = NULL;
@ -370,6 +390,7 @@ void freeRequestRes(SRequestObj* pRequest, void* res) {
if (TDMT_VND_SUBMIT == pRequest->type) {
tFreeSSubmitRsp((SSubmitRsp*)res);
} else if (TDMT_VND_QUERY == pRequest->type) {
taosArrayDestroy((SArray *)res);
}
}
@ -483,7 +504,8 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
int32_t retryNum = 0;
int32_t code = 0;
while (retryNum++ < REQUEST_MAX_TRY_TIMES) {
do {
destroyRequest(pRequest);
pRequest = launchQuery(pTscObj, sql, sqlLen);
if (pRequest == NULL || TSDB_CODE_SUCCESS == pRequest->code || !NEED_CLIENT_HANDLE_ERROR(pRequest->code)) {
break;
@ -494,9 +516,7 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
pRequest->code = code;
break;
}
destroyRequest(pRequest);
}
} while (retryNum++ < REQUEST_MAX_TRY_TIMES);
return pRequest;
}
@ -805,21 +825,20 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
return TSDB_CODE_SUCCESS;
}
static char* parseTagDatatoJson(void *p){
static char* parseTagDatatoJson(void* p) {
char* string = NULL;
cJSON *json = cJSON_CreateObject();
if (json == NULL)
{
cJSON* json = cJSON_CreateObject();
if (json == NULL) {
goto end;
}
int16_t nCols = kvRowNCols(p);
char tagJsonKey[256] = {0};
for (int j = 0; j < nCols; ++j) {
SColIdx * pColIdx = kvRowColIdxAt(p, j);
SColIdx* pColIdx = kvRowColIdxAt(p, j);
char* val = (char*)(kvRowColVal(p, pColIdx));
if (j == 0){
if(*val == TSDB_DATA_TYPE_NULL){
if (j == 0) {
if (*val == TSDB_DATA_TYPE_NULL) {
string = taosMemoryCalloc(1, 8);
sprintf(varDataVal(string), "%s", TSDB_DATA_NULL_STR_L);
varDataSetLen(string, strlen(varDataVal(string)));
@ -835,18 +854,17 @@ static char* parseTagDatatoJson(void *p){
val += varDataTLen(val);
char* realData = POINTER_SHIFT(val, CHAR_BYTES);
char type = *val;
if(type == TSDB_DATA_TYPE_NULL) {
if (type == TSDB_DATA_TYPE_NULL) {
cJSON* value = cJSON_CreateNull();
if (value == NULL)
{
if (value == NULL) {
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
}else if(type == TSDB_DATA_TYPE_NCHAR) {
} else if (type == TSDB_DATA_TYPE_NCHAR) {
cJSON* value = NULL;
if (varDataLen(realData) > 0){
char *tagJsonValue = taosMemoryCalloc(varDataLen(realData), 1);
int32_t length = taosUcs4ToMbs((TdUcs4 *)varDataVal(realData), varDataLen(realData), tagJsonValue);
if (varDataLen(realData) > 0) {
char* tagJsonValue = taosMemoryCalloc(varDataLen(realData), 1);
int32_t length = taosUcs4ToMbs((TdUcs4*)varDataVal(realData), varDataLen(realData), tagJsonValue);
if (length < 0) {
tscError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, val);
taosMemoryFree(tagJsonValue);
@ -854,45 +872,41 @@ static char* parseTagDatatoJson(void *p){
}
value = cJSON_CreateString(tagJsonValue);
taosMemoryFree(tagJsonValue);
if (value == NULL)
{
if (value == NULL) {
goto end;
}
}else if(varDataLen(realData) == 0){
} else if (varDataLen(realData) == 0) {
value = cJSON_CreateString("");
}else{
} else {
ASSERT(0);
}
cJSON_AddItemToObject(json, tagJsonKey, value);
}else if(type == TSDB_DATA_TYPE_DOUBLE){
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
double jsonVd = *(double*)(realData);
cJSON* value = cJSON_CreateNumber(jsonVd);
if (value == NULL)
{
if (value == NULL) {
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
// }else if(type == TSDB_DATA_TYPE_BIGINT){
// int64_t jsonVd = *(int64_t*)(realData);
// cJSON* value = cJSON_CreateNumber((double)jsonVd);
// if (value == NULL)
// {
// goto end;
// }
// cJSON_AddItemToObject(json, tagJsonKey, value);
}else if (type == TSDB_DATA_TYPE_BOOL) {
// }else if(type == TSDB_DATA_TYPE_BIGINT){
// int64_t jsonVd = *(int64_t*)(realData);
// cJSON* value = cJSON_CreateNumber((double)jsonVd);
// if (value == NULL)
// {
// goto end;
// }
// cJSON_AddItemToObject(json, tagJsonKey, value);
} else if (type == TSDB_DATA_TYPE_BOOL) {
char jsonVd = *(char*)(realData);
cJSON* value = cJSON_CreateBool(jsonVd);
if (value == NULL)
{
if (value == NULL) {
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
}else{
} else {
ASSERT(0);
}
}
string = cJSON_PrintUnformatted(json);
end:
@ -930,7 +944,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
pResultInfo->pCol[i].pData = pResultInfo->convertBuf[i];
pResultInfo->row[i] = pResultInfo->pCol[i].pData;
}else if (type == TSDB_DATA_TYPE_JSON && colLength[i] > 0) {
} else if (type == TSDB_DATA_TYPE_JSON && colLength[i] > 0) {
char* p = taosMemoryRealloc(pResultInfo->convertBuf[i], colLength[i]);
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -943,7 +957,6 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
if (pCol->offset[j] != -1) {
char* pStart = pCol->offset[j] + pCol->pData;
int32_t jsonInnerType = *pStart;
char* jsonInnerData = pStart + CHAR_BYTES;
char dst[TSDB_MAX_JSON_TAG_LEN] = {0};
@ -951,7 +964,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
sprintf(varDataVal(dst), "%s", TSDB_DATA_NULL_STR_L);
varDataSetLen(dst, strlen(varDataVal(dst)));
} else if (jsonInnerType == TSDB_DATA_TYPE_JSON) {
char *jsonString = parseTagDatatoJson(jsonInnerData);
char* jsonString = parseTagDatatoJson(jsonInnerData);
STR_TO_VARSTR(dst, jsonString);
taosMemoryFree(jsonString);
} else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value"

View File

@ -58,13 +58,9 @@ for (int i = 1; i < keyLen; ++i) { \
#define IS_INVALID_COL_LEN(len) ((len) <= 0 || (len) >= TSDB_COL_NAME_LEN)
#define IS_INVALID_TABLE_LEN(len) ((len) <= 0 || (len) >= TSDB_TABLE_NAME_LEN)
#define OTD_MAX_FIELDS_NUM 2
#define OTD_JSON_SUB_FIELDS_NUM 2
#define OTD_JSON_FIELDS_NUM 4
#define OTD_TIMESTAMP_COLUMN_NAME "ts"
#define OTD_METRIC_VALUE_COLUMN_NAME "value"
#define TS "_ts"
#define TS_LEN 3
#define TAG "_tag"
@ -728,57 +724,43 @@ static int64_t smlGetTimeValue(const char *value, int32_t len, int8_t type) {
if(value + len != endPtr){
return -1;
}
if(tsInt64 == 0){
return taosGetTimestampNs();
}
double ts = tsInt64;
switch (type) {
case TSDB_TIME_PRECISION_HOURS:
ts *= (3600 * 1e9);
tsInt64 *= (3600 * 1e9);
ts *= NANOSECOND_PER_HOUR;
tsInt64 *= NANOSECOND_PER_HOUR;
break;
case TSDB_TIME_PRECISION_MINUTES:
ts *= (60 * 1e9);
tsInt64 *= (60 * 1e9);
ts *= NANOSECOND_PER_MINUTE;
tsInt64 *= NANOSECOND_PER_MINUTE;
break;
case TSDB_TIME_PRECISION_SECONDS:
ts *= (1e9);
tsInt64 *= (1e9);
ts *= NANOSECOND_PER_SEC;
tsInt64 *= NANOSECOND_PER_SEC;
break;
case TSDB_TIME_PRECISION_MILLI:
ts *= (1e6);
tsInt64 *= (1e6);
ts *= NANOSECOND_PER_MSEC;
tsInt64 *= NANOSECOND_PER_MSEC;
break;
case TSDB_TIME_PRECISION_MICRO:
ts *= (1e3);
tsInt64 *= (1e3);
ts *= NANOSECOND_PER_USEC;
tsInt64 *= NANOSECOND_PER_USEC;
break;
case TSDB_TIME_PRECISION_NANO:
break;
default:
ASSERT(0);
}
if(ts >= (double)INT64_MAX || ts <= 0){
if(ts >= (double)INT64_MAX || ts < 0){
return -1;
}
return tsInt64;
}
static int64_t smlGetTimeNow(int8_t precision) {
switch (precision) {
case TSDB_TIME_PRECISION_HOURS:
return taosGetTimestampMs()/1000/3600;
case TSDB_TIME_PRECISION_MINUTES:
return taosGetTimestampMs()/1000/60;
case TSDB_TIME_PRECISION_SECONDS:
return taosGetTimestampMs()/1000;
case TSDB_TIME_PRECISION_MILLI:
case TSDB_TIME_PRECISION_MICRO:
case TSDB_TIME_PRECISION_NANO:
return taosGetTimestamp(precision);
default:
ASSERT(0);
}
}
static int8_t smlGetTsTypeByLen(int32_t len) {
if (len == TSDB_TIME_PRECISION_SEC_DIGITS) {
return TSDB_TIME_PRECISION_SECONDS;
@ -810,14 +792,15 @@ static int8_t smlGetTsTypeByPrecision(int8_t precision) {
}
static int64_t smlParseInfluxTime(SSmlHandle* info, const char* data, int32_t len){
if(len == 0){
return taosGetTimestamp(TSDB_TIME_PRECISION_NANO);
}
int8_t tsType = smlGetTsTypeByPrecision(info->precision);
if (tsType == -1) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp precision", NULL);
return -1;
}
if(len == 0){
return smlGetTimeNow(tsType);
}
int64_t ts = smlGetTimeValue(data, len, tsType);
if(ts == -1){
@ -1619,7 +1602,8 @@ static int32_t smlParseTSFromJSON(SSmlHandle *info, cJSON *root, SArray *cols) {
smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
if(timeDouble <= 0){
if(timeDouble < 0){
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
uint8_t tsLen = smlGetTimestampLen((int64_t)timeDouble);
@ -1637,7 +1621,9 @@ static int32_t smlParseTSFromJSON(SSmlHandle *info, cJSON *root, SArray *cols) {
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
tsVal = timeDouble;
} else {
} else if(timeDouble == 0){
tsVal = taosGetTimestampNs();
}else {
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
} else if (cJSON_IsObject(timestamp)) {

View File

@ -567,6 +567,7 @@ TEST(testCase, insert_test) {
taos_free_result(pRes);
taos_close(pConn);
}
#endif
TEST(testCase, projection_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
@ -605,7 +606,7 @@ TEST(testCase, projection_query_tables) {
}
taos_free_result(pRes);
for(int32_t i = 0; i < 10000000; i += 20) {
for(int32_t i = 0; i < 100000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@ -625,7 +626,7 @@ TEST(testCase, projection_query_tables) {
printf("start to insert next table\n");
for(int32_t i = 0; i < 10000000; i += 20) {
for(int32_t i = 0; i < 100000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@ -692,8 +693,6 @@ TEST(testCase, projection_query_stables) {
taos_close(pConn);
}
#endif
TEST(testCase, agg_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);

View File

@ -1203,24 +1203,17 @@ TEST(testCase, sml_TD15662_Test) {
SRequestObj *request = (SRequestObj *)createRequest((STscObj *)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
ASSERT_NE(info, nullptr);
const char *sql[] = {
"iyyyje,id=iyyyje_41943_1303,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
"hetrey,id=sub_table_0123456,t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64",
};
int ret = smlProcess(info, (char **)sql, sizeof(sql) / sizeof(sql[0]));
ASSERT_EQ(ret, 0);
// case 1
TAOS_RES *res = taos_query(taos, "select * from t_a5615048edae55218a22a149edebdc82");
ASSERT_NE(res, nullptr);
TAOS_ROW row = taos_fetch_row(res);
int64_t ts = *(int64_t*)row[0];
ASSERT_EQ(ts, 1626006833639000000);
taos_free_result(res);
destroyRequest(request);
smlDestroyInfo(info);
}
TEST(testCase, sml_TD15735_Test) {
@ -1262,11 +1255,11 @@ TEST(testCase, sml_TD15742_Test) {
SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
ASSERT_NE(info, nullptr);
const char *sql[] = {
"zgzbix 1626006833641 False id=zgzbix_992_38861 t0=t t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7=\"binaryTagValue\" t8=L\"ncharTagValue\"",
"test_ms,t0=t c0=f 1626006833641",
};
int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
ASSERT_EQ(ret, 0);

View File

@ -188,13 +188,14 @@ int main(int argc, char const *argv[]) {
}
if (dmInitLog() != 0) {
printf("failed to start since init log error");
printf("failed to start since init log error\n");
taosCleanupArgs();
return -1;
}
if (taosInitCfg(configDir, global.envCmd, global.envFile, global.apolloUrl, global.pArgs, 0) != 0) {
dError("failed to start since read config error");
taosCloseLog();
taosCleanupArgs();
return -1;
}

View File

@ -126,7 +126,9 @@ int32_t mmPutRpcMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return mmPutRpcMsgToWorker(&pMgmt->readWorker, pMsg);
}
int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { return mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg); }
int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg);
}
int32_t mmStartWorker(SMnodeMgmt *pMgmt) {
SSingleWorkerCfg qCfg = {

View File

@ -314,6 +314,10 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_MERGE_EXEC, vmPutNodeMsgToMergeQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_WRITE_EXEC, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RUN, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DISPATCH, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RECOVER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_VNODE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_VNODE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;

View File

@ -70,11 +70,10 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
if (code != 0) {
if (terrno != 0) code = terrno;
vmSendRsp(pMsg, code);
}
dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
}
}
static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
@ -85,16 +84,15 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
if (code != 0) {
if (terrno != 0) code = terrno;
vmSendRsp(pMsg, code);
}
dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
}
}
static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;
SArray *pArray = taosArrayInit(numOfMsgs, sizeof(SRpcMsg *));
SArray * pArray = taosArrayInit(numOfMsgs, sizeof(SRpcMsg *));
if (pArray == NULL) {
dError("failed to process %d msgs in write-queue since %s", numOfMsgs, terrstr());
return;
@ -216,16 +214,15 @@ static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
if (code != 0) {
if (terrno != 0) code = terrno;
vmSendRsp(pMsg, code);
}
dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
}
}
}
static int32_t vmPutNodeMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
SRpcMsg *pRpc = pMsg;
SRpcMsg * pRpc = pMsg;
SMsgHead *pHead = pRpc->pCont;
int32_t code = 0;
@ -304,7 +301,7 @@ int32_t vmPutNodeMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
static int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc, EQueueType qtype) {
SMsgHead *pHead = pRpc->pCont;
SMsgHead * pHead = pRpc->pCont;
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
if (pVnode == NULL) return -1;

View File

@ -96,6 +96,11 @@ static inline int32_t dmPushToProcQueue(SProc *proc, SProcQueue *queue, SRpcMsg
const int32_t fullLen = headLen + bodyLen + 8;
const int64_t handle = (int64_t)pMsg->info.handle;
if (fullLen > queue->total) {
terrno = TSDB_CODE_OUT_OF_RANGE;
return -1;
}
taosThreadMutexLock(&queue->mutex);
if (fullLen > queue->avail) {
taosThreadMutexUnlock(&queue->mutex);
@ -448,7 +453,7 @@ void dmPutToProcPQueue(SProc *proc, SRpcMsg *pMsg, EProcFuncType ftype) {
break;
}
if (retry == 10) {
if (terrno != TSDB_CODE_OUT_OF_SHM_MEM) {
pMsg->code = terrno;
if (pMsg->contLen > 0) {
rpcFreeCont(pMsg->pCont);

View File

@ -340,8 +340,7 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb
if (pHbReq->query) {
SQueryHbReqBasic *pBasic = pHbReq->query;
SRpcConnInfo connInfo = {0};
rpcGetConnInfo(pMsg->info.handle, &connInfo);
SRpcConnInfo connInfo = pMsg->conn;
SConnObj *pConn = mndAcquireConn(pMnode, pBasic->connId);
if (pConn == NULL) {

View File

@ -233,6 +233,7 @@ static int32_t mndUserActionUpdate(SSdb *pSdb, SUserObj *pOld, SUserObj *pNew) {
mTrace("user:%s, perform update action, old row:%p new row:%p", pOld->user, pOld, pNew);
taosWLockLatch(&pOld->lock);
pOld->updateTime = pNew->updateTime;
pOld->authVersion = pNew->authVersion;
memcpy(pOld->pass, pNew->pass, TSDB_PASSWORD_LEN);
TSWAP(pOld->readDbs, pNew->readDbs);
TSWAP(pOld->writeDbs, pNew->writeDbs);
@ -765,6 +766,7 @@ int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_
continue;
}
pUsers[i].version = ntohl(pUsers[i].version);
if (pUser->authVersion <= pUsers[i].version) {
mndReleaseUser(pMnode, pUser);
continue;

View File

@ -24,12 +24,14 @@
extern "C" {
#endif
// clang-format off
#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }}
#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }}
#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }}
// clang-format on
typedef struct SSdbRaw {
int8_t type;

View File

@ -20,6 +20,7 @@
#define SDB_TABLE_SIZE 24
#define SDB_RESERVE_SIZE 512
#define SDB_FILE_VER 1
static int32_t sdbRunDeployFp(SSdb *pSdb) {
mDebug("start to deploy sdb");
@ -39,7 +40,22 @@ static int32_t sdbRunDeployFp(SSdb *pSdb) {
}
static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) {
int32_t ret = taosReadFile(pFile, &pSdb->curVer, sizeof(int64_t));
int64_t sver = 0;
int32_t ret = taosReadFile(pFile, &sver, sizeof(int64_t));
if (ret < 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (ret != sizeof(int64_t)) {
terrno = TSDB_CODE_FILE_CORRUPTED;
return -1;
}
if (sver != SDB_FILE_VER) {
terrno = TSDB_CODE_FILE_CORRUPTED;
return -1;
}
ret = taosReadFile(pFile, &pSdb->curVer, sizeof(int64_t));
if (ret < 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
@ -96,6 +112,12 @@ static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) {
}
static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) {
int64_t sver = SDB_FILE_VER;
if (taosWriteFile(pFile, &sver, sizeof(int64_t)) != sizeof(int64_t)) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (taosWriteFile(pFile, &pSdb->curVer, sizeof(int64_t)) != sizeof(int64_t)) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;

View File

@ -126,6 +126,8 @@ STqReadHandle *tqInitSubmitMsgScanner(SMeta *pMeta);
void tqReadHandleSetColIdList(STqReadHandle *pReadHandle, SArray *pColIdList);
int32_t tqReadHandleSetTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int32_t tqReadHandleAddTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int32_t tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList);
int32_t tqReadHandleSetMsg(STqReadHandle *pHandle, SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock(STqReadHandle *pHandle);
bool tqNextDataBlockFilterOut(STqReadHandle *pHandle, SHashObj *filterOutUids);

View File

@ -121,10 +121,18 @@ int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId);
int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t workerId);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId);
int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen);
#if 0
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId);
int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t workerId);
#endif
int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* data);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg);
// sma
int32_t smaOpen(SVnode* pVnode);

View File

@ -106,11 +106,10 @@ static void tdSRowDemo() {
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
void* pIter = NULL;
STqExec* pExec = NULL;
while (1) {
pIter = taosHashIterate(pTq->execs, pIter);
if (pIter == NULL) break;
pExec = (STqExec*)pIter;
STqExec* pExec = (STqExec*)pIter;
if (pExec->subType == TOPIC_SUB_TYPE__DB) {
if (!isAdd) {
int32_t sz = taosArrayGetSize(tbUidList);
@ -129,7 +128,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
return 0;
}
int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver, SRpcHandleInfo handleInfo) {
if (msgType != TDMT_VND_SUBMIT) return 0;
void* pIter = NULL;
STqExec* pExec = NULL;
@ -239,10 +238,9 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqDataBlkRsp(&abuf, &rsp);
pMsg->pCont = buf;
pMsg->contLen = tlen;
pMsg->code = 0;
tmsgSendRsp(pMsg);
SRpcMsg resp = {.info = handleInfo, .pCont = buf, .contLen = tlen, .code = 0};
tmsgSendRsp(&resp);
atomic_store_ptr(&pExec->pushHandle.handle, NULL);
taosWUnLockLatch(&pExec->pushHandle.lock);
@ -275,6 +273,9 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
}
memcpy(data, msg, msgLen);
tqProcessStreamTriggerNew(pTq, data);
#if 0
SRpcMsg req = {
.msgType = TDMT_VND_STREAM_TRIGGER,
.pCont = data,
@ -282,6 +283,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
};
tmsgPutToQueue(&pTq->pVnode->msgCb, FETCH_QUEUE, &req);
#endif
return 0;
}
@ -663,10 +665,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqDataBlkRsp(&abuf, &rsp);
pMsg->pCont = buf;
pMsg->contLen = tlen;
pMsg->code = 0;
tmsgSendRsp(pMsg);
SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0};
tmsgSendRsp(&resp);
tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
TD_VID(pTq->pVnode), fetchOffset, consumerId, pReq->epoch, rsp.blockNum, rsp.reqOffset, rsp.rspOffset);
@ -845,12 +846,10 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
/*rsp.pBlockData = pRes;*/
/*taosArrayDestroyEx(rsp.pBlockData, (void (*)(void*))tDeleteSSDataBlock);*/
pMsg->pCont = buf;
pMsg->contLen = msgLen;
pMsg->code = 0;
SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = msgLen, .code = 0};
tqDebug("vg %d offset %ld msgType %d from consumer %ld (epoch %d) actual rsp", TD_VID(pTq->pVnode), fetchOffset,
pHead->msgType, consumerId, pReq->epoch);
tmsgSendRsp(pMsg);
tmsgSendRsp(&resp);
taosMemoryFree(pHead);
return 0;
} else {
@ -878,10 +877,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqPollRspV2(&abuf, &rspV2);
pMsg->pCont = buf;
pMsg->contLen = tlen;
pMsg->code = 0;
tmsgSendRsp(pMsg);
SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0};
tmsgSendRsp(&resp);
tqDebug("vg %d offset %ld from consumer %ld (epoch %d) not rsp", TD_VID(pTq->pVnode), fetchOffset, consumerId,
pReq->epoch);
/*}*/
@ -980,12 +978,24 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
}
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) {
pTask->status = TASK_STATUS__IDLE;
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;
pTask->inputQ = taosOpenQueue();
pTask->outputQ = taosOpenQueue();
pTask->inputQAll = taosAllocateQall();
pTask->outputQAll = taosAllocateQall();
if (pTask->inputQ == NULL || pTask->outputQ == NULL || pTask->inputQAll == NULL || pTask->outputQAll == NULL)
goto FAIL;
if (pTask->execType != TASK_EXEC__NONE) {
// expand runners
pTask->exec.numOfRunners = parallel;
pTask->exec.runners = taosMemoryCalloc(parallel, sizeof(SStreamRunner));
if (pTask->exec.runners == NULL) {
return -1;
goto FAIL;
}
for (int32_t i = 0; i < parallel; i++) {
STqReadHandle* pStreamReader = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
@ -1007,6 +1017,13 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) {
}
return 0;
FAIL:
if (pTask->inputQ) taosCloseQueue(pTask->inputQ);
if (pTask->outputQ) taosCloseQueue(pTask->outputQ);
if (pTask->inputQAll) taosFreeQall(pTask->inputQAll);
if (pTask->outputQAll) taosFreeQall(pTask->outputQAll);
if (pTask) taosMemoryFree(pTask);
return -1;
}
int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) {
@ -1058,6 +1075,7 @@ int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t wo
return 0;
}
#if 0
int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* data) {
SStreamDataSubmit* pSubmit = NULL;
@ -1108,6 +1126,7 @@ FAIL:
}
return -1;
}
#endif
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId) {
SStreamTaskExecReq req;
@ -1125,25 +1144,28 @@ int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId)
return 0;
}
int32_t tqProcessStreamTrigger2(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* pReq) {
void* pIter = NULL;
bool failed = false;
SStreamDataSubmit* pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
if (pSubmit == NULL) {
failed = true;
goto SET_TASK_FAIL;
}
pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t));
if (pSubmit->dataRef == NULL) {
failed = true;
goto SET_TASK_FAIL;
}
pSubmit->type = STREAM_DATA_TYPE_SUBMIT_BLOCK;
pSubmit->sourceVer = ver;
pSubmit->sourceVg = pTq->pVnode->config.vgId;
pSubmit->type = STREAM_INPUT__DATA_SUBMIT;
/*pSubmit->sourceVer = ver;*/
/*pSubmit->sourceVg = pTq->pVnode->config.vgId;*/
pSubmit->data = pReq;
*pSubmit->dataRef = 1;
SET_TASK_FAIL:
while (1) {
pIter = taosHashIterate(pTq->pStreamTasks, pIter);
if (pIter == NULL) break;
@ -1162,7 +1184,18 @@ int32_t tqProcessStreamTrigger2(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
int8_t execStatus = atomic_load_8(&pTask->status);
if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) {
// TODO dispatch task launch msg to fetch queue
SStreamTaskRunReq* pRunReq = taosMemoryMalloc(sizeof(SStreamTaskRunReq));
if (pRunReq == NULL) continue;
// TODO: do we need htonl?
pRunReq->head.vgId = pTq->pVnode->config.vgId;
pRunReq->streamId = pTask->streamId;
pRunReq->taskId = pTask->taskId;
SRpcMsg msg = {
.msgType = TDMT_VND_TASK_RUN,
.pCont = pRunReq,
.contLen = sizeof(SStreamTaskRunReq),
};
tmsgPutToQueue(&pTq->pVnode->msgCb, FETCH_QUEUE, &msg);
}
} else {
@ -1174,11 +1207,53 @@ int32_t tqProcessStreamTrigger2(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
streamDataSubmitRefDec(pSubmit);
return 0;
} else {
if (pSubmit) {
if (pSubmit->dataRef) {
taosMemoryFree(pSubmit->dataRef);
}
taosFreeQitem(pSubmit);
}
return -1;
}
}
int32_t tqProcessTaskExec2(STQ* pTq, char* msg, int32_t msgLen) {
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
//
SStreamTaskRunReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessRunReq(pTask, &pTq->pVnode->msgCb);
return 0;
}
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg) {
SStreamDispatchReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessDispatchReq(pTask, &pTq->pVnode->msgCb, pReq, pMsg);
return 0;
}
int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRecoverReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessRecoverReq(pTask, &pTq->pVnode->msgCb, pReq, pMsg);
return 0;
}
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamDispatchRsp* pRsp = pMsg->pCont;
int32_t taskId = pRsp->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessDispatchRsp(pTask, &pTq->pVnode->msgCb, pRsp);
return 0;
}
int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRecoverRsp* pRsp = pMsg->pCont;
int32_t taskId = pRsp->taskId;
SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
streamTaskProcessRecoverRsp(pTask, pRsp);
return 0;
}

View File

@ -231,3 +231,14 @@ int tqReadHandleAddTbUidList(STqReadHandle* pHandle, const SArray* tbUidList) {
return 0;
}
int tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList) {
ASSERT(pHandle->tbIdHash != NULL);
for(int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
int64_t* pKey = (int64_t*) taosArrayGet(tbUidList, i);
taosHashRemove(pHandle->tbIdHash, pKey, sizeof(int64_t));
}
return 0;
}

View File

@ -2076,8 +2076,14 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
#endif
if (TD_SUPPORT_UPDATE(pCfg->update)) {
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
if (lastKeyAppend != key) {
if (lastKeyAppend != TSKEY_INITIAL_VAL) {
++curRow;
}
lastKeyAppend = key;
}
// load data from file firstly
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
if (rv1 != TD_ROW_SVER(row1)) {
rv1 = TD_ROW_SVER(row1);
@ -2087,7 +2093,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
// still assign data into current row
mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
if (cur->win.skey == TSKEY_INITIAL_VAL) {
@ -2099,7 +2105,6 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
cur->mixBlock = true;
moveToNextRowInMem(pCheckInfo);
++curRow;
pos += step;
} else {

View File

@ -106,11 +106,13 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg
pMsg->contLen - sizeof(SMsgHead)) < 0) {
}
} break;
#if 0
case TDMT_VND_TASK_WRITE_EXEC: {
if (tqProcessTaskExec(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), pMsg->contLen - sizeof(SMsgHead),
0) < 0) {
}
} break;
#endif
case TDMT_VND_ALTER_VNODE:
break;
default:
@ -181,11 +183,32 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return vnodeGetTableMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg, pInfo->workerId);
case TDMT_VND_TASK_RUN: {
int32_t code = tqProcessTaskRunReq(pVnode->pTq, pMsg);
pMsg->pCont = NULL;
return code;
}
case TDMT_VND_TASK_DISPATCH:
return tqProcessTaskDispatchReq(pVnode->pTq, pMsg);
case TDMT_VND_TASK_RECOVER:
return tqProcessTaskRecoverReq(pVnode->pTq, pMsg);
case TDMT_VND_TASK_DISPATCH_RSP:
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
case TDMT_VND_TASK_RECOVER_RSP:
return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg);
#if 0
case TDMT_VND_TASK_PIPE_EXEC:
case TDMT_VND_TASK_MERGE_EXEC:
return tqProcessTaskExec(pVnode->pTq, msgstr, msgLen, 0);
case TDMT_VND_STREAM_TRIGGER:
return tqProcessStreamTrigger(pVnode->pTq, pMsg->pCont, pMsg->contLen, 0);
case TDMT_VND_STREAM_TRIGGER:{
// refactor, avoid double free
int code = tqProcessStreamTrigger(pVnode->pTq, pMsg->pCont, pMsg->contLen, 0);
pMsg->pCont = NULL;
return code;
}
#endif
case TDMT_VND_QUERY_HEARTBEAT:
return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg);
default:

View File

@ -2885,7 +2885,7 @@ _return:
int32_t ctgGetTbSverFromCache(SCatalog* pCtg, const SName* pTableName, int32_t* sver) {
int32_t ctgGetTbSverFromCache(SCatalog* pCtg, const SName* pTableName, int32_t* sver, int32_t *tbType, uint64_t *suid, char* stbName) {
*sver = -1;
if (NULL == pCtg->dbCache) {
@ -2903,14 +2903,12 @@ int32_t ctgGetTbSverFromCache(SCatalog* pCtg, const SName* pTableName, int32_t*
return TSDB_CODE_SUCCESS;
}
int32_t tbType = 0;
uint64_t suid = 0;
CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock);
STableMeta* tbMeta = taosHashGet(dbCache->tbCache.metaCache, pTableName->tname, strlen(pTableName->tname));
if (tbMeta) {
tbType = tbMeta->tableType;
suid = tbMeta->suid;
if (tbType != TSDB_CHILD_TABLE) {
*tbType = tbMeta->tableType;
*suid = tbMeta->suid;
if (*tbType != TSDB_CHILD_TABLE) {
*sver = tbMeta->sversion;
}
}
@ -2921,44 +2919,49 @@ int32_t ctgGetTbSverFromCache(SCatalog* pCtg, const SName* pTableName, int32_t*
return TSDB_CODE_SUCCESS;
}
if (tbType != TSDB_CHILD_TABLE) {
if (*tbType != TSDB_CHILD_TABLE) {
ctgReleaseDBCache(pCtg, dbCache);
ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, tbType, dbFName, pTableName->tname);
ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tbType, dbFName, pTableName->tname);
return TSDB_CODE_SUCCESS;
}
ctgDebug("Got subtable meta from cache, dbFName:%s, tbName:%s, suid:%" PRIx64, dbFName, pTableName->tname, suid);
ctgDebug("Got subtable meta from cache, dbFName:%s, tbName:%s, suid:%" PRIx64, dbFName, pTableName->tname, *suid);
CTG_LOCK(CTG_READ, &dbCache->tbCache.stbLock);
STableMeta **stbMeta = taosHashGet(dbCache->tbCache.stbCache, &suid, sizeof(suid));
STableMeta **stbMeta = taosHashGet(dbCache->tbCache.stbCache, suid, sizeof(*suid));
if (NULL == stbMeta || NULL == *stbMeta) {
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock);
ctgReleaseDBCache(pCtg, dbCache);
ctgDebug("stb not in stbCache, suid:%"PRIx64, suid);
ctgDebug("stb not in stbCache, suid:%"PRIx64, *suid);
return TSDB_CODE_SUCCESS;
}
if ((*stbMeta)->suid != suid) {
if ((*stbMeta)->suid != *suid) {
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock);
ctgReleaseDBCache(pCtg, dbCache);
ctgError("stable suid in stbCache mis-match, expected suid:%"PRIx64 ",actual suid:%"PRIx64, suid, (*stbMeta)->suid);
ctgError("stable suid in stbCache mis-match, expected suid:%"PRIx64 ",actual suid:%"PRIx64, *suid, (*stbMeta)->suid);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
size_t nameLen = 0;
char* name = taosHashGetKey(*stbMeta, &nameLen);
strncpy(stbName, name, nameLen);
stbName[nameLen] = 0;
*sver = (*stbMeta)->sversion;
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock);
ctgReleaseDBCache(pCtg, dbCache);
ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, tbType, dbFName, pTableName->tname);
ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tbType, dbFName, pTableName->tname);
return TSDB_CODE_SUCCESS;
}
int32_t catalogChkTbMetaVersion(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, SArray* pTables) {
CTG_API_ENTER();
@ -2977,9 +2980,26 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, void *pTrans, const SEpSet* pMgm
continue;
}
ctgGetTbSverFromCache(pCtg, &name, &sver);
int32_t tbType = 0;
uint64_t suid = 0;
char stbName[TSDB_TABLE_FNAME_LEN];
ctgGetTbSverFromCache(pCtg, &name, &sver, &tbType, &suid, stbName);
if (sver >= 0 && sver < pTb->sver) {
catalogRemoveTableMeta(pCtg, &name); //TODO REMOVE STB FROM CACHE
switch (tbType) {
case TSDB_CHILD_TABLE: {
SName stb = name;
strcpy(stb.tname, stbName);
catalogRemoveTableMeta(pCtg, &stb);
break;
}
case TSDB_SUPER_TABLE:
case TSDB_NORMAL_TABLE:
catalogRemoveTableMeta(pCtg, &name);
break;
default:
ctgError("ignore table type %d", tbType);
break;
}
}
}

View File

@ -125,19 +125,10 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle) {
return pTaskInfo;
}
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
// traverse to the stream scanner node to add this table id
SOperatorInfo* pInfo = pTaskInfo->pRoot;
while (pInfo->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
pInfo = pInfo->pDownstream[0];
}
SStreamBlockScanInfo* pScanInfo = pInfo->info;
if (isAdd) {
static SArray* filterQualifiedChildTables(const SStreamBlockScanInfo* pScanInfo, const SArray* tableIdList) {
SArray* qa = taosArrayInit(4, sizeof(tb_uid_t));
// let's discard the tables that are not created according to the queried super table.
SMetaReader mr = {0};
metaReaderInit(&mr, pScanInfo->readHandle.meta, 0);
for (int32_t i = 0; i < taosArrayGetSize(tableIdList); ++i) {
@ -158,17 +149,32 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
metaReaderClear(&mr);
return qa;
}
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
// traverse to the stream scanner node to add this table id
SOperatorInfo* pInfo = pTaskInfo->pRoot;
while (pInfo->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
pInfo = pInfo->pDownstream[0];
}
int32_t code = 0;
SStreamBlockScanInfo* pScanInfo = pInfo->info;
if (isAdd) { // add new table id
SArray* qa = filterQualifiedChildTables(pScanInfo, tableIdList);
qDebug(" %d qualified child tables added into stream scanner", (int32_t)taosArrayGetSize(qa));
int32_t code = tqReadHandleAddTbUidList(pScanInfo->streamBlockReader, qa);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} else {
assert(0);
code = tqReadHandleAddTbUidList(pScanInfo->streamBlockReader, qa);
taosArrayDestroy(qa);
} else { // remove the table id in current list
qDebug(" %d remove child tables from the stream scanner", (int32_t)taosArrayGetSize(tableIdList));
code = tqReadHandleRemoveTbUidList(pScanInfo->streamBlockReader, tableIdList);
}
return TSDB_CODE_SUCCESS;
return code;
}
int32_t qGetQueriedTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, int32_t* tversion) {
@ -177,8 +183,16 @@ int32_t qGetQueriedTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tab
*sversion = pTaskInfo->schemaVer.sversion;
*tversion = pTaskInfo->schemaVer.tversion;
if (pTaskInfo->schemaVer.dbname) {
strcpy(dbName, pTaskInfo->schemaVer.dbname);
} else {
dbName[0] = 0;
}
if (pTaskInfo->schemaVer.tablename) {
strcpy(tableName, pTaskInfo->schemaVer.tablename);
} else {
tableName[0] = 0;
}
return 0;
}

View File

@ -2062,15 +2062,7 @@ void setExecutionContext(int32_t numOfOutput, uint64_t groupId, SExecTaskInfo* p
pAggInfo->groupId = groupId;
}
/**
* For interval query of both super table and table, copy the data in ascending order, since the output results are
* ordered in SWindowResutl already. While handling the group by query for both table and super table,
* all group result are completed already.
*
* @param pQInfo
* @param result
*/
int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo,
int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo,
int32_t* rowCellOffset, SqlFunctionCtx* pCtx, int32_t numOfExprs) {
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t start = pGroupResInfo->index;
@ -2087,6 +2079,15 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn
continue;
}
if (pBlock->info.groupId == 0) {
pBlock->info.groupId = pPos->groupId;
} else {
// current value belongs to different group, it can't be packed into one datablock
if (pBlock->info.groupId != pPos->groupId) {
break;
}
}
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
break;
}
@ -2100,9 +2101,8 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn
if (pCtx[j].fpSet.finalize) {
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(taskInfo), tstrerror(code));
taskInfo->code = code;
longjmp(taskInfo->env, code);
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
longjmp(pTaskInfo->env, code);
}
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
// do nothing, todo refactor
@ -2124,7 +2124,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn
}
}
// qDebug("QInfo:0x%"PRIx64" copy data to query buf completed", GET_TASKID(pRuntimeEnv));
qDebug("%s result generated, rows:%d, groupId:%"PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows, pBlock->info.groupId);
blockDataUpdateTsWindow(pBlock);
return 0;
}
@ -2145,10 +2145,9 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
return;
}
// clear the existed group id
pBlock->info.groupId = 0;
doCopyToSDataBlock(pTaskInfo, pBlock, pExprInfo, pBuf, pGroupResInfo, rowCellOffset, pCtx, numOfExprs);
// add condition (pBlock->info.rows >= 1) just to make runtime happy
blockDataUpdateTsWindow(pBlock);
}
static void updateNumOfRowsInResultRows(SqlFunctionCtx* pCtx, int32_t numOfOutput, SResultRowInfo* pResultRowInfo,
@ -3656,7 +3655,6 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
doSetOperatorCompleted(pOperator);
}
doSetOperatorCompleted(pOperator);
return (blockDataGetNumOfRows(pInfo->pRes) != 0) ? pInfo->pRes : NULL;
}
@ -4576,10 +4574,11 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t*
return pExprs;
}
static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPTR_EXEC_MODEL model) {
static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPTR_EXEC_MODEL model, char* dbFName) {
SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTaskInfo->schemaVer.dbname = strdup(dbFName);
pTaskInfo->cost.created = taosGetTimestampMs();
pTaskInfo->id.queryId = queryId;
pTaskInfo->execModel = model;
@ -4994,16 +4993,10 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
return NULL;
}
const char* tname = pTaskInfo->schemaVer.tablename;
for (int32_t i = 0; i < numOfCols; ++i) {
STargetNode* pNode = (STargetNode*)nodesListGetNode(pNodeList, i);
SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
if (tname != NULL && (pTaskInfo->schemaVer.dbname == NULL) &&
strncmp(pColNode->tableName, tname, tListLen(pColNode->tableName)) == 0) {
pTaskInfo->schemaVer.dbname = strdup(pColNode->dbName);
}
SColMatchInfo c = {0};
c.output = true;
c.colId = pColNode->colId;
@ -5099,7 +5092,7 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
uint64_t queryId = pPlan->id.queryId;
int32_t code = TSDB_CODE_SUCCESS;
*pTaskInfo = createExecTaskInfo(queryId, taskId, model);
*pTaskInfo = createExecTaskInfo(queryId, taskId, model, pPlan->dbFName);
if (*pTaskInfo == NULL) {
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
goto _complete;

View File

@ -41,6 +41,13 @@
sToken = tStrGetToken(pSql, &index, false); \
} while (0)
#define NEXT_VALID_TOKEN(pSql, sToken) \
do { \
sToken.n = tGetToken(pSql, &sToken.type); \
sToken.z = pSql; \
pSql += sToken.n; \
} while (TK_NK_SPACE == sToken.type)
typedef struct SInsertParseContext {
SParseContext* pComCxt; // input
char* pSql; // input
@ -482,9 +489,11 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int
return buildSyntaxErrMsg(pMsgBuf, "invalid bool data", pToken->z);
}
} else if (pToken->type == TK_NK_INTEGER) {
return func(pMsgBuf, ((taosStr2Int64(pToken->z, NULL, 10) == 0) ? &FALSE_VALUE : &TRUE_VALUE), pSchema->bytes, param);
return func(pMsgBuf, ((taosStr2Int64(pToken->z, NULL, 10) == 0) ? &FALSE_VALUE : &TRUE_VALUE), pSchema->bytes,
param);
} else if (pToken->type == TK_NK_FLOAT) {
return func(pMsgBuf, ((taosStr2Double(pToken->z, NULL) == 0) ? &FALSE_VALUE : &TRUE_VALUE), pSchema->bytes, param);
return func(pMsgBuf, ((taosStr2Double(pToken->z, NULL) == 0) ? &FALSE_VALUE : &TRUE_VALUE), pSchema->bytes,
param);
} else {
return buildSyntaxErrMsg(pMsgBuf, "invalid bool data", pToken->z);
}
@ -685,7 +694,7 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, SParsedDataColInfo*
isOrdered = false;
}
if (index < 0) {
return buildSyntaxErrMsg(&pCxt->msg, "invalid column/tag name", sToken.z);
return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMN, sToken.z);
}
if (pColList->cols[index].valStat == VAL_STAT_HAS) {
return buildSyntaxErrMsg(&pCxt->msg, "duplicated column name", sToken.z);
@ -895,8 +904,10 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tb
return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
}
CHECK_CODE(parseTagsClause(pCxt, pCxt->pTableMeta->schema, getTableInfo(pCxt->pTableMeta).precision, name->tname));
NEXT_TOKEN(pCxt->pSql, sToken);
if (TK_NK_RP != sToken.type) {
NEXT_VALID_TOKEN(pCxt->pSql, sToken);
if (TK_NK_COMMA == sToken.type) {
return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_TAGS_NOT_MATCHED);
} else if (TK_NK_RP != sToken.type) {
return buildSyntaxErrMsg(&pCxt->msg, ") is expected", sToken.z);
}
@ -996,8 +1007,10 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo
pDataBlock->size += extendedRowSize; // len;
}
NEXT_TOKEN(pCxt->pSql, sToken);
if (TK_NK_RP != sToken.type) {
NEXT_VALID_TOKEN(pCxt->pSql, sToken);
if (TK_NK_COMMA == sToken.type) {
return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMNS_NUM);
} else if (TK_NK_RP != sToken.type) {
return buildSyntaxErrMsg(&pCxt->msg, ") expected", sToken.z);
}
@ -1060,7 +1073,7 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
int32_t tbNum = 0;
char tbFName[TSDB_TABLE_FNAME_LEN];
bool autoCreateTbl = false;
STableMeta *pMeta = NULL;
STableMeta* pMeta = NULL;
// for each table
while (1) {
@ -1160,7 +1173,8 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memcpy(tags, &pCxt->tags, sizeof(pCxt->tags));
(*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj, pCxt->pTableBlockHashObj);
(*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj,
pCxt->pTableBlockHashObj);
memset(&pCxt->tags, 0, sizeof(pCxt->tags));
pCxt->pVgroupsHashObj = NULL;
@ -1238,7 +1252,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
if (TSDB_CODE_SUCCESS == code) {
code = parseInsertBody(&context);
}
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_CODE_SUCCESS == code || NEED_CLIENT_HANDLE_ERROR(code)) {
SName* pTable = taosHashIterate(context.pTableNameHashObj, NULL);
while (NULL != pTable) {
taosArrayPush((*pQuery)->pTableList, pTable);
@ -1581,7 +1595,7 @@ typedef struct SmlExecTableHandle {
typedef struct SmlExecHandle {
SHashObj* pBlockHash;
SmlExecTableHandle tableExecHandle;
SQuery *pQuery;
SQuery* pQuery;
} SSmlExecHandle;
static void smlDestroyTableHandle(void* pHandle) {
@ -1673,9 +1687,9 @@ static int32_t smlBuildTagRow(SArray* cols, SKVRowBuilder* tagsBuilder, SParsedD
SSchema* pTagSchema = &pSchema[tags->boundColumns[i] - 1]; // colId starts with 1
param.schema = pTagSchema;
SSmlKv* kv = taosArrayGetP(cols, i);
if(IS_VAR_DATA_TYPE(kv->type)){
if (IS_VAR_DATA_TYPE(kv->type)) {
KvRowAppend(msg, kv->value, kv->length, &param);
}else{
} else {
KvRowAppend(msg, &(kv->value), kv->length, &param);
}
}
@ -1688,8 +1702,8 @@ static int32_t smlBuildTagRow(SArray* cols, SKVRowBuilder* tagsBuilder, SParsedD
return TSDB_CODE_SUCCESS;
}
int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols, bool format,
STableMeta *pTableMeta, char *tableName, char *msgBuf, int16_t msgBufLen) {
int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format, STableMeta* pTableMeta,
char* tableName, char* msgBuf, int16_t msgBufLen) {
SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen};
SSmlExecHandle* smlHandle = (SSmlExecHandle*)handle;
@ -1702,7 +1716,8 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols
return ret;
}
SKVRow row = NULL;
ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tagsBuilder, &smlHandle->tableExecHandle.tags, pTagsSchema, &row, &pBuf);
ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tagsBuilder, &smlHandle->tableExecHandle.tags, pTagsSchema,
&row, &pBuf);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@ -1733,7 +1748,7 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols
initRowBuilder(&pDataBlock->rowBuilder, pDataBlock->pTableMeta->sversion, &pDataBlock->boundColumnInfo);
int32_t rowNum = taosArrayGetSize(cols);
if(rowNum <= 0) {
if (rowNum <= 0) {
return buildInvalidOperationMsg(&pBuf, "cols size <= 0");
}
ret = allocateMemForSize(pDataBlock, extendedRowSize * rowNum);
@ -1744,9 +1759,9 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols
for (int32_t r = 0; r < rowNum; ++r) {
STSRow* row = (STSRow*)(pDataBlock->pData + pDataBlock->size); // skip the SSubmitBlk header
tdSRowResetBuf(pBuilder, row);
void *rowData = taosArrayGetP(cols, r);
void* rowData = taosArrayGetP(cols, r);
size_t rowDataSize = 0;
if(format){
if (format) {
rowDataSize = taosArrayGetSize(rowData);
}
@ -1781,9 +1796,9 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols
kv->i = convertTimePrecision(kv->i, TSDB_TIME_PRECISION_NANO, pTableMeta->tableInfo.precision);
}
if(IS_VAR_DATA_TYPE(kv->type)){
if (IS_VAR_DATA_TYPE(kv->type)) {
MemRowAppend(&pBuf, kv->value, colLen, &param);
}else{
} else {
MemRowAppend(&pBuf, &(kv->value), colLen, &param);
}
}

View File

@ -120,6 +120,20 @@ static int32_t getTableMeta(STranslateContext* pCxt, const char* pDbName, const
return getTableMetaImpl(pCxt, toName(pCxt->pParseCxt->acctId, pDbName, pTableName, &name), pMeta);
}
static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName, const char* pTableName,
STableMeta** pMeta) {
SParseContext* pParCxt = pCxt->pParseCxt;
SName name;
toName(pCxt->pParseCxt->acctId, pDbName, pTableName, &name);
int32_t code =
catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false);
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pDbName,
pTableName);
}
return code;
}
static int32_t getTableDistVgInfo(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) {
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = collectUseDatabase(pName, pCxt->pDbs);
@ -3201,7 +3215,7 @@ static int32_t translateExplain(STranslateContext* pCxt, SExplainStmt* pStmt) {
}
static int32_t translateDescribe(STranslateContext* pCxt, SDescribeStmt* pStmt) {
return getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pStmt->pMeta);
return refreshGetTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pStmt->pMeta);
}
static int32_t translateKillConnection(STranslateContext* pCxt, SKillStmt* pStmt) {

View File

@ -58,7 +58,7 @@ static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen
// 3. valid column names
for (int32_t j = i + 1; j < numOfCols; ++j) {
if (strncasecmp(pSchema[i].name, pSchema[j].name, sizeof(pSchema[i].name) - 1) == 0) {
if (strncmp(pSchema[i].name, pSchema[j].name, sizeof(pSchema[i].name) - 1) == 0) {
return false;
}
}

View File

@ -131,6 +131,7 @@ typedef struct SQWTaskCtx {
void *taskHandle;
void *sinkHandle;
SSubplan *plan;
STbVerInfo tbInfo;
} SQWTaskCtx;
typedef struct SQWSchStatus {

View File

@ -36,7 +36,7 @@ int32_t qwBuildAndSendFetchRsp(SRpcHandleInfo *pConn, SRetrieveTableRsp *pRsp, i
int32_t code);
void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete);
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code);
int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo);
int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code);
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num);
void qwFreeFetchRsp(void *msg);

View File

@ -718,6 +718,16 @@ int32_t qwGetResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void
return TSDB_CODE_SUCCESS;
}
void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) {
char dbFName[TSDB_DB_FNAME_LEN];
char tbName[TSDB_TABLE_NAME_LEN];
qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion);
sprintf(ctx->tbInfo.tbFName, "%s.%s", dbFName, tbName);
}
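qwSaveTbVersionInfo above records the queried table's schema and tag versions and joins the database and table names into a single db.tb string. A standalone sketch of that composition with a bounded write follows (the buffer size is an assumption standing in for TSDB_TABLE_FNAME_LEN):

#include <stdio.h>

/* Illustrative sketch only: build "db.tb" into a fixed buffer and detect truncation. */
int main(void) {
  const char *dbFName = "1.test_db";
  const char *tbName  = "stb1";
  char        tbFName[193];

  int n = snprintf(tbFName, sizeof(tbFName), "%s.%s", dbFName, tbName);
  if (n < 0 || (size_t)n >= sizeof(tbFName)) {
    fprintf(stderr, "table name truncated\n");
    return 1;
  }
  printf("%s\n", tbFName);
  return 0;
}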
int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) {
int32_t code = 0;
SQWTaskCtx *ctx = NULL;
@ -899,6 +909,11 @@ _return:
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_PARTIAL_SUCCEED);
}
if (readyConnection) {
qwBuildAndSendReadyRsp(readyConnection, code, ctx ? &ctx->tbInfo : NULL);
QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", readyConnection->handle, code, tstrerror(code));
}
if (ctx) {
QW_UPDATE_RSP_CODE(ctx, code);
@ -910,11 +925,6 @@ _return:
qwReleaseTaskCtx(mgmt, ctx);
}
if (readyConnection) {
qwBuildAndSendReadyRsp(readyConnection, code);
QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", readyConnection->handle, code, tstrerror(code));
}
if (code) {
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_FAILED);
}
@ -975,6 +985,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, int8_t taskType, int8_t ex
atomic_store_ptr(&ctx->sinkHandle, sinkHandle);
if (pTaskInfo && sinkHandle) {
qwSaveTbVersionInfo(pTaskInfo, ctx);
QW_ERR_JRET(qwExecTask(QW_FPARAMS(), ctx, NULL));
}
@ -1047,7 +1058,7 @@ _return:
}
if (needRsp) {
qwBuildAndSendReadyRsp(&qwMsg->connInfo, code);
qwBuildAndSendReadyRsp(&qwMsg->connInfo, code, NULL);
QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code));
}
@ -1150,8 +1161,7 @@ int32_t qwProcessFetch(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
QW_ERR_JRET(qwGetResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput));
if (NULL == rsp) {
atomic_store_ptr(&ctx->dataConnInfo.handle, qwMsg->connInfo.handle);
atomic_store_ptr(&ctx->dataConnInfo.ahandle, qwMsg->connInfo.ahandle);
ctx->dataConnInfo = qwMsg->connInfo;
QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_FETCH);
} else {

View File

@ -47,7 +47,7 @@ int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code) {
SQueryTableRsp rsp = {.code = code};
int32_t contLen = tSerializeSQueryTableRsp(NULL, 0, &rsp);
void *msg = rpcMallocCont(contLen);
void * msg = rpcMallocCont(contLen);
tSerializeSQueryTableRsp(msg, contLen, &rsp);
SRpcMsg rpcRsp = {
@ -63,9 +63,14 @@ int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code) {
return TSDB_CODE_SUCCESS;
}
int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code) {
int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo) {
SResReadyRsp *pRsp = (SResReadyRsp *)rpcMallocCont(sizeof(SResReadyRsp));
pRsp->code = code;
if (tbInfo) {
strcpy(pRsp->tbFName, tbInfo->tbFName);
pRsp->sversion = tbInfo->sversion;
pRsp->tversion = tbInfo->tversion;
}
SRpcMsg rpcRsp = {
.msgType = TDMT_VND_RES_READY_RSP,
@ -85,7 +90,7 @@ int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execIn
SExplainRsp rsp = {.numOfPlans = num, .subplanInfo = execInfo};
int32_t contLen = tSerializeSExplainRsp(NULL, 0, &rsp);
void *pRsp = rpcMallocCont(contLen);
void * pRsp = rpcMallocCont(contLen);
tSerializeSExplainRsp(pRsp, contLen, &rsp);
SRpcMsg rpcRsp = {
@ -104,7 +109,7 @@ int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execIn
int32_t qwBuildAndSendHbRsp(SRpcHandleInfo *pConn, SSchedulerHbRsp *pStatus, int32_t code) {
int32_t contLen = tSerializeSSchedulerHbRsp(NULL, 0, pStatus);
void *pRsp = rpcMallocCont(contLen);
void * pRsp = rpcMallocCont(contLen);
tSerializeSSchedulerHbRsp(pRsp, contLen, pStatus);
SRpcMsg rpcRsp = {
@ -212,7 +217,7 @@ int32_t qwBuildAndSendShowRsp(SRpcMsg *pMsg, int32_t code) {
showRsp.tableMeta.numOfColumns = cols;
int32_t bufLen = tSerializeSShowRsp(NULL, 0, &showRsp);
void *pBuf = rpcMallocCont(bufLen);
void * pBuf = rpcMallocCont(bufLen);
tSerializeSShowRsp(pBuf, bufLen, &showRsp);
SRpcMsg rpcMsg = {
@ -341,7 +346,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t code = 0;
SSubQueryMsg *msg = pMsg->pCont;
SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
if (NULL == msg || pMsg->contLen <= sizeof(*msg)) {
QW_ELOG("invalid query msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
@ -361,7 +366,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int64_t rId = msg->refId;
SQWMsg qwMsg = {.node = node, .msg = msg->msg + msg->sqlLen, .msgLen = msg->phyLen, .connInfo = pMsg->info};
char *sql = strndup(msg->msg, msg->sqlLen);
char * sql = strndup(msg->msg, msg->sqlLen);
QW_SCH_TASK_DLOG("processQuery start, node:%p, handle:%p, sql:%s", node, pMsg->info.handle, sql);
taosMemoryFreeClear(sql);
@ -378,8 +383,8 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
bool queryDone = false;
SQueryContinueReq *msg = (SQueryContinueReq *)pMsg->pCont;
bool needStop = false;
SQWTaskCtx *handles = NULL;
SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
SQWTaskCtx * handles = NULL;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid cquery msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
@ -407,7 +412,7 @@ int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
SResReadyReq *msg = pMsg->pCont;
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid task ready msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
@ -467,7 +472,7 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
}
SResFetchReq *msg = pMsg->pCont;
SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid fetch msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
@ -505,7 +510,7 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
int32_t code = 0;
STaskCancelReq *msg = pMsg->pCont;
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
@ -542,7 +547,7 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t code = 0;
STaskDropReq *msg = pMsg->pCont;
SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid task drop msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
@ -581,7 +586,7 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
int32_t code = 0;
SSchedulerHbReq req = {0};
SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
if (NULL == pMsg->pCont) {
QW_ELOG("invalid hb msg, msg:%p, msgLen:%d", pMsg->pCont, pMsg->contLen);

View File

@ -17,7 +17,7 @@ TARGET_INCLUDE_DIRECTORIES(
PUBLIC "${TD_SOURCE_DIR}/source/libs/parser/inc"
PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc"
)
#add_test(
# NAME scalarTest
# COMMAND scalarTest
#)
add_test(
NAME scalarTest
COMMAND scalarTest
)

View File

@ -1238,8 +1238,8 @@ TEST(columnTest, json_column_logic_op) {
printf("--------------------json null---------------------\n");
key = "k3";
bool eRes2[len+len1] = {false, false, false, false, false, false, true, false, false, false, false, false, false};
key = "k3"; // (null is true) return NULL, so use DBL_MAX represent NULL
double eRes2[len+len1] = {false, false, false, false, false, false, true, false, DBL_MAX, false, false, false, false};
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes2[i], op[i]);
}
@ -1290,8 +1290,8 @@ TEST(columnTest, json_column_logic_op) {
printf("---------------------json not exist--------------------\n");
key = "k10";
double eRes10[len+len1] = {false, false, false, false, false, false, true, false, false, false, false, false, false};
key = "k10"; // (NULL is true) return NULL, so use DBL_MAX represent NULL
double eRes10[len+len1] = {false, false, false, false, false, false, true, false, DBL_MAX, false, false, false, false};
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes10[i], op[i]);
}
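The test change above widens the expected-result arrays from bool to double so that DBL_MAX can stand in for a NULL result. A standalone sketch of why a bool slot cannot carry that sentinel:

#include <stdio.h>
#include <stdbool.h>
#include <float.h>

/* Illustrative sketch only: a bool slot collapses any non-zero value to 1, so
 * DBL_MAX is indistinguishable from true; a double slot keeps the sentinel. */
int main(void) {
  bool   asBool   = (bool)DBL_MAX;
  double asDouble = DBL_MAX;

  printf("bool slot holds:     %d\n", asBool);              /* prints 1, same as true */
  printf("double slot is NULL: %d\n", asDouble == DBL_MAX); /* sentinel survives */
  return 0;
}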
@ -3456,7 +3456,7 @@ TEST(ScalarFunctionTest, powFunction_column) {
//TINYINT AND FLOAT
int8_t param0[] = {2, 3, 4};
float param1[] = {3.0, 3.0, 2.0};
float param1[] = {3.0, 3.0, 3.0};
scltMakeDataBlock(&input[0], TSDB_DATA_TYPE_TINYINT, 0, rowNum, false);
pInput[0] = *input[0];
for (int32_t i = 0; i < rowNum; ++i) {

View File

@ -39,12 +39,6 @@ enum {
SCH_WRITE,
};
typedef enum {
SCH_RES_TYPE_QUERY,
SCH_RES_TYPE_FETCH,
} SCH_RES_TYPE;
typedef struct SSchTrans {
void *transInst;
void *transHandle;
@ -197,7 +191,7 @@ typedef struct SSchJob {
int32_t errCode;
SArray *errList; // SArray<SQueryErrorInfo>
SRWLatch resLock;
SCH_RES_TYPE resType;
void *queryRes;
void *resData; //TODO free it or not
int32_t resNumOfRows;
const char *sql;

View File

@ -1058,8 +1058,6 @@ _return:
int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRsp *pRsp) {
SCH_TASK_DLOG("got explain rsp, rows:%d, complete:%d", htonl(pRsp->numOfRows), pRsp->completed);
pJob->resType = SCH_RES_TYPE_FETCH;
atomic_store_32(&pJob->resNumOfRows, htonl(pRsp->numOfRows));
atomic_store_ptr(&pJob->resData, pRsp);
@ -1070,6 +1068,27 @@ int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRs
return TSDB_CODE_SUCCESS;
}
int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp) {
if (rsp->tbFName[0]) {
if (NULL == pJob->queryRes) {
pJob->queryRes = taosArrayInit(pJob->taskNum, sizeof(STbVerInfo));
if (NULL == pJob->queryRes) {
SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
}
STbVerInfo tbInfo;
strcpy(tbInfo.tbFName, rsp->tbFName);
tbInfo.sversion = rsp->sversion;
tbInfo.tversion = rsp->tversion;
taosArrayPush((SArray *)pJob->queryRes, &tbInfo);
}
return TSDB_CODE_SUCCESS;
}
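schSaveJobQueryRes above creates the per-job result array on first use and appends one STbVerInfo record per successful task response. The same lazy-create-and-append shape written with plain libc follows (struct and field names are illustrative, not the SArray API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char tbFName[192]; int sversion; int tversion; } VerInfo;
typedef struct { VerInfo *items; size_t len; size_t cap; } VerList;

/* Illustrative sketch only: record a table-version entry, allocating the list lazily. */
static int saveVerInfo(VerList *list, const VerInfo *info) {
  if (info->tbFName[0] == '\0') return 0;          /* nothing to record */
  if (list->items == NULL) {                       /* create on first use */
    list->cap = 4;
    list->items = malloc(list->cap * sizeof(*list->items));
    if (list->items == NULL) return -1;
  }
  if (list->len == list->cap) {                    /* grow when full */
    VerInfo *p = realloc(list->items, list->cap * 2 * sizeof(*p));
    if (p == NULL) return -1;
    list->items = p;
    list->cap *= 2;
  }
  list->items[list->len++] = *info;
  return 0;
}

int main(void) {
  VerList list = {0};
  VerInfo v = {.tbFName = "1.db.tb1", .sversion = 3, .tversion = 2};
  saveVerInfo(&list, &v);
  printf("%zu record(s), first: %s sver=%d tver=%d\n",
         list.len, list.items[0].tbFName, list.items[0].sversion, list.items[0].tversion);
  free(list.items);
  return 0;
}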
// Note: no more task error processing, handled in function internal
int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, char *msg, int32_t msgSize,
int32_t rspCode) {
@ -1180,10 +1199,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows);
SCH_TASK_DLOG("submit succeed, affectedRows:%d", rsp->affectedRows);
pJob->resType = SCH_RES_TYPE_QUERY;
SCH_LOCK(SCH_WRITE, &pJob->resLock);
if (pJob->resData) {
SSubmitRsp *sum = pJob->resData;
if (pJob->queryRes) {
SSubmitRsp *sum = pJob->queryRes;
sum->affectedRows += rsp->affectedRows;
sum->nBlocks += rsp->nBlocks;
sum->pBlocks = taosMemoryRealloc(sum->pBlocks, sum->nBlocks * sizeof(*sum->pBlocks));
@ -1191,7 +1209,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
taosMemoryFree(rsp->pBlocks);
taosMemoryFree(rsp);
} else {
pJob->resData = rsp;
pJob->queryRes = rsp;
}
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
}
@ -1225,6 +1243,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
SCH_ERR_JRET(rsp->code);
SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp));
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
@ -2399,6 +2420,12 @@ void schFreeJobImpl(void *job) {
qExplainFreeCtx(pJob->explainCtx);
if (SCH_IS_QUERY_JOB(pJob)) {
taosArrayDestroy((SArray *)pJob->queryRes);
} else {
tFreeSSubmitRsp((SSubmitRsp*)pJob->queryRes);
}
taosMemoryFreeClear(pJob->resData);
taosMemoryFreeClear(pJob);
@ -2461,8 +2488,6 @@ int32_t schExecStaticExplain(void *transport, SArray *pNodeList, SQueryPlan *pDa
SCH_ERR_JRET(qExecStaticExplain(pDag, (SRetrieveTableRsp **)&pJob->resData));
pJob->resType = SCH_RES_TYPE_FETCH;
int64_t refId = taosAddRef(schMgmt.jobRef, pJob);
if (refId < 0) {
SCH_JOB_ELOG("taosAddRef job failed, error:%s", tstrerror(terrno));
@ -2540,24 +2565,30 @@ int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, in
SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
int32_t code = 0;
*pJob = 0;
if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) {
SCH_ERR_RET(schExecStaticExplain(transport, nodeList, pDag, pJob, sql, true));
} else {
SCH_ERR_RET(schExecJobImpl(transport, nodeList, pDag, pJob, sql, startTs, true));
SCH_ERR_JRET(schExecJobImpl(transport, nodeList, pDag, pJob, sql, startTs, true));
}
_return:
if (*pJob) {
SSchJob *job = schAcquireJob(*pJob);
pRes->code = atomic_load_32(&job->errCode);
pRes->numOfRows = job->resNumOfRows;
if (SCH_RES_TYPE_QUERY == job->resType) {
pRes->res = job->resData;
job->resData = NULL;
}
pRes->res = job->queryRes;
job->queryRes = NULL;
schReleaseJob(*pJob);
}
return TSDB_CODE_SUCCESS;
return code;
}
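The schedulerExecJob change above initializes the out handle, funnels failures from schExecJobImpl through the shared _return label, and fills pRes from the job on both paths. A standalone sketch of that control-flow shape follows (all names are hypothetical, not the scheduler API):

#include <stdio.h>

static int runPlan(int fail) { return fail ? -1 : 0; }

/* Illustrative sketch only: report the per-job status on success and failure alike. */
static int execJob(int fail, long *jobId, int *resCode) {
  int code = 0;
  *jobId = 0;                      /* initialize the out handle up front */

  code = runPlan(fail);
  if (code != 0) goto _return;

  *jobId = 42;                     /* plan scheduled, handle is now valid */

_return:
  if (*jobId != 0) {
    *resCode = 0;                  /* a job exists: report its own status */
  } else {
    *resCode = code;               /* no job was created: surface the error */
  }
  return code;
}

int main(void) {
  long job = 0;
  int  rc = 0;
  int  code = execJob(0, &job, &rc);
  printf("ok:  code=%d rc=%d\n", code, rc);
  code = execJob(1, &job, &rc);
  printf("err: code=%d rc=%d\n", code, rc);
  return 0;
}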
int32_t schedulerAsyncExecJob(void *transport, SArray *pNodeList, SQueryPlan *pDag, const char *sql, int64_t *pJob) {

View File

@ -68,7 +68,7 @@ static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMs
// get groupId, compute hash value
uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));
//
// get node
// TODO: optimize search process
SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
@ -152,13 +152,13 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
// exec
while (1) {
SSDataBlock* output;
SSDataBlock* output = NULL;
uint64_t ts = 0;
if (qExecTask(exec, &output, &ts) < 0) {
ASSERT(false);
}
if (output == NULL) break;
taosArrayPush(pRes, &output);
taosArrayPush(pRes, output);
}
// destroy
@ -189,7 +189,7 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
taosFreeQitem(data);
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
@ -209,7 +209,7 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
taosFreeQitem(data);
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
@ -231,7 +231,7 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
taosFreeQitem(data);
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
@ -253,7 +253,7 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
taosFreeQitem(data);
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
@ -392,12 +392,14 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg*
// 1.2 enqueue
pBlock->type = STREAM_DATA_TYPE_SSDATA_BLOCK;
pBlock->sourceVg = pReq->sourceVg;
pBlock->sourceVer = pReq->sourceVer;
/*pBlock->sourceVer = pReq->sourceVer;*/
taosWriteQitem(pTask->inputQ, pBlock);
// 1.3 rsp by input status
SStreamDispatchRsp* pCont = rpcMallocCont(sizeof(SStreamDispatchRsp));
pCont->inputStatus = status;
pCont->streamId = pReq->streamId;
pCont->taskId = pReq->sourceTaskId;
pRsp->pCont = pCont;
pRsp->contLen = sizeof(SStreamDispatchRsp);
tmsgSendRsp(pRsp);
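The enqueue path above now echoes the request's stream and source-task identifiers in the dispatch response so the ack can be routed back to the sending task. A standalone sketch of that echo follows (the struct layouts mirror the ones above but are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t streamId; int32_t sourceTaskId; int32_t sourceVg; } DispatchReq;
typedef struct { int64_t streamId; int32_t taskId; int8_t inputStatus; } DispatchRsp;

/* Illustrative sketch only: copy the identifiers from the request into the response. */
static void buildRsp(const DispatchReq *req, int8_t status, DispatchRsp *rsp) {
  rsp->inputStatus = status;        /* result of enqueueing the block */
  rsp->streamId    = req->streamId; /* echo identifiers for routing the ack */
  rsp->taskId      = req->sourceTaskId;
}

int main(void) {
  DispatchReq req = {.streamId = 7, .sourceTaskId = 3, .sourceVg = 2};
  DispatchRsp rsp;
  buildRsp(&req, 0, &rsp);
  printf("stream %lld task %d status %d\n", (long long)rsp.streamId, (int)rsp.taskId, (int)rsp.inputStatus);
  return 0;
}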
@ -439,12 +441,12 @@ int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb) {
return 0;
}
int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, char* msg) {
int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg) {
//
return 0;
}
int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, char* msg) {
int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) {
//
return 0;
}

View File

@ -94,7 +94,9 @@ void rpcFreeCont(void* cont) {
if (cont == NULL) {
return;
}
taosMemoryFree((char*)cont - TRANS_MSG_OVERHEAD);
tTrace("free mem: %p", (char*)cont - TRANS_MSG_OVERHEAD);
}
void* rpcReallocCont(void* ptr, int contLen) {
if (ptr == NULL) {

View File

@ -133,6 +133,7 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) {
} else {
p->cap = p->total;
p->buf = taosMemoryRealloc(p->buf, p->cap);
tTrace("internal malloc mem: %p, size: %d", p->buf, p->cap);
uvBuf->base = p->buf + p->len;
uvBuf->len = p->cap - p->len;

View File

@ -469,6 +469,8 @@ static void uvStartSendResp(SSrvMsg* smsg) {
if (pConn->broken == true) {
// persist by
transFreeMsg(smsg->msg.pCont);
taosMemoryFree(smsg);
transUnrefSrvHandle(pConn);
return;
}

View File

@ -264,7 +264,7 @@ class TDDnode:
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
valgrindCmdline = "valgrind --log-file=\"valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
cmd = "nohup %s %s -c %s 2>&1 & " % (
valgrindCmdline, binPath, self.cfgDir)
@ -325,7 +325,7 @@ class TDDnode:
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
valgrindCmdline = "valgrind --log-file=\"valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
cmd = "nohup %s %s -c %s 2>&1 & " % (
valgrindCmdline, binPath, self.cfgDir)

View File

@ -0,0 +1,230 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sleep 50
sql connect
print =============== create database
sql create database d0 keep 365000d,365000d,365000d
sql use d0
print =============== create super table and register rsma
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2;
sql show stables
if $rows != 1 then
return -1
endi
print =============== create child table
sql create table ct1 using stb tags("BeiJing", "ChaoYang")
sql create table ct2 using stb tags("BeiJing", "HaiDian")
sql show tables
if $rows != 2 then
return -1
endi
print =============== step3-1 insert records into ct1
sql insert into ct1 values('2022-05-03 16:59:00.010', 10);
sql insert into ct1 values('2022-05-03 16:59:00.011', 11);
sql insert into ct1 values('2022-05-03 16:59:00.016', 16);
sql insert into ct1 values('2022-05-03 16:59:00.016', 17);
sql insert into ct1 values('2022-05-03 16:59:00.020', 20);
sql insert into ct1 values('2022-05-03 16:59:00.016', 18);
sql insert into ct1 values('2022-05-03 16:59:00.021', 21);
sql insert into ct1 values('2022-05-03 16:59:00.022', 22);
print =============== step3-1 query records of ct1 from memory
sql select * from ct1;
print $data00 $data01
print $data10 $data11
print $data20 $data21
print $data30 $data31
print $data40 $data41
print $data50 $data51
if $rows != 6 then
print rows $rows != 6
return -1
endi
if $data01 != 10 then
print data01 $data01 != 10
return -1
endi
if $data21 != 18 then
print data21 $data21 != 18
return -1
endi
if $data51 != 22 then
print data51 $data51 != 22
return -1
endi
print =============== step3-1 insert records into ct2
sql insert into ct2 values('2022-03-02 16:59:00.010', 1),('2022-03-02 16:59:00.010',11),('2022-04-01 16:59:00.011',2),('2022-04-01 16:59:00.011',5),('2022-03-06 16:59:00.013',7);
sql insert into ct2 values('2022-03-02 16:59:00.010', 3),('2022-03-02 16:59:00.010',33),('2022-04-01 16:59:00.011',4),('2022-04-01 16:59:00.011',6),('2022-03-06 16:59:00.013',8);
sql insert into ct2 values('2022-03-02 16:59:00.010', 103),('2022-03-02 16:59:00.010',303),('2022-04-01 16:59:00.011',40),('2022-04-01 16:59:00.011',60),('2022-03-06 16:59:00.013',80);
print =============== step3-1 query records of ct2 from memory
sql select * from ct2;
print $data00 $data01
print $data10 $data11
print $data20 $data21
if $rows != 3 then
print rows $rows != 3
return -1
endi
if $data01 != 103 then
print data01 $data01 != 103
return -1
endi
if $data11 != 80 then
print data11 $data11 != 80
return -1
endi
if $data21 != 40 then
print data21 $data21 != 40
return -1
endi
#==================== reboot to trigger commit data to file
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
print =============== step3-2 query records of ct1 from file
sql select * from ct1;
print $data00 $data01
print $data10 $data11
print $data20 $data21
print $data30 $data31
print $data40 $data41
print $data50 $data51
if $rows != 6 then
print rows $rows != 6
return -1
endi
if $data01 != 10 then
print data01 $data01 != 10
return -1
endi
if $data21 != 18 then
print data21 $data21 != 18
return -1
endi
if $data51 != 22 then
print data51 $data51 != 22
return -1
endi
print =============== step3-2 query records of ct2 from file
sql select * from ct2;
print $data00 $data01
print $data10 $data11
print $data20 $data21
if $rows != 3 then
print rows $rows != 3
return -1
endi
if $data01 != 103 then
print data01 $data01 != 103
return -1
endi
if $data11 != 80 then
print data11 $data11 != 80
return -1
endi
if $data21 != 40 then
print data21 $data21 != 40
return -1
endi
print =============== step3-3 query records of ct1 from memory and file(merge)
sql insert into ct1 values('2022-05-03 16:59:00.010', 100);
sql insert into ct1 values('2022-05-03 16:59:00.022', 200);
sql insert into ct1 values('2022-05-03 16:59:00.016', 160);
sql select * from ct1;
print $data00 $data01
print $data10 $data11
print $data20 $data21
print $data30 $data31
print $data40 $data41
print $data50 $data51
if $rows != 6 then
print rows $rows != 6
return -1
endi
if $data01 != 100 then
print data01 $data01 != 100
return -1
endi
if $data21 != 160 then
print data21 $data21 != 160
return -1
endi
if $data51 != 200 then
print data51 $data51 != 200
return -1
endi
print =============== step3-3 query records of ct2 from memory and file(merge)
sql insert into ct2(ts) values('2022-04-02 16:59:00.016');
sql insert into ct2 values('2022-03-06 16:59:00.013', NULL);
sql insert into ct2 values('2022-03-01 16:59:00.016', 10);
sql insert into ct2(ts) values('2022-04-01 16:59:00.011');
sql select * from ct2;
print $data00 $data01
print $data10 $data11
print $data20 $data21
print $data30 $data31
print $data40 $data41
if $rows != 5 then
print rows $rows != 5
return -1
endi
if $data01 != 10 then
print data01 $data01 != 10
return -1
endi
if $data11 != 103 then
print data11 $data11 != 103
return -1
endi
if $data21 != NULL then
print data21 $data21 != NULL
return -1
endi
if $data31 != 40 then
print data31 $data31 != 40
return -1
endi
if $data41 != NULL then
print data41 $data41 != NULL
return -1
endi

View File

@ -71,7 +71,7 @@ sql create database db replica $replica vgroups $vgroups
$loop_cnt = 0
check_db_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
sleep 20
if $loop_cnt == 10 then
print ====> db not ready!
return -1
@ -83,7 +83,7 @@ print $data(db)[13] $data(db)[14] $data(db)[15] $data(db)[16] $data(db)[17] $dat
if $rows != 3 then
return -1
endi
if $data(db)[19] != nostrict then
if $data(db)[19] != ready then
goto check_db_ready
endi
@ -93,7 +93,7 @@ $loop_cnt = 0
check_vg_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 40 then
if $loop_cnt == 300 then
print ====> vgroups not ready!
return -1
endi
@ -175,6 +175,7 @@ while $i < $tbNum
endw
$totalTblNum = $tbNum * 2
print ====>totalTblNum:$totalTblNum
sql show tables
if $rows != $totalTblNum then
return -1
@ -226,7 +227,7 @@ $loop_cnt = 0
check_vg_ready_2:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
if $loop_cnt == 300 then
print ====> vgroups switch fail!!!
return -1
endi

View File

@ -30,9 +30,12 @@ class TDTestCase:
#
# --------------- main frame -------------------
#
clientCfgDict = {'queryproxy': '1'}
clientCfgDict = {'queryproxy': '1','debugFlag': 135}
clientCfgDict["queryproxy"] = '2'
clientCfgDict["debugFlag"] = 143
updatecfgDict = {'clientCfg': {}}
updatecfgDict = {'debugFlag': 143}
updatecfgDict["clientCfg"] = clientCfgDict
def caseDescription(self):
'''
@ -116,7 +119,7 @@ class TDTestCase:
# tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate))
return
def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,count):
def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childrowcount):
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
@ -125,7 +128,7 @@ class TDTestCase:
tsql.execute("drop database if exists %s"%dbname)
tsql.execute("create database %s vgroups %d"%(dbname,vgroups))
tsql.execute("use %s" %dbname)
count=int(count)
count=int(childrowcount)
threads = []
for i in range(threadNumbers):
tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
@ -137,7 +140,7 @@ class TDTestCase:
tr.join()
end_time = time.time()
spendTime=end_time-start_time
speedCreate=count/spendTime
speedCreate=threadNumbers*count/spendTime
tdLog.debug("spent %.2fs to create %d stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,threadNumbers,threadNumbers*count,speedCreate))
return
@ -146,41 +149,39 @@ class TDTestCase:
# insert data
def insert_data(self, host, dbname, stbname, ts_start,rowCount):
def insert_data(self, host, dbname, stbname, chilCount, ts_start, rowCount):
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
tsql=self.newcur(host,config)
tdLog.debug("ready to inser data")
tsql.execute("use %s" %dbname)
pre_insert = "insert into "
sql = pre_insert
tcount=int(tcount)
allRows=tcount*rowCount
chilCount=int(chilCount)
allRows=chilCount*rowCount
tdLog.debug("doing insert data into stable-index:%s rows:%d ..."%(stbname, allRows))
exeStartTime=time.time()
for i in range(0,tcount):
for i in range(0,chilCount):
sql += " %s_%d values "%(stbname,i)
for j in range(rowCount):
sql += "(%d, %d, 'taos_%d') "%(ts_start + j*1000, j, j)
if j >0 and j%5000 == 0:
if j >0 and j%4000 == 0:
# print(sql)
tdSql.execute(sql)
tsql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
# end sql
if sql != pre_insert:
# print(sql)
tdSql.execute(sql)
print(len(sql))
tsql.execute(sql)
exeEndTime=time.time()
spendTime=exeEndTime-exeStartTime
speedInsert=allRows/spendTime
# tdLog.debug("spent %.2fs to INSERT %d rows , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,speedInsert))
tdLog.debug("INSERT TABLE DATA ............ [OK]")
tdLog.debug("spent %.2fs to INSERT %d rows into %s , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,stbname,speedInsert))
# tdLog.debug("INSERT TABLE DATA ............ [OK]")
return
def mutiThread_insert_data(self, host, dbname, stbname, threadNumbers, ts_start, tcountStart,tcountStop,rowCount):
def mutiThread_insert_data(self, host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount):
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
@ -188,42 +189,11 @@ class TDTestCase:
tdLog.debug("ready to inser data")
tsql.execute("use %s" %dbname)
pre_insert = "insert into "
sql = pre_insert
tcount=tcountStop-tcountStart
allRows=tcount*rowCount
tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbname, allRows))
exeStartTime=time.time()
for i in range(tcountStart,tcountStop):
sql += " %s_%d values "%(stbname,i)
for j in range(rowCount):
sql += "(%d, %d, 'taos_%d') "%(ts_start + j*1000, j, j)
if j >0 and j%5000 == 0:
# print(sql)
tdSql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
# end sql
if sql != pre_insert:
# print(sql)
tdSql.execute(sql)
exeEndTime=time.time()
spendTime=exeEndTime-exeStartTime
speedInsert=allRows/spendTime
# tdLog.debug("spent %.2fs to INSERT %d rows , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,speedInsert))
tdLog.debug("INSERT TABLE DATA ............ [OK]")
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
tsql=self.newcur(host,config)
tsql.execute("use %s" %dbname)
count=int(count)
chilCount=int(chilCount)
threads = []
for i in range(threadNumbers):
tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
threads.append(thd.Thread(target=self.create_tables, args=(host, dbname, stbname+"%d"%i, count,)))
# tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
threads.append(thd.Thread(target=self.insert_data, args=(host, dbname, stbname+"%d"%i, chilCount, ts_start, childrowcount,)))
start_time = time.time()
for tr in threads:
tr.start()
@ -231,8 +201,18 @@ class TDTestCase:
tr.join()
end_time = time.time()
spendTime=end_time-start_time
speedCreate=count/spendTime
tdLog.debug("spent %.2fs to create %d stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,threadNumbers,threadNumbers*count,speedCreate))
tableCounts=threadNumbers*chilCount
stableRows=chilCount*childrowcount
allRows=stableRows*threadNumbers
speedInsert=allRows/spendTime
for i in range(threadNumbers):
tdSql.execute("use %s" %dbname)
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,stableRows)
tdLog.debug("spent %.2fs to insert %d rows into %d stable and %d table, speed is %.2f table/s... [OK]"% (spendTime,allRows,threadNumbers,tableCounts,speedInsert))
tdLog.debug("INSERT TABLE DATA ............ [OK]")
return
@ -288,7 +268,10 @@ class TDTestCase:
def test_case1(self):
tdLog.debug("-----create database and muti-thread create tables test------- ")
#host,dbname,stbname,vgroups,threadNumbers,tcountStart,tcountStop
self.mutiThread_create_tables(host="localhost",dbname="db2",stbname="stb2", vgroups=1, threadNumbers=5, count=10000)
#host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount
self.mutiThread_create_tables(host="localhost",dbname="db",stbname="stb", vgroups=1, threadNumbers=5, childrowcount=50)
self.mutiThread_insert_data(host="localhost",dbname="db",stbname="stb", threadNumbers=5,chilCount=50,ts_start=self.ts,childrowcount=10)
return
# test case2 base:insert data
@ -366,17 +349,17 @@ class TDTestCase:
# run case
def run(self):
# # test base case
# self.test_case1()
# tdLog.debug(" LIMIT test_case1 ............ [OK]")
# create database and tables.
self.test_case1()
tdLog.debug(" LIMIT test_case1 ............ [OK]")
# test case
# # taosBenchmark create database and table
# self.test_case2()
# tdLog.debug(" LIMIT test_case2 ............ [OK]")
# test case
self.test_case3()
tdLog.debug(" LIMIT test_case3 ............ [OK]")
# # taosBenchmark create database/table and insert data
# self.test_case3()
# tdLog.debug(" LIMIT test_case3 ............ [OK]")
# # test qnode

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
add_executable(create_table create_table.c)
add_executable(tmq_demo tmqDemo.c)
add_executable(tmq_sim tmqSim.c)
add_executable(create_table createTable.c)
target_link_libraries(
create_table
PUBLIC taos_static
@ -22,3 +22,21 @@ target_link_libraries(
PUBLIC common
PUBLIC os
)
if(NOT TD_WINDOWS)
add_executable(sdbDump sdbDump.c)
target_link_libraries(
sdbDump
PUBLIC dnode
PUBLIC mnode
PUBLIC sdb
PUBLIC os
)
target_include_directories(
sdbDump
PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode"
PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/impl/inc"
PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/sdb/inc"
PRIVATE "${TD_SOURCE_DIR}/source/dnode/mgmt/node_mgmt/inc"
)
ENDIF ()

434
tests/test/c/sdbDump.c Normal file
View File

@ -0,0 +1,434 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "dmMgmt.h"
#include "mndInt.h"
#include "sdbInt.h"
#include "tconfig.h"
#include "tjson.h"
#define TMP_SDB_DATA_DIR "/tmp/dumpsdb"
#define TMP_SDB_MNODE_DIR "/tmp/dumpsdb/mnode"
#define TMP_SDB_FILE "/tmp/dumpsdb/mnode/data/sdb.data"
#define TMP_SDB_PATH "/tmp/dumpsdb/mnode/data"
void reportStartup(const char *name, const char *desc) {}
void sendRsp(SRpcMsg *pMsg) { rpcFreeCont(pMsg->pCont); }
int32_t sendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
terrno = TSDB_CODE_INVALID_PTR;
return -1;
}
char *i642str(int64_t val) {
static char str[24] = {0};
snprintf(str, sizeof(str), "%" PRId64, val);
return str;
}
void dumpFunc(SSdb *pSdb, SJson *json) {}
void dumpDb(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonCreateObject();
tjsonAddItemToObject(json, "dbs", items);
while (1) {
SDbObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_DB, pIter, (void **)&pObj);
if (pIter == NULL) break;
SJson *item = tjsonCreateObject();
tjsonAddItemToObject(items, "db", item);
tjsonAddStringToObject(item, "name", pObj->name);
tjsonAddStringToObject(item, "acct", pObj->acct);
tjsonAddStringToObject(item, "createUser", pObj->createUser);
tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
tjsonAddStringToObject(item, "uid", i642str(pObj->uid));
tjsonAddIntegerToObject(item, "cfgVersion", pObj->cfgVersion);
tjsonAddIntegerToObject(item, "vgVersion", pObj->vgVersion);
tjsonAddIntegerToObject(item, "numOfVgroups", pObj->cfg.numOfVgroups);
tjsonAddIntegerToObject(item, "numOfStables", pObj->cfg.numOfStables);
tjsonAddIntegerToObject(item, "buffer", pObj->cfg.buffer);
tjsonAddIntegerToObject(item, "pageSize", pObj->cfg.pageSize);
tjsonAddIntegerToObject(item, "pages", pObj->cfg.pages);
tjsonAddIntegerToObject(item, "daysPerFile", pObj->cfg.daysPerFile);
tjsonAddIntegerToObject(item, "daysToKeep0", pObj->cfg.daysToKeep0);
tjsonAddIntegerToObject(item, "daysToKeep1", pObj->cfg.daysToKeep1);
tjsonAddIntegerToObject(item, "daysToKeep2", pObj->cfg.daysToKeep2);
tjsonAddIntegerToObject(item, "minRows", pObj->cfg.minRows);
tjsonAddIntegerToObject(item, "maxRows", pObj->cfg.maxRows);
tjsonAddIntegerToObject(item, "fsyncPeriod", pObj->cfg.fsyncPeriod);
tjsonAddIntegerToObject(item, "walLevel", pObj->cfg.walLevel);
tjsonAddIntegerToObject(item, "precision", pObj->cfg.precision);
tjsonAddIntegerToObject(item, "compression", pObj->cfg.compression);
tjsonAddIntegerToObject(item, "replications", pObj->cfg.replications);
tjsonAddIntegerToObject(item, "strict", pObj->cfg.strict);
tjsonAddIntegerToObject(item, "cacheLastRow", pObj->cfg.cacheLastRow);
tjsonAddIntegerToObject(item, "hashMethod", pObj->cfg.hashMethod);
tjsonAddIntegerToObject(item, "numOfRetensions", pObj->cfg.numOfRetensions);
sdbRelease(pSdb, pObj);
}
}
void dumpStb(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonCreateObject();
tjsonAddItemToObject(json, "stbs", items);
while (1) {
SStbObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_STB, pIter, (void **)&pObj);
if (pIter == NULL) break;
SJson *item = tjsonCreateObject();
tjsonAddItemToObject(items, "stb", item);
tjsonAddStringToObject(item, "name", pObj->name);
tjsonAddStringToObject(item, "db", pObj->db);
tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
tjsonAddStringToObject(item, "uid", i642str(pObj->uid));
tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid));
tjsonAddIntegerToObject(item, "version", pObj->version);
tjsonAddIntegerToObject(item, "tagVer", pObj->tagVer);
tjsonAddIntegerToObject(item, "colVer", pObj->colVer);
tjsonAddIntegerToObject(item, "nextColId", pObj->nextColId);
tjsonAddIntegerToObject(item, "xFilesFactor", pObj->xFilesFactor * 10000);
tjsonAddIntegerToObject(item, "delay", pObj->delay);
tjsonAddIntegerToObject(item, "ttl", pObj->ttl);
tjsonAddIntegerToObject(item, "numOfColumns", pObj->numOfColumns);
tjsonAddIntegerToObject(item, "numOfTags", pObj->numOfTags);
tjsonAddIntegerToObject(item, "commentLen", pObj->commentLen);
tjsonAddIntegerToObject(item, "ast1Len", pObj->ast1Len);
tjsonAddIntegerToObject(item, "ast2Len", pObj->ast2Len);
sdbRelease(pSdb, pObj);
}
}
void dumpSma(SSdb *pSdb, SJson *json) {}
void dumpVgroup(SSdb *pSdb, SJson *json) {}
void dumpTopic(SSdb *pSdb, SJson *json) {}
void dumpConsumber(SSdb *pSdb, SJson *json) {}
void dumpSubscribe(SSdb *pSdb, SJson *json) {}
void dumpOffset(SSdb *pSdb, SJson *json) {}
void dumpStream(SSdb *pSdb, SJson *json) {}
void dumpAcct(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonCreateObject();
tjsonAddItemToObject(json, "accts", items);
while (1) {
SAcctObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_ACCT, pIter, (void **)&pObj);
if (pIter == NULL) break;
SJson *item = tjsonCreateObject();
tjsonAddItemToObject(items, "acct", item);
tjsonAddStringToObject(item, "acct", pObj->acct);
tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
tjsonAddIntegerToObject(item, "acctId", pObj->acctId);
sdbRelease(pSdb, pObj);
}
}
void dumpAuth(SSdb *pSdb, SJson *json) {}
void dumpUser(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonCreateObject();
tjsonAddItemToObject(json, "users", items);
while (1) {
SUserObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_USER, pIter, (void **)&pObj);
if (pIter == NULL) break;
SJson *item = tjsonCreateObject();
tjsonAddItemToObject(items, "user", item);
tjsonAddStringToObject(item, "name", pObj->user);
tjsonAddStringToObject(item, "pass", pObj->pass);
tjsonAddStringToObject(item, "acct", pObj->acct);
tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
tjsonAddIntegerToObject(item, "superUser", pObj->superUser);
tjsonAddIntegerToObject(item, "authVersion", pObj->authVersion);
tjsonAddIntegerToObject(item, "numOfReadDbs", taosHashGetSize(pObj->readDbs));
tjsonAddIntegerToObject(item, "numOfWriteDbs", taosHashGetSize(pObj->writeDbs));
sdbRelease(pSdb, pObj);
}
}
void dumpDnode(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonCreateObject();
tjsonAddItemToObject(json, "dnodes", items);
while (1) {
SDnodeObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pObj);
if (pIter == NULL) break;
SJson *item = tjsonCreateObject();
tjsonAddItemToObject(items, "dnode", item);
tjsonAddIntegerToObject(item, "id", pObj->id);
tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
tjsonAddIntegerToObject(item, "port", pObj->port);
tjsonAddStringToObject(item, "fqdn", pObj->fqdn);
sdbRelease(pSdb, pObj);
}
}
void dumpBnode(SSdb *pSdb, SJson *json) {}
void dumpSnode(SSdb *pSdb, SJson *json) {}
void dumpQnode(SSdb *pSdb, SJson *json) {}
void dumpMnode(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonCreateObject();
tjsonAddItemToObject(json, "mnodes", items);
while (1) {
SMnodeObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj);
if (pIter == NULL) break;
SJson *item = tjsonCreateObject();
tjsonAddItemToObject(items, "mnode", item);
tjsonAddIntegerToObject(item, "id", pObj->id);
tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
sdbRelease(pSdb, pObj);
}
}
void dumpCluster(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonCreateObject();
tjsonAddItemToObject(json, "clusters", items);
while (1) {
SClusterObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pObj);
if (pIter == NULL) break;
SJson *item = tjsonCreateObject();
tjsonAddItemToObject(items, "cluster", item);
tjsonAddStringToObject(item, "id", i642str(pObj->id));
tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
tjsonAddStringToObject(item, "name", pObj->name);
sdbRelease(pSdb, pObj);
}
}
void dumpTrans(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonCreateObject();
tjsonAddItemToObject(json, "transactions", items);
while (1) {
STrans *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_TRANS, pIter, (void **)&pObj);
if (pIter == NULL) break;
SJson *item = tjsonCreateObject();
tjsonAddItemToObject(items, "trans", item);
tjsonAddIntegerToObject(item, "id", pObj->id);
tjsonAddIntegerToObject(item, "stage", pObj->stage);
tjsonAddIntegerToObject(item, "policy", pObj->policy);
tjsonAddIntegerToObject(item, "type", pObj->type);
tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid));
tjsonAddStringToObject(item, "dbname", pObj->dbname);
tjsonAddIntegerToObject(item, "redoLogNum", taosArrayGetSize(pObj->redoLogs));
tjsonAddIntegerToObject(item, "undoLogNum", taosArrayGetSize(pObj->undoLogs));
tjsonAddIntegerToObject(item, "commitLogNum", taosArrayGetSize(pObj->commitLogs));
tjsonAddIntegerToObject(item, "redoActionNum", taosArrayGetSize(pObj->redoActions));
tjsonAddIntegerToObject(item, "undoActionNum", taosArrayGetSize(pObj->undoActions));
sdbRelease(pSdb, pObj);
}
}
void dumpHeader(SSdb *pSdb, SJson *json) {
tjsonAddIntegerToObject(json, "sver", 1);
tjsonAddStringToObject(json, "curVer", i642str(pSdb->curVer));
SJson *maxIdsJson = tjsonCreateObject();
tjsonAddItemToObject(json, "maxIds", maxIdsJson);
for (int32_t i = 0; i < SDB_MAX; ++i) {
int64_t maxId = 0;
if (i < SDB_MAX) {
maxId = pSdb->maxId[i];
}
tjsonAddStringToObject(maxIdsJson, sdbTableName(i), i642str(maxId));
}
SJson *tableVersJson = tjsonCreateObject();
tjsonAddItemToObject(json, "tableVers", tableVersJson);
for (int32_t i = 0; i < SDB_MAX; ++i) {
int64_t tableVer = 0;
if (i < SDB_MAX) {
tableVer = pSdb->tableVer[i];
}
tjsonAddStringToObject(tableVersJson, sdbTableName(i), i642str(tableVer));
}
}
int32_t dumpSdb() {
SMsgCb msgCb = {0};
msgCb.reportStartupFp = reportStartup;
msgCb.sendReqFp = sendReq;
msgCb.sendRspFp = sendRsp;
msgCb.mgmt = (SMgmtWrapper *)(&msgCb); // hack
tmsgSetDefault(&msgCb);
walInit();
SMnodeOpt opt = {.msgCb = msgCb};
SMnode *pMnode = mndOpen(TMP_SDB_MNODE_DIR, &opt);
if (pMnode == NULL) return -1;
SSdb *pSdb = pMnode->pSdb;
SJson *json = tjsonCreateObject();
dumpHeader(pSdb, json);
dumpFunc(pSdb, json);
dumpDb(pSdb, json);
dumpStb(pSdb, json);
dumpSma(pSdb, json);
dumpVgroup(pSdb, json);
dumpTopic(pSdb, json);
dumpConsumber(pSdb, json);
dumpSubscribe(pSdb, json);
dumpOffset(pSdb, json);
dumpStream(pSdb, json);
dumpAcct(pSdb, json);
dumpAuth(pSdb, json);
dumpUser(pSdb, json);
dumpDnode(pSdb, json);
dumpBnode(pSdb, json);
dumpSnode(pSdb, json);
dumpQnode(pSdb, json);
dumpMnode(pSdb, json);
dumpCluster(pSdb, json);
dumpTrans(pSdb, json);
char *pCont = tjsonToString(json);
int32_t contLen = strlen(pCont);
char file[] = "sdb.json";
TdFilePtr pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to write %s since %s", file, terrstr());
return -1;
}
taosWriteFile(pFile, pCont, contLen);
taosWriteFile(pFile, "\n", 1);
taosFsyncFile(pFile);
taosCloseFile(&pFile);
tjsonDelete(json);
taosMemoryFree(pCont);
taosRemoveDir(TMP_SDB_DATA_DIR);
return 0;
}
int32_t parseArgs(int32_t argc, char *argv[]) {
char file[PATH_MAX] = {0};
for (int32_t i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
if (strlen(argv[++i]) >= PATH_MAX) {
printf("config file path overflow");
return -1;
}
tstrncpy(configDir, argv[i], PATH_MAX);
} else {
printf("'-c' requires a parameter, default is %s\n", configDir);
return -1;
}
} else if (strcmp(argv[i], "-f") == 0) {
if (i < argc - 1) {
if (strlen(argv[++i]) >= PATH_MAX) {
printf("file path overflow");
return -1;
}
tstrncpy(file, argv[i], PATH_MAX);
} else {
printf("'-f' requires a parameter, default is %s\n", configDir);
return -1;
}
} else {
printf("-c Configuration directory. \n");
printf("-f Input sdb.data file. \n");
return -1;
}
}
if (taosCreateLog("dumplog", 1, configDir, NULL, NULL, NULL, NULL, 1) != 0) {
printf("failed to dump since init log error\n");
return -1;
}
if (taosInitCfg(configDir, NULL, NULL, NULL, NULL, 0) != 0) {
printf("failed to dump since read config error\n");
return -1;
}
if (file[0] == 0) {
snprintf(file, PATH_MAX, "%s/mnode/data/sdb.data", tsDataDir);
}
strcpy(tsDataDir, TMP_SDB_DATA_DIR);
taosMulMkDir(TMP_SDB_PATH);
taosCopyFile(file, TMP_SDB_FILE);
return 0;
}
int32_t main(int32_t argc, char *argv[]) {
if (parseArgs(argc, argv) != 0) {
return -1;
}
return dumpSdb();
}
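Going by parseArgs and dumpSdb above, the tool copies the given sdb.data into /tmp/dumpsdb, opens a temporary mnode over that copy, and writes the JSON dump to sdb.json in the working directory. Under those assumptions a typical invocation would be sdbDump -c <config dir> -f <path to sdb.data>, with both arguments as placeholders; when -f is omitted, the file defaults to <dataDir>/mnode/data/sdb.data.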