From f1ee1abd7b8566c685633619f455906c1438384b Mon Sep 17 00:00:00 2001
From: dapan1121 <89396746@qq.com>
Date: Sun, 5 Jun 2022 20:33:21 +0800
Subject: [PATCH 01/14] delete data

---
 include/common/tmsg.h                       | 21 ++---
 include/libs/qworker/qworker.h              |  1 +
 source/common/src/tmsg.c                    | 81 ++++++++++++------
 source/dnode/mgmt/mgmt_vnode/src/vmHandle.c |  1 +
 source/dnode/qnode/src/qnode.c              |  1 +
 source/dnode/vnode/src/vnd/vnodeSvr.c       | 17 ++++
 source/libs/monitor/src/monMsg.c            |  2 +
 source/libs/qworker/inc/qwInt.h             |  2 +
 source/libs/qworker/src/qwMsg.c             | 41 ++++++++--
 source/libs/qworker/src/qwUtil.c            |  5 +-
 source/libs/qworker/src/qworker.c           | 91 ++++++++++++++++++++-
 source/libs/scheduler/src/schRemote.c       | 40 +++++++++
 12 files changed, 254 insertions(+), 49 deletions(-)

diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 39dc5361e6..5019dcc51c 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -933,6 +933,7 @@ typedef struct {
   int64_t numOfProcessedFetch;
   int64_t numOfProcessedDrop;
   int64_t numOfProcessedHb;
+  int64_t numOfProcessedDelete;
   int64_t cacheDataSize;
   int64_t numOfQueryInQueue;
   int64_t numOfFetchInQueue;
@@ -2689,20 +2690,20 @@ int32_t tEncodeSVSubmitReq(SEncoder* pCoder, const SVSubmitReq* pReq);
 int32_t tDecodeSVSubmitReq(SDecoder* pCoder, SVSubmitReq* pReq);
 
 typedef struct {
-  int64_t delUid;
-  int64_t tbUid;  // super/child/normal table
-  int8_t type;    // table type
-  int16_t nWnds;
-  char* tbFullName;
-  char* subPlan;
-  STimeWindow wnds[];
+  SMsgHead header;
+  uint64_t sId;
+  uint64_t queryId;
+  uint64_t taskId;
+  uint32_t sqlLen;
+  uint32_t phyLen;
+  char* sql;
+  char* msg;
 } SVDeleteReq;
 
-int32_t tEncodeSVDeleteReq(SEncoder* pCoder, const SVDeleteReq* pReq);
-int32_t tDecodeSVDeleteReq(SDecoder* pCoder, SVDeleteReq* pReq);
+int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq);
+int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq);
 
 typedef struct {
-  int32_t code;
   int64_t affectedRows;
 } SVDeleteRsp;
 
diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h
index aa20082fe0..94ed3ace42 100644
--- a/include/libs/qworker/qworker.h
+++ b/include/libs/qworker/qworker.h
@@ -47,6 +47,7 @@ typedef struct {
   uint64_t fetchProcessed;
   uint64_t dropProcessed;
   uint64_t hbProcessed;
+  uint64_t deleteProcessed;
   uint64_t numOfQueryInQueue;
   uint64_t numOfFetchInQueue;
 
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 9c9f33ac96..26d18e910c 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -933,6 +933,7 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
   if (tEncodeI64(&encoder, pReq->qload.numOfProcessedFetch) < 0) return -1;
   if (tEncodeI64(&encoder, pReq->qload.numOfProcessedDrop) < 0) return -1;
   if (tEncodeI64(&encoder, pReq->qload.numOfProcessedHb) < 0) return -1;
+  if (tEncodeI64(&encoder, pReq->qload.numOfProcessedDelete) < 0) return -1;
   if (tEncodeI64(&encoder, pReq->qload.cacheDataSize) < 0) return -1;
   if (tEncodeI64(&encoder, pReq->qload.numOfQueryInQueue) < 0) return -1;
   if (tEncodeI64(&encoder, pReq->qload.numOfFetchInQueue) < 0) return -1;
@@ -1002,6 +1003,7 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
   if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedFetch) < 0) return -1;
   if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedDrop) < 0) return -1;
   if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedHb) < 0) return -1;
+  if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedDelete) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->qload.cacheDataSize) < 0) return -1; if (tDecodeI64(&decoder, &pReq->qload.numOfQueryInQueue) < 0) return -1; if (tDecodeI64(&decoder, &pReq->qload.numOfFetchInQueue) < 0) return -1; @@ -3814,39 +3816,64 @@ int32_t tDecodeSVGetTsmaExpWndsRsp(SDecoder *pCoder, SVGetTsmaExpWndsRsp *pReq) return 0; } -int32_t tEncodeSVDeleteReq(SEncoder* pCoder, const SVDeleteReq* pReq) { - if (tStartEncode(pCoder) < 0) return -1; - - if (tEncodeI64(pCoder, pReq->delUid) < 0) return -1; - if (tEncodeI64(pCoder, pReq->tbUid) < 0) return -1; - if (tEncodeI8(pCoder, pReq->type) < 0) return -1; - if (tEncodeI16v(pCoder, pReq->nWnds) < 0) return -1; - if (tEncodeCStr(pCoder, pReq->tbFullName) < 0) return -1; - if (tEncodeCStr(pCoder, pReq->subPlan) < 0) return -1; - for (int16_t i = 0; i < pReq->nWnds; ++i) { - if (tEncodeI64(pCoder, pReq->wnds[i].skey) < 0) return -1; - if (tEncodeI64(pCoder, pReq->wnds[i].ekey) < 0) return -1; +int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) { + int32_t headLen = sizeof(SMsgHead); + if (buf != NULL) { + buf = (char *)buf + headLen; + bufLen -= headLen; } - tEndEncode(pCoder); - return 0; + SEncoder encoder = {0}; + tEncoderInit(&encoder, buf, bufLen); + + if (tStartEncode(&encoder) < 0) return -1; + if (tEncodeU64(&encoder, pReq->sId) < 0) return -1; + if (tEncodeU64(&encoder, pReq->queryId) < 0) return -1; + if (tEncodeU64(&encoder, pReq->taskId) < 0) return -1; + if (tEncodeU32(&encoder, pReq->sqlLen) < 0) return -1; + if (tEncodeU32(&encoder, pReq->phyLen) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->msg) < 0) return -1; + tEndEncode(&encoder); + + int32_t tlen = encoder.pos; + tEncoderClear(&encoder); + + if (buf != NULL) { + SMsgHead *pHead = (SMsgHead *)((char *)buf - headLen); + pHead->vgId = htonl(pReq->header.vgId); + pHead->contLen = htonl(tlen + headLen); + } + + return tlen + headLen; } -int32_t tDecodeSVDeleteReq(SDecoder* pCoder, SVDeleteReq* pReq) { - if (tStartDecode(pCoder) < 0) return -1; +int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) { + int32_t headLen = sizeof(SMsgHead); - if (tDecodeI64(pCoder, &pReq->delUid) < 0) return -1; - if (tDecodeI64(pCoder, &pReq->tbUid) < 0) return -1; - if (tDecodeI8(pCoder, &pReq->type) < 0) return -1; - if (tDecodeI16v(pCoder, &pReq->nWnds) < 0) return -1; - if (tDecodeCStr(pCoder, &pReq->tbFullName) < 0) return -1; - if (tDecodeCStr(pCoder, &pReq->subPlan) < 0) return -1; - for (int16_t i = 0; i < pReq->nWnds; ++i) { - if (tDecodeI64(pCoder, &pReq->wnds[i].skey) < 0) return -1; - if (tDecodeI64(pCoder, &pReq->wnds[i].ekey) < 0) return -1; - } + SMsgHead *pHead = buf; + pHead->vgId = pReq->header.vgId; + pHead->contLen = pReq->header.contLen; - tEndDecode(pCoder); + SDecoder decoder = {0}; + tDecoderInit(&decoder, (char *)buf + headLen, bufLen - headLen); + + if (tStartDecode(&decoder) < 0) return -1; + if (tDecodeU64(&decoder, &pReq->sId) < 0) return -1; + if (tDecodeU64(&decoder, &pReq->queryId) < 0) return -1; + if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1; + if (tDecodeU32(&decoder, &pReq->sqlLen) < 0) return -1; + if (tDecodeU32(&decoder, &pReq->phyLen) < 0) return -1; + pReq->sql = taosMemoryCalloc(1, pReq->sqlLen + 1); + if (NULL == pReq->sql) return -1; + pReq->msg = taosMemoryCalloc(1, pReq->phyLen + 1); + if (NULL == pReq->msg) return -1; + if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1; + if (tDecodeCStrTo(&decoder, pReq->msg) < 0) return -1; + + 
tEndDecode(&decoder); + + tDecoderClear(&decoder); return 0; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 0819f79cf9..a811e3b997 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -358,6 +358,7 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RUN, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DISPATCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RECOVER, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/qnode/src/qnode.c b/source/dnode/qnode/src/qnode.c index 45b88318c4..ebaf73a952 100644 --- a/source/dnode/qnode/src/qnode.c +++ b/source/dnode/qnode/src/qnode.c @@ -59,6 +59,7 @@ int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { pLoad->numOfProcessedFetch = stat.fetchProcessed; pLoad->numOfProcessedDrop = stat.dropProcessed; pLoad->numOfProcessedHb = stat.hbProcessed; + pLoad->numOfProcessedDelete = stat.deleteProcessed; return 0; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 537f2e0964..7f731cce50 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -23,6 +23,7 @@ static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, i static int vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); +static int vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg *pRsp); int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) { int32_t code = 0; @@ -141,6 +142,9 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg case TDMT_VND_SUBMIT: if (vnodeProcessSubmitReq(pVnode, version, pMsg->pCont, pMsg->contLen, pRsp) < 0) goto _err; break; + case TDMT_VND_DELETE: + if (vnodeProcessFetchMsg(pVnode, pMsg, pRsp) < 0) goto _err; + break; /* TQ */ case TDMT_VND_MQ_VG_CHANGE: if (tqProcessVgChangeReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), @@ -252,6 +256,19 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { } } +int vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg *pRsp) { + vTrace("message in write queue is processing"); + char *msgstr = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); + switch (pMsg->msgType) { + case TDMT_VND_DELETE: + return qWorkerProcessDeleteMsg(pVnode, pVnode->pQuery, pMsg, pRsp); + default: + vError("unknown msg type:%d in write queue", pMsg->msgType); + return TSDB_CODE_VND_APP_ERROR; + } +} + // TODO: remove the function void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { // TODO diff --git a/source/libs/monitor/src/monMsg.c b/source/libs/monitor/src/monMsg.c index 944a7b5475..a041b582a9 100644 --- a/source/libs/monitor/src/monMsg.c +++ b/source/libs/monitor/src/monMsg.c @@ -569,6 +569,7 @@ int32_t 
tSerializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) { if (tEncodeI64(&encoder, pInfo->numOfProcessedFetch) < 0) return -1; if (tEncodeI64(&encoder, pInfo->numOfProcessedDrop) < 0) return -1; if (tEncodeI64(&encoder, pInfo->numOfProcessedHb) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->numOfProcessedDelete) < 0) return -1; if (tEncodeI64(&encoder, pInfo->cacheDataSize) < 0) return -1; if (tEncodeI64(&encoder, pInfo->numOfQueryInQueue) < 0) return -1; if (tEncodeI64(&encoder, pInfo->numOfFetchInQueue) < 0) return -1; @@ -591,6 +592,7 @@ int32_t tDeserializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) { if (tDecodeI64(&decoder, &pInfo->numOfProcessedFetch) < 0) return -1; if (tDecodeI64(&decoder, &pInfo->numOfProcessedDrop) < 0) return -1; if (tDecodeI64(&decoder, &pInfo->numOfProcessedHb) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->numOfProcessedDelete) < 0) return -1; if (tDecodeI64(&decoder, &pInfo->cacheDataSize) < 0) return -1; if (tDecodeI64(&decoder, &pInfo->numOfQueryInQueue) < 0) return -1; if (tDecodeI64(&decoder, &pInfo->numOfFetchInQueue) < 0) return -1; diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h index 082db6428f..1d31c86308 100644 --- a/source/libs/qworker/inc/qwInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -160,6 +160,7 @@ typedef struct SQWMsgStat { uint64_t cancelProcessed; uint64_t dropProcessed; uint64_t hbProcessed; + uint64_t deleteProcessed; } SQWMsgStat; typedef struct SQWRTStat { @@ -357,6 +358,7 @@ int32_t qwUpdateTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type); int64_t qwGetTimeInQueue(SQWorker *mgmt, EQueueType type); void qwClearExpiredSch(SArray* pExpiredSch); int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch); +void qwFreeTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx *ctx); void qwDbgDumpMgmtInfo(SQWorker *mgmt); int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore); diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index 2f1ecc5172..d4bc7892e4 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -300,13 +300,6 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } - msg->sId = msg->sId; - msg->queryId = msg->queryId; - msg->taskId = msg->taskId; - msg->refId = msg->refId; - msg->phyLen = msg->phyLen; - msg->sqlLen = msg->sqlLen; - uint64_t sId = msg->sId; uint64_t qId = msg->queryId; uint64_t tId = msg->taskId; @@ -523,3 +516,37 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_ return TSDB_CODE_SUCCESS; } + + +int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SRpcMsg *pRsp) { + if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg || NULL == pRsp) { + QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + int32_t code = 0; + SVDeleteReq req = {0}; + SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + + QW_STAT_INC(mgmt->stat.msgStat.deleteProcessed, 1); + + tDeserializeSVDeleteReq(pMsg->pCont, pMsg->contLen, &req); + + uint64_t sId = req.sId; + uint64_t qId = req.queryId; + uint64_t tId = req.taskId; + int64_t rId = 0; + + SQWMsg qwMsg = {.node = node, .msg = req.msg, .msgLen = req.phyLen, .connInfo = pMsg->info}; + QW_SCH_TASK_DLOG("processDelete start, node:%p, handle:%p, sql:%s", node, pMsg->info.handle, req.sql); + taosMemoryFreeClear(req.sql); + + QW_ERR_JRET(qwProcessDelete(QW_FPARAMS(), &qwMsg, pRsp)); + + 
QW_SCH_TASK_DLOG("processDelete end, node:%p", node); + +_return: + + QW_RET(code); +} + + diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index 8bfb80f061..4b881d2b4f 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -290,8 +290,9 @@ int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { QW_RET(code); } -void qwFreeTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { +void qwFreeTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER); + ctx->ctrlConnInfo.handle = NULL; ctx->ctrlConnInfo.refId = -1; @@ -333,7 +334,7 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); } - qwFreeTask(QW_FPARAMS(), &octx); + qwFreeTaskCtx(QW_FPARAMS(), &octx); QW_TASK_DLOG_E("task ctx dropped"); diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 44a8fdf7f4..81a390f4c2 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -183,7 +183,7 @@ int32_t qwGenerateSchHbRsp(SQWorker *mgmt, SQWSchStatus *sch, SQWHbInfo *hbInfo) return TSDB_CODE_SUCCESS; } -int32_t qwGetResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void **rspMsg, SOutputData *pOutput) { +int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void **rspMsg, SOutputData *pOutput) { int32_t len = 0; SRetrieveTableRsp *rsp = NULL; bool queryEnd = false; @@ -242,6 +242,49 @@ int32_t qwGetResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void return TSDB_CODE_SUCCESS; } +int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void **rspMsg) { + int32_t len = 0; + SVDeleteRsp rsp = {0}; + bool queryEnd = false; + int32_t code = 0; + SOutputData output = {0}; + + dsGetDataLength(ctx->sinkHandle, &len, &queryEnd); + + if (len <= 0 || len != sizeof(SVDeleteRsp)) { + QW_TASK_ELOG("invalid length from dsGetDataLength, length:%d", len); + QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + output.pData = taosMemoryCalloc(1, len); + if (NULL == output.pData) { + QW_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + code = dsGetDataBlock(ctx->sinkHandle, &output); + if (code) { + QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code)); + taosMemoryFree(output.pData); + QW_ERR_RET(code); + } + + rsp.affectedRows = *(int64_t*)output.pData; + + int32_t len; + int32_t ret = 0; + SEncoder coder = {0}; + tEncodeSize(tEncodeSVDeleteRsp, &rsp, len, ret); + void *msg = taosMemoryCalloc(1, len); + tEncoderInit(&coder, msg, len); + tEncodeSVDeleteRsp(&coder, &rsp); + tEncoderClear(&coder); + + *rspMsg = msg; + *dataLen = len; + + return TSDB_CODE_SUCCESS; +} + int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) { int32_t code = 0; @@ -547,7 +590,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { if (QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_FETCH)) { SOutputData sOutput = {0}; - QW_ERR_JRET(qwGetResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput)); + QW_ERR_JRET(qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput)); if ((!sOutput.queryEnd) && (DS_BUF_LOW == sOutput.bufStatus || DS_BUF_EMPTY == sOutput.bufStatus)) { QW_TASK_DLOG("task not end and buf is %s, need to continue query", qwBufStatusStr(sOutput.bufStatus)); @@ -620,7 +663,7 @@ int32_t qwProcessFetch(QW_FPARAMS_DEF, SQWMsg *qwMsg) { QW_ERR_JRET(qwGetTaskCtx(QW_FPARAMS(), &ctx)); SOutputData sOutput = {0}; - 
QW_ERR_JRET(qwGetResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput)); + QW_ERR_JRET(qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput)); if (NULL == rsp) { ctx->dataConnInfo = qwMsg->connInfo; @@ -875,6 +918,47 @@ _return: qwRelease(refId); } +int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SRpcMsg *pRsp) { + int32_t code = 0; + SSubplan *plan = NULL; + qTaskInfo_t pTaskInfo = NULL; + DataSinkHandle sinkHandle = NULL; + SQWTaskCtx ctx = {0}; + + code = qStringToSubplan(qwMsg->msg, &plan); + if (TSDB_CODE_SUCCESS != code) { + code = TSDB_CODE_INVALID_MSG; + QW_TASK_ELOG("task physical plan to subplan failed, code:%x - %s", code, tstrerror(code)); + QW_ERR_JRET(code); + } + + ctx->plan = plan; + + code = qCreateExecTask(qwMsg->node, mgmt->nodeId, tId, plan, &pTaskInfo, &sinkHandle, OPTR_EXEC_MODEL_BATCH); + if (code) { + QW_TASK_ELOG("qCreateExecTask failed, code:%x - %s", code, tstrerror(code)); + QW_ERR_JRET(code); + } + + if (NULL == sinkHandle || NULL == pTaskInfo) { + QW_TASK_ELOG("create task result error, taskHandle:%p, sinkHandle:%p", pTaskInfo, sinkHandle); + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + ctx->taskHandle = pTaskInfo; + ctx->sinkHandle = sinkHandle; + + QW_ERR_JRET(qwExecTask(QW_FPARAMS(), ctx, NULL)); + + QW_ERR_JRET(qwGetDeleteResFromSink(QW_FPARAMS(), &ctx, &pRsp->contLen, &pRsp->pCont)); + +_return: + + qwFreeTaskCtx(QW_FPARAMS(), &ctx); + + QW_RET(TSDB_CODE_SUCCESS); +} + int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb) { if (NULL == qWorkerMgmt || pMsgCb->mgmt == NULL) { @@ -1007,6 +1091,7 @@ int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pSt pStat->fetchProcessed = QW_STAT_GET(mgmt->stat.msgStat.fetchProcessed); pStat->dropProcessed = QW_STAT_GET(mgmt->stat.msgStat.dropProcessed); pStat->hbProcessed = QW_STAT_GET(mgmt->stat.msgStat.hbProcessed); + pStat->deleteProcessed = QW_STAT_GET(mgmt->stat.msgStat.deleteProcessed); pStat->numOfQueryInQueue = handle->pMsgCb->qsizeFp(handle->pMsgCb->mgmt, mgmt->nodeId, QUERY_QUEUE); pStat->numOfFetchInQueue = handle->pMsgCb->qsizeFp(handle->pMsgCb->mgmt, mgmt->nodeId, FETCH_QUEUE); diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index bf51d8d631..0e9e714d7d 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -227,6 +227,25 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch break; } + case TDMT_VND_DELETE_RSP: { + SCH_ERR_JRET(rspCode); + + if (msg) { + SDecoder coder = {0}; + SVDeleteRsp rsp = {0}; + tDecoderInit(&coder, msg, msgSize); + tDecodeSVDeleteRsp(&coder, &rsp); + + atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows); + SCH_TASK_DLOG("delete succeed, affectedRows:%d", rsp->affectedRows); + } + + taosMemoryFreeClear(msg); + + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + + break; + } case TDMT_VND_QUERY_RSP: { SQueryTableRsp *rsp = (SQueryTableRsp *)msg; @@ -982,6 +1001,27 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, break; } + case TDMT_VND_DELETE: { + SVDeleteReq req = {0}; + req.sId = schMgmt.sId; + req.queryId = pJob->queryId; + req.taskId = pTask->taskId; + req.phyLen = pTask->msgLen; + req.sqlLen = strlen(pJob->sql); + req.sql = pJob->sql; + req.msg = pTask->msg; + int32_t len = tSerializeSVDeleteReq(NULL, 0, &req); + msg = taosMemoryCalloc(1, len); + if (NULL == msg) { + SCH_TASK_ELOG("calloc %d failed", len); + 
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+      }
+
+      tSerializeSVDeleteReq(msg, len, &req);
+      SVDeleteReq *pMsg = msg;
+      pMsg->header.vgId = htonl(addr->nodeId);
+      break;
+    }
     case TDMT_VND_QUERY: {
       SCH_ERR_RET(schMakeQueryRpcCtx(pJob, pTask, &rpcCtx));

From 33a7a83d6569de5736bc87a3428161f7464a2310 Mon Sep 17 00:00:00 2001
From: dapan1121
Date: Mon, 6 Jun 2022 20:59:36 +0800
Subject: [PATCH 02/14] feature delete data

---
 include/libs/executor/dataSinkMgt.h        |  14 +-
 include/libs/qworker/qworker.h             |   9 +-
 source/client/src/clientImpl.c             |   2 +-
 source/common/src/tmsg.c                   |   2 -
 source/dnode/vnode/src/vnd/vnodeSvr.c      |  11 +-
 source/libs/executor/inc/dataSinkInt.h     |   1 +
 source/libs/executor/inc/executorimpl.h    |   1 +
 source/libs/executor/src/dataDeleter.c     | 254 +++++++++++++++++++++
 source/libs/executor/src/dataDispatcher.c  |   6 +-
 source/libs/executor/src/dataSinkMgt.c     |   9 +-
 source/libs/executor/src/executorMain.c    |   9 +-
 source/libs/executor/src/executorimpl.c    |  31 +++
 source/libs/parser/src/parTranslater.c     |   4 +
 source/libs/planner/src/planPhysiCreater.c |   1 +
 source/libs/planner/src/planner.c          |   2 +-
 source/libs/qworker/inc/qwMsg.h            |   1 +
 source/libs/qworker/src/qwMsg.c            |   4 +-
 source/libs/qworker/src/qwUtil.c           |   4 +-
 source/libs/qworker/src/qworker.c          |  30 +--
 source/libs/scheduler/src/schJob.c         |   2 +-
 source/libs/scheduler/src/schRemote.c      |  27 ++-
 source/libs/scheduler/src/schUtil.c        |   4 +-
 22 files changed, 383 insertions(+), 45 deletions(-)
 create mode 100644 source/libs/executor/src/dataDeleter.c

diff --git a/include/libs/executor/dataSinkMgt.h b/include/libs/executor/dataSinkMgt.h
index 2cc9caca6f..c23cf162aa 100644
--- a/include/libs/executor/dataSinkMgt.h
+++ b/include/libs/executor/dataSinkMgt.h
@@ -32,6 +32,18 @@ extern "C" {
 struct SDataSink;
 struct SSDataBlock;
 
+typedef struct SDeleterRes {
+  uint64_t uid;
+  SArray* uidList;
+  int64_t skey;
+  int64_t ekey;
+  int64_t affectedRows;
+} SDeleterRes;
+
+typedef struct SDeleterParam {
+  SArray* pUidList;
+} SDeleterParam;
+
 typedef struct SDataSinkStat {
   uint64_t cachedSize;
 } SDataSinkStat;
@@ -64,7 +76,7 @@ typedef struct SOutputData {
  * @param pHandle output
  * @return error code
  */
-int32_t dsCreateDataSinker(const SDataSinkNode* pDataSink, DataSinkHandle* pHandle);
+int32_t dsCreateDataSinker(const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void* pParam);
 
 int32_t dsDataSinkGetCacheSize(SDataSinkStat *pStat);
 
diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h
index 94ed3ace42..f3f147955a 100644
--- a/include/libs/qworker/qworker.h
+++ b/include/libs/qworker/qworker.h
@@ -31,7 +31,12 @@ enum {
   NODE_TYPE_MNODE,
 };
 
-
+typedef struct SDeleteRes {
+  uint64_t uid;
+  SArray* uidList;
+  int64_t skey;
+  int64_t ekey;
+} SDeleteRes;
 
 typedef struct SQWorkerCfg {
   uint32_t maxSchedulerNum;
@@ -75,6 +80,8 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int6
 
 int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
 
+int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SRpcMsg *pRsp, SDeleteRes *pRes);
+
 void qWorkerDestroy(void **qWorkerMgmt);
 
 int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat);
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 916017543d..ca29261755 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -431,7 +431,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
     return pRequest->code;
   }
 
-  if
(TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_CREATE_TABLE == pRequest->type) { + if (TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_DELETE == pRequest->type || TDMT_VND_CREATE_TABLE == pRequest->type) { pRequest->body.resInfo.numOfRows = res.numOfRows; if (pRequest->body.queryJob != 0) { diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 26d18e910c..68c0eed1fb 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -3880,7 +3880,6 @@ int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) { int32_t tEncodeSVDeleteRsp(SEncoder* pCoder, const SVDeleteRsp* pReq) { if (tStartEncode(pCoder) < 0) return -1; - if (tEncodeI32(pCoder, pReq->code) < 0) return -1; if (tEncodeI64(pCoder, pReq->affectedRows) < 0) return -1; tEndEncode(pCoder); @@ -3890,7 +3889,6 @@ int32_t tEncodeSVDeleteRsp(SEncoder* pCoder, const SVDeleteRsp* pReq) { int32_t tDecodeSVDeleteRsp(SDecoder* pCoder, SVDeleteRsp* pReq) { if (tStartDecode(pCoder) < 0) return -1; - if (tDecodeI32(pCoder, &pReq->code) < 0) return -1; if (tDecodeI64(pCoder, &pReq->affectedRows) < 0) return -1; tEndDecode(pCoder); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index ade5f36c8f..3eee6713d8 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -24,7 +24,7 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); -static int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg *pRsp); +static int32_t vnodeProcessWriteMsg(SVnode *pVnode, int64_t version, SRpcMsg *pMsg, SRpcMsg *pRsp); int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) { int32_t code = 0; @@ -144,7 +144,7 @@ int32_t vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp if (vnodeProcessSubmitReq(pVnode, version, pMsg->pCont, pMsg->contLen, pRsp) < 0) goto _err; break; case TDMT_VND_DELETE: - if (vnodeProcessFetchMsg(pVnode, pMsg, pRsp) < 0) goto _err; + if (vnodeProcessWriteMsg(pVnode, version, pMsg, pRsp) < 0) goto _err; break; /* TQ */ case TDMT_VND_MQ_VG_CHANGE: @@ -260,13 +260,16 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { } } -int vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg *pRsp) { +int32_t vnodeProcessWriteMsg(SVnode *pVnode, int64_t version, SRpcMsg *pMsg, SRpcMsg *pRsp) { vTrace("message in write queue is processing"); char *msgstr = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); + SDeleteRes res = {0}; + SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb}; + switch (pMsg->msgType) { case TDMT_VND_DELETE: - return qWorkerProcessDeleteMsg(pVnode, pVnode->pQuery, pMsg, pRsp); + return qWorkerProcessDeleteMsg(&handle, pVnode->pQuery, pMsg, pRsp, &res); default: vError("unknown msg type:%d in write queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; diff --git a/source/libs/executor/inc/dataSinkInt.h b/source/libs/executor/inc/dataSinkInt.h index 8f49440105..dead1aff73 100644 --- a/source/libs/executor/inc/dataSinkInt.h +++ b/source/libs/executor/inc/dataSinkInt.h @@ 
-49,6 +49,7 @@ typedef struct SDataSinkHandle { } SDataSinkHandle; int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle); +int32_t createDataDeleter(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void *pParam); #ifdef __cplusplus } diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 7d597217ee..5439ba0c89 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -855,6 +855,7 @@ int32_t decodeOperator(SOperatorInfo* ops, char* data, int32_t length); void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status); int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, EOPTR_EXEC_MODEL model); +int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo); int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity, int32_t* resNum); diff --git a/source/libs/executor/src/dataDeleter.c b/source/libs/executor/src/dataDeleter.c new file mode 100644 index 0000000000..33b7811e6c --- /dev/null +++ b/source/libs/executor/src/dataDeleter.c @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "dataSinkInt.h" +#include "dataSinkMgt.h" +#include "executorimpl.h" +#include "planner.h" +#include "tcompression.h" +#include "tdatablock.h" +#include "tglobal.h" +#include "tqueue.h" + +extern SDataSinkStat gDataSinkStat; + +typedef struct SDataDeleterBuf { + int32_t useSize; + int32_t allocSize; + char* pData; +} SDataDeleterBuf; + +typedef struct SDataCacheEntry { + int32_t dataLen; + int32_t numOfRows; + int32_t numOfCols; + int8_t compressed; + char data[]; +} SDataCacheEntry; + +typedef struct SDataDeleterHandle { + SDataSinkHandle sink; + SDataSinkManager* pManager; + SDataBlockDescNode* pSchema; + SDataDeleterNode* pDeleter; + SDeleterParam* pParam; + STaosQueue* pDataBlocks; + SDataDeleterBuf nextOutput; + int32_t status; + bool queryEnd; + uint64_t useconds; + uint64_t cachedSize; + TdThreadMutex mutex; +} SDataDeleterHandle; + +static bool needCompress(const SSDataBlock* pData, int32_t numOfCols) { + if (tsCompressColData < 0 || 0 == pData->info.rows) { + return false; + } + + for (int32_t col = 0; col < numOfCols; ++col) { + SColumnInfoData* pColRes = taosArrayGet(pData->pDataBlock, col); + int32_t colSize = pColRes->info.bytes * pData->info.rows; + if (NEEDTO_COMPRESS_QUERY(colSize)) { + return true; + } + } + + return false; +} + +static void toDataCacheEntry(SDataDeleterHandle* pHandle, const SInputData* pInput, SDataDeleterBuf* pBuf) { + int32_t numOfCols = LIST_LENGTH(pHandle->pSchema->pSlots); + + SDataCacheEntry* pEntry = (SDataCacheEntry*)pBuf->pData; + pEntry->compressed = 0; + pEntry->numOfRows = pInput->pData->info.rows; + pEntry->numOfCols = pInput->pData->info.numOfCols; + pEntry->dataLen = sizeof(SDeleterRes); + + ASSERT(1 == pEntry->numOfRows); + ASSERT(1 == pEntry->numOfCols); + + pBuf->useSize = sizeof(SDataCacheEntry); + + SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pInput->pData->pDataBlock, 0); + + SDeleterRes* pRes = (SDeleterRes*)pEntry->data; + pRes->uid = pHandle->pDeleter->tableId; + pRes->uidList = pHandle->pParam->pUidList; + pRes->skey = pHandle->pDeleter->deleteTimeRange.skey; + pRes->ekey = pHandle->pDeleter->deleteTimeRange.ekey; + pRes->affectedRows = *(int64_t*)pColRes->pData; + + pBuf->useSize += pEntry->dataLen; + + atomic_add_fetch_64(&pHandle->cachedSize, pEntry->dataLen); + atomic_add_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen); +} + +static bool allocBuf(SDataDeleterHandle* pDeleter, const SInputData* pInput, SDataDeleterBuf* pBuf) { + uint32_t capacity = pDeleter->pManager->cfg.maxDataBlockNumPerQuery; + if (taosQueueItemSize(pDeleter->pDataBlocks) > capacity) { + qError("SinkNode queue is full, no capacity, max:%d, current:%d, no capacity", capacity, + taosQueueItemSize(pDeleter->pDataBlocks)); + return false; + } + + pBuf->allocSize = sizeof(SDataCacheEntry) + sizeof(SDeleterRes); + + pBuf->pData = taosMemoryMalloc(pBuf->allocSize); + if (pBuf->pData == NULL) { + qError("SinkNode failed to malloc memory, size:%d, code:%d", pBuf->allocSize, TAOS_SYSTEM_ERROR(errno)); + } + + return NULL != pBuf->pData; +} + +static int32_t updateStatus(SDataDeleterHandle* pDeleter) { + taosThreadMutexLock(&pDeleter->mutex); + int32_t blockNums = taosQueueItemSize(pDeleter->pDataBlocks); + int32_t status = + (0 == blockNums ? DS_BUF_EMPTY + : (blockNums < pDeleter->pManager->cfg.maxDataBlockNumPerQuery ? 
DS_BUF_LOW : DS_BUF_FULL)); + pDeleter->status = status; + taosThreadMutexUnlock(&pDeleter->mutex); + return status; +} + +static int32_t getStatus(SDataDeleterHandle* pDeleter) { + taosThreadMutexLock(&pDeleter->mutex); + int32_t status = pDeleter->status; + taosThreadMutexUnlock(&pDeleter->mutex); + return status; +} + +static int32_t putDataBlock(SDataSinkHandle* pHandle, const SInputData* pInput, bool* pContinue) { + SDataDeleterHandle* pDeleter = (SDataDeleterHandle*)pHandle; + SDataDeleterBuf* pBuf = taosAllocateQitem(sizeof(SDataDeleterBuf), DEF_QITEM); + if (NULL == pBuf || !allocBuf(pDeleter, pInput, pBuf)) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + toDataCacheEntry(pDeleter, pInput, pBuf); + taosWriteQitem(pDeleter->pDataBlocks, pBuf); + *pContinue = (DS_BUF_LOW == updateStatus(pDeleter) ? true : false); + return TSDB_CODE_SUCCESS; +} + +static void endPut(struct SDataSinkHandle* pHandle, uint64_t useconds) { + SDataDeleterHandle* pDeleter = (SDataDeleterHandle*)pHandle; + taosThreadMutexLock(&pDeleter->mutex); + pDeleter->queryEnd = true; + pDeleter->useconds = useconds; + taosThreadMutexUnlock(&pDeleter->mutex); +} + +static void getDataLength(SDataSinkHandle* pHandle, int32_t* pLen, bool* pQueryEnd) { + SDataDeleterHandle* pDeleter = (SDataDeleterHandle*)pHandle; + if (taosQueueEmpty(pDeleter->pDataBlocks)) { + *pQueryEnd = pDeleter->queryEnd; + *pLen = 0; + return; + } + + SDataDeleterBuf* pBuf = NULL; + taosReadQitem(pDeleter->pDataBlocks, (void**)&pBuf); + memcpy(&pDeleter->nextOutput, pBuf, sizeof(SDataDeleterBuf)); + taosFreeQitem(pBuf); + *pLen = ((SDataCacheEntry*)(pDeleter->nextOutput.pData))->dataLen; + *pQueryEnd = pDeleter->queryEnd; + qDebug("got data len %d, row num %d in sink", *pLen, ((SDataCacheEntry*)(pDeleter->nextOutput.pData))->numOfRows); +} + +static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) { + SDataDeleterHandle* pDeleter = (SDataDeleterHandle*)pHandle; + if (NULL == pDeleter->nextOutput.pData) { + assert(pDeleter->queryEnd); + pOutput->useconds = pDeleter->useconds; + pOutput->precision = pDeleter->pSchema->precision; + pOutput->bufStatus = DS_BUF_EMPTY; + pOutput->queryEnd = pDeleter->queryEnd; + return TSDB_CODE_SUCCESS; + } + SDataCacheEntry* pEntry = (SDataCacheEntry*)(pDeleter->nextOutput.pData); + memcpy(pOutput->pData, pEntry->data, pEntry->dataLen); + pOutput->numOfRows = pEntry->numOfRows; + pOutput->numOfCols = pEntry->numOfCols; + pOutput->compressed = pEntry->compressed; + + atomic_sub_fetch_64(&pDeleter->cachedSize, pEntry->dataLen); + atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen); + + taosMemoryFreeClear(pDeleter->nextOutput.pData); // todo persistent + pOutput->bufStatus = updateStatus(pDeleter); + taosThreadMutexLock(&pDeleter->mutex); + pOutput->queryEnd = pDeleter->queryEnd; + pOutput->useconds = pDeleter->useconds; + pOutput->precision = pDeleter->pSchema->precision; + taosThreadMutexUnlock(&pDeleter->mutex); + + return TSDB_CODE_SUCCESS; +} + +static int32_t destroyDataSinker(SDataSinkHandle* pHandle) { + SDataDeleterHandle* pDeleter = (SDataDeleterHandle*)pHandle; + atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pDeleter->cachedSize); + taosMemoryFreeClear(pDeleter->nextOutput.pData); + while (!taosQueueEmpty(pDeleter->pDataBlocks)) { + SDataDeleterBuf* pBuf = NULL; + taosReadQitem(pDeleter->pDataBlocks, (void**)&pBuf); + taosMemoryFreeClear(pBuf->pData); + taosFreeQitem(pBuf); + } + taosCloseQueue(pDeleter->pDataBlocks); + taosThreadMutexDestroy(&pDeleter->mutex); + return 
TSDB_CODE_SUCCESS; +} + +static int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) { + SDataDeleterHandle* pDispatcher = (SDataDeleterHandle*)pHandle; + + *size = atomic_load_64(&pDispatcher->cachedSize); + return TSDB_CODE_SUCCESS; +} + +int32_t createDataDeleter(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void *pParam) { + SDataDeleterHandle* deleter = taosMemoryCalloc(1, sizeof(SDataDeleterHandle)); + if (NULL == deleter) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + SDataDeleterNode* pDeleterNode = (SDataDeleterNode *)pDataSink; + deleter->sink.fPut = putDataBlock; + deleter->sink.fEndPut = endPut; + deleter->sink.fGetLen = getDataLength; + deleter->sink.fGetData = getDataBlock; + deleter->sink.fDestroy = destroyDataSinker; + deleter->sink.fGetCacheSize = getCacheSize; + deleter->pManager = pManager; + deleter->pDeleter = pDeleterNode; + deleter->pSchema = pDataSink->pInputDataBlockDesc; + deleter->pParam = pParam; + deleter->status = DS_BUF_EMPTY; + deleter->queryEnd = false; + deleter->pDataBlocks = taosOpenQueue(); + taosThreadMutexInit(&deleter->mutex, NULL); + if (NULL == deleter->pDataBlocks) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + *pHandle = deleter; + return TSDB_CODE_SUCCESS; +} diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 080cf5c2ad..eaa366b7e7 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -83,7 +83,7 @@ static void toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pIn pEntry->numOfCols = pInput->pData->info.numOfCols; pEntry->dataLen = 0; - pBuf->useSize = sizeof(SRetrieveTableRsp); + pBuf->useSize = sizeof(SDataCacheEntry); blockCompressEncode(pInput->pData, pEntry->data, &pEntry->dataLen, numOfCols, pEntry->compressed); pBuf->useSize += pEntry->dataLen; @@ -100,7 +100,7 @@ static bool allocBuf(SDataDispatchHandle* pDispatcher, const SInputData* pInput, return false; } - pBuf->allocSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pInput->pData); + pBuf->allocSize = sizeof(SDataCacheEntry) + blockGetEncodeSize(pInput->pData); pBuf->pData = taosMemoryMalloc(pBuf->allocSize); if (pBuf->pData == NULL) { @@ -211,7 +211,7 @@ static int32_t destroyDataSinker(SDataSinkHandle* pHandle) { return TSDB_CODE_SUCCESS; } -int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) { +static int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) { SDataDispatchHandle* pDispatcher = (SDataDispatchHandle*)pHandle; *size = atomic_load_64(&pDispatcher->cachedSize); diff --git a/source/libs/executor/src/dataSinkMgt.c b/source/libs/executor/src/dataSinkMgt.c index 9016ca274a..ffa9822e92 100644 --- a/source/libs/executor/src/dataSinkMgt.c +++ b/source/libs/executor/src/dataSinkMgt.c @@ -34,9 +34,12 @@ int32_t dsDataSinkGetCacheSize(SDataSinkStat *pStat) { } -int32_t dsCreateDataSinker(const SDataSinkNode *pDataSink, DataSinkHandle* pHandle) { - if (QUERY_NODE_PHYSICAL_PLAN_DISPATCH == nodeType(pDataSink)) { - return createDataDispatcher(&gDataSinkManager, pDataSink, pHandle); +int32_t dsCreateDataSinker(const SDataSinkNode *pDataSink, DataSinkHandle* pHandle, void* pParam) { + switch (nodeType(pDataSink)) { + case QUERY_NODE_PHYSICAL_PLAN_DISPATCH: + return createDataDispatcher(&gDataSinkManager, pDataSink, pHandle); + case QUERY_NODE_PHYSICAL_PLAN_DELETE: + return 
createDataDeleter(&gDataSinkManager, pDataSink, pHandle, pParam); } return TSDB_CODE_FAILED; } diff --git a/source/libs/executor/src/executorMain.c b/source/libs/executor/src/executorMain.c index 7757825733..c014b23953 100644 --- a/source/libs/executor/src/executorMain.c +++ b/source/libs/executor/src/executorMain.c @@ -45,8 +45,15 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, if (code != TSDB_CODE_SUCCESS) { goto _error; } + if (handle) { - code = dsCreateDataSinker(pSubplan->pDataSink, handle); + void* pSinkParam = NULL; + code = createDataSinkParam(pSubplan->pDataSink, &pSinkParam, pTaskInfo); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + code = dsCreateDataSinker(pSubplan->pDataSink, handle, pSinkParam); } _error: diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 92b3195f36..4042326184 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4952,6 +4952,37 @@ int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) { return TDB_CODE_SUCCESS; } +int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo) { + SExecTaskInfo* pTask = (SExecTaskInfo*)pTaskInfo; + + switch (pNode->type) { + case QUERY_NODE_PHYSICAL_PLAN_DELETE: { + SDeleterParam *pDeleterParam = taosMemoryCalloc(1, sizeof(SDeleterParam)); + if (NULL == pDeleterParam) { + return TSDB_CODE_OUT_OF_MEMORY; + } + int32_t tbNum = taosArrayGetSize(pTask->tableqinfoList.pTableList); + pDeleterParam->pUidList = taosArrayInit(tbNum, sizeof(uint64_t)); + if (NULL == pDeleterParam->pUidList) { + taosMemoryFree(pDeleterParam); + return TSDB_CODE_OUT_OF_MEMORY; + } + for (int32_t i = 0; i < tbNum; ++i) { + STableKeyInfo *pTable = taosArrayGet(pTask->tableqinfoList.pTableList, i); + taosArrayPush(pDeleterParam->pUidList, &pTable->uid); + } + + *pParam = pDeleterParam; + break; + } + default: + break; + } + + return TSDB_CODE_SUCCESS; +} + + int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, EOPTR_EXEC_MODEL model) { uint64_t queryId = pPlan->id.queryId; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index f1892d0040..c385f26591 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -5012,6 +5012,10 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) { pQuery->haveResultSet = true; pQuery->msgType = TDMT_VND_QUERY; break; + case QUERY_NODE_DELETE_STMT: + pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; + pQuery->msgType = TDMT_VND_DELETE; + break; case QUERY_NODE_VNODE_MODIF_STMT: pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; pQuery->msgType = toMsgType(((SVnodeModifOpStmt*)pQuery->pRoot)->sqlNodeType); diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 3c000e3ff7..e306db48e8 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1335,6 +1335,7 @@ static int32_t buildDeleteSubplan(SPhysiPlanContext* pCxt, SVnodeModifyLogicNode if (TSDB_CODE_SUCCESS == code) { code = createDataDeleter(pCxt, pModify, pSubplan->pNode, &pSubplan->pDataSink); } + pSubplan->msgType = TDMT_VND_DELETE; return code; } diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c index f8d240c7b2..1921b16388 100644 --- a/source/libs/planner/src/planner.c +++ 
b/source/libs/planner/src/planner.c @@ -88,7 +88,7 @@ int32_t qSetSubplanExecutionNode(SSubplan* subplan, int32_t groupId, SDownstream } int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen) { - if (SUBPLAN_TYPE_MODIFY == pSubplan->subplanType) { + if (SUBPLAN_TYPE_MODIFY == pSubplan->subplanType && NULL == pSubplan->pNode) { SDataInserterNode* insert = (SDataInserterNode*)pSubplan->pDataSink; *pLen = insert->size; *pStr = insert->pData; diff --git a/source/libs/qworker/inc/qwMsg.h b/source/libs/qworker/inc/qwMsg.h index 5a8a45cd51..29861d87ac 100644 --- a/source/libs/qworker/inc/qwMsg.h +++ b/source/libs/qworker/inc/qwMsg.h @@ -30,6 +30,7 @@ int32_t qwProcessReady(QW_FPARAMS_DEF, SQWMsg *qwMsg); int32_t qwProcessFetch(QW_FPARAMS_DEF, SQWMsg *qwMsg); int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg); int32_t qwProcessHb(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req); +int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SRpcMsg *pRsp, SDeleteRes *pRes); int32_t qwBuildAndSendDropRsp(SRpcHandleInfo *pConn, int32_t code); int32_t qwBuildAndSendCancelRsp(SRpcHandleInfo *pConn, int32_t code); diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index d4bc7892e4..848a0420ca 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -518,7 +518,7 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_ } -int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SRpcMsg *pRsp) { +int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SRpcMsg *pRsp, SDeleteRes *pRes) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg || NULL == pRsp) { QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } @@ -540,7 +540,7 @@ int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SR QW_SCH_TASK_DLOG("processDelete start, node:%p, handle:%p, sql:%s", node, pMsg->info.handle, req.sql); taosMemoryFreeClear(req.sql); - QW_ERR_JRET(qwProcessDelete(QW_FPARAMS(), &qwMsg, pRsp)); + QW_ERR_JRET(qwProcessDelete(QW_FPARAMS(), &qwMsg, pRsp, pRes)); QW_SCH_TASK_DLOG("processDelete end, node:%p", node); diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index 4b881d2b4f..667008e68e 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -291,7 +291,9 @@ int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { } void qwFreeTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { - tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER); + if (ctx->ctrlConnInfo.handle) { + tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER); + } ctx->ctrlConnInfo.handle = NULL; ctx->ctrlConnInfo.refId = -1; diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 81a390f4c2..333884f883 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -242,7 +242,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, return TSDB_CODE_SUCCESS; } -int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void **rspMsg) { +int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void **rspMsg, SDeleteRes *pRes) { int32_t len = 0; SVDeleteRsp rsp = {0}; bool queryEnd = false; @@ -251,7 +251,7 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen dsGetDataLength(ctx->sinkHandle, &len, &queryEnd); - if (len <= 0 || len != sizeof(SVDeleteRsp)) { + if (len <= 0 || 
len != sizeof(SDeleterRes)) { QW_TASK_ELOG("invalid length from dsGetDataLength, length:%d", len); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } @@ -268,13 +268,17 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen QW_ERR_RET(code); } - rsp.affectedRows = *(int64_t*)output.pData; + SDeleterRes* pDelRes = (SDeleterRes*)output.pData; + + rsp.affectedRows = pDelRes->affectedRows; + pRes->uid = pDelRes->uid; + pRes->uidList = pDelRes->uidList; + pRes->skey = pDelRes->skey; + pRes->ekey = pDelRes->ekey; - int32_t len; - int32_t ret = 0; SEncoder coder = {0}; - tEncodeSize(tEncodeSVDeleteRsp, &rsp, len, ret); - void *msg = taosMemoryCalloc(1, len); + tEncodeSize(tEncodeSVDeleteRsp, &rsp, len, code); + void *msg = rpcMallocCont(len); tEncoderInit(&coder, msg, len); tEncodeSVDeleteRsp(&coder, &rsp); tEncoderClear(&coder); @@ -918,7 +922,7 @@ _return: qwRelease(refId); } -int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SRpcMsg *pRsp) { +int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SRpcMsg *pRsp, SDeleteRes *pRes) { int32_t code = 0; SSubplan *plan = NULL; qTaskInfo_t pTaskInfo = NULL; @@ -932,7 +936,7 @@ int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SRpcMsg *pRsp) { QW_ERR_JRET(code); } - ctx->plan = plan; + ctx.plan = plan; code = qCreateExecTask(qwMsg->node, mgmt->nodeId, tId, plan, &pTaskInfo, &sinkHandle, OPTR_EXEC_MODEL_BATCH); if (code) { @@ -945,12 +949,12 @@ int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SRpcMsg *pRsp) { QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } - ctx->taskHandle = pTaskInfo; - ctx->sinkHandle = sinkHandle; + ctx.taskHandle = pTaskInfo; + ctx.sinkHandle = sinkHandle; - QW_ERR_JRET(qwExecTask(QW_FPARAMS(), ctx, NULL)); + QW_ERR_JRET(qwExecTask(QW_FPARAMS(), &ctx, NULL)); - QW_ERR_JRET(qwGetDeleteResFromSink(QW_FPARAMS(), &ctx, &pRsp->contLen, &pRsp->pCont)); + QW_ERR_JRET(qwGetDeleteResFromSink(QW_FPARAMS(), &ctx, &pRsp->contLen, &pRsp->pCont, pRes)); _return: diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index ca90d2fe34..b51ce8e1c1 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -1476,7 +1476,7 @@ int32_t schExecJobImpl(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_ SSchJob *pJob = NULL; SCH_ERR_RET(schInitJob(&pJob, pDag, pTrans, pNodeList, sql, pRes, startTs, sync)); - qDebug("QID:0x%" PRIx64 " jobId:0x%"PRIx64 " started", pDag->queryId, pJob->refId); + qDebug("QID:0x%" PRIx64 " job refId 0x%"PRIx64 " started", pDag->queryId, pJob->refId); *job = pJob->refId; SCH_ERR_JRET(schLaunchJob(pJob)); diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 0e9e714d7d..e40db48401 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -62,10 +62,11 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy case TDMT_VND_DROP_TABLE_RSP: case TDMT_VND_ALTER_TABLE_RSP: case TDMT_VND_SUBMIT_RSP: + case TDMT_VND_DELETE_RSP: break; default: SCH_TASK_ELOG("unknown rsp msg, type:%s, status:%s", TMSG_INFO(msgType), jobTaskStatusStr(taskStatus)); - SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + SCH_ERR_RET(TSDB_CODE_INVALID_MSG); } if (lastMsgType != reqMsgType) { @@ -236,8 +237,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch tDecoderInit(&coder, msg, msgSize); tDecodeSVDeleteRsp(&coder, &rsp); - atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows); - SCH_TASK_DLOG("delete succeed, 
affectedRows:%d", rsp->affectedRows); + atomic_add_fetch_32(&pJob->resNumOfRows, rsp.affectedRows); + SCH_TASK_DLOG("delete succeed, affectedRows:%" PRId64, rsp.affectedRows); } taosMemoryFreeClear(msg); @@ -430,6 +431,10 @@ int32_t schHandleQueryCallback(void *param, const SDataBuf *pMsg, int32_t code) return schHandleCallback(param, pMsg, TDMT_VND_QUERY_RSP, code); } +int32_t schHandleDeleteCallback(void *param, const SDataBuf *pMsg, int32_t code) { + return schHandleCallback(param, pMsg, TDMT_VND_DELETE_RSP, code); +} + int32_t schHandleFetchCallback(void *param, const SDataBuf *pMsg, int32_t code) { return schHandleCallback(param, pMsg, TDMT_VND_FETCH_RSP, code); } @@ -520,6 +525,9 @@ int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp) { case TDMT_VND_QUERY: *fp = schHandleQueryCallback; break; + case TDMT_VND_DELETE: + *fp = schHandleDeleteCallback; + break; case TDMT_VND_EXPLAIN: *fp = schHandleExplainCallback; break; @@ -1003,23 +1011,22 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, case TDMT_VND_DELETE: { SVDeleteReq req = {0}; + req.header.vgId = addr->nodeId; req.sId = schMgmt.sId; req.queryId = pJob->queryId; req.taskId = pTask->taskId; req.phyLen = pTask->msgLen; req.sqlLen = strlen(pJob->sql); - req.sql = pJob->sql; + req.sql = (char*)pJob->sql; req.msg = pTask->msg; - int32_t len = tSerializeSVDeleteReq(NULL, 0, &req); - msg = taosMemoryCalloc(1, len); + msgSize = tSerializeSVDeleteReq(NULL, 0, &req); + msg = taosMemoryCalloc(1, msgSize); if (NULL == msg) { - SCH_TASK_ELOG("calloc %d failed", len); + SCH_TASK_ELOG("calloc %d failed", msgSize); SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); } - tSerializeSVDeleteReq(msg, len, &req); - SVDeleteReq *pMsg = msg; - pMsg->header.vgId = htonl(addr->nodeId); + tSerializeSVDeleteReq(msg, msgSize, &req); break; } case TDMT_VND_QUERY: { diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c index 18398802db..38c03c74d9 100644 --- a/source/libs/scheduler/src/schUtil.c +++ b/source/libs/scheduler/src/schUtil.c @@ -257,6 +257,8 @@ void schFreeRpcCtx(SRpcCtx *pCtx) { taosHashCleanup(pCtx->args); - (*pCtx->freeFunc)(pCtx->brokenVal.val); + if (pCtx->freeFunc) { + (*pCtx->freeFunc)(pCtx->brokenVal.val); + } } From 1b26da1bae5a90cca6e39f7547b13f0f3cbd30e1 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 6 Jun 2022 22:18:50 +0800 Subject: [PATCH 03/14] refactor(stream): distributed execution --- include/common/tmsgdef.h | 1 + include/libs/stream/tstream.h | 17 ---- source/dnode/vnode/src/tq/tq.c | 2 +- source/libs/stream/inc/streamInc.h | 1 + source/libs/stream/src/streamMsg.c | 145 +++++++++++++++++++++++++++- source/libs/stream/src/streamSink.c | 7 +- 6 files changed, 147 insertions(+), 26 deletions(-) diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 73f8515f22..8a811774b2 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -187,6 +187,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TASK_RUN, "vnode-stream-task-run", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TASK_DISPATCH, "vnode-stream-task-dispatch", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_TASK_DISPATCH_WRITE, "vnode-stream-task-dispatch-write", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TASK_RECOVER, "vnode-stream-task-recover", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CREATE_SMA, "vnode-create-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL) diff --git 
a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index bb0b6dc0a0..82674c6115 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -285,12 +285,6 @@ struct SStreamTask { int8_t inputStatus; int8_t outputStatus; -#if 0 - STaosQueue* inputQ; - STaosQall* inputQAll; - STaosQueue* outputQ; - STaosQall* outputQAll; -#endif SStreamQueue* inputQueue; SStreamQueue* outputQueue; @@ -371,13 +365,6 @@ typedef struct { int32_t taskId; } SStreamTaskRunReq; -typedef struct { - // SMsgHead head; - int64_t streamId; - int64_t version; - SArray* res; // SArray -} SStreamSinkReq; - typedef struct { int64_t streamId; int32_t taskId; @@ -413,10 +400,6 @@ typedef struct { int32_t streamTriggerByWrite(SStreamTask* pTask, int32_t vgId, SMsgCb* pMsgCb); -int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input); -int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input); -int32_t streamDequeueOutput(SStreamTask* pTask, void** output); - int32_t streamTaskRun(SStreamTask* pTask); int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 38482ccad5..3de5109a1a 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -216,8 +216,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { } // TODO wrap in destroy func - taosArrayDestroy(rsp.blockData); taosArrayDestroy(rsp.blockDataLen); + taosArrayDestroyP(rsp.blockData, (FDelete)taosMemoryFree); if (rsp.withSchema) { taosArrayDestroyP(rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper); diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index 604539f16a..b5f7362689 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -25,6 +25,7 @@ extern "C" { int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb); int32_t streamSink1(SStreamTask* pTask, SMsgCb* pMsgCb); +int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDataBlock* data); #ifdef __cplusplus } diff --git a/source/libs/stream/src/streamMsg.c b/source/libs/stream/src/streamMsg.c index 81f18fea8d..9f22bbbe8a 100644 --- a/source/libs/stream/src/streamMsg.c +++ b/source/libs/stream/src/streamMsg.c @@ -32,7 +32,7 @@ int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* p if (tEncodeBinary(pEncoder, data, len) < 0) return -1; } tEndEncode(pEncoder); - return 0; + return pEncoder->pos; } int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { @@ -60,11 +60,150 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { return 0; } -int32_t streamBuildDispatchMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) { +static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) { + int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock); + void* buf = taosMemoryCalloc(1, dataStrLen); + if (buf == NULL) return -1; + + SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; + pRetrieve->useconds = 0; + pRetrieve->precision = TSDB_DEFAULT_PRECISION; + pRetrieve->compressed = 0; + pRetrieve->completed = 1; + pRetrieve->numOfRows = htonl(pBlock->info.rows); + + int32_t actualLen = 0; + blockCompressEncode(pBlock, pRetrieve->data, &actualLen, pBlock->info.numOfCols, false); + actualLen += sizeof(SRetrieveTableRsp); + ASSERT(actualLen <= dataStrLen); + taosArrayPush(pReq->dataLen, &actualLen); 
+ taosArrayPush(pReq->data, &buf); + + return 0; +} + +int32_t streamBuildDispatchMsg(SStreamTask* pTask, SStreamDataBlock* data, SRpcMsg* pMsg, SEpSet** ppEpSet) { + void* buf = NULL; + int32_t code = -1; + int32_t blockNum = taosArrayGetSize(data->blocks); + ASSERT(blockNum != 0); + SStreamDispatchReq req = { .streamId = pTask->streamId, - .data = data, + .sourceTaskId = pTask->taskId, + .sourceVg = data->sourceVg, + .sourceChildId = pTask->childId, + .blockNum = blockNum, }; + + req.data = taosArrayInit(blockNum, sizeof(void*)); + req.dataLen = taosArrayInit(blockNum, sizeof(int32_t)); + if (req.data == NULL || req.dataLen == NULL) { + goto FAIL; + } + for (int32_t i = 0; i < blockNum; i++) { + SSDataBlock* pDataBlock = taosArrayGet(data->blocks, i); + if (streamAddBlockToDispatchMsg(pDataBlock, &req) < 0) { + goto FAIL; + } + } + int32_t vgId = 0; + int32_t downstreamTaskId = 0; + // find ep + if (pTask->dispatchType == TASK_DISPATCH__FIXED) { + vgId = pTask->fixedEpDispatcher.nodeId; + *ppEpSet = &pTask->fixedEpDispatcher.epSet; + downstreamTaskId = pTask->fixedEpDispatcher.taskId; + } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) { + // TODO get ctbName + char ctbName[TSDB_TABLE_FNAME_LEN + 22] = {0}; + SSDataBlock* pBlock = taosArrayGet(data->blocks, 0); + sprintf(ctbName, "%s:%ld", pTask->shuffleDispatcher.stbFullName, pBlock->info.groupId); + // get vg and ep + // TODO: get hash function by hashMethod + + // get groupId, compute hash value + uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName)); + + // get node + // TODO: optimize search process + SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos; + int32_t sz = taosArrayGetSize(vgInfo); + for (int32_t i = 0; i < sz; i++) { + SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); + if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) { + vgId = pVgInfo->vgId; + downstreamTaskId = pVgInfo->taskId; + *ppEpSet = &pVgInfo->epSet; + break; + } + } + ASSERT(vgId != 0); + } + + req.taskId = downstreamTaskId; + + // serialize + int32_t tlen; + tEncodeSize(tEncodeStreamDispatchReq, &req, tlen, code); + if (code < 0) goto FAIL; + buf = rpcMallocCont(sizeof(SMsgHead) + tlen); + if (buf == NULL) { + code = -1; + goto FAIL; + } + + ((SMsgHead*)buf)->vgId = htonl(pTask->fixedEpDispatcher.nodeId); + void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); + + SEncoder encoder; + tEncoderInit(&encoder, abuf, tlen); + if ((code = tEncodeStreamDispatchReq(&encoder, &req)) < 0) { + goto FAIL; + } + tEncoderClear(&encoder); + + pMsg->contLen = tlen + sizeof(SMsgHead); + pMsg->pCont = buf; + + code = 0; +FAIL: + if (buf) taosMemoryFree(buf); + if (req.data) taosArrayDestroyP(req.data, (FDelete)taosMemoryFree); + if (req.dataLen) taosArrayDestroy(req.dataLen); + return code; +} + +int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDataBlock* data) { + if (pTask->dispatchType == TASK_DISPATCH__INPLACE) { + SRpcMsg dispatchMsg = {0}; + if (streamBuildDispatchMsg(pTask, data, &dispatchMsg, NULL) < 0) { + ASSERT(0); + return -1; + } + + int32_t qType; + if (pTask->dispatchMsgType == TDMT_VND_TASK_DISPATCH || pTask->dispatchMsgType == TDMT_SND_TASK_DISPATCH) { + qType = FETCH_QUEUE; + } else if (pTask->dispatchMsgType == TDMT_VND_TASK_DISPATCH_WRITE) { + qType = WRITE_QUEUE; + } else { + ASSERT(0); + } + tmsgPutToQueue(pMsgCb, qType, &dispatchMsg); + } else if (pTask->dispatchType == TASK_DISPATCH__FIXED) { + SRpcMsg dispatchMsg = {0}; + SEpSet* pEpSet = NULL; + if (streamBuildDispatchMsg(pTask, data, 
&dispatchMsg, &pEpSet) < 0) { + ASSERT(0); + return -1; + } + + tmsgSendReq(pEpSet, &dispatchMsg); + } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) { + // TODO + ASSERT(0); + } return 0; } diff --git a/source/libs/stream/src/streamSink.c b/source/libs/stream/src/streamSink.c index 6acdd1064c..6fd0a00517 100644 --- a/source/libs/stream/src/streamSink.c +++ b/source/libs/stream/src/streamSink.c @@ -41,12 +41,9 @@ int32_t streamSink1(SStreamTask* pTask, SMsgCb* pMsgCb) { pTask->smaSink.smaSink(pTask->ahandle, pTask->smaSink.smaId, pBlock->blocks); } - if (pTask->dispatchType == TASK_DISPATCH__INPLACE) { - ASSERT(queue == pTask->outputQueue); - } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) { - ASSERT(queue == pTask->outputQueue); - } else if (pTask->dispatchType == TASK_DISPATCH__FIXED) { + if (pTask->dispatchType != TASK_DISPATCH__NONE) { ASSERT(queue == pTask->outputQueue); + streamDispatch(pTask, pMsgCb, pBlock); } streamQueueProcessSuccess(queue); From 42d02a7ea676539665742b951ec4851ec4f02ee2 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 7 Jun 2022 09:11:53 +0800 Subject: [PATCH 04/14] fix uidlist issue --- source/libs/executor/src/executorimpl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 4042326184..0b738d829c 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4953,7 +4953,7 @@ int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) { } int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo) { - SExecTaskInfo* pTask = (SExecTaskInfo*)pTaskInfo; + SExecTaskInfo* pTask = *(SExecTaskInfo**)pTaskInfo; switch (pNode->type) { case QUERY_NODE_PHYSICAL_PLAN_DELETE: { From 886609f62b8dc2332ac4d06f943858fe2646829a Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Tue, 7 Jun 2022 10:36:32 +0800 Subject: [PATCH 05/14] docs: merge PHP doc change from develop to 3.0 --- docs-cn/07-develop/01-connect/index.md | 2 +- docs-cn/14-reference/03-connector/php.mdx | 152 ++++++++++++++++++++++ docs-examples/php/insert_stmt.php | 2 +- 3 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 docs-cn/14-reference/03-connector/php.mdx diff --git a/docs-cn/07-develop/01-connect/index.md b/docs-cn/07-develop/01-connect/index.md index 3a15d03f93..4abceb884f 100644 --- a/docs-cn/07-develop/01-connect/index.md +++ b/docs-cn/07-develop/01-connect/index.md @@ -212,7 +212,7 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive && tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 ``` -> 版本 `v1.0.0` 可替换为任意更新的版本,可在 Release 中查看最新版本。 +> 版本 `v1.0.2` 可替换为任意更新的版本,可在 Release 中查看最新版本。 **非 Swoole 环境:** diff --git a/docs-cn/14-reference/03-connector/php.mdx b/docs-cn/14-reference/03-connector/php.mdx new file mode 100644 index 0000000000..8ee9fba2af --- /dev/null +++ b/docs-cn/14-reference/03-connector/php.mdx @@ -0,0 +1,152 @@ +--- +sidebar_position: 1 +sidebar_label: PHP +title: PHP Connector +--- + +`php-tdengine` 是由社区贡献的 PHP 连接器扩展,还特别支持了 Swoole 协程化。 + +PHP 连接器依赖 TDengine 客户端驱动,其安装方式与 C/C++ 驱动相同。 + +项目地址: + +TDengine 服务端或客户端安装后,`taos.h` 位于: + +- Linux:`/usr/local/taos/include` +- Windows:`C:\TDengine\include` + +TDengine 客户端驱动的动态库位于: + +- Linux: `/usr/local/taos/driver/libtaos.so` +- Windows: `C:\TDengine\taos.dll` + +## 支持的平台 + +* Windows、Linux、MacOS + +* PHP >= 7.4 + +* TDengine >= 2.0 + +* Swoole >= 4.8 (可选) + +## 支持的版本 + +TDengine 
客户端驱动的版本号与 TDengine 服务端的版本号是一一对应的强对应关系,建议使用与 TDengine 服务端完全相同的客户端驱动。虽然低版本的客户端驱动在前三段版本号一致(即仅第四段版本号不同)的情况下也能够与高版本的服务端相兼容,但这并非推荐用法。强烈不建议使用高版本的客户端驱动访问低版本的服务端。 + +## 安装步骤 + +### 安装 TDengine 客户端驱动 + +TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤) + +### 编译安装 php-tdengine + +**下载代码并解压:** + +```shell +curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \ +&& mkdir php-tdengine \ +&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 +``` + +> 版本 `v1.0.2` 可替换为任意更新的版本,可在 Release 中查看最新版本。 + +**非 Swoole 环境:** + +```shell +phpize && ./configure && make -j && make install +``` + +**手动指定 tdengine 目录:** + +```shell +phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install +``` + +> `--with-tdengine-dir=` 后跟上 tdengine 目录。 +> 适用于默认找不到的情况,或者 MacOS 系统用户。 + +**Swoole 环境:** + +```shell +phpize && ./configure --enable-swoole && make -j && make install +``` + +**启用扩展:** + +方法一:在 `php.ini` 中加入 `extension=tdengine` + +方法二:运行带参数 `php -dextension=tdengine test.php` + +## 示例程序 + +本节展示了使用客户端驱动访问 TDengine 集群的常见访问方式的示例代码。 + +> 所有错误都会抛出异常: `TDengine\Exception\TDengineException` + +### 建立连接 + +
+建立连接 + +```c +{{#include docs-examples/php/connect.php}} +``` + +
+ +### 插入数据 + +
+插入数据 + +```c +{{#include docs-examples/php/insert.php}} +``` + +
+ +### 同步查询 + +
+同步查询 + +```c +{{#include docs-examples/php/query.php}} +``` + +
+ +### 参数绑定 + +
+参数绑定 + +```c +{{#include docs-examples/php/insert_stmt.php}} +``` + +
+ +## 常量 + +### 字段类型 + +| 参数名称 | 说明 | +| ------------ | ------------ +| `TDengine\TSDB_DATA_TYPE_NULL` | null | +| `TDengine\TSDB_DATA_TYPE_BOOL` | bool | +| `TDengine\TSDB_DATA_TYPE_TINYINT` | tinyint | +| `TDengine\TSDB_DATA_TYPE_SMALLINT` | smallint | +| `TDengine\TSDB_DATA_TYPE_INT` | int | +| `TDengine\TSDB_DATA_TYPE_BIGINT` | bigint | +| `TDengine\TSDB_DATA_TYPE_FLOAT` | float | +| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double | +| `TDengine\TSDB_DATA_TYPE_BINARY` | binary | +| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp | +| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar | +| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint | +| `TDengine\TSDB_DATA_TYPE_USMALLINT` | usmallint | +| `TDengine\TSDB_DATA_TYPE_UINT` | uint | +| `TDengine\TSDB_DATA_TYPE_UBIGINT` | ubigint | diff --git a/docs-examples/php/insert_stmt.php b/docs-examples/php/insert_stmt.php index 99a9a6aef3..c927a9b0ce 100644 --- a/docs-examples/php/insert_stmt.php +++ b/docs-examples/php/insert_stmt.php @@ -22,7 +22,7 @@ try { // set table name and tags $stmt->setTableNameTags('d1001', [ - // 支持格式同参数绑定 + // same format as parameter binding [TDengine\TSDB_DATA_TYPE_BINARY, 'California.SanFrancisco'], [TDengine\TSDB_DATA_TYPE_INT, 2], ]); From 858868d76a3f312e7d648b128957fd604d2f16f1 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 7 Jun 2022 09:49:03 +0800 Subject: [PATCH 06/14] refactor(stream): distributed execution --- examples/c/stream_demo.c | 4 ++-- include/libs/stream/tstream.h | 2 ++ source/dnode/vnode/src/tq/tq.c | 18 ++++++++++++++---- source/libs/stream/src/stream.c | 12 +++++++++--- source/libs/stream/src/streamExec.c | 1 + source/libs/stream/src/streamMsg.c | 28 ++++++++++++++++++++++------ source/libs/stream/src/streamSink.c | 13 +++++++++---- 7 files changed, 59 insertions(+), 19 deletions(-) diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c index 97ff2886fc..943fcbdb53 100644 --- a/examples/c/stream_demo.c +++ b/examples/c/stream_demo.c @@ -25,7 +25,7 @@ int32_t init_env() { return -1; } - TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1"); + TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 2"); if (taos_errno(pRes) != 0) { printf("error in create db, reason:%s\n", taos_errstr(pRes)); return -1; @@ -82,7 +82,7 @@ int32_t create_stream() { /*const char* sql = "select sum(k) from tu1 interval(10m)";*/ /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/ pRes = taos_query( - pConn, "create stream stream1 trigger at_once into outstb as select _wstartts, sum(k) from tu1 interval(10m)"); + pConn, "create stream stream1 trigger at_once into outstb as select _wstartts, sum(k) from st1 interval(10m)"); if (taos_errno(pRes) != 0) { printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes)); return -1; diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 82674c6115..db8d3ac033 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -398,6 +398,8 @@ typedef struct { int8_t inputStatus; } SStreamTaskRecoverRsp; +int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq); + int32_t streamTriggerByWrite(SStreamTask* pTask, int32_t vgId, SMsgCb* pMsgCb); int32_t streamTaskRun(SStreamTask* pTask); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 3de5109a1a..2b5e18c1db 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -421,10 +421,20 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { } 
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg) { - SStreamDispatchReq* pReq = pMsg->pCont; - int32_t taskId = pReq->taskId; - SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - streamProcessDispatchReq(pTask, &pTq->pVnode->msgCb, pReq, pMsg); + char* msgStr = pMsg->pCont; + char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead)); + int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); + SStreamDispatchReq req; + SDecoder decoder; + tDecoderInit(&decoder, msgBody, msgLen); + tDecodeStreamDispatchReq(&decoder, &req); + int32_t taskId = req.taskId; + SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); + SRpcMsg rsp = { + .info = pMsg->info, + .code = 0, + }; + streamProcessDispatchReq(pTask, &pTq->pVnode->msgCb, &req, &rsp); return 0; } diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index d89f2ed57d..99c61e1479 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -57,12 +57,14 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* } // rsp by input status - SStreamDispatchRsp* pCont = rpcMallocCont(sizeof(SStreamDispatchRsp)); + void* buf = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp)); + ((SMsgHead*)buf)->vgId = htonl(pReq->sourceVg); + SStreamDispatchRsp* pCont = POINTER_SHIFT(buf, sizeof(SMsgHead)); pCont->inputStatus = status; pCont->streamId = pReq->streamId; pCont->taskId = pReq->sourceTaskId; - pRsp->pCont = pCont; - pRsp->contLen = sizeof(SStreamDispatchRsp); + pRsp->pCont = buf; + pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp); tmsgSendRsp(pRsp); return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1; } @@ -87,8 +89,12 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDisp } int32_t streamProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp) { + ASSERT(pRsp->inputStatus == TASK_OUTPUT_STATUS__NORMAL || pRsp->inputStatus == TASK_OUTPUT_STATUS__BLOCKED); + int8_t old = atomic_exchange_8(&pTask->outputStatus, pRsp->inputStatus); + ASSERT(old == TASK_OUTPUT_STATUS__WAIT); if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) { // TODO: init recover timer + return 0; } // continue dispatch streamSink1(pTask, pMsgCb); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 4b88cf503e..72df516e0d 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -43,6 +43,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) if (output == NULL) break; // TODO: do we need free memory? SSDataBlock* outputCopy = createOneDataBlock(output, true); + outputCopy->info.childId = pTask->childId; taosArrayPush(pRes, outputCopy); } return 0; diff --git a/source/libs/stream/src/streamMsg.c b/source/libs/stream/src/streamMsg.c index 9f22bbbe8a..769d672042 100644 --- a/source/libs/stream/src/streamMsg.c +++ b/source/libs/stream/src/streamMsg.c @@ -13,7 +13,7 @@ * along with this program. If not, see . 
*/ -#include "tstream.h" +#include "streamInc.h" int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) { if (tStartEncode(pEncoder) < 0) return -1; @@ -147,13 +147,13 @@ int32_t streamBuildDispatchMsg(SStreamTask* pTask, SStreamDataBlock* data, SRpcM int32_t tlen; tEncodeSize(tEncodeStreamDispatchReq, &req, tlen, code); if (code < 0) goto FAIL; + code = -1; buf = rpcMallocCont(sizeof(SMsgHead) + tlen); if (buf == NULL) { - code = -1; goto FAIL; } - ((SMsgHead*)buf)->vgId = htonl(pTask->fixedEpDispatcher.nodeId); + ((SMsgHead*)buf)->vgId = htonl(vgId); void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); SEncoder encoder; @@ -165,16 +165,24 @@ int32_t streamBuildDispatchMsg(SStreamTask* pTask, SStreamDataBlock* data, SRpcM pMsg->contLen = tlen + sizeof(SMsgHead); pMsg->pCont = buf; + pMsg->msgType = pTask->dispatchMsgType; code = 0; FAIL: - if (buf) taosMemoryFree(buf); + if (code < 0 && buf) rpcFreeCont(buf); if (req.data) taosArrayDestroyP(req.data, (FDelete)taosMemoryFree); if (req.dataLen) taosArrayDestroy(req.dataLen); return code; } int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDataBlock* data) { +#if 0 + int8_t old = + atomic_val_compare_exchange_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL, TASK_OUTPUT_STATUS__WAIT); + if (old != TASK_OUTPUT_STATUS__NORMAL) { + return 0; + } +#endif if (pTask->dispatchType == TASK_DISPATCH__INPLACE) { SRpcMsg dispatchMsg = {0}; if (streamBuildDispatchMsg(pTask, data, &dispatchMsg, NULL) < 0) { @@ -201,12 +209,19 @@ int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDataBlock* dat tmsgSendReq(pEpSet, &dispatchMsg); } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) { - // TODO - ASSERT(0); + SRpcMsg dispatchMsg = {0}; + SEpSet* pEpSet = NULL; + if (streamBuildDispatchMsg(pTask, data, &dispatchMsg, &pEpSet) < 0) { + ASSERT(0); + return -1; + } + + tmsgSendReq(pEpSet, &dispatchMsg); } return 0; } +#if 0 static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) { SStreamTaskExecReq req = { .streamId = pTask->streamId, @@ -287,3 +302,4 @@ static int32_t streamShuffleDispatch(SStreamTask* pTask, SMsgCb* pMsgCb, SHashOb } return 0; } +#endif diff --git a/source/libs/stream/src/streamSink.c b/source/libs/stream/src/streamSink.c index 6fd0a00517..35bebe0e63 100644 --- a/source/libs/stream/src/streamSink.c +++ b/source/libs/stream/src/streamSink.c @@ -13,8 +13,7 @@ * along with this program. If not, see . 
*/ -#include "executor.h" -#include "tstream.h" +#include "streamInc.h" int32_t streamSink1(SStreamTask* pTask, SMsgCb* pMsgCb) { SStreamQueue* queue; @@ -23,12 +22,13 @@ int32_t streamSink1(SStreamTask* pTask, SMsgCb* pMsgCb) { } else { queue = pTask->outputQueue; } + /*if (streamDequeueBegin(queue) == true) {*/ /*return -1;*/ /*}*/ - if (pTask->sinkType == TASK_SINK__TABLE || pTask->sinkType == TASK_SINK__SMA) { - ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE); + if (pTask->sinkType == TASK_SINK__TABLE || pTask->sinkType == TASK_SINK__SMA || + pTask->dispatchType != TASK_DISPATCH__NONE) { while (1) { SStreamDataBlock* pBlock = streamQueueNextItem(queue); if (pBlock == NULL) break; @@ -36,13 +36,18 @@ int32_t streamSink1(SStreamTask* pTask, SMsgCb* pMsgCb) { // local sink if (pTask->sinkType == TASK_SINK__TABLE) { + ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE); pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pBlock->blocks); } else if (pTask->sinkType == TASK_SINK__SMA) { + ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE); pTask->smaSink.smaSink(pTask->ahandle, pTask->smaSink.smaId, pBlock->blocks); } + // TODO: sink and dispatch should be only one if (pTask->dispatchType != TASK_DISPATCH__NONE) { ASSERT(queue == pTask->outputQueue); + ASSERT(pTask->sinkType == TASK_SINK__NONE); + streamDispatch(pTask, pMsgCb, pBlock); } From 1a84d071885117b9afc59f99516b49c6d970a4df Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 7 Jun 2022 11:36:40 +0800 Subject: [PATCH 07/14] fix(stream): msg deserialize --- source/common/src/tdatablock.c | 41 ++++++++++++++++------------- source/libs/stream/inc/streamInc.h | 2 ++ source/libs/stream/src/stream.c | 16 +++++------ source/libs/stream/src/streamData.c | 23 ++++++++++++++++ source/libs/stream/src/streamMsg.c | 1 + 5 files changed, 56 insertions(+), 27 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index c615af705a..b5d278b563 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -99,7 +99,7 @@ void colDataTrim(SColumnInfoData* pColumnInfoData) { // TODO } -int32_t getJsonValueLen(const char *data) { +int32_t getJsonValueLen(const char* data) { int32_t dataLen = 0; if (*data == TSDB_DATA_TYPE_NULL) { dataLen = CHAR_BYTES; @@ -109,7 +109,7 @@ int32_t getJsonValueLen(const char *data) { dataLen = DOUBLE_BYTES + CHAR_BYTES; } else if (*data == TSDB_DATA_TYPE_BOOL) { dataLen = CHAR_BYTES + CHAR_BYTES; - } else if (*data & TD_TAG_JSON) { // json string + } else if (*data & TD_TAG_JSON) { // json string dataLen = ((STag*)(data))->len; } else { ASSERT(0); @@ -137,7 +137,7 @@ int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, con int32_t dataLen = 0; if (type == TSDB_DATA_TYPE_JSON) { dataLen = getJsonValueLen(pData); - }else { + } else { dataLen = varDataTLen(pData); } @@ -1283,7 +1283,7 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) { if (n % 8 == 0) { memmove(nullBitmap, nullBitmap + n / 8, newLen); } else { - int32_t tail = n % 8; + int32_t tail = n % 8; int32_t i = 0; uint8_t* p = (uint8_t*)nullBitmap; @@ -1301,7 +1301,7 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) { } } else if (n > 8) { int32_t gap = len - newLen; - while(i < newLen) { + while (i < newLen) { uint8_t v = p[i + gap]; p[i] = (v << tail); @@ -1316,7 +1316,6 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) { } } - static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) { if 
(IS_VAR_DATA_TYPE(pColInfoData->info.type)) { memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t)); @@ -1544,7 +1543,8 @@ void blockDebugShowData(const SArray* dataBlocks, const char* flag) { * * TODO: colId should be set */ -int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, tb_uid_t suid) { +int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, + tb_uid_t suid) { int32_t sz = taosArrayGetSize(pDataBlocks); int32_t bufSize = sizeof(SSubmitReq); for (int32_t i = 0; i < sz; ++i) { @@ -1585,11 +1585,11 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks int32_t dataLen = 0; for (int32_t j = 0; j < rows; ++j) { // iterate by row tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen)); // set row buf - bool isStartKey = false; + bool isStartKey = false; int32_t offset = 0; for (int32_t k = 0; k < colNum; ++k) { // iterate by column SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); - STColumn* pCol = &pTSchema->columns[k]; + STColumn* pCol = &pTSchema->columns[k]; ASSERT(pCol->type == pColInfoData->info.type); void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); switch (pColInfoData->info.type) { @@ -1600,15 +1600,18 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks offset, k); } else { - tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, offset, k); + tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, + true, offset, k); } break; case TSDB_DATA_TYPE_NCHAR: { - tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, offset, k); + tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, + offset, k); break; } case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY - tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, offset, k); + tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, + offset, k); break; } case TSDB_DATA_TYPE_VARBINARY: @@ -1620,7 +1623,8 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks break; default: if (pColInfoData->info.type < TSDB_DATA_TYPE_MAX && pColInfoData->info.type > TSDB_DATA_TYPE_NULL) { - tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, pColInfoData->info.type, TD_VTYPE_NORM, var, true, offset, k); + tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, pColInfoData->info.type, TD_VTYPE_NORM, var, + true, offset, k); } else { uError("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type); TASSERT(0); @@ -1667,7 +1671,7 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo const char* stbFullName, int32_t vgId) { SSubmitReq* ret = NULL; SArray* tagArray = taosArrayInit(1, sizeof(STagVal)); - if(!tagArray) { + if (!tagArray) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } @@ -1692,8 +1696,6 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo createTbReq.type = TSDB_CHILD_TABLE; createTbReq.ctb.suid = suid; - - STagVal tagVal = {.cid = 1, .type = TSDB_DATA_TYPE_UBIGINT, .pData = (uint8_t*)&pDataBlock->info.groupId, @@ -1831,11 +1833,12 @@ 
SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo } ret->length = htonl(ret->length); - taosArrayDestroy(tagArray); + taosArrayDestroy(tagArray); return ret; } -void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress) { +void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, + int8_t needCompress) { int32_t* actualLen = (int32_t*)data; data += sizeof(int32_t); @@ -1929,4 +1932,4 @@ const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t ASSERT(pStart - pData == dataLen); return pStart; -} \ No newline at end of file +} diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index b5f7362689..48c43b0775 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -27,6 +27,8 @@ int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb); int32_t streamSink1(SStreamTask* pTask, SMsgCb* pMsgCb); int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDataBlock* data); +int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData); + #ifdef __cplusplus } #endif diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 99c61e1479..91ede155bb 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -35,18 +35,19 @@ int32_t streamTriggerByWrite(SStreamTask* pTask, int32_t vgId, SMsgCb* pMsgCb) { return 0; } -#if 1 int32_t streamTaskEnqueue(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp) { - SStreamDataBlock* pBlock = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM); + SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM); int8_t status; // enqueue - if (pBlock != NULL) { - pBlock->type = STREAM_DATA_TYPE_SSDATA_BLOCK; - pBlock->sourceVg = pReq->sourceVg; - pBlock->blocks = pReq->data; + if (pData != NULL) { + pData->type = STREAM_DATA_TYPE_SSDATA_BLOCK; + pData->sourceVg = pReq->sourceVg; + // decode + /*pData->blocks = pReq->data;*/ /*pBlock->sourceVer = pReq->sourceVer;*/ - if (streamTaskInput(pTask, (SStreamQueueItem*)pBlock) == 0) { + streamDispatchReqToData(pReq, pData); + if (streamTaskInput(pTask, (SStreamQueueItem*)pData) == 0) { status = TASK_INPUT_STATUS__NORMAL; } else { status = TASK_INPUT_STATUS__FAILED; @@ -68,7 +69,6 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* tmsgSendRsp(pRsp); return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1; } -#endif int32_t streamProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pRsp) { // 1. 
handle input diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index 95c0290058..7139e77407 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -36,6 +36,29 @@ void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput) { } #endif +int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData) { + int32_t blockNum = pReq->blockNum; + SArray* pArray = taosArrayInit(blockNum, sizeof(SSDataBlock)); + if (pArray == NULL) { + return -1; + } + taosArraySetSize(pArray, blockNum); + + ASSERT(pReq->blockNum == taosArrayGetSize(pReq->data)); + ASSERT(pReq->blockNum == taosArrayGetSize(pReq->dataLen)); + + for (int32_t i = 0; i < blockNum; i++) { + int32_t len = *(int32_t*)taosArrayGet(pReq->dataLen, i); + SRetrieveTableRsp* pRetrieve = taosArrayGetP(pReq->data, i); + SSDataBlock* pDataBlock = taosArrayGet(pArray, i); + blockCompressDecode(pDataBlock, htonl(pRetrieve->numOfCols), htonl(pRetrieve->numOfRows), pRetrieve->data); + // TODO: refactor + pDataBlock->info.childId = pReq->sourceChildId; + } + pData->blocks = pArray; + return 0; +} + SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq) { SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); if (pDataSubmit == NULL) return NULL; diff --git a/source/libs/stream/src/streamMsg.c b/source/libs/stream/src/streamMsg.c index 769d672042..e5f953b7cc 100644 --- a/source/libs/stream/src/streamMsg.c +++ b/source/libs/stream/src/streamMsg.c @@ -71,6 +71,7 @@ static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDis pRetrieve->compressed = 0; pRetrieve->completed = 1; pRetrieve->numOfRows = htonl(pBlock->info.rows); + pRetrieve->numOfCols = htonl(pBlock->info.numOfCols); int32_t actualLen = 0; blockCompressEncode(pBlock, pRetrieve->data, &actualLen, pBlock->info.numOfCols, false); From a60ba4feb9d1834947654eb517d4e25f308ebb68 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Tue, 7 Jun 2022 11:32:51 +0800 Subject: [PATCH 08/14] docs: PHP connector English version in 2.4 --- docs-cn/07-develop/01-connect/index.md | 2 +- docs-cn/14-reference/03-connector/php.mdx | 2 +- docs-en/07-develop/01-connect/index.md | 43 +++++- docs-en/14-reference/03-connector/cpp.mdx | 2 +- docs-en/14-reference/03-connector/php.mdx | 152 ++++++++++++++++++++++ 5 files changed, 197 insertions(+), 4 deletions(-) create mode 100644 docs-en/14-reference/03-connector/php.mdx diff --git a/docs-cn/07-develop/01-connect/index.md b/docs-cn/07-develop/01-connect/index.md index 4abceb884f..b1857b9739 100644 --- a/docs-cn/07-develop/01-connect/index.md +++ b/docs-cn/07-develop/01-connect/index.md @@ -212,7 +212,7 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive && tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 ``` -> 版本 `v1.0.2` 可替换为任意更新的版本,可在 Release 中查看最新版本。 +> 版本 `v1.0.2` 只是示例,可替换为任意更新的版本,可在 [TDengine PHP Connector 发布历史](https://github.com/Yurunsoft/php-tdengine/releases) 中查看可用版本。 **非 Swoole 环境:** diff --git a/docs-cn/14-reference/03-connector/php.mdx b/docs-cn/14-reference/03-connector/php.mdx index 8ee9fba2af..221c6cc050 100644 --- a/docs-cn/14-reference/03-connector/php.mdx +++ b/docs-cn/14-reference/03-connector/php.mdx @@ -6,7 +6,7 @@ title: PHP Connector `php-tdengine` 是由社区贡献的 PHP 连接器扩展,还特别支持了 Swoole 协程化。 -PHP 连接器依赖 TDengine 客户端驱动,其安装方式与 C/C++ 驱动相同。 +PHP 连接器依赖 TDengine 客户端驱动。 项目地址: diff --git a/docs-en/07-develop/01-connect/index.md 
b/docs-en/07-develop/01-connect/index.md index b9217b828d..76525f7197 100644 --- a/docs-en/07-develop/01-connect/index.md +++ b/docs-en/07-develop/01-connect/index.md @@ -19,7 +19,7 @@ import InstallOnLinux from "../../14-reference/03-connector/\_windows_install.md import VerifyLinux from "../../14-reference/03-connector/\_verify_linux.mdx"; import VerifyWindows from "../../14-reference/03-connector/\_verify_windows.mdx"; -Any application programs running on any kind of platform can access TDengine through the REST API provided by TDengine. For details, please refer to [REST API](/reference/rest-api/). Additionally, application programs can use the connectors of multiple programming languages including C/C++, Java, Python, Go, Node.js, C#, and Rust to access TDengine. This chapter describes how to establish a connection to TDengine and briefly introduces how to install and use connectors. For details about the connectors, please refer to [Connectors](/reference/connector/) +Any application programs running on any kind of platform can access TDengine through the REST API provided by TDengine. For details, please refer to [REST API](/reference/rest-api/). Additionally, application programs can use the connectors of multiple programming languages including C/C++, Java, Python, Go, Node.js, C#, Rust to access TDengine. This chapter describes how to establish a connection to TDengine and briefly introduces how to install and use connectors. TDengine community also provides connectors in LUA and PHP languages. For details about the connectors, please refer to [Connectors](/reference/connector/). ## Establish Connection @@ -200,6 +200,47 @@ install.packages("RJDBC") If the client driver (taosc) is already installed, then the C connector is already available.
+ + + + +**Download Source Code Package and Unzip:** + +```shell +curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \ +&& mkdir php-tdengine \ +&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 +``` + +> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please check available version from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases). + +**Non-Swoole Environment:** + +```shell +phpize && ./configure && make -j && make install +``` + +**Specify TDengine Location:** + +```shell +phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install +``` + +> `--with-tdengine-dir=` is followed by the TDengine installation location. +> This way is useful in case TDengine location can't be found automatically or macOS. + +**Swoole Environment:** + +```shell +phpize && ./configure --enable-swoole && make -j && make install +``` + +**Enable The Extension:** + +Option One: Add `extension=tdengine` in `php.ini` + +Option Two: Specify the extension on CLI `php -d extension=tdengine test.php` + diff --git a/docs-en/14-reference/03-connector/cpp.mdx b/docs-en/14-reference/03-connector/cpp.mdx index d549413012..e0cdf2bf2c 100644 --- a/docs-en/14-reference/03-connector/cpp.mdx +++ b/docs-en/14-reference/03-connector/cpp.mdx @@ -26,7 +26,7 @@ Please refer to [list of supported platforms](/reference/connector#supported-pla ## Supported versions -The version number of the TDengine client driver and the version number of the TDengine server should be the same. A lower version of the client driver is compatible with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different). For e.g. if the client version is x.y.z.1 and the server version is x.y.z.2 the client and server are compatible. But in general we do not recommend using a lower client version with a newer server version. It is also strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server. +The version number of the TDengine client driver and the version number of the TDengine server should be same. A lower version of the client driver is compatible with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different). For e.g. if the client version is x.y.z.1 and the server version is x.y.z.2 the client and server are compatible. But in general we do not recommend using a lower client version with a newer server version. It is also strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server. ## Installation steps diff --git a/docs-en/14-reference/03-connector/php.mdx b/docs-en/14-reference/03-connector/php.mdx new file mode 100644 index 0000000000..0ea56b5209 --- /dev/null +++ b/docs-en/14-reference/03-connector/php.mdx @@ -0,0 +1,152 @@ +--- +sidebar_position: 1 +sidebar_label: PHP +title: PHP Connector +--- + +`php-tdengine` is the TDengine PHP connector provided by TDengine community. In particular, it supports Swoole coroutine. + +PHP Connector relies on TDengine client driver. 
+ +Project Repository: https://github.com/Yurunsoft/php-tdengine + +After TDengine client or server is installed, `taos.h` is located at: + +- Linux:`/usr/local/taos/include` +- Windows:`C:\TDengine\include` + +TDengine client driver is located at: + +- Linux: `/usr/local/taos/driver/libtaos.so` +- Windows: `C:\TDengine\taos.dll` + +## Supported Platforms + +- Windows, Linux, macOS + +- PHP >= 7.4 + +- TDengine >= 2.0 + +- Swoole >= 4.8 (Optional) + +## Supported Versions + +Because the version of the TDengine client driver is tightly associated with that of the TDengine server, it's strongly suggested to use a client driver of the same version as the TDengine server, even though the client driver can work with the TDengine server if the first 3 sections of the versions are the same. + +## Installation + +### Install TDengine Client Driver + +Regarding how to install the TDengine client driver, please refer to [Install Client Driver](/reference/connector#installation-steps) + +### Install php-tdengine + +**Download Source Code Package and Unzip:** + +```shell +curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \ +&& mkdir php-tdengine \ +&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 +``` + +> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Available versions are listed in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases). + +**Non-Swoole Environment:** + +```shell +phpize && ./configure && make -j && make install +``` + +**Specify TDengine location:** + +```shell +phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install +``` + +> `--with-tdengine-dir=` is followed by the TDengine installation location. +> It's useful when the TDengine installation location can't be found automatically, or on macOS. + +**Swoole Environment:** + +```shell +phpize && ./configure --enable-swoole && make -j && make install +``` + +**Enable Extension:** + +Option One: Add `extension=tdengine` in `php.ini`. + +Option Two: Use CLI `php -dextension=tdengine test.php`. + +## Sample Programs + +This section demonstrates a few sample programs that use the TDengine PHP connector to access a TDengine cluster. + +> Any error will throw an exception: `TDengine\Exception\TDengineException` + +### Establish Connection + +
+Establish Connection + +```c +{{#include docs-examples/php/connect.php}} +``` + +
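The body of `connect.php` is pulled in from `docs-examples` and is not shown in this patch. As a rough illustration only — the `TDengine\Connection` constructor arguments and the `connect()` method are assumptions about the extension's API, not taken from the referenced file — a minimal connection script might look like this:

```php
<?php
use TDengine\Connection;
use TDengine\Exception\TDengineException;

try {
    // connection parameters; adjust host and credentials for your deployment (assumed constructor signature)
    $connection = new Connection('localhost', 6030, 'root', 'taosdata', null);
    // establish the native connection through the TDengine client driver
    $connection->connect();
} catch (TDengineException $e) {
    // every failure surfaces as TDengine\Exception\TDengineException
    echo 'connect failed: ' . $e->getMessage() . PHP_EOL;
}
```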
+ +### Insert Data + +
+Insert Data + +```c +{{#include docs-examples/php/insert.php}} +``` + +
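`insert.php` is likewise referenced rather than shown. A sketch of an SQL-based insert, assuming the connection object exposes a `query()` method and that the returned handle has an `affectedRows()` accessor (both assumptions, not confirmed by this patch):

```php
<?php
use TDengine\Connection;
use TDengine\Exception\TDengineException;

try {
    $connection = new Connection('localhost', 6030, 'root', 'taosdata', null);
    $connection->connect();

    // create the target objects, then insert one row; plain SQL goes through query()
    $connection->query('CREATE DATABASE IF NOT EXISTS power');
    $connection->query('CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT) TAGS (location BINARY(64), groupId INT)');
    $result = $connection->query("INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES (NOW, 10.3, 219)");

    // assumed accessor for the number of affected rows
    echo 'inserted rows: ' . $result->affectedRows() . PHP_EOL;
} catch (TDengineException $e) {
    echo 'insert failed: ' . $e->getMessage() . PHP_EOL;
}
```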
+ +### Synchronous Query + +
+Synchronous Query + +```c +{{#include docs-examples/php/query.php}} +``` + +
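`query.php` is also an external include. A sketch of a synchronous query, under the assumption that the result handle provides a `fetch()` method returning the result rows as an array:

```php
<?php
use TDengine\Connection;
use TDengine\Exception\TDengineException;

try {
    $connection = new Connection('localhost', 6030, 'root', 'taosdata', 'power');
    $connection->connect();

    // a synchronous query blocks until the full result set is available
    $resource = $connection->query('SELECT ts, current, voltage FROM meters LIMIT 10');
    foreach ($resource->fetch() as $row) {
        // each $row is assumed to be an associative array keyed by column name
        var_dump($row);
    }
} catch (TDengineException $e) {
    echo 'query failed: ' . $e->getMessage() . PHP_EOL;
}
```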
+ +### Parameter Binding + +
+Parameter Binding + +```c +{{#include docs-examples/php/insert_stmt.php}} +``` + +
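`insert_stmt.php` is not shown in full here, but the hunk for it earlier in this patch series does show `setTableNameTags()` and the `[type, value]` pair format for tags. Building on that, a sketch of parameter binding — `prepare()`, `bindParams()`, and `execute()` are assumed method names, while `setTableNameTags()` and the `TDengine\TSDB_DATA_TYPE_*` constants come from the patch itself:

```php
<?php
use TDengine\Connection;
use TDengine\Exception\TDengineException;

try {
    $connection = new Connection('localhost', 6030, 'root', 'taosdata', 'power');
    $connection->connect();

    // one placeholder for the table name, two for tags, three for column values
    $stmt = $connection->prepare('INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?)');

    // table name and tags, in the [type, value] pair format shown in insert_stmt.php
    $stmt->setTableNameTags('d1001', [
        [TDengine\TSDB_DATA_TYPE_BINARY, 'California.SanFrancisco'],
        [TDengine\TSDB_DATA_TYPE_INT, 2],
    ]);

    // column values use the same pair format; timestamp is in milliseconds
    $stmt->bindParams([
        [TDengine\TSDB_DATA_TYPE_TIMESTAMP, 1626861392589],
        [TDengine\TSDB_DATA_TYPE_FLOAT, 10.3],
        [TDengine\TSDB_DATA_TYPE_INT, 219],
    ]);
    $stmt->execute();
} catch (TDengineException $e) {
    echo 'stmt insert failed: ' . $e->getMessage() . PHP_EOL;
}
```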
+ +## Constants + +### Types + +| Type in PHP Connector | Description | +| ----------------------------------- | ----------- | +| `TDengine\TSDB_DATA_TYPE_NULL` | null | +| `TDengine\TSDB_DATA_TYPE_BOOL` | bool | +| `TDengine\TSDB_DATA_TYPE_TINYINT` | tinyint | +| `TDengine\TSDB_DATA_TYPE_SMALLINT` | smallint | +| `TDengine\TSDB_DATA_TYPE_INT` | int | +| `TDengine\TSDB_DATA_TYPE_BIGINT` | bigint | +| `TDengine\TSDB_DATA_TYPE_FLOAT` | float | +| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double | +| `TDengine\TSDB_DATA_TYPE_BINARY` | binary | +| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp | +| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar | +| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint | +| `TDengine\TSDB_DATA_TYPE_USMALLINT` | usmallint | +| `TDengine\TSDB_DATA_TYPE_UINT` | uint | +| `TDengine\TSDB_DATA_TYPE_UBIGINT` | ubigint | From d9a4c582c6aa579a54d9889f7edeb83814752717 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Tue, 7 Jun 2022 11:41:14 +0800 Subject: [PATCH 09/14] docs: correct a few terms --- docs-cn/14-reference/03-connector/php.mdx | 4 +--- docs-en/14-reference/03-connector/php.mdx | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/docs-cn/14-reference/03-connector/php.mdx b/docs-cn/14-reference/03-connector/php.mdx index 221c6cc050..90fda04632 100644 --- a/docs-cn/14-reference/03-connector/php.mdx +++ b/docs-cn/14-reference/03-connector/php.mdx @@ -131,9 +131,7 @@ phpize && ./configure --enable-swoole && make -j && make install ## 常量 -### 字段类型 - -| 参数名称 | 说明 | +| 常量 | 说明 | | ------------ | ------------ | `TDengine\TSDB_DATA_TYPE_NULL` | null | | `TDengine\TSDB_DATA_TYPE_BOOL` | bool | diff --git a/docs-en/14-reference/03-connector/php.mdx b/docs-en/14-reference/03-connector/php.mdx index 0ea56b5209..839a5c8c3c 100644 --- a/docs-en/14-reference/03-connector/php.mdx +++ b/docs-en/14-reference/03-connector/php.mdx @@ -131,9 +131,7 @@ In this section a few sample programs which use TDengine PHP connector to access ## Constants -### Types - -| Type in PHP Connector | Description | +| Constant | Description | | ----------------------------------- | ----------- | | `TDengine\TSDB_DATA_TYPE_NULL` | null | | `TDengine\TSDB_DATA_TYPE_BOOL` | bool | From d4a214eb63cefea83e8525931a2f1a9311288fd4 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Tue, 7 Jun 2022 11:53:31 +0800 Subject: [PATCH 10/14] docs: correct PHP connector release address --- docs-cn/14-reference/03-connector/php.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs-cn/14-reference/03-connector/php.mdx b/docs-cn/14-reference/03-connector/php.mdx index 90fda04632..f150aed4c8 100644 --- a/docs-cn/14-reference/03-connector/php.mdx +++ b/docs-cn/14-reference/03-connector/php.mdx @@ -50,7 +50,7 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive && tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 ``` -> 版本 `v1.0.2` 可替换为任意更新的版本,可在 Release 中查看最新版本。 +> 版本 `v1.0.2` 可替换为任意更新的版本,可在 [TDengine PHP Connector 发布历史](https://github.com/Yurunsoft/php-tdengine/releases)。 **非 Swoole 环境:** From bd944176415e24e7ff776ce8c288a1a0e72ecb28 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Tue, 7 Jun 2022 12:57:13 +0800 Subject: [PATCH 11/14] docs: fix syntax error --- docs-en/07-develop/01-connect/index.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs-en/07-develop/01-connect/index.md b/docs-en/07-develop/01-connect/index.md index 76525f7197..720f8e2384 100644 --- a/docs-en/07-develop/01-connect/index.md +++ b/docs-en/07-develop/01-connect/index.md @@ -200,7 
+200,6 @@ install.packages("RJDBC") If the client driver (taosc) is already installed, then the C connector is already available.
- From 343532b6c39a887bf40b86a7cf1a935a4ef87161 Mon Sep 17 00:00:00 2001 From: huolibo Date: Tue, 7 Jun 2022 11:35:53 +0800 Subject: [PATCH 12/14] docs: modify jdbc clone address --- docs-cn/14-reference/03-connector/java.mdx | 7 ++-- docs-en/14-reference/03-connector/java.mdx | 43 +++++++++++----------- 2 files changed, 26 insertions(+), 24 deletions(-) diff --git a/docs-cn/14-reference/03-connector/java.mdx b/docs-cn/14-reference/03-connector/java.mdx index 2677571606..839054ef15 100644 --- a/docs-cn/14-reference/03-connector/java.mdx +++ b/docs-cn/14-reference/03-connector/java.mdx @@ -93,8 +93,8 @@ Maven 项目中,在 pom.xml 中添加以下依赖: 可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector ```shell -git clone https://github.com/taosdata/TDengine.git -cd TDengine/src/connector/jdbc +git clone https://github.com/taosdata/taos-connector-jdbc.git +cd taos-connector-jdbc mvn clean install -Dmaven.test.skip=true ``` @@ -199,6 +199,7 @@ url 中的配置参数如下: - user:登录 TDengine 用户名,默认值 'root'。 - password:用户登录密码,默认值 'taosdata'。 - batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 和 TDengine 2.4.0.12 版本开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。 +- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。默认为 UTF-8。 - batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。 **注意**:部分配置项(比如:locale、timezone)在 REST 连接中不生效。 @@ -348,7 +349,7 @@ JDBC 连接器可能报错的错误码包括 3 种:JDBC driver 本身的报错 具体的错误码请参考: -- [TDengine Java Connector](https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) +- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) - [TDengine_ERROR_CODE](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h) ### 通过参数绑定写入数据 diff --git a/docs-en/14-reference/03-connector/java.mdx b/docs-en/14-reference/03-connector/java.mdx index 33d715c2e2..88e42f4b80 100644 --- a/docs-en/14-reference/03-connector/java.mdx +++ b/docs-en/14-reference/03-connector/java.mdx @@ -42,18 +42,18 @@ Please refer to [Version Support List](/reference/connector#version-support). 
TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Java is as follows: | TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version > = 2.0.24) | -| ----------------- | --------------------------------- | ---------------------------------- | -| TIMESTAMP | java.lang.Long | java.sql.Timestamp | -| INT | java.lang.Integer | java.lang.Integer | -| BIGINT | java.lang.Long | java.lang.Long | -| FLOAT | java.lang.Float | java.lang.Float | -| DOUBLE | java.lang.Double | java.lang.Double | -| SMALLINT | java.lang.Short | java.lang.Short | -| TINYINT | java.lang.Byte | java.lang.Byte | -| BOOL | java.lang.Boolean | java.lang.Boolean | -| BINARY | java.lang.String | byte array | -| NCHAR | java.lang.String | java.lang.String | -| JSON | - | java.lang.String | +| ----------------- | ---------------------------------- | ------------------------------------ | +| TIMESTAMP | java.lang.Long | java.sql.Timestamp | +| INT | java.lang.Integer | java.lang.Integer | +| BIGINT | java.lang.Long | java.lang.Long | +| FLOAT | java.lang.Float | java.lang.Float | +| DOUBLE | java.lang.Double | java.lang.Double | +| SMALLINT | java.lang.Short | java.lang.Short | +| TINYINT | java.lang.Byte | java.lang.Byte | +| BOOL | java.lang.Boolean | java.lang.Boolean | +| BINARY | java.lang.String | byte array | +| NCHAR | java.lang.String | java.lang.String | +| JSON | - | java.lang.String | **Note**: Only TAG supports JSON types @@ -91,8 +91,8 @@ Add following dependency in the `pom.xml` file of your Maven project: You can build Java connector from source code after cloning the TDengine project: ```shell -git clone https://github.com/taosdata/TDengine.git -cd TDengine/src/connector/jdbc +git clone https://github.com/taosdata/taos-connector-jdbc.git +cd taos-connector-jdbc mvn clean install -Dmaven.test.skip=true ``` @@ -197,6 +197,7 @@ The configuration parameters in the URL are as follows. - user: Login TDengine user name, default value 'root'. - password: user login password, default value 'taosdata'. - batchfetch: true: pull the result set in batch when executing the query; false: pull the result set row by row. The default value is false. batchfetch uses HTTP for data transfer. The JDBC REST connection supports bulk data pulling function in taos-jdbcdriver-2.0.38 and TDengine 2.4.0.12 and later versions. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance. +- charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true. Default value is UTF-8. - batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false. **Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection. @@ -261,7 +262,7 @@ The configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_CHARSET: takes effect only when using JDBC native connection. In the character set used by the client, the default value is the system character set. - TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale. 
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone. -For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only). + For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only). ### Priority of configuration parameters @@ -350,7 +351,7 @@ There are three types of error codes that the JDBC connector can report: For specific error codes, please refer to. -- [TDengine Java Connector](https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) +- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) - [TDengine_ERROR_CODE](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h) ### Writing data via parameter binding @@ -808,11 +809,11 @@ Please refer to: [JDBC example](https://github.com/taosdata/TDengine/tree/develo ## Recent update logs -| taos-jdbcdriver version | major changes | -| :------------------: | :----------------------------: | -| 2.0.38 | JDBC REST connections add bulk pull function | -| 2.0.37 | Added support for json tags | -| 2.0.36 | Add support for schemaless writing | +| taos-jdbcdriver version | major changes | +| :---------------------: | :------------------------------------------: | +| 2.0.38 | JDBC REST connections add bulk pull function | +| 2.0.37 | Added support for json tags | +| 2.0.36 | Add support for schemaless writing | ## Frequently Asked Questions From 4831493fc5440f92fa3dbd94e0e2b8f2afb19003 Mon Sep 17 00:00:00 2001 From: huolibo Date: Tue, 7 Jun 2022 11:44:46 +0800 Subject: [PATCH 13/14] docs: fix description --- docs-cn/14-reference/03-connector/java.mdx | 2 +- docs-en/14-reference/03-connector/java.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs-cn/14-reference/03-connector/java.mdx b/docs-cn/14-reference/03-connector/java.mdx index 839054ef15..e4e68131c5 100644 --- a/docs-cn/14-reference/03-connector/java.mdx +++ b/docs-cn/14-reference/03-connector/java.mdx @@ -261,7 +261,7 @@ properties 中的配置参数如下: - TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。 - TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 sq 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。 - TSDBDriver.PROPERTY_KEY_CONFIG_DIR:仅在使用 JDBC 原生连接时生效。客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。 -- TSDBDriver.PROPERTY_KEY_CHARSET:仅在使用 JDBC 原生连接时生效。 客户端使用的字符集,默认值为系统字符集。 +- TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。 - TSDBDriver.PROPERTY_KEY_LOCALE:仅在使用 JDBC 原生连接时生效。 客户端语言环境,默认值系统当前 locale。 - TSDBDriver.PROPERTY_KEY_TIME_ZONE:仅在使用 JDBC 原生连接时生效。 客户端使用的时区,默认值为系统当前时区。 - 此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。更多详细配置请参考[客户端配置](/reference/config/#仅客户端适用)。 diff --git a/docs-en/14-reference/03-connector/java.mdx b/docs-en/14-reference/03-connector/java.mdx index 88e42f4b80..9d2c5cd327 100644 --- a/docs-en/14-reference/03-connector/java.mdx +++ b/docs-en/14-reference/03-connector/java.mdx @@ -259,7 +259,7 @@ The 
configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batch when executing query; false: pull the result set row by row. The default value is: false. - TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing executeBatch of Statement, if there is a SQL execution failure in the middle, continue to execute the following sq. false: no longer execute any statement after the failed SQL. The default value is: false. - TSDBDriver.PROPERTY_KEY_CONFIG_DIR: Only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS. -- TSDBDriver.PROPERTY_KEY_CHARSET: takes effect only when using JDBC native connection. In the character set used by the client, the default value is the system character set. +- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set. - TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale. - TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone. For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only). From 6dc497609b3706d270433962be9a6f6e7eb6bfc4 Mon Sep 17 00:00:00 2001 From: huolibo Date: Tue, 7 Jun 2022 11:57:15 +0800 Subject: [PATCH 14/14] docs: remove default value --- docs-cn/14-reference/03-connector/java.mdx | 2 +- docs-en/14-reference/03-connector/java.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs-cn/14-reference/03-connector/java.mdx b/docs-cn/14-reference/03-connector/java.mdx index e4e68131c5..ddab9e5f24 100644 --- a/docs-cn/14-reference/03-connector/java.mdx +++ b/docs-cn/14-reference/03-connector/java.mdx @@ -199,7 +199,7 @@ url 中的配置参数如下: - user:登录 TDengine 用户名,默认值 'root'。 - password:用户登录密码,默认值 'taosdata'。 - batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 和 TDengine 2.4.0.12 版本开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。 -- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。默认为 UTF-8。 +- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。 - batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。 **注意**:部分配置项(比如:locale、timezone)在 REST 连接中不生效。 diff --git a/docs-en/14-reference/03-connector/java.mdx b/docs-en/14-reference/03-connector/java.mdx index 9d2c5cd327..6c40f753be 100644 --- a/docs-en/14-reference/03-connector/java.mdx +++ b/docs-en/14-reference/03-connector/java.mdx @@ -197,7 +197,7 @@ The configuration parameters in the URL are as follows. - user: Login TDengine user name, default value 'root'. - password: user login password, default value 'taosdata'. - batchfetch: true: pull the result set in batch when executing the query; false: pull the result set row by row. The default value is false. batchfetch uses HTTP for data transfer. The JDBC REST connection supports bulk data pulling function in taos-jdbcdriver-2.0.38 and TDengine 2.4.0.12 and later versions. 
taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance. -- charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true. Default value is UTF-8. +- charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true. - batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false. **Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection.