add libuv

yihaoDeng 2022-01-11 23:06:36 +08:00
parent 3077a2449f
commit b945000779
11 changed files with 720 additions and 546 deletions

View File

@ -4,7 +4,7 @@ ExternalProject_Add(libuv
GIT_REPOSITORY https://github.com/libuv/libuv.git
GIT_TAG v1.42.0
SOURCE_DIR "${CMAKE_CONTRIB_DIR}/libuv"
BINARY_DIR ""
BINARY_DIR "${CMAKE_CONTRIB_DIR}/libuv"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""

View File

@ -13,3 +13,18 @@ target_link_libraries(
PUBLIC util
PUBLIC common
)
if (${BUILD_WITH_UV})
target_include_directories(
transport
PUBLIC "${CMAKE_SOURCE_DIR}/contrib/libuv/include"
)
#LINK_DIRECTORIES("${CMAKE_SOURCE_DIR}/debug/contrib/libuv")
target_link_libraries(
transport
PUBLIC uv_a
)
add_definitions(-DUSE_UV)
endif(${BUILD_WITH_UV})

View File

@ -16,6 +16,8 @@
#ifndef TDENGINE_RPC_CACHE_H
#define TDENGINE_RPC_CACHE_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif

View File

@ -16,10 +16,15 @@
#ifndef TDENGINE_RPCHEAD_H
#define TDENGINE_RPCHEAD_H
#include <tdef.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef USE_UV
#else
#define RPC_CONN_TCP 2
extern int tsRpcOverhead;
@ -70,11 +75,10 @@ typedef struct {
} SRpcDigest;
#pragma pack(pop)
#endif
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_RPCHEAD_H

View File

@ -15,11 +15,14 @@
#ifndef _rpc_tcp_header_
#define _rpc_tcp_header_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef USE_UV
#else
void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle);
void taosStopTcpServer(void *param);
void taosCleanUpTcpServer(void *param);
@ -32,6 +35,8 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
void taosCloseTcpConnection(void *chandle);
int taosSendTcpData(uint32_t ip, uint16_t port, void *data, int len, void *chandle);
#endif
#ifdef __cplusplus
}
#endif

View File

@ -20,6 +20,11 @@
extern "C" {
#endif
#ifdef USE_UV
#else
#endif
#ifdef __cplusplus
}
#endif

View File

@ -13,15 +13,18 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "rpcCache.h"
#include "os.h"
#include "rpcLog.h"
#include "taosdef.h"
#include "tglobal.h"
#include "tmempool.h"
#include "ttimer.h"
#include "tutil.h"
#include "rpcLog.h"
#include "rpcCache.h"
#ifdef USE_UV
#else
typedef struct SConnHash {
char fqdn[TSDB_FQDN_LEN];
uint16_t port;
@ -146,7 +149,8 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in
rpcUnlockCache(pCache->lockedBy + hash);
pCache->total++;
// tTrace("%p %s:%hu:%d:%d:%p added into cache, connections:%d", data, fqdn, port, connType, hash, pNode, pCache->count[hash]);
// tTrace("%p %s:%hu:%d:%d:%p added into cache, connections:%d", data, fqdn, port, connType, hash, pNode,
// pCache->count[hash]);
return;
}
@ -200,9 +204,11 @@ void *rpcGetConnFromCache(void *handle, char *fqdn, uint16_t port, int8_t connTy
rpcUnlockCache(pCache->lockedBy + hash);
if (pData) {
//tTrace("%p %s:%hu:%d:%d:%p retrieved from cache, connections:%d", pData, fqdn, port, connType, hash, pNode, pCache->count[hash]);
// tTrace("%p %s:%hu:%d:%d:%p retrieved from cache, connections:%d", pData, fqdn, port, connType, hash, pNode,
// pCache->count[hash]);
} else {
//tTrace("%s:%hu:%d:%d failed to retrieve conn from cache, connections:%d", fqdn, port, connType, hash, pCache->count[hash]);
// tTrace("%s:%hu:%d:%d failed to retrieve conn from cache, connections:%d", fqdn, port, connType, hash,
// pCache->count[hash]);
}
return pData;
@ -242,7 +248,8 @@ static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash
pNext = pNode->next;
pCache->total--;
pCache->count[hash]--;
//tTrace("%p %s:%hu:%d:%d:%p removed from cache, connections:%d", pNode->data, pNode->fqdn, pNode->port, pNode->connType, hash, pNode,
// tTrace("%p %s:%hu:%d:%d:%p removed from cache, connections:%d", pNode->data, pNode->fqdn, pNode->port,
// pNode->connType, hash, pNode,
// pCache->count[hash]);
taosMemPoolFree(pCache->connHashMemPool, (char *)pNode);
pNode = pNext;
@ -288,4 +295,4 @@ static void rpcUnlockCache(int64_t *lockedBy) {
assert(false);
}
}
#endif

View File

@ -13,24 +13,172 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <uv.h>
#include "lz4.h"
#include "os.h"
#include "rpcCache.h"
#include "rpcHead.h"
#include "rpcLog.h"
#include "rpcTcp.h"
#include "rpcUdp.h"
#include "taoserror.h"
#include "tglobal.h"
#include "thash.h"
#include "tidpool.h"
#include "tmd5.h"
#include "tmempool.h"
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
#include "ttimer.h"
#include "tutil.h"
#include "lz4.h"
#include "tref.h"
#include "taoserror.h"
#include "tglobal.h"
#include "tmsg.h"
#include "trpc.h"
#include "thash.h"
#include "rpcLog.h"
#include "rpcUdp.h"
#include "rpcCache.h"
#include "rpcTcp.h"
#include "rpcHead.h"
#ifdef USE_UV
#define container_of(ptr, type, member) ((type*)((char*)(ptr)-offsetof(type, member)))
int32_t rpcInit() { return -1; }
void rpcCleanup() { return; };
void* rpcOpen(const SRpcInit* pRpc) { return NULL; }
void rpcClose(void* arg) { return; }
void* rpcMallocCont(int contLen) { return NULL; }
void rpcFreeCont(void* cont) { return; }
void* rpcReallocCont(void* ptr, int contLen) { return NULL; }
void rpcSendRequest(void* thandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* rid) { return; }
void rpcSendResponse(const SRpcMsg* pMsg) {}
void rpcSendRedirectRsp(void* pConn, const SEpSet* pEpSet) {}
int rpcGetConnInfo(void* thandle, SRpcConnInfo* pInfo) { return -1; }
void rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pReq, SRpcMsg* pRsp) { return; }
int rpcReportProgress(void* pConn, char* pCont, int contLen) { return -1; }
void rpcCancelRequest(int64_t rid) { return; }
typedef struct SThreadObj {
pthread_t thread;
uv_pipe_t* pipe;
uv_loop_t* loop;
uv_async_t* workerAsync; //
int fd;
} SThreadObj;
typedef struct SServerObj {
uv_tcp_t server;
uv_loop_t* loop;
int workerIdx;
int numOfThread;
SThreadObj** pThreadObj;
uv_pipe_t** pipe;
} SServerObj;
typedef struct SConnCtx {
uv_tcp_t* pClient;
uv_timer_t* pTimer;
uv_async_t* pWorkerAsync;
int ref;
} SConnCtx;
void allocBuffer(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
buf->base = malloc(suggested_size);
buf->len = suggested_size;
}
void onTimeout(uv_timer_t* handle) {
// opt
tDebug("time out");
}
void onRead(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
// opt
tDebug("data already was read on a stream");
}
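The read callback above is still a stub: it neither releases the buffer handed out by allocBuffer nor closes the stream on EOF. A minimal sketch of how it could be completed (an assumption about a later revision, not part of this commit) is:

void onRead(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
  if (nread > 0) {
    tDebug("%ld bytes read from stream", (long)nread);
    // parse and dispatch buf->base[0 .. nread) here
  } else if (nread < 0) {
    if (nread != UV_EOF) tError("read error %s", uv_err_name(nread));
    uv_close((uv_handle_t*)cli, NULL);
  }
  // every buffer comes from allocBuffer(), so it has to be freed on every path
  if (buf->base != NULL) free(buf->base);
}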
void onWrite(uv_write_t* req, int status) {
// opt
if (req) tDebug("data already was written on stream");
}
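Likewise, the write callback never frees the uv_write_t allocated in onAccept below; a leak-free variant could look like this (again only a sketch, assuming every request is malloc'ed as in onAccept):

void onWrite(uv_write_t* req, int status) {
  if (status < 0) tError("write failed: %s", uv_err_name(status));
  free(req);  // allocated with malloc() in onAccept
}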
static void workerAsyncCB(uv_async_t* handle) {
// opt
SThreadObj* pObj = container_of(handle, SThreadObj, workerAsync);
}
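Because workerAsync is stored as a heap-allocated pointer rather than an embedded member, container_of on the callback handle will not point back at the SThreadObj. A common libuv alternative, sketched here only as an assumption about how this stub could evolve, is to stash the owner in the handle's data field right after uv_async_init:

static void workerAsyncCB(uv_async_t* handle) {
  // handle->data would be set to the owning SThreadObj after uv_async_init()
  SThreadObj* pObj = (SThreadObj*)handle->data;
  // drain whatever work other threads queued for this loop
  (void)pObj;
}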
void onAccept(uv_stream_t* stream, int status) {
if (status == -1) {
return;
}
SServerObj* pObj = container_of(stream, SServerObj, server);
tDebug("new conntion accepted by main server, dispatch to one worker thread");
uv_tcp_t* cli = (uv_tcp_t*)malloc(sizeof(uv_tcp_t));
uv_tcp_init(pObj->loop, cli);
if (uv_accept(stream, (uv_stream_t*)cli) == 0) {
uv_write_t* wr = (uv_write_t*)malloc(sizeof(uv_write_t));
uv_buf_t buf = uv_buf_init("a", 1);
// dispatch to a worker thread
pObj->workerIdx = (pObj->workerIdx + 1) % pObj->numOfThread;
uv_write2(wr, (uv_stream_t*)&(pObj->pipe[pObj->workerIdx][0]), &buf, 1, (uv_stream_t*)cli, onWrite);
} else {
uv_close((uv_handle_t*)cli, NULL);
}
}
void onConnection(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
if (nread < 0) {
if (nread != UV_EOF) {
tError("read error %s", uv_err_name(nread));
}
// TODO(log other failure reason)
uv_close((uv_handle_t*)q, NULL);
return;
}
SThreadObj* pObj = (SThreadObj*)container_of(q, struct SThreadObj, pipe);
uv_pipe_t* pipe = (uv_pipe_t*)q;
if (!uv_pipe_pending_count(pipe)) {
tError("No pending count");
return;
}
uv_handle_type pending = uv_pipe_pending_type(pipe);
assert(pending == UV_TCP);
SConnCtx* pConn = malloc(sizeof(SConnCtx));
/* init conn timer*/
pConn->pTimer = malloc(sizeof(uv_timer_t));
uv_timer_init(pObj->loop, pConn->pTimer);
pConn->pClient = (uv_tcp_t*)malloc(sizeof(uv_tcp_t));
pConn->pWorkerAsync = pObj->workerAsync; // thread safety
uv_tcp_init(pObj->loop, pConn->pClient);
if (uv_accept(q, (uv_stream_t*)(pConn->pClient)) == 0) {
uv_os_fd_t fd;
uv_fileno((const uv_handle_t*)pConn->pClient, &fd);
tDebug("new connection created: %d", fd);
uv_timer_start(pConn->pTimer, onTimeout, 10, 0);
uv_read_start((uv_stream_t*)(pConn->pClient), allocBuffer, onRead);
} else {
uv_timer_stop(pConn->pTimer);
free(pConn->pTimer);
uv_close((uv_handle_t*)pConn->pClient, NULL);
free(pConn->pClient);
free(pConn);
}
}
void* workerThread(void* arg) {
SThreadObj* pObj = (SThreadObj*)arg;
int fd = pObj->fd;
pObj->loop = (uv_loop_t*)malloc(sizeof(uv_loop_t));
uv_loop_init(pObj->loop);
uv_pipe_init(pObj->loop, pObj->pipe, 1);
uv_pipe_open(pObj->pipe, fd);
pObj->workerAsync = malloc(sizeof(uv_async_t));
uv_async_init(pObj->loop, pObj->workerAsync, workerAsyncCB);
uv_read_start((uv_stream_t*)pObj->pipe, allocBuffer, onConnection);
}
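As written, workerThread never allocates the pipe handle it initializes, never runs its loop, and never returns a value. A minimal completion, assuming the same SThreadObj layout, that the function stays a pthread entry point, and that the worker allocates its own pipe end (the commit leaves that wiring open), might look like:

void* workerThread(void* arg) {
  SThreadObj* pObj = (SThreadObj*)arg;

  pObj->loop = (uv_loop_t*)malloc(sizeof(uv_loop_t));
  uv_loop_init(pObj->loop);

  // the pipe handle has to exist before uv_pipe_init() touches it
  pObj->pipe = (uv_pipe_t*)malloc(sizeof(uv_pipe_t));
  uv_pipe_init(pObj->loop, pObj->pipe, 1 /* ipc */);
  uv_pipe_open(pObj->pipe, pObj->fd);

  pObj->workerAsync = (uv_async_t*)malloc(sizeof(uv_async_t));
  uv_async_init(pObj->loop, pObj->workerAsync, workerAsyncCB);
  pObj->workerAsync->data = pObj;  // see the workerAsyncCB note above

  uv_read_start((uv_stream_t*)pObj->pipe, allocBuffer, onConnection);

  // without uv_run() none of the handles registered above are ever serviced
  uv_run(pObj->loop, UV_RUN_DEFAULT);
  return NULL;
}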
#else
#define RPC_MSG_OVERHEAD (sizeof(SRpcReqContext) + sizeof(SRpcHead) + sizeof(SRpcDigest))
#define rpcHeadFromCont(cont) ((SRpcHead*)((char*)cont - sizeof(SRpcHead)))
@ -146,18 +294,10 @@ static int32_t tsRpcNum = 0;
#define RPC_CONN_TCPC 3
void *(*taosInitConn[])(uint32_t ip, uint16_t port, char *label, int threads, void *fp, void *shandle) = {
taosInitUdpConnection,
taosInitUdpConnection,
taosInitTcpServer,
taosInitTcpClient
};
taosInitUdpConnection, taosInitUdpConnection, taosInitTcpServer, taosInitTcpClient};
void (*taosCleanUpConn[])(void *thandle) = {
taosCleanUpUdpConnection,
taosCleanUpUdpConnection,
taosCleanUpTcpServer,
taosCleanUpTcpClient
};
void (*taosCleanUpConn[])(void *thandle) = {taosCleanUpUdpConnection, taosCleanUpUdpConnection, taosCleanUpTcpServer,
taosCleanUpTcpClient};
void (*taosStopConn[])(void *thandle) = {
taosStopUdpConnection,
@ -167,11 +307,7 @@ void (*taosStopConn[])(void *thandle) = {
};
int (*taosSendData[])(uint32_t ip, uint16_t port, void *data, int len, void *chandle) = {
taosSendUdpData,
taosSendUdpData,
taosSendTcpData,
taosSendTcpData
};
taosSendUdpData, taosSendUdpData, taosSendTcpData, taosSendTcpData};
void *(*taosOpenConn[])(void *shandle, void *thandle, uint32_t ip, uint16_t port) = {
taosOpenUdpConnection,
@ -180,12 +316,7 @@ void *(*taosOpenConn[])(void *shandle, void *thandle, uint32_t ip, uint16_t port
taosOpenTcpClientConnection,
};
void (*taosCloseConn[])(void *chandle) = {
NULL,
NULL,
taosCloseTcpConnection,
taosCloseTcpConnection
};
void (*taosCloseConn[])(void *chandle) = {NULL, NULL, taosCloseTcpConnection, taosCloseTcpConnection};
static SRpcConn *rpcOpenConn(SRpcInfo *pRpc, char *peerFqdn, uint16_t peerPort, int8_t connType);
static void rpcCloseConn(void *thandle);
@ -311,10 +442,10 @@ void *rpcOpen(const SRpcInit *pInit) {
pthread_mutex_init(&pRpc->mutex, NULL);
pRpc->tcphandle = (*taosInitConn[pRpc->connType|RPC_CONN_TCP])(0, pRpc->localPort, pRpc->label,
pRpc->numOfThreads, rpcProcessMsgFromPeer, pRpc);
pRpc->udphandle = (*taosInitConn[pRpc->connType])(0, pRpc->localPort, pRpc->label,
pRpc->numOfThreads, rpcProcessMsgFromPeer, pRpc);
pRpc->tcphandle = (*taosInitConn[pRpc->connType | RPC_CONN_TCP])(0, pRpc->localPort, pRpc->label, pRpc->numOfThreads,
rpcProcessMsgFromPeer, pRpc);
pRpc->udphandle =
(*taosInitConn[pRpc->connType])(0, pRpc->localPort, pRpc->label, pRpc->numOfThreads, rpcProcessMsgFromPeer, pRpc);
if (pRpc->tcphandle == NULL || pRpc->udphandle == NULL) {
tError("%s failed to init network, port:%d", pRpc->label, pRpc->localPort);
@ -410,10 +541,9 @@ void rpcSendRequest(void *shandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t
// connection type is application specific.
// for TDengine, all the query, show commands shall have TCP connection
tmsg_t type = pMsg->msgType;
if (type == TDMT_VND_QUERY || type == TDMT_MND_SHOW_RETRIEVE
|| type == TDMT_VND_FETCH || type == TDMT_MND_VGROUP_LIST
|| type == TDMT_VND_TABLES_META || type == TDMT_VND_TABLE_META
|| type == TDMT_MND_SHOW || type == TDMT_MND_STATUS || type == TDMT_VND_ALTER_TABLE)
if (type == TDMT_VND_QUERY || type == TDMT_MND_SHOW_RETRIEVE || type == TDMT_VND_FETCH ||
type == TDMT_MND_VGROUP_LIST || type == TDMT_VND_TABLES_META || type == TDMT_VND_TABLE_META ||
type == TDMT_MND_SHOW || type == TDMT_MND_STATUS || type == TDMT_VND_ALTER_TABLE)
pContext->connType = RPC_CONN_TCPC;
pContext->rid = taosAddRef(tsRpcRefId, pContext);
@ -482,8 +612,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
rpcSendMsgToPeer(pConn, msg, msgLen);
// if not yet secured, set it except in the NOT_READY case, since the client won't treat it as secured
if (pConn->secured == 0 && pMsg->code != TSDB_CODE_RPC_NOT_READY)
pConn->secured = 1; // connection shall be secured
if (pConn->secured == 0 && pMsg->code != TSDB_CODE_RPC_NOT_READY) pConn->secured = 1; // connection shall be secured
if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg);
pConn->pReqMsg = NULL;
@ -567,7 +696,6 @@ int rpcReportProgress(void *handle, char *pCont, int contLen) {
}
void rpcCancelRequest(int64_t rid) {
SRpcReqContext *pContext = taosAcquireRef(tsRpcRefId, rid);
if (pContext == NULL) return;
@ -631,7 +759,8 @@ static void rpcReleaseConn(SRpcConn *pConn) {
if (pRpc->connType == TAOS_CONN_SERVER) {
char hashstr[40] = {0};
size_t size = snprintf(hashstr, sizeof(hashstr), "%x:%x:%x:%d", pConn->peerIp, pConn->linkUid, pConn->peerId, pConn->connType);
size_t size = snprintf(hashstr, sizeof(hashstr), "%x:%x:%x:%d", pConn->peerIp, pConn->linkUid, pConn->peerId,
pConn->connType);
taosHashRemove(pRpc->hash, hashstr, size);
rpcFreeMsg(pConn->pRspMsg); // it may have a response msg saved, but not request msg
pConn->pRspMsg = NULL;
@ -682,8 +811,7 @@ static void rpcCloseConn(void *thandle) {
rpcLockConn(pConn);
if (pConn->user[0])
rpcReleaseConn(pConn);
if (pConn->user[0]) rpcReleaseConn(pConn);
rpcUnlockConn(pConn);
}
@ -717,7 +845,8 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
char hashstr[40] = {0};
SRpcHead *pHead = (SRpcHead *)pRecv->msg;
size_t size = snprintf(hashstr, sizeof(hashstr), "%x:%x:%x:%d", pRecv->ip, pHead->linkUid, pHead->sourceId, pRecv->connType);
size_t size =
snprintf(hashstr, sizeof(hashstr), "%x:%x:%x:%d", pRecv->ip, pHead->linkUid, pHead->sourceId, pRecv->connType);
// check if it is already allocated
SRpcConn **ppConn = (SRpcConn **)(taosHashGet(pRpc->hash, hashstr, size));
@ -767,7 +896,8 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
}
taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES);
tDebug("%s %p server connection is allocated, uid:0x%x sid:%d key:%s", pRpc->label, pConn, pConn->linkUid, sid, hashstr);
tDebug("%s %p server connection is allocated, uid:0x%x sid:%d key:%s", pRpc->label, pConn, pConn->linkUid, sid,
hashstr);
}
return pConn;
@ -807,7 +937,8 @@ static SRpcConn *rpcSetupConnToServer(SRpcReqContext *pContext) {
SRpcInfo *pRpc = pContext->pRpc;
SEpSet * pEpSet = &pContext->epSet;
pConn = rpcGetConnFromCache(pRpc->pCache, pEpSet->fqdn[pEpSet->inUse], pEpSet->port[pEpSet->inUse], pContext->connType);
pConn =
rpcGetConnFromCache(pRpc->pCache, pEpSet->fqdn[pEpSet->inUse], pEpSet->port[pEpSet->inUse], pContext->connType);
if (pConn == NULL || pConn->user[0] == 0) {
pConn = rpcOpenConn(pRpc, pEpSet->fqdn[pEpSet->inUse], pEpSet->port[pEpSet->inUse], pContext->connType);
}
@ -825,13 +956,11 @@ static SRpcConn *rpcSetupConnToServer(SRpcReqContext *pContext) {
}
static int rpcProcessReqHead(SRpcConn *pConn, SRpcHead *pHead) {
if (pConn->peerId == 0) {
pConn->peerId = pHead->sourceId;
} else {
if (pConn->peerId != pHead->sourceId) {
tDebug("%s, source Id is changed, old:0x%08x new:0x%08x", pConn->info,
pConn->peerId, pHead->sourceId);
tDebug("%s, source Id is changed, old:0x%08x new:0x%08x", pConn->info, pConn->peerId, pHead->sourceId);
return TSDB_CODE_RPC_INVALID_VALUE;
}
}
@ -856,8 +985,7 @@ static int rpcProcessReqHead(SRpcConn *pConn, SRpcHead *pHead) {
}
if (pConn->inType != 0) {
tDebug("%s, last session is not finished, inTranId:%d tranId:%d", pConn->info,
pConn->inTranId, pHead->tranId);
tDebug("%s, last session is not finished, inTranId:%d tranId:%d", pConn->info, pConn->inTranId, pHead->tranId);
return TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED;
}
@ -961,18 +1089,22 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont
if (TMSG_INDEX(pHead->msgType) >= TDMT_MAX || TMSG_INDEX(pHead->msgType) <= 0) {
tDebug("%s sid:%d, invalid message type:%d", pRpc->label, sid, pHead->msgType);
terrno = TSDB_CODE_RPC_INVALID_MSG_TYPE; return NULL;
terrno = TSDB_CODE_RPC_INVALID_MSG_TYPE;
return NULL;
}
if (sid < 0 || sid >= pRpc->sessions) {
tDebug("%s sid:%d, sid is out of range, max sid:%d, %s discarded", pRpc->label, sid,
pRpc->sessions, TMSG_INFO(pHead->msgType));
terrno = TSDB_CODE_RPC_INVALID_SESSION_ID; return NULL;
tDebug("%s sid:%d, sid is out of range, max sid:%d, %s discarded", pRpc->label, sid, pRpc->sessions,
TMSG_INFO(pHead->msgType));
terrno = TSDB_CODE_RPC_INVALID_SESSION_ID;
return NULL;
}
if (rpcIsReq(pHead->msgType) && htonl(pHead->msgVer) != tsVersion >> 8) {
tDebug("%s sid:%d, invalid client version:%x/%x %s", pRpc->label, sid, htonl(pHead->msgVer), tsVersion, TMSG_INFO(pHead->msgType));
terrno = TSDB_CODE_RPC_INVALID_VERSION; return NULL;
tDebug("%s sid:%d, invalid client version:%x/%x %s", pRpc->label, sid, htonl(pHead->msgVer), tsVersion,
TMSG_INFO(pHead->msgType));
terrno = TSDB_CODE_RPC_INVALID_VERSION;
return NULL;
}
pConn = rpcGetConnObj(pRpc, sid, pRecv);
@ -1100,13 +1232,13 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
taosIpPort2String(pRecv->ip, pRecv->port, ipstr);
if (TMSG_INDEX(pHead->msgType) >= 1 && TMSG_INDEX(pHead->msgType) < TDMT_MAX) {
tDebug("%s %p %p, %s received from %s, parse code:0x%x len:%d sig:0x%08x:0x%08x:%d code:0x%x", pRpc->label,
pConn, (void *)pHead->ahandle, TMSG_INFO(pHead->msgType), ipstr, terrno, pRecv->msgLen,
pHead->sourceId, pHead->destId, pHead->tranId, pHead->code);
tDebug("%s %p %p, %s received from %s, parse code:0x%x len:%d sig:0x%08x:0x%08x:%d code:0x%x", pRpc->label, pConn,
(void *)pHead->ahandle, TMSG_INFO(pHead->msgType), ipstr, terrno, pRecv->msgLen, pHead->sourceId,
pHead->destId, pHead->tranId, pHead->code);
} else {
tDebug("%s %p %p, %d received from %s, parse code:0x%x len:%d sig:0x%08x:0x%08x:%d code:0x%x", pRpc->label,
pConn, (void *)pHead->ahandle, pHead->msgType, ipstr, terrno, pRecv->msgLen,
pHead->sourceId, pHead->destId, pHead->tranId, pHead->code);
tDebug("%s %p %p, %d received from %s, parse code:0x%x len:%d sig:0x%08x:0x%08x:%d code:0x%x", pRpc->label, pConn,
(void *)pHead->ahandle, pHead->msgType, ipstr, terrno, pRecv->msgLen, pHead->sourceId, pHead->destId,
pHead->tranId, pHead->code);
}
int32_t code = terrno;
@ -1118,9 +1250,11 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
rpcCloseConn(pConn);
}
if (TMSG_INDEX(pHead->msgType) + 1 > 1 && TMSG_INDEX(pHead->msgType) + 1 < TDMT_MAX) {
tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, TMSG_INFO(pHead->msgType+1), code);
tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle,
TMSG_INFO(pHead->msgType + 1), code);
} else {
tError("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, TMSG_INFO(pHead->msgType), code);
tError("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle,
TMSG_INFO(pHead->msgType), code);
}
}
} else { // msg is passed to app only parsing is ok
@ -1144,8 +1278,7 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
} else {
// for asynchronous API
SEpSet *pEpSet = NULL;
if (pContext->epSet.inUse != pContext->oldInUse || pContext->redirect)
pEpSet = &pContext->epSet;
if (pContext->epSet.inUse != pContext->oldInUse || pContext->redirect) pEpSet = &pContext->epSet;
(*pRpc->cfp)(pRpc->parent, pMsg, pEpSet);
}
@ -1155,7 +1288,6 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
}
static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqContext *pContext) {
SRpcInfo *pRpc = pConn->pRpc;
SRpcMsg rpcMsg;
@ -1180,7 +1312,8 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqConte
// for UDP, port may be changed by server, the port in epSet shall be used for cache
if (pHead->code != TSDB_CODE_RPC_TOO_SLOW) {
rpcAddConnIntoCache(pRpc->pCache, pConn, pConn->peerFqdn, pContext->epSet.port[pContext->epSet.inUse], pConn->connType);
rpcAddConnIntoCache(pRpc->pCache, pConn, pConn->peerFqdn, pContext->epSet.port[pContext->epSet.inUse],
pConn->connType);
} else {
rpcCloseConn(pConn);
}
@ -1200,7 +1333,8 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqConte
}
rpcSendReqToServer(pRpc, pContext);
rpcFreeCont(rpcMsg.pCont);
} else if (pHead->code == TSDB_CODE_RPC_NOT_READY || pHead->code == TSDB_CODE_APP_NOT_READY || pHead->code == TSDB_CODE_DND_OFFLINE) {
} else if (pHead->code == TSDB_CODE_RPC_NOT_READY || pHead->code == TSDB_CODE_APP_NOT_READY ||
pHead->code == TSDB_CODE_DND_OFFLINE) {
pContext->code = pHead->code;
rpcProcessConnError(pContext, NULL);
rpcFreeCont(rpcMsg.pCont);
@ -1347,14 +1481,12 @@ static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) {
msgLen = rpcAddAuthPart(pConn, msg, msgLen);
if (rpcIsReq(pHead->msgType)) {
tDebug("%s, %s is sent to %s:%hu, len:%d sig:0x%08x:0x%08x:%d",
pConn->info, TMSG_INFO(pHead->msgType), pConn->peerFqdn, pConn->peerPort,
msgLen, pHead->sourceId, pHead->destId, pHead->tranId);
tDebug("%s, %s is sent to %s:%hu, len:%d sig:0x%08x:0x%08x:%d", pConn->info, TMSG_INFO(pHead->msgType),
pConn->peerFqdn, pConn->peerPort, msgLen, pHead->sourceId, pHead->destId, pHead->tranId);
} else {
if (pHead->code == 0) pConn->secured = 1; // for success response, set link as secured
tDebug("%s, %s is sent to 0x%x:%hu, code:0x%x len:%d sig:0x%08x:0x%08x:%d",
pConn->info, TMSG_INFO(pHead->msgType), pConn->peerIp, pConn->peerPort,
htonl(pHead->code), msgLen, pHead->sourceId, pHead->destId, pHead->tranId);
tDebug("%s, %s is sent to 0x%x:%hu, code:0x%x len:%d sig:0x%08x:0x%08x:%d", pConn->info, TMSG_INFO(pHead->msgType),
pConn->peerIp, pConn->peerPort, htonl(pHead->code), msgLen, pHead->sourceId, pHead->destId, pHead->tranId);
}
// tTrace("connection type is: %d", pConn->connType);
@ -1411,7 +1543,8 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) {
pConn->pTimer = taosTmrStart(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl);
} else {
// close the connection
tDebug("%s, failed to send msg:%s to %s:%hu", pConn->info, TMSG_INFO(pConn->outType), pConn->peerFqdn, pConn->peerPort);
tDebug("%s, failed to send msg:%s to %s:%hu", pConn->info, TMSG_INFO(pConn->outType), pConn->peerFqdn,
pConn->peerPort);
if (pConn->pContext) {
pConn->pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
pConn->pContext->pConn = NULL;
@ -1593,8 +1726,8 @@ static int rpcCheckAuthentication(SRpcConn *pConn, char *msg, int msgLen) {
// for response, if code is auth failure, it shall bypass the auth process
code = htonl(pHead->code);
if (code == TSDB_CODE_RPC_INVALID_TIME_STAMP || code == TSDB_CODE_RPC_AUTH_FAILURE ||
code == TSDB_CODE_RPC_INVALID_VERSION ||
code == TSDB_CODE_RPC_AUTH_REQUIRED || code == TSDB_CODE_MND_USER_NOT_EXIST || code == TSDB_CODE_RPC_NOT_READY) {
code == TSDB_CODE_RPC_INVALID_VERSION || code == TSDB_CODE_RPC_AUTH_REQUIRED ||
code == TSDB_CODE_MND_USER_NOT_EXIST || code == TSDB_CODE_RPC_NOT_READY) {
pHead->msgLen = (int32_t)htonl((uint32_t)pHead->msgLen);
// tTrace("%s, dont check authentication since code is:0x%x", pConn->info, code);
return 0;
@ -1647,13 +1780,9 @@ static void rpcUnlockConn(SRpcConn *pConn) {
}
}
static void rpcAddRef(SRpcInfo *pRpc)
{
atomic_add_fetch_32(&pRpc->refCount, 1);
}
static void rpcAddRef(SRpcInfo *pRpc) { atomic_add_fetch_32(&pRpc->refCount, 1); }
static void rpcDecRef(SRpcInfo *pRpc)
{
static void rpcDecRef(SRpcInfo *pRpc) {
if (atomic_sub_fetch_32(&pRpc->refCount, 1) == 0) {
rpcCloseConnCache(pRpc->pCache);
taosHashCleanup(pRpc->hash);
@ -1668,4 +1797,4 @@ static void rpcDecRef(SRpcInfo *pRpc)
atomic_sub_fetch_32(&tsRpcNum, 1);
}
}
#endif

View File

@ -13,14 +13,18 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "rpcTcp.h"
#include <uv.h>
#include "os.h"
#include "tutil.h"
#include "rpcHead.h"
#include "rpcLog.h"
#include "taosdef.h"
#include "taoserror.h"
#include "rpcLog.h"
#include "rpcHead.h"
#include "rpcTcp.h"
#include "tutil.h"
#ifdef USE_UV
#else
typedef struct SFdObj {
void * signature;
SOCKET fd; // TCP socket FD
@ -173,7 +177,9 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
}
static void taosStopTcpThread(SThreadObj *pThreadObj) {
if (pThreadObj == NULL) { return;}
if (pThreadObj == NULL) {
return;
}
// save thread into local variable and signal thread to stop
pthread_t thread = pThreadObj->thread;
if (!taosCheckPthreadValid(thread)) {
@ -262,7 +268,6 @@ static void *taosAcceptTcpConnection(void *arg) {
continue;
}
// pick up the thread to handle this connection
pThreadObj = pServerObj->pThreadObj[threadId];
@ -396,8 +401,7 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
struct sockaddr_in sin;
uint16_t localPort = 0;
unsigned int addrlen = sizeof(sin);
if (getsockname(fd, (struct sockaddr *)&sin, &addrlen) == 0 &&
sin.sin_family == AF_INET && addrlen == sizeof(sin)) {
if (getsockname(fd, (struct sockaddr *)&sin, &addrlen) == 0 && sin.sin_family == AF_INET && addrlen == sizeof(sin)) {
localPort = (uint16_t)ntohs(sin.sin_port);
}
@ -407,8 +411,8 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
pFdObj->thandle = thandle;
pFdObj->port = port;
pFdObj->ip = ip;
tDebug("%s %p TCP connection to 0x%x:%hu is created, localPort:%hu FD:%p numOfFds:%d",
pThreadObj->label, thandle, ip, port, localPort, pFdObj, pThreadObj->numOfFds);
tDebug("%s %p TCP connection to 0x%x:%hu is created, localPort:%hu FD:%p numOfFds:%d", pThreadObj->label, thandle,
ip, port, localPort, pFdObj, pThreadObj->numOfFds);
} else {
tError("%s failed to malloc client FdObj(%s)", pThreadObj->label, strerror(errno));
taosCloseSocket(fd);
@ -441,7 +445,6 @@ int taosSendTcpData(uint32_t ip, uint16_t port, void *data, int len, void *chand
}
static void taosReportBrokenLink(SFdObj *pFdObj) {
SThreadObj *pThreadObj = pFdObj->pThreadObj;
// notify the upper layer, so it will clean the associated context
@ -483,7 +486,8 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) {
tError("%s %p TCP malloc(size:%d) fail", pThreadObj->label, pFdObj->thandle, msgLen);
return -1;
} else {
tTrace("%s %p read data, FD:%p fd:%d TCP malloc mem:%p", pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, buffer);
tTrace("%s %p read data, FD:%p fd:%d TCP malloc mem:%p", pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd,
buffer);
}
msg = buffer + tsRpcOverhead;
@ -491,8 +495,7 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) {
retLen = taosReadMsg(pFdObj->fd, msg + headLen, leftLen);
if (leftLen != retLen) {
tError("%s %p read error, leftLen:%d retLen:%d FD:%p",
pThreadObj->label, pFdObj->thandle, leftLen, retLen, pFdObj);
tError("%s %p read error, leftLen:%d retLen:%d FD:%p", pThreadObj->label, pFdObj->thandle, leftLen, retLen, pFdObj);
free(buffer);
return -1;
}
@ -620,7 +623,6 @@ static SFdObj *taosMallocFdObj(SThreadObj *pThreadObj, SOCKET fd) {
}
static void taosFreeFdObj(SFdObj *pFdObj) {
if (pFdObj == NULL) return;
if (pFdObj->signature != pFdObj) return;
@ -638,8 +640,8 @@ static void taosFreeFdObj(SFdObj *pFdObj) {
pThreadObj->numOfFds--;
if (pThreadObj->numOfFds < 0)
tError("%s %p TCP thread:%d, number of FDs is negative!!!",
pThreadObj->label, pFdObj->thandle, pThreadObj->threadId);
tError("%s %p TCP thread:%d, number of FDs is negative!!!", pThreadObj->label, pFdObj->thandle,
pThreadObj->threadId);
if (pFdObj->prev) {
(pFdObj->prev)->next = pFdObj->next;
@ -653,8 +655,10 @@ static void taosFreeFdObj(SFdObj *pFdObj) {
pthread_mutex_unlock(&pThreadObj->mutex);
tDebug("%s %p TCP connection is closed, FD:%p fd:%d numOfFds:%d",
pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, pThreadObj->numOfFds);
tDebug("%s %p TCP connection is closed, FD:%p fd:%d numOfFds:%d", pThreadObj->label, pFdObj->thandle, pFdObj,
pFdObj->fd, pThreadObj->numOfFds);
tfree(pFdObj);
}
#endif

View File

@ -13,15 +13,18 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "rpcUdp.h"
#include "os.h"
#include "ttimer.h"
#include "tutil.h"
#include "rpcHead.h"
#include "rpcLog.h"
#include "taosdef.h"
#include "taoserror.h"
#include "rpcLog.h"
#include "rpcUdp.h"
#include "rpcHead.h"
#include "ttimer.h"
#include "tutil.h"
#ifdef USE_UV
// no UDP support currently
#else
#define RPC_MAX_UDP_CONNS 256
#define RPC_MAX_UDP_PKTS 1000
#define RPC_UDP_BUF_TIME 5 // mseconds
@ -98,8 +101,8 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
struct sockaddr_in sin;
unsigned int addrlen = sizeof(sin);
if (getsockname(pConn->fd, (struct sockaddr *)&sin, &addrlen) == 0 &&
sin.sin_family == AF_INET && addrlen == sizeof(sin)) {
if (getsockname(pConn->fd, (struct sockaddr *)&sin, &addrlen) == 0 && sin.sin_family == AF_INET &&
addrlen == sizeof(sin)) {
pConn->localPort = (uint16_t)ntohs(sin.sin_port);
}
@ -257,4 +260,4 @@ int taosSendUdpData(uint32_t ip, uint16_t port, void *data, int dataLen, void *c
return ret;
}
#endif