Merge remote-tracking branch 'origin/3.0' into feature/tfs
commit ec66739d4d
@@ -188,16 +188,19 @@ void* tDeserializeSClientHbRsp(void* buf, SClientHbRsp* pRsp);

static FORCE_INLINE void tFreeClientHbReq(void *pReq) {
SClientHbReq* req = (SClientHbReq*)pReq;
taosHashCleanup(req->info);
free(pReq);
if (req->info) taosHashCleanup(req->info);
}

int tSerializeSClientHbBatchReq(void** buf, const SClientHbBatchReq* pReq);
void* tDeserializeSClientHbBatchReq(void* buf, SClientHbBatchReq* pReq);

static FORCE_INLINE void tFreeClientHbBatchReq(void* pReq) {
static FORCE_INLINE void tFreeClientHbBatchReq(void* pReq, bool deep) {
SClientHbBatchReq *req = (SClientHbBatchReq*)pReq;
//taosArrayDestroyEx(req->reqs, tFreeClientHbReq);
if (deep) {
taosArrayDestroyEx(req->reqs, tFreeClientHbReq);
} else {
taosArrayDestroy(req->reqs);
}
free(pReq);
}
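Not part of the diff: a minimal sketch of how a caller might use the new deep flag of tFreeClientHbBatchReq. Every name here other than tFreeClientHbBatchReq/tFreeClientHbReq (the builder, sender and ownership flag) is illustrative only.

// Sketch only: shallow vs. deep release of a heartbeat batch request.
SClientHbBatchReq* pBatch = buildHbBatchReq();   // hypothetical builder
sendHbBatchReq(pBatch);                          // hypothetical sender
if (elementsOwnedElsewhere) {                    // hypothetical ownership flag
  tFreeClientHbBatchReq(pBatch, false);  // shallow: only the reqs array container is destroyed
} else {
  tFreeClientHbBatchReq(pBatch, true);   // deep: tFreeClientHbReq is run on every element first
}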
@@ -70,7 +70,7 @@ int32_t dsCreateDataSinker(const struct SDataSink *pDataSink, DataSinkHandle* pH
*/
int32_t dsPutDataBlock(DataSinkHandle handle, const SInputData* pInput, bool* pContinue);

void dsEndPut(DataSinkHandle handle, int64_t useconds);
void dsEndPut(DataSinkHandle handle, uint64_t useconds);

/**
* Get the length of the data returned by the next call to dsGetDataBlock.
@@ -20,6 +20,8 @@
extern "C" {
#endif

#include "common.h"

typedef void* qTaskInfo_t;
typedef void* DataSinkHandle;
struct SSubplan;

@@ -34,7 +36,7 @@ struct SSubplan;
* @param qId
* @return
*/
int32_t qCreateExecTask(void* tsdb, int32_t vgId, struct SSubplan* pPlan, qTaskInfo_t* pTaskInfo);
int32_t qCreateExecTask(void* tsdb, int32_t vgId, struct SSubplan* pPlan, qTaskInfo_t* pTaskInfo, DataSinkHandle* handle);

/**
* The main task execution function, including query on both table and multiple tables,

@@ -44,7 +46,7 @@ int32_t qCreateExecTask(void* tsdb, int32_t vgId, struct SSubplan* pPlan, qTaskI
* @param handle
* @return
*/
int32_t qExecTask(qTaskInfo_t tinfo, DataSinkHandle* handle);
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t *useconds);

/**
* Retrieve the produced results information, if current query is not paused or completed,
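Not part of the diff: a hedged sketch of how a caller might drive the revised executor API, where the data-sink handle now comes out of qCreateExecTask and qExecTask hands back one result block per call. pTsdb, vgId and pSubplan are assumed to be prepared elsewhere; error handling is elided.

qTaskInfo_t    task = NULL;
DataSinkHandle sink = NULL;
int32_t code = qCreateExecTask(pTsdb, vgId, pSubplan, &task, &sink);
if (code == TSDB_CODE_SUCCESS) {
  SSDataBlock* pBlock    = NULL;
  uint64_t     elapsedUs = 0;
  do {
    code = qExecTask(task, &pBlock, &elapsedUs);   // pBlock == NULL signals completion
    // consume pBlock here
  } while (code == TSDB_CODE_SUCCESS && pBlock != NULL);
}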
@@ -114,6 +114,14 @@ void schedulerDestroy(void);
*/
int32_t schedulerConvertDagToTaskList(SQueryDag* pDag, SArray **pTasks);

/**
* make multiple copies of one task's info
* @param src
* @param dst SArray**<STaskInfo>
* @return
*/
int32_t schedulerCopyTask(STaskInfo *src, SArray **dst, int32_t copyNum);

void schedulerFreeTaskList(SArray *taskList);
@@ -361,6 +361,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_TASK_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0717) //"Task dropping")
#define TSDB_CODE_QRY_DUPLICATTED_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0718) //"Duplicatted operation")
#define TSDB_CODE_QRY_TASK_MSG_ERROR TAOS_DEF_ERROR_CODE(0, 0x0719) //"Task message error")
#define TSDB_CODE_QRY_JOB_FREED TAOS_DEF_ERROR_CODE(0, 0x071A) //"Job freed")

// grant
#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) //"License expired")
@@ -60,15 +60,17 @@ SClientHbBatchReq* hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
pIter = taosHashIterate(pAppHbMgr->activeInfo, pIter);
}

#if 0
pIter = taosHashIterate(pAppHbMgr->getInfoFuncs, NULL);
while (pIter != NULL) {
FGetConnInfo getConnInfoFp = (FGetConnInfo)pIter;
SClientHbKey connKey;
taosHashCopyKey(pIter, &connKey);
getConnInfoFp(connKey, NULL);
SArray* pArray = getConnInfoFp(connKey, NULL);

pIter = taosHashIterate(pAppHbMgr->getInfoFuncs, pIter);
}
#endif

return pBatchReq;
}
@@ -99,12 +101,12 @@ static void* hbThreadFunc(void* param) {
//TODO: error handling
break;
}
void *bufCopy = buf;
tSerializeSClientHbBatchReq(&bufCopy, pReq);
void *abuf = buf;
tSerializeSClientHbBatchReq(&abuf, pReq);
SMsgSendInfo *pInfo = malloc(sizeof(SMsgSendInfo));
if (pInfo == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
tFreeClientHbBatchReq(pReq);
tFreeClientHbBatchReq(pReq, false);
free(buf);
break;
}

@@ -120,7 +122,7 @@ static void* hbThreadFunc(void* param) {
int64_t transporterId = 0;
SEpSet epSet = getEpSet_s(&pAppInstInfo->mgmtEp);
asyncSendMsgToServer(pAppInstInfo->pTransporter, &epSet, &transporterId, pInfo);
tFreeClientHbBatchReq(pReq);
tFreeClientHbBatchReq(pReq, false);

atomic_add_fetch_32(&pAppHbMgr->reportCnt, 1);
}
@@ -155,6 +157,9 @@ SAppHbMgr* appHbMgrInit(SAppInstInfo* pAppInstInfo) {
}
// init stat
pAppHbMgr->startTime = taosGetTimestampMs();
pAppHbMgr->connKeyCnt = 0;
pAppHbMgr->reportCnt = 0;
pAppHbMgr->reportBytes = 0;

// init app info
pAppHbMgr->pAppInstInfo = pAppInstInfo;
@@ -841,6 +841,7 @@ static SVnodeObj *dndAcquireVnodeFromMsg(SDnode *pDnode, SRpcMsg *pMsg) {

SVnodeObj *pVnode = dndAcquireVnode(pDnode, pHead->vgId);
if (pVnode == NULL) {
dError("vgId:%d, failed to acquire vnode while process req", pHead->vgId);
if (pMsg->msgType & 1u) {
SRpcMsg rsp = {.handle = pMsg->handle, .code = TSDB_CODE_VND_INVALID_VGROUP_ID};
rpcSendResponse(&rsp);
@@ -5,5 +5,4 @@ add_subdirectory(bnode)
add_subdirectory(snode)
add_subdirectory(mnode)
add_subdirectory(vnode)
add_subdirectory(stb)
add_subdirectory(sut)
@@ -1,11 +0,0 @@
aux_source_directory(. DSTB_SRC)
add_executable(dnode_test_stb ${DSTB_SRC})
target_link_libraries(
dnode_test_stb
PUBLIC sut
)

add_test(
NAME dnode_test_stb
COMMAND dnode_test_stb
)
@@ -1,139 +0,0 @@
/**
* @file db.cpp
* @author slguan (slguan@taosdata.com)
* @brief DNODE module vnode tests
* @version 0.1
* @date 2021-12-20
*
* @copyright Copyright (c) 2021
*
*/

#include "sut.h"

class DndTestVnode : public ::testing::Test {
protected:
static void SetUpTestSuite() { test.Init("/tmp/dnode_test_stb", 9116); }
static void TearDownTestSuite() { test.Cleanup(); }

static Testbase test;

public:
void SetUp() override {}
void TearDown() override {}
};

Testbase DndTestVnode::test;

TEST_F(DndTestVnode, 01_Create_Restart_Drop_Vnode) {
{
for (int i = 0; i < 3; ++i) {
int32_t contLen = sizeof(SCreateVnodeReq);

SCreateVnodeReq* pReq = (SCreateVnodeReq*)rpcMallocCont(contLen);
pReq->vgId = htonl(2);
pReq->dnodeId = htonl(1);
strcpy(pReq->db, "1.d1");
pReq->dbUid = htobe64(9527);
pReq->vgVersion = htonl(1);
pReq->cacheBlockSize = htonl(16);
pReq->totalBlocks = htonl(10);
pReq->daysPerFile = htonl(10);
pReq->daysToKeep0 = htonl(3650);
pReq->daysToKeep1 = htonl(3650);
pReq->daysToKeep2 = htonl(3650);
pReq->minRows = htonl(100);
pReq->minRows = htonl(4096);
pReq->commitTime = htonl(3600);
pReq->fsyncPeriod = htonl(3000);
pReq->walLevel = 1;
pReq->precision = 0;
pReq->compression = 2;
pReq->replica = 1;
pReq->quorum = 1;
pReq->update = 0;
pReq->cacheLastRow = 0;
pReq->selfIndex = 0;
for (int r = 0; r < pReq->replica; ++r) {
SReplica* pReplica = &pReq->replicas[r];
pReplica->id = htonl(1);
pReplica->port = htons(9527);
}

SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_VNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
if (i == 0) {
ASSERT_EQ(pRsp->code, 0);
test.Restart();
} else {
ASSERT_EQ(pRsp->code, TSDB_CODE_DND_VNODE_ALREADY_DEPLOYED);
}
}
}

{
for (int i = 0; i < 3; ++i) {
int32_t contLen = sizeof(SAlterVnodeReq);

SAlterVnodeReq* pReq = (SAlterVnodeReq*)rpcMallocCont(contLen);
pReq->vgId = htonl(2);
pReq->dnodeId = htonl(1);
strcpy(pReq->db, "1.d1");
pReq->dbUid = htobe64(9527);
pReq->vgVersion = htonl(2);
pReq->cacheBlockSize = htonl(16);
pReq->totalBlocks = htonl(10);
pReq->daysPerFile = htonl(10);
pReq->daysToKeep0 = htonl(3650);
pReq->daysToKeep1 = htonl(3650);
pReq->daysToKeep2 = htonl(3650);
pReq->minRows = htonl(100);
pReq->minRows = htonl(4096);
pReq->commitTime = htonl(3600);
pReq->fsyncPeriod = htonl(3000);
pReq->walLevel = 1;
pReq->precision = 0;
pReq->compression = 2;
pReq->replica = 1;
pReq->quorum = 1;
pReq->update = 0;
pReq->cacheLastRow = 0;
pReq->selfIndex = 0;
for (int r = 0; r < pReq->replica; ++r) {
SReplica* pReplica = &pReq->replicas[r];
pReplica->id = htonl(1);
pReplica->port = htons(9527);
}

SRpcMsg* pRsp = test.SendReq(TDMT_DND_ALTER_VNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
}
}

{
for (int i = 0; i < 3; ++i) {
int32_t contLen = sizeof(SDropVnodeReq);

SDropVnodeReq* pReq = (SDropVnodeReq*)rpcMallocCont(contLen);
pReq->vgId = htonl(2);
pReq->dnodeId = htonl(1);
strcpy(pReq->db, "1.d1");
pReq->dbUid = htobe64(9527);

SRpcMsg rpcMsg = {0};
rpcMsg.pCont = pReq;
rpcMsg.contLen = sizeof(SDropVnodeReq);
rpcMsg.msgType = TDMT_DND_DROP_VNODE;

SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_VNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
if (i == 0) {
ASSERT_EQ(pRsp->code, 0);
test.Restart();
} else {
ASSERT_EQ(pRsp->code, TSDB_CODE_DND_VNODE_NOT_DEPLOYED);
}
}
}
}
@@ -25,115 +25,216 @@ class DndTestVnode : public ::testing::Test {

Testbase DndTestVnode::test;

TEST_F(DndTestVnode, 01_Create_Restart_Drop_Vnode) {
{
for (int i = 0; i < 3; ++i) {
int32_t contLen = sizeof(SCreateVnodeReq);
TEST_F(DndTestVnode, 01_Create_Vnode) {
for (int i = 0; i < 3; ++i) {
int32_t contLen = sizeof(SCreateVnodeReq);

SCreateVnodeReq* pReq = (SCreateVnodeReq*)rpcMallocCont(contLen);
pReq->vgId = htonl(2);
pReq->dnodeId = htonl(1);
strcpy(pReq->db, "1.d1");
pReq->dbUid = htobe64(9527);
pReq->vgVersion = htonl(1);
pReq->cacheBlockSize = htonl(16);
pReq->totalBlocks = htonl(10);
pReq->daysPerFile = htonl(10);
pReq->daysToKeep0 = htonl(3650);
pReq->daysToKeep1 = htonl(3650);
pReq->daysToKeep2 = htonl(3650);
pReq->minRows = htonl(100);
pReq->minRows = htonl(4096);
pReq->commitTime = htonl(3600);
pReq->fsyncPeriod = htonl(3000);
pReq->walLevel = 1;
pReq->precision = 0;
pReq->compression = 2;
pReq->replica = 1;
pReq->quorum = 1;
pReq->update = 0;
pReq->cacheLastRow = 0;
pReq->selfIndex = 0;
for (int r = 0; r < pReq->replica; ++r) {
SReplica* pReplica = &pReq->replicas[r];
pReplica->id = htonl(1);
pReplica->port = htons(9527);
}
SCreateVnodeReq* pReq = (SCreateVnodeReq*)rpcMallocCont(contLen);
pReq->vgId = htonl(2);
pReq->dnodeId = htonl(1);
strcpy(pReq->db, "1.d1");
pReq->dbUid = htobe64(9527);
pReq->vgVersion = htonl(1);
pReq->cacheBlockSize = htonl(16);
pReq->totalBlocks = htonl(10);
pReq->daysPerFile = htonl(10);
pReq->daysToKeep0 = htonl(3650);
pReq->daysToKeep1 = htonl(3650);
pReq->daysToKeep2 = htonl(3650);
pReq->minRows = htonl(100);
pReq->minRows = htonl(4096);
pReq->commitTime = htonl(3600);
pReq->fsyncPeriod = htonl(3000);
pReq->walLevel = 1;
pReq->precision = 0;
pReq->compression = 2;
pReq->replica = 1;
pReq->quorum = 1;
pReq->update = 0;
pReq->cacheLastRow = 0;
pReq->selfIndex = 0;
for (int r = 0; r < pReq->replica; ++r) {
SReplica* pReplica = &pReq->replicas[r];
pReplica->id = htonl(1);
pReplica->port = htons(9527);
}

SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_VNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
if (i == 0) {
ASSERT_EQ(pRsp->code, 0);
test.Restart();
} else {
ASSERT_EQ(pRsp->code, TSDB_CODE_DND_VNODE_ALREADY_DEPLOYED);
}
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_VNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
if (i == 0) {
ASSERT_EQ(pRsp->code, 0);
test.Restart();
} else {
ASSERT_EQ(pRsp->code, TSDB_CODE_DND_VNODE_ALREADY_DEPLOYED);
}
}
}

TEST_F(DndTestVnode, 02_ALTER_Vnode) {
for (int i = 0; i < 3; ++i) {
int32_t contLen = sizeof(SAlterVnodeReq);

SAlterVnodeReq* pReq = (SAlterVnodeReq*)rpcMallocCont(contLen);
pReq->vgId = htonl(2);
pReq->dnodeId = htonl(1);
strcpy(pReq->db, "1.d1");
pReq->dbUid = htobe64(9527);
pReq->vgVersion = htonl(2);
pReq->cacheBlockSize = htonl(16);
pReq->totalBlocks = htonl(10);
pReq->daysPerFile = htonl(10);
pReq->daysToKeep0 = htonl(3650);
pReq->daysToKeep1 = htonl(3650);
pReq->daysToKeep2 = htonl(3650);
pReq->minRows = htonl(100);
pReq->minRows = htonl(4096);
pReq->commitTime = htonl(3600);
pReq->fsyncPeriod = htonl(3000);
pReq->walLevel = 1;
pReq->precision = 0;
pReq->compression = 2;
pReq->replica = 1;
pReq->quorum = 1;
pReq->update = 0;
pReq->cacheLastRow = 0;
pReq->selfIndex = 0;
for (int r = 0; r < pReq->replica; ++r) {
SReplica* pReplica = &pReq->replicas[r];
pReplica->id = htonl(1);
pReplica->port = htons(9527);
}

SRpcMsg* pRsp = test.SendReq(TDMT_DND_ALTER_VNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
}
}

TEST_F(DndTestVnode, 03_Create_Stb) {
for (int i = 0; i < 1; ++i) {
SVCreateTbReq req = {0};
req.ver = 0;
req.name = (char*)"stb1";
req.ttl = 0;
req.keep = 0;
req.type = TD_SUPER_TABLE;

SSchema schemas[5] = {0};
{
SSchema* pSchema = &schemas[0];
pSchema->bytes = htonl(8);
pSchema->type = TSDB_DATA_TYPE_TIMESTAMP;
strcpy(pSchema->name, "ts");
}

{
SSchema* pSchema = &schemas[1];
pSchema->bytes = htonl(4);
pSchema->type = TSDB_DATA_TYPE_INT;
strcpy(pSchema->name, "col1");
}

{
SSchema* pSchema = &schemas[2];
pSchema->bytes = htonl(2);
pSchema->type = TSDB_DATA_TYPE_TINYINT;
strcpy(pSchema->name, "tag1");
}

{
SSchema* pSchema = &schemas[3];
pSchema->bytes = htonl(8);
pSchema->type = TSDB_DATA_TYPE_BIGINT;
strcpy(pSchema->name, "tag2");
}

{
SSchema* pSchema = &schemas[4];
pSchema->bytes = htonl(16);
pSchema->type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema->name, "tag3");
}

req.stbCfg.suid = 9527;
req.stbCfg.nCols = 2;
req.stbCfg.pSchema = &schemas[0];
req.stbCfg.nTagCols = 3;
req.stbCfg.pTagSchema = &schemas[2];

int32_t bsize = tSerializeSVCreateTbReq(NULL, &req);
void* buf = rpcMallocCont(sizeof(SMsgHead) + bsize);
SMsgHead* pMsgHead = (SMsgHead*)buf;

pMsgHead->contLen = htonl(sizeof(SMsgHead) + bsize);
pMsgHead->vgId = htonl(2);

void* pBuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
tSerializeSVCreateTbReq(&pBuf, &req);

int32_t contLen = sizeof(SMsgHead) + bsize;

SRpcMsg* pRsp = test.SendReq(TDMT_VND_CREATE_STB, buf, contLen);
ASSERT_NE(pRsp, nullptr);
if (i == 0) {
ASSERT_EQ(pRsp->code, 0);
test.Restart();
} else {
ASSERT_EQ(pRsp->code, TSDB_CODE_TDB_TABLE_ALREADY_EXIST);
}
}
}

TEST_F(DndTestVnode, 04_ALTER_Stb) {
#if 0
{
for (int i = 0; i < 3; ++i) {
int32_t contLen = sizeof(SAlterVnodeReq);

SAlterVnodeReq* pReq = (SAlterVnodeReq*)rpcMallocCont(contLen);
pReq->vgId = htonl(2);
pReq->dnodeId = htonl(1);
strcpy(pReq->db, "1.d1");
pReq->dbUid = htobe64(9527);
pReq->vgVersion = htonl(2);
pReq->cacheBlockSize = htonl(16);
pReq->totalBlocks = htonl(10);
pReq->daysPerFile = htonl(10);
pReq->daysToKeep0 = htonl(3650);
pReq->daysToKeep1 = htonl(3650);
pReq->daysToKeep2 = htonl(3650);
pReq->minRows = htonl(100);
pReq->minRows = htonl(4096);
pReq->commitTime = htonl(3600);
pReq->fsyncPeriod = htonl(3000);
pReq->walLevel = 1;
pReq->precision = 0;
pReq->compression = 2;
pReq->replica = 1;
pReq->quorum = 1;
pReq->update = 0;
pReq->cacheLastRow = 0;
pReq->selfIndex = 0;
for (int r = 0; r < pReq->replica; ++r) {
SReplica* pReplica = &pReq->replicas[r];
pReplica->id = htonl(1);
pReplica->port = htons(9527);
}

SRpcMsg* pRsp = test.SendReq(TDMT_DND_ALTER_VNODE, pReq, contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_VND_ALTER_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
}
}
#endif
}

TEST_F(DndTestVnode, 05_DROP_Stb) {
#if 0
{
for (int i = 0; i < 3; ++i) {
int32_t contLen = sizeof(SDropVnodeReq);

SDropVnodeReq* pReq = (SDropVnodeReq*)rpcMallocCont(contLen);
pReq->vgId = htonl(2);
pReq->dnodeId = htonl(1);
strcpy(pReq->db, "1.d1");
pReq->dbUid = htobe64(9527);

SRpcMsg rpcMsg = {0};
rpcMsg.pCont = pReq;
rpcMsg.contLen = sizeof(SDropVnodeReq);
rpcMsg.msgType = TDMT_DND_DROP_VNODE;

SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_VNODE, pReq, contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_VND_DROP_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
if (i == 0) {
ASSERT_EQ(pRsp->code, 0);
test.Restart();
} else {
ASSERT_EQ(pRsp->code, TSDB_CODE_DND_VNODE_NOT_DEPLOYED);
ASSERT_EQ(pRsp->code, TSDB_CODE_TDB_INVALID_TABLE_ID);
}
}
}
#endif
}

TEST_F(DndTestVnode, 06_DROP_Vnode) {
for (int i = 0; i < 3; ++i) {
int32_t contLen = sizeof(SDropVnodeReq);

SDropVnodeReq* pReq = (SDropVnodeReq*)rpcMallocCont(contLen);
pReq->vgId = htonl(2);
pReq->dnodeId = htonl(1);
strcpy(pReq->db, "1.d1");
pReq->dbUid = htobe64(9527);

SRpcMsg rpcMsg = {0};
rpcMsg.pCont = pReq;
rpcMsg.contLen = sizeof(SDropVnodeReq);
rpcMsg.msgType = TDMT_DND_DROP_VNODE;

SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_VNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
if (i == 0) {
ASSERT_EQ(pRsp->code, 0);
test.Restart();
} else {
ASSERT_EQ(pRsp->code, TSDB_CODE_DND_VNODE_NOT_DEPLOYED);
}
}
}
@@ -325,6 +325,19 @@ typedef struct SMqTopicConsumer {
} SMqTopicConsumer;
#endif

typedef struct SMqConsumerEp {
int32_t vgId;
SEpSet epset;
int64_t consumerId;
} SMqConsumerEp;

typedef struct SMqCgroupTopicPair {
char key[TSDB_CONSUMER_GROUP_LEN + TSDB_TOPIC_FNAME_LEN];
SArray* assigned; // SArray<SMqConsumerEp>
SArray* unassignedConsumer;
SArray* unassignedVg;
} SMqCgroupTopicPair;

typedef struct SMqCGroup {
char name[TSDB_CONSUMER_GROUP_LEN];
int32_t status; // 0 - uninitialized, 1 - wait rebalance, 2- normal

@@ -351,10 +364,11 @@ typedef struct SMqTopicObj {

// TODO: add cache and change name to id
typedef struct SMqConsumerTopic {
char name[TSDB_TOPIC_FNAME_LEN];
int32_t epoch;
char name[TSDB_TOPIC_NAME_LEN];
//TODO: replace with something with ep
SList *vgroups; // SList<int32_t>
SArray *pVgInfo; // SArray<int32_t>
} SMqConsumerTopic;

typedef struct SMqConsumerObj {

@@ -362,7 +376,7 @@ typedef struct SMqConsumerObj {
SRWLatch lock;
char cgroup[TSDB_CONSUMER_GROUP_LEN];
SArray *topics; // SArray<SMqConsumerTopic>
SHashObj *topicHash;
SHashObj *topicHash; //SHashObj<SMqConsumerTopic>
} SMqConsumerObj;

typedef struct SMqSubConsumerObj {
@@ -16,9 +16,18 @@
#define _DEFAULT_SOURCE
#include "mndAuth.h"

int32_t mndInitAuth(SMnode *pMnode) { return 0; }
void mndCleanupAuth(SMnode *pMnode) {}
static int32_t mndProcessAuthReq(SMnodeMsg *pReq);

int32_t mndRetriveAuth(SMnode *pMnode, char *user, char *spi, char *encrypt, char *secret, char *ckey) {
int32_t mndInitAuth(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_AUTH, mndProcessAuthReq);
return 0;
}

void mndCleanupAuth(SMnode *pMnode) {}

int32_t mndRetriveAuth(SMnode *pMnode, char *user, char *spi, char *encrypt, char *secret, char *ckey) { return 0; }

static int32_t mndProcessAuthReq(SMnodeMsg *pReq) {
mDebug("user:%s, auth req is processed", pReq->user);
return 0;
}
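Not part of the diff: the auth module now registers its message handler during init via mndSetMsgHandle. A sketch of the same registration pattern for a hypothetical mnode module follows; TDMT_MND_EXAMPLE and the example functions are illustrative names only.

static int32_t mndProcessExampleReq(SMnodeMsg *pReq) {
  mDebug("user:%s, example req is processed", pReq->user);
  return 0;
}

int32_t mndInitExample(SMnode *pMnode) {
  // route TDMT_MND_EXAMPLE messages to the handler above, mirroring mndInitAuth
  mndSetMsgHandle(pMnode, TDMT_MND_EXAMPLE, mndProcessExampleReq);
  return 0;
}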
@@ -204,34 +204,37 @@ void mndReleaseConsumer(SMnode *pMnode, SMqConsumerObj *pConsumer) {
static int32_t mndProcessSubscribeReq(SMnodeMsg *pMsg) {
SMnode *pMnode = pMsg->pMnode;
char *msgStr = pMsg->rpcMsg.pCont;
SCMSubscribeReq *pSubscribe;
tDeserializeSCMSubscribeReq(msgStr, pSubscribe);
int64_t consumerId = pSubscribe->consumerId;
char *consumerGroup = pSubscribe->consumerGroup;
SCMSubscribeReq subscribe;
tDeserializeSCMSubscribeReq(msgStr, &subscribe);
int64_t consumerId = subscribe.consumerId;
char *consumerGroup = subscribe.consumerGroup;
int32_t cgroupLen = strlen(consumerGroup);

SArray *newSub = NULL;
int newTopicNum = pSubscribe->topicNum;
int newTopicNum = subscribe.topicNum;
if (newTopicNum) {
newSub = taosArrayInit(newTopicNum, sizeof(SMqConsumerTopic));
}
SMqConsumerTopic *pConsumerTopics = calloc(newTopicNum, sizeof(SMqConsumerTopic));
if (pConsumerTopics == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
for (int i = 0; i < newTopicNum; i++) {
char *newTopicName = taosArrayGetP(newSub, i);
SMqConsumerTopic *pConsumerTopic = malloc(sizeof(SMqConsumerTopic));
if (pConsumerTopic == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
// TODO: free
return -1;
}
SMqConsumerTopic *pConsumerTopic = &pConsumerTopics[i];

strcpy(pConsumerTopic->name, newTopicName);
pConsumerTopic->vgroups = tdListNew(sizeof(int64_t));
taosArrayPush(newSub, pConsumerTopic);
free(pConsumerTopic);
}

taosArrayAddBatch(newSub, pConsumerTopics, newTopicNum);
free(pConsumerTopics);
taosArraySortString(newSub, taosArrayCompareString);

SArray *oldSub = NULL;
int oldTopicNum = 0;
// create consumer if not exist
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
if (pConsumer == NULL) {
// create consumer

@@ -249,6 +252,7 @@ static int32_t mndProcessSubscribeReq(SMnodeMsg *pMsg) {
}
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, &pMsg->rpcMsg);
if (pTrans == NULL) {
//TODO: free memory
return -1;
}

@@ -286,6 +290,7 @@ static int32_t mndProcessSubscribeReq(SMnodeMsg *pMsg) {
}

if (pOldTopic != NULL) {
//cancel subscribe of that old topic
ASSERT(pNewTopic == NULL);
char *oldTopicName = pOldTopic->name;
SList *vgroups = pOldTopic->vgroups;

@@ -298,13 +303,14 @@ static int32_t mndProcessSubscribeReq(SMnodeMsg *pMsg) {
SMqCGroup *pGroup = taosHashGet(pTopic->cgroups, consumerGroup, cgroupLen);
while ((pn = tdListNext(&iter)) != NULL) {
int32_t vgId = *(int64_t *)pn->data;
// acquire and get epset
SVgObj *pVgObj = mndAcquireVgroup(pMnode, vgId);
// TODO release
// TODO what time to release?
if (pVgObj == NULL) {
// TODO handle error
continue;
}
// acquire and get epset
//build reset msg
void *pMqVgSetReq = mndBuildMqVGroupSetReq(pMnode, oldTopicName, vgId, consumerId, consumerGroup);
// TODO:serialize
if (pMsg == NULL) {

@@ -323,10 +329,12 @@ static int32_t mndProcessSubscribeReq(SMnodeMsg *pMsg) {
return -1;
}
}
//delete data in mnode
taosHashRemove(pTopic->cgroups, consumerGroup, cgroupLen);
mndReleaseTopic(pMnode, pTopic);

} else if (pNewTopic != NULL) {
// save subscribe info to mnode
ASSERT(pOldTopic == NULL);

char *newTopicName = pNewTopic->name;

@@ -351,6 +359,7 @@ static int32_t mndProcessSubscribeReq(SMnodeMsg *pMsg) {
// add into cgroups
taosHashPut(pTopic->cgroups, consumerGroup, cgroupLen, pGroup, sizeof(SMqCGroup));
}
/*taosHashPut(pTopic->consumers, &pConsumer->consumerId, sizeof(int64_t), pConsumer, sizeof(SMqConsumerObj));*/

// put the consumer into list
// rebalance will be triggered by timer
@@ -357,10 +357,13 @@ static int32_t mndProcessHeartBeatReq(SMnodeMsg *pReq) {
}
}
}
taosArrayDestroyEx(pArray, tFreeClientHbReq);

int32_t tlen = tSerializeSClientHbBatchRsp(NULL, &batchRsp);
void* buf = rpcMallocCont(tlen);
void* abuf = buf;
tSerializeSClientHbBatchRsp(&abuf, &batchRsp);
taosArrayDestroy(batchRsp.rsps);
pReq->contLen = tlen;
pReq->pCont = buf;
return 0;
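Not part of the diff: the response above is built with the usual two-pass serialization idiom, a NULL-buffer pass to size the payload followed by a second pass into an rpc buffer. A hedged sketch with a generic message type (SSomeRsp and tSerializeSSomeRsp are illustrative names):

SSomeRsp rsp = {0};                              // illustrative message type
int32_t tlen = tSerializeSSomeRsp(NULL, &rsp);   // pass 1: compute the encoded size
void* buf  = rpcMallocCont(tlen);                // allocate the rpc response buffer
void* abuf = buf;                                // keep buf intact for sending
tSerializeSSomeRsp(&abuf, &rsp);                 // pass 2: encode; abuf advances, buf does not
// hand buf/tlen to the rpc layer as pCont/contLen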
@@ -69,6 +69,17 @@ static void mndTransReExecute(void *param, void *tmrId) {
taosTmrReset(mndTransReExecute, 3000, pMnode, pMnode->timer, &pMnode->transTimer);
}

static void mndCalMqRebalance(void* param, void* tmrId) {
SMnode* pMnode = param;
if (mndIsMaster(pMnode)) {
// iterate cgroup, cal rebalance
// sync with raft
// write sdb
}

taosTmrReset(mndCalMqRebalance, 3000, pMnode, pMnode->timer, &pMnode->transTimer);
}

static int32_t mndInitTimer(SMnode *pMnode) {
if (pMnode->timer == NULL) {
pMnode->timer = taosTmrInit(5000, 200, 3600000, "MND");
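Not part of the diff: mndCalMqRebalance follows the mnode's periodic-timer pattern, in which a callback re-arms itself with taosTmrReset each time it fires. A hedged sketch of that pattern (the initial arming inside mndInitTimer is not shown in the hunk, and the 3000 ms period simply mirrors the diff):

static void mndPeriodicJob(void *param, void *tmrId) {
  SMnode *pMnode = param;
  if (mndIsMaster(pMnode)) {
    // periodic work runs only on the master
  }
  // re-arm so the callback keeps firing every 3000 ms, as mndCalMqRebalance does
  taosTmrReset(mndPeriodicJob, 3000, pMnode, pMnode->timer, &pMnode->transTimer);
}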
@@ -3454,6 +3454,7 @@ void filterPrepare(void* expr, void* param) {
}
}


static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) {
STableGroupSupporter* pTableGroupSupp = (STableGroupSupporter*) param;
STable* pTable1 = ((STableKeyInfo*) p1)->pTable;

@@ -3537,8 +3538,6 @@ void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTable
int32_t ret = compareFn(prev, p, pSupp);
assert(ret == 0 || ret == -1);

// assert((*p)->type == TSDB_CHILD_TABLE);

if (ret == 0) {
STableKeyInfo info1 = {.pTable = *p, .lastKey = skey};
taosArrayPush(g, &info1);

@@ -3554,7 +3553,6 @@ void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTable
taosArrayPush(pGroups, &g);
}

#if 0
SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols, TSKEY skey) {
assert(pTableList != NULL);
SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES);

@@ -3587,145 +3585,138 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC
sup.pTagSchema = pTagSchema;
sup.pCols = pCols;

taosqsort(pTableList->pData, size, sizeof(STableKeyInfo), &sup, tableGroupComparFn);
createTableGroupImpl(pTableGroup, pTableList, size, skey, &sup, tableGroupComparFn);
// taosqsort(pTableList->pData, size, sizeof(STableKeyInfo), &sup, tableGroupComparFn);
// createTableGroupImpl(pTableGroup, pTableList, size, skey, &sup, tableGroupComparFn);
}

return pTableGroup;
}

static bool tableFilterFp(const void* pNode, void* param) {
tQueryInfo* pInfo = (tQueryInfo*) param;
//static bool tableFilterFp(const void* pNode, void* param) {
// tQueryInfo* pInfo = (tQueryInfo*) param;
//
// STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode));
//
// char* val = NULL;
// if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
// val = (char*) TABLE_NAME(pTable);
// } else {
// val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
// }
//
// if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) {
// if (pInfo->optr == TSDB_RELATION_ISNULL) {
// return (val == NULL) || isNull(val, pInfo->sch.type);
// } else if (pInfo->optr == TSDB_RELATION_NOTNULL) {
// return (val != NULL) && (!isNull(val, pInfo->sch.type));
// }
// } else if (pInfo->optr == TSDB_RELATION_IN) {
// int type = pInfo->sch.type;
// if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) {
// int64_t v;
// GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val);
// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
// } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
// uint64_t v;
// GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val);
// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
// }
// else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
// double v;
// GET_TYPED_DATA(v, double, pInfo->sch.type, val);
// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
// } else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR){
// return NULL != taosHashGet((SHashObj *)pInfo->q, varDataVal(val), varDataLen(val));
// }
//
// }
//
// int32_t ret = 0;
// if (val == NULL) { //the val is possible to be null, so check it out carefully
// ret = -1; // val is missing in table tags value pairs
// } else {
// ret = pInfo->compare(val, pInfo->q);
// }
//
// switch (pInfo->optr) {
// case TSDB_RELATION_EQUAL: {
// return ret == 0;
// }
// case TSDB_RELATION_NOT_EQUAL: {
// return ret != 0;
// }
// case TSDB_RELATION_GREATER_EQUAL: {
// return ret >= 0;
// }
// case TSDB_RELATION_GREATER: {
// return ret > 0;
// }
// case TSDB_RELATION_LESS_EQUAL: {
// return ret <= 0;
// }
// case TSDB_RELATION_LESS: {
// return ret < 0;
// }
// case TSDB_RELATION_LIKE: {
// return ret == 0;
// }
// case TSDB_RELATION_MATCH: {
// return ret == 0;
// }
// case TSDB_RELATION_NMATCH: {
// return ret == 0;
// }
// case TSDB_RELATION_IN: {
// return ret == 1;
// }
//
// default:
// assert(false);
// }
//
// return true;
//}

STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode));
//static void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param);

char* val = NULL;
if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
val = (char*) TABLE_NAME(pTable);
} else {
val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
}

if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) {
if (pInfo->optr == TSDB_RELATION_ISNULL) {
return (val == NULL) || isNull(val, pInfo->sch.type);
} else if (pInfo->optr == TSDB_RELATION_NOTNULL) {
return (val != NULL) && (!isNull(val, pInfo->sch.type));
}
} else if (pInfo->optr == TSDB_RELATION_IN) {
int type = pInfo->sch.type;
if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) {
int64_t v;
GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val);
return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
uint64_t v;
GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val);
return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
}
else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
double v;
GET_TYPED_DATA(v, double, pInfo->sch.type, val);
return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
} else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR){
return NULL != taosHashGet((SHashObj *)pInfo->q, varDataVal(val), varDataLen(val));
}

}

int32_t ret = 0;
if (val == NULL) { //the val is possible to be null, so check it out carefully
ret = -1; // val is missing in table tags value pairs
} else {
ret = pInfo->compare(val, pInfo->q);
}

switch (pInfo->optr) {
case TSDB_RELATION_EQUAL: {
return ret == 0;
}
case TSDB_RELATION_NOT_EQUAL: {
return ret != 0;
}
case TSDB_RELATION_GREATER_EQUAL: {
return ret >= 0;
}
case TSDB_RELATION_GREATER: {
return ret > 0;
}
case TSDB_RELATION_LESS_EQUAL: {
return ret <= 0;
}
case TSDB_RELATION_LESS: {
return ret < 0;
}
case TSDB_RELATION_LIKE: {
return ret == 0;
}
case TSDB_RELATION_MATCH: {
return ret == 0;
}
case TSDB_RELATION_NMATCH: {
return ret == 0;
}
case TSDB_RELATION_IN: {
return ret == 1;
}

default:
assert(false);
}

return true;
}

static void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param);

static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) {
// query according to the expression tree
SExprTraverseSupp supp = {
.nodeFilterFn = (__result_filter_fn_t) tableFilterFp,
.setupInfoFn = filterPrepare,
.pExtInfo = pSTable->tagSchema,
};

getTableListfromSkipList(pExpr, pSTable->pIndex, pRes, &supp);
tExprTreeDestroy(pExpr, destroyHelper);
return TSDB_CODE_SUCCESS;
}
//static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) {
// // query according to the expression tree
// SExprTraverseSupp supp = {
// .nodeFilterFn = (__result_filter_fn_t) tableFilterFp,
// .setupInfoFn = filterPrepare,
// .pExtInfo = pSTable->tagSchema,
// };
//
// getTableListfromSkipList(pExpr, pSTable->pIndex, pRes, &supp);
// tExprTreeDestroy(pExpr, destroyHelper);
// return TSDB_CODE_SUCCESS;
//}

int32_t tsdbQuerySTableByTagCond(STsdb* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len,
int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo,
SColIndex* pColIndex, int32_t numOfCols) {
if (tsdbRLockRepoMeta(tsdb) < 0) goto _error;

STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid);
if (pTable == NULL) {
tsdbError("%p failed to get stable, uid:%" PRIu64, tsdb, uid);
SColIndex* pColIndex, int32_t numOfCols, uint64_t reqId) {
STbCfg* pTbCfg = metaGetTbInfoByUid(tsdb->pMeta, uid);
if (pTbCfg == NULL) {
tsdbError("%p failed to get stable, uid:%"PRIu64", reqId:0x%"PRIx64, tsdb, uid, reqId);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
tsdbUnlockRepoMeta(tsdb);

goto _error;
}

if (pTable->type != TSDB_SUPER_TABLE) {
tsdbError("%p query normal tag not allowed, uid:%" PRIu64 ", tid:%d, name:%s", tsdb, uid, pTable->tableId,
pTable->name->data);
terrno = TSDB_CODE_COM_OPS_NOT_SUPPORT; //basically, this error is caused by invalid sql issued by client

tsdbUnlockRepoMeta(tsdb);
if (pTbCfg->type != META_SUPER_TABLE) {
tsdbError("%p query normal tag not allowed, uid:%" PRIu64 ", reId:0x%"PRIx64, tsdb, uid, reqId);
terrno = TSDB_CODE_OPS_NOT_SUPPORT; //basically, this error is caused by invalid sql issued by client
goto _error;
}

//NOTE: not add ref count for super table
SArray* res = taosArrayInit(8, sizeof(STableKeyInfo));
STSchema* pTagSchema = tsdbGetTableTagSchema(pTable);
STSchema* pTagSchema = metaGetTableSchema(tsdb->pMeta, uid, 0, true);

// no tags and tbname condition, all child tables of this stable are involved
if (tbnameCond == NULL && (pTagCond == NULL || len == 0)) {
int32_t ret = getAllTableList(pTable, res);
assert(false);
int32_t ret = 0;//getAllTableList(pTable, res);
if (ret != TSDB_CODE_SUCCESS) {
tsdbUnlockRepoMeta(tsdb);
goto _error;
}

@@ -3736,60 +3727,60 @@ int32_t tsdbQuerySTableByTagCond(STsdb* tsdb, uint64_t uid, TSKEY skey, const ch
pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList));

taosArrayDestroy(res);
if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error;
return ret;
}

int32_t ret = TSDB_CODE_SUCCESS;
tExprNode* expr = NULL;

TRY(TSDB_MAX_TAG_CONDITIONS) {
expr = exprTreeFromTableName(tbnameCond);
if (expr == NULL) {
expr = exprTreeFromBinary(pTagCond, len);
} else {
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, expr, NULL);
tExprNode* tagExpr = exprTreeFromBinary(pTagCond, len);
if (tagExpr != NULL) {
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, tagExpr, NULL);
tExprNode* tbnameExpr = expr;
expr = calloc(1, sizeof(tExprNode));
if (expr == NULL) {
THROW( TSDB_CODE_TDB_OUT_OF_MEMORY );
}
expr->nodeType = TSQL_NODE_EXPR;
expr->_node.optr = (uint8_t)tagNameRelType;
expr->_node.pLeft = tagExpr;
expr->_node.pRight = tbnameExpr;
}
}
CLEANUP_EXECUTE();

} CATCH( code ) {
CLEANUP_EXECUTE();
terrno = code;
tsdbUnlockRepoMeta(tsdb); // unlock tsdb in any cases

goto _error;
// TODO: more error handling
} END_TRY

doQueryTableList(pTable, res, expr);
pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res);
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey);

tsdbDebug("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%u, belong to %" PRIzu " groups", tsdb, pTable->tableId,
pTable->uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList));

taosArrayDestroy(res);

if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error;
return ret;
// tExprNode* expr = NULL;
//
// TRY(TSDB_MAX_TAG_CONDITIONS) {
// expr = exprTreeFromTableName(tbnameCond);
// if (expr == NULL) {
// expr = exprTreeFromBinary(pTagCond, len);
// } else {
// CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, expr, NULL);
// tExprNode* tagExpr = exprTreeFromBinary(pTagCond, len);
// if (tagExpr != NULL) {
// CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, tagExpr, NULL);
// tExprNode* tbnameExpr = expr;
// expr = calloc(1, sizeof(tExprNode));
// if (expr == NULL) {
// THROW( TSDB_CODE_TDB_OUT_OF_MEMORY );
// }
// expr->nodeType = TSQL_NODE_EXPR;
// expr->_node.optr = (uint8_t)tagNameRelType;
// expr->_node.pLeft = tagExpr;
// expr->_node.pRight = tbnameExpr;
// }
// }
// CLEANUP_EXECUTE();
//
// } CATCH( code ) {
// CLEANUP_EXECUTE();
// terrno = code;
// tsdbUnlockRepoMeta(tsdb); // unlock tsdb in any cases
//
// goto _error;
// // TODO: more error handling
// } END_TRY
//
// doQueryTableList(pTable, res, expr);
// pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res);
// pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey);
//
// tsdbDebug("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%u, belong to %" PRIzu " groups", tsdb, pTable->tableId,
// pTable->uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList));
//
// taosArrayDestroy(res);
//
// if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error;
// return ret;

_error:
return terrno;
}

#if 0
int32_t tsdbGetOneTableGroup(STsdb* tsdb, uint64_t uid, TSKEY startKey, STableGroupInfo* pGroupInfo) {
if (tsdbRLockRepoMeta(tsdb) < 0) goto _error;
@@ -32,7 +32,7 @@ typedef struct SDataSinkManager {
} SDataSinkManager;

typedef int32_t (*FPutDataBlock)(struct SDataSinkHandle* pHandle, const SInputData* pInput, bool* pContinue);
typedef void (*FEndPut)(struct SDataSinkHandle* pHandle, int64_t useconds);
typedef void (*FEndPut)(struct SDataSinkHandle* pHandle, uint64_t useconds);
typedef void (*FGetDataLength)(struct SDataSinkHandle* pHandle, int32_t* pLen, bool* pQueryEnd);
typedef int32_t (*FGetDataBlock)(struct SDataSinkHandle* pHandle, SOutputData* pOutput);
typedef int32_t (*FDestroyDataSinker)(struct SDataSinkHandle* pHandle);
@@ -597,7 +597,6 @@ void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity);
void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput);

void freeParam(STaskParam *param);
int32_t convertQueryMsg(SQueryTableReq *pQueryMsg, STaskParam* param);
int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExprInfo** pExprInfo,
SSqlExpr** pExprMsg, SColumnInfo* pTagCols, int32_t queryType, void* pMsg, struct SUdfInfo* pUdfInfo);

@@ -638,7 +637,8 @@ size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows);
void setQueryKilled(SQInfo *pQInfo);

void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType);
void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code);
void publishQueryAbortEvent(SExecTaskInfo * pTaskInfo, int32_t code);

void calculateOperatorProfResults(SQInfo* pQInfo);
void queryCostStatis(SQInfo *pQInfo);
@@ -44,7 +44,7 @@ typedef struct SDataDispatchHandle {
SDataDispatchBuf nextOutput;
int32_t status;
bool queryEnd;
int64_t useconds;
uint64_t useconds;
pthread_mutex_t mutex;
} SDataDispatchHandle;

@@ -158,7 +158,7 @@ static int32_t putDataBlock(SDataSinkHandle* pHandle, const SInputData* pInput,
return TSDB_CODE_SUCCESS;
}

static void endPut(struct SDataSinkHandle* pHandle, int64_t useconds) {
static void endPut(struct SDataSinkHandle* pHandle, uint64_t useconds) {
SDataDispatchHandle* pDispatcher = (SDataDispatchHandle*)pHandle;
pthread_mutex_lock(&pDispatcher->mutex);
pDispatcher->queryEnd = true;
@@ -37,7 +37,7 @@ int32_t dsPutDataBlock(DataSinkHandle handle, const SInputData* pInput, bool* pC
return pHandleImpl->fPut(pHandleImpl, pInput, pContinue);
}

void dsEndPut(DataSinkHandle handle, int64_t useconds) {
void dsEndPut(DataSinkHandle handle, uint64_t useconds) {
SDataSinkHandle* pHandleImpl = (SDataSinkHandle*)handle;
return pHandleImpl->fEndPut(pHandleImpl, useconds);
}
@@ -68,7 +68,7 @@ void freeParam(STaskParam *param) {
tfree(param->prevResult);
}

int32_t qCreateExecTask(void* tsdb, int32_t vgId, SSubplan* pSubplan, qTaskInfo_t* pTaskInfo) {
int32_t qCreateExecTask(void* tsdb, int32_t vgId, SSubplan* pSubplan, qTaskInfo_t* pTaskInfo, DataSinkHandle* handle) {
assert(tsdb != NULL && pSubplan != NULL);
SExecTaskInfo** pTask = (SExecTaskInfo**)pTaskInfo;

@@ -85,6 +85,8 @@ int32_t qCreateExecTask(void* tsdb, int32_t vgId, SSubplan* pSubplan, qTaskInfo_

code = dsCreateDataSinker(pSubplan->pDataSink, &(*pTask)->dsHandle);

*handle = (*pTask)->dsHandle;

_error:
// if failed to add ref for all tables in this query, abort current query
return code;

@@ -135,10 +137,12 @@ int waitMoment(SQInfo* pQInfo){
}
#endif

int32_t qExecTask(qTaskInfo_t tinfo, DataSinkHandle* handle) {
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t *useconds) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int64_t threadId = taosGetSelfPthreadId();

*pRes = NULL;

int64_t curOwner = 0;
if ((curOwner = atomic_val_compare_exchange_64(&pTaskInfo->owner, 0, threadId)) != 0) {
qError("QInfo:0x%" PRIx64 "-%p qhandle is now executed by thread:%p", GET_TASKID(pTaskInfo), pTaskInfo,

@@ -153,7 +157,7 @@ int32_t qExecTask(qTaskInfo_t tinfo, DataSinkHandle* handle) {

if (isTaskKilled(pTaskInfo)) {
qDebug("QInfo:0x%" PRIx64 " it is already killed, abort", GET_TASKID(pTaskInfo));
return pTaskInfo->code;
return TSDB_CODE_SUCCESS;
}

// STaskRuntimeEnv* pRuntimeEnv = &pTaskInfo->runtimeEnv;

@@ -168,7 +172,8 @@ int32_t qExecTask(qTaskInfo_t tinfo, DataSinkHandle* handle) {
if (ret != TSDB_CODE_SUCCESS) {
publishQueryAbortEvent(pTaskInfo, ret);
pTaskInfo->code = ret;
qDebug("QInfo:0x%" PRIx64 " query abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
qDebug("QInfo:0x%" PRIx64 " query abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo),
tstrerror(pTaskInfo->code));
return pTaskInfo->code;
}

@@ -178,39 +183,21 @@ int32_t qExecTask(qTaskInfo_t tinfo, DataSinkHandle* handle) {
publishOperatorProfEvent(pTaskInfo->pRoot, QUERY_PROF_BEFORE_OPERATOR_EXEC);
int64_t st = 0;

if (handle) {
*handle = pTaskInfo->dsHandle;
st = taosGetTimestampUs();
*pRes = pTaskInfo->pRoot->exec(pTaskInfo->pRoot, &newgroup);

pTaskInfo->cost.elapsedTime += (taosGetTimestampUs() - st);
publishOperatorProfEvent(pTaskInfo->pRoot, QUERY_PROF_AFTER_OPERATOR_EXEC);

if (NULL == *pRes) {
*useconds = pTaskInfo->cost.elapsedTime;
}

while(1) {
st = taosGetTimestampUs();
SSDataBlock* pRes = pTaskInfo->pRoot->exec(pTaskInfo->pRoot, &newgroup);

pTaskInfo->cost.elapsedTime += (taosGetTimestampUs() - st);
publishOperatorProfEvent(pTaskInfo->pRoot, QUERY_PROF_AFTER_OPERATOR_EXEC);
qDebug("QInfo:0x%" PRIx64 " query paused, %d rows returned, total:%" PRId64 " rows, in sinkNode:%d",
GET_TASKID(pTaskInfo), 0, 0L, 0);

if (pRes == NULL) { // no results generated yet, abort
dsEndPut(pTaskInfo->dsHandle, pTaskInfo->cost.elapsedTime);
return pTaskInfo->code;
}

bool qcontinue = false;
SInputData inputData = {.pData = pRes, .pTableRetrieveTsMap = NULL};
pTaskInfo->code = dsPutDataBlock(pTaskInfo->dsHandle, &inputData, &qcontinue);

if (isTaskKilled(pTaskInfo)) {
qDebug("QInfo:0x%" PRIx64 " task is killed", GET_TASKID(pTaskInfo));
// } else if (GET_NUM_OF_RESULTS(pRuntimeEnv) == 0) {
// qDebug("QInfo:0x%"PRIx64" over, %u tables queried, total %"PRId64" rows returned", pTaskInfo->qId, pRuntimeEnv->tableqinfoGroupInfo.numOfTables,
// pRuntimeEnv->resultInfo.total);
}

if (!qcontinue) {
qDebug("QInfo:0x%"PRIx64" query paused, %d rows returned, total:%" PRId64 " rows, in sinkNode:%d", GET_TASKID(pTaskInfo),
0, 0L, 0);
return pTaskInfo->code;
}
}
atomic_store_64(&pTaskInfo->owner, 0);
return pTaskInfo->code;
}

int32_t qRetrieveQueryResultInfo(qTaskInfo_t qinfo, bool* buildRes, void* pRspContext) {
@@ -4186,14 +4186,14 @@ void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType e
}
}

void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code) {
void publishQueryAbortEvent(SExecTaskInfo * pTaskInfo, int32_t code) {
SQueryProfEvent event;
event.eventType = QUERY_PROF_QUERY_ABORT;
event.eventTime = taosGetTimestampUs();
event.abortCode = code;

if (pQInfo->summary.queryProfEvents) {
taosArrayPush(pQInfo->summary.queryProfEvents, &event);
if (pTaskInfo->cost.queryProfEvents) {
taosArrayPush(pTaskInfo->cost.queryProfEvents, &event);
}
}
@@ -7423,358 +7423,358 @@ int32_t doCreateExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, void* r
* @param pExpr
* @return
*/
int32_t convertQueryMsg(SQueryTableReq *pQueryMsg, STaskParam* param) {
int32_t code = TSDB_CODE_SUCCESS;

// if (taosCheckVersion(pQueryMsg->version, version, 3) != 0) {
// return TSDB_CODE_QRY_INVALID_MSG;
//int32_t convertQueryMsg(SQueryTableReq *pQueryMsg, STaskParam* param) {
// int32_t code = TSDB_CODE_SUCCESS;
//
//// if (taosCheckVersion(pQueryMsg->version, version, 3) != 0) {
//// return TSDB_CODE_QRY_INVALID_MSG;
//// }
//
// pQueryMsg->numOfTables = htonl(pQueryMsg->numOfTables);
// pQueryMsg->window.skey = htobe64(pQueryMsg->window.skey);
// pQueryMsg->window.ekey = htobe64(pQueryMsg->window.ekey);
// pQueryMsg->interval.interval = htobe64(pQueryMsg->interval.interval);
// pQueryMsg->interval.sliding = htobe64(pQueryMsg->interval.sliding);
// pQueryMsg->interval.offset = htobe64(pQueryMsg->interval.offset);
// pQueryMsg->limit = htobe64(pQueryMsg->limit);
// pQueryMsg->offset = htobe64(pQueryMsg->offset);
// pQueryMsg->vgroupLimit = htobe64(pQueryMsg->vgroupLimit);
//
// pQueryMsg->order = htons(pQueryMsg->order);
// pQueryMsg->orderColId = htons(pQueryMsg->orderColId);
// pQueryMsg->queryType = htonl(pQueryMsg->queryType);
//// pQueryMsg->tagNameRelType = htons(pQueryMsg->tagNameRelType);
//
// pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols);
// pQueryMsg->numOfOutput = htons(pQueryMsg->numOfOutput);
// pQueryMsg->numOfGroupCols = htons(pQueryMsg->numOfGroupCols);
//
// pQueryMsg->tagCondLen = htons(pQueryMsg->tagCondLen);
// pQueryMsg->colCondLen = htons(pQueryMsg->colCondLen);
//
// pQueryMsg->tsBuf.tsOffset = htonl(pQueryMsg->tsBuf.tsOffset);
// pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen);
// pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks);
// pQueryMsg->tsBuf.tsOrder = htonl(pQueryMsg->tsBuf.tsOrder);
//
// pQueryMsg->numOfTags = htonl(pQueryMsg->numOfTags);
//// pQueryMsg->tbnameCondLen = htonl(pQueryMsg->tbnameCondLen);
// pQueryMsg->secondStageOutput = htonl(pQueryMsg->secondStageOutput);
// pQueryMsg->sqlstrLen = htonl(pQueryMsg->sqlstrLen);
// pQueryMsg->prevResultLen = htonl(pQueryMsg->prevResultLen);
//// pQueryMsg->sw.gap = htobe64(pQueryMsg->sw.gap);
//// pQueryMsg->sw.primaryColId = htonl(pQueryMsg->sw.primaryColId);
// pQueryMsg->tableScanOperator = htonl(pQueryMsg->tableScanOperator);
// pQueryMsg->numOfOperator = htonl(pQueryMsg->numOfOperator);
// pQueryMsg->udfContentOffset = htonl(pQueryMsg->udfContentOffset);
// pQueryMsg->udfContentLen = htonl(pQueryMsg->udfContentLen);
// pQueryMsg->udfNum = htonl(pQueryMsg->udfNum);
//
// // query msg safety check
// if (!validateQueryMsg(pQueryMsg)) {
// code = TSDB_CODE_QRY_INVALID_MSG;
// goto _cleanup;
// }

pQueryMsg->numOfTables = htonl(pQueryMsg->numOfTables);
pQueryMsg->window.skey = htobe64(pQueryMsg->window.skey);
pQueryMsg->window.ekey = htobe64(pQueryMsg->window.ekey);
pQueryMsg->interval.interval = htobe64(pQueryMsg->interval.interval);
pQueryMsg->interval.sliding = htobe64(pQueryMsg->interval.sliding);
pQueryMsg->interval.offset = htobe64(pQueryMsg->interval.offset);
pQueryMsg->limit = htobe64(pQueryMsg->limit);
pQueryMsg->offset = htobe64(pQueryMsg->offset);
pQueryMsg->vgroupLimit = htobe64(pQueryMsg->vgroupLimit);

pQueryMsg->order = htons(pQueryMsg->order);
pQueryMsg->orderColId = htons(pQueryMsg->orderColId);
pQueryMsg->queryType = htonl(pQueryMsg->queryType);
// pQueryMsg->tagNameRelType = htons(pQueryMsg->tagNameRelType);

pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols);
pQueryMsg->numOfOutput = htons(pQueryMsg->numOfOutput);
pQueryMsg->numOfGroupCols = htons(pQueryMsg->numOfGroupCols);

pQueryMsg->tagCondLen = htons(pQueryMsg->tagCondLen);
pQueryMsg->colCondLen = htons(pQueryMsg->colCondLen);

pQueryMsg->tsBuf.tsOffset = htonl(pQueryMsg->tsBuf.tsOffset);
pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen);
pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks);
pQueryMsg->tsBuf.tsOrder = htonl(pQueryMsg->tsBuf.tsOrder);

pQueryMsg->numOfTags = htonl(pQueryMsg->numOfTags);
// pQueryMsg->tbnameCondLen = htonl(pQueryMsg->tbnameCondLen);
pQueryMsg->secondStageOutput = htonl(pQueryMsg->secondStageOutput);
pQueryMsg->sqlstrLen = htonl(pQueryMsg->sqlstrLen);
pQueryMsg->prevResultLen = htonl(pQueryMsg->prevResultLen);
// pQueryMsg->sw.gap = htobe64(pQueryMsg->sw.gap);
// pQueryMsg->sw.primaryColId = htonl(pQueryMsg->sw.primaryColId);
pQueryMsg->tableScanOperator = htonl(pQueryMsg->tableScanOperator);
pQueryMsg->numOfOperator = htonl(pQueryMsg->numOfOperator);
pQueryMsg->udfContentOffset = htonl(pQueryMsg->udfContentOffset);
pQueryMsg->udfContentLen = htonl(pQueryMsg->udfContentLen);
pQueryMsg->udfNum = htonl(pQueryMsg->udfNum);

// query msg safety check
if (!validateQueryMsg(pQueryMsg)) {
code = TSDB_CODE_QRY_INVALID_MSG;
goto _cleanup;
}
char *pMsg = (char *)(pQueryMsg->tableCols) + sizeof(SColumnInfo) * pQueryMsg->numOfCols;
for (int32_t col = 0; col < pQueryMsg->numOfCols; ++col) {
SColumnInfo *pColInfo = &pQueryMsg->tableCols[col];

pColInfo->colId = htons(pColInfo->colId);
pColInfo->type = htons(pColInfo->type);
pColInfo->bytes = htons(pColInfo->bytes);
pColInfo->flist.numOfFilters = 0;

if (!isValidDataType(pColInfo->type)) {
//qDebug("qmsg:%p, invalid data type in source column, index:%d, type:%d", pQueryMsg, col, pColInfo->type);
code = TSDB_CODE_QRY_INVALID_MSG;
goto _cleanup;
}

/*
int32_t numOfFilters = pColInfo->flist.numOfFilters;
if (numOfFilters > 0) {
pColInfo->flist.filterInfo = calloc(numOfFilters, sizeof(SColumnFilterInfo));
if (pColInfo->flist.filterInfo == NULL) {
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
goto _cleanup;
}
}

code = deserializeColFilterInfo(pColInfo->flist.filterInfo, numOfFilters, &pMsg);
if (code != TSDB_CODE_SUCCESS) {
goto _cleanup;
}
*/
}

if (pQueryMsg->colCondLen > 0) {
param->colCond = calloc(1, pQueryMsg->colCondLen);
if (param->colCond == NULL) {
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
goto _cleanup;
}

memcpy(param->colCond, pMsg, pQueryMsg->colCondLen);
pMsg += pQueryMsg->colCondLen;
}


param->tableScanOperator = pQueryMsg->tableScanOperator;
param->pExpr = calloc(pQueryMsg->numOfOutput, POINTER_BYTES);
if (param->pExpr == NULL) {
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
goto _cleanup;
}

SSqlExpr *pExprMsg = (SSqlExpr *)pMsg;

for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) {
param->pExpr[i] = pExprMsg;

// pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex);
// pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId);
// pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag);
// pExprMsg->colBytes = htons(pExprMsg->colBytes);
// pExprMsg->colType = htons(pExprMsg->colType);

// pExprMsg->resType = htons(pExprMsg->resType);
// pExprMsg->resBytes = htons(pExprMsg->resBytes);
pExprMsg->interBytes = htonl(pExprMsg->interBytes);

// pExprMsg->functionId = htons(pExprMsg->functionId);
pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
// pExprMsg->resColId = htons(pExprMsg->resColId);
// pExprMsg->flist.numOfFilters = htons(pExprMsg->flist.numOfFilters);
pMsg += sizeof(SSqlExpr);

for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType);
pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen);

if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) {
pExprMsg->param[j].pz = pMsg;
pMsg += pExprMsg->param[j].nLen; // one more for the string terminated char.
} else {
pExprMsg->param[j].i = htobe64(pExprMsg->param[j].i);
}
}

// int16_t functionId = pExprMsg->functionId;
// if (functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ || functionId == FUNCTION_TAG_DUMMY) {
// if (!TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { // ignore the column index check for arithmetic expression.
// code = TSDB_CODE_QRY_INVALID_MSG;
//
// char *pMsg = (char *)(pQueryMsg->tableCols) + sizeof(SColumnInfo) * pQueryMsg->numOfCols;
|
||||
// for (int32_t col = 0; col < pQueryMsg->numOfCols; ++col) {
|
||||
// SColumnInfo *pColInfo = &pQueryMsg->tableCols[col];
|
||||
//
|
||||
// pColInfo->colId = htons(pColInfo->colId);
|
||||
// pColInfo->type = htons(pColInfo->type);
|
||||
// pColInfo->bytes = htons(pColInfo->bytes);
|
||||
// pColInfo->flist.numOfFilters = 0;
|
||||
//
|
||||
// if (!isValidDataType(pColInfo->type)) {
|
||||
// //qDebug("qmsg:%p, invalid data type in source column, index:%d, type:%d", pQueryMsg, col, pColInfo->type);
|
||||
// code = TSDB_CODE_QRY_INVALID_MSG;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
//
|
||||
///*
|
||||
// int32_t numOfFilters = pColInfo->flist.numOfFilters;
|
||||
// if (numOfFilters > 0) {
|
||||
// pColInfo->flist.filterInfo = calloc(numOfFilters, sizeof(SColumnFilterInfo));
|
||||
// if (pColInfo->flist.filterInfo == NULL) {
|
||||
// code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
// }
|
||||
|
||||
// if (pExprMsg->flist.numOfFilters > 0) {
|
||||
// pExprMsg->flist.filterInfo = calloc(pExprMsg->flist.numOfFilters, sizeof(SColumnFilterInfo));
|
||||
// }
|
||||
//
|
||||
// deserializeColFilterInfo(pExprMsg->flist.filterInfo, pExprMsg->flist.numOfFilters, &pMsg);
|
||||
pExprMsg = (SSqlExpr *)pMsg;
|
||||
}
|
||||
|
||||
if (pQueryMsg->secondStageOutput) {
|
||||
pExprMsg = (SSqlExpr *)pMsg;
|
||||
param->pSecExpr = calloc(pQueryMsg->secondStageOutput, POINTER_BYTES);
|
||||
|
||||
for (int32_t i = 0; i < pQueryMsg->secondStageOutput; ++i) {
|
||||
param->pSecExpr[i] = pExprMsg;
|
||||
|
||||
// pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex);
|
||||
// pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId);
|
||||
// pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag);
|
||||
// pExprMsg->resType = htons(pExprMsg->resType);
|
||||
// pExprMsg->resBytes = htons(pExprMsg->resBytes);
|
||||
// pExprMsg->colBytes = htons(pExprMsg->colBytes);
|
||||
// pExprMsg->colType = htons(pExprMsg->colType);
|
||||
|
||||
// pExprMsg->functionId = htons(pExprMsg->functionId);
|
||||
pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
|
||||
|
||||
pMsg += sizeof(SSqlExpr);
|
||||
|
||||
for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
|
||||
pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType);
|
||||
pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen);
|
||||
|
||||
if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) {
|
||||
pExprMsg->param[j].pz = pMsg;
|
||||
pMsg += pExprMsg->param[j].nLen; // one more for the string terminated char.
|
||||
} else {
|
||||
pExprMsg->param[j].i = htobe64(pExprMsg->param[j].i);
|
||||
}
|
||||
}
|
||||
|
||||
// int16_t functionId = pExprMsg->functionId;
|
||||
// if (functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ || functionId == FUNCTION_TAG_DUMMY) {
|
||||
// if (!TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { // ignore the column index check for arithmetic expression.
|
||||
// code = TSDB_CODE_QRY_INVALID_MSG;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
// }
|
||||
|
||||
pExprMsg = (SSqlExpr *)pMsg;
|
||||
}
|
||||
}
|
||||
|
||||
pMsg = createTableIdList(pQueryMsg, pMsg, &(param->pTableIdList));
|
||||
|
||||
if (pQueryMsg->numOfGroupCols > 0) { // group by tag columns
|
||||
param->pGroupColIndex = malloc(pQueryMsg->numOfGroupCols * sizeof(SColIndex));
|
||||
if (param->pGroupColIndex == NULL) {
|
||||
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
goto _cleanup;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pQueryMsg->numOfGroupCols; ++i) {
|
||||
param->pGroupColIndex[i].colId = htons(*(int16_t *)pMsg);
|
||||
pMsg += sizeof(param->pGroupColIndex[i].colId);
|
||||
|
||||
param->pGroupColIndex[i].colIndex = htons(*(int16_t *)pMsg);
|
||||
pMsg += sizeof(param->pGroupColIndex[i].colIndex);
|
||||
|
||||
param->pGroupColIndex[i].flag = htons(*(int16_t *)pMsg);
|
||||
pMsg += sizeof(param->pGroupColIndex[i].flag);
|
||||
|
||||
memcpy(param->pGroupColIndex[i].name, pMsg, tListLen(param->pGroupColIndex[i].name));
|
||||
pMsg += tListLen(param->pGroupColIndex[i].name);
|
||||
}
|
||||
|
||||
pQueryMsg->orderByIdx = htons(pQueryMsg->orderByIdx);
|
||||
pQueryMsg->orderType = htons(pQueryMsg->orderType);
|
||||
}
|
||||
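Each group-by column is read back as three network-order 16-bit fields (colId, colIndex, flag) followed by a fixed-size name buffer. As a hedged illustration of the matching wire layout, a writer on the sending side could look like the sketch below; demoWriteGroupCol is a hypothetical helper, not part of the message code, and it assumes SColIndex.name is a fixed char array as the tListLen() usage above implies.

    #include <string.h>
    /* Hypothetical writer mirroring the group-by column parser above;
     * relies on the SColIndex type, tListLen() and htons() from the TDengine headers. */
    static char *demoWriteGroupCol(char *pMsg, const SColIndex *pCol) {
      *(int16_t *)pMsg = htons(pCol->colId);    pMsg += sizeof(int16_t);
      *(int16_t *)pMsg = htons(pCol->colIndex); pMsg += sizeof(int16_t);
      *(int16_t *)pMsg = htons(pCol->flag);     pMsg += sizeof(int16_t);
      memcpy(pMsg, pCol->name, tListLen(pCol->name));
      pMsg += tListLen(pCol->name);
      return pMsg;
    }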
|
||||
pQueryMsg->fillType = htons(pQueryMsg->fillType);
|
||||
if (pQueryMsg->fillType != TSDB_FILL_NONE) {
|
||||
pQueryMsg->fillVal = (uint64_t)(pMsg);
|
||||
|
||||
int64_t *v = (int64_t *)pMsg;
|
||||
for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) {
|
||||
v[i] = htobe64(v[i]);
|
||||
}
|
||||
|
||||
pMsg += sizeof(int64_t) * pQueryMsg->numOfOutput;
|
||||
}
|
||||
|
||||
if (pQueryMsg->numOfTags > 0) {
|
||||
param->pTagColumnInfo = calloc(1, sizeof(SColumnInfo) * pQueryMsg->numOfTags);
|
||||
if (param->pTagColumnInfo == NULL) {
|
||||
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
goto _cleanup;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pQueryMsg->numOfTags; ++i) {
|
||||
SColumnInfo* pTagCol = (SColumnInfo*) pMsg;
|
||||
|
||||
pTagCol->colId = htons(pTagCol->colId);
|
||||
pTagCol->bytes = htons(pTagCol->bytes);
|
||||
pTagCol->type = htons(pTagCol->type);
|
||||
// pTagCol->flist.numOfFilters = 0;
|
||||
|
||||
param->pTagColumnInfo[i] = *pTagCol;
|
||||
pMsg += sizeof(SColumnInfo);
|
||||
}
|
||||
}
|
||||
|
||||
// the tag query condition expression string is located at the end of query msg
|
||||
if (pQueryMsg->tagCondLen > 0) {
|
||||
param->tagCond = calloc(1, pQueryMsg->tagCondLen);
|
||||
if (param->tagCond == NULL) {
|
||||
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
goto _cleanup;
|
||||
}
|
||||
|
||||
memcpy(param->tagCond, pMsg, pQueryMsg->tagCondLen);
|
||||
pMsg += pQueryMsg->tagCondLen;
|
||||
}
|
||||
|
||||
if (pQueryMsg->prevResultLen > 0) {
|
||||
param->prevResult = calloc(1, pQueryMsg->prevResultLen);
|
||||
if (param->prevResult == NULL) {
|
||||
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
goto _cleanup;
|
||||
}
|
||||
|
||||
memcpy(param->prevResult, pMsg, pQueryMsg->prevResultLen);
|
||||
pMsg += pQueryMsg->prevResultLen;
|
||||
}
|
||||
|
||||
// if (pQueryMsg->tbnameCondLen > 0) {
|
||||
// param->tbnameCond = calloc(1, pQueryMsg->tbnameCondLen + 1);
|
||||
// if (param->tbnameCond == NULL) {
|
||||
// code = deserializeColFilterInfo(pColInfo->flist.filterInfo, numOfFilters, &pMsg);
|
||||
// if (code != TSDB_CODE_SUCCESS) {
|
||||
// goto _cleanup;
|
||||
// }
|
||||
//*/
|
||||
// }
|
||||
//
|
||||
// if (pQueryMsg->colCondLen > 0) {
|
||||
// param->colCond = calloc(1, pQueryMsg->colCondLen);
|
||||
// if (param->colCond == NULL) {
|
||||
// code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
//
|
||||
// strncpy(param->tbnameCond, pMsg, pQueryMsg->tbnameCondLen);
|
||||
// pMsg += pQueryMsg->tbnameCondLen;
|
||||
// memcpy(param->colCond, pMsg, pQueryMsg->colCondLen);
|
||||
// pMsg += pQueryMsg->colCondLen;
|
||||
// }
|
||||
|
||||
//skip ts buf
|
||||
if ((pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen) > 0) {
|
||||
pMsg = (char *)pQueryMsg + pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen;
|
||||
}
|
||||
|
||||
param->pOperator = taosArrayInit(pQueryMsg->numOfOperator, sizeof(int32_t));
|
||||
for(int32_t i = 0; i < pQueryMsg->numOfOperator; ++i) {
|
||||
int32_t op = htonl(*(int32_t*)pMsg);
|
||||
taosArrayPush(param->pOperator, &op);
|
||||
|
||||
pMsg += sizeof(int32_t);
|
||||
}
|
||||
|
||||
if (pQueryMsg->udfContentLen > 0) {
|
||||
// todo extract udf function in tudf.c
|
||||
// param->pUdfInfo = calloc(1, sizeof(SUdfInfo));
|
||||
// param->pUdfInfo->contLen = pQueryMsg->udfContentLen;
|
||||
//
|
||||
// pMsg = (char*)pQueryMsg + pQueryMsg->udfContentOffset;
|
||||
// param->pUdfInfo->resType = *(int8_t*) pMsg;
|
||||
// pMsg += sizeof(int8_t);
|
||||
//
|
||||
// param->pUdfInfo->resBytes = htons(*(int16_t*)pMsg);
|
||||
// pMsg += sizeof(int16_t);
|
||||
// param->tableScanOperator = pQueryMsg->tableScanOperator;
|
||||
// param->pExpr = calloc(pQueryMsg->numOfOutput, POINTER_BYTES);
|
||||
// if (param->pExpr == NULL) {
|
||||
// code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
//
|
||||
// tstr* name = (tstr*)(pMsg);
|
||||
// param->pUdfInfo->name = strndup(name->data, name->len);
|
||||
// SSqlExpr *pExprMsg = (SSqlExpr *)pMsg;
|
||||
//
|
||||
// for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) {
|
||||
// param->pExpr[i] = pExprMsg;
|
||||
//
|
||||
//// pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex);
|
||||
//// pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId);
|
||||
//// pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag);
|
||||
//// pExprMsg->colBytes = htons(pExprMsg->colBytes);
|
||||
//// pExprMsg->colType = htons(pExprMsg->colType);
|
||||
//
|
||||
//// pExprMsg->resType = htons(pExprMsg->resType);
|
||||
//// pExprMsg->resBytes = htons(pExprMsg->resBytes);
|
||||
// pExprMsg->interBytes = htonl(pExprMsg->interBytes);
|
||||
//
|
||||
//// pExprMsg->functionId = htons(pExprMsg->functionId);
|
||||
// pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
|
||||
//// pExprMsg->resColId = htons(pExprMsg->resColId);
|
||||
//// pExprMsg->flist.numOfFilters = htons(pExprMsg->flist.numOfFilters);
|
||||
// pMsg += sizeof(SSqlExpr);
|
||||
//
|
||||
// for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
|
||||
// pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType);
|
||||
// pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen);
|
||||
//
|
||||
// if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) {
|
||||
// pExprMsg->param[j].pz = pMsg;
|
||||
// pMsg += pExprMsg->param[j].nLen; // one more for the string terminated char.
|
||||
// } else {
|
||||
// pExprMsg->param[j].i = htobe64(pExprMsg->param[j].i);
|
||||
// }
|
||||
// }
|
||||
//
|
||||
//// int16_t functionId = pExprMsg->functionId;
|
||||
//// if (functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ || functionId == FUNCTION_TAG_DUMMY) {
|
||||
//// if (!TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { // ignore the column index check for arithmetic expression.
|
||||
//// code = TSDB_CODE_QRY_INVALID_MSG;
|
||||
//// goto _cleanup;
|
||||
//// }
|
||||
//// }
|
||||
//
|
||||
//// if (pExprMsg->flist.numOfFilters > 0) {
|
||||
//// pExprMsg->flist.filterInfo = calloc(pExprMsg->flist.numOfFilters, sizeof(SColumnFilterInfo));
|
||||
//// }
|
||||
////
|
||||
//// deserializeColFilterInfo(pExprMsg->flist.filterInfo, pExprMsg->flist.numOfFilters, &pMsg);
|
||||
// pExprMsg = (SSqlExpr *)pMsg;
|
||||
// }
|
||||
//
|
||||
// if (pQueryMsg->secondStageOutput) {
|
||||
// pExprMsg = (SSqlExpr *)pMsg;
|
||||
// param->pSecExpr = calloc(pQueryMsg->secondStageOutput, POINTER_BYTES);
|
||||
//
|
||||
// for (int32_t i = 0; i < pQueryMsg->secondStageOutput; ++i) {
|
||||
// param->pSecExpr[i] = pExprMsg;
|
||||
//
|
||||
//// pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex);
|
||||
//// pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId);
|
||||
//// pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag);
|
||||
//// pExprMsg->resType = htons(pExprMsg->resType);
|
||||
//// pExprMsg->resBytes = htons(pExprMsg->resBytes);
|
||||
//// pExprMsg->colBytes = htons(pExprMsg->colBytes);
|
||||
//// pExprMsg->colType = htons(pExprMsg->colType);
|
||||
//
|
||||
//// pExprMsg->functionId = htons(pExprMsg->functionId);
|
||||
// pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
|
||||
//
|
||||
// pMsg += sizeof(SSqlExpr);
|
||||
//
|
||||
// for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
|
||||
// pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType);
|
||||
// pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen);
|
||||
//
|
||||
// if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) {
|
||||
// pExprMsg->param[j].pz = pMsg;
|
||||
// pMsg += pExprMsg->param[j].nLen; // one more for the string terminated char.
|
||||
// } else {
|
||||
// pExprMsg->param[j].i = htobe64(pExprMsg->param[j].i);
|
||||
// }
|
||||
// }
|
||||
//
|
||||
//// int16_t functionId = pExprMsg->functionId;
|
||||
//// if (functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ || functionId == FUNCTION_TAG_DUMMY) {
|
||||
//// if (!TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { // ignore the column index check for arithmetic expression.
|
||||
//// code = TSDB_CODE_QRY_INVALID_MSG;
|
||||
//// goto _cleanup;
|
||||
//// }
|
||||
//// }
|
||||
//
|
||||
// pExprMsg = (SSqlExpr *)pMsg;
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// pMsg = createTableIdList(pQueryMsg, pMsg, &(param->pTableIdList));
|
||||
//
|
||||
// if (pQueryMsg->numOfGroupCols > 0) { // group by tag columns
|
||||
// param->pGroupColIndex = malloc(pQueryMsg->numOfGroupCols * sizeof(SColIndex));
|
||||
// if (param->pGroupColIndex == NULL) {
|
||||
// code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
//
|
||||
// for (int32_t i = 0; i < pQueryMsg->numOfGroupCols; ++i) {
|
||||
// param->pGroupColIndex[i].colId = htons(*(int16_t *)pMsg);
|
||||
// pMsg += sizeof(param->pGroupColIndex[i].colId);
|
||||
//
|
||||
// param->pGroupColIndex[i].colIndex = htons(*(int16_t *)pMsg);
|
||||
// pMsg += sizeof(param->pGroupColIndex[i].colIndex);
|
||||
//
|
||||
// param->pGroupColIndex[i].flag = htons(*(int16_t *)pMsg);
|
||||
// pMsg += sizeof(param->pGroupColIndex[i].flag);
|
||||
//
|
||||
// memcpy(param->pGroupColIndex[i].name, pMsg, tListLen(param->pGroupColIndex[i].name));
|
||||
// pMsg += tListLen(param->pGroupColIndex[i].name);
|
||||
// }
|
||||
//
|
||||
// pQueryMsg->orderByIdx = htons(pQueryMsg->orderByIdx);
|
||||
// pQueryMsg->orderType = htons(pQueryMsg->orderType);
|
||||
// }
|
||||
//
|
||||
// pQueryMsg->fillType = htons(pQueryMsg->fillType);
|
||||
// if (pQueryMsg->fillType != TSDB_FILL_NONE) {
|
||||
// pQueryMsg->fillVal = (uint64_t)(pMsg);
|
||||
//
|
||||
// int64_t *v = (int64_t *)pMsg;
|
||||
// for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) {
|
||||
// v[i] = htobe64(v[i]);
|
||||
// }
|
||||
//
|
||||
// pMsg += sizeof(int64_t) * pQueryMsg->numOfOutput;
|
||||
// }
|
||||
//
|
||||
// if (pQueryMsg->numOfTags > 0) {
|
||||
// param->pTagColumnInfo = calloc(1, sizeof(SColumnInfo) * pQueryMsg->numOfTags);
|
||||
// if (param->pTagColumnInfo == NULL) {
|
||||
// code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
//
|
||||
// for (int32_t i = 0; i < pQueryMsg->numOfTags; ++i) {
|
||||
// SColumnInfo* pTagCol = (SColumnInfo*) pMsg;
|
||||
//
|
||||
// pTagCol->colId = htons(pTagCol->colId);
|
||||
// pTagCol->bytes = htons(pTagCol->bytes);
|
||||
// pTagCol->type = htons(pTagCol->type);
|
||||
//// pTagCol->flist.numOfFilters = 0;
|
||||
//
|
||||
// param->pTagColumnInfo[i] = *pTagCol;
|
||||
// pMsg += sizeof(SColumnInfo);
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // the tag query condition expression string is located at the end of query msg
|
||||
// if (pQueryMsg->tagCondLen > 0) {
|
||||
// param->tagCond = calloc(1, pQueryMsg->tagCondLen);
|
||||
// if (param->tagCond == NULL) {
|
||||
// code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
//
|
||||
// memcpy(param->tagCond, pMsg, pQueryMsg->tagCondLen);
|
||||
// pMsg += pQueryMsg->tagCondLen;
|
||||
// }
|
||||
//
|
||||
// if (pQueryMsg->prevResultLen > 0) {
|
||||
// param->prevResult = calloc(1, pQueryMsg->prevResultLen);
|
||||
// if (param->prevResult == NULL) {
|
||||
// code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
//
|
||||
// memcpy(param->prevResult, pMsg, pQueryMsg->prevResultLen);
|
||||
// pMsg += pQueryMsg->prevResultLen;
|
||||
// }
|
||||
//
|
||||
//// if (pQueryMsg->tbnameCondLen > 0) {
|
||||
//// param->tbnameCond = calloc(1, pQueryMsg->tbnameCondLen + 1);
|
||||
//// if (param->tbnameCond == NULL) {
|
||||
//// code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
//// goto _cleanup;
|
||||
//// }
|
||||
////
|
||||
//// strncpy(param->tbnameCond, pMsg, pQueryMsg->tbnameCondLen);
|
||||
//// pMsg += pQueryMsg->tbnameCondLen;
|
||||
//// }
|
||||
//
|
||||
// //skip ts buf
|
||||
// if ((pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen) > 0) {
|
||||
// pMsg = (char *)pQueryMsg + pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen;
|
||||
// }
|
||||
//
|
||||
// param->pOperator = taosArrayInit(pQueryMsg->numOfOperator, sizeof(int32_t));
|
||||
// for(int32_t i = 0; i < pQueryMsg->numOfOperator; ++i) {
|
||||
// int32_t op = htonl(*(int32_t*)pMsg);
|
||||
// taosArrayPush(param->pOperator, &op);
|
||||
//
|
||||
// pMsg += varDataTLen(name);
|
||||
// param->pUdfInfo->funcType = htonl(*(int32_t*)pMsg);
|
||||
// pMsg += sizeof(int32_t);
|
||||
// }
|
||||
//
|
||||
// param->pUdfInfo->bufSize = htonl(*(int32_t*)pMsg);
|
||||
// pMsg += sizeof(int32_t);
|
||||
// if (pQueryMsg->udfContentLen > 0) {
|
||||
// // todo extract udf function in tudf.c
|
||||
//// param->pUdfInfo = calloc(1, sizeof(SUdfInfo));
|
||||
//// param->pUdfInfo->contLen = pQueryMsg->udfContentLen;
|
||||
////
|
||||
//// pMsg = (char*)pQueryMsg + pQueryMsg->udfContentOffset;
|
||||
//// param->pUdfInfo->resType = *(int8_t*) pMsg;
|
||||
//// pMsg += sizeof(int8_t);
|
||||
////
|
||||
//// param->pUdfInfo->resBytes = htons(*(int16_t*)pMsg);
|
||||
//// pMsg += sizeof(int16_t);
|
||||
////
|
||||
//// tstr* name = (tstr*)(pMsg);
|
||||
//// param->pUdfInfo->name = strndup(name->data, name->len);
|
||||
////
|
||||
//// pMsg += varDataTLen(name);
|
||||
//// param->pUdfInfo->funcType = htonl(*(int32_t*)pMsg);
|
||||
//// pMsg += sizeof(int32_t);
|
||||
////
|
||||
//// param->pUdfInfo->bufSize = htonl(*(int32_t*)pMsg);
|
||||
//// pMsg += sizeof(int32_t);
|
||||
////
|
||||
//// param->pUdfInfo->content = malloc(pQueryMsg->udfContentLen);
|
||||
//// memcpy(param->pUdfInfo->content, pMsg, pQueryMsg->udfContentLen);
|
||||
//
|
||||
// param->pUdfInfo->content = malloc(pQueryMsg->udfContentLen);
|
||||
// memcpy(param->pUdfInfo->content, pMsg, pQueryMsg->udfContentLen);
|
||||
|
||||
pMsg += pQueryMsg->udfContentLen;
|
||||
}
|
||||
|
||||
param->sql = strndup(pMsg, pQueryMsg->sqlstrLen);
|
||||
|
||||
SQueriedTableInfo info = { .numOfTags = pQueryMsg->numOfTags, .numOfCols = pQueryMsg->numOfCols, .colList = pQueryMsg->tableCols};
|
||||
if (!validateQueryTableCols(&info, param->pExpr, pQueryMsg->numOfOutput, param->pTagColumnInfo, pQueryMsg)) {
|
||||
code = TSDB_CODE_QRY_INVALID_MSG;
|
||||
goto _cleanup;
|
||||
}
|
||||
|
||||
//qDebug("qmsg:%p query %d tables, type:%d, qrange:%" PRId64 "-%" PRId64 ", numOfGroupbyTagCols:%d, order:%d, "
|
||||
// "outputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptsLen:%d, compNumOfBlocks:%d, limit:%" PRId64 ", offset:%" PRId64,
|
||||
// pQueryMsg, pQueryMsg->numOfTables, pQueryMsg->queryType, pQueryMsg->window.skey, pQueryMsg->window.ekey, pQueryMsg->numOfGroupCols,
|
||||
// pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->interval.interval,
|
||||
// pQueryMsg->fillType, pQueryMsg->tsBuf.tsLen, pQueryMsg->tsBuf.tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset);
|
||||
|
||||
//qDebug("qmsg:%p, sql:%s", pQueryMsg, param->sql);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_cleanup:
|
||||
freeParam(param);
|
||||
return code;
|
||||
}
|
||||
// pMsg += pQueryMsg->udfContentLen;
|
||||
// }
|
||||
//
|
||||
// param->sql = strndup(pMsg, pQueryMsg->sqlstrLen);
|
||||
//
|
||||
// SQueriedTableInfo info = { .numOfTags = pQueryMsg->numOfTags, .numOfCols = pQueryMsg->numOfCols, .colList = pQueryMsg->tableCols};
|
||||
// if (!validateQueryTableCols(&info, param->pExpr, pQueryMsg->numOfOutput, param->pTagColumnInfo, pQueryMsg)) {
|
||||
// code = TSDB_CODE_QRY_INVALID_MSG;
|
||||
// goto _cleanup;
|
||||
// }
|
||||
//
|
||||
// //qDebug("qmsg:%p query %d tables, type:%d, qrange:%" PRId64 "-%" PRId64 ", numOfGroupbyTagCols:%d, order:%d, "
|
||||
//// "outputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptsLen:%d, compNumOfBlocks:%d, limit:%" PRId64 ", offset:%" PRId64,
|
||||
//// pQueryMsg, pQueryMsg->numOfTables, pQueryMsg->queryType, pQueryMsg->window.skey, pQueryMsg->window.ekey, pQueryMsg->numOfGroupCols,
|
||||
//// pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->interval.interval,
|
||||
//// pQueryMsg->fillType, pQueryMsg->tsBuf.tsLen, pQueryMsg->tsBuf.tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset);
|
||||
//
|
||||
// //qDebug("qmsg:%p, sql:%s", pQueryMsg, param->sql);
|
||||
// return TSDB_CODE_SUCCESS;
|
||||
//
|
||||
//_cleanup:
|
||||
// freeParam(param);
|
||||
// return code;
|
||||
//}
|
||||
|
||||
int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
|
||||
if (filterNum <= 0) {
|
||||
@ -217,5 +217,6 @@ TEST(testCase, build_executor_tree_Test) {
|
|||
"}";
|
||||
|
||||
SExecTaskInfo* pTaskInfo = nullptr;
|
||||
int32_t code = qCreateExecTask((void*) 1, 2, NULL, (void**) &pTaskInfo);
|
||||
DataSinkHandle sinkHandle = nullptr;
|
||||
int32_t code = qCreateExecTask((void*) 1, 2, NULL, (void**) &pTaskInfo, &sinkHandle);
|
||||
}
@ -458,6 +458,37 @@ _return:
|
|||
QW_RET(code);
|
||||
}
|
||||
|
||||
int32_t qwExecTask(QW_FPARAMS_DEF, qTaskInfo_t taskHandle, DataSinkHandle sinkHandle) {
  int32_t code = 0;
  bool qcontinue = true;
  SSDataBlock* pRes = NULL;
  uint64_t useconds = 0;

  while (qcontinue) {
    code = qExecTask(taskHandle, &pRes, &useconds);
    if (code) {
      QW_TASK_ELOG("qExecTask failed, code:%x", code);
      QW_ERR_JRET(code);
    }

    if (NULL == pRes) {
      QW_TASK_DLOG("query done, useconds:%"PRIu64, useconds);
      dsEndPut(sinkHandle, useconds);
      break;
    }

    SInputData inputData = {.pData = pRes, .pTableRetrieveTsMap = NULL};
    code = dsPutDataBlock(sinkHandle, &inputData, &qcontinue);
    if (code) {
      QW_TASK_ELOG("dsPutDataBlock failed, code:%x", code);
      QW_ERR_JRET(code);
    }
  }

_return:

  QW_RET(code);
}
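The loop above drains the executor into the data sink: a NULL result block means the task has produced everything, at which point dsEndPut records the elapsed time, while a false qcontinue from dsPutDataBlock asks the producer to pause until the sink has room. A minimal sketch of that produce-until-done-or-full shape, using hypothetical helpers in place of qExecTask/dsPutDataBlock/dsEndPut:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct SDemoBlock SDemoBlock;

    extern SDemoBlock *demoProduceBlock(void);        /* NULL once the producer is done     */
    extern bool        demoSinkPut(SDemoBlock *pBlk); /* returns false to throttle the loop */
    extern void        demoSinkEnd(void);             /* analogue of dsEndPut               */

    static void demoRunUntilDoneOrFull(void) {
      bool keepGoing = true;
      while (keepGoing) {
        SDemoBlock *pBlk = demoProduceBlock();
        if (pBlk == NULL) {             /* producer finished: close the sink and stop */
          demoSinkEnd();
          break;
        }
        keepGoing = demoSinkPut(pBlk);  /* sink back-pressure ends this round */
      }
    }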
|
||||
|
||||
int32_t qwGetResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void **rspMsg, SOutputData *pOutput) {
|
||||
|
@ -733,7 +764,9 @@ int32_t qwProcessQuery(SQWorkerMgmt *mgmt, uint64_t sId, uint64_t qId, uint64_t
|
|||
}
|
||||
|
||||
qTaskInfo_t pTaskInfo = NULL;
|
||||
code = qCreateExecTask(qwMsg->node, 0, (struct SSubplan *)plan, &pTaskInfo);
|
||||
DataSinkHandle sinkHandle = NULL;
|
||||
|
||||
code = qCreateExecTask(qwMsg->node, 0, (struct SSubplan *)plan, &pTaskInfo, &sinkHandle);
|
||||
if (code) {
|
||||
QW_TASK_ELOG("qCreateExecTask failed, code:%x", code);
|
||||
QW_ERR_JRET(code);
|
||||
|
@ -743,12 +776,7 @@ int32_t qwProcessQuery(SQWorkerMgmt *mgmt, uint64_t sId, uint64_t qId, uint64_t
|
|||
|
||||
queryRsped = true;
|
||||
|
||||
DataSinkHandle sinkHandle = NULL;
|
||||
code = qExecTask(pTaskInfo, &sinkHandle);
|
||||
if (code) {
|
||||
QW_TASK_ELOG("qExecTask failed, code:%x", code);
|
||||
QW_ERR_JRET(code);
|
||||
}
|
||||
QW_ERR_JRET(qwExecTask(QW_FPARAMS(), pTaskInfo, sinkHandle));
|
||||
|
||||
_return:
|
||||
|
||||
|
@ -840,11 +868,7 @@ int32_t qwProcessCQuery(SQWorkerMgmt *mgmt, uint64_t sId, uint64_t qId, uint64_t
|
|||
qTaskInfo_t taskHandle = ctx->taskHandle;
|
||||
DataSinkHandle sinkHandle = ctx->sinkHandle;
|
||||
|
||||
code = qExecTask(taskHandle, &sinkHandle);
|
||||
if (code) {
|
||||
QW_TASK_ELOG("qExecTask failed, code:%x", code);
|
||||
QW_ERR_JRET(code);
|
||||
}
|
||||
QW_ERR_JRET(qwExecTask(QW_FPARAMS(), taskHandle, sinkHandle));
|
||||
|
||||
QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_CQUERY);
|
||||
|
||||
|
@ -20,12 +20,38 @@
|
|||
|
||||
static SSchedulerMgmt schMgmt = {0};
|
||||
|
||||
uint64_t schGenTaskId(void) {
  return atomic_add_fetch_64(&schMgmt.taskId, 1);
}

uint64_t schGenUUID(void) {
  static uint64_t hashId = 0;
  static int32_t requestSerialId = 0;

  if (hashId == 0) {
    char uid[64];
    int32_t code = taosGetSystemUUID(uid, tListLen(uid));
    if (code != TSDB_CODE_SUCCESS) {
      qError("Failed to get the system uid, reason:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
    } else {
      hashId = MurmurHash3_32(uid, strlen(uid));
    }
  }

  int64_t ts = taosGetTimestampMs();
  uint64_t pid = taosGetPId();
  int32_t val = atomic_add_fetch_32(&requestSerialId, 1);

  uint64_t id = ((hashId & 0x0FFF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
  return id;
}
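schGenUUID() packs four fields into the 64-bit id: 12 bits of the hashed system UUID, 12 bits of the process id, the low 24 bits of the millisecond timestamp, and a 16-bit per-process serial. A small sketch that unpacks such an id; schtDumpUUID is illustrative only, not a scheduler API:

    #include <inttypes.h>
    #include <stdio.h>

    /* Illustrative helper: split a schGenUUID()-style id back into its bit fields. */
    static void schtDumpUUID(uint64_t id) {
      uint64_t hashPart = (id >> 52) & 0x0FFF;    /* hashed system UUID, 12 bits   */
      uint64_t pidPart  = (id >> 40) & 0x0FFF;    /* process id, 12 bits           */
      uint64_t tsPart   = (id >> 16) & 0xFFFFFF;  /* timestamp in ms, low 24 bits  */
      uint64_t serial   = id & 0xFFFF;            /* per-process serial, 16 bits   */
      printf("hash:%" PRIx64 " pid:%" PRIx64 " ts:%" PRIx64 " serial:%" PRIx64 "\n",
             hashPart, pidPart, tsPart, serial);
    }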
|
||||
|
||||
int32_t schInitTask(SSchJob* pJob, SSchTask *pTask, SSubplan* pPlan, SSchLevel *pLevel) {
|
||||
pTask->plan = pPlan;
|
||||
pTask->level = pLevel;
|
||||
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_NOT_START);
|
||||
pTask->taskId = atomic_add_fetch_64(&schMgmt.taskId, 1);
|
||||
pTask->taskId = schGenTaskId();
|
||||
pTask->execAddrs = taosArrayInit(SCH_MAX_CONDIDATE_EP_NUM, sizeof(SQueryNodeAddr));
|
||||
if (NULL == pTask->execAddrs) {
|
||||
SCH_TASK_ELOG("taosArrayInit %d exec addrs failed", SCH_MAX_CONDIDATE_EP_NUM);
|
||||
|
@ -40,8 +66,7 @@ void schFreeTask(SSchTask* pTask) {
|
|||
taosArrayDestroy(pTask->candidateAddrs);
|
||||
}
|
||||
|
||||
// TODO NEED TO VERIFY WITH ASYNC_SEND MEMORY FREE
|
||||
//tfree(pTask->msg);
|
||||
tfree(pTask->msg);
|
||||
|
||||
if (pTask->children) {
|
||||
taosArrayDestroy(pTask->children);
|
||||
|
@ -71,7 +96,7 @@ int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t m
|
|||
SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%d, rspType:%d", lastMsgType, msgType);
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
|
||||
if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXECUTING && SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
|
||||
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%d, rspType:%d", SCH_GET_TASK_STATUS(pTask), msgType);
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
|
@ -141,7 +166,7 @@ int32_t schCheckAndUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
|
|||
break;
|
||||
case JOB_TASK_STATUS_CANCELLED:
|
||||
case JOB_TASK_STATUS_DROPPING:
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -412,6 +437,8 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) {
|
|||
SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno);
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
++addNum;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -539,12 +566,9 @@ int32_t schTaskCheckAndSetRetry(SSchJob *job, SSchTask *task, int32_t errCode, b
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Note: no more error processing, handled in function internal
|
||||
int32_t schProcessOnJobFailure(SSchJob *pJob, int32_t errCode) {
|
||||
int32_t schProcessOnJobFailureImpl(SSchJob *pJob, int32_t status, int32_t errCode) {
|
||||
// if already FAILED, no more processing
|
||||
SCH_ERR_RET(schCheckAndUpdateJobStatus(pJob, JOB_TASK_STATUS_FAILED));
|
||||
SCH_ERR_RET(schCheckAndUpdateJobStatus(pJob, status));
|
||||
|
||||
if (errCode) {
|
||||
atomic_store_32(&pJob->errCode, errCode);
|
||||
|
@ -561,6 +585,17 @@ int32_t schProcessOnJobFailure(SSchJob *pJob, int32_t errCode) {
|
|||
|
||||
|
||||
|
||||
// Note: no more error processing, handled in function internal
|
||||
int32_t schProcessOnJobFailure(SSchJob *pJob, int32_t errCode) {
|
||||
SCH_RET(schProcessOnJobFailureImpl(pJob, JOB_TASK_STATUS_FAILED, errCode));
|
||||
}
|
||||
|
||||
// Note: no more error processing, handled in function internal
|
||||
int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode) {
|
||||
SCH_RET(schProcessOnJobFailureImpl(pJob, JOB_TASK_STATUS_DROPPING, errCode));
|
||||
}
|
||||
|
||||
|
||||
// Note: no more error processing, handled in function internal
|
||||
int32_t schFetchFromRemote(SSchJob *pJob) {
|
||||
int32_t code = 0;
|
||||
|
@ -792,6 +827,11 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
|
|||
if (rspCode != TSDB_CODE_SUCCESS) {
|
||||
SCH_ERR_RET(schProcessOnTaskFailure(pJob, pTask, rspCode));
|
||||
}
|
||||
|
||||
SShellSubmitRsp *rsp = (SShellSubmitRsp *)msg;
|
||||
if (rsp) {
|
||||
pJob->resNumOfRows += rsp->affectedRows;
|
||||
}
|
||||
#endif
|
||||
|
||||
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
|
||||
|
@ -827,8 +867,18 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
|
|||
SCH_ERR_RET(schProcessOnTaskFailure(pJob, pTask, rspCode));
|
||||
}
|
||||
|
||||
if (pJob->res) {
|
||||
SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->res);
|
||||
tfree(rsp);
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
atomic_store_ptr(&pJob->res, rsp);
|
||||
atomic_store_32(&pJob->resNumOfRows, rsp->numOfRows);
|
||||
|
||||
if (rsp->completed) {
|
||||
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_SUCCEED);
|
||||
}
|
||||
|
||||
SCH_ERR_JRET(schProcessOnDataFetched(pJob));
|
||||
|
||||
|
@ -864,7 +914,7 @@ int32_t schHandleCallback(void* param, const SDataBuf* pMsg, int32_t msgType, in
|
|||
SSchJob **job = taosHashGet(schMgmt.jobs, &pParam->queryId, sizeof(pParam->queryId));
|
||||
if (NULL == job || NULL == (*job)) {
|
||||
qError("QID:%"PRIx64" taosHashGet queryId not exist, may be dropped", pParam->queryId);
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED);
|
||||
}
|
||||
|
||||
pJob = *job;
|
||||
|
@ -873,13 +923,13 @@ int32_t schHandleCallback(void* param, const SDataBuf* pMsg, int32_t msgType, in
|
|||
|
||||
int32_t s = taosHashGetSize(pJob->execTasks);
|
||||
if (s <= 0) {
|
||||
qError("QID:%"PRIx64",TID:%"PRIx64" no task in execTask list", pParam->queryId, pParam->taskId);
|
||||
qError("QID:%"PRIx64",TID:%"PRId64" no task in execTask list", pParam->queryId, pParam->taskId);
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
SSchTask **task = taosHashGet(pJob->execTasks, &pParam->taskId, sizeof(pParam->taskId));
|
||||
if (NULL == task || NULL == (*task)) {
|
||||
qError("QID:%"PRIx64",TID:%"PRIx64" taosHashGet taskId not exist", pParam->queryId, pParam->taskId);
|
||||
qError("QID:%"PRIx64",TID:%"PRId64" taosHashGet taskId not exist", pParam->queryId, pParam->taskId);
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -1026,7 +1076,13 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
|
|||
case TDMT_VND_CREATE_TABLE:
|
||||
case TDMT_VND_SUBMIT: {
|
||||
msgSize = pTask->msgLen;
|
||||
msg = pTask->msg;
|
||||
msg = calloc(1, msgSize);
|
||||
if (NULL == msg) {
|
||||
SCH_TASK_ELOG("calloc %d failed", msgSize);
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
memcpy(msg, pTask->msg, msgSize);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1355,9 +1411,9 @@ int32_t scheduleExecJob(void *transport, SArray *nodeList, SQueryDag* pDag, stru
|
|||
|
||||
SSchJob *job = NULL;
|
||||
|
||||
SCH_ERR_RET(schExecJobImpl(transport, nodeList, pDag, &job, true));
|
||||
SCH_ERR_RET(schExecJobImpl(transport, nodeList, pDag, pJob, true));
|
||||
|
||||
*pJob = job;
|
||||
job = *pJob;
|
||||
|
||||
pRes->code = atomic_load_32(&job->errCode);
|
||||
pRes->numOfRows = job->resNumOfRows;
|
||||
|
@ -1427,7 +1483,7 @@ int32_t schedulerConvertDagToTaskList(SQueryDag* pDag, SArray **pTasks) {
|
|||
|
||||
pMsg->sId = htobe64(schMgmt.sId);
|
||||
pMsg->queryId = htobe64(plan->id.queryId);
|
||||
pMsg->taskId = htobe64(atomic_add_fetch_64(&schMgmt.taskId, 1));
|
||||
pMsg->taskId = htobe64(schGenUUID());
|
||||
pMsg->contentLen = htonl(msgLen);
|
||||
memcpy(pMsg->msg, msg, msgLen);
|
||||
|
||||
|
@ -1450,6 +1506,52 @@ _return:
|
|||
SCH_RET(code);
|
||||
}
|
||||
|
||||
int32_t schedulerCopyTask(STaskInfo *src, SArray **dst, int32_t copyNum) {
|
||||
if (NULL == src || NULL == dst || copyNum <= 0) {
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
|
||||
}
|
||||
|
||||
int32_t code = 0;
|
||||
|
||||
*dst = taosArrayInit(copyNum, sizeof(STaskInfo));
|
||||
if (NULL == *dst) {
|
||||
qError("taosArrayInit %d taskInfo failed", copyNum);
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
int32_t msgSize = src->msg->contentLen + sizeof(*src->msg);
|
||||
STaskInfo info = {0};
|
||||
|
||||
info.addr = src->addr;
|
||||
|
||||
for (int32_t i = 0; i < copyNum; ++i) {
|
||||
info.msg = malloc(msgSize);
|
||||
if (NULL == info.msg) {
|
||||
qError("malloc %d failed", msgSize);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
memcpy(info.msg, src->msg, msgSize);
|
||||
|
||||
info.msg->taskId = schGenUUID();
|
||||
|
||||
if (NULL == taosArrayPush(*dst, &info)) {
|
||||
qError("taosArrayPush failed, idx:%d", i);
|
||||
free(info.msg);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_return:
|
||||
|
||||
schedulerFreeTaskList(*dst);
|
||||
*dst = NULL;
|
||||
|
||||
SCH_RET(code);
|
||||
}
|
||||
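schedulerCopyTask() duplicates one serialized task copyNum times and stamps each copy with a fresh id from schGenUUID(), which is useful when the same subplan message must be dispatched more than once. A hedged usage sketch; demoCopyFirstTask and the replica count are illustrative, and error handling is trimmed:

    /* Convert a DAG to task messages, then make three copies of the first task. */
    static int32_t demoCopyFirstTask(SQueryDag *pDag) {
      SArray *pTasks = NULL;
      int32_t code = schedulerConvertDagToTaskList(pDag, &pTasks);
      if (code != TSDB_CODE_SUCCESS) {
        return code;
      }

      STaskInfo *src = taosArrayGet(pTasks, 0);   /* first generated task */
      SArray *copies = NULL;
      code = schedulerCopyTask(src, &copies, 3);  /* each copy gets a new taskId */

      schedulerFreeTaskList(copies);
      schedulerFreeTaskList(pTasks);
      return code;
    }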
|
||||
|
||||
int32_t scheduleFetchRows(SSchJob *pJob, void** pData) {
|
||||
if (NULL == pJob || NULL == pData) {
|
||||
|
@ -1457,33 +1559,29 @@ int32_t scheduleFetchRows(SSchJob *pJob, void** pData) {
|
|||
}
|
||||
int32_t code = 0;
|
||||
|
||||
atomic_add_fetch_32(&pJob->ref, 1);
|
||||
|
||||
int8_t status = SCH_GET_JOB_STATUS(pJob);
|
||||
if (status == JOB_TASK_STATUS_DROPPING) {
|
||||
SCH_JOB_ELOG("job is dropping, status:%d", status);
|
||||
return TSDB_CODE_SCH_STATUS_ERROR;
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
atomic_add_fetch_32(&pJob->ref, 1);
|
||||
|
||||
if (!SCH_JOB_NEED_FETCH(&pJob->attr)) {
|
||||
SCH_JOB_ELOG("no need to fetch data, status:%d", SCH_GET_JOB_STATUS(pJob));
|
||||
atomic_sub_fetch_32(&pJob->ref, 1);
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) {
|
||||
SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch));
|
||||
atomic_sub_fetch_32(&pJob->ref, 1);
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
if (status == JOB_TASK_STATUS_FAILED) {
|
||||
*pData = atomic_load_ptr(&pJob->res);
|
||||
atomic_store_ptr(&pJob->res, NULL);
|
||||
if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
|
||||
SCH_JOB_ELOG("job failed or dropping, status:%d", status);
|
||||
SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
|
||||
} else if (status == JOB_TASK_STATUS_SUCCEED) {
|
||||
*pData = atomic_load_ptr(&pJob->res);
|
||||
atomic_store_ptr(&pJob->res, NULL);
|
||||
SCH_JOB_ELOG("job already succeed, status:%d", status);
|
||||
goto _return;
|
||||
} else if (status == JOB_TASK_STATUS_PARTIAL_SUCCEED) {
|
||||
SCH_ERR_JRET(schFetchFromRemote(pJob));
|
||||
|
@ -1493,15 +1591,17 @@ int32_t scheduleFetchRows(SSchJob *pJob, void** pData) {
|
|||
|
||||
status = SCH_GET_JOB_STATUS(pJob);
|
||||
|
||||
if (status == JOB_TASK_STATUS_FAILED) {
|
||||
code = atomic_load_32(&pJob->errCode);
|
||||
SCH_ERR_JRET(code);
|
||||
if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
|
||||
SCH_JOB_ELOG("job failed or dropping, status:%d", status);
|
||||
SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
|
||||
}
|
||||
|
||||
if (pJob->res && ((SRetrieveTableRsp *)pJob->res)->completed) {
|
||||
SCH_ERR_JRET(schCheckAndUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED));
|
||||
}
|
||||
|
||||
_return:
|
||||
|
||||
while (true) {
|
||||
*pData = atomic_load_ptr(&pJob->res);
|
||||
|
||||
|
@ -1512,10 +1612,19 @@ int32_t scheduleFetchRows(SSchJob *pJob, void** pData) {
|
|||
break;
|
||||
}
|
||||
|
||||
_return:
|
||||
if (NULL == *pData) {
|
||||
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)calloc(1, sizeof(SRetrieveTableRsp));
|
||||
if (rsp) {
|
||||
rsp->completed = 1;
|
||||
}
|
||||
|
||||
*pData = rsp;
|
||||
}
|
||||
|
||||
atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
|
||||
|
||||
SCH_JOB_DLOG("fetch done, code:%x", code);
|
||||
|
||||
atomic_sub_fetch_32(&pJob->ref, 1);
|
||||
|
||||
SCH_RET(code);
|
||||
|
@ -1540,6 +1649,7 @@ void scheduleFreeJob(void *job) {
|
|||
|
||||
SSchJob *pJob = job;
|
||||
uint64_t queryId = pJob->queryId;
|
||||
bool setJobFree = false;
|
||||
|
||||
if (SCH_GET_JOB_STATUS(pJob) > 0) {
|
||||
if (0 != taosHashRemove(schMgmt.jobs, &pJob->queryId, sizeof(pJob->queryId))) {
|
||||
|
@ -1547,8 +1657,6 @@ void scheduleFreeJob(void *job) {
|
|||
return;
|
||||
}
|
||||
|
||||
schCheckAndUpdateJobStatus(pJob, JOB_TASK_STATUS_DROPPING);
|
||||
|
||||
SCH_JOB_DLOG("job removed from list, no further ref, ref:%d", atomic_load_32(&pJob->ref));
|
||||
|
||||
while (true) {
|
||||
|
@ -1556,6 +1664,11 @@ void scheduleFreeJob(void *job) {
|
|||
if (0 == ref) {
|
||||
break;
|
||||
} else if (ref > 0) {
|
||||
if (1 == ref && atomic_load_8(&pJob->userFetch) > 0 && !setJobFree) {
|
||||
schProcessOnJobDropped(pJob, TSDB_CODE_QRY_JOB_FREED);
|
||||
setJobFree = true;
|
||||
}
|
||||
|
||||
usleep(1);
|
||||
} else {
|
||||
assert(0);
|
||||
|
@ -1591,6 +1704,7 @@ void scheduleFreeJob(void *job) {
|
|||
taosHashCleanup(pJob->succTasks);
|
||||
|
||||
taosArrayDestroy(pJob->levels);
|
||||
taosArrayDestroy(pJob->nodeList);
|
||||
|
||||
tfree(pJob->res);
|
||||
|
||||
@ -34,9 +34,25 @@
|
|||
#include "stub.h"
|
||||
#include "addr_any.h"
|
||||
|
||||
|
||||
namespace {
|
||||
|
||||
extern "C" int32_t schHandleResponseMsg(SSchJob *job, SSchTask *task, int32_t msgType, char *msg, int32_t msgSize, int32_t rspCode);
|
||||
extern "C" int32_t schHandleCallback(void* param, const SDataBuf* pMsg, int32_t msgType, int32_t rspCode);
|
||||
|
||||
struct SSchJob *pInsertJob = NULL;
|
||||
struct SSchJob *pQueryJob = NULL;
|
||||
|
||||
uint64_t schtMergeTemplateId = 0x4;
|
||||
uint64_t schtFetchTaskId = 0;
|
||||
uint64_t schtQueryId = 1;
|
||||
|
||||
bool schtTestStop = false;
|
||||
bool schtTestDeadLoop = false;
|
||||
int32_t schtTestMTRunSec = 10;
|
||||
int32_t schtTestPrintNum = 1000;
|
||||
int32_t schtStartFetch = 0;
|
||||
|
||||
|
||||
void schtInitLogFile() {
|
||||
const char *defaultLogFileNamePrefix = "taoslog";
|
||||
|
@ -55,7 +71,7 @@ void schtInitLogFile() {
|
|||
|
||||
|
||||
void schtBuildQueryDag(SQueryDag *dag) {
|
||||
uint64_t qId = 0x0000000000000001;
|
||||
uint64_t qId = schtQueryId;
|
||||
|
||||
dag->queryId = qId;
|
||||
dag->numOfSubplans = 2;
|
||||
|
@ -82,7 +98,7 @@ void schtBuildQueryDag(SQueryDag *dag) {
|
|||
scanPlan->msgType = TDMT_VND_QUERY;
|
||||
|
||||
mergePlan->id.queryId = qId;
|
||||
mergePlan->id.templateId = 0x4444444444;
|
||||
mergePlan->id.templateId = schtMergeTemplateId;
|
||||
mergePlan->id.subplanId = 0x5555555555;
|
||||
mergePlan->type = QUERY_TYPE_MERGE;
|
||||
mergePlan->level = 0;
|
||||
|
@ -113,9 +129,9 @@ void schtBuildInsertDag(SQueryDag *dag) {
|
|||
dag->queryId = qId;
|
||||
dag->numOfSubplans = 2;
|
||||
dag->pSubplans = taosArrayInit(1, POINTER_BYTES);
|
||||
SArray *inserta = taosArrayInit(dag->numOfSubplans, sizeof(SSubplan));
|
||||
SArray *inserta = taosArrayInit(dag->numOfSubplans, POINTER_BYTES);
|
||||
|
||||
SSubplan insertPlan[2] = {0};
|
||||
SSubplan *insertPlan = (SSubplan *)calloc(2, sizeof(SSubplan));
|
||||
|
||||
insertPlan[0].id.queryId = qId;
|
||||
insertPlan[0].id.templateId = 0x0000000000000003;
|
||||
|
@ -131,6 +147,7 @@ void schtBuildInsertDag(SQueryDag *dag) {
|
|||
insertPlan[0].pParents = NULL;
|
||||
insertPlan[0].pNode = NULL;
|
||||
insertPlan[0].pDataSink = (SDataSink*)calloc(1, sizeof(SDataSink));
|
||||
insertPlan[0].msgType = TDMT_VND_SUBMIT;
|
||||
|
||||
insertPlan[1].id.queryId = qId;
|
||||
insertPlan[1].id.templateId = 0x0000000000000003;
|
||||
|
@ -146,10 +163,11 @@ void schtBuildInsertDag(SQueryDag *dag) {
|
|||
insertPlan[1].pParents = NULL;
|
||||
insertPlan[1].pNode = NULL;
|
||||
insertPlan[1].pDataSink = (SDataSink*)calloc(1, sizeof(SDataSink));
|
||||
insertPlan[1].msgType = TDMT_VND_SUBMIT;
|
||||
|
||||
|
||||
taosArrayPush(inserta, &insertPlan[0]);
|
||||
taosArrayPush(inserta, &insertPlan[1]);
|
||||
taosArrayPush(inserta, &insertPlan);
|
||||
insertPlan += 1;
|
||||
taosArrayPush(inserta, &insertPlan);
|
||||
|
||||
taosArrayPush(dag->pSubplans, &inserta);
|
||||
}
|
||||
|
@ -169,8 +187,6 @@ void schtRpcSendRequest(void *shandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int6
|
|||
|
||||
}
|
||||
|
||||
|
||||
|
||||
void schtSetPlanToString() {
|
||||
static Stub stub;
|
||||
stub.set(qSubPlanToString, schtPlanToString);
|
||||
|
@ -210,6 +226,29 @@ void schtSetRpcSendRequest() {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t schtAsyncSendMsgToServer(void *pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo) {
|
||||
if (pInfo) {
|
||||
tfree(pInfo->param);
|
||||
tfree(pInfo->msgInfo.pData);
|
||||
free(pInfo);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void schtSetAsyncSendMsgToServer() {
|
||||
static Stub stub;
|
||||
stub.set(asyncSendMsgToServer, schtAsyncSendMsgToServer);
|
||||
{
|
||||
AddrAny any("libtransport.so");
|
||||
std::map<std::string,void*> result;
|
||||
any.get_global_func_addr_dynsym("^asyncSendMsgToServer$", result);
|
||||
for (const auto& f : result) {
|
||||
stub.set(f.second, schtAsyncSendMsgToServer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void *schtSendRsp(void *param) {
|
||||
SSchJob *job = NULL;
|
||||
|
@ -230,7 +269,7 @@ void *schtSendRsp(void *param) {
|
|||
|
||||
SShellSubmitRsp rsp = {0};
|
||||
rsp.affectedRows = 10;
|
||||
schHandleResponseMsg(job, task, TDMT_VND_SUBMIT, (char *)&rsp, sizeof(rsp), 0);
|
||||
schHandleResponseMsg(job, task, TDMT_VND_SUBMIT_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
|
||||
pIter = taosHashIterate(job->execTasks, pIter);
|
||||
}
|
||||
|
@ -238,7 +277,233 @@ void *schtSendRsp(void *param) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
struct SSchJob *pInsertJob = NULL;
|
||||
void *schtCreateFetchRspThread(void *param) {
|
||||
struct SSchJob* job = (struct SSchJob*)param;
|
||||
|
||||
sleep(1);
|
||||
|
||||
int32_t code = 0;
|
||||
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)calloc(1, sizeof(SRetrieveTableRsp));
|
||||
rsp->completed = 1;
|
||||
rsp->numOfRows = 10;
|
||||
|
||||
code = schHandleResponseMsg(job, job->fetchTask, TDMT_VND_FETCH_RSP, (char *)rsp, sizeof(*rsp), 0);
|
||||
|
||||
assert(code == 0);
|
||||
}
|
||||
|
||||
|
||||
void *schtFetchRspThread(void *aa) {
|
||||
SDataBuf dataBuf = {0};
|
||||
SSchCallbackParam* param = NULL;
|
||||
|
||||
while (!schtTestStop) {
|
||||
if (0 == atomic_val_compare_exchange_32(&schtStartFetch, 1, 0)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
usleep(1);
|
||||
|
||||
param = (SSchCallbackParam *)calloc(1, sizeof(*param));
|
||||
|
||||
param->queryId = schtQueryId;
|
||||
param->taskId = schtFetchTaskId;
|
||||
|
||||
int32_t code = 0;
|
||||
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)calloc(1, sizeof(SRetrieveTableRsp));
|
||||
rsp->completed = 1;
|
||||
rsp->numOfRows = 10;
|
||||
|
||||
dataBuf.pData = rsp;
|
||||
dataBuf.len = sizeof(*rsp);
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_VND_FETCH_RSP, 0);
|
||||
|
||||
assert(code == 0 || code);
|
||||
}
|
||||
}
|
||||
|
||||
void schtFreeQueryJob(int32_t freeThread) {
|
||||
static uint32_t freeNum = 0;
|
||||
SSchJob *job = atomic_load_ptr(&pQueryJob);
|
||||
|
||||
if (job && atomic_val_compare_exchange_ptr(&pQueryJob, job, NULL)) {
|
||||
scheduleFreeJob(job);
|
||||
if (freeThread) {
|
||||
if (++freeNum % schtTestPrintNum == 0) {
|
||||
printf("FreeNum:%d\n", freeNum);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void* schtRunJobThread(void *aa) {
|
||||
void *mockPointer = (void *)0x1;
|
||||
char *clusterId = "cluster1";
|
||||
char *dbname = "1.db1";
|
||||
char *tablename = "table1";
|
||||
SVgroupInfo vgInfo = {0};
|
||||
SQueryDag dag = {0};
|
||||
|
||||
schtInitLogFile();
|
||||
|
||||
|
||||
int32_t code = schedulerInit(NULL);
|
||||
assert(code == 0);
|
||||
|
||||
|
||||
schtSetPlanToString();
|
||||
schtSetExecNode();
|
||||
schtSetAsyncSendMsgToServer();
|
||||
|
||||
SSchJob *job = NULL;
|
||||
SSchCallbackParam *param = NULL;
|
||||
SHashObj *execTasks = NULL;
|
||||
SDataBuf dataBuf = {0};
|
||||
uint32_t jobFinished = 0;
|
||||
|
||||
while (!schtTestStop) {
|
||||
schtBuildQueryDag(&dag);
|
||||
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SEpAddr));
|
||||
|
||||
SEpAddr qnodeAddr = {0};
|
||||
strcpy(qnodeAddr.fqdn, "qnode0.ep");
|
||||
qnodeAddr.port = 6031;
|
||||
taosArrayPush(qnodeList, &qnodeAddr);
|
||||
|
||||
code = scheduleAsyncExecJob(mockPointer, qnodeList, &dag, &job);
|
||||
assert(code == 0);
|
||||
|
||||
execTasks = taosHashInit(5, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_ENTRY_LOCK);
|
||||
void *pIter = taosHashIterate(job->execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
schtFetchTaskId = task->taskId - 1;
|
||||
|
||||
taosHashPut(execTasks, &task->taskId, sizeof(task->taskId), task, sizeof(*task));
|
||||
pIter = taosHashIterate(job->execTasks, pIter);
|
||||
}
|
||||
|
||||
param = (SSchCallbackParam *)calloc(1, sizeof(*param));
|
||||
param->queryId = schtQueryId;
|
||||
|
||||
pQueryJob = job;
|
||||
|
||||
|
||||
pIter = taosHashIterate(execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = (SSchTask *)pIter;
|
||||
|
||||
param->taskId = task->taskId;
|
||||
SQueryTableRsp rsp = {0};
|
||||
dataBuf.pData = &rsp;
|
||||
dataBuf.len = sizeof(rsp);
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_VND_QUERY_RSP, 0);
|
||||
assert(code == 0 || code);
|
||||
|
||||
pIter = taosHashIterate(execTasks, pIter);
|
||||
}
|
||||
|
||||
|
||||
param = (SSchCallbackParam *)calloc(1, sizeof(*param));
|
||||
param->queryId = schtQueryId;
|
||||
|
||||
pIter = taosHashIterate(execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = (SSchTask *)pIter;
|
||||
|
||||
param->taskId = task->taskId;
|
||||
SResReadyRsp rsp = {0};
|
||||
dataBuf.pData = &rsp;
|
||||
dataBuf.len = sizeof(rsp);
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0);
|
||||
assert(code == 0 || code);
|
||||
|
||||
pIter = taosHashIterate(execTasks, pIter);
|
||||
}
|
||||
|
||||
|
||||
param = (SSchCallbackParam *)calloc(1, sizeof(*param));
|
||||
param->queryId = schtQueryId;
|
||||
|
||||
pIter = taosHashIterate(execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = (SSchTask *)pIter;
|
||||
|
||||
param->taskId = task->taskId - 1;
|
||||
SQueryTableRsp rsp = {0};
|
||||
dataBuf.pData = &rsp;
|
||||
dataBuf.len = sizeof(rsp);
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_VND_QUERY_RSP, 0);
|
||||
assert(code == 0 || code);
|
||||
|
||||
pIter = taosHashIterate(execTasks, pIter);
|
||||
}
|
||||
|
||||
|
||||
param = (SSchCallbackParam *)calloc(1, sizeof(*param));
|
||||
param->queryId = schtQueryId;
|
||||
|
||||
pIter = taosHashIterate(execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = (SSchTask *)pIter;
|
||||
|
||||
param->taskId = task->taskId - 1;
|
||||
SResReadyRsp rsp = {0};
|
||||
dataBuf.pData = &rsp;
|
||||
dataBuf.len = sizeof(rsp);
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0);
|
||||
assert(code == 0 || code);
|
||||
|
||||
pIter = taosHashIterate(execTasks, pIter);
|
||||
}
|
||||
|
||||
atomic_store_32(&schtStartFetch, 1);
|
||||
|
||||
void *data = NULL;
|
||||
code = scheduleFetchRows(pQueryJob, &data);
|
||||
assert(code == 0 || code);
|
||||
|
||||
if (0 == code) {
|
||||
SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)data;
|
||||
assert(pRsp->completed == 1);
|
||||
assert(pRsp->numOfRows == 10);
|
||||
}
|
||||
|
||||
data = NULL;
|
||||
code = scheduleFetchRows(pQueryJob, &data);
|
||||
assert(code == 0 || code);
|
||||
|
||||
schtFreeQueryJob(0);
|
||||
|
||||
taosHashCleanup(execTasks);
|
||||
|
||||
schtFreeQueryDag(&dag);
|
||||
|
||||
if (++jobFinished % schtTestPrintNum == 0) {
|
||||
printf("jobFinished:%d\n", jobFinished);
|
||||
}
|
||||
|
||||
++schtQueryId;
|
||||
}
|
||||
|
||||
schedulerDestroy();
|
||||
|
||||
}
|
||||
|
||||
void* schtFreeJobThread(void *aa) {
|
||||
while (!schtTestStop) {
|
||||
usleep(rand() % 100);
|
||||
schtFreeQueryJob(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
TEST(queryTest, normalCase) {
|
||||
|
@ -266,6 +531,7 @@ TEST(queryTest, normalCase) {
|
|||
|
||||
schtSetPlanToString();
|
||||
schtSetExecNode();
|
||||
schtSetAsyncSendMsgToServer();
|
||||
|
||||
code = scheduleAsyncExecJob(mockPointer, qnodeList, &dag, &pJob);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
@ -276,7 +542,7 @@ TEST(queryTest, normalCase) {
|
|||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SQueryTableRsp rsp = {0};
|
||||
code = schHandleResponseMsg(job, task, TDMT_VND_QUERY, (char *)&rsp, sizeof(rsp), 0);
|
||||
code = schHandleResponseMsg(job, task, TDMT_VND_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
pIter = taosHashIterate(job->execTasks, pIter);
|
||||
|
@ -287,8 +553,8 @@ TEST(queryTest, normalCase) {
|
|||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SResReadyRsp rsp = {0};
|
||||
code = schHandleResponseMsg(job, task, TDMT_VND_RES_READY, (char *)&rsp, sizeof(rsp), 0);
|
||||
|
||||
code = schHandleResponseMsg(job, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
printf("code:%d", code);
|
||||
ASSERT_EQ(code, 0);
|
||||
pIter = taosHashIterate(job->execTasks, pIter);
|
||||
}
|
||||
|
@ -298,7 +564,7 @@ TEST(queryTest, normalCase) {
|
|||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SQueryTableRsp rsp = {0};
|
||||
code = schHandleResponseMsg(job, task, TDMT_VND_QUERY, (char *)&rsp, sizeof(rsp), 0);
|
||||
code = schHandleResponseMsg(job, task, TDMT_VND_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
pIter = taosHashIterate(job->execTasks, pIter);
|
||||
|
@ -309,39 +575,38 @@ TEST(queryTest, normalCase) {
|
|||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SResReadyRsp rsp = {0};
|
||||
code = schHandleResponseMsg(job, task, TDMT_VND_RES_READY, (char *)&rsp, sizeof(rsp), 0);
|
||||
code = schHandleResponseMsg(job, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
pIter = taosHashIterate(job->execTasks, pIter);
|
||||
}
|
||||
|
||||
SRetrieveTableRsp rsp = {0};
|
||||
rsp.completed = 1;
|
||||
rsp.numOfRows = 10;
|
||||
code = schHandleResponseMsg(job, NULL, TDMT_VND_FETCH, (char *)&rsp, sizeof(rsp), 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
pthread_attr_t thattr;
|
||||
pthread_attr_init(&thattr);
|
||||
|
||||
pthread_t thread1;
|
||||
pthread_create(&(thread1), &thattr, schtCreateFetchRspThread, job);
|
||||
|
||||
void *data = NULL;
|
||||
|
||||
void *data = NULL;
|
||||
code = scheduleFetchRows(job, &data);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)data;
|
||||
ASSERT_EQ(pRsp->completed, 1);
|
||||
ASSERT_EQ(pRsp->numOfRows, 10);
|
||||
tfree(data);
|
||||
|
||||
data = NULL;
|
||||
code = scheduleFetchRows(job, &data);
|
||||
ASSERT_EQ(code, 0);
|
||||
ASSERT_EQ(data, (void*)NULL);
|
||||
ASSERT_TRUE(data);
|
||||
|
||||
scheduleFreeJob(pJob);
|
||||
|
||||
schtFreeQueryDag(&dag);
|
||||
}
|
||||
|
||||
schedulerDestroy();
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
@ -369,6 +634,7 @@ TEST(insertTest, normalCase) {
|
|||
schtBuildInsertDag(&dag);
|
||||
|
||||
schtSetPlanToString();
|
||||
schtSetAsyncSendMsgToServer();
|
||||
|
||||
pthread_attr_t thattr;
|
||||
pthread_attr_init(&thattr);
|
||||
|
@ -382,16 +648,34 @@ TEST(insertTest, normalCase) {
|
|||
ASSERT_EQ(res.numOfRows, 20);
|
||||
|
||||
scheduleFreeJob(pInsertJob);
|
||||
|
||||
schedulerDestroy();
|
||||
}
|
||||
|
||||
TEST(multiThread, forceFree) {
|
||||
pthread_attr_t thattr;
|
||||
pthread_attr_init(&thattr);
|
||||
|
||||
schtInitLogFile();
|
||||
pthread_t thread1, thread2, thread3;
|
||||
pthread_create(&(thread1), &thattr, schtRunJobThread, NULL);
|
||||
pthread_create(&(thread2), &thattr, schtFreeJobThread, NULL);
|
||||
pthread_create(&(thread3), &thattr, schtFetchRspThread, NULL);
|
||||
|
||||
while (true) {
|
||||
if (schtTestDeadLoop) {
|
||||
sleep(1);
|
||||
} else {
|
||||
sleep(schtTestMTRunSec);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
schtTestStop = true;
|
||||
sleep(3);
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
srand(time(NULL));
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
@ -0,0 +1,120 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifdef USE_UV
|
||||
|
||||
#include <uv.h>
|
||||
#include "lz4.h"
|
||||
#include "os.h"
|
||||
#include "rpcCache.h"
|
||||
#include "rpcHead.h"
|
||||
#include "rpcLog.h"
|
||||
#include "rpcTcp.h"
|
||||
#include "rpcUdp.h"
|
||||
#include "taoserror.h"
|
||||
#include "tglobal.h"
|
||||
#include "thash.h"
|
||||
#include "tidpool.h"
|
||||
#include "tmd5.h"
|
||||
#include "tmempool.h"
|
||||
#include "tmsg.h"
|
||||
#include "transportInt.h"
|
||||
#include "tref.h"
|
||||
#include "trpc.h"
|
||||
#include "ttimer.h"
|
||||
#include "tutil.h"
|
||||
|
||||
typedef void* queue[2];
|
||||
/* Private macros. */
|
||||
#define QUEUE_NEXT(q) (*(queue**)&((*(q))[0]))
|
||||
#define QUEUE_PREV(q) (*(queue**)&((*(q))[1]))
|
||||
|
||||
#define QUEUE_PREV_NEXT(q) (QUEUE_NEXT(QUEUE_PREV(q)))
|
||||
#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q)))
|
||||
/* Initialize an empty queue. */
|
||||
#define QUEUE_INIT(q) \
|
||||
{ \
|
||||
QUEUE_NEXT(q) = (q); \
|
||||
QUEUE_PREV(q) = (q); \
|
||||
}
|
||||
|
||||
/* Return true if the queue has no element. */
|
||||
#define QUEUE_IS_EMPTY(q) ((const queue*)(q) == (const queue*)QUEUE_NEXT(q))
|
||||
|
||||
/* Insert an element at the back of a queue. */
|
||||
#define QUEUE_PUSH(q, e) \
|
||||
{ \
|
||||
QUEUE_NEXT(e) = (q); \
|
||||
QUEUE_PREV(e) = QUEUE_PREV(q); \
|
||||
QUEUE_PREV_NEXT(e) = (e); \
|
||||
QUEUE_PREV(q) = (e); \
|
||||
}
|
||||
|
||||
/* Remove the given element from the queue. Any element can be removed at any *
|
||||
* time. */
|
||||
#define QUEUE_REMOVE(e) \
|
||||
{ \
|
||||
QUEUE_PREV_NEXT(e) = QUEUE_NEXT(e); \
|
||||
QUEUE_NEXT_PREV(e) = QUEUE_PREV(e); \
|
||||
}
|
||||
|
||||
/* Return the element at the front of the queue. */
|
||||
#define QUEUE_HEAD(q) (QUEUE_NEXT(q))
|
||||
|
||||
/* Return the element at the back of the queue. */
|
||||
#define QUEUE_TAIL(q) (QUEUE_PREV(q))
|
||||
|
||||
/* Iterate over the element of a queue. * Mutating the queue while iterating
|
||||
* results in undefined behavior. */
|
||||
#define QUEUE_FOREACH(q, e) for ((q) = QUEUE_NEXT(e); (q) != (e); (q) = QUEUE_NEXT(q))
|
||||
|
||||
/* Return the structure holding the given element. */
|
||||
#define QUEUE_DATA(e, type, field) ((type*)((void*)((char*)(e)-offsetof(type, field))))
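/* A minimal usage sketch of the intrusive queue above, assuming a hypothetical
 * SDemoItem struct (names are illustrative only): the owner embeds a `queue`
 * field, QUEUE_PUSH links that field into a list head, and QUEUE_DATA recovers
 * the owning struct from the link.
 *
 *   typedef struct SDemoItem {
 *     int   value;
 *     queue q;                 // intrusive link, no separate node allocation
 *   } SDemoItem;
 *
 *   static void demoQueueUsage() {
 *     queue head;
 *     QUEUE_INIT(&head);
 *
 *     SDemoItem item = {.value = 1};
 *     QUEUE_PUSH(&head, &item.q);                    // append at the tail
 *
 *     while (!QUEUE_IS_EMPTY(&head)) {
 *       queue*     h = QUEUE_HEAD(&head);
 *       SDemoItem* p = QUEUE_DATA(h, SDemoItem, q);  // link -> owning struct
 *       QUEUE_REMOVE(h);
 *       // use p->value here
 *     }
 *   }
 */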
|
||||
|
||||
typedef struct {
|
||||
SRpcInfo* pRpc; // associated SRpcInfo
|
||||
SEpSet epSet; // ip list provided by app
|
||||
void* ahandle; // handle provided by app
|
||||
struct SRpcConn* pConn; // pConn allocated
|
||||
tmsg_t msgType; // message type
|
||||
uint8_t* pCont; // content provided by app
|
||||
int32_t contLen; // content length
|
||||
int32_t code; // error code
|
||||
  int16_t          numOfTry;  // number of tries for different servers
|
||||
int8_t oldInUse; // server EP inUse passed by app
|
||||
int8_t redirect; // flag to indicate redirect
|
||||
int8_t connType; // connection type
|
||||
int64_t rid; // refId returned by taosAddRef
|
||||
SRpcMsg* pRsp; // for synchronous API
|
||||
tsem_t* pSem; // for synchronous API
|
||||
SEpSet* pSet; // for synchronous API
|
||||
char msg[0]; // RpcHead starts from here
|
||||
} SRpcReqContext;
|
||||
|
||||
#define container_of(ptr, type, member) ((type*)((char*)(ptr)-offsetof(type, member)))
|
||||
#define RPC_RESERVE_SIZE (sizeof(SRpcReqContext))
|
||||
|
||||
#define RPC_MSG_OVERHEAD (sizeof(SRpcReqContext) + sizeof(SRpcHead) + sizeof(SRpcDigest))
|
||||
#define rpcHeadFromCont(cont) ((SRpcHead*)((char*)cont - sizeof(SRpcHead)))
|
||||
#define rpcContFromHead(msg) (msg + sizeof(SRpcHead))
|
||||
#define rpcMsgLenFromCont(contLen) (contLen + sizeof(SRpcHead))
|
||||
#define rpcContLenFromMsg(msgLen) (msgLen - sizeof(SRpcHead))
|
||||
#define rpcIsReq(type) (type & 1U)
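/* Memory layout assumed by the macros above (a sketch; the sizes come from the
 * real struct definitions): a single allocation holds the request context, the
 * rpc head, the app content and the digest, in that order, and only the content
 * pointer is handed to the application.
 *
 *   [ SRpcReqContext ][ SRpcHead ][ content (pCont) ... ][ SRpcDigest ]
 *                                 ^ pointer returned by rpcMallocCont()
 *
 * rpcMallocCont() returns start + sizeof(SRpcReqContext) + sizeof(SRpcHead),
 * so rpcHeadFromCont(pCont) can step back to the head, and rpcMsgLenFromCont /
 * rpcContLenFromMsg convert between content length and message length. */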
|
||||
|
||||
int rpcAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey);
|
||||
void rpcBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey);
|
||||
int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen);
|
||||
SRpcHead* rpcDecompressRpcMsg(SRpcHead* pHead);
|
||||
|
||||
#endif
|
|
@ -16,62 +16,61 @@
|
|||
#ifndef _TD_TRANSPORT_INT_H_
|
||||
#define _TD_TRANSPORT_INT_H_
|
||||
|
||||
#ifdef USE_UV
|
||||
#include <uv.h>
|
||||
#endif
|
||||
#include "lz4.h"
|
||||
#include "os.h"
|
||||
#include "rpcCache.h"
|
||||
#include "rpcHead.h"
|
||||
#include "rpcLog.h"
|
||||
#include "rpcTcp.h"
|
||||
#include "rpcUdp.h"
|
||||
#include "taoserror.h"
|
||||
#include "tglobal.h"
|
||||
#include "thash.h"
|
||||
#include "tidpool.h"
|
||||
#include "tmsg.h"
|
||||
#include "tref.h"
|
||||
#include "trpc.h"
|
||||
#include "ttimer.h"
|
||||
#include "tutil.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef USE_UV
|
||||
|
||||
#include <stddef.h>
|
||||
typedef void *queue[2];
|
||||
void* taosInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle);
|
||||
void* taosInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle);
|
||||
|
||||
/* Private macros. */
|
||||
#define QUEUE_NEXT(q) (*(queue **)&((*(q))[0]))
|
||||
#define QUEUE_PREV(q) (*(queue **)&((*(q))[1]))
|
||||
typedef struct {
|
||||
int sessions; // number of sessions allowed
|
||||
int numOfThreads; // number of threads to process incoming messages
|
||||
int idleTime; // milliseconds;
|
||||
uint16_t localPort;
|
||||
int8_t connType;
|
||||
int64_t index;
|
||||
char label[TSDB_LABEL_LEN];
|
||||
|
||||
#define QUEUE_PREV_NEXT(q) (QUEUE_NEXT(QUEUE_PREV(q)))
|
||||
#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q)))
|
||||
char user[TSDB_UNI_LEN]; // meter ID
|
||||
char spi; // security parameter index
|
||||
char encrypt; // encrypt algorithm
|
||||
char secret[TSDB_PASSWORD_LEN]; // secret for the link
|
||||
char ckey[TSDB_PASSWORD_LEN]; // ciphering key
|
||||
|
||||
/* Initialize an empty queue. */
|
||||
#define QUEUE_INIT(q) \
|
||||
{ \
|
||||
QUEUE_NEXT(q) = (q); \
|
||||
QUEUE_PREV(q) = (q); \
|
||||
}
|
||||
void (*cfp)(void* parent, SRpcMsg*, SEpSet*);
|
||||
int (*afp)(void* parent, char* user, char* spi, char* encrypt, char* secret, char* ckey);
|
||||
|
||||
/* Return true if the queue has no element. */
|
||||
#define QUEUE_IS_EMPTY(q) ((const queue *)(q) == (const queue *)QUEUE_NEXT(q))
|
||||
|
||||
/* Insert an element at the back of a queue. */
|
||||
#define QUEUE_PUSH(q, e) \
|
||||
{ \
|
||||
QUEUE_NEXT(e) = (q); \
|
||||
QUEUE_PREV(e) = QUEUE_PREV(q); \
|
||||
QUEUE_PREV_NEXT(e) = (e); \
|
||||
QUEUE_PREV(q) = (e); \
|
||||
}
|
||||
|
||||
/* Remove the given element from the queue. Any element can be removed at any *
|
||||
* time. */
|
||||
#define QUEUE_REMOVE(e) \
|
||||
{ \
|
||||
QUEUE_PREV_NEXT(e) = QUEUE_NEXT(e); \
|
||||
QUEUE_NEXT_PREV(e) = QUEUE_PREV(e); \
|
||||
}
|
||||
|
||||
/* Return the element at the front of the queue. */
|
||||
#define QUEUE_HEAD(q) (QUEUE_NEXT(q))
|
||||
|
||||
/* Return the element at the back of the queue. */
|
||||
#define QUEUE_TAIL(q) (QUEUE_PREV(q))
|
||||
|
||||
/* Iterate over the element of a queue. * Mutating the queue while iterating
|
||||
* results in undefined behavior. */
|
||||
#define QUEUE_FOREACH(q, e) for ((q) = QUEUE_NEXT(e); (q) != (e); (q) = QUEUE_NEXT(q))
|
||||
|
||||
/* Return the structure holding the given element. */
|
||||
#define QUEUE_DATA(e, type, field) ((type *)((void *)((char *)(e)-offsetof(type, field))))
|
||||
int32_t refCount;
|
||||
void* parent;
|
||||
void* idPool; // handle to ID pool
|
||||
void* tmrCtrl; // handle to timer
|
||||
SHashObj* hash; // handle returned by hash utility
|
||||
void* tcphandle; // returned handle from TCP initialization
|
||||
pthread_mutex_t mutex;
|
||||
} SRpcInfo;
|
||||
|
||||
#endif  // USE_UV
|
||||
|
||||
|
|
|
@ -0,0 +1,75 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifdef USE_UV
|
||||
|
||||
#include "transComm.h"
|
||||
|
||||
typedef struct SConnBuffer {
|
||||
char* buf;
|
||||
int len;
|
||||
int cap;
|
||||
int left;
|
||||
} SConnBuffer;
|
||||
|
||||
void* (*taosHandle[])(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) = {
|
||||
taosInitServer, taosInitClient};
|
||||
|
||||
void* rpcOpen(const SRpcInit* pInit) {
|
||||
SRpcInfo* pRpc = calloc(1, sizeof(SRpcInfo));
|
||||
if (pRpc == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
if (pInit->label) {
|
||||
tstrncpy(pRpc->label, pInit->label, strlen(pInit->label));
|
||||
}
|
||||
pRpc->numOfThreads = pInit->numOfThreads > TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS : pInit->numOfThreads;
|
||||
pRpc->connType = pInit->connType;
|
||||
pRpc->tcphandle = (*taosHandle[pRpc->connType])(0, pInit->localPort, pRpc->label, pRpc->numOfThreads, NULL, pRpc);
|
||||
|
||||
return pRpc;
|
||||
}
|
||||
void rpcClose(void* arg) { return; }
|
||||
void* rpcMallocCont(int contLen) {
|
||||
int size = contLen + RPC_MSG_OVERHEAD;
|
||||
|
||||
char* start = (char*)calloc(1, (size_t)size);
|
||||
if (start == NULL) {
|
||||
tError("failed to malloc msg, size:%d", size);
|
||||
return NULL;
|
||||
} else {
|
||||
tTrace("malloc mem:%p size:%d", start, size);
|
||||
}
|
||||
return start + sizeof(SRpcReqContext) + sizeof(SRpcHead);
|
||||
}
|
||||
void rpcFreeCont(void* cont) { return; }
|
||||
void* rpcReallocCont(void* ptr, int contLen) { return NULL; }
|
||||
|
||||
void rpcSendRedirectRsp(void* pConn, const SEpSet* pEpSet) {}
|
||||
int rpcGetConnInfo(void* thandle, SRpcConnInfo* pInfo) { return -1; }
|
||||
void rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pReq, SRpcMsg* pRsp) { return; }
|
||||
int rpcReportProgress(void* pConn, char* pCont, int contLen) { return -1; }
|
||||
void rpcCancelRequest(int64_t rid) { return; }
|
||||
|
||||
int32_t rpcInit(void) {
|
||||
// impl later
|
||||
return -1;
|
||||
}
|
||||
|
||||
void rpcCleanup(void) {
|
||||
// impl later
|
||||
return;
|
||||
}
|
||||
#endif
|
|
@ -0,0 +1,198 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifdef USE_UV
|
||||
|
||||
#include "transComm.h"
|
||||
|
||||
typedef struct SCliConn {
|
||||
uv_connect_t connReq;
|
||||
uv_stream_t* stream;
|
||||
void* data;
|
||||
queue conn;
|
||||
} SCliConn;
|
||||
typedef struct SCliMsg {
|
||||
SRpcReqContext* context;
|
||||
queue q;
|
||||
} SCliMsg;
|
||||
|
||||
typedef struct SCliThrdObj {
|
||||
pthread_t thread;
|
||||
uv_loop_t* loop;
|
||||
uv_async_t* cliAsync; //
|
||||
void* cache; // conn pool
|
||||
queue msg;
|
||||
pthread_mutex_t msgMtx;
|
||||
void* shandle;
|
||||
} SCliThrdObj;
|
||||
|
||||
typedef struct SClientObj {
|
||||
char label[TSDB_LABEL_LEN];
|
||||
int32_t index;
|
||||
int numOfThreads;
|
||||
SCliThrdObj** pThreadObj;
|
||||
} SClientObj;
|
||||
|
||||
static void clientWriteCb(uv_write_t* req, int status);
|
||||
static void clientReadCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
|
||||
static void clientConnCb(struct uv_connect_s* req, int status);
|
||||
static void clientAsyncCb(uv_async_t* handle);
|
||||
|
||||
static void* clientThread(void* arg);
|
||||
|
||||
static void clientWriteCb(uv_write_t* req, int status) {
|
||||
// impl later
|
||||
}
|
||||
static void clientFailedCb(uv_handle_t* handle) {
|
||||
// impl later
|
||||
tDebug("close handle");
|
||||
}
|
||||
static void clientReadCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
|
||||
// impl later
|
||||
}
|
||||
static void clientConnCb(struct uv_connect_s* req, int status) {
|
||||
SCliConn* pConn = req->data;
|
||||
SCliMsg* pMsg = pConn->data;
|
||||
SEpSet* pEpSet = &pMsg->context->epSet;
|
||||
|
||||
char* fqdn = pEpSet->fqdn[pEpSet->inUse];
|
||||
uint32_t port = pEpSet->port[pEpSet->inUse];
|
||||
if (status != 0) {
|
||||
// call user fp later
|
||||
tError("failed to connect server(%s, %d), errmsg: %s", fqdn, port, uv_strerror(status));
|
||||
uv_close((uv_handle_t*)req->handle, clientFailedCb);
|
||||
return;
|
||||
}
|
||||
assert(pConn->stream == req->handle);
|
||||
|
||||
// impl later
|
||||
}
|
||||
|
||||
static SCliConn* getConnFromCache(void* cache, char* ip, uint32_t port) {
|
||||
// impl later
|
||||
return NULL;
|
||||
}
|
||||
static void clientAsyncCb(uv_async_t* handle) {
|
||||
SCliThrdObj* pThrd = handle->data;
|
||||
SCliMsg* pMsg = NULL;
|
||||
pthread_mutex_lock(&pThrd->msgMtx);
|
||||
if (!QUEUE_IS_EMPTY(&pThrd->msg)) {
|
||||
queue* head = QUEUE_HEAD(&pThrd->msg);
|
||||
pMsg = QUEUE_DATA(head, SCliMsg, q);
|
||||
QUEUE_REMOVE(head);
|
||||
}
|
||||
pthread_mutex_unlock(&pThrd->msgMtx);
|
||||
|
||||
SEpSet* pEpSet = &pMsg->context->epSet;
|
||||
char* fqdn = pEpSet->fqdn[pEpSet->inUse];
|
||||
uint32_t port = pEpSet->port[pEpSet->inUse];
|
||||
|
||||
SCliConn* conn = getConnFromCache(pThrd->cache, fqdn, port);
|
||||
if (conn != NULL) {
|
||||
// impl later
|
||||
} else {
|
||||
SCliConn* conn = malloc(sizeof(SCliConn));
|
||||
|
||||
conn->stream = (uv_stream_t*)malloc(sizeof(uv_tcp_t));
|
||||
uv_tcp_init(pThrd->loop, (uv_tcp_t*)(conn->stream));
|
||||
|
||||
conn->connReq.data = conn;
|
||||
conn->data = pMsg;
|
||||
|
||||
struct sockaddr_in addr;
|
||||
uv_ip4_addr(fqdn, port, &addr);
|
||||
// handle error in callback if connect error
|
||||
uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, clientConnCb);
|
||||
}
|
||||
|
||||
// SRpcReqContext* pCxt = pMsg->context;
|
||||
|
||||
// SRpcHead* pHead = rpcHeadFromCont(pCtx->pCont);
|
||||
// char* msg = (char*)pHead;
|
||||
// int len = rpcMsgLenFromCont(pCtx->contLen);
|
||||
// tmsg_t msgType = pCtx->msgType;
|
||||
|
||||
// impl later
|
||||
}
|
||||
|
||||
static void* clientThread(void* arg) {
|
||||
SCliThrdObj* pThrd = (SCliThrdObj*)arg;
|
||||
uv_run(pThrd->loop, UV_RUN_DEFAULT);
|
||||
}
|
||||
|
||||
void* taosInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
|
||||
SClientObj* cli = calloc(1, sizeof(SClientObj));
|
||||
memcpy(cli->label, label, strlen(label));
|
||||
cli->numOfThreads = numOfThreads;
|
||||
cli->pThreadObj = (SCliThrdObj**)calloc(cli->numOfThreads, sizeof(SCliThrdObj*));
|
||||
|
||||
for (int i = 0; i < cli->numOfThreads; i++) {
|
||||
SCliThrdObj* pThrd = (SCliThrdObj*)calloc(1, sizeof(SCliThrdObj));
|
||||
QUEUE_INIT(&pThrd->msg);
|
||||
pthread_mutex_init(&pThrd->msgMtx, NULL);
|
||||
|
||||
// QUEUE_INIT(&pThrd->clientCache);
|
||||
|
||||
pThrd->loop = (uv_loop_t*)malloc(sizeof(uv_loop_t));
|
||||
uv_loop_init(pThrd->loop);
|
||||
|
||||
pThrd->cliAsync = malloc(sizeof(uv_async_t));
|
||||
uv_async_init(pThrd->loop, pThrd->cliAsync, clientAsyncCb);
|
||||
pThrd->cliAsync->data = pThrd;
|
||||
|
||||
pThrd->shandle = shandle;
|
||||
int err = pthread_create(&pThrd->thread, NULL, clientThread, (void*)(pThrd));
|
||||
if (err == 0) {
|
||||
tDebug("sucess to create tranport-client thread %d", i);
|
||||
}
|
||||
cli->pThreadObj[i] = pThrd;
|
||||
}
|
||||
return cli;
|
||||
}
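/* Threading sketch for the client side: every SCliThrdObj owns a private
 * uv_loop_t plus a uv_async_t wake-up handle. rpcSendRequest() below picks a
 * thread round-robin via pRpc->index, pushes the SCliMsg onto that thread's
 * msg queue under msgMtx and calls uv_async_send(); clientAsyncCb() then pops
 * the message on the loop thread and connects or reuses a cached connection. */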
|
||||
|
||||
void rpcSendRequest(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid) {
|
||||
// impl later
|
||||
SRpcInfo* pRpc = (SRpcInfo*)shandle;
|
||||
|
||||
int len = rpcCompressRpcMsg(pMsg->pCont, pMsg->contLen);
|
||||
|
||||
SRpcReqContext* pContext;
|
||||
pContext = (SRpcReqContext*)((char*)pMsg->pCont - sizeof(SRpcHead) - sizeof(SRpcReqContext));
|
||||
pContext->ahandle = pMsg->ahandle;
|
||||
pContext->pRpc = (SRpcInfo*)shandle;
|
||||
pContext->epSet = *pEpSet;
|
||||
pContext->contLen = len;
|
||||
pContext->pCont = pMsg->pCont;
|
||||
pContext->msgType = pMsg->msgType;
|
||||
pContext->oldInUse = pEpSet->inUse;
|
||||
|
||||
assert(pRpc->connType == TAOS_CONN_CLIENT);
|
||||
// atomic or not
|
||||
int64_t index = pRpc->index;
|
||||
if (pRpc->index++ >= pRpc->numOfThreads) {
|
||||
pRpc->index = 0;
|
||||
}
|
||||
SCliMsg* msg = malloc(sizeof(SCliMsg));
|
||||
msg->context = pContext;
|
||||
|
||||
SCliThrdObj* thrd = ((SClientObj*)pRpc->tcphandle)->pThreadObj[index % pRpc->numOfThreads];
|
||||
|
||||
pthread_mutex_lock(&thrd->msgMtx);
|
||||
QUEUE_PUSH(&thrd->msg, &msg->q);
|
||||
pthread_mutex_unlock(&thrd->msgMtx);
|
||||
|
||||
uv_async_send(thrd->cliAsync);
|
||||
}
|
||||
#endif
|
|
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifdef USE_UV
|
||||
|
||||
#include "transComm.h"
|
||||
|
||||
int rpcAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey) {
|
||||
T_MD5_CTX context;
|
||||
int ret = -1;
|
||||
|
||||
tMD5Init(&context);
|
||||
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
|
||||
tMD5Update(&context, (uint8_t*)pMsg, msgLen);
|
||||
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
|
||||
tMD5Final(&context);
|
||||
|
||||
if (memcmp(context.digest, pAuth, sizeof(context.digest)) == 0) ret = 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
void rpcBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey) {
|
||||
T_MD5_CTX context;
|
||||
|
||||
tMD5Init(&context);
|
||||
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
|
||||
tMD5Update(&context, (uint8_t*)pMsg, msgLen);
|
||||
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
|
||||
tMD5Final(&context);
|
||||
|
||||
memcpy(pAuth, context.digest, sizeof(context.digest));
|
||||
}
|
||||
|
||||
int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) {
|
||||
SRpcHead* pHead = rpcHeadFromCont(pCont);
|
||||
int32_t finalLen = 0;
|
||||
int overhead = sizeof(SRpcComp);
|
||||
|
||||
if (!NEEDTO_COMPRESSS_MSG(contLen)) {
|
||||
return contLen;
|
||||
}
|
||||
|
||||
char* buf = malloc(contLen + overhead + 8); // 8 extra bytes
|
||||
if (buf == NULL) {
|
||||
tError("failed to allocate memory for rpc msg compression, contLen:%d", contLen);
|
||||
return contLen;
|
||||
}
|
||||
|
||||
int32_t compLen = LZ4_compress_default(pCont, buf, contLen, contLen + overhead);
|
||||
tDebug("compress rpc msg, before:%d, after:%d, overhead:%d", contLen, compLen, overhead);
|
||||
|
||||
/*
|
||||
   * the compression is applied only when the compressed size is less than contLen - overhead
|
||||
   * The first four bytes are set to 0, the second four bytes keep the original length of the message
|
||||
*/
|
||||
if (compLen > 0 && compLen < contLen - overhead) {
|
||||
SRpcComp* pComp = (SRpcComp*)pCont;
|
||||
pComp->reserved = 0;
|
||||
pComp->contLen = htonl(contLen);
|
||||
memcpy(pCont + overhead, buf, compLen);
|
||||
|
||||
pHead->comp = 1;
|
||||
tDebug("compress rpc msg, before:%d, after:%d", contLen, compLen);
|
||||
finalLen = compLen + overhead;
|
||||
} else {
|
||||
finalLen = contLen;
|
||||
}
|
||||
|
||||
free(buf);
|
||||
return finalLen;
|
||||
}
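/* When compression is applied, the content is rewritten in place as
 *
 *   [ SRpcComp { reserved = 0, contLen = htonl(original length) } ][ LZ4 data ]
 *
 * pHead->comp is set to 1 and compLen + overhead is returned; otherwise the
 * content is left untouched and the original contLen is returned.
 * rpcDecompressRpcMsg() below reverses this: it checks comp, reads the original
 * length back from SRpcComp, and LZ4-decompresses into a new buffer that again
 * reserves room for an SRpcReqContext in front of the head. */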
|
||||
|
||||
SRpcHead* rpcDecompressRpcMsg(SRpcHead* pHead) {
|
||||
int overhead = sizeof(SRpcComp);
|
||||
SRpcHead* pNewHead = NULL;
|
||||
uint8_t* pCont = pHead->content;
|
||||
SRpcComp* pComp = (SRpcComp*)pHead->content;
|
||||
|
||||
if (pHead->comp) {
|
||||
// decompress the content
|
||||
assert(pComp->reserved == 0);
|
||||
int contLen = htonl(pComp->contLen);
|
||||
|
||||
// prepare the temporary buffer to decompress message
|
||||
char* temp = (char*)malloc(contLen + RPC_MSG_OVERHEAD);
|
||||
pNewHead = (SRpcHead*)(temp + sizeof(SRpcReqContext)); // reserve SRpcReqContext
|
||||
|
||||
if (pNewHead) {
|
||||
int compLen = rpcContLenFromMsg(pHead->msgLen) - overhead;
|
||||
int origLen = LZ4_decompress_safe((char*)(pCont + overhead), (char*)pNewHead->content, compLen, contLen);
|
||||
assert(origLen == contLen);
|
||||
|
||||
memcpy(pNewHead, pHead, sizeof(SRpcHead));
|
||||
pNewHead->msgLen = rpcMsgLenFromCont(origLen);
|
||||
/// rpcFreeMsg(pHead); // free the compressed message buffer
|
||||
pHead = pNewHead;
|
||||
tTrace("decomp malloc mem:%p", temp);
|
||||
} else {
|
||||
tError("failed to allocate memory to decompress msg, contLen:%d", contLen);
|
||||
}
|
||||
}
|
||||
|
||||
return pHead;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -14,118 +14,7 @@
|
|||
*/
|
||||
|
||||
#ifdef USE_UV
|
||||
|
||||
#include <uv.h>
|
||||
#include "lz4.h"
|
||||
#include "os.h"
|
||||
#include "rpcCache.h"
|
||||
#include "rpcHead.h"
|
||||
#include "rpcLog.h"
|
||||
#include "rpcTcp.h"
|
||||
#include "rpcUdp.h"
|
||||
#include "taoserror.h"
|
||||
#include "tglobal.h"
|
||||
#include "thash.h"
|
||||
#include "tidpool.h"
|
||||
#include "tmd5.h"
|
||||
#include "tmempool.h"
|
||||
#include "tmsg.h"
|
||||
#include "transportInt.h"
|
||||
#include "tref.h"
|
||||
#include "trpc.h"
|
||||
#include "ttimer.h"
|
||||
#include "tutil.h"
|
||||
|
||||
#define container_of(ptr, type, member) ((type*)((char*)(ptr)-offsetof(type, member)))
|
||||
#define RPC_RESERVE_SIZE (sizeof(SRpcReqContext))
|
||||
static const char* notify = "a";
|
||||
|
||||
typedef struct {
|
||||
int sessions; // number of sessions allowed
|
||||
int numOfThreads; // number of threads to process incoming messages
|
||||
int idleTime; // milliseconds;
|
||||
uint16_t localPort;
|
||||
int8_t connType;
|
||||
int index; // for UDP server only, round robin for multiple threads
|
||||
char label[TSDB_LABEL_LEN];
|
||||
|
||||
char user[TSDB_UNI_LEN]; // meter ID
|
||||
char spi; // security parameter index
|
||||
char encrypt; // encrypt algorithm
|
||||
char secret[TSDB_PASSWORD_LEN]; // secret for the link
|
||||
char ckey[TSDB_PASSWORD_LEN]; // ciphering key
|
||||
|
||||
void (*cfp)(void* parent, SRpcMsg*, SEpSet*);
|
||||
int (*afp)(void* parent, char* user, char* spi, char* encrypt, char* secret, char* ckey);
|
||||
|
||||
int32_t refCount;
|
||||
void* parent;
|
||||
void* idPool; // handle to ID pool
|
||||
void* tmrCtrl; // handle to timer
|
||||
SHashObj* hash; // handle returned by hash utility
|
||||
void* tcphandle; // returned handle from TCP initialization
|
||||
void* udphandle; // returned handle from UDP initialization
|
||||
void* pCache; // connection cache
|
||||
pthread_mutex_t mutex;
|
||||
struct SRpcConn* connList; // connection list
|
||||
} SRpcInfo;
|
||||
|
||||
typedef struct {
|
||||
SRpcInfo* pRpc; // associated SRpcInfo
|
||||
SEpSet epSet; // ip list provided by app
|
||||
void* ahandle; // handle provided by app
|
||||
struct SRpcConn* pConn; // pConn allocated
|
||||
tmsg_t msgType; // message type
|
||||
uint8_t* pCont; // content provided by app
|
||||
int32_t contLen; // content length
|
||||
int32_t code; // error code
|
||||
int16_t numOfTry; // number of try for different servers
|
||||
int8_t oldInUse; // server EP inUse passed by app
|
||||
int8_t redirect; // flag to indicate redirect
|
||||
int8_t connType; // connection type
|
||||
int64_t rid; // refId returned by taosAddRef
|
||||
SRpcMsg* pRsp; // for synchronous API
|
||||
tsem_t* pSem; // for synchronous API
|
||||
SEpSet* pSet; // for synchronous API
|
||||
char msg[0]; // RpcHead starts from here
|
||||
} SRpcReqContext;
|
||||
|
||||
typedef struct SThreadObj {
|
||||
pthread_t thread;
|
||||
uv_pipe_t* pipe;
|
||||
int fd;
|
||||
uv_loop_t* loop;
|
||||
uv_async_t* workerAsync; //
|
||||
queue conn;
|
||||
pthread_mutex_t connMtx;
|
||||
void* shandle;
|
||||
} SThreadObj;
|
||||
|
||||
typedef struct SClientObj {
|
||||
char label[TSDB_LABEL_LEN];
|
||||
int32_t index;
|
||||
int numOfThreads;
|
||||
SThreadObj** pThreadObj;
|
||||
} SClientObj;
|
||||
|
||||
#define RPC_MSG_OVERHEAD (sizeof(SRpcReqContext) + sizeof(SRpcHead) + sizeof(SRpcDigest))
|
||||
#define rpcHeadFromCont(cont) ((SRpcHead*)((char*)cont - sizeof(SRpcHead)))
|
||||
#define rpcContFromHead(msg) (msg + sizeof(SRpcHead))
|
||||
#define rpcMsgLenFromCont(contLen) (contLen + sizeof(SRpcHead))
|
||||
#define rpcContLenFromMsg(msgLen) (msgLen - sizeof(SRpcHead))
|
||||
#define rpcIsReq(type) (type & 1U)
|
||||
|
||||
typedef struct SServerObj {
|
||||
pthread_t thread;
|
||||
uv_tcp_t server;
|
||||
uv_loop_t* loop;
|
||||
int workerIdx;
|
||||
int numOfThreads;
|
||||
SThreadObj** pThreadObj;
|
||||
uv_pipe_t** pipe;
|
||||
uint32_t ip;
|
||||
uint32_t port;
|
||||
} SServerObj;
|
||||
#include "transComm.h"
|
||||
|
||||
typedef struct SConnBuffer {
|
||||
char* buf;
|
||||
|
@ -134,7 +23,7 @@ typedef struct SConnBuffer {
|
|||
int left;
|
||||
} SConnBuffer;
|
||||
|
||||
typedef struct SRpcConn {
|
||||
typedef struct SConn {
|
||||
uv_tcp_t* pTcp;
|
||||
uv_write_t* pWriter;
|
||||
uv_timer_t* pTimer;
|
||||
|
@ -148,7 +37,7 @@ typedef struct SRpcConn {
|
|||
int count;
|
||||
void* shandle; // rpc init
|
||||
void* ahandle; //
|
||||
void* hostThread;
|
||||
void* hostThrd;
|
||||
// del later
|
||||
char secured;
|
||||
int spi;
|
||||
|
@ -156,16 +45,37 @@ typedef struct SRpcConn {
|
|||
char user[TSDB_UNI_LEN]; // user ID for the link
|
||||
char secret[TSDB_PASSWORD_LEN];
|
||||
char ckey[TSDB_PASSWORD_LEN]; // ciphering key
|
||||
} SRpcConn;
|
||||
} SConn;
|
||||
|
||||
// auth function
|
||||
static int uvAuthMsg(SRpcConn* pConn, char* msg, int msgLen);
|
||||
static int rpcAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey);
|
||||
static void rpcBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey);
|
||||
static int rpcAddAuthPart(SRpcConn* pConn, char* msg, int msgLen);
|
||||
// compress data
|
||||
static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen);
|
||||
static SRpcHead* rpcDecompressRpcMsg(SRpcHead* pHead);
|
||||
typedef struct SWorkThrdObj {
|
||||
pthread_t thread;
|
||||
uv_pipe_t* pipe;
|
||||
int fd;
|
||||
uv_loop_t* loop;
|
||||
uv_async_t* workerAsync; //
|
||||
queue conn;
|
||||
pthread_mutex_t connMtx;
|
||||
void* shandle;
|
||||
} SWorkThrdObj;
|
||||
|
||||
typedef struct SServerObj {
|
||||
pthread_t thread;
|
||||
uv_tcp_t server;
|
||||
uv_loop_t* loop;
|
||||
int workerIdx;
|
||||
int numOfThreads;
|
||||
SWorkThrdObj** pThreadObj;
|
||||
uv_pipe_t** pipe;
|
||||
uint32_t ip;
|
||||
uint32_t port;
|
||||
} SServerObj;
|
||||
|
||||
static const char* notify = "a";
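// "notify" is the one-byte payload written over a worker's IPC pipe when a
// connection is handed off to it. uvOnConnectionCb() below asserts that the
// received byte equals notify[0] and then takes the pending UV_TCP handle off
// the pipe (uv_pipe_pending_count / uv_pipe_pending_type), so the byte only
// wakes the worker; the connection itself travels as the pipe's pending handle.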
|
||||
|
||||
// refactor later
|
||||
static int rpcAddAuthPart(SConn* pConn, char* msg, int msgLen);
|
||||
|
||||
static int uvAuthMsg(SConn* pConn, char* msg, int msgLen);
|
||||
|
||||
static void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
|
||||
static void uvAllocReadBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
|
||||
|
@ -176,79 +86,17 @@ static void uvOnAcceptCb(uv_stream_t* stream, int status);
|
|||
static void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf);
|
||||
static void uvWorkerAsyncCb(uv_async_t* handle);
|
||||
|
||||
static SRpcConn* connCreate();
|
||||
static void connDestroy(SRpcConn* conn);
|
||||
static void uvConnDestroy(uv_handle_t* handle);
|
||||
// whether a complete packet has already been read
|
||||
static bool readComplete(SConnBuffer* buf);
|
||||
|
||||
static SConn* connCreate();
|
||||
static void connDestroy(SConn* conn);
|
||||
static void uvConnDestroy(uv_handle_t* handle);
|
||||
|
||||
// server worker thread
|
||||
static void* workerThread(void* arg);
|
||||
static void* acceptThread(void* arg);
|
||||
|
||||
void* taosInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle);
|
||||
void* taosInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle);
|
||||
|
||||
void* (*taosHandle[])(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) = {taosInitServer, taosInitClient};
|
||||
|
||||
void* taosInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
|
||||
SClientObj* cli = calloc(1, sizeof(SClientObj));
|
||||
memcpy(cli->label, label, strlen(label));
|
||||
cli->numOfThreads = numOfThreads;
|
||||
cli->pThreadObj = (SThreadObj**)calloc(cli->numOfThreads, sizeof(SThreadObj*));
|
||||
|
||||
for (int i = 0; i < cli->numOfThreads; i++) {
|
||||
SThreadObj* thrd = (SThreadObj*)calloc(1, sizeof(SThreadObj));
|
||||
|
||||
int err = pthread_create(&thrd->thread, NULL, workerThread, (void*)(thrd));
|
||||
if (err == 0) {
|
||||
tDebug("sucess to create tranport-client thread %d", i);
|
||||
}
|
||||
}
|
||||
return cli;
|
||||
}
|
||||
|
||||
void* taosInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
|
||||
SServerObj* srv = calloc(1, sizeof(SServerObj));
|
||||
srv->loop = (uv_loop_t*)malloc(sizeof(uv_loop_t));
|
||||
srv->numOfThreads = numOfThreads;
|
||||
srv->workerIdx = 0;
|
||||
srv->pThreadObj = (SThreadObj**)calloc(srv->numOfThreads, sizeof(SThreadObj*));
|
||||
srv->pipe = (uv_pipe_t**)calloc(srv->numOfThreads, sizeof(uv_pipe_t*));
|
||||
srv->ip = ip;
|
||||
srv->port = port;
|
||||
uv_loop_init(srv->loop);
|
||||
|
||||
for (int i = 0; i < srv->numOfThreads; i++) {
|
||||
SThreadObj* thrd = (SThreadObj*)calloc(1, sizeof(SThreadObj));
|
||||
srv->pipe[i] = (uv_pipe_t*)calloc(2, sizeof(uv_pipe_t));
|
||||
int fds[2];
|
||||
if (uv_socketpair(AF_UNIX, SOCK_STREAM, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
|
||||
return NULL;
|
||||
}
|
||||
uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1);
|
||||
uv_pipe_open(&(srv->pipe[i][0]), fds[1]); // init write
|
||||
|
||||
thrd->shandle = shandle;
|
||||
thrd->fd = fds[0];
|
||||
thrd->pipe = &(srv->pipe[i][1]); // init read
|
||||
int err = pthread_create(&(thrd->thread), NULL, workerThread, (void*)(thrd));
|
||||
if (err == 0) {
|
||||
tDebug("sucess to create worker-thread %d", i);
|
||||
// printf("thread %d create\n", i);
|
||||
} else {
|
||||
// TODO: clear all other resource later
|
||||
tError("failed to create worker-thread %d", i);
|
||||
}
|
||||
srv->pThreadObj[i] = thrd;
|
||||
}
|
||||
|
||||
int err = pthread_create(&srv->thread, NULL, acceptThread, (void*)srv);
|
||||
if (err == 0) {
|
||||
tDebug("success to create accept-thread");
|
||||
} else {
|
||||
// clear all resource later
|
||||
}
|
||||
|
||||
return srv;
|
||||
}
|
||||
void uvAllocReadBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
|
||||
/*
|
||||
   * format of data buffer:
|
||||
|
@ -256,8 +104,8 @@ void uvAllocReadBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* b
|
|||
*/
|
||||
static const int CAPACITY = 1024;
|
||||
|
||||
SRpcConn* ctx = handle->data;
|
||||
SConnBuffer* pBuf = &ctx->connBuf;
|
||||
SConn* conn = handle->data;
|
||||
SConnBuffer* pBuf = &conn->connBuf;
|
||||
if (pBuf->cap == 0) {
|
||||
pBuf->buf = (char*)calloc(CAPACITY + RPC_RESERVE_SIZE, sizeof(char));
|
||||
pBuf->len = 0;
|
||||
|
@ -280,9 +128,10 @@ void uvAllocReadBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* b
|
|||
buf->len = pBuf->cap - pBuf->len;
|
||||
}
|
||||
}
|
||||
|
||||
// check whether the data read from the socket forms a complete packet
|
||||
//
|
||||
static bool isReadAll(SConnBuffer* data) {
|
||||
static bool readComplete(SConnBuffer* data) {
|
||||
// TODO(yihao): handle pipeline later
|
||||
SRpcHead rpcHead;
|
||||
int32_t headLen = sizeof(rpcHead);
|
||||
|
@ -299,10 +148,11 @@ static bool isReadAll(SConnBuffer* data) {
|
|||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static void uvDoProcess(SRecvInfo* pRecv) {
|
||||
SRpcHead* pHead = (SRpcHead*)pRecv->msg;
|
||||
SRpcInfo* pRpc = (SRpcInfo*)pRecv->shandle;
|
||||
SRpcConn* pConn = pRecv->thandle;
|
||||
SConn* pConn = pRecv->thandle;
|
||||
|
||||
tDump(pRecv->msg, pRecv->msgLen);
|
||||
|
||||
|
@ -311,7 +161,8 @@ static void uvDoProcess(SRecvInfo* pRecv) {
|
|||
|
||||
// do auth and check
|
||||
}
|
||||
static int uvAuthMsg(SRpcConn* pConn, char* msg, int len) {
|
||||
|
||||
static int uvAuthMsg(SConn* pConn, char* msg, int len) {
|
||||
SRpcHead* pHead = (SRpcHead*)msg;
|
||||
int code = 0;
|
||||
|
||||
|
@ -325,7 +176,8 @@ static int uvAuthMsg(SRpcConn* pConn, char* msg, int len) {
|
|||
if (!rpcIsReq(pHead->msgType)) {
|
||||
// for response, if code is auth failure, it shall bypass the auth process
|
||||
code = htonl(pHead->code);
|
||||
if (code == TSDB_CODE_RPC_INVALID_TIME_STAMP || code == TSDB_CODE_RPC_AUTH_FAILURE || code == TSDB_CODE_RPC_INVALID_VERSION || code == TSDB_CODE_RPC_AUTH_REQUIRED ||
|
||||
if (code == TSDB_CODE_RPC_INVALID_TIME_STAMP || code == TSDB_CODE_RPC_AUTH_FAILURE ||
|
||||
code == TSDB_CODE_RPC_INVALID_VERSION || code == TSDB_CODE_RPC_AUTH_REQUIRED ||
|
||||
code == TSDB_CODE_MND_USER_NOT_EXIST || code == TSDB_CODE_RPC_NOT_READY) {
|
||||
pHead->msgLen = (int32_t)htonl((uint32_t)pHead->msgLen);
|
||||
// tTrace("%s, dont check authentication since code is:0x%x", pConn->info, code);
|
||||
|
@ -361,12 +213,14 @@ static int uvAuthMsg(SRpcConn* pConn, char* msg, int len) {
|
|||
|
||||
return code;
|
||||
}
|
||||
|
||||
// refers specifically to query or insert timeout
|
||||
static void uvHandleActivityTimeout(uv_timer_t* handle) {
|
||||
// impl later
|
||||
SRpcConn* conn = handle->data;
|
||||
SConn* conn = handle->data;
|
||||
}
|
||||
static void uvProcessData(SRpcConn* pConn) {
|
||||
|
||||
static void uvProcessData(SConn* pConn) {
|
||||
SRecvInfo info;
|
||||
SRecvInfo* p = &info;
|
||||
SConnBuffer* pBuf = &pConn->connBuf;
|
||||
|
@ -408,13 +262,14 @@ static void uvProcessData(SRpcConn* pConn) {
|
|||
// auth
|
||||
// validate msg type
|
||||
}
|
||||
|
||||
void uvOnReadCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
|
||||
// opt
|
||||
SRpcConn* ctx = cli->data;
|
||||
SConn* ctx = cli->data;
|
||||
SConnBuffer* pBuf = &ctx->connBuf;
|
||||
if (nread > 0) {
|
||||
pBuf->len += nread;
|
||||
if (isReadAll(pBuf)) {
|
||||
if (readComplete(pBuf)) {
|
||||
tDebug("alread read complete packet");
|
||||
uvProcessData(ctx);
|
||||
} else {
|
||||
|
@ -442,7 +297,7 @@ void uvOnTimeoutCb(uv_timer_t* handle) {
|
|||
}
|
||||
|
||||
void uvOnWriteCb(uv_write_t* req, int status) {
|
||||
SRpcConn* conn = req->data;
|
||||
SConn* conn = req->data;
|
||||
if (status == 0) {
|
||||
tDebug("data already was written on stream");
|
||||
} else {
|
||||
|
@ -452,15 +307,15 @@ void uvOnWriteCb(uv_write_t* req, int status) {
|
|||
}
|
||||
|
||||
void uvWorkerAsyncCb(uv_async_t* handle) {
|
||||
SThreadObj* pThrd = container_of(handle, SThreadObj, workerAsync);
|
||||
SRpcConn* conn = NULL;
|
||||
SWorkThrdObj* pThrd = container_of(handle, SWorkThrdObj, workerAsync);
|
||||
SConn* conn = NULL;
|
||||
|
||||
// opt later
|
||||
pthread_mutex_lock(&pThrd->connMtx);
|
||||
if (!QUEUE_IS_EMPTY(&pThrd->conn)) {
|
||||
queue* head = QUEUE_HEAD(&pThrd->conn);
|
||||
conn = QUEUE_DATA(head, SRpcConn, queue);
|
||||
QUEUE_REMOVE(&conn->queue);
|
||||
conn = QUEUE_DATA(head, SConn, queue);
|
||||
QUEUE_REMOVE(head);
|
||||
}
|
||||
pthread_mutex_unlock(&pThrd->connMtx);
|
||||
if (conn == NULL) {
|
||||
|
@ -507,7 +362,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
|
|||
assert(buf->base[0] == notify[0]);
|
||||
free(buf->base);
|
||||
|
||||
SThreadObj* pThrd = q->data;
|
||||
SWorkThrdObj* pThrd = q->data;
|
||||
|
||||
uv_pipe_t* pipe = (uv_pipe_t*)q;
|
||||
if (!uv_pipe_pending_count(pipe)) {
|
||||
|
@ -518,14 +373,14 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
|
|||
uv_handle_type pending = uv_pipe_pending_type(pipe);
|
||||
assert(pending == UV_TCP);
|
||||
|
||||
SRpcConn* pConn = connCreate();
|
||||
SConn* pConn = connCreate();
|
||||
pConn->shandle = pThrd->shandle;
|
||||
/* init conn timer*/
|
||||
pConn->pTimer = malloc(sizeof(uv_timer_t));
|
||||
uv_timer_init(pThrd->loop, pConn->pTimer);
|
||||
pConn->pTimer->data = pConn;
|
||||
|
||||
pConn->hostThread = pThrd;
|
||||
pConn->hostThrd = pThrd;
|
||||
  pConn->pWorkerAsync = pThrd->workerAsync;  // thread safety
|
||||
|
||||
// init client handle
|
||||
|
@ -564,17 +419,19 @@ void* acceptThread(void* arg) {
|
|||
uv_run(srv->loop, UV_RUN_DEFAULT);
|
||||
}
|
||||
void* workerThread(void* arg) {
|
||||
SThreadObj* pThrd = (SThreadObj*)arg;
|
||||
SWorkThrdObj* pThrd = (SWorkThrdObj*)arg;
|
||||
|
||||
pThrd->loop = (uv_loop_t*)malloc(sizeof(uv_loop_t));
|
||||
uv_loop_init(pThrd->loop);
|
||||
|
||||
// SRpcInfo* pRpc = pThrd->shandle;
|
||||
uv_pipe_init(pThrd->loop, pThrd->pipe, 1);
|
||||
uv_pipe_open(pThrd->pipe, pThrd->fd);
|
||||
|
||||
pThrd->pipe->data = pThrd;
|
||||
|
||||
QUEUE_INIT(&pThrd->conn);
|
||||
pthread_mutex_init(&pThrd->connMtx, NULL);
|
||||
|
||||
pThrd->workerAsync = malloc(sizeof(uv_async_t));
|
||||
uv_async_init(pThrd->loop, pThrd->workerAsync, uvWorkerAsyncCb);
|
||||
|
@ -582,11 +439,12 @@ void* workerThread(void* arg) {
|
|||
uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
|
||||
uv_run(pThrd->loop, UV_RUN_DEFAULT);
|
||||
}
|
||||
static SRpcConn* connCreate() {
|
||||
SRpcConn* pConn = (SRpcConn*)calloc(1, sizeof(SRpcConn));
|
||||
|
||||
static SConn* connCreate() {
|
||||
SConn* pConn = (SConn*)calloc(1, sizeof(SConn));
|
||||
return pConn;
|
||||
}
|
||||
static void connDestroy(SRpcConn* conn) {
|
||||
static void connDestroy(SConn* conn) {
|
||||
if (conn == NULL) {
|
||||
return;
|
||||
}
|
||||
|
@ -600,78 +458,10 @@ static void connDestroy(SRpcConn* conn) {
|
|||
// handle
|
||||
}
|
||||
static void uvConnDestroy(uv_handle_t* handle) {
|
||||
SRpcConn* conn = handle->data;
|
||||
SConn* conn = handle->data;
|
||||
connDestroy(conn);
|
||||
}
|
||||
void* rpcOpen(const SRpcInit* pInit) {
|
||||
SRpcInfo* pRpc = calloc(1, sizeof(SRpcInfo));
|
||||
if (pRpc == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
if (pInit->label) {
|
||||
tstrncpy(pRpc->label, pInit->label, strlen(pInit->label));
|
||||
}
|
||||
pRpc->numOfThreads = pInit->numOfThreads > TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS : pInit->numOfThreads;
|
||||
pRpc->connType = pInit->connType;
|
||||
pRpc->tcphandle = (*taosHandle[pRpc->connType])(0, pInit->localPort, pRpc->label, pRpc->numOfThreads, NULL, pRpc);
|
||||
// pRpc->taosInitServer(0, pInit->localPort, pRpc->label, pRpc->numOfThreads, NULL, pRpc);
|
||||
return pRpc;
|
||||
}
|
||||
void rpcClose(void* arg) { return; }
|
||||
void* rpcMallocCont(int contLen) { return NULL; }
|
||||
void rpcFreeCont(void* cont) { return; }
|
||||
void* rpcReallocCont(void* ptr, int contLen) { return NULL; }
|
||||
|
||||
void rpcSendRequest(void* thandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* rid) {
|
||||
// impl later
|
||||
return;
|
||||
}
|
||||
|
||||
void rpcSendResponse(const SRpcMsg* pMsg) {
|
||||
SRpcConn* pConn = pMsg->handle;
|
||||
SThreadObj* pThrd = pConn->hostThread;
|
||||
|
||||
// opt later
|
||||
pthread_mutex_lock(&pThrd->connMtx);
|
||||
QUEUE_PUSH(&pThrd->conn, &pConn->queue);
|
||||
pthread_mutex_unlock(&pThrd->connMtx);
|
||||
|
||||
uv_async_send(pConn->pWorkerAsync);
|
||||
}
|
||||
|
||||
void rpcSendRedirectRsp(void* pConn, const SEpSet* pEpSet) {}
|
||||
int rpcGetConnInfo(void* thandle, SRpcConnInfo* pInfo) { return -1; }
|
||||
void rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pReq, SRpcMsg* pRsp) { return; }
|
||||
int rpcReportProgress(void* pConn, char* pCont, int contLen) { return -1; }
|
||||
void rpcCancelRequest(int64_t rid) { return; }
|
||||
|
||||
static int rpcAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey) {
|
||||
T_MD5_CTX context;
|
||||
int ret = -1;
|
||||
|
||||
tMD5Init(&context);
|
||||
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
|
||||
tMD5Update(&context, (uint8_t*)pMsg, msgLen);
|
||||
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
|
||||
tMD5Final(&context);
|
||||
|
||||
if (memcmp(context.digest, pAuth, sizeof(context.digest)) == 0) ret = 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
static void rpcBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey) {
|
||||
T_MD5_CTX context;
|
||||
|
||||
tMD5Init(&context);
|
||||
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
|
||||
tMD5Update(&context, (uint8_t*)pMsg, msgLen);
|
||||
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
|
||||
tMD5Final(&context);
|
||||
|
||||
memcpy(pAuth, context.digest, sizeof(context.digest));
|
||||
}
|
||||
|
||||
static int rpcAddAuthPart(SRpcConn* pConn, char* msg, int msgLen) {
|
||||
static int rpcAddAuthPart(SConn* pConn, char* msg, int msgLen) {
|
||||
SRpcHead* pHead = (SRpcHead*)msg;
|
||||
|
||||
if (pConn->spi && pConn->secured == 0) {
|
||||
|
@ -690,84 +480,61 @@ static int rpcAddAuthPart(SRpcConn* pConn, char* msg, int msgLen) {
|
|||
return msgLen;
|
||||
}
|
||||
|
||||
static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) {
|
||||
SRpcHead* pHead = rpcHeadFromCont(pCont);
|
||||
int32_t finalLen = 0;
|
||||
int overhead = sizeof(SRpcComp);
|
||||
void* taosInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
|
||||
SServerObj* srv = calloc(1, sizeof(SServerObj));
|
||||
srv->loop = (uv_loop_t*)malloc(sizeof(uv_loop_t));
|
||||
srv->numOfThreads = numOfThreads;
|
||||
srv->workerIdx = 0;
|
||||
srv->pThreadObj = (SWorkThrdObj**)calloc(srv->numOfThreads, sizeof(SWorkThrdObj*));
|
||||
srv->pipe = (uv_pipe_t**)calloc(srv->numOfThreads, sizeof(uv_pipe_t*));
|
||||
srv->ip = ip;
|
||||
srv->port = port;
|
||||
uv_loop_init(srv->loop);
|
||||
|
||||
if (!NEEDTO_COMPRESSS_MSG(contLen)) {
|
||||
return contLen;
|
||||
}
|
||||
|
||||
char* buf = malloc(contLen + overhead + 8); // 8 extra bytes
|
||||
if (buf == NULL) {
|
||||
tError("failed to allocate memory for rpc msg compression, contLen:%d", contLen);
|
||||
return contLen;
|
||||
}
|
||||
|
||||
int32_t compLen = LZ4_compress_default(pCont, buf, contLen, contLen + overhead);
|
||||
tDebug("compress rpc msg, before:%d, after:%d, overhead:%d", contLen, compLen, overhead);
|
||||
|
||||
/*
|
||||
* only the compressed size is less than the value of contLen - overhead, the compression is applied
|
||||
* The first four bytes is set to 0, the second four bytes are utilized to keep the original length of message
|
||||
*/
|
||||
if (compLen > 0 && compLen < contLen - overhead) {
|
||||
SRpcComp* pComp = (SRpcComp*)pCont;
|
||||
pComp->reserved = 0;
|
||||
pComp->contLen = htonl(contLen);
|
||||
memcpy(pCont + overhead, buf, compLen);
|
||||
|
||||
pHead->comp = 1;
|
||||
tDebug("compress rpc msg, before:%d, after:%d", contLen, compLen);
|
||||
finalLen = compLen + overhead;
|
||||
} else {
|
||||
finalLen = contLen;
|
||||
}
|
||||
|
||||
free(buf);
|
||||
return finalLen;
|
||||
}
|
||||
|
||||
static SRpcHead* rpcDecompressRpcMsg(SRpcHead* pHead) {
|
||||
int overhead = sizeof(SRpcComp);
|
||||
SRpcHead* pNewHead = NULL;
|
||||
uint8_t* pCont = pHead->content;
|
||||
SRpcComp* pComp = (SRpcComp*)pHead->content;
|
||||
|
||||
if (pHead->comp) {
|
||||
// decompress the content
|
||||
assert(pComp->reserved == 0);
|
||||
int contLen = htonl(pComp->contLen);
|
||||
|
||||
// prepare the temporary buffer to decompress message
|
||||
char* temp = (char*)malloc(contLen + RPC_MSG_OVERHEAD);
|
||||
pNewHead = (SRpcHead*)(temp + sizeof(SRpcReqContext)); // reserve SRpcReqContext
|
||||
|
||||
if (pNewHead) {
|
||||
int compLen = rpcContLenFromMsg(pHead->msgLen) - overhead;
|
||||
int origLen = LZ4_decompress_safe((char*)(pCont + overhead), (char*)pNewHead->content, compLen, contLen);
|
||||
assert(origLen == contLen);
|
||||
|
||||
memcpy(pNewHead, pHead, sizeof(SRpcHead));
|
||||
pNewHead->msgLen = rpcMsgLenFromCont(origLen);
|
||||
/// rpcFreeMsg(pHead); // free the compressed message buffer
|
||||
pHead = pNewHead;
|
||||
tTrace("decomp malloc mem:%p", temp);
|
||||
} else {
|
||||
tError("failed to allocate memory to decompress msg, contLen:%d", contLen);
|
||||
for (int i = 0; i < srv->numOfThreads; i++) {
|
||||
SWorkThrdObj* thrd = (SWorkThrdObj*)calloc(1, sizeof(SWorkThrdObj));
|
||||
srv->pipe[i] = (uv_pipe_t*)calloc(2, sizeof(uv_pipe_t));
|
||||
int fds[2];
|
||||
if (uv_socketpair(AF_UNIX, SOCK_STREAM, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
|
||||
return NULL;
|
||||
}
|
||||
uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1);
|
||||
uv_pipe_open(&(srv->pipe[i][0]), fds[1]); // init write
|
||||
|
||||
thrd->shandle = shandle;
|
||||
thrd->fd = fds[0];
|
||||
thrd->pipe = &(srv->pipe[i][1]); // init read
|
||||
int err = pthread_create(&(thrd->thread), NULL, workerThread, (void*)(thrd));
|
||||
if (err == 0) {
|
||||
tDebug("sucess to create worker-thread %d", i);
|
||||
// printf("thread %d create\n", i);
|
||||
} else {
|
||||
// TODO: clear all other resource later
|
||||
tError("failed to create worker-thread %d", i);
|
||||
}
|
||||
srv->pThreadObj[i] = thrd;
|
||||
}
|
||||
|
||||
return pHead;
|
||||
}
|
||||
int32_t rpcInit(void) {
|
||||
// impl later
|
||||
return -1;
|
||||
int err = pthread_create(&srv->thread, NULL, acceptThread, (void*)srv);
|
||||
if (err == 0) {
|
||||
tDebug("success to create accept-thread");
|
||||
} else {
|
||||
// clear all resource later
|
||||
}
|
||||
|
||||
return srv;
|
||||
}
|
||||
|
||||
void rpcCleanup(void) {
|
||||
// impl later
|
||||
return;
|
||||
void rpcSendResponse(const SRpcMsg* pMsg) {
|
||||
SConn* pConn = pMsg->handle;
|
||||
SWorkThrdObj* pThrd = pConn->hostThrd;
|
||||
|
||||
// opt later
|
||||
pthread_mutex_lock(&pThrd->connMtx);
|
||||
QUEUE_PUSH(&pThrd->conn, &pConn->queue);
|
||||
pthread_mutex_unlock(&pThrd->connMtx);
|
||||
|
||||
uv_async_send(pConn->pWorkerAsync);
|
||||
}
|
||||
|
||||
#endif
|
|
@ -34,7 +34,8 @@ typedef struct {
|
|||
|
||||
static void processResponse(void *pParent, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
||||
SInfo *pInfo = (SInfo *)pMsg->ahandle;
|
||||
tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen, pMsg->code);
|
||||
tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen,
|
||||
pMsg->code);
|
||||
|
||||
if (pEpSet) pInfo->epSet = *pEpSet;
|
||||
|
||||
|
@ -185,7 +186,8 @@ int main(int argc, char *argv[]) {
|
|||
// float usedTime = (endTime - startTime) / 1000.0f; // mseconds
|
||||
|
||||
// tInfo("it takes %.3f mseconds to send %d requests to server", usedTime, numOfReqs * appThreads);
|
||||
// tInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0 * numOfReqs * appThreads / usedTime, msgSize);
|
||||
// tInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0 * numOfReqs * appThreads / usedTime,
|
||||
// msgSize);
|
||||
|
||||
int ch = getchar();
|
||||
UNUSED(ch);
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
#include "transComm.h"
|
||||
#include "transportInt.h"
|
||||
#include "trpc.h"
|
||||
|
||||
|
@ -46,7 +47,7 @@ class QueueObj {
|
|||
if (!IsEmpty()) {
|
||||
queue *h = QUEUE_HEAD(&head);
|
||||
el = QUEUE_DATA(h, QueueElem, q);
|
||||
QUEUE_REMOVE(&el->q);
|
||||
QUEUE_REMOVE(h);
|
||||
}
|
||||
return el;
|
||||
}
|
||||
|
|
|
@ -149,6 +149,7 @@ int walCheckAndRepairMeta(SWal* pWal) {
|
|||
}
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
regfree(&logRegPattern);
|
||||
regfree(&idxRegPattern);
|
||||
|
||||
|
|
|
@ -360,6 +360,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_CANCELLING, "Task cancelling")
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_DROPPING, "Task dropping")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_DUPLICATTED_OPERATION, "Duplicatted operation")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_MSG_ERROR, "Task message error")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JOB_FREED, "Job already freed")
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# clean test environment
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# cleanCluster.sh
|
||||
# -r [ dnode root dir]
|
||||
|
||||
|
||||
dataRootDir="/data"
|
||||
|
||||
|
||||
while getopts "hr:" arg
|
||||
do
|
||||
case $arg in
|
||||
r)
|
||||
dataRootDir=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -r [ dnode root dir] "
|
||||
exit 0
|
||||
;;
|
||||
    ?) #unknown option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
||||
rmDnodesDataDir() {
|
||||
if [ -d ${dataRootDir} ]; then
|
||||
rm -rf ${dataRootDir}/dnode*
|
||||
else
|
||||
echo "${dataRootDir} not exist"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function kill_process() {
|
||||
pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
########################################################################################
|
||||
############################### main process ##########################################
|
||||
|
||||
## kill all taosd process
|
||||
kill_process taosd
|
||||
|
||||
rmDnodesDataDir
|
||||
|
||||
|
|
@ -0,0 +1,81 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# compile test version
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# compileVersion.sh
|
||||
# -r [ TDengine project dir]
|
||||
# -v [ TDengine branch version ]
|
||||
|
||||
|
||||
projectDir=/root/TDengine
|
||||
TDengineBrVer="3.0"
|
||||
|
||||
while getopts "hr:v:" arg
|
||||
do
|
||||
case $arg in
|
||||
r)
|
||||
projectDir=$(echo $OPTARG)
|
||||
;;
|
||||
v)
|
||||
TDengineBrVer=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -r [ TDengine project dir] "
|
||||
echo " -v [ TDengine branch version] "
|
||||
exit 0
|
||||
;;
|
||||
    ?) #unknown option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "projectDir=${projectDir} TDengineBrVer=${TDengineBrVer}"
|
||||
|
||||
function gitPullBranchInfo () {
|
||||
branch_name=$1
|
||||
|
||||
git checkout $branch_name
|
||||
echo "==== git pull $branch_name start ===="
|
||||
## git submodule update --init --recursive
|
||||
git pull origin $branch_name ||:
|
||||
echo "==== git pull $branch_name end ===="
|
||||
}
|
||||
|
||||
function compileTDengineVersion() {
|
||||
debugDir=debug
|
||||
if [ -d ${debugDir} ]; then
|
||||
rm -rf ${debugDir}/* ||:
|
||||
else
|
||||
mkdir -p ${debugDir}
|
||||
fi
|
||||
|
||||
cd ${debugDir}
|
||||
cmake ..
|
||||
make -j24
|
||||
}
|
||||
########################################################################################
|
||||
############################### main process ##########################################
|
||||
|
||||
## checkout all branchs and git pull
|
||||
cd ${projectDir}
|
||||
gitPullBranchInfo $TDengineBrVer
|
||||
compileTDengineVersion
|
||||
|
||||
taos_dir=${projectDir}/debug/tools/shell
|
||||
taosd_dir=${projectDir}/debug/source/dnode/mgmt/daemon
|
||||
create_table_dir=${projectDir}/debug/tests/test/c
|
||||
|
||||
rm -f /usr/bin/taos
|
||||
rm -f /usr/bin/taosd
|
||||
rm -f /usr/bin/create_table
|
||||
|
||||
ln -s $taos_dir/taos /usr/bin/taos
|
||||
ln -s $taosd_dir/taosd /usr/bin/taosd
|
||||
ln -s $create_table_dir/create_table /usr/bin/create_table
|
||||
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# deploy test cluster
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# deployCluster.sh
|
||||
curr_dir=$(readlink -f "$(dirname "$0")")
|
||||
echo $curr_dir
|
||||
|
||||
${curr_dir}/cleanCluster.sh -r "/data"
|
||||
${curr_dir}/cleanCluster.sh -r "/data2"
|
||||
|
||||
${curr_dir}/compileVersion.sh -r ${curr_dir}/../../../../ -v "3.0"
|
||||
|
||||
${curr_dir}/setupDnodes.sh -r "/data" -n 1 -f "trd02:7000" -p 7000
|
||||
${curr_dir}/setupDnodes.sh -r "/data2" -n 1 -f "trd02:7000" -p 8000
|
||||
|
||||
#./setupDnodes.sh -r "/data" -n 2 -f trd02:7000 -p 7000
|
||||
#./setupDnodes.sh -r "/data2" -n 2 -f trd02:7000 -p 8000
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,137 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# setup test environment
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# setupDnodes.sh
|
||||
# -e [ new | old]
|
||||
# -n [ dnode number]
|
||||
# -f [ first ep]
|
||||
# -p [ start port]
|
||||
# -r [ dnode root dir]
|
||||
|
||||
# set parameters by default value
|
||||
enviMode=new
|
||||
dataRootDir="/data"
|
||||
firstEp="localhost:7000"
|
||||
startPort=7000
|
||||
dnodeNumber=1
|
||||
|
||||
|
||||
while getopts "he:f:n:r:p:" arg
|
||||
do
|
||||
case $arg in
|
||||
e)
|
||||
enviMode=$( echo $OPTARG )
|
||||
;;
|
||||
n)
|
||||
dnodeNumber=$(echo $OPTARG)
|
||||
;;
|
||||
f)
|
||||
firstEp=$(echo $OPTARG)
|
||||
;;
|
||||
p)
|
||||
startPort=$(echo $OPTARG)
|
||||
;;
|
||||
r)
|
||||
dataRootDir=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -e [new | old] "
|
||||
echo " -n [ dnode number] "
|
||||
echo " -f [ first ep] "
|
||||
echo " -p [ start port] "
|
||||
echo " -r [ dnode root dir] "
|
||||
exit 0
|
||||
;;
|
||||
    ?) #unknown option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "enviMode=${enviMode} dnodeNumber=${dnodeNumber} dataRootDir=${dataRootDir} firstEp=${firstEp} startPort=${startPort}"
|
||||
|
||||
#curr_dir=$(pwd)
|
||||
|
||||
|
||||
createNewCfgFile() {
|
||||
cfgFile=$1/taos.cfg
|
||||
dataDir=$2
|
||||
logDir=$3
|
||||
firstEp=$4
|
||||
serverPort=$5
|
||||
|
||||
echo "debugFlag 131" > ${cfgFile}
|
||||
echo "firstEp ${firstEp}" >> ${cfgFile}
|
||||
echo "dataDir ${dataDir}" >> ${cfgFile}
|
||||
echo "logDir ${logDir}" >> ${cfgFile}
|
||||
echo "serverPort ${serverPort}" >> ${cfgFile}
|
||||
|
||||
echo "supportVnodes 1024" >> ${cfgFile}
|
||||
#echo "asyncLog 0" >> ${cfgFile}
|
||||
echo "telemetryReporting 0" >> ${cfgFile}
|
||||
}
|
||||
|
||||
createNewDnodesDataDir() {
|
||||
if [ -d ${dataRootDir} ]; then
|
||||
rm -rf ${dataRootDir}/dnode*
|
||||
else
|
||||
echo "${dataRootDir} not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
dnodeNumber=$1
|
||||
firstEp=$2
|
||||
|
||||
serverPort=${startPort}
|
||||
for ((i=0; i<${dnodeNumber}; i++)); do
|
||||
mkdir -p ${dataRootDir}/dnode_${i}/cfg
|
||||
mkdir -p ${dataRootDir}/dnode_${i}/log
|
||||
mkdir -p ${dataRootDir}/dnode_${i}/data
|
||||
|
||||
createNewCfgFile ${dataRootDir}/dnode_${i}/cfg ${dataRootDir}/dnode_${i}/data ${dataRootDir}/dnode_${i}/log ${firstEp} ${serverPort}
|
||||
#echo "create dnode: ${serverPort}, ${dataRootDir}/dnode_${i}"
|
||||
serverPort=$((10#${serverPort}+100))
|
||||
done
|
||||
}
|
||||
|
||||
function kill_process() {
|
||||
pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
startDnodes() {
|
||||
dnodeNumber=$1
|
||||
|
||||
for ((i=0; i<${dnodeNumber}; i++)); do
|
||||
if [ -d ${dataRootDir}/dnode_${i} ]; then
|
||||
nohup taosd -c ${dataRootDir}/dnode_${i}/cfg >/dev/null 2>&1 &
|
||||
echo "start taosd ${dataRootDir}/dnode_${i}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
########################################################################################
###############################  main process  ########################################

## kill all taosd processes
#kill_process taosd

## create directories for all dnodes
if [[ "$enviMode" == "new" ]]; then
  createNewDnodesDataDir ${dnodeNumber} ${firstEp}
fi

## start all dnodes by nohup
startDnodes ${dnodeNumber}

echo "====run setupDnodes.sh end===="
echo " "
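For reference, a minimal usage sketch of the script above (the invocation and counts are assumed example values, not part of this commit): a fresh run creates one directory tree per dnode under ${dataRootDir}, and createNewCfgFile writes the per-dnode taos.cfg shown in the comments.

# Hypothetical invocation: 2 dnodes on one machine, fresh environment.
./setupDnodes.sh -e new -n 2 -r "/data" -f "localhost:7000" -p 7000

# Expected layout (dnode_1 gets serverPort 7100, i.e. startPort + 100):
#   /data/dnode_0/{cfg,data,log}
#   /data/dnode_1/{cfg,data,log}

# Content written to /data/dnode_0/cfg/taos.cfg by createNewCfgFile:
#   debugFlag 131
#   firstEp localhost:7000
#   dataDir /data/dnode_0/data
#   logDir /data/dnode_0/log
#   serverPort 7000
#   supportVnodes 1024
#   telemetryReporting 0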
@ -46,9 +46,9 @@ typedef struct {
  pthread_t thread;
} SThreadInfo;

//void parseArgument(int32_t argc, char *argv[]);
//void *threadFunc(void *param);
//void createDbAndStb();
// void parseArgument(int32_t argc, char *argv[]);
// void *threadFunc(void *param);
// void createDbAndStb();

void createDbAndStb() {
  pPrint("start to create db and stable");
@ -64,7 +64,8 @@ void createDbAndStb() {
  TAOS_RES *pRes = taos_query(con, qstr);
  int32_t code = taos_errno(pRes);
  if (code != 0) {
    pError("failed to create database:%s, sql:%s, code:%d reason:%s", dbName, qstr, taos_errno(pRes), taos_errstr(pRes));
    pError("failed to create database:%s, sql:%s, code:%d reason:%s", dbName, qstr, taos_errno(pRes),
           taos_errstr(pRes));
    exit(0);
  }
  taos_free_result(pRes);
@ -129,10 +130,9 @@ static int64_t getResult(TAOS_RES *tres) {
  return numOfRows;
}


void showTables() {
void showTables() {
  pPrint("start to show tables");
  char qstr[32];
  char qstr[128];

  TAOS *con = taos_connect(NULL, "root", "taosdata", NULL, 0);
  if (con == NULL) {
@ -140,9 +140,9 @@ void showTables() {
    exit(1);
  }

  sprintf(qstr, "use %s", dbName);
  snprintf(qstr, 128, "use %s", dbName);
  TAOS_RES *pRes = taos_query(con, qstr);
  int code = taos_errno(pRes);
  int code = taos_errno(pRes);
  if (code != 0) {
    pError("failed to use db, code:%d reason:%s", taos_errno(pRes), taos_errstr(pRes));
    exit(1);
@ -160,12 +160,11 @@ void showTables() {
  int64_t totalTableNum = getResult(pRes);
  taos_free_result(pRes);

  pPrint("%s database: %s, total %" PRId64 " tables %s", GREEN, dbName, totalTableNum, NC);
  pPrint("%s database: %s, total %" PRId64 " tables %s", GREEN, dbName, totalTableNum, NC);

  taos_close(con);
}


void *threadFunc(void *param) {
  SThreadInfo *pInfo = (SThreadInfo *)param;
  char *qstr = malloc(2000 * 1000);
@ -177,48 +176,48 @@ void *threadFunc(void *param) {
    exit(1);
  }

  //printf("thread:%d, table range: %"PRId64 " - %"PRId64 "\n", pInfo->threadIndex, pInfo->tableBeginIndex, pInfo->tableEndIndex);
  // printf("thread:%d, table range: %"PRId64 " - %"PRId64 "\n", pInfo->threadIndex, pInfo->tableBeginIndex,
  // pInfo->tableEndIndex);
  sprintf(qstr, "use %s", pInfo->dbName);
  TAOS_RES *pRes = taos_query(con, qstr);
  taos_free_result(pRes);


  if (createTable) {
    int64_t curMs = 0;
    int64_t beginMs = taosGetTimestampMs();
    pInfo->startMs = beginMs;
    int64_t t = pInfo->tableBeginIndex;
    int64_t t = pInfo->tableBeginIndex;
    for (; t <= pInfo->tableEndIndex;) {
      //int64_t batch = (pInfo->tableEndIndex - t);
      //batch = MIN(batch, batchNum);
      // int64_t batch = (pInfo->tableEndIndex - t);
      // batch = MIN(batch, batchNum);

      int32_t len = sprintf(qstr, "create table");
      for (int32_t i = 0; i < batchNum;) {
        len += sprintf(qstr + len, " %s_t%" PRId64 " using %s tags(%" PRId64 ")", stbName, t, stbName, t);
        t++;
        i++;
        t++;
        i++;
        if (t > pInfo->tableEndIndex) {
          break;
        }
          break;
        }
      }

      int64_t startTs = taosGetTimestampUs();
      int64_t startTs = taosGetTimestampUs();
      TAOS_RES *pRes = taos_query(con, qstr);
      code = taos_errno(pRes);
      if (code != 0) {
      if ((code != 0) && (code != 0x0002)) {
        pError("failed to create table t%" PRId64 ", reason:%s", t, tstrerror(code));
      }
      taos_free_result(pRes);
      int64_t endTs = taosGetTimestampUs();
      int64_t delay = endTs - startTs;
      //printf("==== %"PRId64" - %"PRId64", %"PRId64"\n", startTs, endTs, delay);
      if (delay > pInfo->maxDelay) pInfo->maxDelay = delay;
      int64_t endTs = taosGetTimestampUs();
      int64_t delay = endTs - startTs;
      // printf("==== %"PRId64" - %"PRId64", %"PRId64"\n", startTs, endTs, delay);
      if (delay > pInfo->maxDelay) pInfo->maxDelay = delay;
      if (delay < pInfo->minDelay) pInfo->minDelay = delay;

      curMs = taosGetTimestampMs();
      if (curMs - beginMs > 10000) {
        beginMs = curMs;
        //printf("==== tableBeginIndex: %"PRId64", t: %"PRId64"\n", pInfo->tableBeginIndex, t);
      curMs = taosGetTimestampMs();
      if (curMs - beginMs > 10000) {
        beginMs = curMs;
        // printf("==== tableBeginIndex: %"PRId64", t: %"PRId64"\n", pInfo->tableBeginIndex, t);
        printCreateProgress(pInfo, t);
      }
    }
@ -227,7 +226,7 @@ void *threadFunc(void *param) {

  if (insertData) {
    int64_t curMs = 0;
    int64_t beginMs = taosGetTimestampMs();;
    int64_t beginMs = taosGetTimestampMs();

    pInfo->startMs = taosGetTimestampMs();
    for (int64_t t = pInfo->tableBeginIndex; t < pInfo->tableEndIndex; ++t) {
@ -247,7 +246,7 @@ void *threadFunc(void *param) {
      taos_free_result(pRes);

      curMs = taosGetTimestampMs();
      if (curMs - beginMs > 10000) {
      if (curMs - beginMs > 10000) {
        printInsertProgress(pInfo, t);
      }
      t += (batch - 1);
@ -335,33 +334,32 @@ int32_t main(int32_t argc, char *argv[]) {
  parseArgument(argc, argv);

  if (showTablesFlag) {
    showTables();
    return 0;
    showTables();
    return 0;
  }


  createDbAndStb();

  pPrint("%d threads are spawned to create %d tables", numOfThreads, numOfThreads);
  pPrint("%d threads are spawned to create %" PRId64 " tables", numOfThreads, numOfTables);

  pthread_attr_t thattr;
  pthread_attr_init(&thattr);
  pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
  SThreadInfo *pInfo = (SThreadInfo *)calloc(numOfThreads, sizeof(SThreadInfo));

  //int64_t numOfTablesPerThread = numOfTables / numOfThreads;
  //numOfTables = numOfTablesPerThread * numOfThreads;

  // int64_t numOfTablesPerThread = numOfTables / numOfThreads;
  // numOfTables = numOfTablesPerThread * numOfThreads;

  if (numOfThreads < 1) {
    numOfThreads = 1;
  }


  int64_t a = numOfTables / numOfThreads;
  if (a < 1) {
    numOfThreads = numOfTables;
    a = 1;
    numOfThreads = numOfTables;
    a = 1;
  }


  int64_t b = 0;
  b = numOfTables % numOfThreads;

@ -371,7 +369,7 @@ int32_t main(int32_t argc, char *argv[]) {
    pInfo[i].tableEndIndex = i < b ? tableFrom + a : tableFrom + a - 1;
    tableFrom = pInfo[i].tableEndIndex + 1;
    pInfo[i].threadIndex = i;
    pInfo[i].minDelay = INT64_MAX;
    pInfo[i].minDelay = INT64_MAX;
    strcpy(pInfo[i].dbName, dbName);
    strcpy(pInfo[i].stbName, stbName);
    pthread_create(&(pInfo[i].thread), &thattr, threadFunc, (void *)(pInfo + i));
@ -390,7 +388,7 @@ int32_t main(int32_t argc, char *argv[]) {
    createTableSpeed += pInfo[i].createTableSpeed;

    if (pInfo[i].maxDelay > maxDelay) maxDelay = pInfo[i].maxDelay;
    if (pInfo[i].minDelay < minDelay) minDelay = pInfo[i].minDelay;
    if (pInfo[i].minDelay < minDelay) minDelay = pInfo[i].minDelay;
  }

  float insertDataSpeed = 0;
@ -398,21 +396,15 @@ int32_t main(int32_t argc, char *argv[]) {
    insertDataSpeed += pInfo[i].insertDataSpeed;
  }

  pPrint("%s total %" PRId64 " tables, %.1f tables/second, threads:%d, maxDelay: %" PRId64 "us, minDelay: %" PRId64 "us %s",
         GREEN,
         numOfTables,
         createTableSpeed,
         numOfThreads,
         maxDelay,
         minDelay,
         NC);
  pPrint("%s total %" PRId64 " tables, %.1f tables/second, threads:%d, maxDelay: %" PRId64 "us, minDelay: %" PRId64
         "us %s",
         GREEN, numOfTables, createTableSpeed, numOfThreads, maxDelay, minDelay, NC);

  if (insertData) {
    pPrint("%s total %" PRId64 " tables, %.1f rows/second, threads:%d %s", GREEN, numOfTables, insertDataSpeed,
           numOfThreads, NC);
    pPrint("%s total %" PRId64 " tables, %.1f rows/second, threads:%d %s", GREEN, numOfTables, insertDataSpeed,
           numOfThreads, NC);
  }

  pthread_attr_destroy(&thattr);
  free(pInfo);
}
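As a side note on the hunks above: each worker thread gets its table range from a = numOfTables / numOfThreads and b = numOfTables % numOfThreads, with the first b threads taking one extra table (tableEndIndex = i < b ? tableFrom + a : tableFrom + a - 1). A minimal sketch of that split with assumed example values, written in bash rather than the C source:

# Sketch of the table-range split used in main(); the numbers are illustrative only.
numOfTables=10
numOfThreads=3
a=$(( numOfTables / numOfThreads ))   # base tables per thread
b=$(( numOfTables % numOfThreads ))   # this many threads take one extra table
tableFrom=0
for ((i=0; i<numOfThreads; i++)); do
  if (( i < b )); then
    tableEnd=$(( tableFrom + a ))
  else
    tableEnd=$(( tableFrom + a - 1 ))
  fi
  echo "thread ${i}: tables ${tableFrom} - ${tableEnd}"
  tableFrom=$(( tableEnd + 1 ))
done
# prints: thread 0: tables 0 - 3, thread 1: tables 4 - 6, thread 2: tables 7 - 9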