refactor: do some internal refactoring.

Haojun Liao 2024-01-11 11:50:52 +08:00
parent f6c0649108
commit 58694c67dd
7 changed files with 18 additions and 23 deletions

View File

@@ -23,7 +23,6 @@
extern "C" {
#endif
typedef struct SQWorkerPool SQWorkerPool;
typedef struct SWWorkerPool SWWorkerPool;
typedef struct SQueueWorker {
@@ -60,14 +59,14 @@ typedef struct SWWorker {
SWWorkerPool *pool;
} SWWorker;
-typedef struct SWWorkerPool {
+struct SWWorkerPool {
int32_t max; // max number of workers
int32_t num;
int32_t nextId; // from 0 to max-1, cyclic
const char *name;
SWWorker *workers;
TdThreadMutex mutex;
-} SWWorkerPool;
+};
int32_t tQWorkerInit(SQWorkerPool *pool);
void tQWorkerCleanup(SQWorkerPool *pool);
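Note: the hunks above switch tworker.h from typedef-ing SWWorkerPool at its definition to a forward typedef plus a plain struct definition. A minimal standalone sketch of that pattern, with the mutex and worker-array fields replaced by placeholders purely for illustration:

```c
#include <stdint.h>

/* Forward typedef: code that only passes pool pointers around can use
 * the SWWorkerPool name without seeing the full definition. */
typedef struct SWWorkerPool SWWorkerPool;

/* The definition now uses the plain struct tag; no trailing typedef
 * name is needed because the alias already exists. */
struct SWWorkerPool {
  int32_t     max;     // max number of workers
  int32_t     num;
  int32_t     nextId;  // from 0 to max-1, cyclic
  const char *name;
  void       *workers; // placeholder for SWWorker* in this sketch
  int         mutex;   // placeholder for TdThreadMutex in this sketch
};

int32_t swPoolCapacity(const SWWorkerPool *pool) { return pool->max; }
```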

View File

@@ -166,7 +166,7 @@ static const SSysDbTableSchema streamTaskSchema[] = {
{.name = "node_type", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "level", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 15 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
{.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},

View File

@@ -58,7 +58,7 @@ int32_t tsNumOfMnodeQueryThreads = 4;
int32_t tsNumOfMnodeFetchThreads = 1;
int32_t tsNumOfMnodeReadThreads = 1;
int32_t tsNumOfVnodeQueryThreads = 4;
-float tsRatioOfVnodeStreamThreads = 4.0;
+float tsRatioOfVnodeStreamThreads = 1.0;
int32_t tsNumOfVnodeFetchThreads = 4;
int32_t tsNumOfVnodeRsmaThreads = 2;
int32_t tsNumOfQnodeQueryThreads = 4;
@@ -621,7 +621,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
0)
return -1;
-if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER,
+if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 10, CFG_SCOPE_SERVER,
CFG_DYN_NONE) != 0)
return -1;
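The two hunks above lower the default ratioOfVnodeStreamThreads from 4.0 to 1.0 and narrow the accepted range from [0.01, 100] to [0.01, 10]. A minimal sketch of the range check this implies; checkStreamThreadRatio is a hypothetical helper written for this note, not code from the repository:

```c
#include <stdbool.h>
#include <stdio.h>

// Bounds copied from the cfgAddFloat() call in the hunk above.
static const float kRatioMin = 0.01f;
static const float kRatioMax = 10.0f;

// Hypothetical helper: accept only values inside the configured range.
static bool checkStreamThreadRatio(float ratio) {
  return ratio >= kRatioMin && ratio <= kRatioMax;
}

int main(void) {
  printf("ratio 1.0  -> %s\n", checkStreamThreadRatio(1.0f) ? "accepted" : "rejected");
  printf("ratio 50.0 -> %s\n", checkStreamThreadRatio(50.0f) ? "accepted" : "rejected");  // allowed before, rejected now
  return 0;
}
```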

View File

@@ -407,12 +407,8 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
if (tWWorkerInit(pFPool) != 0) return -1;
SSingleWorkerCfg mgmtCfg = {
-.min = 1,
-.max = 1,
-.name = "vnode-mgmt",
-.fp = (FItem)vmProcessMgmtQueue,
-.param = pMgmt,
-};
+.min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};
if (tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg) != 0) return -1;
dDebug("vnode workers are initialized");
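The vmStartWorker() hunk is a formatting-only change: the same designated initializer for the vnode-mgmt worker config, collapsed onto one line. A standalone sketch with simplified stand-in types (the field set mirrors the diff; the struct and callback types here are placeholders, not the real SSingleWorkerCfg):

```c
#include <stdint.h>

typedef void (*FItem)(void *queue, void *item);  // simplified stand-in

typedef struct {
  int32_t     min;
  int32_t     max;
  const char *name;
  FItem       fp;
  void       *param;
} SWorkerCfgSketch;  // stand-in for SSingleWorkerCfg

void demoInit(void *pMgmt, FItem mgmtFp) {
  // One-line form, equivalent to the multi-line initializer removed above.
  SWorkerCfgSketch mgmtCfg = {
      .min = 1, .max = 1, .name = "vnode-mgmt", .fp = mgmtFp, .param = pMgmt};
  (void)mgmtCfg;  // the real code passes the config on to tSingleWorkerInit()
}
```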

View File

@@ -1102,7 +1102,7 @@ static int32_t metaHeartbeatToMnodeImpl(SStreamMeta* pMeta) {
.inputQUsed = SIZE_IN_MiB(streamQueueGetItemSize((*pTask)->inputq.queue)),
};
-entry.inputRate = entry.inputQUsed * 100.0 / STREAM_TASK_QUEUE_CAPACITY_IN_SIZE;
+entry.inputRate = entry.inputQUsed * 100.0 / (2*STREAM_TASK_QUEUE_CAPACITY_IN_SIZE);
if ((*pTask)->info.taskLevel == TASK_LEVEL__SINK) {
entry.sinkQuota = (*pTask)->outputInfo.pTokenBucket->quotaRate;
entry.sinkDataSize = SIZE_IN_MiB((*pTask)->execInfo.sink.dataSize);
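The final hunk doubles the denominator of the inputRate calculation, so the reported percentage for a given queue usage is halved. A small arithmetic sketch; the 100 MiB capacity used here is an assumed example value, not the real STREAM_TASK_QUEUE_CAPACITY_IN_SIZE:

```c
#include <stdio.h>

// Assumed capacity for this sketch only; the real constant lives in the
// TDengine stream headers and may differ.
#define QUEUE_CAPACITY_MiB_SKETCH 100.0

int main(void) {
  double inputQUsed = 30.0;  // example: 30 MiB currently buffered
  double oldRate = inputQUsed * 100.0 / QUEUE_CAPACITY_MiB_SKETCH;        // 30.0
  double newRate = inputQUsed * 100.0 / (2 * QUEUE_CAPACITY_MiB_SKETCH);  // 15.0
  printf("old inputRate = %.1f%%, new inputRate = %.1f%%\n", oldRate, newRate);
  return 0;
}
```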