Merge branch '3.0' of github.com:taosdata/TDengine into szhou/python-udf

This commit is contained in:
slzhou 2023-03-01 15:51:43 +08:00
commit c499d7f173
73 changed files with 1397 additions and 458 deletions

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 9cfe416
GIT_TAG 7920f98
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 1e15545
GIT_TAG 0111c66
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -202,16 +202,18 @@ After the installation is complete, double-click the /applications/TDengine to s
The following `launchctl` commands can help you manage TDengine service:
- Start TDengine Server: `launchctl start com.tdengine.taosd`
- Start TDengine Server: `sudo launchctl start com.tdengine.taosd`
- Stop TDengine Server: `launchctl stop com.tdengine.taosd`
- Stop TDengine Server: `sudo launchctl stop com.tdengine.taosd`
- Check TDengine Server status: `launchctl list | grep taosd`
- Check TDengine Server status: `sudo launchctl list | grep taosd`
:::info
- The `launchctl` command does not require _root_ privileges. You don't need to use the `sudo` command.
- The first content returned by the `launchctl list | grep taosd` command is the PID of the program, if '-' indicates that the TDengine service is not running.
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
- Administrator privileges are required for service management, in order to enhance security.
- Troubleshooting:
- The first column returned by the command `launchctl list | grep taosd` is the PID of the program. If it's `-`, that means the TDengine service is not running.
- If the service is abnormal, check the `launchd.log` file in the system log or the `taosdlog` logs in the `/var/log/taos` directory for more information.
:::

View File

@ -86,10 +86,10 @@ SHOW FUNCTIONS;
Shows all user-defined functions in the system.
## SHOW LICENSE
## SHOW LICENCES
```sql
SHOW LICENSE;
SHOW LICENCES;
SHOW GRANTS;
```
@ -359,7 +359,7 @@ Shows the working configuration of the parameters that must be the same on each
SHOW [db_name.]VGROUPS;
```
Shows information about all vgroups in the system or about the vgroups for a specified database.
Shows information about all vgroups in the current database.
## SHOW VNODES

View File

@ -83,7 +83,7 @@ The parameters described in this document by the effect that they have on the sy
| :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
| TCP | 6043 | Service Port of TaosKeeper | The parameter of TaosKeeper |
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters.
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters.
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |

View File

@ -67,7 +67,7 @@ sudo systemctl start telegraf
Log in to the Grafana interface using a web browser at `IP:3000`, with the system's initial username and password being `admin/admin`.
Click on the gear icon on the left and select `Plugins`, you should find the TDengine data source plugin icon.
Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen.
Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v3.json` (for TDengine 3.0; for TDengine 2.x, please use `telegraf-dashboard-v2.json`), download the dashboard JSON file and import it. You will then see the dashboard in the following screen.
![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp)

View File

@ -196,20 +196,22 @@ Active: inactive (dead)
<TabItem label="macOS 系统" value="macos">
After installation, double-click the TDengine icon in the Applications directory to start the program, or run `launchctl start com.tdengine.taosd` to start the TDengine service process.
After installation, double-click the TDengine icon in the Applications directory to start the program, or run `sudo launchctl start com.tdengine.taosd` to start the TDengine service process.
The following `launchctl` commands can help you manage the TDengine service:
The following `launchctl` commands are used to manage the TDengine service:
- Start the service process: `launchctl start com.tdengine.taosd`
- Start the service process: `sudo launchctl start com.tdengine.taosd`
- Stop the service process: `launchctl stop com.tdengine.taosd`
- Stop the service process: `sudo launchctl stop com.tdengine.taosd`
- Check the service status: `launchctl list | grep taosd`
- Check the service status: `sudo launchctl list | grep taosd`
:::info
- The `launchctl` command does not require administrator privileges; do not prefix it with `sudo`.
- The first field returned by the `launchctl list | grep taosd` command is the PID of the program; if it is `-`, the TDengine service is not running.
- Managing `com.tdengine.taosd` with the `launchctl` command requires administrator privileges; always prefix it with `sudo` to enhance security.
- The first column returned by the `sudo launchctl list | grep taosd` command is the PID of the `taosd` process; if it is `-`, the TDengine service is not running.
- Troubleshooting:
- If the service behaves abnormally, check the system log `launchd.log` or the `taosdlog` logs in the `/var/log/taos` directory for more information.
:::

View File

@ -86,10 +86,10 @@ SHOW FUNCTIONS;
Shows the user-defined functions.
## SHOW LICENSE
## SHOW LICENCES
```sql
SHOW LICENSE;
SHOW LICENCES;
SHOW GRANTS;
```
@ -303,7 +303,7 @@ SHOW DNODE dnode_id VARIABLES;
SHOW [db_name.]VGROUPS;
```
Shows information about all vgroups in the system or about the vgroups of a specified database.
Shows information about all vgroups in the current database.
## SHOW VNODES

View File

@ -83,7 +83,7 @@ taos --dump-config
| :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server, and between nodes in a multi-node cluster. | Determined by the serverPort setting in the configuration file. |
| TCP | 6041 | RESTful communication between client and server. | Changes with the serverPort setting. Note that the taosAdapter configuration may differ; see the corresponding [documentation](/reference/taosadapter/). |
| TCP | 6043 | Service port of the TaosKeeper monitoring service. | Changes with the TaosKeeper startup parameters. |
| TCP | 6043 | Service port of the taosKeeper monitoring service. | Changes with the taosKeeper startup parameters. |
| TCP | 6044 | Data access port for StatsD. | Changes with the taosAdapter startup parameters. |
| UDP | 6045 | Data access port for collectd. | Changes with the taosAdapter startup parameters. |
| TCP | 6060 | Port of the Monitor service in the Enterprise edition. | |
@ -104,7 +104,7 @@ taos --dump-config
| Attribute | Description |
| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server only |
| Meaning | Switch for the server's internal system monitoring. The monitor collects the load status of the physical node, including CPU, memory, disk, and network bandwidth; the monitoring records are sent over HTTP to the TaosKeeper monitoring service specified by `monitorFqdn` and `monitorPort`. |
| Meaning | Switch for the server's internal system monitoring. The monitor collects the load status of the physical node, including CPU, memory, disk, and network bandwidth; the monitoring records are sent over HTTP to the taosKeeper monitoring service specified by `monitorFqdn` and `monitorPort`. |
| Value range | 0: disable the monitoring service; 1: enable the monitoring service |
| Default | 0 |
@ -113,7 +113,7 @@ taos --dump-config
| Attribute | Description |
| -------- | -------------------------- |
| Applicable | Server only |
| Meaning | FQDN of the TaosKeeper monitoring service |
| Meaning | FQDN of the taosKeeper monitoring service |
| Default | None |
### monitorPort
@ -121,7 +121,7 @@ taos --dump-config
| Attribute | Description |
| -------- | --------------------------- |
| Applicable | Server only |
| Meaning | Port of the TaosKeeper monitoring service |
| Meaning | Port of the taosKeeper monitoring service |
| Default | 6043 |
### monitorInterval

View File

@ -6,7 +6,7 @@ description: A tool that exports monitoring metrics of TDengine 3.0
## Introduction
TaosKeeper is the tool that exports monitoring metrics for TDengine 3.0. With just a few configuration items, it can obtain the running status of TDengine. taosKeeper uses the TDengine RESTful interface, so it can be used without installing the TDengine client.
taosKeeper is the tool that exports monitoring metrics for TDengine 3.0. With just a few configuration items, it can obtain the running status of TDengine. taosKeeper uses the TDengine RESTful interface, so it can be used without installing the TDengine client.
## Installation

View File

@ -67,7 +67,7 @@ sudo systemctl start telegraf
Log in to the Grafana interface using a web browser at `IP:3000`; the initial username and password are admin/admin.
Click the gear icon on the left and select `Plugins`; you should find the TDengine data source plugin icon.
Click the plus icon on the left and select `Import`, download the dashboard JSON file from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json`, and import it. You will then see the following dashboard:
Click the plus icon on the left and select `Import`, download the dashboard JSON file from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v3.json` (for TDengine 3.0; for TDengine 2.x, please download `telegraf-dashboard-v2.json`), and import it. You will then see the following dashboard:
![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp)

View File

@ -50,6 +50,7 @@ extern int32_t tsTagFilterResCacheSize;
// queue & threads
extern int32_t tsNumOfRpcThreads;
extern int32_t tsNumOfRpcSessions;
extern int32_t tsTimeToGetAvailableConn;
extern int32_t tsNumOfCommitThreads;
extern int32_t tsNumOfTaskQueueThreads;
extern int32_t tsNumOfMnodeQueryThreads;

View File

@ -114,7 +114,7 @@ typedef struct SRpcInit {
int32_t connLimitNum;
int32_t connLimitLock;
int32_t timeToGetConn;
int8_t supportBatch; // 0: no batch, 1. batch
int32_t batchSize;
void *parent;

View File

@ -33,7 +33,6 @@ extern "C" {
#undef strdup
#define strdup STRDUP_FUNC_TAOS_FORBID
#endif
#endif // ifndef ALLOW_FORBID_FUNC
#endif // if !defined(WINDOWS)

View File

@ -59,11 +59,13 @@ typedef struct STraceId {
char* _t = _buf; \
_t[0] = '0'; \
_t[1] = 'x'; \
_t += titoa(rootId, 16, &_t[2]); \
_t += 2; \
_t += titoa(rootId, 16, &_t[0]); \
_t[0] = ':'; \
_t[1] = '0'; \
_t[2] = 'x'; \
_t += titoa(msgId, 16, &_t[3]); \
_t += 3; \
_t += titoa(msgId, 16, &_t[0]); \
} while (0)
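The reworked macro above now advances the write pointer past each literal prefix first (`_t += 2`, then `_t += 3`) and lets `titoa` write from `&_t[0]`, so its return value (presumably the count of hex digits emitted) moves the cursor by the right amount. A minimal sketch of the `0x<rootId>:0x<msgId>` layout the macro produces, using `snprintf` instead of the internal `titoa` helper (whose exact contract is assumed here):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the "0x<rootId>:0x<msgId>" trace-id rendering the macro builds.
 * The real macro writes into a caller-provided buffer with titoa; snprintf is
 * used here purely to show the expected layout. */
static void format_trace_id(char *buf, size_t cap, uint64_t rootId, uint64_t msgId) {
  snprintf(buf, cap, "0x%" PRIx64 ":0x%" PRIx64, rootId, msgId);
}

int main(void) {
  char buf[64];
  format_trace_id(buf, sizeof(buf), 0x1a2bULL, 0x3c4dULL);
  printf("%s\n", buf); /* prints 0x1a2b:0x3c4d */
  return 0;
}
```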
#ifdef __cplusplus

View File

@ -144,7 +144,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.localPort = 0;
rpcInit.label = "TSC";
rpcInit.numOfThreads = numOfThread;
rpcInit.numOfThreads = tsNumOfRpcThreads;
rpcInit.cfp = processMsgFromServer;
rpcInit.rfp = clientRpcRfp;
rpcInit.sessions = 1024;
@ -159,6 +159,12 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
rpcInit.retryMaxInterval = tsRedirectMaxPeriod;
rpcInit.retryMaxTimouet = tsMaxRetryWaitTime;
int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 5);
connLimitNum = TMAX(connLimitNum, 10);
connLimitNum = TMIN(connLimitNum, 500);
rpcInit.connLimitNum = connLimitNum;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
void *pDnodeConn = rpcOpen(&rpcInit);
if (pDnodeConn == NULL) {
tscError("failed to init connection to server");
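The block added above derives a per-endpoint connection limit from the RPC session budget and thread count, then clamps it. A standalone sketch of that arithmetic follows; the clamp bounds 10 and 500 come from the hunk, and the sample inputs reuse the defaults set later in this diff (5000 sessions, 1 RPC thread):

```c
#include <stdio.h>

#define DEMO_TMAX(a, b) ((a) > (b) ? (a) : (b))
#define DEMO_TMIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirror of the per-connection limit computed in openTransporter:
 * sessions are divided across (threads * 5) and clamped to [10, 500]. */
static int demo_conn_limit(int numOfRpcSessions, int numOfRpcThreads) {
  int limit = numOfRpcSessions / (numOfRpcThreads * 5);
  limit = DEMO_TMAX(limit, 10);
  limit = DEMO_TMIN(limit, 500);
  return limit;
}

int main(void) {
  /* with the new defaults (5000 sessions, 1 rpc thread) the limit is capped at 500 */
  printf("limit = %d\n", demo_conn_limit(5000, 1));
  /* a small budget (100 sessions, 4 threads) is raised to the floor of 10 */
  printf("limit = %d\n", demo_conn_limit(100, 4));
  return 0;
}
```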

View File

@ -192,7 +192,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
(*pRequest)->sqlLen = sqlLen;
(*pRequest)->validateOnly = validateSql;
SSyncQueryParam* newpParam;
SSyncQueryParam* newpParam = NULL;
if (param == NULL) {
newpParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
if (newpParam == NULL) {
@ -1545,7 +1545,7 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
}
pRequest->code =
setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4, true);
setQueryResultFromRsp(&pRequest->body.resInfo, (const SRetrieveTableRsp*)pResInfo->pData, convertUcs4, true);
if (pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
return NULL;
@ -2034,6 +2034,12 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
rpcInit.compressSize = tsCompressMsgSize;
rpcInit.user = "_dnd";
int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3);
connLimitNum = TMAX(connLimitNum, 10);
connLimitNum = TMIN(connLimitNum, 500);
rpcInit.connLimitNum = connLimitNum;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
clientRpc = rpcOpen(&rpcInit);
if (clientRpc == NULL) {
tscError("failed to init server status client");

View File

@ -1016,7 +1016,7 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
}
pRequest->code =
setQueryResultFromRsp(pResultInfo, (SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true);
setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true);
if (pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
pRequest->code = code;

View File

@ -497,6 +497,9 @@ int32_t processShowVariablesRsp(void* param, SDataBuf* pMsg, int32_t code) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRes, false, true);
}
if(code != 0){
taosMemoryFree(pRes);
}
tFreeSShowVariablesRsp(&rsp);
}

View File

@ -1154,13 +1154,16 @@ static int32_t smlParseLineBottom(SSmlHandle *info) {
SSmlLineInfo *elements = info->lines + i;
SSmlTableInfo *tinfo = NULL;
if (info->protocol == TSDB_SML_LINE_PROTOCOL) {
tinfo = *(SSmlTableInfo **)taosHashGet(info->childTables, elements->measure, elements->measureTagsLen);
SSmlTableInfo** tmp = (SSmlTableInfo **)taosHashGet(info->childTables, elements->measure, elements->measureTagsLen);
if(tmp) tinfo = *tmp;
} else if (info->protocol == TSDB_SML_TELNET_PROTOCOL) {
tinfo = *(SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag,
SSmlTableInfo** tmp = (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag,
elements->measureLen + elements->tagsLen);
if(tmp) tinfo = *tmp;
} else {
tinfo = *(SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag,
SSmlTableInfo** tmp = (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag,
elements->measureLen + elements->tagsLen);
if(tmp) tinfo = *tmp;
}
if (tinfo == NULL) {
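The rewritten lookups above stop dereferencing the result of `taosHashGet` directly: when the measure/tag key is missing, the lookup returns NULL and the old `*(SSmlTableInfo **)taosHashGet(...)` crashed. A self-contained illustration of the guarded pattern follows; the linear-scan `lookup` is only a stand-in for the hash table, and the NULL-on-miss contract is assumed from the patch:

```c
#include <stdio.h>
#include <string.h>

/* Stand-in for a hash lookup that can miss: returns the address of the stored
 * value slot, or NULL when the key is absent. */
typedef struct { const char *key; void *value; } Entry;

static void **lookup(Entry *tbl, int n, const char *key) {
  for (int i = 0; i < n; i++) {
    if (strcmp(tbl[i].key, key) == 0) return &tbl[i].value;
  }
  return NULL;
}

int main(void) {
  int payload = 42;
  Entry childTables[] = {{"meters,location=LA", &payload}};
  void *tinfo = NULL;

  /* Unguarded: *(int **)lookup(...) crashes when the key is missing.
   * Guarded (as in the patch): test the returned slot pointer first. */
  void **tmp = lookup(childTables, 1, "meters,location=SF");
  if (tmp) tinfo = *tmp;

  printf("tinfo is %s\n", tinfo ? "set" : "NULL, key missing but no crash");
  return 0;
}
```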

View File

@ -1279,11 +1279,12 @@ int32_t smlParseJSON(SSmlHandle *info, char *payload) {
if (cnt >= payloadNum) {
payloadNum = payloadNum << 1;
void *tmp = taosMemoryRealloc(info->lines, payloadNum * sizeof(SSmlLineInfo));
if (tmp != NULL) {
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
info->lines = (SSmlLineInfo *)tmp;
memset(info->lines + cnt, 0, (payloadNum - cnt) * sizeof(SSmlLineInfo));
}
}
ret = smlParseJSONString(info, &dataPointStart, info->lines + cnt);
if ((info->lines + cnt)->measure == NULL) break;
}
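The corrected branch above returns `TSDB_CODE_OUT_OF_MEMORY` when `taosMemoryRealloc` fails instead of silently continuing with the old, now-too-small `info->lines` buffer, and only commits the new pointer on success. A hedged sketch of that doubling-realloc idiom in plain C (not the TDengine allocator):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow a buffer geometrically, zeroing only the newly added tail.
 * Returns 0 on success, -1 on allocation failure (the old buffer is kept). */
static int grow_lines(int **lines, int *cap, int used) {
  if (used < *cap) return 0;                      /* still room, nothing to do */
  int   newCap = *cap << 1;                       /* double, as in smlParseJSON */
  void *tmp    = realloc(*lines, newCap * sizeof(int));
  if (tmp == NULL) return -1;                     /* bail out, do NOT touch *lines */
  *lines = tmp;
  memset(*lines + used, 0, (newCap - used) * sizeof(int));
  *cap = newCap;
  return 0;
}

int main(void) {
  int cap = 2, *lines = calloc(cap, sizeof(int));
  for (int i = 0; i < 10; i++) {
    if (grow_lines(&lines, &cap, i) != 0) { free(lines); return 1; }
    lines[i] = i;
  }
  printf("cap=%d last=%d\n", cap, lines[9]);
  free(lines);
  return 0;
}
```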

View File

@ -41,7 +41,8 @@ bool tsPrintAuth = false;
// queue & threads
int32_t tsNumOfRpcThreads = 1;
int32_t tsNumOfRpcSessions = 2000;
int32_t tsNumOfRpcSessions = 5000;
int32_t tsTimeToGetAvailableConn = 100000;
int32_t tsNumOfCommitThreads = 2;
int32_t tsNumOfTaskQueueThreads = 4;
int32_t tsNumOfMnodeQueryThreads = 4;
@ -326,6 +327,12 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1;
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, true) != 0) return -1;
tsNumOfRpcSessions = TRANGE(tsNumOfRpcSessions, 100, 100000);
if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, 0) != 0) return -1;
tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 10000000);
if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, 0) != 0) return -1;
tsNumOfTaskQueueThreads = tsNumOfCores / 2;
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4);
if (tsNumOfTaskQueueThreads >= 10) {
@ -397,6 +404,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfRpcSessions = TRANGE(tsNumOfRpcSessions, 100, 10000);
if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, 0) != 0) return -1;
tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 1000000);
if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsNumOfRpcSessions, 20, 1000000, 0) != 0) return -1;
tsNumOfCommitThreads = tsNumOfCores / 2;
tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, 0) != 0) return -1;
@ -517,6 +527,14 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) {
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "timeToGetAvailableConn");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsTimeToGetAvailableConn = 1000;
tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 1000000);
pItem->i32 = tsTimeToGetAvailableConn;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfCommitThreads = numOfCores / 2;
@ -698,6 +716,10 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
tsMaxRetryWaitTime = cfgGetItem(pCfg, "maxRetryWaitTime")->i32;
tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32;
tsTimeToGetAvailableConn = cfgGetItem(pCfg, "timeToGetAvailableConn")->i32;
return 0;
}
@ -735,6 +757,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32;
tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32;
tsTimeToGetAvailableConn = cfgGetItem(pCfg, "timeToGetAvailableConn")->i32;
tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32;
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
@ -742,7 +766,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
// tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
// tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchTereads")->i32;
tsNumOfSnodeStreamThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
tsNumOfSnodeWriteThreads = cfgGetItem(pCfg, "numOfSnodeUniqueThreads")->i32;
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;

View File

@ -93,15 +93,15 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
break;
}
/*
pDnode is null, TD-22618
at trans.c line 91
before this line, dmProcessRpcMsg callback is set
after this line, parent is set
so when dmProcessRpcMsg is called, pDnode is still null.
*/
if (pDnode != NULL){
if(pDnode->status != DND_STAT_RUNNING) {
/*
pDnode is null, TD-22618
at trans.c line 91
before this line, dmProcessRpcMsg callback is set
after this line, parent is set
so when dmProcessRpcMsg is called, pDnode is still null.
*/
if (pDnode != NULL) {
if (pDnode->status != DND_STAT_RUNNING) {
if (pRpc->msgType == TDMT_DND_SERVER_STATUS) {
dmProcessServerStartupStatus(pDnode, pRpc);
return;
@ -304,6 +304,7 @@ int32_t dmInitClient(SDnode *pDnode) {
rpcInit.connLimitLock = 1;
rpcInit.supportBatch = 1;
rpcInit.batchSize = 8 * 1024;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
pTrans->clientRpc = rpcOpen(&rpcInit);
if (pTrans->clientRpc == NULL) {

View File

@ -15,17 +15,11 @@
#define _DEFAULT_SOURCE
#include "mndConsumer.h"
#include "mndDb.h"
#include "mndDnode.h"
#include "mndMnode.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndStb.h"
#include "mndSubscribe.h"
#include "mndTopic.h"
#include "mndTrans.h"
#include "mndUser.h"
#include "mndVgroup.h"
#include "tcompare.h"
#include "tname.h"
@ -209,6 +203,7 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) {
taosMemoryFree(pConsumerNew);
mndTransDrop(pTrans);
return 0;
FAIL:
tDeleteSMqConsumerObj(pConsumerNew);
taosMemoryFree(pConsumerNew);
@ -580,6 +575,10 @@ static int32_t validateTopics(const SArray* pTopicList, SMnode* pMnode, const ch
return 0;
}
static void* topicNameDup(void* p){
return taosStrdup((char*) p);
}
int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
char *msgStr = pMsg->pCont;
@ -616,15 +615,16 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup);
tstrncpy(pConsumerNew->clientId, subscribe.clientId, 256);
pConsumerNew->updateType = CONSUMER_UPDATE__MODIFY;
taosArrayDestroy(pConsumerNew->rebNewTopics);
pConsumerNew->rebNewTopics = pTopicList; // all subscribe topics should re-balance.
subscribe.topicNames = NULL;
for (int32_t i = 0; i < newTopicNum; i++) {
char *newTopicCopy = taosStrdup(taosArrayGetP(pTopicList, i));
taosArrayPush(pConsumerNew->assignedTopics, &newTopicCopy);
}
// set the update type
pConsumerNew->updateType = CONSUMER_UPDATE__MODIFY;
taosArrayDestroy(pConsumerNew->assignedTopics);
pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup);
// all subscribed topics should re-balance.
taosArrayDestroy(pConsumerNew->rebNewTopics);
pConsumerNew->rebNewTopics = pTopicList;
subscribe.topicNames = NULL;
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto _over;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _over;
@ -646,17 +646,12 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
goto _over;
}
// set the update type
pConsumerNew->updateType = CONSUMER_UPDATE__MODIFY;
taosArrayDestroy(pConsumerNew->assignedTopics);
pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup);
for (int32_t i = 0; i < newTopicNum; i++) {
char *newTopicCopy = taosStrdup(taosArrayGetP(pTopicList, i));
taosArrayPush(pConsumerNew->assignedTopics, &newTopicCopy);
}
int32_t oldTopicNum = 0;
if (pExistedConsumer->currentTopics) {
oldTopicNum = taosArrayGetSize(pExistedConsumer->currentTopics);
}
int32_t oldTopicNum = (pExistedConsumer->currentTopics)? taosArrayGetSize(pExistedConsumer->currentTopics):0;
int32_t i = 0, j = 0;
while (i < oldTopicNum || j < newTopicNum) {
@ -692,11 +687,8 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
}
}
if (pExistedConsumer && taosArrayGetSize(pConsumerNew->rebNewTopics) == 0 &&
taosArrayGetSize(pConsumerNew->rebRemovedTopics) == 0) {
/*if (taosArrayGetSize(pConsumerNew->assignedTopics) == 0) {*/
/*pConsumerNew->updateType = */
/*}*/
// no topics need to be rebalanced
if (taosArrayGetSize(pConsumerNew->rebNewTopics) == 0 && taosArrayGetSize(pConsumerNew->rebRemovedTopics) == 0) {
goto _over;
}
@ -718,8 +710,9 @@ _over:
tDeleteSMqConsumerObj(pConsumerNew);
taosMemoryFree(pConsumerNew);
}
// TODO: replace with destroy subscribe msg
if (subscribe.topicNames) taosArrayDestroyP(subscribe.topicNames, (FDelete)taosMemoryFree);
taosArrayDestroyP(subscribe.topicNames, (FDelete)taosMemoryFree);
return code;
}
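One theme of the rewrite above is ownership: `pTopicList` itself is handed over to `rebNewTopics` (with `subscribe.topicNames` cleared so it is not freed twice), while `assignedTopics` receives an independent deep copy built by `taosArrayDup` with the new `topicNameDup` callback. A small sketch of that duplicate-via-callback idea, with plain C arrays and `strdup` standing in for `SArray` and `taosStrdup`:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void *(*dup_fn)(const void *);

/* Deep-copy an array of heap pointers: each element goes through the
 * caller-supplied duplicator, so the copy owns its own strings. */
static void **array_dup(void **src, size_t n, dup_fn dup) {
  void **dst = malloc(n * sizeof(void *));
  if (dst == NULL) return NULL;
  for (size_t i = 0; i < n; i++) dst[i] = dup(src[i]);
  return dst;
}

static void *topic_name_dup(const void *p) { return strdup((const char *)p); }

int main(void) {
  void  *topics[] = {strdup("topic_meters"), strdup("topic_status")};
  void **assigned = array_dup(topics, 2, topic_name_dup);

  printf("%s %s\n", (char *)assigned[0], (char *)assigned[1]);

  /* the original list and the copy can now be released independently,
   * mirroring rebNewTopics taking pTopicList while assignedTopics owns a dup */
  for (int i = 0; i < 2; i++) { free(topics[i]); free(assigned[i]); }
  free(assigned);
  return 0;
}
```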
@ -750,12 +743,12 @@ SSdbRaw *mndConsumerActionEncode(SMqConsumerObj *pConsumer) {
CM_ENCODE_OVER:
taosMemoryFreeClear(buf);
if (terrno != 0) {
mError("consumer:%" PRId64 ", failed to encode to raw:%p since %s", pConsumer->consumerId, pRaw, terrstr());
mError("consumer:0x%" PRIx64 " failed to encode to raw:%p since %s", pConsumer->consumerId, pRaw, terrstr());
sdbFreeRaw(pRaw);
return NULL;
}
mTrace("consumer:%" PRId64 ", encode to raw:%p, row:%p", pConsumer->consumerId, pRaw, pConsumer);
mTrace("consumer:0x%" PRIx64 ", encode to raw:%p, row:%p", pConsumer->consumerId, pRaw, pConsumer);
return pRaw;
}
@ -823,8 +816,8 @@ static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) {
}
static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer) {
mDebug("consumer:0x%" PRIx64 " perform delete action, status:%s", pConsumer->consumerId,
mndConsumerStatusName(pConsumer->status));
mDebug("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId,
pConsumer->status, mndConsumerStatusName(pConsumer->status));
tDeleteSMqConsumerObj(pConsumer);
return 0;
}
@ -1075,22 +1068,23 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
// consumer group
char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0};
tstrncpy(varDataVal(cgroup), pConsumer->cgroup, TSDB_CGROUP_LEN);
varDataSetLen(cgroup, strlen(varDataVal(cgroup)));
STR_TO_VARSTR(cgroup, pConsumer->cgroup);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)cgroup, false);
// client id
char clientId[256 + VARSTR_HEADER_SIZE] = {0};
tstrncpy(varDataVal(clientId), pConsumer->clientId, 256);
varDataSetLen(clientId, strlen(varDataVal(clientId)));
STR_TO_VARSTR(clientId, pConsumer->clientId);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)clientId, false);
// status
char status[20 + VARSTR_HEADER_SIZE] = {0};
tstrncpy(varDataVal(status), mndConsumerStatusName(pConsumer->status), 20);
varDataSetLen(status, strlen(varDataVal(status)));
const char* pStatusName = mndConsumerStatusName(pConsumer->status);
STR_TO_VARSTR(status, pStatusName);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)status, false);
@ -1123,8 +1117,11 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
numOfRows++;
}
taosRUnLockLatch(&pConsumer->lock);
sdbRelease(pSdb, pConsumer);
pBlock->info.rows = numOfRows;
}
pShow->numOfRows += numOfRows;
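The consumer-group, client-id, and status columns above are now filled with `STR_TO_VARSTR` instead of a manual `tstrncpy` plus `varDataSetLen`. The sketch below shows the var-string layout that pattern appears to build, a small length header followed by the raw bytes; the 2-byte header width is an assumption standing in for `VARSTR_HEADER_SIZE`:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_VARSTR_HEADER_SIZE sizeof(uint16_t) /* assumed header width */

/* Build "<len><payload>" in buf, the way the cgroup/clientId/status columns
 * are filled above. The payload is not NUL-terminated; the header carries
 * the length instead. */
static void demo_str_to_varstr(char *buf, const char *s) {
  uint16_t len = (uint16_t)strlen(s);
  memcpy(buf, &len, DEMO_VARSTR_HEADER_SIZE);
  memcpy(buf + DEMO_VARSTR_HEADER_SIZE, s, len);
}

int main(void) {
  char cgroup[64 + DEMO_VARSTR_HEADER_SIZE] = {0};
  demo_str_to_varstr(cgroup, "tmq_group_1");

  uint16_t len = 0;
  memcpy(&len, cgroup, sizeof(len));
  printf("len=%u payload=%.*s\n", (unsigned)len, (int)len, cgroup + DEMO_VARSTR_HEADER_SIZE);
  return 0;
}
```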

View File

@ -115,7 +115,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStream
if (pStream->fixedSinkVgId == 0) {
SDbObj* pDb = mndAcquireDb(pMnode, pStream->targetDb);
if (pDb->cfg.numOfVgroups > 1) {
if (pDb != NULL && pDb->cfg.numOfVgroups > 1) {
isShuffle = true;
pTask->outputType = TASK_OUTPUT__SHUFFLE_DISPATCH;
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;

View File

@ -134,7 +134,7 @@ static SShowObj *mndCreateShowObj(SMnode *pMnode, SRetrieveTableReq *pReq) {
showObj.pMnode = pMnode;
showObj.type = convertToRetrieveType(pReq->tb, tListLen(pReq->tb));
memcpy(showObj.db, pReq->db, TSDB_DB_FNAME_LEN);
strncpy(showObj.filterTb, pReq->filterTb, TSDB_TABLE_NAME_LEN);
tstrncpy(showObj.filterTb, pReq->filterTb, TSDB_TABLE_NAME_LEN);
int32_t keepTime = tsShellActivityTimer * 6 * 1000;
SShowObj *pShow = taosCachePut(pMgmt->cache, &showId, sizeof(int64_t), &showObj, size, keepTime);
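Swapping `strncpy` for `tstrncpy` above closes the usual gap: `strncpy` leaves the destination unterminated when the source fills it completely. A sketch of a bounded copy that always terminates, which is what `tstrncpy` is assumed to guarantee (this is not the TDengine implementation):

```c
#include <stdio.h>
#include <string.h>

/* Bounded copy that always NUL-terminates, unlike strncpy which leaves the
 * destination unterminated when the source is >= the destination size. */
static void bounded_copy(char *dst, const char *src, size_t dstSize) {
  if (dstSize == 0) return;
  strncpy(dst, src, dstSize - 1);
  dst[dstSize - 1] = '\0';
}

int main(void) {
  char filterTb[8];
  bounded_copy(filterTb, "a_rather_long_table_name", sizeof(filterTb));
  printf("%s\n", filterTb); /* "a_rathe", safely terminated */
  return 0;
}
```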

View File

@ -16,15 +16,10 @@
#define _DEFAULT_SOURCE
#include "mndSubscribe.h"
#include "mndConsumer.h"
#include "mndDb.h"
#include "mndDnode.h"
#include "mndMnode.h"
#include "mndScheduler.h"
#include "mndShow.h"
#include "mndStb.h"
#include "mndTopic.h"
#include "mndTrans.h"
#include "mndUser.h"
#include "mndVgroup.h"
#include "tcompare.h"
#include "tname.h"
@ -1041,7 +1036,6 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock
}
// do not show for cleared subscription
#if 1
int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
for (int32_t i = 0; i < sz; i++) {
SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);
@ -1087,8 +1081,6 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock
numOfRows++;
}
#endif
pBlock->info.rows = numOfRows;
taosRUnLockLatch(&pSub->lock);

View File

@ -15,6 +15,7 @@ target_sources(
"src/vnd/vnodeSync.c"
"src/vnd/vnodeSnapshot.c"
"src/vnd/vnodeCompact.c"
"src/vnd/vnodeRetention.c"
# meta
"src/meta/metaOpen.c"

View File

@ -180,7 +180,6 @@ int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo);
int32_t tsdbCompact(STsdb* pTsdb, SCompactInfo* pInfo);
int32_t tsdbFinishCommit(STsdb* pTsdb);
int32_t tsdbRollbackCommit(STsdb* pTsdb);
int32_t tsdbDoRetention(STsdb* pTsdb, int64_t now);
int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq2* pMsg);
int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq2* pMsg, SSubmitRsp2* pRsp);
int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitTbData* pSubmitTbData, int32_t* affectedRows);

View File

@ -595,6 +595,7 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq2 *pMsg, STbUidStore *pStore) {
return 0;
}
#if 0
/**
* @brief retention of rsma1/rsma2
*
@ -618,6 +619,7 @@ int32_t smaDoRetention(SSma *pSma, int64_t now) {
_end:
return code;
}
#endif
static void tdBlockDataDestroy(SArray *pBlockArr) {
for (int32_t i = 0; i < taosArrayGetSize(pBlockArr); ++i) {

View File

@ -217,7 +217,7 @@ int32_t tqPushDataRsp(STQ* pTq, STqPushEntry* pPushEntry) {
char buf2[80] = {0};
tFormatOffset(buf1, tListLen(buf1), &pRsp->reqOffset);
tFormatOffset(buf2, tListLen(buf2), &pRsp->rspOffset);
tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, reqOffset:%s, rspOffset:%s",
tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, req:%s, rsp:%s",
TD_VID(pTq->pVnode), pRsp->head.consumerId, pRsp->head.epoch, pRsp->blockNum, buf1, buf2);
return 0;
@ -275,7 +275,7 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
char buf2[80] = {0};
tFormatOffset(buf1, 80, &pRsp->reqOffset);
tFormatOffset(buf2, 80, &pRsp->rspOffset);
tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d), block num:%d, reqOffset:%s, rspOffset:%s",
tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d), block num:%d, req:%s, rsp:%s",
TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2);
return 0;
@ -604,7 +604,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
code = -1;
}
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp data block:%d, offset type:%d, uid/version:%" PRId64 ", ts:%" PRId64 "",
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, offset type:%d, uid/version:%" PRId64 ", ts:%" PRId64 "",
consumerId, pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type,
dataRsp.rspOffset.uid, dataRsp.rspOffset.ts);

View File

@ -113,9 +113,7 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs
return -1;
}
ASSERT(pRsp->withTbName == false);
ASSERT(pRsp->withSchema == false);
ASSERT(!(pRsp->withTbName || pRsp->withSchema));
return 0;
}

View File

@ -197,7 +197,7 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
return -1;
}
tqDebug("tq save %s(%d) handle consumer:0x%" PRIx64 "epoch:%d vgId:%d", pHandle->subKey,
tqDebug("tq save %s(%d) handle consumer:0x%" PRIx64 " epoch:%d vgId:%d", pHandle->subKey,
(int32_t)strlen(pHandle->subKey), pHandle->consumerId, pHandle->epoch, TD_VID(pTq->pVnode));
void* buf = taosMemoryCalloc(1, vlen);
@ -269,6 +269,7 @@ int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
}
int32_t tqMetaRestoreHandle(STQ* pTq) {
int code = 0;
TBC* pCur = NULL;
if (tdbTbcOpen(pTq->pExecStore, &pCur, NULL) < 0) {
return -1;
@ -290,7 +291,8 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
handle.pRef = walOpenRef(pTq->pVnode->pWal);
if (handle.pRef == NULL) {
return -1;
code = -1;
goto end;
}
walRefVer(handle.pRef, handle.snapshotVer);
@ -307,16 +309,21 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, NULL);
if (handle.execHandle.task == NULL) {
tqError("cannot create exec task for %s", handle.subKey);
return -1;
code = -1;
goto end;
}
void* scanner = NULL;
qExtractStreamScanner(handle.execHandle.task, &scanner);
if (scanner == NULL) {
tqError("cannot extract stream scanner for %s", handle.subKey);
code = -1;
goto end;
}
handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
if (handle.execHandle.pExecReader == NULL) {
tqError("cannot extract exec reader for %s", handle.subKey);
code = -1;
goto end;
}
} else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
@ -347,8 +354,9 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
}
end:
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);
return 0;
return code;
}
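The restore path above now routes every failure through the single `end:` label so `pKey`, `pVal`, and the TDB cursor are released exactly once; the early `return -1` statements that used to skip that cleanup become `code = -1; goto end;`. A compact sketch of the single-exit idiom:

```c
#include <stdio.h>
#include <stdlib.h>

/* Single-exit error handling: acquire resources, set `code` and jump to the
 * cleanup label on any failure, release everything exactly once at the end. */
static int restore_handles(int failAtStep) {
  int   code = 0;
  char *key  = NULL;
  char *val  = NULL;

  key = malloc(32);
  if (key == NULL || failAtStep == 1) { code = -1; goto end; }

  val = malloc(128);
  if (val == NULL || failAtStep == 2) { code = -1; goto end; }

  /* ... use key/val to rebuild a handle ... */

end:
  free(val); /* free(NULL) is a no-op, so partial acquisition is fine */
  free(key);
  return code;
}

int main(void) {
  printf("ok=%d fail1=%d fail2=%d\n", restore_handles(0), restore_handles(1), restore_handles(2));
  return 0;
}
```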

View File

@ -193,7 +193,7 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
memset(&pHandle->pushHandle.rpcInfo, 0, sizeof(SRpcHandleInfo));
taosWUnLockLatch(&pHandle->pushHandle.lock);
tqDebug("vgId:%d offset %" PRId64 " from consumer:%" PRId64 ", (epoch %d) send rsp, block num: %d, reqOffset:%" PRId64 ", rspOffset:%" PRId64,
tqDebug("vgId:%d offset %" PRId64 " from consumer:%" PRId64 ", (epoch %d) send rsp, block num: %d, req:%" PRId64 ", rsp:%" PRId64,
TD_VID(pTq->pVnode), fetchOffset, pHandle->pushHandle.consumerId, pHandle->pushHandle.epoch, rsp.blockNum,
rsp.reqOffset, rsp.rspOffset);
@ -210,25 +210,30 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
void* pReq = POINTER_SHIFT(msg, sizeof(SSubmitReq2Msg));
int32_t len = msgLen - sizeof(SSubmitReq2Msg);
tqDebug("vgId:%d tq push msg version:%" PRId64 " type: %s, p head %p, p body %p, len %d", pTq->pVnode->config.vgId, ver,
TMSG_INFO(msgType), msg, pReq, len);
if (msgType == TDMT_VND_SUBMIT) {
// lock push mgr to avoid potential msg lost
taosWLockLatch(&pTq->pushLock);
if (taosHashGetSize(pTq->pPushMgr) != 0) {
tqDebug("vgId:%d, push handle num %d", pTq->pVnode->config.vgId, taosHashGetSize(pTq->pPushMgr));
int32_t numOfRegisteredPush = taosHashGetSize(pTq->pPushMgr);
if (numOfRegisteredPush > 0) {
tqDebug("vgId:%d tq push msg version:%" PRId64 " type:%s, head:%p, body:%p len:%d, numOfPushed consumers:%d",
pTq->pVnode->config.vgId, ver, TMSG_INFO(msgType), msg, pReq, len, numOfRegisteredPush);
SArray* cachedKeys = taosArrayInit(0, sizeof(void*));
SArray* cachedKeyLens = taosArrayInit(0, sizeof(size_t));
void* data = taosMemoryMalloc(len);
if (data == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("failed to copy data for stream since out of memory");
taosArrayDestroyP(cachedKeys, (FDelete)taosMemoryFree);
taosArrayDestroy(cachedKeyLens);
// unlock
taosWUnLockLatch(&pTq->pushLock);
return -1;
}
memcpy(data, pReq, len);
void* pIter = NULL;
@ -262,7 +267,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
};
qStreamSetScanMemData(task, submit);
// exec
// here start to scan submit block to extract the subscribed data
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
@ -278,7 +283,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
pRsp->blockNum++;
}
tqDebug("vgId:%d, tq handle push, subkey: %s, block num: %d", pTq->pVnode->config.vgId, pPushEntry->subKey,
tqDebug("vgId:%d, tq handle push, subkey:%s, block num:%d", pTq->pVnode->config.vgId, pPushEntry->subKey,
pRsp->blockNum);
if (pRsp->blockNum > 0) {
// set offset
@ -295,6 +300,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
tqPushDataRsp(pTq, pPushEntry);
}
}
// delete entry
for (int32_t i = 0; i < taosArrayGetSize(cachedKeys); i++) {
void* key = taosArrayGetP(cachedKeys, i);

View File

@ -291,10 +291,15 @@ void tqCloseReader(STqReader* pReader) {
}
int32_t tqSeekVer(STqReader* pReader, int64_t ver) {
// todo set the correct vgId
tqDebug("tmq poll: vgId:%d wal seek to version:%"PRId64, 0, ver);
if (walReadSeekVer(pReader->pWalReader, ver) < 0) {
tqError("tmq poll: wal reader failed to seek to ver:%"PRId64, ver);
return -1;
}
} else {
tqDebug("tmq poll: wal reader seek to ver:%"PRId64, ver);
return 0;
}
}
int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) {
@ -302,28 +307,33 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) {
while (1) {
if (!fromProcessedMsg) {
if (walNextValidMsg(pReader->pWalReader) < 0) {
pReader->ver =
pReader->pWalReader->curVersion - (pReader->pWalReader->curInvalid | pReader->pWalReader->curStopped);
SWalReader* pWalReader = pReader->pWalReader;
if (walNextValidMsg(pWalReader) < 0) {
pReader->ver = pWalReader->curVersion - (pWalReader->curInvalid | pWalReader->curStopped);
ret->offset.type = TMQ_OFFSET__LOG;
ret->offset.version = pReader->ver;
ret->fetchType = FETCH_TYPE__NONE;
tqDebug("return offset %" PRId64 ", no more valid", ret->offset.version);
return -1;
}
void* body = POINTER_SHIFT(pReader->pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
int32_t bodyLen = pReader->pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
int64_t ver = pReader->pWalReader->pHead->head.version;
void* body = POINTER_SHIFT(pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
int32_t bodyLen = pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
int64_t ver = pWalReader->pHead->head.version;
tqDebug("tmq poll: extract submit msg from wal, version:%"PRId64" len:%d", ver, bodyLen);
#if 0
if (pReader->pWalReader->pHead->head.msgType != TDMT_VND_SUBMIT) {
if (pWalReader->pHead->head.msgType != TDMT_VND_SUBMIT) {
// TODO do filter
ret->fetchType = FETCH_TYPE__META;
ret->meta = pReader->pWalReader->pHead->head.body;
ret->meta = pWalReader->pHead->head.body;
return 0;
} else {
#endif
tqReaderSetSubmitReq2(pReader, body, bodyLen, ver);
/*tqReaderSetDataMsg(pReader, body, pReader->pWalReader->pHead->head.version);*/
/*tqReaderSetDataMsg(pReader, body, pWalReader->pHead->head.version);*/
#if 0
}
#endif
@ -358,7 +368,7 @@ int32_t tqReaderSetDataMsg(STqReader* pReader, const SSubmitReq* pMsg, int64_t v
// if (tInitSubmitMsgIter(pMsg, &pReader->msgIter) < 0) return -1;
// while (true) {
// if (tGetSubmitMsgNext(&pReader->msgIter, &pReader->pBlock) < 0) return -1;
// tqDebug("submitnext vgId:%d, block:%p, dataLen:%d, len:%d, uid:%"PRId64, pReader->pWalReader->pWal->cfg.vgId, pReader->pBlock, pReader->msgIter.dataLen,
// tqDebug("submitnext vgId:%d, block:%p, dataLen:%d, len:%d, uid:%"PRId64, pWalReader->pWal->cfg.vgId, pReader->pBlock, pReader->msgIter.dataLen,
// pReader->msgIter.len, pReader->msgIter.uid);
// if (pReader->pBlock == NULL) break;
// }
@ -371,10 +381,8 @@ int32_t tqReaderSetDataMsg(STqReader* pReader, const SSubmitReq* pMsg, int64_t v
#endif
int32_t tqReaderSetSubmitReq2(STqReader* pReader, void* msgStr, int32_t msgLen, int64_t ver) {
ASSERT(pReader->msg2.msgStr == NULL);
ASSERT(msgStr);
ASSERT(msgLen);
ASSERT(ver >= 0);
ASSERT(pReader->msg2.msgStr == NULL && msgStr && msgLen && (ver >= 0));
pReader->msg2.msgStr = msgStr;
pReader->msg2.msgLen = msgLen;
pReader->msg2.ver = ver;
@ -421,7 +429,10 @@ bool tqNextDataBlock(STqReader* pReader) {
#endif
bool tqNextDataBlock2(STqReader* pReader) {
if (pReader->msg2.msgStr == NULL) return false;
if (pReader->msg2.msgStr == NULL) {
return false;
}
ASSERT(pReader->setMsg == 1);
tqDebug("tq reader next data block %p, %d %" PRId64 " %d", pReader->msg2.msgStr, pReader->msg2.msgLen,
@ -528,7 +539,7 @@ int32_t tqScanSubmitSplit(SArray* pBlocks, SArray* schemas, STqReader* pReader)
if (pReader->pSchema == NULL) {
tqWarn("vgId:%d, cannot found tsschema for table: uid:%" PRId64 " (suid:%" PRId64
"), version %d, possibly dropped table",
pReader->pWalReader->pWal->cfg.vgId, pReader->msgIter.uid, pReader->msgIter.suid, sversion);
pWalReader->pWal->cfg.vgId, pReader->msgIter.uid, pReader->msgIter.suid, sversion);
pReader->cachedSchemaSuid = 0;
terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
return -1;
@ -538,7 +549,7 @@ int32_t tqScanSubmitSplit(SArray* pBlocks, SArray* schemas, STqReader* pReader)
pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, pReader->msgIter.uid, sversion, 1);
if (pReader->pSchemaWrapper == NULL) {
tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", version %d, possibly dropped table",
pReader->pWalReader->pWal->cfg.vgId, pReader->msgIter.uid, pReader->cachedSchemaVer);
pWalReader->pWal->cfg.vgId, pReader->msgIter.uid, pReader->cachedSchemaVer);
pReader->cachedSchemaSuid = 0;
terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
return -1;

View File

@ -370,11 +370,6 @@ int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
}
return TSDB_CODE_SUCCESS;
_error:
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("failed to encode submit req since %s", terrstr());
return TSDB_CODE_OUT_OF_MEMORY;
}
void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
@ -441,9 +436,6 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
for (int32_t rowId = 0; rowId < rows; rowId++) {
SVCreateTbReq createTbReq = {0};
SVCreateTbReq* pCreateTbReq = &createTbReq;
if (!pCreateTbReq) {
goto _end;
}
// set const
pCreateTbReq->flags = 0;
@ -460,6 +452,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
if (size == 2) {
tagArray = taosArrayInit(1, sizeof(STagVal));
if (!tagArray) {
tdDestroySVCreateTbReq(pCreateTbReq);
goto _end;
}
STagVal tagVal = {
@ -477,6 +470,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
} else {
tagArray = taosArrayInit(size - 1, sizeof(STagVal));
if (!tagArray) {
tdDestroySVCreateTbReq(pCreateTbReq);
goto _end;
}
for (int32_t tagId = UD_TAG_COLUMN_INDEX, step = 1; tagId < size; tagId++, step++) {
@ -503,6 +497,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
tTagNew(tagArray, 1, false, &pTag);
tagArray = taosArrayDestroy(tagArray);
if (pTag == NULL) {
tdDestroySVCreateTbReq(pCreateTbReq);
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}
@ -556,6 +551,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
SVCreateTbReq* pCreateTbReq = NULL;
if (!(pCreateTbReq = taosMemoryCalloc(1, sizeof(SVCreateStbReq)))) {
taosMemoryFree(ctbName);
goto _end;
};
@ -572,6 +568,8 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
// set tag content
tagArray = taosArrayInit(1, sizeof(STagVal));
if (!tagArray) {
taosMemoryFree(ctbName);
tdDestroySVCreateTbReq(pCreateTbReq);
goto _end;
}
STagVal tagVal = {
@ -587,6 +585,8 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
tagArray = taosArrayDestroy(tagArray);
if (pTag == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(ctbName);
tdDestroySVCreateTbReq(pCreateTbReq);
goto _end;
}
pCreateTbReq->ctb.pTag = (uint8_t*)pTag;
@ -630,6 +630,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
// rows
if (!pVals && !(pVals = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal)))) {
taosArrayDestroy(tbData.aRowP);
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
goto _end;
}
@ -680,6 +681,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
SSubmitReq2 submitReq = {0};
if (!(submitReq.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData)))) {
tDestroySSubmitTbData(&tbData, TSDB_MSG_FLG_ENCODE);
goto _end;
}
@ -693,6 +695,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
len += sizeof(SSubmitReq2Msg);
pBuf = rpcMallocCont(len);
if (NULL == pBuf) {
tDestroySSubmitReq2(&submitReq, TSDB_MSG_FLG_ENCODE);
goto _end;
}
((SSubmitReq2Msg*)pBuf)->header.vgId = TD_VID(pVnode);
@ -704,6 +707,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
tqError("failed to encode submit req since %s", terrstr());
tEncoderClear(&encoder);
rpcFreeCont(pBuf);
tDestroySSubmitReq2(&submitReq, TSDB_MSG_FLG_ENCODE);
continue;
}
tEncoderClear(&encoder);

View File

@ -57,32 +57,6 @@ typedef struct {
SBlockData sData;
} STsdbCompactor;
static int32_t tsdbCommitCompact(STsdbCompactor *pCompactor) {
int32_t code = 0;
int32_t lino = 0;
STsdb *pTsdb = pCompactor->pTsdb;
code = tsdbFSPrepareCommit(pTsdb, &pCompactor->fs);
TSDB_CHECK_CODE(code, lino, _exit);
taosThreadRwlockWrlock(&pTsdb->rwLock);
code = tsdbFSCommit(pTsdb);
if (code) {
taosThreadRwlockUnlock(&pTsdb->rwLock);
TSDB_CHECK_CODE(code, lino, _exit);
}
taosThreadRwlockUnlock(&pTsdb->rwLock);
_exit:
if (code) {
tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code));
}
return code;
}
static int32_t tsdbAbortCompact(STsdbCompactor *pCompactor) {
int32_t code = 0;
int32_t lino = 0;
@ -660,8 +634,31 @@ _exit:
if (code) {
tsdbAbortCompact(pCompactor);
} else {
tsdbCommitCompact(pCompactor);
tsdbFSPrepareCommit(pTsdb, &pCompactor->fs);
}
tsdbEndCompact(pCompactor);
return code;
}
int32_t tsdbCommitCompact(STsdb *pTsdb) {
int32_t code = 0;
int32_t lino = 0;
taosThreadRwlockWrlock(&pTsdb->rwLock);
code = tsdbFSCommit(pTsdb);
if (code) {
taosThreadRwlockUnlock(&pTsdb->rwLock);
TSDB_CHECK_CODE(code, lino, _exit);
}
taosThreadRwlockUnlock(&pTsdb->rwLock);
_exit:
if (code) {
tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code));
} else {
tsdbInfo("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__);
}
return code;
}

View File

@ -916,10 +916,12 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockN
pBlockNum->numOfBlocks += 1;
}
if ((pScanInfo->pBlockList != NULL )&& (taosArrayGetSize(pScanInfo->pBlockList) > 0)) {
if (pScanInfo->pBlockList != NULL) {
if (taosArrayGetSize(pScanInfo->pBlockList) > 0) {
numOfQTable += 1;
}
}
}
pBlockNum->numOfLastFiles = pReader->pFileReader->pSet->nSttF;
int32_t total = pBlockNum->numOfLastFiles + pBlockNum->numOfBlocks;
@ -4720,8 +4722,13 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
return code;
}
static int32_t getBucketIndex(int32_t startRow, int32_t bucketRange, int32_t numOfRows) {
return (numOfRows - startRow) / bucketRange;
static int32_t getBucketIndex(int32_t startRow, int32_t bucketRange, int32_t numOfRows, int32_t numOfBucket) {
int32_t bucketIndex = ((numOfRows - startRow) / bucketRange);
if (bucketIndex == numOfBucket) {
bucketIndex -= 1;
}
return bucketIndex;
}
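The extra `numOfBucket` parameter above guards the histogram's upper edge: a block whose row count lands exactly on `defMaxRows` used to map to index `numOfBucket`, one slot past the end of `blockRowsHisto`. A small sketch of the boundary case; the 20 buckets and the 100..1000 row range are illustrative values, not taken from a real configuration:

```c
#include <stdio.h>

/* Map a row count into one of numOfBucket histogram slots, clamping the
 * value that lands exactly on the upper edge back into the last slot. */
static int bucket_index(int startRow, int bucketRange, int numOfRows, int numOfBucket) {
  int idx = (numOfRows - startRow) / bucketRange;
  if (idx == numOfBucket) idx -= 1; /* the fix: keep maxRows in the last bucket */
  return idx;
}

int main(void) {
  int minRows = 100, maxRows = 1000, numOfBucket = 20;
  int bucketRange = (maxRows - minRows + numOfBucket - 1) / numOfBucket; /* ceil */

  printf("rows=550  -> bucket %d\n", bucket_index(minRows, bucketRange, 550, numOfBucket));
  printf("rows=1000 -> bucket %d\n", bucket_index(minRows, bucketRange, 1000, numOfBucket));
  return 0;
}
```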
int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTableBlockInfo) {
@ -4730,6 +4737,8 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
pTableBlockInfo->totalRows = 0;
pTableBlockInfo->numOfVgroups = 1;
const int32_t numOfBuckets = 20.0;
// find the start data block in file
tsdbAcquireReader(pReader);
@ -4742,7 +4751,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
pTableBlockInfo->defMinRows = pc->minRows;
pTableBlockInfo->defMaxRows = pc->maxRows;
int32_t bucketRange = ceil((pc->maxRows - pc->minRows) / 20.0);
int32_t bucketRange = ceil((pc->maxRows - pc->minRows) / numOfBuckets);
pTableBlockInfo->numOfFiles += 1;
@ -4780,7 +4789,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
pTableBlockInfo->totalSize += pBlock->aSubBlock[0].szBlock;
int32_t bucketIndex = getBucketIndex(pTableBlockInfo->defMinRows, bucketRange, numOfRows);
int32_t bucketIndex = getBucketIndex(pTableBlockInfo->defMinRows, bucketRange, numOfRows, numOfBuckets);
pTableBlockInfo->blockRowsHisto[bucketIndex]++;
hasNext = blockIteratorNext(&pStatus->blockIter, pReader->idStr);

View File

@ -15,7 +15,7 @@
#include "tsdb.h"
static bool tsdbShouldDoRetention(STsdb *pTsdb, int64_t now) {
static bool tsdbShouldDoRetentionImpl(STsdb *pTsdb, int64_t now) {
for (int32_t iSet = 0; iSet < taosArrayGetSize(pTsdb->fs.aDFileSet); iSet++) {
SDFileSet *pSet = (SDFileSet *)taosArrayGet(pTsdb->fs.aDFileSet, iSet);
int32_t expLevel = tsdbFidLevel(pSet->fid, &pTsdb->keepCfg, now);
@ -38,19 +38,21 @@ static bool tsdbShouldDoRetention(STsdb *pTsdb, int64_t now) {
return false;
}
bool tsdbShouldDoRetention(STsdb *pTsdb, int64_t now) {
bool should;
taosThreadRwlockRdlock(&pTsdb->rwLock);
should = tsdbShouldDoRetentionImpl(pTsdb, now);
taosThreadRwlockUnlock(&pTsdb->rwLock);
return should;
}
int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) {
int32_t code = 0;
if (!tsdbShouldDoRetention(pTsdb, now)) {
return code;
}
// do retention
int32_t lino = 0;
STsdbFS fs = {0};
code = tsdbFSCopy(pTsdb, &fs);
if (code) goto _err;
TSDB_CHECK_CODE(code, lino, _exit);
for (int32_t iSet = 0; iSet < taosArrayGetSize(fs.aDFileSet); iSet++) {
SDFileSet *pSet = (SDFileSet *)taosArrayGet(fs.aDFileSet, iSet);
@ -60,8 +62,10 @@ int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) {
if (expLevel < 0) {
taosMemoryFree(pSet->pHeadF);
taosMemoryFree(pSet->pDataF);
taosMemoryFree(pSet->aSttF[0]);
taosMemoryFree(pSet->pSmaF);
for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
taosMemoryFree(pSet->aSttF[iStt]);
}
taosArrayRemove(fs.aDFileSet, iSet);
iSet--;
} else {
@ -78,35 +82,33 @@ int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) {
fSet.diskId = did;
code = tsdbDFileSetCopy(pTsdb, pSet, &fSet);
if (code) goto _err;
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbFSUpsertFSet(&fs, &fSet);
if (code) goto _err;
TSDB_CHECK_CODE(code, lino, _exit);
}
}
// do change fs
code = tsdbFSPrepareCommit(pTsdb, &fs);
if (code) goto _err;
taosThreadRwlockWrlock(&pTsdb->rwLock);
code = tsdbFSCommit(pTsdb);
if (code) {
taosThreadRwlockUnlock(&pTsdb->rwLock);
goto _err;
}
taosThreadRwlockUnlock(&pTsdb->rwLock);
tsdbFSDestroy(&fs);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
return code;
_err:
tsdbError("vgId:%d, tsdb do retention failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
ASSERT(0);
// tsdbFSRollback(pTsdb->pFS);
if (code) {
tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code));
} else {
tsdbInfo("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__);
}
tsdbFSDestroy(&fs);
return code;
}
static int32_t tsdbCommitRetentionImpl(STsdb *pTsdb) { return tsdbFSCommit(pTsdb); }
int32_t tsdbCommitRetention(STsdb *pTsdb) {
taosThreadRwlockWrlock(&pTsdb->rwLock);
tsdbCommitRetentionImpl(pTsdb);
taosThreadRwlockUnlock(&pTsdb->rwLock);
tsdbInfo("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__);
return 0;
}
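The refactor above splits retention into two phases: `tsdbDoRetention` copies the file set and prepares the new FS without holding the write lock, and the new `tsdbCommitRetention` takes `rwLock` only around the short `tsdbFSCommit` step (with `tsdbShouldDoRetention` now reading under a read lock). A hedged sketch of that prepare-outside, commit-under-lock shape, using raw pthreads rather than the taosThread wrappers:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t fsLock = PTHREAD_RWLOCK_INITIALIZER;
static int currentFsVersion = 1;
static int stagedFsVersion  = 0;

/* Phase 1: heavy work (copying/migrating file sets) happens without the
 * write lock, so readers keep using the current FS snapshot. */
static void retention_prepare(void) {
  stagedFsVersion = currentFsVersion + 1; /* stands in for tsdbFSPrepareCommit */
}

/* Phase 2: only the cheap metadata swap runs under the write lock,
 * mirroring tsdbCommitRetention wrapping tsdbFSCommit. */
static void retention_commit(void) {
  pthread_rwlock_wrlock(&fsLock);
  currentFsVersion = stagedFsVersion;
  pthread_rwlock_unlock(&fsLock);
}

/* Readers, like the new tsdbShouldDoRetention, take the read lock. */
static int read_fs_version(void) {
  pthread_rwlock_rdlock(&fsLock);
  int v = currentFsVersion;
  pthread_rwlock_unlock(&fsLock);
  return v;
}

int main(void) {
  retention_prepare();
  retention_commit();
  printf("fs version now %d\n", read_fs_version());
  return 0;
}
```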

View File

@ -35,9 +35,7 @@ int tsdbInsertData(STsdb *pTsdb, int64_t version, SSubmitReq2 *pMsg, SSubmitRsp2
return -1;
}
if (pMsg) {
arrSize = taosArrayGetSize(pMsg->aSubmitTbData);
}
// scan and convert
if (tsdbScanAndConvertSubmitMsg(pTsdb, pMsg) < 0) {

View File

@ -15,6 +15,8 @@
#include "vnd.h"
extern int32_t tsdbCommitCompact(STsdb *pTsdb);
static int32_t vnodeCompactTask(void *param) {
int32_t code = 0;
int32_t lino = 0;
@ -33,8 +35,11 @@ static int32_t vnodeCompactTask(void *param) {
} else {
snprintf(dir, TSDB_FILENAME_LEN, "%s", pVnode->path);
}
vnodeCommitInfo(dir);
tsdbCommitCompact(pVnode->pTsdb);
_exit:
tsem_post(&pInfo->pVnode->canCommit);
taosMemoryFree(pInfo);

View File

@ -0,0 +1,130 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "vnd.h"
typedef struct {
SVnode *pVnode;
int64_t now;
int64_t commitID;
SVnodeInfo info;
} SRetentionInfo;
extern bool tsdbShouldDoRetention(STsdb *pTsdb, int64_t now);
extern int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now);
extern int32_t tsdbCommitRetention(STsdb *pTsdb);
static int32_t vnodePrepareRentention(SVnode *pVnode, SRetentionInfo *pInfo) {
int32_t code = 0;
int32_t lino = 0;
tsem_wait(&pVnode->canCommit);
pInfo->commitID = ++pVnode->state.commitID;
char dir[TSDB_FILENAME_LEN] = {0};
if (pVnode->pTfs) {
snprintf(dir, TSDB_FILENAME_LEN, "%s%s%s", tfsGetPrimaryPath(pVnode->pTfs), TD_DIRSEP, pVnode->path);
} else {
snprintf(dir, TSDB_FILENAME_LEN, "%s", pVnode->path);
}
if (vnodeLoadInfo(dir, &pInfo->info) < 0) {
code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);
}
_exit:
if (code) {
vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
tsem_post(&pVnode->canCommit);
} else {
vInfo("vgId:%d %s done", TD_VID(pVnode), __func__);
}
return code;
}
static int32_t vnodeRetentionTask(void *param) {
int32_t code = 0;
int32_t lino = 0;
SRetentionInfo *pInfo = (SRetentionInfo *)param;
SVnode *pVnode = pInfo->pVnode;
char dir[TSDB_FILENAME_LEN] = {0};
if (pVnode->pTfs) {
snprintf(dir, TSDB_FILENAME_LEN, "%s%s%s", tfsGetPrimaryPath(pVnode->pTfs), TD_DIRSEP, pVnode->path);
} else {
snprintf(dir, TSDB_FILENAME_LEN, "%s", pVnode->path);
}
// save info
pInfo->info.state.commitID = pInfo->commitID;
if (vnodeSaveInfo(dir, &pInfo->info) < 0) {
code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);
}
// do job
code = tsdbDoRetention(pInfo->pVnode->pTsdb, pInfo->now);
TSDB_CHECK_CODE(code, lino, _exit);
// commit info
vnodeCommitInfo(dir);
// commit sub-job
tsdbCommitRetention(pVnode->pTsdb);
_exit:
if (code) {
vError("vgId:%d %s failed at line %d since %s", TD_VID(pInfo->pVnode), __func__, lino, tstrerror(code));
} else {
vInfo("vgId:%d %s done", TD_VID(pInfo->pVnode), __func__);
}
tsem_post(&pInfo->pVnode->canCommit);
taosMemoryFree(pInfo);
return code;
}
int32_t vnodeAsyncRentention(SVnode *pVnode, int64_t now) {
int32_t code = 0;
int32_t lino = 0;
if (!tsdbShouldDoRetention(pVnode->pTsdb, now)) return code;
SRetentionInfo *pInfo = (SRetentionInfo *)taosMemoryCalloc(1, sizeof(*pInfo));
if (pInfo == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
pInfo->pVnode = pVnode;
pInfo->now = now;
code = vnodePrepareRentention(pVnode, pInfo);
TSDB_CHECK_CODE(code, lino, _exit);
vnodeScheduleTask(vnodeRetentionTask, pInfo);
_exit:
if (code) {
vError("vgId:%d %s failed at line %d since %s", TD_VID(pInfo->pVnode), __func__, lino, tstrerror(code));
if (pInfo) taosMemoryFree(pInfo);
} else {
vInfo("vgId:%d %s done", TD_VID(pInfo->pVnode), __func__);
}
return 0;
}

View File

@ -586,6 +586,7 @@ void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) {
pMetaRsp->precision = pVnode->config.tsdbCfg.precision;
}
extern int32_t vnodeAsyncRentention(SVnode *pVnode, int64_t now);
static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
int32_t code = 0;
SVTrimDbReq trimReq = {0};
@ -598,12 +599,16 @@ static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq,
vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp);
// process
// process
#if 0
code = tsdbDoRetention(pVnode->pTsdb, trimReq.timestamp);
if (code) goto _exit;
code = smaDoRetention(pVnode->pSma, trimReq.timestamp);
if (code) goto _exit;
#else
vnodeAsyncRentention(pVnode, trimReq.timestamp);
#endif
_exit:
return code;
@ -635,6 +640,10 @@ static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *p
ret = smaDoRetention(pVnode->pSma, ttlReq.timestamp);
if (ret) goto end;
#else
vnodeAsyncRentention(pVnode, ttlReq.timestamp);
tsem_wait(&pVnode->canCommit);
tsem_post(&pVnode->canCommit);
#endif
end:
@ -1264,8 +1273,8 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
goto _exit;
}
for (int32_t i = 1; i < nColData; i++) {
if (aColData[i].nVal != aColData[0].nVal) {
for (int32_t j = 1; j < nColData; j++) {
if (aColData[j].nVal != aColData[0].nVal) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
@ -1299,8 +1308,8 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
SVCreateTbRsp *pCreateTbRsp = taosArrayReserve(pSubmitRsp->aCreateTbRsp, 1);
// create table
if (metaCreateTable(pVnode->pMeta, version, pSubmitTbData->pCreateTbReq, &pCreateTbRsp->pMeta) ==
0) { // create table success
if (metaCreateTable(pVnode->pMeta, version, pSubmitTbData->pCreateTbReq, &pCreateTbRsp->pMeta) == 0) {
// create table success
if (newTbUids == NULL &&
(newTbUids = taosArrayInit(TARRAY_SIZE(pSubmitReq->aSubmitTbData), sizeof(int64_t))) == NULL) {
@ -1330,7 +1339,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
pSubmitRsp->affectedRows += affectedRows;
}
// update table uid list
// update the affected table uid list
if (taosArrayGetSize(newTbUids) > 0) {
vDebug("vgId:%d, add %d table into query table list in handling submit", TD_VID(pVnode),
(int32_t)taosArrayGetSize(newTbUids));

View File

@ -134,7 +134,6 @@ typedef struct {
// const SSubmitReq* pReq;
SPackedData submit;
SSchemaWrapper* schema;
char tbName[TSDB_TABLE_NAME_LEN];
int8_t recoverStep;
@ -733,7 +732,7 @@ void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int64_t numOfRows, int3
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag, bool inheritUsOrder);
int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);
extern void doDestroyExchangeOperatorInfo(void* param);

View File

@ -37,11 +37,13 @@ int32_t dsCreateDataSinker(const SDataSinkNode* pDataSink, DataSinkHandle* pHand
switch ((int)nodeType(pDataSink)) {
case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:
return createDataDispatcher(&gDataSinkManager, pDataSink, pHandle);
case QUERY_NODE_PHYSICAL_PLAN_DELETE:
case QUERY_NODE_PHYSICAL_PLAN_DELETE: {
return createDataDeleter(&gDataSinkManager, pDataSink, pHandle, pParam);
case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT:
}
case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT: {
return createDataInserter(&gDataSinkManager, pDataSink, pHandle, pParam);
}
}
qError("invalid input node type:%d, %s", nodeType(pDataSink), id);
return TSDB_CODE_QRY_INVALID_INPUT;

View File

@ -850,8 +850,7 @@ static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTa
tagVal.cid = pColInfo->info.colId;
if (p1->pTagVal == NULL) {
colDataSetNULL(pColInfo, i);
}
} else {
const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal);
if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
@ -880,6 +879,7 @@ static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTa
}
}
}
}
return pResBlock;
}

View File

@ -1035,8 +1035,9 @@ int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq, int64_t sc
int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
ASSERT(pTaskInfo->streamInfo.submit.msgStr == NULL);
ASSERT((pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) && (pTaskInfo->streamInfo.submit.msgStr == NULL));
qDebug("set the submit block for future scan");
pTaskInfo->streamInfo.submit = submit;
return 0;
}
@ -1047,14 +1048,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
pTaskInfo->streamInfo.prepareStatus = *pOffset;
pTaskInfo->streamInfo.returned = 0;
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
return 0;
}
if (subType == TOPIC_SUB_TYPE__COLUMN) {
uint16_t type = pOperator->operatorType;
pOperator->status = OP_OPENED;
// TODO add more check
if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
ASSERT(pOperator->numOfDownstream == 1);
pOperator = pOperator->pDownstream[0];
}
@ -1064,11 +1067,13 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->base.dataReader);
pTSInfo->base.dataReader = NULL;
// let's seek to the next version in wal file
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
return -1;
}
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
/*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
// iterate over all tables in tableInfoList and retrieve rows from each table one by one
// this data comes from the snapshot in tsdb, in addition to the data in the wal file.
int64_t uid = pOffset->uid;
int64_t ts = pOffset->ts;
@ -1127,7 +1132,6 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid,
ts, pTableScanInfo->currentTable, numOfTables);
/*}*/
} else {
ASSERT(0);
}
@ -1170,7 +1174,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
pTaskInfo->streamInfo.schema = mtInfo.schema;
qDebug("tmqsnap qStreamPrepareScan snapshot data uid %" PRId64 " ts %" PRId64, mtInfo.uid, pOffset->ts);
qDebug("tmqsnap qStreamPrepareScan snapshot data uid:%" PRId64 " ts %" PRId64, mtInfo.uid, pOffset->ts);
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) {
SStreamRawScanInfo* pInfo = pOperator->info;
SSnapContext* sContext = pInfo->sContext;
@ -1178,7 +1182,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
qError("setForSnapShot error. uid:%" PRIu64 " ,version:%" PRId64, pOffset->uid, pOffset->version);
return -1;
}
qDebug("tmqsnap qStreamPrepareScan snapshot meta uid %" PRId64 " ts %" PRId64, pOffset->uid, pOffset->ts);
qDebug("tmqsnap qStreamPrepareScan snapshot meta uid:%" PRId64 " ts %" PRId64, pOffset->uid, pOffset->ts);
} else if (pOffset->type == TMQ_OFFSET__LOG) {
SStreamRawScanInfo* pInfo = pOperator->info;
tsdbReaderClose(pInfo->dataReader);

View File

@ -1449,16 +1449,21 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t
return TSDB_CODE_SUCCESS;
}
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag) {
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag, bool inheritUsOrder) {
// todo add more information about exchange operation
int32_t type = pOperator->operatorType;
if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN ||
type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN ||
type == QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN ||
type == QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN) {
if (type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN ||
type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN ||
type == QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN) {
*order = TSDB_ORDER_ASC;
*scanFlag = MAIN_SCAN;
return TSDB_CODE_SUCCESS;
} else if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE) {
if (!inheritUsOrder) {
*order = TSDB_ORDER_ASC;
}
*scanFlag = MAIN_SCAN;
return TSDB_CODE_SUCCESS;
} else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pTableScanInfo = pOperator->info;
*order = pTableScanInfo->base.cond.order;
@ -1473,7 +1478,7 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) {
return TSDB_CODE_INVALID_PARA;
} else {
return getTableScanInfo(pOperator->pDownstream[0], order, scanFlag);
return getTableScanInfo(pOperator->pDownstream[0], order, scanFlag, inheritUsOrder);
}
}
}
@ -1589,7 +1594,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
}
hasValidBlock = true;
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag);
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
destroyDataBlockForEmptyInput(blockAllocated, &pBlock);
T_LONG_JMP(pTaskInfo->env, code);
@ -1996,6 +2001,11 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode);
int32_t extractTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNode, SExecTaskInfo* pTaskInfo) {
SMetaReader mr = {0};
if (pHandle == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
metaReaderInit(&mr, pHandle->meta, 0);
int32_t code = metaGetTableEntryByUidCache(&mr, pScanNode->uid);
if (code != TSDB_CODE_SUCCESS) {

View File

@ -69,7 +69,7 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
getTableScanInfo(pOperator, &order, &scanFlag);
getTableScanInfo(pOperator, &order, &scanFlag, false);
int64_t ekey = pInfo->existNewGroupBlock->info.window.ekey;
taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo));
@ -128,7 +128,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
getTableScanInfo(pOperator, &order, &scanFlag);
getTableScanInfo(pOperator, &order, &scanFlag, false);
doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, pTaskInfo);
if (pResBlock->info.rows > 0) {

View File

@ -383,7 +383,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
break;
}
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag);
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}

View File

@ -289,7 +289,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
}
// the pDataBlock is always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag);
int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
@ -441,7 +441,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp
SExprSupp* pSup = &pOperator->exprSupp;
// the pDataBlock is always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag);
int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}

View File

@ -1562,7 +1562,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamScanInfo* pInfo = pOperator->info;
qDebug("queue scan called");
qDebug("start to exec queue scan");
if (pTaskInfo->streamInfo.submit.msgStr != NULL) {
if (pInfo->tqReader->msg2.msgStr == NULL) {
@ -1587,7 +1587,6 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
SSDataBlock block = {0};
int32_t code = tqRetrieveDataBlock2(&block, pInfo->tqReader, NULL);
if (code != TSDB_CODE_SUCCESS || block.info.rows == 0) {
continue;
}

View File

@ -672,7 +672,9 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
}
bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo);
if (limitReached) {
// if the limit is reached within a group, do not clear limitInfo; otherwise the next block
// will be processed.
if (newgroup && limitReached) {
resetLimitInfoForNextGroup(&pInfo->limitInfo);
}

View File

@ -1072,7 +1072,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
break;
}
getTableScanInfo(pOperator, &pInfo->inputOrder, &scanFlag);
getTableScanInfo(pOperator, &pInfo->inputOrder, &scanFlag, true);
if (pInfo->scalarSupp.pExprInfo != NULL) {
SExprSupp* pExprSup = &pInfo->scalarSupp;
@ -4294,7 +4294,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
}
}
getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag);
getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag, false);
setInputDataBlock(pSup, pBlock, pIaInfo->inputOrder, scanFlag, true);
doMergeAlignedIntervalAggImpl(pOperator, &pIaInfo->binfo.resultRowInfo, pBlock, pRes);
@ -4621,7 +4621,7 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
break;
}
getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag);
getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag, false);
setInputDataBlock(pExpSupp, pBlock, iaInfo->inputOrder, scanFlag, true);
doMergeIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);

View File

@ -5579,7 +5579,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t bucketRange = (pData->defMaxRows - pData->defMinRows) / numOfBuckets;
for (int32_t i = 0; i < tListLen(pData->blockRowsHisto); ++i) {
len = sprintf(st + VARSTR_HEADER_SIZE, "%04d |", pData->defMinRows + bucketRange * i);
len = sprintf(st + VARSTR_HEADER_SIZE, "%04d |", pData->defMinRows + bucketRange * (i + 1));
int32_t num = 0;
if (pData->blockRowsHisto[i] > 0) {
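With the change above, each histogram row printed by blockDistFinalize is labeled with the upper edge of its bucket (defMinRows + bucketRange * (i + 1)) rather than the lower edge. A small standalone sketch of the two labelings, using assumed example values for defMinRows, defMaxRows and the bucket count:

#include <stdio.h>

int main(void) {
  int defMinRows = 0, defMaxRows = 1000, numOfBuckets = 20;    /* assumed example values */
  int bucketRange = (defMaxRows - defMinRows) / numOfBuckets;  /* 50 rows per bucket */
  for (int i = 0; i < 3; i++) {
    printf("bucket %d: old label %04d | new label %04d |\n",
           i, defMinRows + bucketRange * i, defMinRows + bucketRange * (i + 1));
  }
  return 0;
}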

View File

@ -946,6 +946,12 @@ int32_t udfdOpenClientRpc() {
rpcInit.rfp = udfdRpcRfp;
rpcInit.compressSize = tsCompressMsgSize;
int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3);
connLimitNum = TMAX(connLimitNum, 10);
connLimitNum = TMIN(connLimitNum, 500);
rpcInit.connLimitNum = connLimitNum;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
global.clientRpc = rpcOpen(&rpcInit);
if (global.clientRpc == NULL) {
fnError("failed to init dnode rpc client");

View File

@ -158,7 +158,7 @@ static int32_t valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) {
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY: {
int32_t len = varDataTLen(pSrc->datum.p) + 1;
int32_t len = pSrc->node.resType.bytes + 1;
pDst->datum.p = taosMemoryCalloc(1, len);
if (NULL == pDst->datum.p) {
return TSDB_CODE_OUT_OF_MEMORY;

View File

@ -3269,7 +3269,7 @@ static int32_t jsonToDatum(const SJson* pJson, void* pObj) {
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY: {
pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + 1);
if (NULL == pNode->datum.p) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;

View File

@ -928,7 +928,7 @@ static int32_t msgToDatum(STlv* pTlv, void* pObj) {
code = TSDB_CODE_FAILED;
break;
}
pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + 1);
if (NULL == pNode->datum.p) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;

View File

@ -2101,9 +2101,9 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY:
pVal->pz = taosMemoryMalloc(pVal->nLen + VARSTR_HEADER_SIZE + 1);
memcpy(pVal->pz, pNode->datum.p, pVal->nLen + VARSTR_HEADER_SIZE);
pVal->pz[pVal->nLen + VARSTR_HEADER_SIZE] = 0;
pVal->pz = taosMemoryMalloc(pVal->nLen + 1);
memcpy(pVal->pz, pNode->datum.p, pVal->nLen);
pVal->pz[pVal->nLen] = 0;
break;
case TSDB_DATA_TYPE_JSON:
pVal->nLen = getJsonValueLen(pNode->datum.p);

View File

@ -148,6 +148,7 @@ typedef struct {
int8_t epsetRetryCnt;
int32_t retryCode;
void* task;
int hThrdIdx;
} STransConnCtx;

View File

@ -68,7 +68,7 @@ typedef struct {
int8_t connLimitLock; // 0: no lock. 1. lock
int8_t supportBatch; // 0: no batch, 1: support batch
int32_t batchSize;
int32_t timeToGetConn;
int index;
void* parent;
void* tcphandle; // returned handle from TCP initialization

View File

@ -66,6 +66,10 @@ void* rpcOpen(const SRpcInit* pInit) {
pRpc->destroyFp = pInit->dfp;
pRpc->failFastFp = pInit->ffp;
pRpc->connLimitNum = pInit->connLimitNum;
if (pRpc->connLimitNum == 0) {
pRpc->connLimitNum = 20;
}
pRpc->connLimitLock = pInit->connLimitLock;
pRpc->supportBatch = pInit->supportBatch;
pRpc->batchSize = pInit->batchSize;
@ -90,7 +94,10 @@ void* rpcOpen(const SRpcInit* pInit) {
if (pInit->user) {
tstrncpy(pRpc->user, pInit->user, sizeof(pRpc->user));
}
pRpc->timeToGetConn = pInit->timeToGetConn;
if (pRpc->timeToGetConn == 0) {
pRpc->timeToGetConn = 10 * 1000;
}
pRpc->tcphandle =
(*taosInitHandle[pRpc->connType])(ip, pInit->localPort, pRpc->label, pRpc->numOfThreads, NULL, pRpc);
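rpcOpen in turn falls back to built-in defaults when the caller leaves these two fields at zero: 20 connections per endpoint key and a 10-second wait to obtain one. A minimal sketch of that fallback, with SRpcInit/STrans reduced to hypothetical two-field stand-ins rather than the real transport structs:

#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t connLimitNum; int32_t timeToGetConn; } SRpcInitLite;  /* simplified stand-in */
typedef struct { int32_t connLimitNum; int32_t timeToGetConn; } STransLite;    /* simplified stand-in */

static void applyDefaults(const SRpcInitLite *pInit, STransLite *pRpc) {
  pRpc->connLimitNum = pInit->connLimitNum;
  if (pRpc->connLimitNum == 0) pRpc->connLimitNum = 20;           /* default conn limit */

  pRpc->timeToGetConn = pInit->timeToGetConn;
  if (pRpc->timeToGetConn == 0) pRpc->timeToGetConn = 10 * 1000;  /* default wait: 10s in ms */
}

int main(void) {
  SRpcInitLite init = {0};  /* caller left both fields unset */
  STransLite trans = {0};
  applyDefaults(&init, &trans);
  printf("connLimitNum=%d timeToGetConn=%dms\n", (int)trans.connLimitNum, (int)trans.timeToGetConn);
  return 0;
}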

View File

@ -15,9 +15,15 @@
#include "transComm.h"
#include "tutil.h"
typedef struct {
int32_t numOfConn;
queue msgQ;
} SMsgList;
typedef struct SConnList {
queue conns;
int32_t size;
SMsgList* list;
} SConnList;
typedef struct {
@ -100,6 +106,7 @@ typedef struct SCliThrd {
TdThreadMutex msgMtx;
SDelayQueue* delayQueue;
SDelayQueue* timeoutQueue;
SDelayQueue* waitConnQueue;
uint64_t nextTimeout; // next timeout
void* pTransInst; //
@ -109,7 +116,6 @@ typedef struct SCliThrd {
SCvtAddr cvtAddr;
SHashObj* failFastCache;
SHashObj* connLimitCache;
SHashObj* batchCache;
SCliMsg* stopMsg;
@ -135,8 +141,8 @@ typedef struct {
// conn pool
// add expire timeout and capacity limit
static void* createConnPool(int size);
static void* destroyConnPool(void* pool);
static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port);
static void* destroyConnPool(SCliThrd* thread);
static SCliConn* getConnFromPool(SCliThrd* thread, char* key, bool* exceed);
static void addConnToPool(void* pool, SCliConn* conn);
static void doCloseIdleConn(void* param);
@ -176,7 +182,8 @@ static void cliSend(SCliConn* pConn);
static void cliSendBatch(SCliConn* pConn);
static void cliDestroyConnMsgs(SCliConn* conn, bool destroy);
static int32_t cliPreCheckSessionLimit(SCliThrd* pThrd, char* ip, uint16_t port);
static void doFreeTimeoutMsg(void* param);
static int32_t cliPreCheckSessionLimitForMsg(SCliThrd* pThrd, char* addr, SCliMsg** pMsg);
// cli util func
static FORCE_INLINE bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx);
@ -194,6 +201,7 @@ static void cliHandleExcept(SCliConn* conn);
static void cliReleaseUnfinishedMsg(SCliConn* conn);
static void cliHandleFastFail(SCliConn* pConn, int status);
static void doNotifyApp(SCliMsg* pMsg, SCliThrd* pThrd);
// handle req from app
static void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd);
static void cliHandleQuit(SCliMsg* pMsg, SCliThrd* pThrd);
@ -543,7 +551,8 @@ void* createConnPool(int size) {
// thread local, no lock
return taosHashInit(size, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
}
void* destroyConnPool(void* pool) {
void* destroyConnPool(SCliThrd* pThrd) {
void* pool = pThrd->pool;
SConnList* connList = taosHashIterate((SHashObj*)pool, NULL);
while (connList != NULL) {
while (!QUEUE_IS_EMPTY(&connList->conns)) {
@ -551,34 +560,130 @@ void* destroyConnPool(void* pool) {
SCliConn* c = QUEUE_DATA(h, SCliConn, q);
cliDestroyConn(c, true);
}
SMsgList* msglist = connList->list;
while (!QUEUE_IS_EMPTY(&msglist->msgQ)) {
queue* h = QUEUE_HEAD(&msglist->msgQ);
QUEUE_REMOVE(h);
SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q);
transDQCancel(pThrd->waitConnQueue, pMsg->ctx->task);
pMsg->ctx->task = NULL;
doNotifyApp(pMsg, pThrd);
}
taosMemoryFree(msglist);
connList = taosHashIterate((SHashObj*)pool, connList);
}
taosHashCleanup(pool);
return NULL;
}
static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
char key[TSDB_FQDN_LEN + 64] = {0};
CONN_CONSTRUCT_HASH_KEY(key, ip, port);
static SCliConn* getConnFromPool(SCliThrd* pThrd, char* key, bool* exceed) {
void* pool = pThrd->pool;
SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key));
STrans* pTranInst = pThrd->pTransInst;
if (plist == NULL) {
SConnList list = {0};
taosHashPut((SHashObj*)pool, key, strlen(key), (void*)&list, sizeof(list));
plist = taosHashGet(pool, key, strlen(key));
SMsgList* nList = taosMemoryCalloc(1, sizeof(SMsgList));
QUEUE_INIT(&nList->msgQ);
nList->numOfConn++;
QUEUE_INIT(&plist->conns);
plist->list = nList;
}
if (QUEUE_IS_EMPTY(&plist->conns)) {
if (plist->list->numOfConn >= pTranInst->connLimitNum) {
*exceed = true;
}
return NULL;
}
queue* h = QUEUE_TAIL(&plist->conns);
QUEUE_REMOVE(h);
plist->size -= 1;
SCliConn* conn = QUEUE_DATA(h, SCliConn, q);
conn->status = ConnNormal;
QUEUE_INIT(&conn->q);
if (conn->task != NULL) {
transDQCancel(((SCliThrd*)conn->hostThrd)->timeoutQueue, conn->task);
conn->task = NULL;
}
return conn;
}
static SCliConn* getConnFromPool2(SCliThrd* pThrd, char* key, SCliMsg** pMsg) {
void* pool = pThrd->pool;
STrans* pTransInst = pThrd->pTransInst;
SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key));
if (plist == NULL) {
SConnList list = {0};
taosHashPut((SHashObj*)pool, key, strlen(key), (void*)&list, sizeof(list));
plist = taosHashGet((SHashObj*)pool, key, strlen(key));
if (plist == NULL) return NULL;
plist = taosHashGet(pool, key, strlen(key));
SMsgList* nList = taosMemoryCalloc(1, sizeof(SMsgList));
QUEUE_INIT(&nList->msgQ);
nList->numOfConn++;
QUEUE_INIT(&plist->conns);
plist->list = nList;
}
STraceId* trace = &(*pMsg)->msg.info.traceId;
// no available conn in pool
if (QUEUE_IS_EMPTY(&plist->conns)) {
SMsgList* list = plist->list;
if ((list)->numOfConn >= pTransInst->connLimitNum) {
STraceId* trace = &(*pMsg)->msg.info.traceId;
STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg));
arg->param1 = *pMsg;
arg->param2 = pThrd;
(*pMsg)->ctx->task = transDQSched(pThrd->waitConnQueue, doFreeTimeoutMsg, arg, pTransInst->timeToGetConn);
tGTrace("%s msg %s delay to send, wait for avaiable connect", pTransInst->label, TMSG_INFO((*pMsg)->msg.msgType));
QUEUE_PUSH(&(list)->msgQ, &(*pMsg)->q);
*pMsg = NULL;
} else {
// send msg in delay queue
if (!(QUEUE_IS_EMPTY(&(list)->msgQ))) {
STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg));
arg->param1 = *pMsg;
arg->param2 = pThrd;
(*pMsg)->ctx->task = transDQSched(pThrd->waitConnQueue, doFreeTimeoutMsg, arg, pTransInst->timeToGetConn);
tGTrace("%s msg %s delay to send, wait for avaiable connect", pTransInst->label,
TMSG_INFO((*pMsg)->msg.msgType));
QUEUE_PUSH(&(list)->msgQ, &(*pMsg)->q);
queue* h = QUEUE_HEAD(&(list)->msgQ);
QUEUE_REMOVE(h);
SCliMsg* ans = QUEUE_DATA(h, SCliMsg, q);
*pMsg = ans;
trace = &(*pMsg)->msg.info.traceId;
tGTrace("%s msg %s pop from delay queue, start to send", pTransInst->label, TMSG_INFO((*pMsg)->msg.msgType));
transDQCancel(pThrd->waitConnQueue, ans->ctx->task);
}
list->numOfConn++;
}
return NULL;
}
queue* h = QUEUE_TAIL(&plist->conns);
plist->size -= 1;
queue* h = QUEUE_HEAD(&plist->conns);
QUEUE_REMOVE(h);
SCliConn* conn = QUEUE_DATA(h, SCliConn, q);
conn->status = ConnNormal;
QUEUE_REMOVE(&conn->q);
QUEUE_INIT(&conn->q);
if (conn->task != NULL) {
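getConnFromPool2 carries the core of the new flow control: an idle connection for the endpoint is reused when one exists; otherwise, if numOfConn has already hit connLimitNum, the message is parked in the per-endpoint msgQ with a timeout task on waitConnQueue; otherwise a new connection is allowed (and one already-parked message, if any, is flushed first). A reduced sketch of just that three-way decision, using illustrative types rather than the real SConnList/SMsgList:

#include <stdio.h>

typedef struct {
  int idleConns;  /* conns currently parked in the pool for this endpoint */
  int numOfConn;  /* conns created (idle + in-flight) for this endpoint */
} EndpointPoolLite;  /* illustrative stand-in for SConnList/SMsgList */

typedef enum { REUSE_IDLE, PARK_AND_WAIT, OPEN_NEW } PoolDecision;

static PoolDecision decide(EndpointPoolLite *p, int connLimitNum) {
  if (p->idleConns > 0) {            /* reuse an idle connection */
    p->idleConns--;
    return REUSE_IDLE;
  }
  if (p->numOfConn >= connLimitNum) {
    return PARK_AND_WAIT;            /* queue the msg; a timeout task notifies the app if nothing frees up */
  }
  p->numOfConn++;                    /* allowed to dial a new connection */
  return OPEN_NEW;
}

int main(void) {
  EndpointPoolLite ep = {.idleConns = 0, .numOfConn = 20};
  PoolDecision d = decide(&ep, 20);  /* budget of 20 already used up */
  printf("%s\n", d == PARK_AND_WAIT ? "park msg and wait for a free conn" : "send now");
  return 0;
}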
@ -606,18 +711,34 @@ static void addConnToPool(void* pool, SCliConn* conn) {
cliDestroyConnMsgs(conn, false);
conn->status = ConnInPool;
if (conn->list == NULL) {
tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
conn->list = taosHashGet((SHashObj*)pool, conn->ip, strlen(conn->ip));
} else {
tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
}
SConnList* pList = conn->list;
SMsgList* msgList = pList->list;
if (!QUEUE_IS_EMPTY(&msgList->msgQ)) {
queue* h = QUEUE_HEAD(&(msgList)->msgQ);
QUEUE_REMOVE(h);
SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q);
transDQCancel(thrd->waitConnQueue, pMsg->ctx->task);
pMsg->ctx->task = NULL;
transCtxMerge(&conn->ctx, &pMsg->ctx->appCtx);
transQueuePush(&conn->cliMsgs, pMsg);
conn->status = ConnNormal;
cliSend(conn);
return;
}
conn->status = ConnInPool;
QUEUE_PUSH(&conn->list->conns, &conn->q);
conn->list->size += 1;
if (conn->list->size >= 250) {
if (conn->list->size >= 20) {
STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
arg->param1 = conn;
arg->param2 = thrd;
@ -739,8 +860,20 @@ static SCliConn* cliCreateConn(SCliThrd* pThrd) {
static void cliDestroyConn(SCliConn* conn, bool clear) {
SCliThrd* pThrd = conn->hostThrd;
tTrace("%s conn %p remove from conn pool", CONN_GET_INST_LABEL(conn), conn);
QUEUE_REMOVE(&conn->q);
QUEUE_INIT(&conn->q);
if (conn->list != NULL) {
SConnList* connList = conn->list;
connList->list->numOfConn--;
connList->size--;
} else {
SConnList* connList = taosHashGet((SHashObj*)pThrd->pool, conn->ip, strlen(conn->ip));
connList->list->numOfConn--;
}
conn->list = NULL;
transReleaseExHandle(transGetRefMgt(), conn->refId);
transRemoveExHandle(transGetRefMgt(), conn->refId);
conn->refId = -1;
@ -775,9 +908,6 @@ static void cliDestroy(uv_handle_t* handle) {
conn->timer->data = NULL;
conn->timer = NULL;
}
int32_t* oVal = taosHashGet(pThrd->connLimitCache, conn->ip, strlen(conn->ip));
int32_t nVal = oVal == NULL ? 0 : (*oVal) - 1;
taosHashPut(pThrd->connLimitCache, conn->ip, strlen(conn->ip), &nVal, sizeof(nVal));
atomic_sub_fetch_32(&pThrd->connCount, 1);
@ -1010,11 +1140,15 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) {
STrans* pTransInst = pThrd->pTransInst;
SCliBatchList* pList = pBatch->pList;
SCliConn* conn = getConnFromPool(pThrd->pool, pList->ip, pList->port);
char key[TSDB_FQDN_LEN + 64] = {0};
CONN_CONSTRUCT_HASH_KEY(key, pList->ip, pList->port);
if (conn == NULL && 0 != cliPreCheckSessionLimit(pThrd, pList->ip, pList->port)) {
tError("%s failed to send batch msg, batch size:%d, msgLen: %d", pTransInst->label, pBatch->wLen,
pBatch->batchSize);
bool exceed = false;
SCliConn* conn = getConnFromPool(pThrd, key, &exceed);
if (conn == NULL && exceed) {
tError("%s failed to send batch msg, batch size:%d, msgLen: %d, conn limit:%d", pTransInst->label, pBatch->wLen,
pBatch->batchSize, pTransInst->connLimitNum);
cliDestroyBatch(pBatch);
return;
}
@ -1174,10 +1308,6 @@ void cliConnCb(uv_connect_t* req, int status) {
return;
}
int32_t* oVal = taosHashGet(pThrd->connLimitCache, pConn->ip, strlen(pConn->ip));
int32_t nVal = oVal == NULL ? 0 : (*oVal) + 1;
taosHashPut(pThrd->connLimitCache, pConn->ip, strlen(pConn->ip), &nVal, sizeof(nVal));
struct sockaddr peername, sockname;
int addrlen = sizeof(peername);
uv_tcp_getpeername((uv_tcp_t*)pConn->stream, &peername, &addrlen);
@ -1195,6 +1325,29 @@ void cliConnCb(uv_connect_t* req, int status) {
}
}
static void doNotifyApp(SCliMsg* pMsg, SCliThrd* pThrd) {
STransConnCtx* pCtx = pMsg->ctx;
STrans* pTransInst = pThrd->pTransInst;
STransMsg transMsg = {0};
transMsg.contLen = 0;
transMsg.pCont = NULL;
transMsg.code = TSDB_CODE_RPC_MAX_SESSIONS;
transMsg.msgType = pMsg->msg.msgType + 1;
transMsg.info.ahandle = pMsg->ctx->ahandle;
transMsg.info.traceId = pMsg->msg.info.traceId;
transMsg.info.hasEpSet = false;
if (pCtx->pSem != NULL) {
if (pCtx->pRsp == NULL) {
} else {
memcpy((char*)pCtx->pRsp, (char*)&transMsg, sizeof(transMsg));
}
} else {
pTransInst->cfp(pTransInst->parent, &transMsg, NULL);
}
destroyCmsg(pMsg);
}
static void cliHandleQuit(SCliMsg* pMsg, SCliThrd* pThrd) {
if (!transAsyncPoolIsEmpty(pThrd->asyncPool)) {
pThrd->stopMsg = pMsg;
@ -1204,7 +1357,8 @@ static void cliHandleQuit(SCliMsg* pMsg, SCliThrd* pThrd) {
pThrd->quit = true;
tDebug("cli work thread %p start to quit", pThrd);
destroyCmsg(pMsg);
destroyConnPool(pThrd->pool);
destroyConnPool(pThrd);
uv_walk(pThrd->loop, cliWalkCb, NULL);
}
static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
@ -1237,11 +1391,11 @@ static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd) {
destroyCmsg(pMsg);
}
SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) {
STransConnCtx* pCtx = pMsg->ctx;
SCliConn* cliGetConn(SCliMsg** pMsg, SCliThrd* pThrd, bool* ignore, char* addr) {
STransConnCtx* pCtx = (*pMsg)->ctx;
SCliConn* conn = NULL;
int64_t refId = (int64_t)(pMsg->msg.info.handle);
int64_t refId = (int64_t)((*pMsg)->msg.info.handle);
if (refId != 0) {
SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId);
if (exh == NULL) {
@ -1251,7 +1405,7 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) {
} else {
conn = exh->handle;
if (conn == NULL) {
conn = getConnFromPool(pThrd->pool, EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet));
conn = getConnFromPool2(pThrd, addr, pMsg);
if (conn != NULL) specifyConnRef(conn, true, refId);
}
transReleaseExHandle(transGetRefMgt(), refId);
@ -1259,7 +1413,7 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) {
return conn;
};
conn = getConnFromPool(pThrd->pool, EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet));
conn = getConnFromPool2(pThrd, addr, pMsg);
if (conn != NULL) {
tTrace("%s conn %p get from conn pool:%p", CONN_GET_INST_LABEL(conn), conn, pThrd->pool);
} else {
@ -1317,57 +1471,34 @@ static FORCE_INLINE void cliUpdateFqdnCache(SHashObj* cache, char* fqdn) {
return;
}
static int32_t cliPreCheckSessionLimit(SCliThrd* pThrd, char* ip, uint16_t port) {
static void doFreeTimeoutMsg(void* param) {
STaskArg* arg = param;
SCliMsg* pMsg = arg->param1;
SCliThrd* pThrd = arg->param2;
STrans* pTransInst = pThrd->pTransInst;
// STransConnCtx* pCtx = pMsg->ctx;
// char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet);
// int32_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet);
char key[TSDB_FQDN_LEN + 64] = {0};
CONN_CONSTRUCT_HASH_KEY(key, ip, port);
int32_t* val = taosHashGet(pThrd->connLimitCache, key, strlen(key));
if (val == NULL) return 0;
if (*val >= pTransInst->connLimitNum) {
return -1;
}
return 0;
QUEUE_REMOVE(&pMsg->q);
STraceId* trace = &pMsg->msg.info.traceId;
tGTrace("%s msg %s cannot get available conn after timeout", pTransInst->label, TMSG_INFO(pMsg->msg.msgType));
doNotifyApp(pMsg, pThrd);
taosMemoryFree(arg);
}
void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
STrans* pTransInst = pThrd->pTransInst;
STransConnCtx* pCtx = pMsg->ctx;
cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr);
STraceId* trace = &pMsg->msg.info.traceId;
char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet);
uint16_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet);
if (!EPSET_IS_VALID(&pCtx->epSet)) {
tGError("%s, msg %s sent with invalid epset", pTransInst->label, TMSG_INFO(pMsg->msg.msgType));
cliMayCvtFqdnToIp(&pMsg->ctx->epSet, &pThrd->cvtAddr);
if (!EPSET_IS_VALID(&pMsg->ctx->epSet)) {
destroyCmsg(pMsg);
return;
}
if (REQUEST_NO_RESP(&pMsg->msg) && (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) {
char key[TSDB_FQDN_LEN + 64] = {0};
CONN_CONSTRUCT_HASH_KEY(key, ip, port);
SFailFastItem* item = taosHashGet(pThrd->failFastCache, key, strlen(key));
if (item != NULL) {
int32_t elapse = (int32_t)(taosGetTimestampMs() - item->timestamp);
if (item->count >= pTransInst->failFastThreshold && (elapse >= 0 && elapse <= pTransInst->failFastInterval)) {
tGTrace("%s, msg %s cancel to send, reason: failed to connect %s:%d: count: %d, at %d", pTransInst->label,
TMSG_INFO(pMsg->msg.msgType), ip, port, item->count, elapse);
destroyCmsg(pMsg);
return;
}
}
}
char* fqdn = EPSET_GET_INUSE_IP(&pMsg->ctx->epSet);
uint16_t port = EPSET_GET_INUSE_PORT(&pMsg->ctx->epSet);
char addr[TSDB_FQDN_LEN + 64] = {0};
CONN_CONSTRUCT_HASH_KEY(addr, fqdn, port);
bool ignore = false;
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
SCliConn* conn = cliGetConn(&pMsg, pThrd, &ignore, addr);
if (ignore == true) {
// persist conn already released by server
STransMsg resp;
@ -1378,16 +1509,13 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
destroyCmsg(pMsg);
return;
}
if (conn == NULL && REQUEST_NO_RESP(&pMsg->msg) && 0 != cliPreCheckSessionLimit(pThrd, ip, port)) {
tGTrace("%s, msg %s cancel to send, reason: %s", pTransInst->label, TMSG_INFO(pMsg->msg.msgType),
tstrerror(TSDB_CODE_RPC_MAX_SESSIONS));
destroyCmsg(pMsg);
if (conn == NULL && pMsg == NULL) {
return;
}
STraceId* trace = &pMsg->msg.info.traceId;
if (conn != NULL) {
transCtxMerge(&conn->ctx, &pCtx->appCtx);
transCtxMerge(&conn->ctx, &pMsg->ctx->appCtx);
transQueuePush(&conn->cliMsgs, pMsg);
cliSend(conn);
} else {
@ -1396,15 +1524,10 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
int64_t refId = (int64_t)pMsg->msg.info.handle;
if (refId != 0) specifyConnRef(conn, true, refId);
transCtxMerge(&conn->ctx, &pCtx->appCtx);
transCtxMerge(&conn->ctx, &pMsg->ctx->appCtx);
transQueuePush(&conn->cliMsgs, pMsg);
char key[TSDB_FQDN_LEN + 64] = {0};
char* fqdn = EPSET_GET_INUSE_IP(&pCtx->epSet);
uint16_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet);
CONN_CONSTRUCT_HASH_KEY(key, fqdn, port);
conn->ip = taosStrdup(key);
conn->ip = taosStrdup(addr);
uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, fqdn);
if (ipaddr == 0xffffffff) {
@ -1834,14 +1957,14 @@ static SCliThrd* createThrdObj(void* trans) {
transDQCreate(pThrd->loop, &pThrd->timeoutQueue);
transDQCreate(pThrd->loop, &pThrd->waitConnQueue);
pThrd->nextTimeout = taosGetTimestampMs() + CONN_PERSIST_TIME(pTransInst->idleTime);
pThrd->pTransInst = trans;
pThrd->destroyAhandleFp = pTransInst->destroyFp;
pThrd->fqdn2ipCache = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pThrd->failFastCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pThrd->connLimitCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true,
pTransInst->connLimitLock == 0 ? HASH_NO_LOCK : HASH_ENTRY_LOCK);
pThrd->batchCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
@ -1861,6 +1984,7 @@ static void destroyThrdObj(SCliThrd* pThrd) {
transDQDestroy(pThrd->delayQueue, destroyCmsgAndAhandle);
transDQDestroy(pThrd->timeoutQueue, NULL);
transDQDestroy(pThrd->waitConnQueue, NULL);
tDebug("thread destroy %" PRId64, pThrd->pid);
for (int i = 0; i < taosArrayGetSize(pThrd->timerList); i++) {
@ -1872,7 +1996,6 @@ static void destroyThrdObj(SCliThrd* pThrd) {
taosMemoryFree(pThrd->loop);
taosHashCleanup(pThrd->fqdn2ipCache);
taosHashCleanup(pThrd->failFastCache);
taosHashCleanup(pThrd->connLimitCache);
void** pIter = taosHashIterate(pThrd->batchCache, NULL);
while (pIter != NULL) {

View File

@ -50,11 +50,6 @@ uint32_t taosDJB2Hash(const char *key, uint32_t len) {
return hash;
}
uint32_t xxHash(const char *key, uint32_t len) {
int32_t seed = 0xcc9e2d51;
return XXH32(key, len, seed);
}
uint32_t MurmurHash3_32(const char *key, uint32_t len) {
const uint8_t *data = (const uint8_t *)key;
const int32_t nblocks = len >> 2u;

View File

@ -945,7 +945,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 3
#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 3

View File

@ -0,0 +1,520 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import socket
import subprocess
import random
import string
import random
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *
from util.sqlset import *
from util.dnodes import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
#
# -------------- util --------------------------
#
def pathSize(path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for i in filenames:
#use join to concatenate all the components of path
f = os.path.join(dirpath, i)
#use getsize to generate size in bytes and add it to the total size
total_size += os.path.getsize(f)
#print(dirpath)
print(" %s %.02f MB"%(path, total_size/1024/1024))
return total_size
'''
total = 0
with os.scandir(path) as it:
for entry in it:
if entry.is_file():
total += entry.stat().st_size
elif entry.is_dir():
total += pathSize(entry.path)
print(" %s %.02f MB"%(path, total/1024/1024))
return total
'''
#
# --------------- cluster ------------------------
#
class MyDnodes(TDDnodes):
def __init__(self ,dnodes_lists):
super(MyDnodes,self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
class TagCluster:
noConn = True
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
self.depoly_cluster(5)
self.master_dnode = self.TDDnodes.dnodes[0]
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def depoly_cluster(self ,dnodes_nums):
testCluster = False
valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
for num in range(1, dnodes_nums+1):
dnode = TDDnode(num)
dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
dnode.addExtraCfg("fqdn", f"{hostname}")
dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
dnode.addExtraCfg("monitorFqdn", hostname)
dnode.addExtraCfg("monitorPort", 7043)
dnodes.append(dnode)
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
self.TDDnodes.setValgrind(valgrind)
self.TDDnodes.setAsan(tdDnodes.getAsan())
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
# create cluster
dnode_first_host = ""
sql = ""
for dnode in self.TDDnodes.dnodes[1:]:
# print(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
if dnode_first_host == "":
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
sql += f"create dnode '{dnode_id}'; "
cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s "
cmd += f'"{sql}"'
print(cmd)
os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster done! ")
def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
port = dnode.cfgDict["serverPort"]
config_dir = dnode.cfgDir
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
tdLog.info(" create cluster ok.")
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
class PerfDB:
def __init__(self):
self.sqls = []
self.spends= []
# execute
def execute(self, sql):
print(f" perfdb execute {sql}")
stime = time.time()
ret = tdSql.execute(sql, 1)
spend = time.time() - stime
self.sqls.append(sql)
self.spends.append(spend)
return ret
# query
def query(self, sql):
print(f" perfdb query {sql}")
start = time.time()
ret = tdSql.query(sql, None, 1)
spend = time.time() - start
self.sqls.append(sql)
self.spends.append(spend)
return ret
#
# ----------------- TDTestCase ------------------
#
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
tdLog.debug("start to execute %s" % __file__)
self.dbs = [PerfDB(), PerfDB()]
self.cur = 0
self.tagCluster = TagCluster()
self.tagCluster.init(conn, logSql, replicaVar)
self.lenBinary = 64
self.lenNchar = 32
# column
self.column_dict = {
'ts': 'timestamp',
'col1': 'tinyint',
'col2': 'smallint',
'col3': 'int',
'col4': 'bigint',
'col5': 'tinyint unsigned',
'col6': 'smallint unsigned',
'col7': 'int unsigned',
'col8': 'bigint unsigned',
'col9': 'float',
'col10': 'double',
'col11': 'bool',
'col12': f'varchar({self.lenBinary})',
'col13': f'nchar({self.lenNchar})'
}
# tag
self.tag_dict = {
't1': 'tinyint',
't2': 'smallint',
't3': 'int',
't4': 'bigint',
't5': 'tinyint unsigned',
't6': 'smallint unsigned',
't7': 'int unsigned',
't8': 'bigint unsigned',
't9': 'float',
't10': 'double',
't11': 'bool',
't12': f'varchar({self.lenBinary})',
't13': f'nchar({self.lenNchar})',
't14': 'timestamp'
}
# generate a special wide random string
def random_string(self, count):
letters = string.ascii_letters
return ''.join(random.choice(letters) for i in range(count))
# execute
def execute(self, sql):
obj = self.dbs[self.cur]
return obj.execute(sql)
# query
def query(self, sql):
return self.dbs[self.cur].query(sql)
def set_stb_sql(self,stbname,column_dict,tag_dict):
column_sql = ''
tag_sql = ''
for k,v in column_dict.items():
column_sql += f"{k} {v}, "
for k,v in tag_dict.items():
tag_sql += f"{k} {v}, "
create_stb_sql = f'create stable {stbname} ({column_sql[:-2]}) tags ({tag_sql[:-2]})'
return create_stb_sql
# create database
def create_database(self, dbname, vgroups, replica):
sql = f'create database {dbname} vgroups {vgroups} replica {replica}'
tdSql.execute(sql)
#tdSql.execute(sql)
tdSql.execute(f'use {dbname}')
# create stable and child tables
def create_table(self, stbname, tbname, count):
# create stable
create_table_sql = self.set_stb_sql(stbname, self.column_dict, self.tag_dict)
tdSql.execute(create_table_sql)
# create child table
tdLog.info(f" start create {count} child tables.")
for i in range(count):
ti = i % 128
binTxt = self.random_string(self.lenBinary)
tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"{binTxt}","nch{i}",now'
sql = f'create table {tbname}{i} using {stbname} tags({tags})'
tdSql.execute(sql)
if i > 0 and i % 1000 == 0:
tdLog.info(f" child table count = {i}")
tdLog.info(f" end create {count} child tables.")
# create tag indexes on the super table
def create_tagidx(self, stbname):
cnt = -1
for key in self.tag_dict.keys():
# the first tag has a default index, so skip it
if cnt == -1:
cnt = 0
continue
sql = f'create index idx_{key} on {stbname} ({key})'
tdLog.info(f" sql={sql}")
tdSql.execute(sql)
cnt += 1
tdLog.info(f' create {cnt} tag indexes ok.')
# insert data into child tables d1 and d20
def insert_data(self, tbname):
# d1 insert 3 rows
for i in range(3):
sql = f'insert into {tbname}1(ts,col1) values(now+{i}s,{i});'
tdSql.execute(sql)
# d20 insert 4 rows
for i in range(4):
sql = f'insert into {tbname}20(ts,col1) values(now+{i}s,{i});'
tdSql.execute(sql)
# check show indexes
def show_tagidx(self, dbname, stbname):
sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="{dbname}"'
tdSql.query(sql)
rows = len(self.tag_dict.keys())-1
tdSql.checkRows(rows)
for i in range(rows):
col_name = tdSql.getData(i, 1)
idx_name = f'idx_{col_name}'
tdSql.checkData(i, 0, idx_name)
tdLog.info(f' show {rows} tag indexes ok.')
# query with tag idx
def query_tagidx(self, stbname):
sql = f'select * from meters where t2=1'
self.query(sql)
tdSql.checkRows(3)
sql = f'select * from meters where t2<10'
self.query(sql)
tdSql.checkRows(3)
sql = f'select * from meters where t2>10'
self.query(sql)
tdSql.checkRows(4)
sql = f'select * from meters where t3<30'
self.query(sql)
tdSql.checkRows(7)
sql = f'select * from meters where t12="11"'
tdSql.query(sql)
tdSql.checkRows(0)
sql = f'select * from meters where (t4 < 10 or t5 = 20) and t6= 30'
self.query(sql)
tdSql.checkRows(0)
sql = f'select * from meters where (t7 < 20 and t8 = 20) or t4 = 20'
self.query(sql)
tdSql.checkRows(4)
sql = f'select * from meters where t12 like "%ab%" '
self.query(sql)
tdSql.checkRows(0)
sql = f'select * from meters where t13 = "d20" '
self.query(sql)
tdSql.checkRows(0)
sql = f'select * from meters where t13 = "nch20" '
self.query(sql)
tdSql.checkRows(4)
sql = f'select * from meters where tbname = "d20" '
self.query(sql)
tdSql.checkRows(4)
# drop child table
def drop_tables(self, tbname, count):
# tables d1 and d20 hold verification data, so they cannot be dropped
start = random.randint(21, count//2)      # integer division keeps randint bounds integral
end = random.randint(count//2 + 1, count - 1)
for i in range(start, end):
sql = f'drop table {tbname}{i}'
tdSql.execute(sql)
cnt = end - start + 1
tdLog.info(f' drop table from {start} to {end} count={cnt}')
# drop tag index
def drop_tagidx(self, dbname, stbname):
# drop index
cnt = -1
for key in self.tag_dict.keys():
# the first tag has a default index, so skip it
if cnt == -1:
cnt = 0
continue
sql = f'drop index idx_{key}'
tdSql.execute(sql)
cnt += 1
# check idx result is 0
sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="{dbname}"'
tdSql.query(sql)
tdSql.checkRows(0)
tdLog.info(f' drop {cnt} tag indexes ok.')
# show performance
def show_performance(self, count) :
db = self.dbs[0]
db1 = self.dbs[1]
cnt = len(db.sqls)
cnt1 = len(db1.sqls)
if cnt != len(db1.sqls):
tdLog.info(f" datebase sql count not equal. cnt={cnt} cnt1={cnt1}\n")
return False
tdLog.info(f" database sql cnt ={cnt}")
print(f" ----------------- performance (child tables = {count})--------------------")
print(" No time(index) time(no-index) diff(col3-col2) rate(col2/col3) sql")
for i in range(cnt):
key = db.sqls[i]
value = db.spends[i]
key1 = db1.sqls[i]
value1 = db1.spends[i]
diff = value1 - value
rate = value/value1*100
print(" %d %.3fs %.3fs %.3fs %d%% %s"%(i+1, value, value1, diff, rate, key))
print(" --------------------- end ------------------------")
return True
def show_diskspace(self):
#calc
selfPath = os.path.dirname(os.path.realpath(__file__))
projPath = ""
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
# total
vnode2_size = pathSize(projPath + "sim/dnode2/data/vnode/vnode2/")
vnode3_size = pathSize(projPath + "sim/dnode3/data/vnode/vnode3/")
vnode4_size = pathSize(projPath + "sim/dnode4/data/vnode/vnode4/")
vnode5_size = pathSize(projPath + "sim/dnode5/data/vnode/vnode5/")
# show
print(" ----------------- disk space --------------------")
idx_size = vnode2_size + vnode3_size
noidx_size = vnode4_size + vnode5_size
print(" index = %.02f M"%(idx_size/1024/1024))
print(" no-index = %.02f M"%(noidx_size/1024/1024))
print(" index/no-index = %.2f multiple"%(idx_size/noidx_size))
print(" -------------------- end ------------------------")
# main
def testdb(self, dbname, stable, tbname, count, createidx):
# cur
if createidx:
self.cur = 0
else :
self.cur = 1
# do
self.create_database(dbname, 2, 1)
self.create_table(stable, tbname, count)
if(createidx):
self.create_tagidx(stable)
self.insert_data(tbname)
if(createidx):
self.show_tagidx(dbname,stable)
self.query_tagidx(stable)
#self.drop_tables(tbname, count)
#if(createidx):
# self.drop_tagidx(dbname, stable)
# query after delete, expect no crash
#self.query_tagidx(stable)
tdSql.execute(f'flush database {dbname}')
# run
def run(self):
self.tagCluster.run()
# var
dbname = "tagindex"
dbname1 = dbname + "1"
stable = "meters"
tbname = "d"
count = 10000
# test db
tdLog.info(f" ------------- {dbname} ----------")
self.testdb(dbname, stable, tbname, count, True)
tdLog.info(f" ------------- {dbname1} ----------")
self.testdb(dbname1, stable, tbname, count, False)
# show test result
self.show_performance(count)
self.tagCluster.TDDnodes.stopAll()
sec = 10
print(f" sleep {sec}s wait taosd stopping ...")
time.sleep(sec)
self.show_diskspace()
def stop(self):
self.tagCluster.stop()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -107,11 +107,11 @@ class TDTestCase:
def insert_data(self, tbname):
# d1 insert 3 rows
for i in range(3):
sql = f'insert into {tbname}1(ts,col1) values(now,{i});'
sql = f'insert into {tbname}1(ts,col1) values(now+{i}s,{i});'
tdSql.execute(sql)
# d20 insert 4 rows
for i in range(4):
sql = f'insert into {tbname}20(ts,col1) values(now,{i});'
sql = f'insert into {tbname}20(ts,col1) values(now+{i}s,{i});'
tdSql.execute(sql)
# check show indexes
@ -150,6 +150,22 @@ class TDTestCase:
tdSql.query(sql)
tdSql.checkRows(0)
sql = f'select t12 ,t13,tbname from meters where t13="nch20"'
tdSql.query(sql)
tdSql.checkRows(4)
sql = f'select * from meters where t12 like "%ab%" '
tdSql.query(sql)
tdSql.checkRows(0)
sql = f'select * from meters where t13 = "d20" '
tdSql.query(sql)
tdSql.checkRows(0)
sql = f'select * from meters where tbname = "d20" '
tdSql.query(sql)
tdSql.checkRows(4)
sql = f'select * from meters where (t4 < 10 or t5 = 20) and t6= 30'
tdSql.query(sql)
tdSql.checkRows(0)
@ -188,6 +204,22 @@ class TDTestCase:
tdSql.checkRows(0)
tdLog.info(f' drop {cnt} tag indexes ok.')
# create long name idx
def longname_idx(self, stbname):
long_name = "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjjkkkkkkkkkkllllllllllmmmmmmmmmm"
key = "t3"
sql = f'create index {long_name} on {stbname} ({key})'
tdSql.error(sql)
long_name = "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff"
key = "t3"
sql = f'create index {long_name} on {stbname} ({key})'
tdLog.info(f"{sql}")
tdSql.execute(sql)
sql = f'drop index {long_name}'
tdLog.info(f"{sql}")
tdSql.execute(sql)
# run
def run(self):
# var
@ -204,6 +236,7 @@ class TDTestCase:
self.drop_tagidx(stable)
# query after delete, expect no crash
self.query_tagidx(stable)
self.longname_idx(stable)
def stop(self):

View File

@ -124,6 +124,18 @@ class TDTestCase:
sql2 = "select count(t1) from (select * from %s.meters)" %dbname
self.sql_base_check(sql1,sql2)
# TD-22520
tdSql.query("select tbname, ts from %s.meters where ts < '2017-07-14 10:40:00' order by ts asc limit 150;" %dbname)
tdSql.checkRows(150)
tdSql.query("select tbname, ts from %s.meters where ts < '2017-07-14 10:40:00' order by ts asc limit 300;" %dbname)
tdSql.checkRows(300)
tdSql.query("select tbname, ts from %s.meters where ts < '2017-07-14 10:40:00' order by ts desc limit 150;" %dbname)
tdSql.checkRows(150)
tdSql.query("select tbname, ts from %s.meters where ts < '2017-07-14 10:40:00' order by ts desc limit 300;" %dbname)
tdSql.checkRows(300)
def sql_base_check(self,sql1,sql2):
tdSql.query(sql1)

View File

@ -86,8 +86,8 @@ SWords shellCommands[] = {
"<anyword> <db_options> <anyword> <db_options> <anyword> <db_options> <anyword> <db_options> <anyword> "
"<db_options> <anyword> <db_options> <anyword> ;",
0, 0, NULL},
{"create dnode ", 0, 0, NULL},
{"create index ", 0, 0, NULL},
{"create dnode <anyword>", 0, 0, NULL},
{"create index <anyword> on <stb_name> ()", 0, 0, NULL},
{"create mnode on dnode <dnode_id> ;", 0, 0, NULL},
{"create qnode on dnode <dnode_id> ;", 0, 0, NULL},
{"create stream <anyword> into <anyword> as select", 0, 0, NULL}, // 26 append sub sql
@ -98,6 +98,7 @@ SWords shellCommands[] = {
{"describe <all_table>", 0, 0, NULL},
{"delete from <all_table> where ", 0, 0, NULL},
{"drop database <db_name>", 0, 0, NULL},
{"drop index <anyword>", 0, 0, NULL},
{"drop table <all_table>", 0, 0, NULL},
{"drop dnode <dnode_id>", 0, 0, NULL},
{"drop mnode on dnode <dnode_id> ;", 0, 0, NULL},
@ -384,7 +385,7 @@ void showHelp() {
create table <tb_name> using <stb_name> tags ...\n\
create database <db_name> <db_options> ...\n\
create dnode \"fqdn:port\" ...\n\
create index ...\n\
create index <index_name> on <stb_name> (tag_column_name);\n\
create mnode on dnode <dnode_id> ;\n\
create qnode on dnode <dnode_id> ;\n\
create stream <stream_name> into <stb_name> as select ...\n\
@ -404,6 +405,7 @@ void showHelp() {
drop consumer group ... \n\
drop topic <topic_name> ;\n\
drop stream <stream_name> ;\n\
drop index <index_name>;\n\
----- E ----- \n\
explain select clause ...\n\
----- F ----- \n\
@ -534,7 +536,7 @@ SWord* addWord(const char* p, int32_t len, bool pattern) {
word->len = len;
// check format
if (pattern) {
if (pattern && len > 0) {
word->type = wordType(p, len);
} else {
word->type = WT_TEXT;
@ -1724,6 +1726,9 @@ bool matchEnd(TAOS* con, SShellCmd* cmd) {
if (strlen(last) == 0) {
goto _return;
}
if (strcmp(last, " ") == 0) {
goto _return;
}
// match database
if (elast == NULL) {

View File

@ -21,7 +21,7 @@ static void shellWorkAsClient() {
SRpcInit rpcInit = {0};
SEpSet epSet = {.inUse = 0, .numOfEps = 1};
SRpcMsg rpcRsp = {0};
void * clientRpc = NULL;
void *clientRpc = NULL;
char pass[TSDB_PASSWORD_LEN + 1] = {0};
taosEncryptPass_c((uint8_t *)("_pwd"), strlen("_pwd"), pass);
@ -31,6 +31,7 @@ static void shellWorkAsClient() {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "_dnd";
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
clientRpc = rpcOpen(&rpcInit);
if (clientRpc == NULL) {

View File

@ -215,8 +215,8 @@ int smlProcess_json3_Test() {
taos_free_result(pRes);
const char *sql[] = {
"[{\"metric\":\"sys.cpu.nice3\",\"timestamp\":0,\"value\":\"18\",\"tags\":{\"host\":\"web01\",\"id\":\"t1\","
"\"dc\":\"lga\"}}]"};
"[{\"metric\":\"sys.cpu.nice3\",\"timestamp\":0,\"value\":\"18\",\"tags\":{\"host\":\"web01\",\"id\":\"t1\",\"dc\":\"lga\"}}]"
};
char *sql1[1] = {0};
for (int i = 0; i < 1; i++) {
sql1[i] = taosMemoryCalloc(1, 1024);

View File

@ -856,7 +856,9 @@ void loop_consume(SThreadInfo* pInfo) {
taosFprintfFile(g_fp, "==== consumerId: %d, consumeMsgCnt: %" PRId64 ", consumeRowCnt: %" PRId64 "\n",
pInfo->consumerId, pInfo->consumeMsgCnt, pInfo->consumeRowCnt);
taosFsyncFile(pInfo->pConsumeRowsFile);
if (taosFsyncFile(pInfo->pConsumeRowsFile) < 0) {
printf("taosFsyncFile error:%s\n", strerror(errno));
}
taosCloseFile(&pInfo->pConsumeRowsFile);
}