Merge branch '3.0' into feat/TD-30813-2

Commit: 2ac23f72e8
@@ -90,7 +90,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.0</version>
+  <version>3.3.2</version>
 </dependency>
 ```

@@ -21,7 +21,6 @@ table_options:
 table_option: {
     COMMENT 'string_value'
   | SMA(col_name [, col_name] ...)
-  | TTL value
 }

 ```
@@ -1384,7 +1384,7 @@ SELECT SERVER_VERSION();
 SELECT SERVER_STATUS();
 ```

-**Description**: The server status.
+**Description**: The server status. When checking the status of a cluster, the recommended way is to use `SHOW CLUSTER ALIVE;`. Unlike `SELECT SERVER_STATUS();`, it does not return an error when some nodes in the cluster are unavailable; instead, it returns different status codes. Please check [SHOW CLUSTER ALIVE](https://docs.tdengine.com/reference/taos-sql/show/#show-cluster-alive) for details.

 ### CURRENT_USER

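For illustration only (not part of this commit): a minimal C-client sketch of the check recommended above. It assumes `SHOW CLUSTER ALIVE` returns a single INT status column and that the codes mean 0 = unavailable, 1 = fully available, 2 = partially available; verify against the SHOW documentation linked above.

#include <stdint.h>
#include <stdio.h>
#include <taos.h>

/* Minimal sketch: check cluster liveness with SHOW CLUSTER ALIVE instead of
 * SELECT SERVER_STATUS(). Assumed status codes: 0 = unavailable,
 * 1 = fully available, 2 = partially available. */
static int check_cluster_alive(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "SHOW CLUSTER ALIVE;");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return -1;
  }
  int32_t status = -1;
  TAOS_ROW row = taos_fetch_row(res);
  if (row != NULL && row[0] != NULL) {
    status = *(int32_t *)row[0];  // assumed: one INT column holding the status
  }
  taos_free_result(res);
  return status;
}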
@@ -42,6 +42,7 @@ REST connection supports all platforms that can run Java.

 | taos-jdbcdriver version | major changes | TDengine version |
 | :---------------------: | :-----------: | :--------------: |
+| 3.3.2 | 1. Optimized websocket prepareStatement performance; 2. Improved mybatis support | - |
 | 3.3.0 | 1. Optimized data transmission performance under Websocket connection; 2. SSL validation skipping is supported but disabled by default | 3.3.2.0 or later |
 | 3.2.11 | Fixed the result set closing bug when using a native connection. | - |
 | 3.2.10 | 1. Automatic compression/decompression for data transmission, disabled by default; 2. Automatic reconnection for websocket with configurable parameter, disabled by default; 3. A new method for schemaless writing is added in the connection class; 4. Optimized performance for data fetching on native connection; 5. Fixing for some known issues; 6. The list of supported functions can be returned by the API for retrieving metadata | - |

@@ -179,7 +180,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.0</version>
+  <version>3.3.2</version>
 </dependency>
 ```

@@ -19,7 +19,7 @@
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.0</version>
+  <version>3.3.2</version>
 </dependency>
 <dependency>
   <groupId>org.locationtech.jts</groupId>

@@ -18,7 +18,7 @@
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.0</version>
+  <version>3.3.2</version>
 </dependency>
 <!-- druid -->
 <dependency>

@@ -17,7 +17,7 @@
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.0</version>
+  <version>3.3.2</version>
 </dependency>
 <dependency>
   <groupId>com.google.guava</groupId>

@@ -67,7 +67,7 @@
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.0</version>
+  <version>3.3.2</version>
   <!-- <scope>system</scope>-->
   <!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
 </dependency>

@@ -22,7 +22,7 @@
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.0</version>
+  <version>3.3.2</version>
 </dependency>
 <!-- ANCHOR_END: dep-->

@@ -89,7 +89,7 @@ TDengine provides a rich set of application development interfaces; to make it easy for users to quickly
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.0</version>
+  <version>3.3.2</version>
 </dependency>
 ```

@@ -21,7 +21,6 @@ table_options:
 table_option: {
     COMMENT 'string_value'
   | SMA(col_name [, col_name] ...)
-  | TTL value
 }

 ```

@@ -1374,7 +1374,7 @@ SELECT SERVER_VERSION();
 SELECT SERVER_STATUS();
 ```

-**Description**: Checks whether all dnodes of the server are online; returns success if so, otherwise returns an error indicating that the connection cannot be established.
+**Description**: Checks whether all dnodes of the server are online; returns success if so, otherwise returns an error indicating that the connection cannot be established. To check the status of a cluster, `SHOW CLUSTER ALIVE;` is recommended instead. Unlike `SELECT SERVER_STATUS();`, it does not return an error when some nodes in the cluster are unavailable; it returns different status codes instead. See [SHOW CLUSTER ALIVE](https://docs.taosdata.com/reference/taos-sql/show/#show-cluster-alive) for details.

 ### CURRENT_USER

@@ -33,6 +33,7 @@ REST connection supports all platforms that can run Java.

 | taos-jdbcdriver version | major changes | TDengine version |
 | :------------------: | :-----------: | :----------------: |
+| 3.3.2 | 1. Optimized parameter binding (prepareStatement) performance over Websocket connections; 2. Improved mybatis support | - |
 | 3.3.0 | 1. Optimized data transmission performance over Websocket connections; 2. Support for skipping SSL validation, disabled by default | 3.3.2.0 and later |
 | 3.2.11 | Fixed a bug in closing result sets over native connections | - |
 | 3.2.10 | 1. REST/WebSocket connections support data compression in transit; 2. Automatic reconnection for Websocket, disabled by default; 3. The Connection class provides schemaless-write methods; 4. Optimized data fetching performance for native connections; 5. Fixed some known issues; 6. The metadata-retrieval functions can return a list of supported functions | - |
@@ -158,6 +158,7 @@ extern int32_t tsCacheLazyLoadThreshold; // cost threshold for last/last_row lo

 // query client
 extern int32_t tsQueryPolicy;
+extern bool tsQueryTbNotExistAsEmpty;
 extern int32_t tsQueryRspPolicy;
 extern int64_t tsQueryMaxConcurrentTables;
 extern int32_t tsQuerySmaOptimize;

@@ -15,7 +15,7 @@

 #ifndef TAOS_COUNTER_H
 #define TAOS_COUNTER_H
-
+#include <stdint.h>
 #include <stdlib.h>

 #include "taos_metric.h"

@@ -99,4 +99,7 @@ int taos_counter_inc(taos_counter_t *self, const char **label_values);
 */
 int taos_counter_add(taos_counter_t *self, double r_value, const char **label_values);

+int taos_counter_get_vgroup_ids(taos_counter_t *self, char ***keys, int32_t **vgroup_ids, int *list_size);
+int taos_counter_get_keys_size(taos_counter_t *self);
+int taos_counter_delete(taos_counter_t *self, char *key);
 #endif  // TAOS_COUNTER_H

@@ -65,6 +65,8 @@ typedef struct SParseCsvCxt {
   const char* pLastSqlPos;  // the location of the last parsed sql
 } SParseCsvCxt;

+typedef void(*setQueryFn)(int64_t);
+
 typedef struct SParseContext {
   uint64_t requestId;
   int64_t requestRid;

@@ -98,6 +100,7 @@ typedef struct SParseContext {
   void* parseSqlParam;
   int8_t biMode;
   SArray* pSubMetaList;
+  setQueryFn setQueryFp;
 } SParseContext;

 int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);

@@ -705,7 +705,7 @@ int32_t streamTaskSetActiveCheckpointInfo(SStreamTask* pTask, int64_t activeChec
 void streamTaskSetFailedChkptInfo(SStreamTask* pTask, int32_t transId, int64_t checkpointId);
 bool streamTaskAlreadySendTrigger(SStreamTask* pTask, int32_t downstreamNodeId);
 void streamTaskGetTriggerRecvStatus(SStreamTask* pTask, int32_t* pRecved, int32_t* pTotal);
-void streamTaskInitTriggerDispatchInfo(SStreamTask* pTask);
+int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask);
 void streamTaskSetTriggerDispatchConfirmed(SStreamTask* pTask, int32_t vgId);
 int32_t streamTaskSendCheckpointTriggerMsg(SStreamTask* pTask, int32_t dstTaskId, int32_t downstreamNodeId,
                                            SRpcHandleInfo* pInfo, int32_t code);

@@ -810,6 +810,7 @@ int32_t streamTaskBuildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRp
 int32_t streamSendChkptReportMsg(SStreamTask* pTask, SCheckpointInfo* pCheckpointInfo, int8_t dropRelHTask);
 int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SVUpdateCheckpointInfoReq* pReq);
 int32_t streamTaskCreateActiveChkptInfo(SActiveCheckpointInfo** pRes);
+void streamTaskSetCheckpointFailed(SStreamTask* pTask);

 // stream task state machine, and event handling
 int32_t streamCreateStateMachine(SStreamTask* pTask);

@@ -499,7 +499,7 @@ typedef enum ELogicConditionType {
 #ifdef WINDOWS
 #define TSDB_MAX_RPC_THREADS 4  // windows pipe only support 4 connections.
 #else
-#define TSDB_MAX_RPC_THREADS 10
+#define TSDB_MAX_RPC_THREADS 50
 #endif

 #define TSDB_QUERY_TYPE_NON_TYPE 0x00u  // none type
@@ -52,11 +52,11 @@ enum {
 #define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
 #define SHOW_VARIABLES_RESULT_FIELD3_LEN (TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE)

-#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
-#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
-#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
-#define TD_RES_TMQ_METADATA(res) (*(int8_t*)res == RES_TYPE__TMQ_METADATA)
-#define TD_RES_TMQ_BATCH_META(res) (*(int8_t*)res == RES_TYPE__TMQ_BATCH_META)
+#define TD_RES_QUERY(res) (*(int8_t*)(res) == RES_TYPE__QUERY)
+#define TD_RES_TMQ(res) (*(int8_t*)(res) == RES_TYPE__TMQ)
+#define TD_RES_TMQ_META(res) (*(int8_t*)(res) == RES_TYPE__TMQ_META)
+#define TD_RES_TMQ_METADATA(res) (*(int8_t*)(res) == RES_TYPE__TMQ_METADATA)
+#define TD_RES_TMQ_BATCH_META(res) (*(int8_t*)(res) == RES_TYPE__TMQ_BATCH_META)

 typedef struct SAppInstInfo SAppInstInfo;

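A short aside on the macro change above (illustrative, not part of the commit): parenthesizing the macro parameter keeps the cast from binding to only part of a caller's expression. The caller below is hypothetical.

#include <stdint.h>

#define RES_TYPE__QUERY 1

// Old form: the cast binds to `res` only, so TD_RES_QUERY_OLD(p + 1) expands to
// (*(int8_t*)p + 1 == ...), i.e. "first byte plus one" instead of the byte at offset 1.
#define TD_RES_QUERY_OLD(res) (*(int8_t*)res == RES_TYPE__QUERY)
// New form: the whole argument is cast, so pointer arithmetic happens first.
#define TD_RES_QUERY_NEW(res) (*(int8_t*)(res) == RES_TYPE__QUERY)

int is_query_at_offset_one(char *base) {
  // Reads the byte at offset 1, as intended; the old macro would misparse this expression.
  return TD_RES_QUERY_NEW(base + 1);
}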
@@ -284,6 +284,7 @@ typedef struct SRequestObj {
   bool isSubReq;
   bool inCallback;
   bool isStmtBind;  // is statement bind parameter
+  bool isQuery;
   uint32_t prevCode;  // previous error code: todo refactor, add update flag for catalog
   uint32_t retry;
   int64_t allocatorRefId;

@@ -420,6 +421,7 @@ typedef struct SSqlCallbackWrapper {
   void* pPlanInfo;
 } SSqlCallbackWrapper;

+void setQueryRequest(int64_t rId);
 SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res);
 int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList);
 void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta, SSqlCallbackWrapper* pWrapper);

@@ -31,6 +31,15 @@
 static int32_t initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet);
 static int32_t buildConnectMsg(SRequestObj* pRequest, SMsgSendInfo** pMsgSendInfo);

+void setQueryRequest(int64_t rId) {
+  SRequestObj* pReq = acquireRequest(rId);
+  if (pReq != NULL) {
+    pReq->isQuery = true;
+    (void)releaseRequest(rId);
+  }
+
+}
+
 static bool stringLengthCheck(const char* str, size_t maxsize) {
   if (str == NULL) {
     return false;

@@ -286,7 +295,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
                        .enableSysInfo = pTscObj->sysInfo,
                        .svrVer = pTscObj->sVer,
                        .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes),
-                       .isStmtBind = pRequest->isStmtBind};
+                       .isStmtBind = pRequest->isStmtBind,
+                       .setQueryFp = setQueryRequest};

   cxt.mgmtEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
   int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &cxt.pCatalog);

@@ -339,8 +349,8 @@ int32_t execDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
   STscObj* pTscObj = pRequest->pTscObj;
   SMsgSendInfo* pSendMsg = buildMsgInfoImpl(pRequest);

-  int64_t transporterId = 0;
-  TSC_ERR_RET(asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &pMsgInfo->epSet, &transporterId, pSendMsg));
+  // int64_t transporterId = 0;
+  TSC_ERR_RET(asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &pMsgInfo->epSet, NULL, pSendMsg));
   (void)tsem_wait(&pRequest->body.rspSem);
   return TSDB_CODE_SUCCESS;
 }

@@ -396,8 +406,8 @@ int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
   SAppInstInfo* pAppInfo = getAppInfo(pRequest);
   SMsgSendInfo* pSendMsg = buildMsgInfoImpl(pRequest);

-  int64_t transporterId = 0;
-  int32_t code = asyncSendMsgToServer(pAppInfo->pTransporter, &pMsgInfo->epSet, &transporterId, pSendMsg);
+  // int64_t transporterId = 0;
+  int32_t code = asyncSendMsgToServer(pAppInfo->pTransporter, &pMsgInfo->epSet, NULL, pSendMsg);
   if (code) {
     doRequestCallback(pRequest, code);
   }
@@ -2953,6 +2963,10 @@ void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param
 void doRequestCallback(SRequestObj* pRequest, int32_t code) {
   pRequest->inCallback = true;
   int64_t this = pRequest->self;
+  if (tsQueryTbNotExistAsEmpty && TD_RES_QUERY(&pRequest->resType) && pRequest->isQuery && (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_TDB_TABLE_NOT_EXIST)) {
+    code = TSDB_CODE_SUCCESS;
+    pRequest->type = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
+  }
   pRequest->body.queryFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, code);
   SRequestObj* pReq = acquireRequest(this);
   if (pReq != NULL) {
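To illustrate the user-visible effect of the conversion added above (a sketch, not part of the commit): with the new client option enabled — presumably `queryTableNotExistAsEmpty 1` in `taos.cfg` — a query on a missing table is expected to come back as an empty result instead of a "table does not exist" error. The database and table names below are hypothetical.

#include <stdio.h>
#include <taos.h>

// Sketch of the expected behavior around the error-to-empty conversion above.
static void query_missing_table(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "SELECT * FROM test.no_such_table;");
  if (taos_errno(res) != 0) {
    // Default behavior: a "table does not exist" style error is reported.
    printf("error: %s\n", taos_errstr(res));
  } else if (taos_fetch_row(res) == NULL) {
    // With queryTableNotExistAsEmpty enabled: no error, just zero rows.
    printf("empty result\n");
  }
  taos_free_result(res);
}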
@@ -297,9 +297,8 @@ void taos_fetch_whitelist_a(TAOS *taos, __taos_async_whitelist_fn_t fp, void *pa
   pSendInfo->fp = fetchWhiteListCallbackFn;
   pSendInfo->msgType = TDMT_MND_GET_USER_WHITELIST;

-  int64_t transportId = 0;
   SEpSet epSet = getEpSet_s(&pTsc->pAppInfo->mgmtEp);
-  if (TSDB_CODE_SUCCESS != asyncSendMsgToServer(pTsc->pAppInfo->pTransporter, &epSet, &transportId, pSendInfo)) {
+  if (TSDB_CODE_SUCCESS != asyncSendMsgToServer(pTsc->pAppInfo->pTransporter, &epSet, NULL, pSendInfo)) {
     tscWarn("failed to async send msg to server");
   }
   releaseTscObj(connId);

@@ -861,9 +860,9 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) {
   return pResInfo->pCol[columnIndex].offset;
 }

-int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows){
-  if (res == NULL || result == NULL || rows == NULL || *rows <= 0 ||
-      columnIndex < 0 || TD_RES_TMQ_META(res) || TD_RES_TMQ_BATCH_META(res)) {
+int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows) {
+  if (res == NULL || result == NULL || rows == NULL || *rows <= 0 || columnIndex < 0 || TD_RES_TMQ_META(res) ||
+      TD_RES_TMQ_BATCH_META(res)) {
     return TSDB_CODE_INVALID_PARA;
   }

@@ -876,22 +875,22 @@ int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *r
   TAOS_FIELD *pField = &pResInfo->userFields[columnIndex];
   SResultColumn *pCol = &pResInfo->pCol[columnIndex];

-  if (*rows > pResInfo->numOfRows){
+  if (*rows > pResInfo->numOfRows) {
     *rows = pResInfo->numOfRows;
   }
   if (IS_VAR_DATA_TYPE(pField->type)) {
-    for(int i = 0; i < *rows; i++){
-      if(pCol->offset[i] == -1){
+    for (int i = 0; i < *rows; i++) {
+      if (pCol->offset[i] == -1) {
         result[i] = true;
-      }else{
+      } else {
         result[i] = false;
       }
     }
-  }else{
-    for(int i = 0; i < *rows; i++){
-      if (colDataIsNull_f(pCol->nullbitmap, i)){
+  } else {
+    for (int i = 0; i < *rows; i++) {
+      if (colDataIsNull_f(pCol->nullbitmap, i)) {
         result[i] = true;
-      }else{
+      } else {
         result[i] = false;
       }
     }

@@ -1236,7 +1235,8 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt, SS
                          .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes),
                          .allocatorId = pRequest->allocatorRefId,
                          .parseSqlFp = clientParseSql,
-                         .parseSqlParam = pWrapper};
+                         .parseSqlParam = pWrapper,
+                         .setQueryFp = setQueryRequest};
   int8_t biMode = atomic_load_8(&((STscObj *)pTscObj)->biMode);
   (*pCxt)->biMode = biMode;
   return TSDB_CODE_SUCCESS;
@@ -121,7 +121,7 @@ static int32_t monitorReportAsyncCB(void* param, SDataBuf* pMsg, int32_t code) {
     if (monitorPutData2MonitorQueue(tmp) == 0) {
       p->fileName = NULL;
     } else {
-      if(taosCloseFile(&(p->pFile)) != 0) {
+      if (taosCloseFile(&(p->pFile)) != 0) {
         tscError("failed to close file:%p", p->pFile);
       }
     }

@@ -165,8 +165,8 @@ static int32_t sendReport(void* pTransporter, SEpSet* epSet, char* pCont, MONITO
   pInfo->requestId = tGenIdPI64();
   pInfo->requestObjRefId = 0;

-  int64_t transporterId = 0;
-  return asyncSendMsgToServer(pTransporter, epSet, &transporterId, pInfo);
+  // int64_t transporterId = 0;
+  return asyncSendMsgToServer(pTransporter, epSet, NULL, pInfo);

 FAILED:
   if (taosCloseFile(&(((MonitorSlowLogData*)param)->pFile)) != 0) {

@@ -286,7 +286,7 @@ void monitorCreateClient(int64_t clusterId) {

     return;

 fail:
   destroyMonitorClient(&pMonitor);
   taosWUnLockLatch(&monitorLock);
 }

@@ -302,7 +302,7 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name, const char*
   taos_counter_t* newCounter = taos_counter_new(name, help, label_key_count, label_keys);
   if (newCounter == NULL) return;
   MonitorClient* pMonitor = *ppMonitor;
-  if (taos_collector_add_metric(pMonitor->colector, newCounter) != 0){
+  if (taos_collector_add_metric(pMonitor->colector, newCounter) != 0) {
     tscError("failed to add metric to collector");
     (void)taos_counter_destroy(newCounter);
     goto end;

@@ -315,7 +315,7 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name, const char*
   tscInfo("[monitor] monitorCreateClientCounter %" PRIx64 "(%p):%s : %p.", pMonitor->clusterId, pMonitor, name,
           newCounter);

 end:
   taosWUnLockLatch(&monitorLock);
 }

@@ -338,13 +338,13 @@ void monitorCounterInc(int64_t clusterId, const char* counterName, const char**
     tscError("monitorCounterInc not found pCounter %" PRIx64 ":%s.", clusterId, counterName);
     goto end;
   }
-  if (taos_counter_inc(*ppCounter, label_values) != 0){
+  if (taos_counter_inc(*ppCounter, label_values) != 0) {
     tscError("monitorCounterInc failed to inc %" PRIx64 ":%s.", clusterId, counterName);
     goto end;
   }
   tscDebug("[monitor] monitorCounterInc %" PRIx64 "(%p):%s", pMonitor->clusterId, pMonitor, counterName);

 end:
   taosWUnLockLatch(&monitorLock);
 }

@@ -413,7 +413,7 @@ static char* readFile(TdFilePtr pFile, int64_t* offset, int64_t size) {
     return NULL;
   }

-  if((size <= *offset)){
+  if ((size <= *offset)) {
     tscError("invalid size:%" PRId64 ", offset:%" PRId64, size, *offset);
     terrno = TSDB_CODE_TSC_INTERNAL_ERROR;
     return NULL;

@@ -510,13 +510,13 @@ static int32_t monitorReadSend(int64_t clusterId, TdFilePtr pFile, int64_t* offs
   }
   SEpSet ep = getEpSet_s(&pInst->mgmtEp);
   char* data = readFile(pFile, offset, size);
-  if(data == NULL) return terrno;
+  if (data == NULL) return terrno;
   return sendSlowLog(clusterId, data, (type == SLOW_LOG_READ_BEGINNIG ? pFile : NULL), *offset, type, fileName,
                      pInst->pTransporter, &ep);
 }

 static void monitorSendSlowLogAtBeginning(int64_t clusterId, char** fileName, TdFilePtr pFile, int64_t offset) {
-  if (fileName == NULL){
+  if (fileName == NULL) {
     return;
   }
   int64_t size = getFileSize(*fileName);

@@ -525,10 +525,11 @@ static void monitorSendSlowLogAtBeginning(int64_t clusterId, char** fileName, Td
     tscDebug("[monitor] monitorSendSlowLogAtBeginning delete file:%s", *fileName);
   } else {
     int32_t code = monitorReadSend(clusterId, pFile, &offset, size, SLOW_LOG_READ_BEGINNIG, *fileName);
-    if (code == 0){
+    if (code == 0) {
       tscDebug("[monitor] monitorSendSlowLogAtBeginning send slow log succ, clusterId:%" PRId64, clusterId);
-    }else{
-      tscError("[monitor] monitorSendSlowLogAtBeginning send slow log failed, clusterId:%" PRId64 ",ret:%d", clusterId, code);
+    } else {
+      tscError("[monitor] monitorSendSlowLogAtBeginning send slow log failed, clusterId:%" PRId64 ",ret:%d", clusterId,
+               code);
     }
     *fileName = NULL;
   }
@@ -1241,7 +1241,9 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
                             .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
                             .pTransporter = pStmt->taos->pAppInfo->pTransporter,
                             .pStmtCb = NULL,
-                            .pUser = pStmt->taos->user};
+                            .pUser = pStmt->taos->user,
+                            .setQueryFp = setQueryRequest};

   ctx.mgmtEpSet = getEpSet_s(&pStmt->taos->pAppInfo->mgmtEp);
   STMT_ERR_RET(catalogGetHandle(pStmt->taos->pAppInfo->clusterId, &ctx.pCatalog));

@@ -552,9 +552,9 @@ static int32_t doSendCommitMsg(tmq_t* tmq, int32_t vgId, SEpSet* epSet, STqOffse
   pMsgSendInfo->fp = tmqCommitCb;
   pMsgSendInfo->msgType = TDMT_VND_TMQ_COMMIT_OFFSET;

-  int64_t transporterId = 0;
+  // int64_t transporterId = 0;
   (void)atomic_add_fetch_32(&pParamSet->waitingRspNum, 1);
-  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, pMsgSendInfo);
+  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, NULL, pMsgSendInfo);
   if (code != 0) {
     (void)atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1);
     return code;

@@ -955,8 +955,7 @@ void tmqSendHbReq(void* param, void* tmrId) {

   SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp);

-  int64_t transporterId = 0;
-  int32_t code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
+  int32_t code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo);
   if (code != 0) {
     tscError("tmqSendHbReq asyncSendMsgToServer failed");
   }

@@ -1436,8 +1435,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {

   SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp);

-  int64_t transporterId = 0;
-  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
+  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo);
   if (code != 0) {
     goto FAIL;
   }

@@ -2044,10 +2042,10 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
   sendInfo->fp = tmqPollCb;
   sendInfo->msgType = TDMT_VND_TMQ_CONSUME;

-  int64_t transporterId = 0;
+  // int64_t transporterId = 0;
   char offsetFormatBuf[TSDB_OFFSET_LEN] = {0};
   tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pVg->offsetInfo.endOffset);
-  code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo);
+  code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, NULL, sendInfo);
   tscDebug("consumer:0x%" PRIx64 " send poll to %s vgId:%d, code:%d, epoch %d, req:%s,QID:0x%" PRIx64, pTmq->consumerId,
            pTopic->topicName, pVg->vgId, code, pTmq->epoch, offsetFormatBuf, req.reqId);
   if (code != 0) {

@@ -3221,8 +3219,7 @@ int64_t getCommittedFromServer(tmq_t* tmq, char* tname, int32_t vgId, SEpSet* ep
   sendInfo->fp = tmCommittedCb;
   sendInfo->msgType = TDMT_VND_TMQ_VG_COMMITTEDINFO;

-  int64_t transporterId = 0;
-  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, sendInfo);
+  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, NULL, sendInfo);
   if (code != 0) {
     (void)tsem2_destroy(&pParam->sem);
     taosMemoryFree(pParam);

@@ -3498,13 +3495,13 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
     sendInfo->fp = tmqGetWalInfoCb;
     sendInfo->msgType = TDMT_VND_TMQ_VG_WALINFO;

-    int64_t transporterId = 0;
+    // int64_t transporterId = 0;
     char offsetFormatBuf[TSDB_OFFSET_LEN] = {0};
     tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pClientVg->offsetInfo.beginOffset);

     tscInfo("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s,QID:0x%" PRIx64, tmq->consumerId,
             pTopic->topicName, pClientVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId);
-    code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, &transporterId, sendInfo);
+    code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, NULL, sendInfo);
     if (code != 0) {
       goto end;
     }

@@ -3668,8 +3665,7 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
   sendInfo->fp = tmqSeekCb;
   sendInfo->msgType = TDMT_VND_TMQ_SEEK;

-  int64_t transporterId = 0;
-  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
+  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo);
   if (code != 0) {
     (void)tsem2_destroy(&pParam->sem);
     taosMemoryFree(pParam);
|
||||||
{.name = "id", .bytes = TSDB_CLUSTER_ID_LEN + 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
{.name = "id", .bytes = TSDB_CLUSTER_ID_LEN + 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
{.name = "dnode_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
{.name = "dnode_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "machine", .bytes = 7552 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
{.name = "machine", .bytes = 7552 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
|
{.name = "version", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
};
|
};
|
||||||
|
|
||||||
static const SSysDbTableSchema encryptionsSchema[] = {
|
static const SSysDbTableSchema encryptionsSchema[] = {
|
||||||
|
|
|
@ -3009,6 +3009,12 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
|
||||||
data += colSizes[col];
|
data += colSizes[col];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (colSizes[col] <= 0 && !colDataIsNull_s(pColRes, 0) && pColRes->info.type != TSDB_DATA_TYPE_NULL) {
|
||||||
|
uError("Invalid colSize:%d colIdx:%d colType:%d while encoding block", colSizes[col], col, pColRes->info.type);
|
||||||
|
terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
colSizes[col] = htonl(colSizes[col]);
|
colSizes[col] = htonl(colSizes[col]);
|
||||||
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type,
|
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type,
|
||||||
// htonl(colSizes[col]), colSizes[col]);
|
// htonl(colSizes[col]), colSizes[col]);
|
||||||
|
@ -3036,6 +3042,11 @@ int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos
|
||||||
// total rows sizeof(int32_t)
|
// total rows sizeof(int32_t)
|
||||||
int32_t numOfRows = *(int32_t*)pStart;
|
int32_t numOfRows = *(int32_t*)pStart;
|
||||||
pStart += sizeof(int32_t);
|
pStart += sizeof(int32_t);
|
||||||
|
if (numOfRows <= 0) {
|
||||||
|
uError("block decode numOfRows:%d error", numOfRows);
|
||||||
|
terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
|
||||||
|
return terrno;
|
||||||
|
}
|
||||||
|
|
||||||
// total columns sizeof(int32_t)
|
// total columns sizeof(int32_t)
|
||||||
int32_t numOfCols = *(int32_t*)pStart;
|
int32_t numOfCols = *(int32_t*)pStart;
|
||||||
|
@ -3115,14 +3126,19 @@ int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos
|
||||||
pStart += BitmapLen(numOfRows);
|
pStart += BitmapLen(numOfRows);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (colLen[i] > 0) {
|
|
||||||
memcpy(pColInfoData->pData, pStart, colLen[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO
|
// TODO
|
||||||
// setting this flag to true temporarily so aggregate function on stable will
|
// setting this flag to true temporarily so aggregate function on stable will
|
||||||
// examine NULL value for non-primary key column
|
// examine NULL value for non-primary key column
|
||||||
pColInfoData->hasNull = true;
|
pColInfoData->hasNull = true;
|
||||||
|
|
||||||
|
if (colLen[i] > 0) {
|
||||||
|
memcpy(pColInfoData->pData, pStart, colLen[i]);
|
||||||
|
} else if (!colDataIsNull_s(pColInfoData, 0) && pColInfoData->info.type != TSDB_DATA_TYPE_NULL) {
|
||||||
|
uError("block decode colLen:%d error, colIdx:%d, type:%d", colLen[i], i, pColInfoData->info.type);
|
||||||
|
terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
|
||||||
|
return terrno;
|
||||||
|
}
|
||||||
|
|
||||||
pStart += colLen[i];
|
pStart += colLen[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -162,6 +162,7 @@ int32_t tmqMaxTopicNum = 20;
 int32_t tmqRowSize = 4096;
 // query
 int32_t tsQueryPolicy = 1;
+bool tsQueryTbNotExistAsEmpty = false;
 int32_t tsQueryRspPolicy = 0;
 int64_t tsQueryMaxConcurrentTables = 200;  // unit is TSDB_TABLE_NUM_UNIT
 bool tsEnableQueryHb = true;

@@ -569,6 +570,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
   TAOS_CHECK_RETURN(
       cfgAddInt32(pCfg, "compressMsgSize", tsCompressMsgSize, -1, 100000000, CFG_SCOPE_BOTH, CFG_DYN_CLIENT));
   TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 4, CFG_SCOPE_CLIENT, CFG_DYN_ENT_CLIENT));
+  TAOS_CHECK_RETURN(cfgAddBool(pCfg, "queryTableNotExistAsEmpty", tsQueryTbNotExistAsEmpty, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
   TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableQueryHb", tsEnableQueryHb, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
   TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableScience", tsEnableScience, CFG_SCOPE_CLIENT, CFG_DYN_NONE));
   TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));

@@ -1181,6 +1183,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
   TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "queryPolicy");
   tsQueryPolicy = pItem->i32;

+  TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "queryTableNotExistAsEmpty");
+  tsQueryTbNotExistAsEmpty = pItem->bval;
+
   TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "enableQueryHb");
   tsEnableQueryHb = pItem->bval;

@@ -2218,6 +2223,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
       {"numOfLogLines", &tsNumOfLogLines},
       {"querySmaOptimize", &tsQuerySmaOptimize},
       {"queryPolicy", &tsQueryPolicy},
+      {"queryTableNotExistAsEmpty", &tsQueryTbNotExistAsEmpty},
       {"queryPlannerTrace", &tsQueryPlannerTrace},
       {"queryNodeChunkSize", &tsQueryNodeChunkSize},
       {"queryUseNodeAllocator", &tsQueryUseNodeAllocator},

(Diff for one file suppressed because it is too large.)
@@ -37,6 +37,7 @@ typedef struct SDnodeMgmt {
   ProcessAlterNodeTypeFp processAlterNodeTypeFp;
   ProcessDropNodeFp processDropNodeFp;
   SendMonitorReportFp sendMonitorReportFp;
+  MonitorCleanExpiredSamplesFp monitorCleanExpiredSamplesFp;
   SendAuditRecordsFp sendAuditRecordsFp;
   GetVnodeLoadsFp getVnodeLoadsFp;
   GetVnodeLoadsFp getVnodeLoadsLiteFp;

@@ -65,6 +65,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
   pMgmt->processAlterNodeTypeFp = pInput->processAlterNodeTypeFp;
   pMgmt->processDropNodeFp = pInput->processDropNodeFp;
   pMgmt->sendMonitorReportFp = pInput->sendMonitorReportFp;
+  pMgmt->monitorCleanExpiredSamplesFp = pInput->monitorCleanExpiredSamplesFp;
   pMgmt->sendAuditRecordsFp = pInput->sendAuditRecordFp;
   pMgmt->getVnodeLoadsFp = pInput->getVnodeLoadsFp;
   pMgmt->getVnodeLoadsLiteFp = pInput->getVnodeLoadsLiteFp;

@@ -168,6 +168,7 @@ static void *dmMonitorThreadFp(void *param) {
     float interval = (curTime - lastTime) / 1000.0f;
     if (interval >= tsMonitorInterval) {
       (*pMgmt->sendMonitorReportFp)();
+      (*pMgmt->monitorCleanExpiredSamplesFp)();
       lastTime = curTime;

       trimCount = (trimCount + 1) % TRIM_FREQ;

@@ -14,8 +14,11 @@
 */

 #define _DEFAULT_SOURCE
+#include "taos_monitor.h"
 #include "vmInt.h"

+extern taos_counter_t *tsInsertCounter;
+
 void vmGetVnodeLoads(SVnodeMgmt *pMgmt, SMonVloadInfo *pInfo, bool isReset) {
   pInfo->pVloads = taosArrayInit(pMgmt->state.totalVnodes, sizeof(SVnodeLoad));
   if (pInfo->pVloads == NULL) return;
@@ -117,6 +120,34 @@ void vmGetMonitorInfo(SVnodeMgmt *pMgmt, SMonVmInfo *pInfo) {
   taosArrayDestroy(pVloads);
 }

+void vmCleanExpriedSamples(SVnodeMgmt *pMgmt) {
+  int list_size = taos_counter_get_keys_size(tsInsertCounter);
+  if (list_size == 0) return;
+  int32_t *vgroup_ids;
+  char **keys;
+  int r = 0;
+  r = taos_counter_get_vgroup_ids(tsInsertCounter, &keys, &vgroup_ids, &list_size);
+  if (r) {
+    dError("failed to get vgroup ids");
+    return;
+  }
+  (void)taosThreadRwlockRdlock(&pMgmt->lock);
+  for (int i = 0; i < list_size; i++) {
+    int32_t vgroup_id = vgroup_ids[i];
+    void *vnode = taosHashGet(pMgmt->hash, &vgroup_id, sizeof(int32_t));
+    if (vnode == NULL) {
+      r = taos_counter_delete(tsInsertCounter, keys[i]);
+      if (r) {
+        dError("failed to delete monitor sample key:%s", keys[i]);
+      }
+    }
+  }
+  (void)taosThreadRwlockUnlock(&pMgmt->lock);
+  if (vgroup_ids) taosMemoryFree(vgroup_ids);
+  if (keys) taosMemoryFree(keys);
+  return;
+}
+
 static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
   memcpy(pCfg, &vnodeCfgDefault, sizeof(SVnodeCfg));

@@ -128,6 +128,7 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);

 // dmMonitor.c
 void dmSendMonitorReport();
+void dmMonitorCleanExpiredSamples();
 void dmSendAuditRecords();
 void dmGetVnodeLoads(SMonVloadInfo *pInfo);
 void dmGetVnodeLoadsLite(SMonVloadInfo *pInfo);

@@ -39,6 +39,8 @@ void vmGetVnodeLoadsLite(void *pMgmt, SMonVloadInfo *pInfo);
 void mmGetMnodeLoads(void *pMgmt, SMonMloadInfo *pInfo);
 void qmGetQnodeLoads(void *pMgmt, SQnodeLoad *pInfo);

+void vmCleanExpriedSamples(void *pMgmt);
+
 #ifdef __cplusplus
 }
 #endif

@@ -409,6 +409,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) {
       .processAlterNodeTypeFp = dmProcessAlterNodeTypeReq,
       .processDropNodeFp = dmProcessDropNodeReq,
       .sendMonitorReportFp = dmSendMonitorReport,
+      .monitorCleanExpiredSamplesFp = dmMonitorCleanExpiredSamples,
       .sendAuditRecordFp = auditSendRecordsInBatch,
       .getVnodeLoadsFp = dmGetVnodeLoads,
       .getVnodeLoadsLiteFp = dmGetVnodeLoadsLite,

@@ -33,8 +33,8 @@ static void dmGetMonitorBasicInfoBasic(SDnode *pDnode, SMonBasicInfo *pInfo) {
 }

 static void dmGetMonitorDnodeInfo(SDnode *pDnode, SMonDnodeInfo *pInfo) {
-  //pInfo->uptime = (taosGetTimestampMs() - pDnode->data.rebootTime) / (86400000.0f);
-  pInfo->uptime = (taosGetTimestampMs() - pDnode->data.rebootTime) /1000.0f;
+  // pInfo->uptime = (taosGetTimestampMs() - pDnode->data.rebootTime) / (86400000.0f);
+  pInfo->uptime = (taosGetTimestampMs() - pDnode->data.rebootTime) / 1000.0f;
   pInfo->has_mnode = pDnode->wrappers[MNODE].required;
   pInfo->has_qnode = pDnode->wrappers[QNODE].required;
   pInfo->has_snode = pDnode->wrappers[SNODE].required;
@@ -52,6 +52,17 @@ static void dmGetDmMonitorInfo(SDnode *pDnode) {
   monSetDmInfo(&dmInfo);
 }

+void dmCleanExpriedSamples(SDnode *pDnode) {
+  SMgmtWrapper *pWrapper = &pDnode->wrappers[VNODE];
+  if (dmMarkWrapper(pWrapper) == 0) {
+    if (pWrapper->pMgmt != NULL) {
+      vmCleanExpriedSamples(pWrapper->pMgmt);
+    }
+  }
+  dmReleaseWrapper(pWrapper);
+  return;
+}
+
 static void dmGetDmMonitorInfoBasic(SDnode *pDnode) {
   SMonDmInfo dmInfo = {0};
   dmGetMonitorBasicInfoBasic(pDnode, &dmInfo.basic);

@@ -123,11 +134,17 @@ void dmSendMonitorReport() {
   monGenAndSendReport();
 }

-//Todo: put this in seperate file in the future
-void dmSendAuditRecords() {
-  auditSendRecordsInBatch();
+void dmMonitorCleanExpiredSamples() {
+  if (!tsEnableMonitor || tsMonitorFqdn[0] == 0 || tsMonitorPort == 0) return;
+  dTrace("clean monitor expired samples");
+
+  SDnode *pDnode = dmInstance();
+  (void)dmCleanExpriedSamples(pDnode);
 }

+// Todo: put this in seperate file in the future
+void dmSendAuditRecords() { auditSendRecordsInBatch(); }
+
 void dmGetVnodeLoads(SMonVloadInfo *pInfo) {
   SDnode *pDnode = dmInstance();
   SMgmtWrapper *pWrapper = &pDnode->wrappers[VNODE];
@@ -116,6 +116,7 @@ typedef enum {
 typedef int32_t (*ProcessCreateNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
 typedef int32_t (*ProcessDropNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
 typedef void (*SendMonitorReportFp)();
+typedef void (*MonitorCleanExpiredSamplesFp)();
 typedef void (*SendAuditRecordsFp)();
 typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo);
 typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo);

@@ -155,6 +156,7 @@ typedef struct {
   ProcessAlterNodeTypeFp processAlterNodeTypeFp;
   ProcessDropNodeFp processDropNodeFp;
   SendMonitorReportFp sendMonitorReportFp;
+  MonitorCleanExpiredSamplesFp monitorCleanExpiredSamplesFp;
   SendAuditRecordsFp sendAuditRecordFp;
   GetVnodeLoadsFp getVnodeLoadsFp;
   GetVnodeLoadsFp getVnodeLoadsLiteFp;

@@ -42,7 +42,7 @@ int32_t mndPerfsInitMeta(SHashObj *hash) {
   int32_t code = 0;
   STableMetaRsp meta = {0};

-  tstrncpy(meta.dbFName, TSDB_INFORMATION_SCHEMA_DB, sizeof(meta.dbFName));
+  tstrncpy(meta.dbFName, TSDB_PERFORMANCE_SCHEMA_DB, sizeof(meta.dbFName));
   meta.tableType = TSDB_SYSTEM_TABLE;
   meta.sversion = 1;
   meta.tversion = 1;

@@ -251,7 +251,7 @@ void mndKillTransImpl(SMnode *pMnode, int32_t transId, const char *pDbName) {
     int32_t code = mndKillTrans(pMnode, pTrans);
     mndReleaseTrans(pMnode, pTrans);
     if (code) {
-      mError("failed to kill trans:%d", pTrans->id);
+      mError("failed to kill transId:%d, code:%s", pTrans->id, tstrerror(code));
     }
   } else {
     mError("failed to acquire trans in Db:%s, transId:%d", pDbName, transId);

@@ -68,7 +68,11 @@ SSdb *sdbInit(SSdbOpt *pOption) {
 void sdbCleanup(SSdb *pSdb) {
   mInfo("start to cleanup sdb");

-  (void)sdbWriteFile(pSdb, 0);
+  int32_t code = 0;
+
+  if ((code = sdbWriteFile(pSdb, 0)) != 0) {
+    mError("failed to write sdb file since %s", tstrerror(code));
+  }

   if (pSdb->currDir != NULL) {
     taosMemoryFreeClear(pSdb->currDir);

@@ -258,8 +258,11 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
   if (code != 0) {
     mError("failed to read sdb file:%s head since %s", file, tstrerror(code));
     taosMemoryFree(pRaw);
-    (void)taosCloseFile(&pFile);
-    return -1;
+    int32_t ret = 0;
+    if ((ret = taosCloseFile(&pFile)) != 0) {
+      mError("failed to close sdb file:%s since %s", file, tstrerror(ret));
+    }
+    return code;
   }

   int64_t tableVer[SDB_MAX] = {0};

@@ -361,7 +364,9 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
         pSdb->commitTerm, pSdb->commitConfig);

 _OVER:
-  (void)taosCloseFile(&pFile);
+  if ((ret = taosCloseFile(&pFile)) != 0) {
+    mError("failed to close sdb file:%s since %s", file, tstrerror(ret));
+  }
   sdbFreeRaw(pRaw);

   TAOS_RETURN(code);

@@ -404,8 +409,11 @@ static int32_t sdbWriteFileImp(SSdb *pSdb, int32_t skip_type) {
   code = sdbWriteFileHead(pSdb, pFile);
   if (code != 0) {
     mError("failed to write sdb file:%s head since %s", tmpfile, tstrerror(code));
-    (void)taosCloseFile(&pFile);
-    return -1;
+    int32_t ret = 0;
+    if ((ret = taosCloseFile(&pFile)) != 0) {
+      mError("failed to close sdb file:%s since %s", tmpfile, tstrerror(ret));
+    }
+    return code;
   }

   for (int32_t i = SDB_MAX - 1; i >= 0; --i) {

@@ -613,12 +621,18 @@ static void sdbCloseIter(SSdbIter *pIter) {
   if (pIter == NULL) return;
if (pIter == NULL) return;
|
||||||
|
|
||||||
if (pIter->file != NULL) {
|
if (pIter->file != NULL) {
|
||||||
(void)taosCloseFile(&pIter->file);
|
int32_t ret = 0;
|
||||||
|
if ((ret = taosCloseFile(&pIter->file)) != 0) {
|
||||||
|
mError("failed to close sdb file since %s", tstrerror(ret));
|
||||||
|
}
|
||||||
pIter->file = NULL;
|
pIter->file = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pIter->name != NULL) {
|
if (pIter->name != NULL) {
|
||||||
(void)taosRemoveFile(pIter->name);
|
int32_t ret = 0;
|
||||||
|
if ((ret = taosRemoveFile(pIter->name)) != 0) {
|
||||||
|
mError("failed to remove sdb file:%s since %s", pIter->name, tstrerror(ret));
|
||||||
|
}
|
||||||
taosMemoryFree(pIter->name);
|
taosMemoryFree(pIter->name);
|
||||||
pIter->name = NULL;
|
pIter->name = NULL;
|
||||||
}
|
}
|
||||||
|
|
|
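The sdb hunks above replace fire-and-forget `(void)` casts on `taosCloseFile`/`taosRemoveFile` with a checked call that logs the failure and carries on. A minimal sketch of that idiom in portable C follows; `close_and_log`, the file name, and the message format are illustrative stand-ins, not TDengine APIs.

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Check the close result and log it, but keep cleaning up: the same shape
 * used for taosCloseFile/taosRemoveFile in the hunks above. */
static void close_and_log(FILE **fp, const char *path) {
  if (fp == NULL || *fp == NULL) return;
  if (fclose(*fp) != 0) {
    fprintf(stderr, "failed to close file:%s since %s\n", path, strerror(errno));
  }
  *fp = NULL;  /* mirrors pIter->file = NULL so the handle is not reused */
}

int main(void) {
  FILE *fp = fopen("example.tmp", "w");
  if (fp == NULL) {
    fprintf(stderr, "failed to open example.tmp since %s\n", strerror(errno));
    return 1;
  }
  fputs("demo\n", fp);
  close_and_log(&fp, "example.tmp");
  remove("example.tmp");
  return 0;
}
```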
@ -174,12 +174,12 @@ static int32_t sdbInsertRow(SSdb *pSdb, SHashObj *hash, SSdbRaw *pRaw, SSdbRow *
     if (insertFp != NULL) {
       code = (*insertFp)(pSdb, pRow->pObj);
       if (code != 0) {
-        if (terrno == 0) terrno = TSDB_CODE_MND_TRANS_UNKNOW_ERROR;
-        code = terrno;
-        (void)taosHashRemove(hash, pRow->pObj, keySize);
+        if (taosHashRemove(hash, pRow->pObj, keySize) != 0) {
+          mError("failed to remove row from hash");
+        }
         sdbFreeRow(pSdb, pRow, false);
-        terrno = code;
         sdbUnLock(pSdb, type);
+        terrno = code;
         return terrno;
       }
     }

@ -1129,7 +1129,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
 
   SStreamTask* pTask = NULL;
   code = streamMetaAcquireTask(pMeta, req.streamId, req.taskId, &pTask);
-  if (pTask == NULL) {
+  if (pTask == NULL || code != 0) {
     tqError("vgId:%d failed to find s-task:0x%x, ignore checkpoint msg. checkpointId:%" PRId64
             " transId:%d it may have been destroyed",
             vgId, req.taskId, req.checkpointId, req.transId);

@ -410,7 +410,7 @@ int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
   tDecoderClear(&decoder);
 
   if (code) {
-    tqError("vgId:%d failed to decode retrieve msg, quit handling it", pMeta->vgId);
+    tqError("vgId:%d failed to decode retrieve msg, discard it", pMeta->vgId);
     return code;
   }
 

@ -420,9 +420,16 @@ int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
     tqError("vgId:%d process retrieve req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
             req.dstTaskId);
     tCleanupStreamRetrieveReq(&req);
-    return -1;
+    return code;
   }
 
+  // enqueue
+  tqDebug("s-task:%s (vgId:%d level:%d) recv retrieve req from task:0x%x(vgId:%d),QID:0x%" PRIx64, pTask->id.idStr,
+          pTask->pMeta->vgId, pTask->info.taskLevel, req.srcTaskId, req.srcNodeId, req.reqId);
+
+  // if task is in ck status, set current ck failed
+  streamTaskSetCheckpointFailed(pTask);
+
   if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
     code = streamProcessRetrieveReq(pTask, &req);
   } else {

@ -431,14 +438,19 @@ int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
     code = streamTaskBroadcastRetrieveReq(pTask, &req);
   }
 
+  if (code != TSDB_CODE_SUCCESS) {  // return error not send rsp manually
+    tqError("s-task:0x%x vgId:%d failed to process retrieve request from 0x%x, code:%s", req.dstTaskId, req.dstNodeId,
+            req.srcTaskId, tstrerror(code));
+  } else {  // send rsp manually only on success.
     SRpcMsg rsp = {.info = pMsg->info, .code = 0};
     streamTaskSendRetrieveRsp(&req, &rsp);
+  }
 
   streamMetaReleaseTask(pMeta, pTask);
   tCleanupStreamRetrieveReq(&req);
 
   // always return success, to disable the auto rsp
-  return TSDB_CODE_SUCCESS;
+  return code;
 }
 
 int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {

@ -211,7 +211,7 @@ static int32_t updateBlockSMAInfo(STSchema* pSchema, SBlockLoadSuppInfo* pSupInf
   while (i < pSchema->numOfCols && j < pSupInfo->numOfCols) {
     STColumn* pTCol = &pSchema->columns[i];
     if (pTCol->colId == pSupInfo->colId[j]) {
-      if (!IS_BSMA_ON(pTCol)) {
+      if (!IS_BSMA_ON(pTCol) && (PRIMARYKEY_TIMESTAMP_COL_ID != pTCol->colId)) {
         pSupInfo->smaValid = false;
         return TSDB_CODE_SUCCESS;
       }

@ -1743,8 +1743,6 @@ static int32_t initRowMergeIfNeeded(STsdbReader* pReader, int64_t uid) {
     if (ps == NULL) {
       return terrno;
     }
-
-    code = tsdbRowMergerInit(pMerger, ps);
   }
 
   return code;

@ -253,11 +253,6 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int
   version = (submitTbData.flags >> 8) & 0xff;
   submitTbData.flags = submitTbData.flags & 0xff;
 
-  if (submitTbData.flags & SUBMIT_REQ_FROM_FILE) {
-    code = grantCheck(TSDB_GRANT_CSV);
-    TSDB_CHECK_CODE(code, lino, _exit);
-  }
-
   int64_t uid;
   if (submitTbData.flags & SUBMIT_REQ_AUTO_CREATE_TABLE) {
     code = vnodePreprocessCreateTableReq(pVnode, pCoder, btimeMs, &uid);

@ -51,7 +51,7 @@ typedef struct SAggOperatorInfo {
 } SAggOperatorInfo;
 
 static void destroyAggOperatorInfo(void* param);
-static void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
+static int32_t setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
 
 static int32_t createDataBlockForEmptyInput(SOperatorInfo* pOperator, SSDataBlock** ppBlock);
 static void destroyDataBlockForEmptyInput(bool blockAllocated, SSDataBlock** ppBlock);

@ -63,7 +63,7 @@ static int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, in
 
 static int32_t addNewResultRowBuf(SResultRow* pWindowRes, SDiskbasedBuf* pResultBuf, uint32_t size);
 
-static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
+static int32_t doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
 
 static void functionCtxSave(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus);
 static void functionCtxRestore(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus);

@ -184,7 +184,8 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) {
     if (pBlock) {
       pAggInfo->pNewGroupBlock = NULL;
       tSimpleHashClear(pAggInfo->aggSup.pResultRowHashTable);
-      setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.id.groupId);
+      code = setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.id.groupId);
+      QUERY_CHECK_CODE(code, lino, _end);
       code = setInputDataBlock(pSup, pBlock, order, pBlock->info.scanFlag, true);
       QUERY_CHECK_CODE(code, lino, _end);
 

@ -225,12 +226,19 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) {
       break;
     }
     // the pDataBlock are always the same one, no need to call this again
-    setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.id.groupId);
+    code = setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.id.groupId);
+    if (code != TSDB_CODE_SUCCESS) {
+      destroyDataBlockForEmptyInput(blockAllocated, &pBlock);
+      T_LONG_JMP(pTaskInfo->env, code);
+    }
     code = setInputDataBlock(pSup, pBlock, order, pBlock->info.scanFlag, true);
-    QUERY_CHECK_CODE(code, lino, _end);
+    if (code != TSDB_CODE_SUCCESS) {
+      destroyDataBlockForEmptyInput(blockAllocated, &pBlock);
+      T_LONG_JMP(pTaskInfo->env, code);
+    }
 
     code = doAggregateImpl(pOperator, pSup->pCtx);
-    if (code != 0) {
+    if (code != TSDB_CODE_SUCCESS) {
       destroyDataBlockForEmptyInput(blockAllocated, &pBlock);
       T_LONG_JMP(pTaskInfo->env, code);
     }

@ -427,20 +435,24 @@ void destroyDataBlockForEmptyInput(bool blockAllocated, SSDataBlock** ppBlock) {
   *ppBlock = NULL;
 }
 
-void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId) {
+int32_t setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId) {
+  int32_t code = TSDB_CODE_SUCCESS;
   SAggOperatorInfo* pAggInfo = pOperator->info;
   if (pAggInfo->groupId != UINT64_MAX && pAggInfo->groupId == groupId) {
-    return;
+    return code;
   }
 
-  doSetTableGroupOutputBuf(pOperator, numOfOutput, groupId);
+  code = doSetTableGroupOutputBuf(pOperator, numOfOutput, groupId);
 
   // record the current active group id
   pAggInfo->groupId = groupId;
+  return code;
 }
 
-void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId) {
+int32_t doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId) {
   // for simple group by query without interval, all the tables belong to one group result.
+  int32_t code = TSDB_CODE_SUCCESS;
+  int32_t lino = 0;
   SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
   SAggOperatorInfo* pAggInfo = pOperator->info;
 

@ -452,23 +464,27 @@ void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uin
       doSetResultOutBufByKey(pAggInfo->aggSup.pResultBuf, pResultRowInfo, (char*)&groupId, sizeof(groupId), true,
                              groupId, pTaskInfo, false, &pAggInfo->aggSup, true);
   if (pResultRow == NULL || pTaskInfo->code != 0) {
-    T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
+    code = pTaskInfo->code;
+    lino = __LINE__;
+    goto _end;
   }
   /*
    * not assign result buffer yet, add new result buffer
    * all group belong to one result set, and each group result has different group id so set the id to be one
    */
   if (pResultRow->pageId == -1) {
-    int32_t ret = addNewResultRowBuf(pResultRow, pAggInfo->aggSup.pResultBuf, pAggInfo->binfo.pRes->info.rowSize);
-    if (ret != TSDB_CODE_SUCCESS) {
-      T_LONG_JMP(pTaskInfo->env, terrno);
-    }
+    code = addNewResultRowBuf(pResultRow, pAggInfo->aggSup.pResultBuf, pAggInfo->binfo.pRes->info.rowSize);
+    QUERY_CHECK_CODE(code, lino, _end);
   }
 
-  int32_t ret = setResultRowInitCtx(pResultRow, pCtx, numOfOutput, rowEntryInfoOffset);
-  if (ret != TSDB_CODE_SUCCESS) {
-    T_LONG_JMP(pTaskInfo->env, ret);
+  code = setResultRowInitCtx(pResultRow, pCtx, numOfOutput, rowEntryInfoOffset);
+  QUERY_CHECK_CODE(code, lino, _end);
+
+_end:
+  if (code != TSDB_CODE_SUCCESS) {
+    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
   }
+  return code;
 }
 
 // a new buffer page for each table. Needs to opt this design
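The aggregate-operator hunks above convert `void` helpers that bailed out with `T_LONG_JMP` into `int32_t` functions that record a line number, jump to a local `_end:` label, log once, and hand the code back to the caller (`QUERY_CHECK_CODE` in the diff). A compact sketch of that shape is below; the step functions and the simplified check macro are hypothetical stand-ins for the TDengine ones.

```c
#include <stdio.h>

/* Simplified stand-in for QUERY_CHECK_CODE: remember the line and jump out. */
#define CHECK_CODE(code, lino, label) \
  do {                                \
    if ((code) != 0) {                \
      (lino) = __LINE__;              \
      goto label;                     \
    }                                 \
  } while (0)

static int step_alloc(void) { return 0; }  /* e.g. allocate the result row buffer */
static int step_init(void)  { return -1; } /* simulate a failing init step */

static int set_group_output_buf(void) {
  int code = 0;
  int lino = 0;

  code = step_alloc();
  CHECK_CODE(code, lino, _end);

  code = step_init();
  CHECK_CODE(code, lino, _end);

_end:
  if (code != 0) {
    fprintf(stderr, "%s failed at line %d, code:%d\n", __func__, lino, code);
  }
  return code;  /* the caller decides whether to propagate or longjmp */
}

int main(void) { return set_group_output_buf() == 0 ? 0 : 1; }
```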
@ -136,8 +136,7 @@ static int32_t sendSubmitRequest(SDataInserterHandle* pInserter, void* pMsg, int
   pMsgSendInfo->msgType = TDMT_VND_SUBMIT;
   pMsgSendInfo->fp = inserterCallback;
 
-  int64_t transporterId = 0;
-  return asyncSendMsgToServer(pTransporter, pEpset, &transporterId, pMsgSendInfo);
+  return asyncSendMsgToServer(pTransporter, pEpset, NULL, pMsgSendInfo);
 }
 
 static int32_t submitReqToMsg(int32_t vgId, SSubmitReq2* pReq, void** pData, int32_t* pLen) {

@ -235,7 +234,8 @@ int32_t buildSubmitReqFromBlock(SDataInserterHandle* pInserter, SSubmitReq2** pp
       case TSDB_DATA_TYPE_VARBINARY:
       case TSDB_DATA_TYPE_VARCHAR: {  // TSDB_DATA_TYPE_BINARY
         if (pColInfoData->info.type != pCol->type) {
-          qError("column:%d type:%d in block dismatch with schema col:%d type:%d", colIdx, pColInfoData->info.type, k, pCol->type);
+          qError("column:%d type:%d in block dismatch with schema col:%d type:%d", colIdx, pColInfoData->info.type, k,
+                 pCol->type);
           terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
           goto _end;
         }

@ -462,7 +462,8 @@ int32_t createDataInserter(SDataSinkManager* pManager, const SDataSinkNode* pDat
   inserter->explain = pInserterNode->explain;
 
   int64_t suid = 0;
-  int32_t code = pManager->pAPI->metaFn.getTableSchema(inserter->pParam->readHandle->vnode, pInserterNode->tableId, &inserter->pSchema, &suid);
+  int32_t code = pManager->pAPI->metaFn.getTableSchema(inserter->pParam->readHandle->vnode, pInserterNode->tableId,
+                                                       &inserter->pSchema, &suid);
   if (code) {
     terrno = code;
     goto _return;

@ -464,7 +464,10 @@ _error:
 
 void destroyExchangeOperatorInfo(void* param) {
   SExchangeInfo* pExInfo = (SExchangeInfo*)param;
-  (void)taosRemoveRef(exchangeObjRefPool, pExInfo->self);
+  int32_t code = taosRemoveRef(exchangeObjRefPool, pExInfo->self);
+  if (code != TSDB_CODE_SUCCESS) {
+    qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+  }
 }
 
 void freeBlock(void* pParam) {

@ -505,7 +508,10 @@ void doDestroyExchangeOperatorInfo(void* param) {
   blockDataDestroy(pExInfo->pDummyBlock);
   tSimpleHashCleanup(pExInfo->pHashSources);
 
-  (void)tsem_destroy(&pExInfo->ready);
+  int32_t code = tsem_destroy(&pExInfo->ready);
+  if (code != TSDB_CODE_SUCCESS) {
+    qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+  }
   taosMemoryFreeClear(pExInfo->pTaskId);
 
   taosMemoryFreeClear(param);

@ -561,9 +567,13 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
   if (code != TSDB_CODE_SUCCESS) {
     code = TAOS_SYSTEM_ERROR(code);
     qError("failed to invoke post when fetch rsp is ready, code:%s, %p", tstrerror(code), pExchangeInfo);
+    return code;
   }
 
-  (void)taosReleaseRef(exchangeObjRefPool, pWrapper->exchangeId);
+  code = taosReleaseRef(exchangeObjRefPool, pWrapper->exchangeId);
+  if (code != TSDB_CODE_SUCCESS) {
+    qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+  }
   return code;
 }
 

@ -1190,7 +1200,14 @@ static int32_t exchangeWait(SOperatorInfo* pOperator, SExchangeInfo* pExchangeIn
       return pTask->code;
     }
   }
-  (void)tsem_wait(&pExchangeInfo->ready);
+
+  code = tsem_wait(&pExchangeInfo->ready);
+  if (code != TSDB_CODE_SUCCESS) {
+    qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+    pTask->code = code;
+    return pTask->code;
+  }
+
   if (pTask->pWorkerCb) {
     code = pTask->pWorkerCb->afterRecoverFromBlocking(pTask->pWorkerCb->pPool);
     if (code != TSDB_CODE_SUCCESS) {

@ -601,7 +601,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
   SExecTaskInfo** pTask = (SExecTaskInfo**)pTaskInfo;
   (void)taosThreadOnce(&initPoolOnce, initRefPool);
 
-  qDebug("start to create task, TID:0x%" PRIx64 "QID:0x%" PRIx64 ", vgId:%d", taskId, pSubplan->id.queryId, vgId);
+  qDebug("start to create task, TID:0x%" PRIx64 " QID:0x%" PRIx64 ", vgId:%d", taskId, pSubplan->id.queryId, vgId);
 
   int32_t code = createExecTaskInfo(pSubplan, pTask, readHandle, taskId, vgId, sql, model);
   if (code != TSDB_CODE_SUCCESS || NULL == *pTask) {

@ -904,8 +904,14 @@ void qStopTaskOperators(SExecTaskInfo* pTaskInfo) {
     }
     SExchangeInfo* pExchangeInfo = taosAcquireRef(exchangeObjRefPool, pStop->refId);
     if (pExchangeInfo) {
-      (void)tsem_post(&pExchangeInfo->ready);
-      (void)taosReleaseRef(exchangeObjRefPool, pStop->refId);
+      int32_t code = tsem_post(&pExchangeInfo->ready);
+      if (code != TSDB_CODE_SUCCESS) {
+        qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+      }
+      code = taosReleaseRef(exchangeObjRefPool, pStop->refId);
+      if (code != TSDB_CODE_SUCCESS) {
+        qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+      }
     }
   }
 

@ -288,7 +288,7 @@ void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst) {
   memcpy(p, "TID:0x", offset);
   offset += tintToHex(taskId, &p[offset]);
 
-  memcpy(&p[offset], "QID:0x", 7);
+  memcpy(&p[offset], " QID:0x", 7);
   offset += 7;
   offset += tintToHex(queryId, &p[offset]);
 

@ -671,7 +671,8 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
     STableCachedVal* pVal = taosLRUCacheValue(pCache->pTableMetaEntryCache, h);
     val = *pVal;
 
-    (void)taosLRUCacheRelease(pCache->pTableMetaEntryCache, h, false);
+    bool bRes = taosLRUCacheRelease(pCache->pTableMetaEntryCache, h, false);
+    qTrace("release LRU cache, res %d", bRes);
   }
 
   qDebug("retrieve table meta from cache:%" PRIu64 ", hit:%" PRIu64 " miss:%" PRIu64 ", %s", pCache->metaFetch,

@ -893,7 +894,10 @@ void markGroupProcessed(STableScanInfo* pInfo, uint64_t groupId) {
   if (pInfo->base.pTableListInfo->groupOffset) {
     pInfo->countState = TABLE_COUNT_STATE_PROCESSED;
   } else {
-    (void)taosHashRemove(pInfo->base.pTableListInfo->remainGroups, &groupId, sizeof(groupId));
+    int32_t code = taosHashRemove(pInfo->base.pTableListInfo->remainGroups, &groupId, sizeof(groupId));
+    if (code != TSDB_CODE_SUCCESS) {
+      qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+    }
   }
 }
 

@ -4529,6 +4533,7 @@ static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRe
         SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId);
         QUERY_CHECK_NULL(pDst, code, lino, _end, terrno);
         code = tagScanFillOneCellWithTag(pOperator, pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode);
+        QUERY_CHECK_CODE(code, lino, _end);
       }
     }
   } else {

@ -4540,6 +4545,7 @@ static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRe
         SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId);
         QUERY_CHECK_NULL(pDst, code, lino, _end, terrno);
         code = tagScanFillOneCellWithTag(pOperator, pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode);
+        QUERY_CHECK_CODE(code, lino, _end);
       }
     }
   }

@ -718,6 +718,11 @@ static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pB
   SColumnInfoData* pGroupId = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
   SColumnInfoData* pCalStartTs = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
   SColumnInfoData* pCalEndTs = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
+  SColumnInfoData* pTbName = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
+  SColumnInfoData* pPrimaryKey = NULL;
+  if (taosArrayGetSize(pBlock->pDataBlock) > PRIMARY_KEY_COLUMN_INDEX) {
+    pPrimaryKey = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, PRIMARY_KEY_COLUMN_INDEX);
+  }
   for (; (*pIndex) < size; (*pIndex)++) {
     SPullWindowInfo* pWin = taosArrayGet(array, (*pIndex));
     code = colDataSetVal(pStartTs, pBlock->info.rows, (const char*)&pWin->window.skey, false);

@ -735,6 +740,11 @@ static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pB
     code = colDataSetVal(pCalEndTs, pBlock->info.rows, (const char*)&pWin->calWin.ekey, false);
     QUERY_CHECK_CODE(code, lino, _end);
 
+    colDataSetNULL(pTbName, pBlock->info.rows);
+    if (pPrimaryKey != NULL) {
+      colDataSetNULL(pPrimaryKey, pBlock->info.rows);
+    }
+
     pBlock->info.rows++;
   }
   if ((*pIndex) == size) {

@ -1575,8 +1575,8 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
 
     SMetaReader mr = {0};
     pAPI->metaReaderFn.initReader(&mr, pInfo->readHandle.vnode, META_READER_LOCK, &pAPI->metaFn);
-    code = doSetUserTableMetaInfo(&pAPI->metaReaderFn, &pAPI->metaFn, pInfo->readHandle.vnode, &mr, *uid, dbname, vgId, p,
-                                  numOfRows, GET_TASKID(pTaskInfo));
+    code = doSetUserTableMetaInfo(&pAPI->metaReaderFn, &pAPI->metaFn, pInfo->readHandle.vnode, &mr, *uid, dbname, vgId,
+                                  p, numOfRows, GET_TASKID(pTaskInfo));
 
     pAPI->metaReaderFn.clearReader(&mr);
     QUERY_CHECK_CODE(code, lino, _end);

@ -2170,15 +2170,19 @@ static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableSca
     pMsgSendInfo->fp = loadSysTableCallback;
     pMsgSendInfo->requestId = pTaskInfo->id.queryId;
 
-    int64_t transporterId = 0;
-    code = asyncSendMsgToServer(pInfo->readHandle.pMsgCb->clientRpc, &pInfo->epSet, &transporterId, pMsgSendInfo);
+    code = asyncSendMsgToServer(pInfo->readHandle.pMsgCb->clientRpc, &pInfo->epSet, NULL, pMsgSendInfo);
     if (code != TSDB_CODE_SUCCESS) {
       qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
       pTaskInfo->code = code;
       T_LONG_JMP(pTaskInfo->env, code);
     }
 
-    (void)tsem_wait(&pInfo->ready);
+    code = tsem_wait(&pInfo->ready);
+    if (code != TSDB_CODE_SUCCESS) {
+      qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+      pTaskInfo->code = code;
+      T_LONG_JMP(pTaskInfo->env, code);
+    }
 
     if (pTaskInfo->code) {
       qError("%s load meta data from mnode failed, totalRows:%" PRIu64 ", code:%s", GET_TASKID(pTaskInfo),
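The sysTableScanFromMNode hunk above pairs an asynchronous request with a semaphore: the scan thread sends the message and waits on `pInfo->ready`, and `loadSysTableCallback` (in a following hunk) posts it once the response lands; the change is that both the wait and the post are now checked instead of `(void)`-cast. A generic, self-contained sketch of that handshake using POSIX `sem_*` and `pthread_*` as stand-ins for the tsem_* wrappers:

```c
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

typedef struct {
  sem_t ready;   /* plays the role of pInfo->ready */
  int   rspCode; /* result filled in by the callback */
} ScanCtx;

/* Stand-in for the response callback: record the result, then post. */
static void *fake_server(void *arg) {
  ScanCtx *ctx = (ScanCtx *)arg;
  ctx->rspCode = 0; /* pretend the metadata arrived successfully */
  if (sem_post(&ctx->ready) != 0) perror("sem_post");
  return NULL;
}

int main(void) {
  ScanCtx ctx;
  if (sem_init(&ctx.ready, 0, 0) != 0) { perror("sem_init"); return 1; }

  pthread_t tid;
  if (pthread_create(&tid, NULL, fake_server, &ctx) != 0) return 1;

  /* The diff now checks this wait instead of discarding its return value. */
  if (sem_wait(&ctx.ready) != 0) { perror("sem_wait"); return 1; }
  printf("rsp code:%d\n", ctx.rspCode);

  pthread_join(tid, NULL);
  sem_destroy(&ctx.ready);
  return 0;
}
```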
@ -2328,7 +2332,10 @@ void extractTbnameSlotId(SSysTableScanInfo* pInfo, const SScanPhysiNode* pScanNo
|
||||||
|
|
||||||
void destroySysScanOperator(void* param) {
|
void destroySysScanOperator(void* param) {
|
||||||
SSysTableScanInfo* pInfo = (SSysTableScanInfo*)param;
|
SSysTableScanInfo* pInfo = (SSysTableScanInfo*)param;
|
||||||
(void)tsem_destroy(&pInfo->ready);
|
int32_t code = tsem_destroy(&pInfo->ready);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
|
||||||
|
}
|
||||||
blockDataDestroy(pInfo->pRes);
|
blockDataDestroy(pInfo->pRes);
|
||||||
|
|
||||||
if (pInfo->name.type == TSDB_TABLE_NAME_T) {
|
if (pInfo->name.type == TSDB_TABLE_NAME_T) {
|
||||||
|
@ -2384,7 +2391,10 @@ int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
(void)tsem_post(&pScanResInfo->ready);
|
int32_t res = tsem_post(&pScanResInfo->ready);
|
||||||
|
if (res != TSDB_CODE_SUCCESS) {
|
||||||
|
qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(res));
|
||||||
|
}
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1231,7 +1231,7 @@ void destroyIntervalOperatorInfo(void* param) {
|
||||||
cleanupAggSup(&pInfo->aggSup);
|
cleanupAggSup(&pInfo->aggSup);
|
||||||
cleanupExprSupp(&pInfo->scalarSupp);
|
cleanupExprSupp(&pInfo->scalarSupp);
|
||||||
|
|
||||||
(void)tdListFree(pInfo->binfo.resultRowInfo.openWindow);
|
pInfo->binfo.resultRowInfo.openWindow = tdListFree(pInfo->binfo.resultRowInfo.openWindow);
|
||||||
|
|
||||||
taosArrayDestroy(pInfo->pInterpCols);
|
taosArrayDestroy(pInfo->pInterpCols);
|
||||||
pInfo->pInterpCols = NULL;
|
pInfo->pInterpCols = NULL;
|
||||||
|
@ -2132,7 +2132,7 @@ typedef struct SGroupTimeWindow {
|
||||||
|
|
||||||
void destroyMergeIntervalOperatorInfo(void* param) {
|
void destroyMergeIntervalOperatorInfo(void* param) {
|
||||||
SMergeIntervalAggOperatorInfo* miaInfo = (SMergeIntervalAggOperatorInfo*)param;
|
SMergeIntervalAggOperatorInfo* miaInfo = (SMergeIntervalAggOperatorInfo*)param;
|
||||||
(void)tdListFree(miaInfo->groupIntervals);
|
miaInfo->groupIntervals = tdListFree(miaInfo->groupIntervals);
|
||||||
destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo);
|
destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo);
|
||||||
|
|
||||||
taosMemoryFreeClear(param);
|
taosMemoryFreeClear(param);
|
||||||
|
@ -2162,7 +2162,8 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
|
||||||
|
|
||||||
STimeWindow* prevWin = &prevGrpWin->window;
|
STimeWindow* prevWin = &prevGrpWin->window;
|
||||||
if ((ascScan && newWin->skey > prevWin->ekey) || ((!ascScan) && newWin->skey < prevWin->ekey)) {
|
if ((ascScan && newWin->skey > prevWin->ekey) || ((!ascScan) && newWin->skey < prevWin->ekey)) {
|
||||||
(void)tdListPopNode(miaInfo->groupIntervals, listNode);
|
SListNode* tmp = tdListPopNode(miaInfo->groupIntervals, listNode);
|
||||||
|
taosMemoryFreeClear(tmp);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -678,7 +678,7 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t
|
||||||
|
|
||||||
// set result type
|
// set result type
|
||||||
if (numOfParams > 2) {
|
if (numOfParams > 2) {
|
||||||
pFunc->node.resType = (SDataType){.bytes = 512, .type = TSDB_DATA_TYPE_VARCHAR};
|
pFunc->node.resType = (SDataType){.bytes = 3200, .type = TSDB_DATA_TYPE_VARCHAR};
|
||||||
} else {
|
} else {
|
||||||
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
|
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
|
||||||
}
|
}
|
||||||
|
|
|
@ -2105,7 +2105,8 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
||||||
tMemBucket* pMemBucket = ppInfo->pMemBucket;
|
tMemBucket* pMemBucket = ppInfo->pMemBucket;
|
||||||
if (pMemBucket != NULL && pMemBucket->total > 0) { // check for null
|
if (pMemBucket != NULL && pMemBucket->total > 0) { // check for null
|
||||||
if (pCtx->numOfParams > 2) {
|
if (pCtx->numOfParams > 2) {
|
||||||
char buf[512] = {0};
|
char buf[3200] = {0};
|
||||||
|
// max length of double num is 317, e.g. use %.6lf to print -1.0e+308, consider the comma and bracket, 3200 is enough.
|
||||||
size_t len = 1;
|
size_t len = 1;
|
||||||
|
|
||||||
varDataVal(buf)[0] = '[';
|
varDataVal(buf)[0] = '[';
|
||||||
|
@ -6008,6 +6009,7 @@ int32_t modeFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
|
||||||
pInfo->buf = taosMemoryMalloc(pInfo->colBytes);
|
pInfo->buf = taosMemoryMalloc(pInfo->colBytes);
|
||||||
if (NULL == pInfo->buf) {
|
if (NULL == pInfo->buf) {
|
||||||
taosHashCleanup(pInfo->pHash);
|
taosHashCleanup(pInfo->pHash);
|
||||||
|
pInfo->pHash = NULL;
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -6016,6 +6018,7 @@ int32_t modeFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
|
||||||
|
|
||||||
static void modeFunctionCleanup(SModeInfo * pInfo) {
|
static void modeFunctionCleanup(SModeInfo * pInfo) {
|
||||||
taosHashCleanup(pInfo->pHash);
|
taosHashCleanup(pInfo->pHash);
|
||||||
|
pInfo->pHash = NULL;
|
||||||
taosMemoryFreeClear(pInfo->buf);
|
taosMemoryFreeClear(pInfo->buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -16,6 +16,7 @@
|
||||||
|
|
||||||
#ifndef TAOS_METRIC_FORMATTER_I_H
|
#ifndef TAOS_METRIC_FORMATTER_I_H
|
||||||
#define TAOS_METRIC_FORMATTER_I_H
|
#define TAOS_METRIC_FORMATTER_I_H
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
// Private
|
// Private
|
||||||
#include "taos_metric_formatter_t.h"
|
#include "taos_metric_formatter_t.h"
|
||||||
|
@ -57,8 +58,8 @@ int taos_metric_formatter_load_l_value(taos_metric_formatter_t *metric_formatter
|
||||||
/**
|
/**
|
||||||
* @brief API PRIVATE Loads the formatter with a metric sample
|
* @brief API PRIVATE Loads the formatter with a metric sample
|
||||||
*/
|
*/
|
||||||
int taos_metric_formatter_load_sample(taos_metric_formatter_t *metric_formatter, taos_metric_sample_t *sample,
|
int taos_metric_formatter_load_sample(taos_metric_formatter_t *metric_formatter, taos_metric_sample_t *sample, char *ts,
|
||||||
char *ts, char *format);
|
char *format);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief API PRIVATE Loads a metric in the string exposition format
|
* @brief API PRIVATE Loads a metric in the string exposition format
|
||||||
|
@ -80,4 +81,5 @@ int taos_metric_formatter_clear(taos_metric_formatter_t *self);
|
||||||
*/
|
*/
|
||||||
char *taos_metric_formatter_dump(taos_metric_formatter_t *metric_formatter);
|
char *taos_metric_formatter_dump(taos_metric_formatter_t *metric_formatter);
|
||||||
|
|
||||||
|
int32_t taos_metric_formatter_get_vgroup_id(char *key);
|
||||||
#endif // TAOS_METRIC_FORMATTER_I_H
|
#endif // TAOS_METRIC_FORMATTER_I_H
|
||||||
|
|
|
@ -20,13 +20,14 @@
|
||||||
#include "taos_alloc.h"
|
#include "taos_alloc.h"
|
||||||
|
|
||||||
// Private
|
// Private
|
||||||
#include "taos_test.h"
|
|
||||||
#include "taos_errors.h"
|
#include "taos_errors.h"
|
||||||
#include "taos_log.h"
|
#include "taos_log.h"
|
||||||
|
#include "taos_metric_formatter_i.h"
|
||||||
#include "taos_metric_i.h"
|
#include "taos_metric_i.h"
|
||||||
#include "taos_metric_sample_i.h"
|
#include "taos_metric_sample_i.h"
|
||||||
#include "taos_metric_sample_t.h"
|
#include "taos_metric_sample_t.h"
|
||||||
#include "taos_metric_t.h"
|
#include "taos_metric_t.h"
|
||||||
|
#include "taos_test.h"
|
||||||
|
|
||||||
taos_counter_t *taos_counter_new(const char *name, const char *help, size_t label_key_count, const char **label_keys) {
|
taos_counter_t *taos_counter_new(const char *name, const char *help, size_t label_key_count, const char **label_keys) {
|
||||||
return (taos_counter_t *)taos_metric_new(TAOS_COUNTER, name, help, label_key_count, label_keys);
|
return (taos_counter_t *)taos_metric_new(TAOS_COUNTER, name, help, label_key_count, label_keys);
|
||||||
|
@ -64,3 +65,49 @@ int taos_counter_add(taos_counter_t *self, double r_value, const char **label_va
|
||||||
if (sample == NULL) return 1;
|
if (sample == NULL) return 1;
|
||||||
return taos_metric_sample_add(sample, r_value);
|
return taos_metric_sample_add(sample, r_value);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int taos_counter_get_keys_size(taos_counter_t *self) { return self->samples->keys->size; }
|
||||||
|
|
||||||
|
int taos_counter_get_vgroup_ids(taos_counter_t *self, char ***keys, int32_t **vgroup_ids, int *list_size) {
|
||||||
|
TAOS_TEST_PARA(self != NULL);
|
||||||
|
if (self == NULL) return 1;
|
||||||
|
if (self->type != TAOS_COUNTER) {
|
||||||
|
TAOS_LOG(TAOS_METRIC_INCORRECT_TYPE);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
if (self->samples == NULL) return 1;
|
||||||
|
(void)pthread_rwlock_rdlock(self->rwlock);
|
||||||
|
taos_linked_list_t *key_list = self->samples->keys;
|
||||||
|
*list_size = key_list->size;
|
||||||
|
int r = 0;
|
||||||
|
*vgroup_ids = (int32_t *)taos_malloc(*list_size * sizeof(int32_t));
|
||||||
|
if (vgroup_ids == NULL) {
|
||||||
|
(void)pthread_rwlock_unlock(self->rwlock);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
*keys = (char **)taos_malloc(*list_size * sizeof(char *));
|
||||||
|
if (keys == NULL) {
|
||||||
|
(void)pthread_rwlock_unlock(self->rwlock);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
int index = 0;
|
||||||
|
for (taos_linked_list_node_t *current_key = key_list->head; current_key != NULL; current_key = current_key->next) {
|
||||||
|
char *key = (char *)current_key->item;
|
||||||
|
int32_t vgroup_id = taos_metric_formatter_get_vgroup_id(key);
|
||||||
|
(*vgroup_ids)[index] = vgroup_id;
|
||||||
|
(*keys)[index] = key;
|
||||||
|
index++;
|
||||||
|
}
|
||||||
|
(void)pthread_rwlock_unlock(self->rwlock);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
|
int taos_counter_delete(taos_counter_t *self, char *key) {
|
||||||
|
TAOS_TEST_PARA(self != NULL);
|
||||||
|
if (self == NULL) return 1;
|
||||||
|
if (self->type != TAOS_COUNTER) {
|
||||||
|
TAOS_LOG(TAOS_METRIC_INCORRECT_TYPE);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
return taos_map_delete(self->samples, key);
|
||||||
|
}
|
|
@ -156,6 +156,21 @@ int taos_metric_formatter_load_l_value(taos_metric_formatter_t *self, const char
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
int32_t taos_metric_formatter_get_vgroup_id(char *key) {
|
||||||
|
char *start, *end;
|
||||||
|
char vgroupid[10];
|
||||||
|
start = strstr(key, "vgroup_id=\"");
|
||||||
|
if (start) {
|
||||||
|
start += strlen("vgroup_id=\"");
|
||||||
|
end = strchr(start, '\"');
|
||||||
|
if (end) {
|
||||||
|
strncpy(vgroupid, start, end - start);
|
||||||
|
vgroupid[end - start] = '\0';
|
||||||
|
}
|
||||||
|
return strtol(vgroupid, NULL, 10);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
/*
|
/*
|
||||||
int taos_metric_formatter_load_sample(taos_metric_formatter_t *self, taos_metric_sample_t *sample,
|
int taos_metric_formatter_load_sample(taos_metric_formatter_t *self, taos_metric_sample_t *sample,
|
||||||
char *ts, char *format) {
|
char *ts, char *format) {
|
||||||
|
|
|
@ -4681,7 +4681,8 @@ int32_t translateTable(STranslateContext* pCxt, SNode** pTable, SNode* pJoinPare
|
||||||
pCxt, toName(pCxt->pParseCxt->acctId, pRealTable->table.dbName, pRealTable->table.tableName, &name),
|
pCxt, toName(pCxt->pParseCxt->acctId, pRealTable->table.dbName, pRealTable->table.tableName, &name),
|
||||||
&(pRealTable->pMeta), true);
|
&(pRealTable->pMeta), true);
|
||||||
if (TSDB_CODE_SUCCESS != code) {
|
if (TSDB_CODE_SUCCESS != code) {
|
||||||
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GET_META_ERROR, tstrerror(code));
|
(void)generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GET_META_ERROR, tstrerror(code));
|
||||||
|
return code;
|
||||||
}
|
}
|
||||||
#ifdef TD_ENTERPRISE
|
#ifdef TD_ENTERPRISE
|
||||||
if (TSDB_VIEW_TABLE == pRealTable->pMeta->tableType && (!pCurrSmt->tagScan || pCxt->pParseCxt->biMode)) {
|
if (TSDB_VIEW_TABLE == pRealTable->pMeta->tableType && (!pCurrSmt->tagScan || pCxt->pParseCxt->biMode)) {
|
||||||
|
@ -6783,6 +6784,10 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t translateSelect(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
static int32_t translateSelect(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||||
|
if (pCxt->pParseCxt && pCxt->pParseCxt->setQueryFp) {
|
||||||
|
(*pCxt->pParseCxt->setQueryFp)(pCxt->pParseCxt->requestRid);
|
||||||
|
}
|
||||||
|
|
||||||
if (NULL == pSelect->pFromTable) {
|
if (NULL == pSelect->pFromTable) {
|
||||||
return translateSelectWithoutFrom(pCxt, pSelect);
|
return translateSelectWithoutFrom(pCxt, pSelect);
|
||||||
} else {
|
} else {
|
||||||
|
@ -6907,6 +6912,10 @@ static int32_t checkSetOperLimit(STranslateContext* pCxt, SLimitNode* pLimit) {
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t translateSetOperator(STranslateContext* pCxt, SSetOperator* pSetOperator) {
|
static int32_t translateSetOperator(STranslateContext* pCxt, SSetOperator* pSetOperator) {
|
||||||
|
if (pCxt->pParseCxt && pCxt->pParseCxt->setQueryFp) {
|
||||||
|
(*pCxt->pParseCxt->setQueryFp)(pCxt->pParseCxt->requestRid);
|
||||||
|
}
|
||||||
|
|
||||||
int32_t code = translateQuery(pCxt, pSetOperator->pLeft);
|
int32_t code = translateQuery(pCxt, pSetOperator->pLeft);
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = resetHighLevelTranslateNamespace(pCxt);
|
code = resetHighLevelTranslateNamespace(pCxt);
|
||||||
|
|
|
@ -366,9 +366,9 @@ TEST_F(ParserSelectTest, semanticCheck) {
|
||||||
run("SELECT t1.c1, t1.cc1 FROM t1", TSDB_CODE_PAR_INVALID_COLUMN);
|
run("SELECT t1.c1, t1.cc1 FROM t1", TSDB_CODE_PAR_INVALID_COLUMN);
|
||||||
|
|
||||||
// TSDB_CODE_PAR_GET_META_ERROR
|
// TSDB_CODE_PAR_GET_META_ERROR
|
||||||
run("SELECT * FROM t10", TSDB_CODE_PAR_GET_META_ERROR);
|
run("SELECT * FROM t10", TSDB_CODE_PAR_TABLE_NOT_EXIST);
|
||||||
|
|
||||||
run("SELECT * FROM test.t10", TSDB_CODE_PAR_GET_META_ERROR);
|
run("SELECT * FROM test.t10", TSDB_CODE_PAR_TABLE_NOT_EXIST);
|
||||||
|
|
||||||
// TSDB_CODE_PAR_TABLE_NOT_EXIST
|
// TSDB_CODE_PAR_TABLE_NOT_EXIST
|
||||||
run("SELECT t2.c1 FROM t1", TSDB_CODE_PAR_TABLE_NOT_EXIST);
|
run("SELECT t2.c1 FROM t1", TSDB_CODE_PAR_TABLE_NOT_EXIST);
|
||||||
|
|
|
@ -574,7 +574,7 @@ int32_t queryProcessTableMetaRsp(void *output, char *msg, int32_t msgSize) {
|
||||||
goto PROCESS_META_OVER;
|
goto PROCESS_META_OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (0 != strcmp(metaRsp.dbFName, TSDB_INFORMATION_SCHEMA_DB) &&
|
if (!IS_SYS_DBNAME(metaRsp.dbFName) &&
|
||||||
!tIsValidSchema(metaRsp.pSchemas, metaRsp.numOfColumns, metaRsp.numOfTags)) {
|
!tIsValidSchema(metaRsp.pSchemas, metaRsp.numOfColumns, metaRsp.numOfTags)) {
|
||||||
code = TSDB_CODE_TSC_INVALID_VALUE;
|
code = TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
goto PROCESS_META_OVER;
|
goto PROCESS_META_OVER;
|
||||||
|
|
|
@ -1274,6 +1274,16 @@ int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void filterFreeGroup(void *pItem) {
|
||||||
|
if (pItem == NULL) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
SFilterGroup *p = (SFilterGroup *)pItem;
|
||||||
|
taosMemoryFreeClear(p->unitIdxs);
|
||||||
|
taosMemoryFreeClear(p->unitFlags);
|
||||||
|
}
|
||||||
|
|
||||||
int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) {
|
int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) {
|
||||||
SOperatorNode *node = (SOperatorNode *)tree;
|
SOperatorNode *node = (SOperatorNode *)tree;
|
||||||
int32_t ret = TSDB_CODE_SUCCESS;
|
int32_t ret = TSDB_CODE_SUCCESS;
|
||||||
|
@ -1336,9 +1346,11 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) {
|
||||||
SFilterGroup fgroup = {0};
|
SFilterGroup fgroup = {0};
|
||||||
code = filterAddUnitToGroup(&fgroup, uidx);
|
code = filterAddUnitToGroup(&fgroup, uidx);
|
||||||
if (TSDB_CODE_SUCCESS != code) {
|
if (TSDB_CODE_SUCCESS != code) {
|
||||||
|
filterFreeGroup((void*)&fgroup);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (NULL == taosArrayPush(group, &fgroup)) {
|
if (NULL == taosArrayPush(group, &fgroup)) {
|
||||||
|
filterFreeGroup((void*)&fgroup);
|
||||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1658,16 +1670,6 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filterFreeGroup(void *pItem) {
|
|
||||||
if (pItem == NULL) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
SFilterGroup *p = (SFilterGroup *)pItem;
|
|
||||||
taosMemoryFreeClear(p->unitIdxs);
|
|
||||||
taosMemoryFreeClear(p->unitFlags);
|
|
||||||
}
|
|
||||||
|
|
||||||
EDealRes fltTreeToGroup(SNode *pNode, void *pContext) {
|
EDealRes fltTreeToGroup(SNode *pNode, void *pContext) {
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
SArray *preGroup = NULL;
|
SArray *preGroup = NULL;
|
||||||
|
@ -2944,25 +2946,44 @@ int32_t filterRewrite(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t gResNum
|
||||||
for (int32_t n = 0; n < usize; ++n) {
|
for (int32_t n = 0; n < usize; ++n) {
|
||||||
SFilterUnit *u = (SFilterUnit *)taosArrayGetP((SArray *)colInfo->info, n);
|
SFilterUnit *u = (SFilterUnit *)taosArrayGetP((SArray *)colInfo->info, n);
|
||||||
if (NULL == u) {
|
if (NULL == u) {
|
||||||
FLT_ERR_JRET(TSDB_CODE_OUT_OF_RANGE);
|
code = TSDB_CODE_OUT_OF_RANGE;
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
FLT_ERR_JRET(filterAddUnitFromUnit(info, &oinfo, u, &uidx));
|
code = filterAddUnitFromUnit(info, &oinfo, u, &uidx);
|
||||||
FLT_ERR_JRET(filterAddUnitToGroup(&ng, uidx));
|
if (TSDB_CODE_SUCCESS != code) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
code = filterAddUnitToGroup(&ng, uidx);
|
||||||
|
if (TSDB_CODE_SUCCESS != code) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (TSDB_CODE_SUCCESS != code) {
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
if (TSDB_CODE_SUCCESS != code) {
|
||||||
|
filterFreeGroup((void*)&ng);
|
||||||
|
FLT_ERR_JRET(code);
|
||||||
|
}
|
||||||
|
|
||||||
if (colInfo->type != RANGE_TYPE_MR_CTX) {
|
if (colInfo->type != RANGE_TYPE_MR_CTX) {
|
||||||
fltError("filterRewrite get invalid col type : %d", colInfo->type);
|
fltError("filterRewrite get invalid col type : %d", colInfo->type);
|
||||||
FLT_ERR_JRET(TSDB_CODE_QRY_FILTER_INVALID_TYPE);
|
FLT_ERR_JRET(TSDB_CODE_QRY_FILTER_INVALID_TYPE);
|
||||||
}
|
}
|
||||||
|
|
||||||
FLT_ERR_JRET(filterAddGroupUnitFromCtx(info, &oinfo, colInfo->info, res->colIdx[m], &ng, optr, group));
|
+            code = filterAddGroupUnitFromCtx(info, &oinfo, colInfo->info, res->colIdx[m], &ng, optr, group);
+            if (TSDB_CODE_SUCCESS != code) {
+              filterFreeGroup((void*)&ng);
+              FLT_ERR_JRET(code);
+            }
       }
     }

     if (ng.unitNum > 0) {
       if (NULL == taosArrayPush(group, &ng)) {
+        filterFreeGroup((void*)&ng);
         FLT_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
       }
     }

@@ -5238,22 +5259,20 @@ int32_t filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p,
     *pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
     return TSDB_CODE_SUCCESS;
   }

+  int32_t      code = TSDB_CODE_SUCCESS;
   SScalarParam output = {0};
   SDataType    type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};

-  int32_t code = sclCreateColumnInfoData(&type, pSrc->info.rows, &output);
-  if (code != TSDB_CODE_SUCCESS) {
-    return code;
-  }
+  FLT_ERR_JRET(sclCreateColumnInfoData(&type, pSrc->info.rows, &output));

   if (info->scalarMode) {
     SArray *pList = taosArrayInit(1, POINTER_BYTES);
     if (NULL == pList) {
-      FLT_ERR_RET(terrno);
+      FLT_ERR_JRET(terrno);
     }
     if (NULL == taosArrayPush(pList, &pSrc)) {
-      FLT_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+      taosArrayDestroy(pList);
+      FLT_ERR_JRET(terrno);
     }

     code = scalarCalculate(info->sclCtx.node, pList, &output);
@@ -5261,7 +5280,7 @@ int32_t filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p,

     *p = output.columnData;

-    FLT_ERR_RET(code);
+    FLT_ERR_JRET(code);

     if (output.numOfQualified == output.numOfRows) {
       *pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
@@ -5277,11 +5296,12 @@ int32_t filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p,
   output.numOfRows = pSrc->info.rows;

   if (*p == NULL) {
-    return TSDB_CODE_APP_ERROR;
+    fltError("filterExecute failed, column data is NULL");
+    FLT_ERR_JRET(TSDB_CODE_APP_ERROR);
   }

   bool keepAll = false;
-  FLT_ERR_RET((info->func)(info, pSrc->info.rows, *p, statis, numOfCols, &output.numOfQualified, &keepAll));
+  FLT_ERR_JRET((info->func)(info, pSrc->info.rows, *p, statis, numOfCols, &output.numOfQualified, &keepAll));

   // todo this should be return during filter procedure
   if (keepAll) {
@@ -5304,6 +5324,10 @@ int32_t filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p,
   }

   return TSDB_CODE_SUCCESS;
+
+_return:
+  sclFreeParam(&output);
+  *p = NULL;
+  return code;
 }

 typedef struct SClassifyConditionCxt {
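The filterExecute hunks above replace early returns (`FLT_ERR_RET`) with jump-to-cleanup macros (`FLT_ERR_JRET`) so that every failure path runs through the new `_return:` label, frees `output`, and resets `*p`. A minimal standalone sketch of that shape follows; the `ERR_JRET` macro and the buffer helpers here are illustrative stand-ins, not the TDengine definitions.

```c
#include <stdio.h>
#include <stdlib.h>

// Set the error code and jump to the single cleanup label on failure.
#define ERR_JRET(c)      \
  do {                   \
    code = (c);          \
    if (code != 0) {     \
      goto _return;      \
    }                    \
  } while (0)

// Illustrative stand-in for a resource that must be released on every path.
static int bufferInit(char **pp, size_t n) {
  *pp = calloc(n, 1);
  return (*pp == NULL) ? -1 : 0;
}

static int filterLikeExecute(char **pResult) {
  int   code = 0;
  char *buf = NULL;

  ERR_JRET(bufferInit(&buf, 64));   // a failure here jumps to _return with nothing to free
  ERR_JRET(buf[0] == 0 ? 0 : -1);   // later failures still release buf below

  *pResult = buf;                   // success: hand ownership to the caller
  return 0;

_return:
  free(buf);                        // single cleanup point, mirrors sclFreeParam(&output)
  *pResult = NULL;                  // mirrors "*p = NULL" in the hunk
  return code;
}

int main(void) {
  char *res = NULL;
  printf("code=%d res=%p\n", filterLikeExecute(&res), (void *)res);
  free(res);
  return 0;
}
```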
@@ -839,8 +839,8 @@ int32_t sclExecLogic(SLogicConditionNode *node, SScalarCtx *ctx, SScalarParam *o
       }

       // 1=1 and tag_column = 1
-      int32_t ind = (i >= params[m].numOfRows)? (params[m].numOfRows - 1):i;
-      char* p = colDataGetData(params[m].columnData, ind);
+      int32_t ind = (i >= params[m].numOfRows) ? (params[m].numOfRows - 1) : i;
+      char   *p = colDataGetData(params[m].columnData, ind);

       GET_TYPED_DATA(value, bool, params[m].columnData->info.type, p);

@@ -1029,7 +1029,7 @@ _return:
 EDealRes sclRewriteNullInOptr(SNode **pNode, SScalarCtx *ctx, EOperatorType opType) {
   if (opType <= OP_TYPE_CALC_MAX) {
     SValueNode *res = NULL;
-    ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&res);
+    ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode **)&res);
     if (NULL == res) {
       sclError("make value node failed");
       return DEAL_RES_ERROR;
@@ -1041,7 +1041,7 @@ EDealRes sclRewriteNullInOptr(SNode **pNode, SScalarCtx *ctx, EOperatorType opTy
     *pNode = (SNode *)res;
   } else {
     SValueNode *res = NULL;
-    ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&res);
+    ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode **)&res);
     if (NULL == res) {
       sclError("make value node failed");
       return DEAL_RES_ERROR;
@@ -1094,7 +1094,6 @@ static uint8_t sclGetOpValueNodeTsPrecision(SNode *pLeft, SNode *pRight) {
   return 0;
 }

-
 int32_t sclConvertOpValueNodeTs(SOperatorNode *node) {
   if (node->pLeft && SCL_IS_VAR_VALUE_NODE(node->pLeft)) {
     if (node->pRight && (TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode *)node->pRight)->resType.type)) {
@@ -1110,8 +1109,7 @@ int32_t sclConvertOpValueNodeTs(SOperatorNode *node) {
       SNode *pNode;
       FOREACH(pNode, ((SNodeListNode *)node->pRight)->pNodeList) {
         if (SCL_IS_VAR_VALUE_NODE(pNode)) {
-          SCL_ERR_RET(
-              sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, pNode), (SValueNode *)pNode));
+          SCL_ERR_RET(sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, pNode), (SValueNode *)pNode));
         }
       }
     }
@@ -1120,7 +1118,6 @@ int32_t sclConvertOpValueNodeTs(SOperatorNode *node) {
   return TSDB_CODE_SUCCESS;
 }

-
 int32_t sclConvertCaseWhenValueNodeTs(SCaseWhenNode *node) {
   if (NULL == node->pCase) {
     return TSDB_CODE_SUCCESS;
@@ -1201,7 +1198,7 @@ EDealRes sclRewriteFunction(SNode **pNode, SScalarCtx *ctx) {
   }

   SValueNode *res = NULL;
-  ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&res);
+  ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode **)&res);
   if (NULL == res) {
     sclError("make value node failed");
     sclFreeParam(&output);
@@ -1275,7 +1272,7 @@ EDealRes sclRewriteLogic(SNode **pNode, SScalarCtx *ctx) {
   }

   SValueNode *res = NULL;
-  ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&res);
+  ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode **)&res);
   if (NULL == res) {
     sclError("make value node failed");
     sclFreeParam(&output);
@@ -1346,7 +1343,7 @@ EDealRes sclRewriteOperator(SNode **pNode, SScalarCtx *ctx) {
   }

   SValueNode *res = NULL;
-  ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&res);
+  ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode **)&res);
   if (NULL == res) {
     sclError("make value node failed");
     sclFreeParam(&output);
@@ -1409,7 +1406,7 @@ EDealRes sclRewriteCaseWhen(SNode **pNode, SScalarCtx *ctx) {
   }

   SValueNode *res = NULL;
-  ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&res);
+  ctx->code = nodesMakeNode(QUERY_NODE_VALUE, (SNode **)&res);
   if (NULL == res) {
     sclError("make value node failed");
     sclFreeParam(&output);
@@ -1426,7 +1423,8 @@ EDealRes sclRewriteCaseWhen(SNode **pNode, SScalarCtx *ctx) {
   } else {
     int32_t type = output.columnData->info.type;
     if (IS_VAR_DATA_TYPE(type)) {  // todo refactor
-      res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData) + 1, sizeof(char));  // add \0 to the end for print json value
+      res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData) + 1,
+                                      sizeof(char));  // add \0 to the end for print json value
       if (NULL == res->datum.p) {
         sclError("calloc %d failed", (int)(varDataTLen(output.columnData->pData) + 1));
         sclFreeParam(&output);
@@ -1683,8 +1681,7 @@ static int32_t sclGetMathOperatorResType(SOperatorNode *pOp) {
   SDataType rdt = ((SExprNode *)(pOp->pRight))->resType;

   if ((TSDB_DATA_TYPE_TIMESTAMP == ldt.type && TSDB_DATA_TYPE_TIMESTAMP == rdt.type) ||
-      TSDB_DATA_TYPE_VARBINARY == ldt.type ||
-      TSDB_DATA_TYPE_VARBINARY == rdt.type ||
+      TSDB_DATA_TYPE_VARBINARY == ldt.type || TSDB_DATA_TYPE_VARBINARY == rdt.type ||
       (TSDB_DATA_TYPE_TIMESTAMP == ldt.type && (IS_VAR_DATA_TYPE(rdt.type) || IS_FLOAT_TYPE(rdt.type))) ||
       (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && (IS_VAR_DATA_TYPE(ldt.type) || IS_FLOAT_TYPE(ldt.type)))) {
     return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -1720,13 +1717,14 @@ static int32_t sclGetCompOperatorResType(SOperatorNode *pOp) {
       return TSDB_CODE_TSC_INVALID_OPERATION;
     }
     SDataType rdt = ((SExprNode *)(pOp->pRight))->resType;
-    if (ldt.type == TSDB_DATA_TYPE_VARBINARY || !IS_VAR_DATA_TYPE(ldt.type) || QUERY_NODE_VALUE != nodeType(pOp->pRight) ||
+    if (ldt.type == TSDB_DATA_TYPE_VARBINARY || !IS_VAR_DATA_TYPE(ldt.type) ||
+        QUERY_NODE_VALUE != nodeType(pOp->pRight) ||
         (!IS_STR_DATA_TYPE(rdt.type) && (rdt.type != TSDB_DATA_TYPE_NULL))) {
       return TSDB_CODE_TSC_INVALID_OPERATION;
     }
-    if (nodesIsMatchRegularOp(pOp)) {
-      SValueNode* node = (SValueNode*)(pOp->pRight);
-      if(checkRegexPattern(node->literal) != TSDB_CODE_SUCCESS){
+    SValueNode *node = (SValueNode *)(pOp->pRight);
+    if (!node->placeholderNo && nodesIsMatchRegularOp(pOp)) {
+      if (checkRegexPattern(node->literal) != TSDB_CODE_SUCCESS) {
         return TSDB_CODE_PAR_REGULAR_EXPRESSION_ERROR;
       }
     }
@@ -1762,7 +1760,7 @@ static int32_t sclGetBitwiseOperatorResType(SOperatorNode *pOp) {
   }
   SDataType ldt = ((SExprNode *)(pOp->pLeft))->resType;
   SDataType rdt = ((SExprNode *)(pOp->pRight))->resType;
-  if(TSDB_DATA_TYPE_VARBINARY == ldt.type || TSDB_DATA_TYPE_VARBINARY == rdt.type){
+  if (TSDB_DATA_TYPE_VARBINARY == ldt.type || TSDB_DATA_TYPE_VARBINARY == rdt.type) {
     return TSDB_CODE_TSC_INVALID_OPERATION;
   }
   pOp->node.resType.type = TSDB_DATA_TYPE_BIGINT;
@@ -65,12 +65,7 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_
            ", prev:%" PRId64,
            id, upstreamTaskId, vgId, stage, pInfo->stage);
     // record the checkpoint failure id and sent to mnode
-    streamMutexLock(&pTask->lock);
-    ETaskStatus status = streamTaskGetStatus(pTask).state;
-    if (status == TASK_STATUS__CK) {
-      streamTaskSetFailedCheckpointId(pTask);
-    }
-    streamMutexUnlock(&pTask->lock);
+    streamTaskSetCheckpointFailed(pTask);
   }

   if (pInfo->stage != stage) {
@@ -673,6 +673,15 @@ void streamTaskSetFailedCheckpointId(SStreamTask* pTask) {
   }
 }

+void streamTaskSetCheckpointFailed(SStreamTask* pTask) {
+  streamMutexLock(&pTask->lock);
+  ETaskStatus status = streamTaskGetStatus(pTask).state;
+  if (status == TASK_STATUS__CK) {
+    streamTaskSetFailedCheckpointId(pTask);
+  }
+  streamMutexUnlock(&pTask->lock);
+}
+
 static int32_t getCheckpointDataMeta(const char* id, const char* path, SArray* list) {
   int32_t code = 0;
   int32_t cap = strlen(path) + 64;
@@ -1111,26 +1120,20 @@ void streamTaskGetTriggerRecvStatus(SStreamTask* pTask, int32_t* pRecved, int32_

 // record the dispatch checkpoint trigger info in the list
 // memory insufficient may cause the stream computing stopped
-void streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) {
+int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) {
   SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo;

   int64_t now = taosGetTimestampMs();

   streamMutexLock(&pInfo->lock);

-  // outputQ should be empty here
-  if (streamQueueGetNumOfUnAccessedItems(pTask->outputq.queue) > 0) {
-    stFatal("s-task:%s items are still in outputQ, failed to init trigger dispatch info", pTask->id.idStr);
-    return;
-  }
-
   pInfo->dispatchTrigger = true;
   if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
     STaskDispatcherFixed* pDispatch = &pTask->outputInfo.fixedDispatcher;

     STaskTriggerSendInfo p = {.sendTs = now, .recved = false, .nodeId = pDispatch->nodeId, .taskId = pDispatch->taskId};
     void* px = taosArrayPush(pInfo->pDispatchTriggerList, &p);
-    if (px == NULL) {
-      // pause the stream task, if memory not enough
+    if (px == NULL) {  // pause the stream task, if memory not enough
+      streamMutexUnlock(&pInfo->lock);
+      return terrno;
     }
   } else {
     for (int32_t i = 0; i < streamTaskGetNumOfDownstream(pTask); ++i) {
@@ -1141,13 +1144,15 @@ void streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) {

       STaskTriggerSendInfo p = {.sendTs = now, .recved = false, .nodeId = pVgInfo->vgId, .taskId = pVgInfo->taskId};
       void* px = taosArrayPush(pInfo->pDispatchTriggerList, &p);
-      if (px == NULL) {
-        // pause the stream task, if memory not enough
+      if (px == NULL) {  // pause the stream task, if memory not enough
+        streamMutexUnlock(&pInfo->lock);
+        return terrno;
       }
     }
   }

   streamMutexUnlock(&pInfo->lock);
+  return 0;
 }

 int32_t streamTaskGetNumOfConfirmed(SActiveCheckpointInfo* pInfo) {
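The checkpoint hunks above fold the lock, the status check, and the failure flagging into a single `streamTaskSetCheckpointFailed()` helper, and change `streamTaskInitTriggerDispatchInfo()` to return an error code while releasing the mutex on every exit path. The sketch below mirrors that shape with a plain pthread mutex standing in for `streamMutexLock`/`streamMutexUnlock`; the `Task` struct and error values are assumptions for illustration only.

```c
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

// Illustrative stand-in for SStreamTask: a status word guarded by a lock.
typedef struct {
  pthread_mutex_t lock;
  int             status;        // 1 == "in checkpoint"
  long long       failedChkptId;
} Task;

// Mirrors the shape of streamTaskSetCheckpointFailed(): the lock, the status
// check and the flagging live in one helper so callers cannot get it wrong.
static void taskSetCheckpointFailed(Task *t, long long chkptId) {
  pthread_mutex_lock(&t->lock);
  if (t->status == 1) {          // only mark failure while checkpointing
    t->failedChkptId = chkptId;
  }
  pthread_mutex_unlock(&t->lock);
}

// Mirrors the int32_t-returning streamTaskInitTriggerDispatchInfo(): every
// early exit releases the lock before returning an error code.
static int taskRecordTrigger(Task *t, int willFailPush) {
  pthread_mutex_lock(&t->lock);
  if (willFailPush) {            // stand-in for taosArrayPush() returning NULL
    pthread_mutex_unlock(&t->lock);
    return ENOMEM;
  }
  t->failedChkptId = 0;
  pthread_mutex_unlock(&t->lock);
  return 0;
}

int main(void) {
  Task t = {.lock = PTHREAD_MUTEX_INITIALIZER, .status = 1, .failedChkptId = -1};
  taskSetCheckpointFailed(&t, 42);
  printf("failed id: %lld, push error: %d\n", t.failedChkptId, taskRecordTrigger(&t, 1));
  return 0;
}
```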
@@ -727,6 +727,9 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S

 int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
   const char* id = pTask->id.idStr;
+  int32_t     code = 0;
+  SStreamDataBlock* pBlock = NULL;
+
   int32_t numOfElems = streamQueueGetNumOfItems(pTask->outputq.queue);
   if (numOfElems > 0) {
     double size = SIZE_IN_MiB(taosQueueMemorySize(pTask->outputq.queue->pQueue));
@@ -755,7 +758,7 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
     stDebug("s-task:%s start to dispatch msg, set output status:%d", id, pTask->outputq.status);
   }

-  SStreamDataBlock* pBlock = NULL;
+  while (1) {
     streamQueueNextItem(pTask->outputq.queue, (SStreamQueueItem**)&pBlock);
     if (pBlock == NULL) {
       atomic_store_8(&pTask->outputq.status, TASK_OUTPUT_STATUS__NORMAL);
@@ -776,14 +779,28 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
     initDispatchInfo(&pTask->msgInfo, pTask->execInfo.dispatch);
     streamMutexUnlock(&pTask->msgInfo.lock);

-    int32_t code = doBuildDispatchMsg(pTask, pBlock);
+    code = doBuildDispatchMsg(pTask, pBlock);
     if (code == 0) {
       destroyStreamDataBlock(pBlock);
     } else {  // todo handle build dispatch msg failed
     }

     if (type == STREAM_INPUT__CHECKPOINT_TRIGGER) {
-      streamTaskInitTriggerDispatchInfo(pTask);
+      // outputQ should be empty here, otherwise, set the checkpoint failed due to the retrieve req happens
+      if (streamQueueGetNumOfUnAccessedItems(pTask->outputq.queue) > 0) {
+        stError("s-task:%s items are still in outputQ due to downstream retrieve, failed to init trigger dispatch",
+                pTask->id.idStr);
+        streamTaskSetCheckpointFailed(pTask);
+        clearBufferedDispatchMsg(pTask);
+        continue;
+      }
+
+      code = streamTaskInitTriggerDispatchInfo(pTask);
+      if (code != TSDB_CODE_SUCCESS) {  // todo handle error
+      }
     }
+
+    break;
+  }

   code = sendDispatchMsg(pTask, pTask->msgInfo.pData);
@@ -98,14 +98,13 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray*
 void streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, int64_t* totalSize, int32_t* totalBlocks) {
   int32_t code = TSDB_CODE_SUCCESS;
   void*   pExecutor = pTask->exec.pExecutor;

-  *totalBlocks = 0;
-  *totalSize = 0;
-
   int32_t size = 0;
   int32_t numOfBlocks = 0;
   SArray* pRes = NULL;

+  *totalBlocks = 0;
+  *totalSize = 0;
+
   while (1) {
     if (pRes == NULL) {
       pRes = taosArrayInit(4, sizeof(SSDataBlock));
@@ -131,6 +130,7 @@ void streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, int64_t* to
     if (pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
       SSDataBlock block = {0};
       const SStreamDataBlock* pRetrieveBlock = (const SStreamDataBlock*)pItem;

       int32_t num = taosArrayGetSize(pRetrieveBlock->blocks);
       if (num != 1) {
         stError("s-task:%s invalid retrieve block number:%d, ignore", pTask->id.idStr, num);
@@ -596,12 +596,32 @@ void streamProcessTransstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock)
 // static void streamTaskSetIdleInfo(SStreamTask* pTask, int32_t idleTime) { pTask->status.schedIdleTime = idleTime; }
 static void setLastExecTs(SStreamTask* pTask, int64_t ts) { pTask->status.lastExecTs = ts; }

+static void doRecordThroughput(STaskExecStatisInfo* pInfo, int64_t totalBlocks, int64_t totalSize, int64_t blockSize,
+                               double st, const char* id) {
+  double el = (taosGetTimestampMs() - st) / 1000.0;
+
+  stDebug("s-task:%s batch of input blocks exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%" PRId64, id,
+          el, SIZE_IN_MiB(totalSize), totalBlocks);
+
+  pInfo->outputDataBlocks += totalBlocks;
+  pInfo->outputDataSize += totalSize;
+  if (fabs(el - 0.0) <= DBL_EPSILON) {
+    pInfo->procsThroughput = 0;
+    pInfo->outputThroughput = 0;
+  } else {
+    pInfo->outputThroughput = (totalSize / el);
+    pInfo->procsThroughput = (blockSize / el);
+  }
+}
+
 static void doStreamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pBlock, int32_t num) {
   const char*      id = pTask->id.idStr;
   int32_t          blockSize = 0;
   int64_t          st = taosGetTimestampMs();
   SCheckpointInfo* pInfo = &pTask->chkInfo;
   int64_t          ver = pInfo->processedVer;
+  int64_t          totalSize = 0;
+  int32_t          totalBlocks = 0;

   stDebug("s-task:%s start to process batch blocks, num:%d, type:%s", id, num, streamQueueItemGetTypeStr(pBlock->type));

@@ -611,23 +631,8 @@ static void doStreamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pBlock, i
     return;
   }

-  int64_t totalSize = 0;
-  int32_t totalBlocks = 0;
   streamTaskExecImpl(pTask, pBlock, &totalSize, &totalBlocks);
-
-  double el = (taosGetTimestampMs() - st) / 1000.0;
-  stDebug("s-task:%s batch of input blocks exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d", id, el,
-          SIZE_IN_MiB(totalSize), totalBlocks);
-
-  pTask->execInfo.outputDataBlocks += totalBlocks;
-  pTask->execInfo.outputDataSize += totalSize;
-  if (fabs(el - 0.0) <= DBL_EPSILON) {
-    pTask->execInfo.procsThroughput = 0;
-    pTask->execInfo.outputThroughput = 0;
-  } else {
-    pTask->execInfo.outputThroughput = (totalSize / el);
-    pTask->execInfo.procsThroughput = (blockSize / el);
-  }
+  doRecordThroughput(&pTask->execInfo, totalBlocks, totalSize, blockSize, st, pTask->id.idStr);

   // update the currentVer if processing the submit blocks.
   if (!(pInfo->checkpointVer <= pInfo->nextProcessVer && ver >= pInfo->checkpointVer)) {
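The new `doRecordThroughput()` above divides processed bytes by elapsed seconds and treats an elapsed time within `DBL_EPSILON` of zero as zero throughput rather than dividing by it. A small self-contained version of that guard, with made-up numbers, is shown below.

```c
#include <float.h>
#include <math.h>
#include <stdio.h>

// Throughput in bytes/second, with the same epsilon guard the hunk uses so a
// batch that finishes in effectively zero time does not divide by zero.
static double throughput(long long totalBytes, double elapsedSec) {
  if (fabs(elapsedSec - 0.0) <= DBL_EPSILON) {
    return 0.0;  // mirrors procsThroughput/outputThroughput being set to 0
  }
  return (double)totalBytes / elapsedSec;
}

int main(void) {
  printf("%.2f B/s\n", throughput(8 * 1024 * 1024, 0.25));  // 33554432.00
  printf("%.2f B/s\n", throughput(8 * 1024 * 1024, 0.0));   // 0.00, no division by zero
  return 0;
}
```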
@@ -1254,16 +1254,7 @@ int32_t streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta, SArray** pList) {
       continue;
     }

-    streamMutexLock(&pTask->lock);
-
-    SStreamTaskState pState = streamTaskGetStatus(pTask);
-    if (pState.state == TASK_STATUS__CK) {
-      streamTaskSetFailedCheckpointId(pTask);
-    } else {
-      stDebug("s-task:%s status:%s not reset the checkpoint", pTask->id.idStr, pState.name);
-    }
-
-    streamMutexUnlock(&pTask->lock);
+    streamTaskSetCheckpointFailed(pTask);
     streamMetaReleaseTask(pMeta, pTask);
   }

@@ -287,7 +287,7 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
         "s-task:%s inputQ is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) stop to push data",
         pTask->id.idStr, STREAM_TASK_QUEUE_CAPACITY, STREAM_TASK_QUEUE_CAPACITY_IN_SIZE, total, size);
       streamDataSubmitDestroy(px);
-      return -1;
+      return TSDB_CODE_OUT_OF_MEMORY;
     }

     int32_t msgLen = px->submit.msgLen;
@@ -312,7 +312,7 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
       stTrace("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
               pTask->id.idStr, STREAM_TASK_QUEUE_CAPACITY, STREAM_TASK_QUEUE_CAPACITY_IN_SIZE, total, size);
       streamFreeQitem(pItem);
-      return -1;
+      return TSDB_CODE_OUT_OF_MEMORY;
     }

     int32_t code = taosWriteQitem(pQueue, pItem);
@@ -1098,15 +1098,12 @@ static int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq*
     return terrno = code;
   }

-  // enqueue
-  stDebug("s-task:%s (vgId:%d level:%d) recv retrieve req from task:0x%x(vgId:%d),QID:0x%" PRIx64, pTask->id.idStr,
-          pTask->pMeta->vgId, pTask->info.taskLevel, pReq->srcTaskId, pReq->srcNodeId, pReq->reqId);
-
   pData->type = STREAM_INPUT__DATA_RETRIEVE;
   pData->srcVgId = 0;

   code = streamRetrieveReqToData(pReq, pData, pTask->id.idStr);
   if (code != TSDB_CODE_SUCCESS) {
+    stError("s-task:%s failed to convert retrieve-data to block, code:%s", pTask->id.idStr, tstrerror(code));
     taosFreeQitem(pData);
     return code;
   }
@@ -275,14 +275,14 @@ bool transAsyncPoolIsEmpty(SAsyncPool* pool);
 #define ASYNC_CHECK_HANDLE(exh1, id) \
   do { \
     if (id > 0) { \
-      SExHandle* exh2 = transAcquireExHandle(transGetRefMgt(), id); \
+      SExHandle* exh2 = transAcquireExHandle(transGetSvrRefMgt(), id); \
       if (exh2 == NULL || id != exh2->refId) { \
-        tDebug("ref:%" PRId64 " already released" PRIu64, id); \
+        tDebug("ref:%" PRId64 " already released", id); \
         code = terrno; \
         goto _return1; \
       } \
     } else { \
-      tWarn("invalid handle to release"); \
+      tDebug("invalid handle to release"); \
       goto _return2; \
     } \
   } while (0)
@@ -443,6 +443,7 @@ int32_t transReleaseExHandle(int32_t refMgt, int64_t refId);
 void transDestroyExHandle(void* handle);

 int32_t transGetRefMgt();
+int32_t transGetSvrRefMgt();
 int32_t transGetInstMgt();
 int32_t transGetSyncMsgMgt();

@@ -20,6 +20,7 @@
 static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT;

 static int32_t refMgt;
+static int32_t svrRefMgt;
 static int32_t instMgt;
 static int32_t transSyncMsgMgt;

@@ -704,12 +705,14 @@ bool transEpSetIsEqual2(SEpSet* a, SEpSet* b) {

 static void transInitEnv() {
   refMgt = transOpenRefMgt(50000, transDestroyExHandle);
+  svrRefMgt = transOpenRefMgt(50000, transDestroyExHandle);
   instMgt = taosOpenRef(50, rpcCloseImpl);
   transSyncMsgMgt = taosOpenRef(50, transDestroySyncMsg);
   (void)uv_os_setenv("UV_TCP_SINGLE_ACCEPT", "1");
 }
 static void transDestroyEnv() {
   transCloseRefMgt(refMgt);
+  transCloseRefMgt(svrRefMgt);
   transCloseRefMgt(instMgt);
   transCloseRefMgt(transSyncMsgMgt);
 }
@@ -724,6 +727,7 @@ int32_t transInit() {
 }

 int32_t transGetRefMgt() { return refMgt; }
+int32_t transGetSvrRefMgt() { return svrRefMgt; }
 int32_t transGetInstMgt() { return instMgt; }
 int32_t transGetSyncMsgMgt() { return transSyncMsgMgt; }
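The transport hunks above introduce a second reference registry, `svrRefMgt`, opened and closed alongside the existing `refMgt` in `transInitEnv()`/`transDestroyEnv()` and exposed through `transGetSvrRefMgt()`, so server-connection handles are tracked separately from the existing handle pool. The toy registry below only illustrates the idea of keeping two independent registries; it is not the `transOpenRefMgt`/`taosOpenRef` implementation.

```c
#include <stdio.h>

// Toy slot-based registry, a stand-in for transOpenRefMgt()/taosOpenRef():
// each registry hands out its own ids and owns its own slots.
typedef struct {
  void *slots[8];
  int   next;
} Registry;

static int regAdd(Registry *r, void *p) {
  if (r->next >= 8) return -1;
  r->slots[r->next] = p;
  return r->next++;
}

static void *regAcquire(Registry *r, int id) {
  return (id >= 0 && id < r->next) ? r->slots[id] : NULL;
}

int main(void) {
  // Mirrors the split in the hunks: one registry for the existing handles
  // (refMgt) and a separate one for server connections (svrRefMgt), so each
  // can be opened, populated and torn down independently, as
  // transInitEnv()/transDestroyEnv() now do.
  Registry refMgt = {0}, svrRefMgt = {0};

  int conn = 1, svrConn = 2;
  int id = regAdd(&refMgt, &conn);
  int svrId = regAdd(&svrRefMgt, &svrConn);

  printf("refMgt[%d]=%p svrRefMgt[%d]=%p\n", id, regAcquire(&refMgt, id), svrId,
         regAcquire(&svrRefMgt, svrId));
  return 0;
}
```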
@@ -373,6 +373,7 @@ static bool uvHandleReq(SSvrConn* pConn) {
   STrans*    pTransInst = pConn->pTransInst;
   SWorkThrd* pThrd = pConn->hostThrd;

+  int8_t         acquire = 0;
   STransMsgHead* pHead = NULL;

   int8_t resetBuf = pConn->status == ConnAcquire ? 0 : 1;
@@ -459,7 +460,13 @@ static bool uvHandleReq(SSvrConn* pConn) {
   // 2. once send out data, cli conn released to conn pool immediately
   // 3. not mixed with persist
   transMsg.info.ahandle = (void*)pHead->ahandle;
-  transMsg.info.handle = (void*)transAcquireExHandle(transGetRefMgt(), pConn->refId);
+
+  if (pHead->noResp == 1) {
+    transMsg.info.handle = NULL;
+  } else {
+    transMsg.info.handle = (void*)transAcquireExHandle(transGetSvrRefMgt(), pConn->refId);
+    acquire = 1;
+  }
   transMsg.info.refId = pConn->refId;
   transMsg.info.traceId = pHead->traceId;
   transMsg.info.cliVer = htonl(pHead->compatibilityVer);
@@ -468,10 +475,10 @@ static bool uvHandleReq(SSvrConn* pConn) {

   tGTrace("%s handle %p conn:%p translated to app, refId:%" PRIu64, transLabel(pTransInst), transMsg.info.handle, pConn,
           pConn->refId);
-  if (transMsg.info.handle == NULL) {
-    tError("%s handle %p conn:%p handle failed to init" PRIu64, transLabel(pTransInst), transMsg.info.handle, pConn);
-    return false;
-  }
+  // if (transMsg.info.handle == NULL) {
+  //   tError("%s handle %p conn:%p handle failed to init" PRIu64, transLabel(pTransInst), transMsg.info.handle, pConn);
+  //   return false;
+  // }

   if (pHead->noResp == 1) {
     transMsg.info.refId = -1;
@@ -483,7 +490,7 @@ static bool uvHandleReq(SSvrConn* pConn) {
     pConnInfo->clientPort = pConn->port;
     tstrncpy(pConnInfo->user, pConn->user, sizeof(pConnInfo->user));

-  (void)transReleaseExHandle(transGetRefMgt(), pConn->refId);
+  if (acquire) transReleaseExHandle(transGetSvrRefMgt(), pConn->refId);

   (*pTransInst->cfp)(pTransInst->parent, &transMsg, NULL);
   return true;
@@ -770,15 +777,15 @@ void uvWorkerAsyncCb(uv_async_t* handle) {

     SExHandle* exh1 = transMsg.info.handle;
     int64_t    refId = transMsg.info.refId;
-    SExHandle* exh2 = transAcquireExHandle(transGetRefMgt(), refId);
+    SExHandle* exh2 = transAcquireExHandle(transGetSvrRefMgt(), refId);
     if (exh2 == NULL || exh1 != exh2) {
       tTrace("handle except msg %p, ignore it", exh1);
-      (void)transReleaseExHandle(transGetRefMgt(), refId);
+      (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
       destroySmsg(msg);
       continue;
     }
     msg->pConn = exh1->handle;
-    (void)transReleaseExHandle(transGetRefMgt(), refId);
+    (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
     (*transAsyncHandle[msg->type])(msg, pThrd);
   }
 }
@@ -874,15 +881,15 @@ static void uvPrepareCb(uv_prepare_t* handle) {

     SExHandle* exh1 = transMsg.info.handle;
     int64_t    refId = transMsg.info.refId;
-    SExHandle* exh2 = transAcquireExHandle(transGetRefMgt(), refId);
+    SExHandle* exh2 = transAcquireExHandle(transGetSvrRefMgt(), refId);
     if (exh2 == NULL || exh1 != exh2) {
       tTrace("handle except msg %p, ignore it", exh1);
-      (void)transReleaseExHandle(transGetRefMgt(), refId);
+      (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
       destroySmsg(msg);
       continue;
     }
     msg->pConn = exh1->handle;
-    (void)transReleaseExHandle(transGetRefMgt(), refId);
+    (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
     (*transAsyncHandle[msg->type])(msg, pThrd);
   }
 }
@@ -1215,14 +1222,14 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) {

   exh->handle = pConn;
   exh->pThrd = pThrd;
-  exh->refId = transAddExHandle(transGetRefMgt(), exh);
+  exh->refId = transAddExHandle(transGetSvrRefMgt(), exh);
   if (exh->refId < 0) {
     TAOS_CHECK_GOTO(TSDB_CODE_REF_INVALID_ID, NULL, _end);
   }

   QUEUE_INIT(&exh->q);

-  SExHandle* pSelf = transAcquireExHandle(transGetRefMgt(), exh->refId);
+  SExHandle* pSelf = transAcquireExHandle(transGetSvrRefMgt(), exh->refId);
   if (pSelf != exh) {
     TAOS_CHECK_GOTO(TSDB_CODE_REF_INVALID_ID, NULL, _end);
   }
@@ -1284,8 +1291,8 @@ static FORCE_INLINE void destroyConnRegArg(SSvrConn* conn) {
 }
 static int32_t reallocConnRef(SSvrConn* conn) {
   if (conn->refId > 0) {
-    (void)transReleaseExHandle(transGetRefMgt(), conn->refId);
-    (void)transRemoveExHandle(transGetRefMgt(), conn->refId);
+    (void)transReleaseExHandle(transGetSvrRefMgt(), conn->refId);
+    (void)transRemoveExHandle(transGetSvrRefMgt(), conn->refId);
   }
   // avoid app continue to send msg on invalid handle
   SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle));
@@ -1295,14 +1302,14 @@ static int32_t reallocConnRef(SSvrConn* conn) {

   exh->handle = conn;
   exh->pThrd = conn->hostThrd;
-  exh->refId = transAddExHandle(transGetRefMgt(), exh);
+  exh->refId = transAddExHandle(transGetSvrRefMgt(), exh);
   if (exh->refId < 0) {
     taosMemoryFree(exh);
     return TSDB_CODE_REF_INVALID_ID;
   }

   QUEUE_INIT(&exh->q);
-  SExHandle* pSelf = transAcquireExHandle(transGetRefMgt(), exh->refId);
+  SExHandle* pSelf = transAcquireExHandle(transGetSvrRefMgt(), exh->refId);
   if (pSelf != exh) {
     tError("conn %p failed to acquire handle", conn);
     taosMemoryFree(exh);
@@ -1321,8 +1328,8 @@ static void uvDestroyConn(uv_handle_t* handle) {
   }
   SWorkThrd* thrd = conn->hostThrd;

-  (void)transReleaseExHandle(transGetRefMgt(), conn->refId);
-  (void)transRemoveExHandle(transGetRefMgt(), conn->refId);
+  (void)transReleaseExHandle(transGetSvrRefMgt(), conn->refId);
+  (void)transRemoveExHandle(transGetSvrRefMgt(), conn->refId);

   STrans* pTransInst = thrd->pTransInst;
   tDebug("%s conn %p destroy", transLabel(pTransInst), conn);
@@ -1752,15 +1759,15 @@ int32_t transReleaseSrvHandle(void* handle) {
   tDebug("%s conn %p start to release", transLabel(pThrd->pTransInst), exh->handle);
   if ((code = transAsyncSend(pThrd->asyncPool, &m->q)) != 0) {
     destroySmsg(m);
-    (void)transReleaseExHandle(transGetRefMgt(), refId);
+    (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
     return code;
   }

-  (void)transReleaseExHandle(transGetRefMgt(), refId);
+  (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
   return 0;
 _return1:
   tDebug("handle %p failed to send to release handle", exh);
-  (void)transReleaseExHandle(transGetRefMgt(), refId);
+  (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
   return code;
 _return2:
   tDebug("handle %p failed to send to release handle", exh);
@@ -1803,17 +1810,17 @@ int32_t transSendResponse(const STransMsg* msg) {
   tGDebug("conn %p start to send resp (1/2)", exh->handle);
   if ((code = transAsyncSend(pThrd->asyncPool, &m->q)) != 0) {
     destroySmsg(m);
-    (void)transReleaseExHandle(transGetRefMgt(), refId);
+    (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
     return code;
   }

-  (void)transReleaseExHandle(transGetRefMgt(), refId);
+  (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
   return 0;

 _return1:
   tDebug("handle %p failed to send resp", exh);
   rpcFreeCont(msg->pCont);
-  (void)transReleaseExHandle(transGetRefMgt(), refId);
+  (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
   return code;
 _return2:
   tDebug("handle %p failed to send resp", exh);
@@ -1848,17 +1855,17 @@ int32_t transRegisterMsg(const STransMsg* msg) {
   tDebug("%s conn %p start to register brokenlink callback", transLabel(pTransInst), exh->handle);
   if ((code = transAsyncSend(pThrd->asyncPool, &m->q)) != 0) {
     destroySmsg(m);
-    (void)transReleaseExHandle(transGetRefMgt(), refId);
+    (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
     return code;
   }

-  (void)transReleaseExHandle(transGetRefMgt(), refId);
+  (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
   return 0;

 _return1:
   tDebug("handle %p failed to register brokenlink", exh);
   rpcFreeCont(msg->pCont);
-  (void)transReleaseExHandle(transGetRefMgt(), refId);
+  (void)transReleaseExHandle(transGetSvrRefMgt(), refId);
   return code;
 _return2:
   tDebug("handle %p failed to register brokenlink", exh);
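In `uvHandleReq()` above, the server now acquires a handle from the new registry only when the request expects a response (`pHead->noResp == 0`), remembers that in the `acquire` flag, and releases the handle only if it was actually acquired. The sketch below shows that acquire-flag pairing with a simple reference counter standing in for `transAcquireExHandle`/`transReleaseExHandle`.

```c
#include <stdbool.h>
#include <stdio.h>

// Illustrative stand-in for an externally ref-counted handle.
typedef struct {
  int refs;
} Handle;

static Handle *handleAcquire(Handle *h) { h->refs++; return h; }
static void    handleRelease(Handle *h) { h->refs--; }

// Mirrors the uvHandleReq() change: only take a reference when the request
// expects a response, remember that in `acquired`, and release only if the
// reference was actually taken.
static void handleRequest(Handle *conn, bool noResp) {
  bool    acquired = false;
  Handle *msgHandle = NULL;

  if (noResp) {
    msgHandle = NULL;                 // fire-and-forget: nothing handed to the app
  } else {
    msgHandle = handleAcquire(conn);  // response path keeps the connection alive
    acquired = true;
  }

  (void)msgHandle;                    // the real code passes this to the application

  if (acquired) {
    handleRelease(conn);              // paired with the acquire above, and only then
  }
}

int main(void) {
  Handle conn = {.refs = 1};
  handleRequest(&conn, true);
  handleRequest(&conn, false);
  printf("refs=%d\n", conn.refs);     // back to 1: no leaked or spurious release
  return 0;
}
```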
@@ -222,7 +222,7 @@ class TDTestCase:

         tdSql.query("select * from information_schema.ins_columns where db_name ='information_schema'")
         tdLog.info(len(tdSql.queryResult))
-        tdSql.checkEqual(True, len(tdSql.queryResult) in range(261, 271))
+        tdSql.checkEqual(True, len(tdSql.queryResult) in range(271, 272))

         tdSql.query("select * from information_schema.ins_columns where db_name ='performance_schema'")
         tdSql.checkEqual(56, len(tdSql.queryResult))
@@ -124,6 +124,9 @@ class TDTestCase:
         tdSql.query(f'select percentile(col1, 9.9, 19.9, 29.9, 39.9, 49.9, 59.9, 69.9, 79.9, 89.9, 99.9) from {self.ntbname}')
         tdSql.checkData(0, 0, '[0.891000, 1.791000, 2.691000, 3.591000, 4.491000, 5.391000, 6.291000, 7.191000, 8.091000, 8.991000]')

+        tdSql.query(f'select percentile(col1 * 1e+200, 9.9, 19.9, 29.9, 39.9, 49.9, 59.9, 69.9, 79.9, 89.9, 99.9) from {self.ntbname}')
+        tdSql.checkRows(1);
+
         tdSql.error(f'select percentile(col1) from {self.ntbname}')
         tdSql.error(f'select percentile(col1, -1) from {self.ntbname}')
         tdSql.error(f'select percentile(col1, 101) from {self.ntbname}')
@@ -166,6 +169,9 @@ class TDTestCase:
         tdSql.query(f'select percentile(col1, 9.9, 19.9, 29.9, 39.9, 49.9, 59.9, 69.9, 79.9, 89.9, 99.9) from {self.stbname}_0')
         tdSql.checkData(0, 0, '[0.891000, 1.791000, 2.691000, 3.591000, 4.491000, 5.391000, 6.291000, 7.191000, 8.091000, 8.991000]')

+        tdSql.query(f'select percentile(col1 * 1e+200, 9.9, 19.9, 29.9, 39.9, 49.9, 59.9, 69.9, 79.9, 89.9, 99.9) from {self.stbname}_0')
+        tdSql.checkRows(1);
+
         tdSql.error(f'select percentile(col1) from {self.stbname}_0')
         tdSql.error(f'select percentile(col1, -1) from {self.stbname}_0')
         tdSql.error(f'select percentile(col1, 101) from {self.stbname}_0')
@@ -15,6 +15,10 @@ sys.path.append("./7-tmq")
 from tmqCommon import *

 class TDTestCase:
+    clientCfgDict = {'debugFlag': 135}
+    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
+    updatecfgDict["clientCfg"] = clientCfgDict
+
     def init(self, conn, logSql, replicaVar=1):
         self.replicaVar = int(replicaVar)
         tdLog.debug(f"start to excute {__file__}")