fix:conflicts from 3.0

commit 9e2d80ac2a

@@ -315,7 +315,9 @@ def pre_test_build_win() {
python.exe -m pip install --upgrade pip
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.10
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
python -m pip uninstall taos-ws-py -y
python -m pip install taos-ws-py==0.2.8
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
}

@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(CMAKE_VERBOSE_MAKEFILE FALSE)
set(TD_BUILD_TAOSA_INTERNAL FALSE)

#set output directory

@@ -176,6 +176,14 @@ IF(APPLE)
set(THREADS_PREFER_PTHREAD_FLAG ON)
ENDIF()

IF(TD_WINDOWS)
IF(MSVC)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:8388608")
ELSEIF(MINGW)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--stack,8388608")
ENDIF()
ENDIF()

MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})

set(TD_DEPS_DIR "x86")

@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.2.0.0.alpha")
SET(TD_VER_NUMBER "3.1.2.0.alpha")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)

@@ -12,7 +12,7 @@ The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be conf

### Step 1

If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](../../operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.

:::note
FQDN information is written to file. If you have started TDengine without configuring or changing the FQDN, ensure that data is backed up or no longer needed before running the `rm -rf /var/lib/taos/\*` command.

@@ -9,7 +9,7 @@ description: This document describes how to query data in TDengine.
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }

SELECT [hints] [DISTINCT] [TAGS] select_list
SELECT [hints] [DISTINCT] select_list
from_clause
[WHERE condition]
[partition_by_clause]

@@ -225,14 +225,6 @@ The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolu
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
```

### TAGS Query

The TAGS keyword returns only tag columns from all child tables when only tag columns are specified. One row containing tag columns is returned for each child table.

```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
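
For illustration only, a minimal TAGS query (the super table `meters` and tag columns `location` and `groupid` follow the usual TDengine sample schema and are not part of this change):

```sql
-- one row of tag values is returned for each child table of `meters`
SELECT TAGS location, groupid FROM meters;
```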

## Query Objects

`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.

@@ -292,11 +292,11 @@ CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
LENGTH(expr)
```

**Description**: The length in bytes of a string
**Description**: The length in bytes

**Return value type**: Bigint

**Applicable data types**: VARCHAR and NCHAR fields or columns
**Applicable data types**: VARCHAR, NCHAR, and VARBINARY

**Nested query**: It can be used in both the outer query and inner query in a nested query.
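
A hedged usage sketch (the table `t1` and VARCHAR column `nick` are hypothetical, only to show the call shape):

```sql
-- returns the length in bytes of each value in the VARCHAR column `nick`
SELECT LENGTH(nick) FROM t1;
```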

@@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDen

There are two ways to install taosBenchmark:

- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](/operation/pkg-install) for details.
- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](../../operation/pkg-install) for details.

- Compile taos-tools separately and install them. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.

@@ -16,7 +16,7 @@ taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeep
There are two ways to install taosKeeper:
Methods of installing taosKeeper:

- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](/operation/pkg-install) for details.
- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](../../operation/pkg-install) for details.

- You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
## Configuration and Launch

@@ -21,7 +21,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
1. Linux operating system
2. Java 8 and Maven installed
3. Git/curl/vi is installed
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](../../operation/pkg-install)

## Install Kafka

@@ -10,7 +10,7 @@ TDengine is an open-source, high-performance, cloud-native [time-series database](https://tde

## Main Products

TDengine has three main products: TDengine Pro (the enterprise edition of TDengine), TDengine Cloud, and TDengine OSS. For their exact definitions please refer to
TDengine has three main products: TDengine Enterprise (the enterprise edition of TDengine), TDengine Cloud, and TDengine OSS. For their exact definitions please refer to
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
- [TDengine OSS](https://www.taosdata.com/tdengine-oss)

@@ -1004,7 +1004,7 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- httpConnectTimeout: timeout for creating a connection, in ms, default 5000 ms. Effective only for WebSocket connections.
- messageWaitTimeout: timeout for data transmission, in ms, default 10000 ms. Effective only for WebSocket connections.
- httpPoolSize: maximum number of parallel requests on a single connection. Effective only for WebSocket connections.
For other parameters please refer to: [Consumer parameter list](../../../develop/tmq#创建-consumer-以及consumer-group)
For other parameters please refer to: [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group)

#### Subscribe and Consume Data

@@ -1082,7 +1082,7 @@ consumer.unsubscribe();
consumer.close()
```

For details please refer to: [Data Subscription](../../../develop/tmq)
For details please refer to: [Data Subscription](../../develop/tmq)

#### Complete Example

@@ -1373,7 +1373,7 @@ public static void main(String[] args) throws Exception {

**Solution**: upgrade to taos-jdbcdriver 3.0.2 or later.

For other questions please refer to [FAQ](../../../train-faq/faq)
For other questions please refer to [FAQ](../../train-faq/faq)

## API Reference

@@ -352,7 +352,7 @@ client.put(&sml_data)?

### Data Subscription

TDengine starts a subscription through the message queue [TMQ](../../../taos-sql/tmq/).
TDengine starts a subscription through the message queue [TMQ](../../taos-sql/tmq/).

#### Create a Topic

@@ -491,7 +491,7 @@ let taos = pool.get()?;

## Frequently Asked Questions

Please refer to [FAQ](../../../train-faq/faq)
Please refer to [FAQ](../../train-faq/faq)

## API Reference

@@ -9,7 +9,7 @@ description: Detailed syntax for querying data
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }

SELECT [hints] [DISTINCT] [TAGS] select_list
SELECT [hints] [DISTINCT] select_list
from_clause
[WHERE condition]
[partition_by_clause]

@@ -160,16 +160,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;

:::

### Tag Query

When the queried columns are all tag columns, the `TAGS` keyword can be used to return the tag columns of every child table. Only one row of tag columns is returned per child table.

Return the tag columns of all child tables:

```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
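
As an illustrative example only (the super table `meters` and tag columns `location` and `groupid` come from the standard TDengine sample data, not from this change):

```sql
-- one row of tag values per child table of `meters`
SELECT TAGS location, groupid FROM meters;
```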

### Result Set Column Names

In the `SELECT` clause, if the column names of the result set are not specified, the result set uses the expression names in the `SELECT` clause as column names by default. In addition, users can use `AS` to rename the columns in the result set. For example:

@@ -292,11 +292,11 @@ CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
LENGTH(expr)
```

**Description**: The length of a string in bytes.
**Description**: The length in bytes.

**Return value type**: BIGINT.

**Applicable data types**: the input parameter is a string or column of VARCHAR or NCHAR type.
**Applicable data types**: VARCHAR, NCHAR, VARBINARY.

**Nested query support**: applicable to both inner and outer queries.
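
A hedged usage sketch (the table `t1` and VARCHAR column `nick` are hypothetical, only to show the call shape):

```sql
-- returns the length in bytes of each value in the VARCHAR column `nick`
SELECT LENGTH(nick) FROM t1;
```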

@@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products

There are two ways to install taosBenchmark:

- Installing the official TDengine installer will automatically install taosBenchmark. For details please refer to [TDengine installation](../../operation/pkg-install).
- Installing the official TDengine installer will automatically install taosBenchmark. For details please refer to [TDengine installation](../../get-started/).

- Compile and install taos-tools separately. For details please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository.

@@ -16,7 +16,7 @@ taosKeeper is the export tool for TDengine 3.0 monitoring metrics. With simple
There are two ways to install taosKeeper:
Methods of installing taosKeeper:

- Installing the official TDengine installer will automatically install taosKeeper. For details please refer to [TDengine installation](../../operation/pkg-install).
- Installing the official TDengine installer will automatically install taosKeeper. For details please refer to [TDengine installation](../../get-started/).

- Compile and install taosKeeper separately. For details please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository.

@@ -23,7 +23,7 @@ TDengine Source Connector is used to read data out of TDengine in real time and send
1. Linux operating system
2. Java 8 and Maven installed
3. Git, curl, and vi installed
4. TDengine installed and started. If not, please refer to [Install and Uninstall](../../operation/pkg-install)
4. TDengine installed and started. If not, please refer to [Install and Uninstall](../../get-started/)

## Install Kafka

@@ -75,7 +75,7 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);

int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision, int32_t order);

int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision);

@@ -29,54 +29,6 @@ extern "C" {

#include "storageapi.h"

// void* streamBackendInit(const char* path);
// void streamBackendCleanup(void* arg);
// SListNode* streamBackendAddCompare(void* backend, void* arg);
// void streamBackendDelCompare(void* backend, void* arg);

// <<<<<<< HEAD
// typedef struct STdbState {
// rocksdb_t* rocksdb;
// rocksdb_column_family_handle_t** pHandle;
// rocksdb_writeoptions_t* writeOpts;
// rocksdb_readoptions_t* readOpts;
// rocksdb_options_t** cfOpts;
// rocksdb_options_t* dbOpt;
// struct SStreamTask* pOwner;
// void* param;
// void* env;
// SListNode* pComparNode;
// void* pBackend;
// char idstr[64];
// void* compactFactory;
// TdThreadRwlock rwLock;
// =======
// typedef struct STdbState {
// rocksdb_t* rocksdb;
// rocksdb_column_family_handle_t** pHandle;
// rocksdb_writeoptions_t* writeOpts;
// rocksdb_readoptions_t* readOpts;
// rocksdb_options_t** cfOpts;
// rocksdb_options_t* dbOpt;
// struct SStreamTask* pOwner;
// void* param;
// void* env;
// SListNode* pComparNode;
// void* pBackendHandle;
// char idstr[64];
// void* compactFactory;
//
// TDB* db;
// TTB* pStateDb;
// TTB* pFuncStateDb;
// TTB* pFillStateDb; // todo refactor
// TTB* pSessionStateDb;
// TTB* pParNameDb;
// TTB* pParTagDb;
// TXN* txn;
//} STdbState;
//>>>>>>> enh/dev3.0

SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
void streamStateClose(SStreamState* pState, bool remove);
int32_t streamStateBegin(SStreamState* pState);

@@ -111,14 +111,12 @@ typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);
typedef struct {
int8_t type;
int64_t ver;
int32_t* dataRef;
SPackedData submit;
} SStreamDataSubmit;

typedef struct {
int8_t type;
int64_t ver;
SArray* dataRefs; // SArray<int32_t*>
SArray* submits; // SArray<SPackedSubmit>
} SStreamMergedSubmit;

@@ -260,7 +258,7 @@ typedef struct SStreamTaskId {
typedef struct SCheckpointInfo {
int64_t checkpointId;
int64_t checkpointVer; // latest checkpointId version
int64_t currentVer; // current offset in WAL, not serialize it
int64_t nextProcessVer; // current offset in WAL, not serialize it
} SCheckpointInfo;

typedef struct SStreamStatus {

@@ -312,12 +310,27 @@ typedef struct STaskSchedInfo {
void* pTimer;
} STaskSchedInfo;

typedef struct SSinkTaskRecorder {
int64_t numOfSubmit;
int64_t numOfBlocks;
int64_t numOfRows;
} SSinkTaskRecorder;

typedef struct {
int64_t created;
int64_t init;
int64_t step1Start;
int64_t step2Start;
int64_t sinkStart;
} STaskTimestamp;

typedef struct STokenBucket {
int32_t capacity; // total capacity
int64_t fillTimestamp;// fill timestamp
int32_t numOfToken; // total available tokens
int32_t rate; // number of token per second
} STokenBucket;

struct SStreamTask {
int64_t ver;
SStreamTaskId id;

@@ -345,6 +358,8 @@ struct SStreamTask {
STaskSinkSma smaSink;
STaskSinkFetch fetchSink;
};
SSinkTaskRecorder sinkRecorder;
STokenBucket tokenBucket;

void* launchTaskTimer;
SMsgCb* pMsgCb; // msg handle

@@ -419,8 +434,9 @@ int32_t tDecodeStreamTaskChkInfo(SDecoder* pDecoder, SCheckpointInfo* pChkpInfo)
int32_t tDecodeStreamTaskId(SDecoder* pDecoder, SStreamTaskId* pTaskId);

int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem);
int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBlock);
int32_t streamTaskPutTranstateIntoInputQ(SStreamTask* pTask);
bool streamQueueIsFull(const STaosQueue* pQueue);
bool streamQueueIsFull(const STaosQueue* pQueue, bool inputQ);

typedef struct {
SMsgHead head;

@@ -630,12 +646,10 @@ SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t
void streamTaskInputFail(SStreamTask* pTask);
int32_t streamTryExec(SStreamTask* pTask);
int32_t streamSchedExec(SStreamTask* pTask);
int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock);
bool streamTaskShouldStop(const SStreamStatus* pStatus);
bool streamTaskShouldPause(const SStreamStatus* pStatus);
bool streamTaskIsIdle(const SStreamTask* pTask);

int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize);
void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);

char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);

@@ -655,7 +669,7 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask);
int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated);
bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);
int32_t streamTaskGetInputQItems(const SStreamTask* pTask);
int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);

// common
int32_t streamRestoreParam(SStreamTask* pTask);

@@ -679,11 +693,10 @@ void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
// source level
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamSourceScanHistoryData(SStreamTask* pTask);
int32_t streamScanHistoryData(SStreamTask* pTask);
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);

// agg level
int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask);
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq,
SRpcHandleInfo* pRpcInfo);
int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask);

@@ -192,7 +192,7 @@ int32_t walApplyVer(SWal *, int64_t ver);
// int32_t walDataCorrupted(SWal*);

// wal reader
SWalReader *walOpenReader(SWal *, SWalFilterCond *pCond);
SWalReader *walOpenReader(SWal *, SWalFilterCond *pCond, int64_t id);
void walCloseReader(SWalReader *pRead);
void walReadReset(SWalReader *pReader);
int32_t walReadVer(SWalReader *pRead, int64_t ver);

@@ -112,6 +112,8 @@ int32_t taosGetErrorFile(TdFilePtr pFile);

int32_t taosCompressFile(char *srcFileName, char *destFileName);

int32_t taosSetFileHandlesLimit();

#ifdef __cplusplus
}
#endif

@@ -39,7 +39,7 @@ int64_t taosGetOsUptime();
int32_t taosGetEmail(char *email, int32_t maxLen);
int32_t taosGetOsReleaseName(char *releaseName, char* sName, char* ver, int32_t maxLen);
int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores);
int32_t taosGetCpuCores(float *numOfCores);
int32_t taosGetCpuCores(float *numOfCores, bool physical);
void taosGetCpuUsage(double *cpu_system, double *cpu_engine);
int32_t taosGetCpuInstructions(char* sse42, char* avx, char* avx2, char* fma);
int32_t taosGetTotalMemory(int64_t *totalKB);

@@ -789,7 +789,7 @@ int32_t* taosGetErrno();

// stream
#define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100)
#define TSDB_CODE_STREAM_BACKPRESSURE_OUT_OF_QUEUE TAOS_DEF_ERROR_CODE(0, 0x4101)
#define TSDB_CODE_STREAM_EXEC_CANCELLED TAOS_DEF_ERROR_CODE(0, 0x4102)

// TDLite
#define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS TAOS_DEF_ERROR_CODE(0, 0x5100)

@@ -338,7 +338,7 @@ if [ "$verMode" == "cluster" ]; then
tmp_pwd=`pwd`
cd ${install_dir}/connector
if [ ! -d taos-connector-jdbc ];then
git clone -b 3.2.1 --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
git clone -b main --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
fi
cd taos-connector-jdbc
mvn clean package -Dmaven.test.skip=true

@@ -380,8 +380,7 @@ void destroySubRequests(SRequestObj *pRequest) {
pReqList[++reqIdx] = pTmp;
releaseRequest(tmpRefId);
} else {
tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, tmpRefId,
pTmp->requestId);
tscError("prev req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}

@@ -398,7 +397,7 @@ void destroySubRequests(SRequestObj *pRequest) {
removeRequest(pTmp->self);
releaseRequest(pTmp->self);
} else {
tscError("0x%" PRIx64 " is not there", tmpRefId);
tscError("next req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}

@@ -492,8 +491,7 @@ void stopAllQueries(SRequestObj *pRequest) {
pReqList[++reqIdx] = pTmp;
releaseRequest(tmpRefId);
} else {
tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, tmpRefId,
pTmp->requestId);
tscError("prev req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}

@@ -512,7 +510,7 @@ void stopAllQueries(SRequestObj *pRequest) {
taosStopQueryImpl(pTmp);
releaseRequest(pTmp->self);
} else {
tscError("0x%" PRIx64 " is not there", tmpRefId);
tscError("next req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}

@@ -874,8 +874,13 @@ void handleSubQueryFromAnalyse(SSqlCallbackWrapper *pWrapper, SMetaData *pResult
if (TSDB_CODE_SUCCESS == code) {
code = cloneCatalogReq(&pNewWrapper->pCatalogReq, pWrapper->pCatalogReq);
}
doAsyncQueryFromAnalyse(pResultMeta, pNewWrapper, code);
nodesDestroyNode(pRoot);
if (TSDB_CODE_SUCCESS == code) {
doAsyncQueryFromAnalyse(pResultMeta, pNewWrapper, code);
nodesDestroyNode(pRoot);
} else {
handleQueryAnslyseRes(pWrapper, pResultMeta, code);
return;
}
}

void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, int32_t code) {

@@ -1148,8 +1153,7 @@ void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
pReqList[++reqIdx] = pTmp;
releaseRequest(tmpRefId);
} else {
tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, tmpRefId,
pTmp->requestId);
tscError("prev req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}

@@ -1162,7 +1166,7 @@ void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
removeRequest(pTmp->self);
releaseRequest(pTmp->self);
} else {
tscError("0x%" PRIx64 " is not there", tmpRefId);
tscError("next req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}

@@ -2139,22 +2139,16 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha
return TSDB_CODE_FAILED;
}

SSmlKv pTag = {.key = "group_id",
.keyLen = sizeof("group_id") - 1,
.type = TSDB_DATA_TYPE_UBIGINT,
.u = groupId,
.length = sizeof(uint64_t)};
int8_t type = TSDB_DATA_TYPE_UBIGINT;
const char* name = "group_id";
int32_t len = strlen(name);
SSmlKv pTag = { .key = name, .keyLen = len, .type = type, .u = groupId, .length = sizeof(uint64_t)};
taosArrayPush(tags, &pTag);

RandTableName rname = {
.tags = tags,
.stbFullName = stbFullName,
.stbFullNameLen = strlen(stbFullName),
.ctbShortName = cname,
};
.tags = tags, .stbFullName = stbFullName, .stbFullNameLen = strlen(stbFullName), .ctbShortName = cname};

buildChildTableName(&rname);

taosArrayDestroy(tags);

if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) {

@@ -2460,10 +2460,10 @@ int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t byt
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0);
if (code) goto _exit;
} else {
if (ASSERT(varDataTLen(data + offset) <= bytes)) {
if (varDataTLen(data + offset) > bytes) {
uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset),
bytes);
code = TSDB_CODE_INVALID_PARA;
code = TSDB_CODE_PAR_VALUE_TOO_LONG;
goto _exit;
}
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_VALUE](pColData, (uint8_t *)varDataVal(data + offset),

@@ -1601,6 +1601,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
if (taosSetS3Cfg(tsCfg) != 0) return -1;
}
taosSetSystemCfg(tsCfg);
if (taosSetFileHandlesLimit() != 0) return -1;

cfgDumpCfg(tsCfg, tsc, false);

@@ -692,34 +692,67 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision) + fraction);
}

int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) {
/**
* @brief calc how many windows after filling between skey and ekey
* @notes for asc order
* skey ---> ekey
* ^ ^
* _____!_____.........._____|_____..
* |__1__)
* |__2__)...-->|_ret+1_)
* skey + ret * interval <= ekey
* skey + ret * interval + interval > ekey
* ======> (ekey - skey - interval) / interval < ret <= (ekey - skey) / interval
* For keys from blocks which do not need filling, skey + ret * interval == ekey.
* For keys need filling, skey + ret * interval <= ekey.
* Total num of windows is ret + 1(the last window)
*
* for desc order
* skey <--- ekey
* ^ ^
* _____|____..........______!____...
* |_first_)
* |__1__)
* |_ret_)<--...|__2__)
* skey >= ekey - ret * interval
* skey < ekey - ret * interval + interval
*=======> (ekey - skey) / interval <= ret < (ekey - skey + interval) / interval
* For keys from blocks which do not need filling, skey == ekey - ret * interval.
* For keys need filling, skey >= ekey - ret * interval.
* Total num of windows is ret + 1(the first window)
*/
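/* Illustrative worked example (hypothetical numbers, following the derivation above):
 * ascending order, skey = 0, ekey = 10, interval = 3
 *   ret = (ekey - skey) / interval = 10 / 3 = 3
 *   check: skey + ret * interval = 9 <= ekey, and 9 + interval = 12 > ekey
 *   total number of windows = ret + 1 = 4, i.e. [0,3) [3,6) [6,9) [9,12)
 */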
int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision,
int32_t order) {
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
int32_t ret;

if (unit != 'n' && unit != 'y') {
return (int32_t)((ekey - skey) / interval);
ret = (int32_t)((ekey - skey) / interval);
if (order == TSDB_ORDER_DESC && ret * interval < (ekey - skey)) ret += 1;
} else {
skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
ekey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));

struct tm tm;
time_t t = (time_t)skey;
taosLocalTime(&t, &tm, NULL);
int32_t smon = tm.tm_year * 12 + tm.tm_mon;

t = (time_t)ekey;
taosLocalTime(&t, &tm, NULL);
int32_t emon = tm.tm_year * 12 + tm.tm_mon;

if (unit == 'y') {
interval *= 12;
}
ret = (emon - smon) / (int32_t)interval;
if (order == TSDB_ORDER_DESC && ret * interval < (smon - emon)) ret += 1;
}

skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
ekey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));

struct tm tm;
time_t t = (time_t)skey;
taosLocalTime(&t, &tm, NULL);
int32_t smon = tm.tm_year * 12 + tm.tm_mon;

t = (time_t)ekey;
taosLocalTime(&t, &tm, NULL);
int32_t emon = tm.tm_year * 12 + tm.tm_mon;

if (unit == 'y') {
interval *= 12;
}

return (emon - smon) / (int32_t)interval;
return ret + 1;
}

int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval) {

@@ -56,7 +56,7 @@ void *dmSetMgmtHandle(SArray *pArray, tmsg_t msgType, void *nodeMsgFp, bool need

void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) {
taosGetCpuUsage(&pInfo->cpu_system, &pInfo->cpu_engine);
taosGetCpuCores(&pInfo->cpu_cores);
taosGetCpuCores(&pInfo->cpu_cores, false);
taosGetProcMemory(&pInfo->mem_engine);
taosGetSysMemory(&pInfo->mem_system);
pInfo->mem_total = tsTotalMemoryKB;

@@ -1017,7 +1017,10 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) {
}

code = mndSetDbCfgFromAlterDbReq(&dbObj, &alterReq);
if (code != 0) goto _OVER;
if (code != 0) {
if (code == TSDB_CODE_MND_DB_OPTION_UNCHANGED) code = 0;
goto _OVER;
}

code = mndCheckInChangeDbCfg(pMnode, &dbObj.cfg);
if (code != 0) goto _OVER;

@@ -2342,9 +2342,9 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) {
alterReq.alterType, alterReq.numOfFields, alterReq.ttl);

SName name = {0};
tNameFromString(&name, pDb->name, T_NAME_ACCT | T_NAME_DB);
tNameFromString(&name, alterReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);

auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, alterReq.name, detail);
auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, name.tname, detail);

_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {

@@ -1922,6 +1922,7 @@ int32_t mndPersistTransLog(SStreamObj *pStream, STrans *pTrans) {
SSdbRaw *pCommitRaw = mndStreamActionEncode(pStream);
if (pCommitRaw == NULL) {
mError("failed to encode stream since %s", terrstr());
mndTransDrop(pTrans);
return -1;
}

@@ -1988,6 +1989,7 @@ static int32_t createStreamUpdateTrans(SMnode *pMnode, SStreamObj *pStream, SVgr
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pBuf);
taosWUnLockLatch(&pStream->lock);
mndTransDrop(pTrans);
return -1;
}
}

@@ -1998,7 +2000,6 @@ static int32_t createStreamUpdateTrans(SMnode *pMnode, SStreamObj *pStream, SVgr
int32_t code = mndPersistTransLog(pStream, pTrans);
if (code != TSDB_CODE_SUCCESS) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
}

@@ -86,18 +86,18 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
SCheckpointInfo* pChkInfo = &pTask->chkInfo;
// checkpoint ver is the kept version, handled data should be the next version.
if (pTask->chkInfo.checkpointId != 0) {
pTask->chkInfo.currentVer = pTask->chkInfo.checkpointVer + 1;
qInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " currentVer:%" PRId64, pTask->id.idStr,
pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->currentVer);
pTask->chkInfo.nextProcessVer = pTask->chkInfo.checkpointVer + 1;
qInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " nextProcessVer:%" PRId64, pTask->id.idStr,
pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
} else {
if (pTask->chkInfo.currentVer == -1) {
pTask->chkInfo.currentVer = 0;
if (pTask->chkInfo.nextProcessVer == -1) {
pTask->chkInfo.nextProcessVer = 0;
}
}

qInfo("snode:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 " currentVer:%" PRId64
qInfo("snode:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64
" child id:%d, level:%d, status:%s fill-history:%d, trigger:%" PRId64 " ms",
SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->currentVer,
SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
pTask->info.selfChildId, pTask->info.taskLevel, streamGetTaskStatusStr(pTask->status.taskStatus),
pTask->info.fillHistory, pTask->info.triggerParam);

@@ -288,6 +288,7 @@ typedef struct {
int64_t numOfSTables;
int64_t numOfCTables;
int64_t numOfNTables;
int64_t numOfCmprTables;
int64_t numOfNTimeSeries;
int64_t numOfTimeSeries;
int64_t itvTimeSeries;

@@ -155,7 +155,7 @@ int32_t tqOffsetCommitFile(STqOffsetStore* pStore);
// tqSink
int32_t tqBuildDeleteReq(const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
const char* pIdStr);
void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, void* data);
void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data);

// tqOffset
char* tqOffsetBuildFName(const char* path, int32_t fVer);

@@ -250,7 +250,7 @@ int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t version, SRpcMsg* pMsg);
int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);

@@ -61,5 +61,12 @@ int metaPrepareAsyncCommit(SMeta *pMeta) {
// abort the meta txn
int metaAbort(SMeta *pMeta) {
if (!pMeta->txn) return 0;
return tdbAbort(pMeta->pEnv, pMeta->txn);
int code = tdbAbort(pMeta->pEnv, pMeta->txn);
if (code) {
metaError("vgId:%d, failed to abort meta since %s", TD_VID(pMeta->pVnode), tstrerror(terrno));
} else {
pMeta->txn = NULL;
}

return code;
}

@@ -699,13 +699,16 @@ int64_t metaGetTbNum(SMeta *pMeta) {
// N.B. Called by statusReq per second
int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
// sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
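// Illustrative arithmetic (hypothetical numbers, for the formula above): a super table with 5 columns
// (1 timestamp + 4 metrics) and 10 child tables contributes (5 - 1) * 10 = 40 time series to this count.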
if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 ||
int64_t nTables = metaGetTbNum(pMeta);
if (nTables - pMeta->pVnode->config.vndStats.numOfCmprTables > 100 ||
pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 ||
++pMeta->pVnode->config.vndStats.itvTimeSeries % (60 * 5) == 0) {
int64_t num = 0;
vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
pMeta->pVnode->config.vndStats.numOfTimeSeries = num;

pMeta->pVnode->config.vndStats.itvTimeSeries = (TD_VID(pMeta->pVnode) % 100) * 2;
pMeta->pVnode->config.vndStats.numOfCmprTables = nTables;
}

return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;

@@ -811,7 +811,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
pTask->smaSink.smaSink = smaHandleRes;
} else if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
pTask->tbSink.vnode = pTq->pVnode;
pTask->tbSink.tbSinkFunc = tqSinkToTablePipeline;
pTask->tbSink.tbSinkFunc = tqSinkDataIntoDstTable;

int32_t ver1 = 1;
SMetaInfo info = {0};

@@ -832,7 +832,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {

if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
SWalFilterCond cond = {.deleteMsg = 1}; // delete msg also extract from wal files
pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond);
pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond, pTask->id.taskId);
}

// reset the task status from unfinished transaction

@@ -847,14 +847,14 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {

// checkpoint ver is the kept version, handled data should be the next version.
if (pTask->chkInfo.checkpointId != 0) {
pTask->chkInfo.currentVer = pTask->chkInfo.checkpointVer + 1;
pTask->chkInfo.nextProcessVer = pTask->chkInfo.checkpointVer + 1;
tqInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " currentVer:%" PRId64, pTask->id.idStr,
pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->currentVer);
pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
}

tqInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 " currentVer:%" PRId64
tqInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64
" child id:%d, level:%d, status:%s fill-history:%d, trigger:%" PRId64 " ms",
vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->currentVer,
vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
pTask->info.selfChildId, pTask->info.taskLevel, streamGetTaskStatusStr(pTask->status.taskStatus),
pTask->info.fillHistory, pTask->info.triggerParam);

@@ -903,9 +903,10 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
return streamSendCheckRsp(pTq->pStreamMeta, &req, &rsp, &pMsg->info, taskId);
}

int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t sversion, SRpcMsg* pMsg) {
int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) {
char* pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t len = pMsg->contLen - sizeof(SMsgHead);
int32_t vgId = pTq->pStreamMeta->vgId;

int32_t code;
SStreamTaskCheckRsp rsp;

@@ -914,7 +915,9 @@ int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t sversion, SRpcMsg* pMsg) {
tDecoderInit(&decoder, (uint8_t*)pReq, len);
code = tDecodeStreamTaskCheckRsp(&decoder, &rsp);
if (code < 0) {
terrno = TSDB_CODE_INVALID_MSG;
tDecoderClear(&decoder);
tqError("vgId:%d failed to parse check rsp msg, code:%s", vgId, tstrerror(terrno));
return -1;
}

@@ -993,6 +996,9 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms

bool restored = pTq->pVnode->restored;
if (p != NULL && restored) {
p->tsInfo.init = taosGetTimestampMs();
tqDebug("s-task:%s set the init ts:%"PRId64, p->id.idStr, p->tsInfo.init);

streamTaskCheckDownstream(p);
} else if (!restored) {
tqWarn("s-task:%s not launched since vnode(vgId:%d) not ready", p->id.idStr, vgId);

@@ -1055,7 +1061,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
ASSERT(pTask->status.pauseAllowed == true);
}

streamSourceScanHistoryData(pTask);
streamScanHistoryData(pTask);
if (pTask->status.taskStatus == TASK_STATUS__PAUSE) {
double el = (taosGetTimestampMs() - pTask->tsInfo.step1Start) / 1000.0;
tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs, sched-status:%d", pTask->id.idStr, el,

@@ -1100,14 +1106,17 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
}

// now we can stop the stream task execution
streamTaskHalt(pStreamTask);

int64_t latestVer = 0;
taosThreadMutexLock(&pStreamTask->lock);
streamTaskHalt(pStreamTask);
tqDebug("s-task:%s level:%d sched-status:%d is halt by fill-history task:%s", pStreamTask->id.idStr,
pStreamTask->info.taskLevel, pStreamTask->status.schedStatus, id);
latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
taosThreadMutexUnlock(&pStreamTask->lock);

// if it's a source task, extract the last version in wal.
pRange = &pTask->dataRange.range;
int64_t latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
done = streamHistoryTaskSetVerRangeStep2(pTask, latestVer);

if (done) {

@@ -1126,9 +1135,8 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
pTask->tsInfo.step2Start = taosGetTimestampMs();
streamSetParamForStreamScannerStep2(pTask, pRange, pWindow);

int64_t dstVer = pTask->dataRange.range.minVer - 1;

pTask->chkInfo.currentVer = dstVer;
int64_t dstVer = pTask->dataRange.range.minVer;
pTask->chkInfo.nextProcessVer = dstVer;
walReaderSetSkipToVersion(pTask->exec.pWalReader, dstVer);
tqDebug("s-task:%s wal reader start scan WAL verRange:%" PRId64 "-%" PRId64 ", set sched-status:%d", id, dstVer,
pTask->dataRange.range.maxVer, TASK_SCHED_STATUS__INACTIVE);

@@ -1136,7 +1144,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);

// set the fill-history task to be normal
if (pTask->info.fillHistory == 1) {
if (pTask->info.fillHistory == 1 && !streamTaskShouldStop(&pTask->status)) {
streamSetStatusNormal(pTask);
}

@@ -1161,7 +1169,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
tqDebug(
"s-task:%s scan-history in stream time window completed, now start to handle data from WAL, start "
"ver:%" PRId64 ", window:%" PRId64 " - %" PRId64,
id, pTask->chkInfo.currentVer, pWindow->skey, pWindow->ekey);
id, pTask->chkInfo.nextProcessVer, pWindow->skey, pWindow->ekey);
}

code = streamTaskScanHistoryDataComplete(pTask);

@@ -1296,8 +1304,8 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
// even in halt status, the data in inputQ must be processed
int8_t st = pTask->status.taskStatus;
if (st == TASK_STATUS__NORMAL || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK) {
tqDebug("vgId:%d s-task:%s start to process block from inputQ, last chk point:%" PRId64, vgId, pTask->id.idStr,
pTask->chkInfo.currentVer);
tqDebug("vgId:%d s-task:%s start to process block from inputQ, next checked ver:%" PRId64, vgId, pTask->id.idStr,
pTask->chkInfo.nextProcessVer);
streamProcessRunReq(pTask);
} else {
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);

@@ -1436,10 +1444,10 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion,
walReaderSetSkipToVersion(pTask->exec.pWalReader, sversion);
tqDebug("vgId:%d s-task:%s resume to exec, prev paused version:%" PRId64 ", start from vnode ver:%" PRId64
", schedStatus:%d",
vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus);
} else { // from the previous paused version and go on
tqDebug("vgId:%d s-task:%s resume to exec, from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d",
vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus);
}

if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory &&

@@ -312,14 +312,14 @@ static int buildHandle(STQ* pTq, STqHandle* handle){
return -1;
}
} else if (handle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle->pWalReader = walOpenReader(pVnode->pWal, NULL);
handle->pWalReader = walOpenReader(pVnode->pWal, NULL, 0);
handle->execHandle.pTqReader = tqReaderOpen(pVnode);

buildSnapContext(reader.vnode, reader.version, 0, handle->execHandle.subType, handle->fetchMeta,
(SSnapContext**)(&reader.sContext));
handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId);
} else if (handle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
handle->pWalReader = walOpenReader(pVnode->pWal, NULL);
handle->pWalReader = walOpenReader(pVnode->pWal, NULL, 0);

if(handle->execHandle.execTb.qmsg != NULL && strcmp(handle->execHandle.execTb.qmsg, "") != 0) {
if (nodesStringToNode(handle->execHandle.execTb.qmsg, &handle->execHandle.execTb.node) != 0) {

@@ -187,25 +187,26 @@ end:
int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId) {
int32_t code = -1;
int32_t vgId = TD_VID(pTq->pVnode);
int64_t id = pHandle->pWalReader->readerId;

int64_t offset = *fetchOffset;
int64_t lastVer = walGetLastVer(pHandle->pWalReader->pWal);
int64_t committedVer = walGetCommittedVer(pHandle->pWalReader->pWal);
int64_t appliedVer = walGetAppliedVer(pHandle->pWalReader->pWal);

wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64,
vgId, offset, lastVer, committedVer, appliedVer);
wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64", 0x%"PRIx64,
vgId, offset, lastVer, committedVer, appliedVer, id);

while (offset <= appliedVer) {
if (walFetchHead(pHandle->pWalReader, offset) < 0) {
tqDebug("tmq poll: consumer:0x%" PRIx64 ", (epoch %d) vgId:%d offset %" PRId64
", no more log to return, reqId:0x%" PRIx64,
pHandle->consumerId, pHandle->epoch, vgId, offset, reqId);
", no more log to return, reqId:0x%" PRIx64 " 0x%" PRIx64,
pHandle->consumerId, pHandle->epoch, vgId, offset, reqId, id);
goto END;
}

tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64, vgId,
pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId);
tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64" 0x%"PRIx64, vgId,
pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId, id);

if (pHandle->pWalReader->pHead->head.msgType == TDMT_VND_SUBMIT) {
code = walFetchBody(pHandle->pWalReader);

@@ -250,7 +251,7 @@ STqReader* tqReaderOpen(SVnode* pVnode) {
return NULL;
}

pReader->pWalReader = walOpenReader(pVnode->pWal, NULL);
pReader->pWalReader = walOpenReader(pVnode->pWal, NULL, 0);
if (pReader->pWalReader == NULL) {
taosMemoryFree(pReader);
return NULL;

@@ -491,11 +492,11 @@ bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {

void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t));
if (ret != NULL) {
tqDebug("block found, ver:%" PRId64 ", uid:%" PRId64", %s", pReader->msg.ver, pSubmitTbData->uid, idstr);
tqDebug("block found, ver:%" PRId64 ", uid:%" PRId64 ", %s", pReader->msg.ver, pSubmitTbData->uid, idstr);
return true;
} else {
tqInfo("discard submit block, uid:%" PRId64 ", total queried tables:%d continue %s", pSubmitTbData->uid,
taosHashGetSize(pReader->tbIdHash), idstr);
tqDebug("discard submit block, uid:%" PRId64 ", total queried tables:%d continue %s", pSubmitTbData->uid,
taosHashGetSize(pReader->tbIdHash), idstr);
}

pReader->nextBlk++;

@@ -24,10 +24,23 @@ typedef struct STableSinkInfo {
tstr name;
} STableSinkInfo;

static int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName, int64_t suid,
SSDataBlock* pDataBlock, SStreamTask* pTask);
static int32_t doSinkDeleteBlock(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
int64_t suid);
static int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDataBlock, char* stbFullName,
SSubmitTbData* pTableData);
static int32_t setDstTableDataPayload(SStreamTask* pTask, int32_t blockIndex, SSDataBlock* pDataBlock,
SSubmitTbData* pTableData);
static int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
int64_t suid);
static int32_t tqBuildSubmitReq(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, int32_t* msgLen);
static int32_t tsAscendingSortFn(const void* p1, const void* p2);
static int32_t doConvertRows(SSubmitTbData* pTableData, STSchema* pTSchema, SSDataBlock* pDataBlock, const char* id);
static int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkInfo* pTableSinkInfo,
const char* dstTableName, int64_t* uid);
static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id);
static SVCreateTbReq* buildAutoCreateTableReq(char* stbFullName, int64_t suid, int32_t numOfCols,
SSDataBlock* pDataBlock);
static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid);
static int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, const char* id);
static int32_t doBuildAndSendSubmitMsg(SVnode* pVnode, SStreamTask* pTask, SSubmitReq2* pReq, int32_t numOfBlocks);

int32_t tqBuildDeleteReq(const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
const char* pIdStr) {

@@ -112,14 +125,6 @@ static bool tqGetTableInfo(SSHashObj* pTableInfoMap,uint64_t groupId, STableSink
return false;
}

static int32_t tqPutTableInfo(SSHashObj* tblInfo ,uint64_t groupId, STableSinkInfo* pTbl) {
if (tSimpleHashGetSize(tblInfo) > MAX_CACHE_TABLE_INFO_NUM) {
return TSDB_CODE_FAILED;
}

return tSimpleHashPut(tblInfo, &groupId, sizeof(uint64_t), &pTbl, POINTER_BYTES);
}

static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
void* buf = NULL;
int32_t tlen = 0;

@@ -133,147 +138,202 @@ static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
return TSDB_CODE_SUCCESS;
}

static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
int64_t suid) {
tqDebug("s-task:%s build create table msg", pTask->id.idStr);

void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, void* data) {
const SArray* pBlocks = (const SArray*)data;
SVnode* pVnode = (SVnode*)vnode;
int64_t suid = pTask->tbSink.stbUid;
char* stbFullName = pTask->tbSink.stbFullName;
STSchema* pTSchema = pTask->tbSink.pTSchema;
int32_t vgId = TD_VID(pVnode);
int32_t numOfBlocks = taosArrayGetSize(pBlocks);
int32_t code = TSDB_CODE_SUCCESS;
STSchema* pTSchema = pTask->tbSink.pTSchema;
int32_t rows = pDataBlock->info.rows;
SArray* tagArray = NULL;
int32_t code = 0;

tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table", vgId, pTask->id.idStr, numOfBlocks);
SVCreateTbBatchReq reqs = {0};

SArray* tagArray = NULL;
SArray* pVals = NULL;
SArray* crTblArray = NULL;
SArray* crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
if (NULL == reqs.pArray) {
goto _end;
}

for (int32_t i = 0; i < numOfBlocks; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
int32_t rows = pDataBlock->info.rows;
for (int32_t rowId = 0; rowId < rows; rowId++) {
SVCreateTbReq* pCreateTbReq = &((SVCreateTbReq){0});

if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
code = doSinkDeleteBlock(pVnode, stbFullName, pDataBlock, pTask, suid);
} else if (pDataBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
tqDebug("s-task:%s build create table msg", pTask->id.idStr);
// set const
pCreateTbReq->flags = 0;
pCreateTbReq->type = TSDB_CHILD_TABLE;
pCreateTbReq->ctb.suid = suid;

SVCreateTbBatchReq reqs = {0};
crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
if (NULL == reqs.pArray) {
// set super table name
SName name = {0};
tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
pCreateTbReq->ctb.stbName = taosStrdup((char*)tNameGetTableName(&name)); // taosStrdup(stbFullName);

// set tag content
int32_t size = taosArrayGetSize(pDataBlock->pDataBlock);
if (size == 2) {
tagArray = taosArrayInit(1, sizeof(STagVal));
if (!tagArray) {
tdDestroySVCreateTbReq(pCreateTbReq);
goto _end;
}

for (int32_t rowId = 0; rowId < rows; rowId++) {
SVCreateTbReq* pCreateTbReq = &((SVCreateTbReq){0});
STagVal tagVal = {
.cid = pTSchema->numOfCols + 1, .type = TSDB_DATA_TYPE_UBIGINT, .i64 = pDataBlock->info.id.groupId};

// set const
pCreateTbReq->flags = 0;
pCreateTbReq->type = TSDB_CHILD_TABLE;
pCreateTbReq->ctb.suid = suid;
taosArrayPush(tagArray, &tagVal);

// set super table name
SName name = {0};
tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
pCreateTbReq->ctb.stbName = taosStrdup((char*)tNameGetTableName(&name)); // taosStrdup(stbFullName);

// set tag content
int32_t size = taosArrayGetSize(pDataBlock->pDataBlock);
if (size == 2) {
tagArray = taosArrayInit(1, sizeof(STagVal));

if (!tagArray) {
tdDestroySVCreateTbReq(pCreateTbReq);
goto _end;
}

STagVal tagVal = {
.cid = pTSchema->numOfCols + 1, .type = TSDB_DATA_TYPE_UBIGINT, .i64 = pDataBlock->info.id.groupId};

taosArrayPush(tagArray, &tagVal);

// set tag name
SArray* tagName = taosArrayInit(1, TSDB_COL_NAME_LEN);
char tagNameStr[TSDB_COL_NAME_LEN] = "group_id";
taosArrayPush(tagName, tagNameStr);
pCreateTbReq->ctb.tagName = tagName;
} else {
tagArray = taosArrayInit(size - 1, sizeof(STagVal));
if (!tagArray) {
tdDestroySVCreateTbReq(pCreateTbReq);
goto _end;
}

for (int32_t tagId = UD_TAG_COLUMN_INDEX, step = 1; tagId < size; tagId++, step++) {
SColumnInfoData* pTagData = taosArrayGet(pDataBlock->pDataBlock, tagId);

STagVal tagVal = {.cid = pTSchema->numOfCols + step, .type = pTagData->info.type};
void* pData = colDataGetData(pTagData, rowId);
if (colDataIsNull_s(pTagData, rowId)) {
continue;
} else if (IS_VAR_DATA_TYPE(pTagData->info.type)) {
tagVal.nData = varDataLen(pData);
tagVal.pData = (uint8_t*) varDataVal(pData);
} else {
memcpy(&tagVal.i64, pData, pTagData->info.bytes);
}
taosArrayPush(tagArray, &tagVal);
}

}
pCreateTbReq->ctb.tagNum = TMAX(size - UD_TAG_COLUMN_INDEX, 1);

STag* pTag = NULL;
tTagNew(tagArray, 1, false, &pTag);
tagArray = taosArrayDestroy(tagArray);
if (pTag == NULL) {
tdDestroySVCreateTbReq(pCreateTbReq);
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}

pCreateTbReq->ctb.pTag = (uint8_t*)pTag;

// set table name
if (!pDataBlock->info.parTbName[0]) {
SColumnInfoData* pGpIdColInfo = taosArrayGet(pDataBlock->pDataBlock, UD_GROUPID_COLUMN_INDEX);
void* pGpIdData = colDataGetData(pGpIdColInfo, rowId);
pCreateTbReq->name = buildCtbNameByGroupId(stbFullName, *(uint64_t*)pGpIdData);
} else {
pCreateTbReq->name = taosStrdup(pDataBlock->info.parTbName);
}

taosArrayPush(reqs.pArray, pCreateTbReq);
tqDebug("s-task:%s build create table:%s msg complete", pTask->id.idStr, pCreateTbReq->name);
}

reqs.nReqs = taosArrayGetSize(reqs.pArray);
if (tqPutReqToQueue(pVnode, &reqs) != TSDB_CODE_SUCCESS) {
goto _end;
}

tagArray = taosArrayDestroy(tagArray);
taosArrayDestroyEx(crTblArray, (FDelete)tdDestroySVCreateTbReq);
crTblArray = NULL;
} else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
continue;
// set tag name
SArray* tagName = taosArrayInit(1, TSDB_COL_NAME_LEN);
char tagNameStr[TSDB_COL_NAME_LEN] = "group_id";
taosArrayPush(tagName, tagNameStr);
pCreateTbReq->ctb.tagName = tagName;
} else {
code = doSinkResultBlock(pVnode, i, stbFullName, suid, pDataBlock, pTask);
tagArray = taosArrayInit(size - 1, sizeof(STagVal));
if (!tagArray) {
tdDestroySVCreateTbReq(pCreateTbReq);
goto _end;
}

for (int32_t tagId = UD_TAG_COLUMN_INDEX, step = 1; tagId < size; tagId++, step++) {
SColumnInfoData* pTagData = taosArrayGet(pDataBlock->pDataBlock, tagId);

STagVal tagVal = {.cid = pTSchema->numOfCols + step, .type = pTagData->info.type};
void* pData = colDataGetData(pTagData, rowId);
if (colDataIsNull_s(pTagData, rowId)) {
continue;
} else if (IS_VAR_DATA_TYPE(pTagData->info.type)) {
tagVal.nData = varDataLen(pData);
tagVal.pData = (uint8_t*)varDataVal(pData);
} else {
memcpy(&tagVal.i64, pData, pTagData->info.bytes);
}
taosArrayPush(tagArray, &tagVal);
}
}
pCreateTbReq->ctb.tagNum = TMAX(size - UD_TAG_COLUMN_INDEX, 1);

STag* pTag = NULL;
tTagNew(tagArray, 1, false, &pTag);
tagArray = taosArrayDestroy(tagArray);
if (pTag == NULL) {
tdDestroySVCreateTbReq(pCreateTbReq);
code = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}

pCreateTbReq->ctb.pTag = (uint8_t*)pTag;

// set table name
if (!pDataBlock->info.parTbName[0]) {
SColumnInfoData* pGpIdColInfo = taosArrayGet(pDataBlock->pDataBlock, UD_GROUPID_COLUMN_INDEX);

void* pGpIdData = colDataGetData(pGpIdColInfo, rowId);
pCreateTbReq->name = buildCtbNameByGroupId(stbFullName, *(uint64_t*)pGpIdData);
} else {
pCreateTbReq->name = taosStrdup(pDataBlock->info.parTbName);
}

taosArrayPush(reqs.pArray, pCreateTbReq);
tqDebug("s-task:%s build create table:%s msg complete", pTask->id.idStr, pCreateTbReq->name);
}

reqs.nReqs = taosArrayGetSize(reqs.pArray);
code = tqPutReqToQueue(pVnode, &reqs);
if (code != TSDB_CODE_SUCCESS) {
tqError("s-task:%s failed to send create table msg", pTask->id.idStr);
}

_end:
taosArrayDestroy(tagArray);
taosArrayDestroyEx(crTblArray, (FDelete)tdDestroySVCreateTbReq);
return code;
}

int32_t doBuildAndSendSubmitMsg(SVnode* pVnode, SStreamTask* pTask, SSubmitReq2* pReq, int32_t numOfBlocks) {
const char* id = pTask->id.idStr;
int32_t vgId = TD_VID(pVnode);
int32_t len = 0;
void* pBuf = NULL;
int32_t numOfFinalBlocks = taosArrayGetSize(pReq->aSubmitTbData);

int32_t code = tqBuildSubmitReq(pReq, vgId, &pBuf, &len);
if (code != TSDB_CODE_SUCCESS) {
tqError("s-task:%s build submit msg failed, vgId:%d, code:%s", id, vgId, tstrerror(code));
return code;
}

SRpcMsg msg = {.msgType = TDMT_VND_SUBMIT, .pCont = pBuf, .contLen = len};
code = tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
tqDebug("s-task:%s vgId:%d comp %d blocks into %d and send to dstTable(s) completed", id, vgId, numOfBlocks,
|
||||
numOfFinalBlocks);
|
||||
} else {
|
||||
tqError("s-task:%s failed to put into write-queue since %s", id, terrstr());
|
||||
}
|
||||
|
||||
pTask->sinkRecorder.numOfSubmit += 1;
|
||||
|
||||
if ((pTask->sinkRecorder.numOfSubmit % 5000) == 0) {
|
||||
SSinkTaskRecorder* pRec = &pTask->sinkRecorder;
|
||||
double el = (taosGetTimestampMs() - pTask->tsInfo.sinkStart) / 1000.0;
|
||||
tqInfo("s-task:%s vgId:%d write %" PRId64 " blocks (%" PRId64 " rows) in %" PRId64
|
||||
" submit into dst table, duration:%.2f Sec.",
|
||||
pTask->id.idStr, vgId, pRec->numOfBlocks, pRec->numOfRows, pRec->numOfSubmit, el);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
// merge the new submit table block with the existing blocks;
// if a ts in the new data block overlaps with an existing one, the new row replaces it
int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, const char* id) {
  int32_t oldLen = taosArrayGetSize(pExisted->aRowP);
  int32_t newLen = taosArrayGetSize(pNew->aRowP);

  int32_t j = 0, k = 0;
  SArray* pFinal = taosArrayInit(oldLen + newLen, POINTER_BYTES);
  if (pFinal == NULL) {
    tqError("s-task:%s failed to prepare merge result datablock, code:%s", id, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
    return TSDB_CODE_OUT_OF_MEMORY;
  }
|
||||
|
||||
while (j < newLen && k < oldLen) {
|
||||
SRow* pNewRow = taosArrayGetP(pNew->aRowP, j);
|
||||
SRow* pOldRow = taosArrayGetP(pExisted->aRowP, k);
|
||||
if (pNewRow->ts <= pOldRow->ts) {
|
||||
taosArrayPush(pFinal, &pNewRow);
|
||||
j += 1;
|
||||
|
||||
if (pNewRow->ts == pOldRow->ts) {
|
||||
k += 1;
|
||||
tRowDestroy(pOldRow);
|
||||
}
|
||||
} else {
|
||||
taosArrayPush(pFinal, &pOldRow);
|
||||
k += 1;
|
||||
}
|
||||
}
|
||||
|
||||
tqDebug("vgId:%d, s-task:%s write results completed", vgId, pTask->id.idStr);
|
||||
while (j < newLen) {
|
||||
SRow* pRow = taosArrayGetP(pNew->aRowP, j++);
|
||||
taosArrayPush(pFinal, &pRow);
|
||||
}
|
||||
|
||||
_end:
|
||||
taosArrayDestroy(tagArray);
|
||||
taosArrayDestroy(pVals);
|
||||
taosArrayDestroyEx(crTblArray, (FDelete)tdDestroySVCreateTbReq);
|
||||
// TODO: change
|
||||
while (k < oldLen) {
|
||||
SRow* pRow = taosArrayGetP(pExisted->aRowP, k++);
|
||||
taosArrayPush(pFinal, &pRow);
|
||||
}
|
||||
|
||||
taosArrayDestroy(pNew->aRowP);
|
||||
taosArrayDestroy(pExisted->aRowP);
|
||||
pExisted->aRowP = pFinal;
|
||||
|
||||
tqDebug("s-task:%s rows merged, final rows:%d, uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id,
|
||||
(int32_t)taosArrayGetSize(pFinal), pExisted->uid, (pExisted->pCreateTbReq != NULL), (pNew->pCreateTbReq != NULL));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
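// Sketch (not part of the patch): the merge above is a plain two-way merge of two
// timestamp-sorted row lists, where a timestamp present in both lists keeps the new
// row and drops the old one. A self-contained illustration with int64_t keys standing
// in for SRow timestamps:
#include <stdint.h>
#include <stdio.h>

// merge `a` (new rows, n items) and `b` (old rows, m items), both ascending;
// on an equal key the value from `a` wins and the old one is dropped
static int mergeReplaceOnEqual(const int64_t* a, int n, const int64_t* b, int m, int64_t* out) {
  int i = 0, j = 0, k = 0;
  while (i < n && j < m) {
    if (a[i] <= b[j]) {
      out[k++] = a[i];
      if (a[i] == b[j]) j++;  // overlapping ts: skip (i.e. replace) the old row
      i++;
    } else {
      out[k++] = b[j++];
    }
  }
  while (i < n) out[k++] = a[i++];
  while (j < m) out[k++] = b[j++];
  return k;
}

int main(void) {
  int64_t newRows[] = {1, 3, 5};
  int64_t oldRows[] = {2, 3, 4};
  int64_t merged[6];
  int n = mergeReplaceOnEqual(newRows, 3, oldRows, 3, merged);
  for (int i = 0; i < n; i++) printf("%lld ", (long long)merged[i]);  // prints: 1 2 3 4 5
  printf("\n");
  return 0;
}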
|
||||
|
||||
int32_t doSinkDeleteBlock(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
|
||||
int64_t suid) {
|
||||
int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
|
||||
int64_t suid) {
|
||||
SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))};
|
||||
|
||||
int32_t code = tqBuildDeleteReq(stbFullName, pDataBlock, &deleteReq, pTask->id.idStr);
|
||||
|
@ -311,22 +371,25 @@ int32_t doSinkDeleteBlock(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataB
|
|||
  return TSDB_CODE_SUCCESS;
}

static bool isValidDestChildTable(SMetaReader* pReader, int32_t vgId, char* ctbName, int64_t suid) {
bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid) {
  if (pReader->me.type != TSDB_CHILD_TABLE) {
    tqError("vgId:%d, failed to write into %s, since table type:%d incorrect", vgId, ctbName, pReader->me.type);
    terrno = TSDB_CODE_TDB_INVALID_TABLE_TYPE;
    return false;
  }

  if (pReader->me.ctbEntry.suid != suid) {
    tqError("vgId:%d, failed to write into %s, since suid mismatch, expect suid:%" PRId64 ", actual:%" PRId64,
            vgId, ctbName, suid, pReader->me.ctbEntry.suid);
    terrno = TSDB_CODE_TDB_TABLE_IN_OTHER_STABLE;
    return false;
  }

  terrno = 0;
  return true;
}
|
||||
|
||||
static SVCreateTbReq* buildAutoCreateTableReq(char* stbFullName, int64_t suid, int32_t numOfCols, SSDataBlock* pDataBlock) {
|
||||
SVCreateTbReq* buildAutoCreateTableReq(char* stbFullName, int64_t suid, int32_t numOfCols, SSDataBlock* pDataBlock) {
|
||||
char* ctbName = pDataBlock->info.parTbName;
|
||||
|
||||
SVCreateTbReq* pCreateTbReq = taosMemoryCalloc(1, sizeof(SVCreateStbReq));
|
||||
|
@ -383,14 +446,14 @@ static SVCreateTbReq* buildAutoCreateTableReq(char* stbFullName, int64_t suid, i
|
|||
return pCreateTbReq;
|
||||
}
|
||||
|
||||
static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, uint64_t uid,
|
||||
const char* id) {
|
||||
pTableSinkInfo->uid = uid;
|
||||
int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) {
|
||||
if (tSimpleHashGetSize(pSinkTableMap) > MAX_CACHE_TABLE_INFO_NUM) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
int32_t code = tqPutTableInfo(pSinkTableMap, groupId, pTableSinkInfo);
|
||||
int32_t code = tSimpleHashPut(pSinkTableMap, &groupId, sizeof(uint64_t), &pTableSinkInfo, POINTER_BYTES);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
taosMemoryFreeClear(pTableSinkInfo);
|
||||
tqError("s-task:%s failed to put tableSinkInfo in to cache, code:%s", id, tstrerror(code));
|
||||
} else {
|
||||
tqDebug("s-task:%s new dst table:%s(uid:%" PRIu64 ") added into cache, total:%d", id, pTableSinkInfo->name.data,
|
||||
pTableSinkInfo->uid, tSimpleHashGetSize(pSinkTableMap));
|
||||
|
@ -399,147 +462,72 @@ static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSi
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName, int64_t suid, SSDataBlock* pDataBlock,
|
||||
SStreamTask* pTask) {
|
||||
int32_t numOfRows = pDataBlock->info.rows;
|
||||
int32_t vgId = TD_VID(pVnode);
|
||||
uint64_t groupId = pDataBlock->info.id.groupId;
|
||||
STSchema* pTSchema = pTask->tbSink.pTSchema;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
void* pBuf = NULL;
|
||||
SArray* pVals = NULL;
|
||||
const char* id = pTask->id.idStr;
|
||||
int32_t tqBuildSubmitReq(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, int32_t* msgLen) {
|
||||
int32_t code = 0;
|
||||
void* pBuf = NULL;
|
||||
*msgLen = 0;
|
||||
|
||||
SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version};
|
||||
tqDebug("s-task:%s sink data pipeline, build submit msg from %d-th resBlock, including %d rows, dst suid:%" PRId64,
|
||||
id, blockIndex + 1, numOfRows, suid);
|
||||
// encode
|
||||
int32_t len = 0;
|
||||
tEncodeSize(tEncodeSubmitReq, pSubmitReq, len, code);
|
||||
|
||||
tbData.aRowP = taosArrayInit(numOfRows, sizeof(SRow*));
|
||||
pVals = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));
|
||||
SEncoder encoder;
|
||||
len += sizeof(SSubmitReq2Msg);
|
||||
|
||||
if (tbData.aRowP == NULL || pVals == NULL) {
|
||||
taosArrayDestroy(tbData.aRowP);
|
||||
taosArrayDestroy(pVals);
|
||||
pBuf = rpcMallocCont(len);
|
||||
if (NULL == pBuf) {
|
||||
tDestroySubmitReq(pSubmitReq, TSDB_MSG_FLG_ENCODE);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tqError("s-task:%s vgId:%d failed to prepare write stream res blocks, code:%s", id, vgId, tstrerror(code));
|
||||
((SSubmitReq2Msg*)pBuf)->header.vgId = vgId;
|
||||
((SSubmitReq2Msg*)pBuf)->header.contLen = htonl(len);
|
||||
((SSubmitReq2Msg*)pBuf)->version = htobe64(1);
|
||||
|
||||
tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SSubmitReq2Msg)), len - sizeof(SSubmitReq2Msg));
|
||||
if (tEncodeSubmitReq(&encoder, pSubmitReq) < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tqError("failed to encode submit req, code:%s, ignore and continue", terrstr());
|
||||
tEncoderClear(&encoder);
|
||||
rpcFreeCont(pBuf);
|
||||
tDestroySubmitReq(pSubmitReq, TSDB_MSG_FLG_ENCODE);
|
||||
return code;
|
||||
}
|
||||
|
||||
STableSinkInfo* pTableSinkInfo = NULL;
|
||||
bool exist = tqGetTableInfo(pTask->tbSink.pTblInfo, groupId, &pTableSinkInfo);
|
||||
tEncoderClear(&encoder);
|
||||
tDestroySubmitReq(pSubmitReq, TSDB_MSG_FLG_ENCODE);
|
||||
|
||||
char* dstTableName = pDataBlock->info.parTbName;
|
||||
if (exist) {
|
||||
if (dstTableName[0] == 0) {
|
||||
tstrncpy(dstTableName, pTableSinkInfo->name.data, pTableSinkInfo->name.len + 1);
|
||||
tqDebug("s-task:%s vgId:%d, gropuId:%" PRIu64 " datablock table name is null, set name:%s", id, vgId, groupId,
|
||||
dstTableName);
|
||||
} else {
|
||||
if (pTableSinkInfo->uid != 0) {
|
||||
tqDebug("s-task:%s write %d rows into groupId:%" PRIu64 " dstTable:%s(uid:%" PRIu64 ")", id, numOfRows, groupId,
|
||||
dstTableName, pTableSinkInfo->uid);
|
||||
} else {
|
||||
tqDebug("s-task:%s write %d rows into groupId:%" PRIu64 " dstTable:%s(not set uid yet for the secondary block)",
|
||||
id, numOfRows, groupId, dstTableName);
|
||||
}
|
||||
}
|
||||
} else { // not exist
|
||||
if (dstTableName[0] == 0) {
|
||||
memset(dstTableName, 0, TSDB_TABLE_NAME_LEN);
|
||||
buildCtbNameByGroupIdImpl(stbFullName, groupId, dstTableName);
|
||||
}
|
||||
*msgLen = len;
|
||||
*pMsg = pBuf;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
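// Sketch (not part of the patch): tqBuildSubmitReq above follows the usual two-pass
// serialization shape: compute the encoded size first, allocate one buffer that holds
// a fixed header plus the payload, then encode into the tail. The types and helpers
// below are illustrative stand-ins, not TDengine APIs:
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  uint32_t contLen;  // total message length, header included
  uint32_t vgId;     // destination vgroup
} MsgHeader;

static size_t payloadEncodedSize(int32_t n) {
  return sizeof(int32_t) * (size_t)(n + 1);  // count + values
}

static void payloadEncode(uint8_t* p, const int32_t* vals, int32_t n) {
  memcpy(p, &n, sizeof(int32_t));
  memcpy(p + sizeof(int32_t), vals, sizeof(int32_t) * (size_t)n);
}

static uint8_t* buildMsg(const int32_t* vals, int32_t n, uint32_t vgId, size_t* outLen) {
  size_t len = sizeof(MsgHeader) + payloadEncodedSize(n);  // pass 1: size only
  uint8_t* buf = malloc(len);
  if (buf == NULL) return NULL;
  MsgHeader* hdr = (MsgHeader*)buf;
  hdr->contLen = (uint32_t)len;
  hdr->vgId = vgId;
  payloadEncode(buf + sizeof(MsgHeader), vals, n);  // pass 2: encode into the tail
  *outLen = len;
  return buf;
}

int main(void) {
  int32_t vals[] = {7, 8, 9};
  size_t len = 0;
  uint8_t* msg = buildMsg(vals, 3, 2, &len);
  if (msg != NULL) {
    printf("built %zu bytes for vgId %u\n", len, ((MsgHeader*)msg)->vgId);
    free(msg);
  }
  return 0;
}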
|
||||
|
||||
int32_t nameLen = strlen(dstTableName);
|
||||
pTableSinkInfo = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen);
|
||||
|
||||
pTableSinkInfo->name.len = nameLen;
|
||||
memcpy(pTableSinkInfo->name.data, dstTableName, nameLen);
|
||||
tqDebug("s-task:%s build new sinkTableInfo to add cache, dstTable:%s", id, dstTableName);
|
||||
}
|
||||
|
||||
if (exist) {
|
||||
tbData.uid = pTableSinkInfo->uid;
|
||||
|
||||
if (tbData.uid == 0) {
|
||||
tqDebug("s-task:%s cached tableInfo uid is invalid, acquire it from meta", id);
|
||||
}
|
||||
|
||||
while (pTableSinkInfo->uid == 0) {
|
||||
// wait for the table to be created
|
||||
SMetaReader mr = {0};
|
||||
metaReaderDoInit(&mr, pVnode->pMeta, 0);
|
||||
|
||||
code = metaGetTableEntryByName(&mr, dstTableName);
|
||||
if (code == 0) { // table already exists, check its type and uid
|
||||
bool isValid = isValidDestChildTable(&mr, vgId, dstTableName, suid);
|
||||
if (!isValid) { // not valid table, ignore it
|
||||
metaReaderClear(&mr);
|
||||
|
||||
taosArrayDestroy(tbData.aRowP);
|
||||
taosArrayDestroy(pVals);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
tqDebug("s-task:%s set uid:%"PRIu64" for dstTable:%s from meta", id, mr.me.uid, pTableSinkInfo->name.data);
|
||||
|
||||
tbData.uid = mr.me.uid;
|
||||
pTableSinkInfo->uid = mr.me.uid;
|
||||
metaReaderClear(&mr);
|
||||
}
|
||||
} else { // not exist, wait and retry
|
||||
metaReaderClear(&mr);
|
||||
taosMsleep(100);
|
||||
tqDebug("s-task:%s wait for the table:%s ready before insert data", id, dstTableName);
|
||||
}
|
||||
}
|
||||
int32_t tsAscendingSortFn(const void* p1, const void* p2) {
|
||||
SRow* pRow1 = *(SRow**) p1;
|
||||
SRow* pRow2 = *(SRow**) p2;
|
||||
|
||||
if (pRow1->ts == pRow2->ts) {
|
||||
return 0;
|
||||
} else {
|
||||
// todo: this check is not safe, and may result in the loss of submit messages from the WAL.
|
||||
// The auto-create option will always set to be open for those submit messages, which arrive during the period
|
||||
// the creating of the destination table, due to the absence of the user-specified table in TSDB. When scanning
|
||||
// data from WAL, those submit messages, with auto-created table option, will be discarded except the first, for
|
||||
// those mismatched table uids. Only the FIRST table has the correct table uid, and those remain all have
|
||||
// randomly generated false table uid in the WAL.
|
||||
SMetaReader mr = {0};
|
||||
metaReaderDoInit(&mr, pVnode->pMeta, 0);
|
||||
return pRow1->ts > pRow2->ts? 1:-1;
|
||||
}
|
||||
}
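// Sketch (not part of the patch): tsAscendingSortFn above is a qsort-style three-way
// comparator over SRow pointers keyed on the row timestamp; ties return 0 so duplicate
// timestamps can be collapsed later. A minimal usage example with a stand-in Row type:
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int64_t ts;
} Row;

// three-way compare on the timestamp of Row pointers, ascending
static int tsAsc(const void* p1, const void* p2) {
  const Row* r1 = *(const Row* const*)p1;
  const Row* r2 = *(const Row* const*)p2;
  if (r1->ts == r2->ts) return 0;
  return (r1->ts > r2->ts) ? 1 : -1;
}

int main(void) {
  Row a = {30}, b = {10}, c = {20};
  Row* rows[] = {&a, &b, &c};
  qsort(rows, 3, sizeof(Row*), tsAsc);  // sorts the pointer array by ascending ts
  for (int i = 0; i < 3; i++) printf("%lld ", (long long)rows[i]->ts);  // prints: 10 20 30
  printf("\n");
  return 0;
}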
|
||||
|
||||
// table not in cache, try to extract it from the tsdb meta
|
||||
if (metaGetTableEntryByName(&mr, dstTableName) < 0) {
|
||||
metaReaderClear(&mr);
|
||||
int32_t doConvertRows(SSubmitTbData* pTableData, STSchema* pTSchema, SSDataBlock* pDataBlock, const char* id) {
|
||||
int32_t numOfRows = pDataBlock->info.rows;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
tqDebug("s-task:%s stream write into table:%s, table auto created", id, dstTableName);
|
||||
SArray* pVals = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));
|
||||
pTableData->aRowP = taosArrayInit(numOfRows, sizeof(SRow*));
|
||||
|
||||
tbData.flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
|
||||
tbData.pCreateTbReq = buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock);
|
||||
if (tbData.pCreateTbReq == NULL) {
|
||||
tqError("s-task:%s failed to build auto create table req, code:%s", id, tstrerror(terrno));
|
||||
|
||||
taosArrayDestroy(tbData.aRowP);
|
||||
taosArrayDestroy(pVals);
|
||||
|
||||
return terrno;
|
||||
}
|
||||
|
||||
doPutIntoCache(pTask->tbSink.pTblInfo, pTableSinkInfo, groupId, 0, id);
|
||||
} else {
|
||||
bool isValid = isValidDestChildTable(&mr, vgId, dstTableName, suid);
|
||||
if (!isValid) {
|
||||
metaReaderClear(&mr);
|
||||
taosMemoryFree(pTableSinkInfo);
|
||||
taosArrayDestroy(tbData.aRowP);
|
||||
taosArrayDestroy(pVals);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
tbData.uid = mr.me.uid;
|
||||
metaReaderClear(&mr);
|
||||
|
||||
doPutIntoCache(pTask->tbSink.pTblInfo, pTableSinkInfo, groupId, tbData.uid, id);
|
||||
}
|
||||
}
|
||||
if (pTableData->aRowP == NULL || pVals == NULL) {
|
||||
pTableData->aRowP = taosArrayDestroy(pTableData->aRowP);
|
||||
taosArrayDestroy(pVals);
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tqError("s-task:%s failed to prepare write stream res blocks, code:%s", id, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
// rows
|
||||
for (int32_t j = 0; j < numOfRows; j++) {
|
||||
taosArrayClear(pVals);
|
||||
|
||||
|
@ -549,7 +537,7 @@ int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName,
|
|||
if (k == 0) {
|
||||
SColumnInfoData* pColData = taosArrayGet(pDataBlock->pDataBlock, dataIndex);
|
||||
void* colData = colDataGetData(pColData, j);
|
||||
tqDebug("s-task:%s tq sink pipe2, row %d, col %d ts %" PRId64, id, j, k, *(int64_t*)colData);
|
||||
tqDebug("s-task:%s sink row %d, col %d ts %" PRId64, id, j, k, *(int64_t*)colData);
|
||||
}
|
||||
|
||||
if (IS_SET_NULL(pCol)) {
|
||||
|
@ -565,7 +553,7 @@ int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName,
|
|||
void* colData = colDataGetData(pColData, j);
|
||||
if (IS_STR_DATA_TYPE(pCol->type)) {
|
||||
// address copy, no value
|
||||
SValue sv = (SValue){.nData = varDataLen(colData), .pData = varDataVal(colData)};
|
||||
SValue sv = (SValue){.nData = varDataLen(colData), .pData = (uint8_t*) varDataVal(colData)};
|
||||
SColVal cv = COL_VAL_VALUE(pCol->colId, pCol->type, sv);
|
||||
taosArrayPush(pVals, &cv);
|
||||
} else {
|
||||
|
@ -582,69 +570,313 @@ int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName,
|
|||
SRow* pRow = NULL;
|
||||
code = tRowBuild(pVals, (STSchema*)pTSchema, &pRow);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tDestroySubmitTbData(&tbData, TSDB_MSG_FLG_ENCODE);
|
||||
|
||||
taosArrayDestroy(tbData.aRowP);
|
||||
tDestroySubmitTbData(pTableData, TSDB_MSG_FLG_ENCODE);
|
||||
pTableData->aRowP = taosArrayDestroy(pTableData->aRowP);
|
||||
taosArrayDestroy(pVals);
|
||||
return code;
|
||||
}
|
||||
|
||||
ASSERT(pRow);
|
||||
taosArrayPush(tbData.aRowP, &pRow);
|
||||
}
|
||||
|
||||
SSubmitReq2 submitReq = {0};
|
||||
if (!(submitReq.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData)))) {
|
||||
tDestroySubmitTbData(&tbData, TSDB_MSG_FLG_ENCODE);
|
||||
|
||||
taosArrayDestroy(tbData.aRowP);
|
||||
taosArrayDestroy(pVals);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
taosArrayPush(submitReq.aSubmitTbData, &tbData);
|
||||
|
||||
// encode
|
||||
int32_t len = 0;
|
||||
tEncodeSize(tEncodeSubmitReq, &submitReq, len, code);
|
||||
|
||||
SEncoder encoder;
|
||||
len += sizeof(SSubmitReq2Msg);
|
||||
|
||||
pBuf = rpcMallocCont(len);
|
||||
if (NULL == pBuf) {
|
||||
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
|
||||
taosArrayDestroy(tbData.aRowP);
|
||||
taosArrayDestroy(pVals);
|
||||
}
|
||||
|
||||
((SSubmitReq2Msg*)pBuf)->header.vgId = vgId;
|
||||
((SSubmitReq2Msg*)pBuf)->header.contLen = htonl(len);
|
||||
((SSubmitReq2Msg*)pBuf)->version = htobe64(1);
|
||||
|
||||
tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SSubmitReq2Msg)), len - sizeof(SSubmitReq2Msg));
|
||||
if (tEncodeSubmitReq(&encoder, &submitReq) < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tqError("failed to encode submit req, code:%s, ignore and continue", terrstr());
|
||||
tEncoderClear(&encoder);
|
||||
rpcFreeCont(pBuf);
|
||||
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
tEncoderClear(&encoder);
|
||||
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
|
||||
|
||||
SRpcMsg msg = { .msgType = TDMT_VND_SUBMIT, .pCont = pBuf, .contLen = len };
|
||||
code = tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg);
|
||||
|
||||
if(code == TSDB_CODE_SUCCESS) {
|
||||
tqDebug("s-task:%s send submit msg to dstTable:%s, numOfRows:%d", id, pTableSinkInfo->name.data, numOfRows);
|
||||
} else {
|
||||
tqError("s-task:%s failed to put into write-queue since %s", id, terrstr());
|
||||
taosArrayPush(pTableData->aRowP, &pRow);
|
||||
}
|
||||
|
||||
taosArrayDestroy(pVals);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkInfo* pTableSinkInfo,
|
||||
const char* dstTableName, int64_t* uid) {
|
||||
int32_t vgId = TD_VID(pVnode);
|
||||
int64_t suid = pTask->tbSink.stbUid;
|
||||
const char* id = pTask->id.idStr;
|
||||
|
||||
while (pTableSinkInfo->uid == 0) {
|
||||
if (streamTaskShouldStop(&pTask->status)) {
|
||||
tqDebug("s-task:%s task will stop, quit from waiting for table:%s create", id, dstTableName);
|
||||
return TSDB_CODE_STREAM_EXEC_CANCELLED;
|
||||
}
|
||||
|
||||
// wait for the table to be created
|
||||
SMetaReader mr = {0};
|
||||
metaReaderDoInit(&mr, pVnode->pMeta, 0);
|
||||
|
||||
int32_t code = metaGetTableEntryByName(&mr, dstTableName);
|
||||
if (code == 0) { // table already exists, check its type and uid
|
||||
bool isValid = isValidDstChildTable(&mr, vgId, dstTableName, suid);
|
||||
if (isValid) { // valid child table, use its uid
|
||||
tqDebug("s-task:%s set uid:%" PRIu64 " for dstTable:%s from meta", id, mr.me.uid, pTableSinkInfo->name.data);
|
||||
ASSERT(terrno == 0);
|
||||
|
||||
// set the destination table uid
|
||||
(*uid) = mr.me.uid;
|
||||
pTableSinkInfo->uid = mr.me.uid;
|
||||
}
|
||||
|
||||
metaReaderClear(&mr);
|
||||
return terrno;
|
||||
} else { // not exist, wait and retry
|
||||
metaReaderClear(&mr);
|
||||
taosMsleep(100);
|
||||
tqDebug("s-task:%s wait 100ms for the table:%s ready before insert data", id, dstTableName);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
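// Sketch (not part of the patch): doWaitForDstTableCreated above polls the meta store
// until the destination child table appears, sleeping 100 ms between attempts and
// bailing out if the task is asked to stop. The same loop shape with stub lookup and
// sleep functions (illustrative, not TDengine APIs):
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int attempts = 0;

// stand-in for the meta lookup: the "table" becomes visible on the third poll
static bool stubLookup(const char* name, uint64_t* uid) {
  (void)name;
  if (++attempts < 3) return false;
  *uid = 42;
  return true;
}

static void stubSleepMs(int ms) { (void)ms; }  // stand-in for the real sleep call

static int waitForUid(bool (*lookup)(const char*, uint64_t*), const bool* shouldStop,
                      const char* name, void (*sleepMs)(int), uint64_t* uid) {
  for (;;) {
    if (*shouldStop) return -1;       // task asked to stop while waiting
    if (lookup(name, uid)) return 0;  // table is visible now, uid filled in
    sleepMs(100);                     // not created yet, retry shortly
  }
}

int main(void) {
  bool stop = false;
  uint64_t uid = 0;
  if (waitForUid(stubLookup, &stop, "ctb_1", stubSleepMs, &uid) == 0) {
    printf("uid=%llu after %d polls\n", (unsigned long long)uid, attempts);
  }
  return 0;
}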
|
||||
|
||||
int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDataBlock, char* stbFullName,
|
||||
SSubmitTbData* pTableData) {
|
||||
uint64_t groupId = pDataBlock->info.id.groupId;
|
||||
char* dstTableName = pDataBlock->info.parTbName;
|
||||
int32_t numOfRows = pDataBlock->info.rows;
|
||||
const char* id = pTask->id.idStr;
|
||||
int64_t suid = pTask->tbSink.stbUid;
|
||||
STSchema* pTSchema = pTask->tbSink.pTSchema;
|
||||
int32_t vgId = TD_VID(pVnode);
|
||||
STableSinkInfo* pTableSinkInfo = NULL;
|
||||
|
||||
bool alreadyCached = tqGetTableInfo(pTask->tbSink.pTblInfo, groupId, &pTableSinkInfo);
|
||||
|
||||
if (alreadyCached) {
|
||||
if (dstTableName[0] == 0) { // data block does not set the destination table name
|
||||
tstrncpy(dstTableName, pTableSinkInfo->name.data, pTableSinkInfo->name.len + 1);
|
||||
tqDebug("s-task:%s vgId:%d, gropuId:%" PRIu64 " datablock table name is null, set name:%s", id, vgId, groupId,
|
||||
dstTableName);
|
||||
} else {
|
||||
if (pTableSinkInfo->uid != 0) {
|
||||
tqDebug("s-task:%s write %d rows into groupId:%" PRIu64 " dstTable:%s(uid:%" PRIu64 ")", id, numOfRows, groupId,
|
||||
dstTableName, pTableSinkInfo->uid);
|
||||
} else {
|
||||
tqDebug("s-task:%s write %d rows into groupId:%" PRIu64 " dstTable:%s(not set uid yet for the secondary block)",
|
||||
id, numOfRows, groupId, dstTableName);
|
||||
}
|
||||
}
|
||||
} else { // this groupId has not been kept in cache yet
|
||||
if (dstTableName[0] == 0) {
|
||||
memset(dstTableName, 0, TSDB_TABLE_NAME_LEN);
|
||||
buildCtbNameByGroupIdImpl(stbFullName, groupId, dstTableName);
|
||||
}
|
||||
|
||||
int32_t nameLen = strlen(dstTableName);
|
||||
pTableSinkInfo = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen);
|
||||
if (pTableSinkInfo == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pTableSinkInfo->name.len = nameLen;
|
||||
memcpy(pTableSinkInfo->name.data, dstTableName, nameLen);
|
||||
tqDebug("s-task:%s build new sinkTableInfo to add cache, dstTable:%s", id, dstTableName);
|
||||
}
|
||||
|
||||
if (alreadyCached) {
|
||||
pTableData->uid = pTableSinkInfo->uid;
|
||||
|
||||
if (pTableData->uid == 0) {
|
||||
tqDebug("s-task:%s cached tableInfo uid is invalid, acquire it from meta", id);
|
||||
return doWaitForDstTableCreated(pVnode, pTask, pTableSinkInfo, dstTableName, &pTableData->uid);
|
||||
} else {
|
||||
tqDebug("s-task:%s set the dstTable uid from cache:%"PRId64, id, pTableData->uid);
|
||||
}
|
||||
  } else {
    // The auto-create option is always enabled for submit messages that arrive while the destination table is
    // still being created, because the user-specified table is absent from TSDB at that point. When scanning
    // data from the WAL, those auto-create submit messages are discarded except the first one, due to the
    // mismatched table uids: only the FIRST table carries the correct table uid, and the remaining ones all
    // carry a randomly generated, false table uid in the WAL.
    SMetaReader mr = {0};
    metaReaderDoInit(&mr, pVnode->pMeta, 0);

    // table not in cache, try to extract it from the tsdb meta
    if (metaGetTableEntryByName(&mr, dstTableName) < 0) {
      metaReaderClear(&mr);

      tqDebug("s-task:%s stream write into table:%s, table auto created", id, dstTableName);

      pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
      pTableData->pCreateTbReq = buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock);
      if (pTableData->pCreateTbReq == NULL) {
        tqError("s-task:%s failed to build auto create table req, code:%s", id, tstrerror(terrno));
        taosMemoryFree(pTableSinkInfo);
        return terrno;
      }

      pTableSinkInfo->uid = 0;
      doPutIntoCache(pTask->tbSink.pTblInfo, pTableSinkInfo, groupId, id);
||||
} else {
|
||||
bool isValid = isValidDstChildTable(&mr, vgId, dstTableName, suid);
|
||||
if (!isValid) {
|
||||
metaReaderClear(&mr);
|
||||
taosMemoryFree(pTableSinkInfo);
|
||||
tqError("s-task:%s vgId:%d table:%s already exists, but not child table, stream results is discarded", id, vgId,
|
||||
dstTableName);
|
||||
return terrno;
|
||||
} else {
|
||||
pTableData->uid = mr.me.uid;
|
||||
pTableSinkInfo->uid = mr.me.uid;
|
||||
|
||||
metaReaderClear(&mr);
|
||||
doPutIntoCache(pTask->tbSink.pTblInfo, pTableSinkInfo, groupId, id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t setDstTableDataPayload(SStreamTask* pTask, int32_t blockIndex, SSDataBlock* pDataBlock,
|
||||
SSubmitTbData* pTableData) {
|
||||
int32_t numOfRows = pDataBlock->info.rows;
|
||||
const char* id = pTask->id.idStr;
|
||||
|
||||
tqDebug("s-task:%s sink data pipeline, build submit msg from %dth resBlock, including %d rows, dst suid:%" PRId64,
|
||||
id, blockIndex + 1, numOfRows, pTask->tbSink.stbUid);
|
||||
char* dstTableName = pDataBlock->info.parTbName;
|
||||
|
||||
// convert all rows
|
||||
int32_t code = doConvertRows(pTableData, pTask->tbSink.pTSchema, pDataBlock, id);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("s-task:%s failed to convert rows from result block, code:%s", id, tstrerror(terrno));
|
||||
return code;
|
||||
}
|
||||
|
||||
taosArraySort(pTableData->aRowP, tsAscendingSortFn);
|
||||
tqDebug("s-task:%s build submit msg for dstTable:%s, numOfRows:%d", id, dstTableName, numOfRows);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
|
||||
const SArray* pBlocks = (const SArray*)data;
|
||||
SVnode* pVnode = (SVnode*)vnode;
|
||||
int64_t suid = pTask->tbSink.stbUid;
|
||||
char* stbFullName = pTask->tbSink.stbFullName;
|
||||
STSchema* pTSchema = pTask->tbSink.pTSchema;
|
||||
int32_t vgId = TD_VID(pVnode);
|
||||
int32_t numOfBlocks = taosArrayGetSize(pBlocks);
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
const char* id = pTask->id.idStr;
|
||||
|
||||
if (pTask->tsInfo.sinkStart == 0) {
|
||||
pTask->tsInfo.sinkStart = taosGetTimestampMs();
|
||||
}
|
||||
|
||||
bool onlySubmitData = true;
|
||||
for(int32_t i = 0; i < numOfBlocks; ++i) {
|
||||
SSDataBlock* p = taosArrayGet(pBlocks, i);
|
||||
if (p->info.type == STREAM_DELETE_RESULT || p->info.type == STREAM_CREATE_CHILD_TABLE) {
|
||||
onlySubmitData = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!onlySubmitData) {
|
||||
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id,
|
||||
numOfBlocks);
|
||||
|
||||
for(int32_t i = 0; i < numOfBlocks; ++i) {
|
||||
if (streamTaskShouldStop(&pTask->status)) {
|
||||
return;
|
||||
}
|
||||
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
|
||||
code = doBuildAndSendDeleteMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
|
||||
} else if (pDataBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
|
||||
code = doBuildAndSendCreateTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
|
||||
} else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
|
||||
continue;
|
||||
} else {
|
||||
pTask->sinkRecorder.numOfBlocks += 1;
|
||||
|
||||
SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
|
||||
if (submitReq.aSubmitTbData == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code));
|
||||
return;
|
||||
}
|
||||
|
||||
SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version};
|
||||
code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
continue;
|
||||
}
|
||||
|
||||
code = setDstTableDataPayload(pTask, i, pDataBlock, &tbData);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
continue;
|
||||
}
|
||||
|
||||
taosArrayPush(submitReq.aSubmitTbData, &tbData);
|
||||
code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, merge submit msg", vgId, id, numOfBlocks);
|
||||
SHashObj* pTableIndexMap = taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||
|
||||
SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
|
||||
if (submitReq.aSubmitTbData == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code));
|
||||
taosHashCleanup(pTableIndexMap);
|
||||
return;
|
||||
}
|
||||
|
||||
bool hasSubmit = false;
|
||||
for (int32_t i = 0; i < numOfBlocks; i++) {
|
||||
if (streamTaskShouldStop(&pTask->status)) {
|
||||
return;
|
||||
}
|
||||
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
if (pDataBlock->info.type == STREAM_CHECKPOINT) {
|
||||
continue;
|
||||
}
|
||||
|
||||
hasSubmit = true;
|
||||
pTask->sinkRecorder.numOfBlocks += 1;
|
||||
uint64_t groupId = pDataBlock->info.id.groupId;
|
||||
|
||||
SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version};
|
||||
|
||||
int32_t* index = taosHashGet(pTableIndexMap, &groupId, sizeof(groupId));
|
||||
if (index == NULL) { // no data yet, append it
|
||||
code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
continue;
|
||||
}
|
||||
|
||||
code = setDstTableDataPayload(pTask, i, pDataBlock, &tbData);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
continue;
|
||||
}
|
||||
|
||||
taosArrayPush(submitReq.aSubmitTbData, &tbData);
|
||||
|
||||
int32_t size = (int32_t)taosArrayGetSize(submitReq.aSubmitTbData) - 1;
|
||||
taosHashPut(pTableIndexMap, &groupId, sizeof(groupId), &size, sizeof(size));
|
||||
} else {
|
||||
code = setDstTableDataPayload(pTask, i, pDataBlock, &tbData);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
continue;
|
||||
}
|
||||
|
||||
SSubmitTbData* pExisted = taosArrayGet(submitReq.aSubmitTbData, *index);
|
||||
code = doMergeExistedRows(pExisted, &tbData, id);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
pTask->sinkRecorder.numOfRows += pDataBlock->info.rows;
|
||||
}
|
||||
|
||||
taosHashCleanup(pTableIndexMap);
|
||||
|
||||
if (hasSubmit) {
|
||||
doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, numOfBlocks);
|
||||
} else {
|
||||
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
|
||||
tqDebug("vgId:%d, s-task:%s write results completed", vgId, id);
|
||||
}
|
||||
}
|
||||
}
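// Sketch (not part of the patch): the merged-submit path above keeps one SSubmitTbData
// entry per groupId by remembering, in a hash map, the index of the entry already
// appended for that group; later blocks of the same group are merged into that entry
// instead of being appended again. The same idea with a tiny linear index map:
#include <stdint.h>
#include <stdio.h>

#define MAX_GROUPS 16

typedef struct { uint64_t groupId; int64_t rows; } Entry;
typedef struct { uint64_t key[MAX_GROUPS]; int idx[MAX_GROUPS]; int n; } IndexMap;

static int mapGet(const IndexMap* m, uint64_t key) {
  for (int i = 0; i < m->n; i++) {
    if (m->key[i] == key) return m->idx[i];
  }
  return -1;
}

static void mapPut(IndexMap* m, uint64_t key, int idx) {
  if (m->n < MAX_GROUPS) {  // bounded toy map; the real code uses a hash table
    m->key[m->n] = key;
    m->idx[m->n] = idx;
    m->n++;
  }
}

int main(void) {
  uint64_t blockGroup[] = {1, 2, 1, 2, 1};  // groupId of each incoming result block
  int64_t  blockRows[]  = {10, 5, 7, 3, 1};
  Entry    submit[MAX_GROUPS];
  int      nSubmit = 0;
  IndexMap map = {.n = 0};

  for (int i = 0; i < 5; i++) {
    int at = mapGet(&map, blockGroup[i]);
    if (at < 0) {  // first block of this group: append a new entry and remember its index
      submit[nSubmit].groupId = blockGroup[i];
      submit[nSubmit].rows = blockRows[i];
      mapPut(&map, blockGroup[i], nSubmit);
      nSubmit++;
    } else {       // later block of the same group: merge into the existing entry
      submit[at].rows += blockRows[i];
    }
  }

  for (int i = 0; i < nSubmit; i++) {
    printf("group %llu -> %lld rows\n", (unsigned long long)submit[i].groupId, (long long)submit[i].rows);
  }
  return 0;
}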
|
||||
|
|
|
@ -51,7 +51,7 @@ int32_t streamStateSnapReaderOpen(STQ* pTq, int64_t sver, int64_t ever, SStreamS
|
|||
|
||||
SStreamSnapReader* pSnapReader = NULL;
|
||||
|
||||
if (streamSnapReaderOpen(pTq, sver, chkpId, pTq->path, &pSnapReader) == 0) {
|
||||
if (streamSnapReaderOpen(meta, sver, chkpId, pTq->path, &pSnapReader) == 0) {
|
||||
pReader->complete = 1;
|
||||
} else {
|
||||
code = -1;
|
||||
|
|
|
@ -94,6 +94,9 @@ int32_t tqCheckAndRunStreamTask(STQ* pTq) {
|
|||
continue;
|
||||
}
|
||||
|
||||
pTask->tsInfo.init = taosGetTimestampMs();
|
||||
tqDebug("s-task:%s set the init ts:%"PRId64, pTask->id.idStr, pTask->tsInfo.init);
|
||||
|
||||
streamSetStatusNormal(pTask);
|
||||
streamTaskCheckDownstream(pTask);
|
||||
|
||||
|
@ -112,7 +115,7 @@ int32_t tqCheckAndRunStreamTaskAsync(STQ* pTq) {
|
|||
|
||||
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
|
||||
if (numOfTasks == 0) {
|
||||
tqInfo("vgId:%d no stream tasks exist", vgId);
|
||||
tqDebug("vgId:%d no stream tasks existed to run", vgId);
|
||||
taosWUnLockLatch(&pMeta->lock);
|
||||
return 0;
|
||||
}
|
||||
|
@ -150,7 +153,7 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) {
|
|||
|
||||
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
|
||||
if (numOfTasks == 0) {
|
||||
tqInfo("vgId:%d no stream tasks exist", vgId);
|
||||
tqDebug("vgId:%d no stream tasks existed to run", vgId);
|
||||
taosWUnLockLatch(&pMeta->lock);
|
||||
return 0;
|
||||
}
|
||||
|
@ -256,36 +259,36 @@ int32_t tqStartStreamTasks(STQ* pTq) {
|
|||
int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId) {
|
||||
// seek the stored version and extract data from WAL
|
||||
int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);
|
||||
if (pTask->chkInfo.currentVer < firstVer) {
|
||||
if (pTask->chkInfo.nextProcessVer < firstVer) {
|
||||
tqWarn("vgId:%d s-task:%s ver:%" PRId64 " earlier than the first ver of wal range %" PRId64 ", forward to %" PRId64,
|
||||
vgId, pTask->id.idStr, pTask->chkInfo.currentVer, firstVer, firstVer);
|
||||
vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, firstVer, firstVer);
|
||||
|
||||
pTask->chkInfo.currentVer = firstVer;
|
||||
pTask->chkInfo.nextProcessVer = firstVer;
|
||||
|
||||
// todo need retry if failed
|
||||
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.currentVer);
|
||||
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.nextProcessVer);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
// append the data for the stream
|
||||
tqDebug("vgId:%d s-task:%s wal reader seek to ver:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.currentVer);
|
||||
tqDebug("vgId:%d s-task:%s wal reader seek to ver:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer);
|
||||
} else {
|
||||
int64_t currentVer = walReaderGetCurrentVer(pTask->exec.pWalReader);
|
||||
if (currentVer == -1) { // we only seek the read for the first time
|
||||
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.currentVer);
|
||||
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.nextProcessVer);
|
||||
if (code != TSDB_CODE_SUCCESS) { // no data in wal, quit
|
||||
return code;
|
||||
}
|
||||
|
||||
// append the data for the stream
|
||||
tqDebug("vgId:%d s-task:%s wal reader initial seek to ver:%" PRId64, vgId, pTask->id.idStr,
|
||||
pTask->chkInfo.currentVer);
|
||||
pTask->chkInfo.nextProcessVer);
|
||||
}
|
||||
}
|
||||
|
||||
int64_t skipToVer = walReaderGetSkipToVersion(pTask->exec.pWalReader);
|
||||
if (skipToVer != 0 && skipToVer > pTask->chkInfo.currentVer) {
|
||||
if (skipToVer != 0 && skipToVer > pTask->chkInfo.nextProcessVer) {
|
||||
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, skipToVer);
|
||||
if (code != TSDB_CODE_SUCCESS) { // no data in wal, quit
|
||||
return code;
|
||||
|
@ -304,7 +307,7 @@ void handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver) {
|
|||
|
||||
if ((pTask->info.fillHistory == 1) && ver > pTask->dataRange.range.maxVer) {
|
||||
if (!pTask->status.appendTranstateBlock) {
|
||||
qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64
|
||||
qWarn("s-task:%s fill-history scan WAL, nextProcessVer:%" PRId64 " out of the maximum ver:%" PRId64
|
||||
", not scan wal anymore, add transfer-state block into inputQ",
|
||||
id, ver, maxVer);
|
||||
|
||||
|
@ -313,7 +316,7 @@ void handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver) {
|
|||
/*int32_t code = */streamTaskPutTranstateIntoInputQ(pTask);
|
||||
/*int32_t code = */ streamSchedExec(pTask);
|
||||
} else {
|
||||
qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 ", not scan wal",
|
||||
qWarn("s-task:%s fill-history scan WAL, nextProcessVer:%" PRId64 " out of the maximum ver:%" PRId64 ", not scan wal",
|
||||
id, ver, maxVer);
|
||||
}
|
||||
}
|
||||
|
@ -371,7 +374,7 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
|
|||
continue;
|
||||
}
|
||||
|
||||
if (streamQueueIsFull(pTask->inputInfo.queue->pQueue)) {
|
||||
if (streamQueueIsFull(pTask->inputInfo.queue->pQueue, true)) {
|
||||
tqTrace("s-task:%s input queue is full, do nothing", pTask->id.idStr);
|
||||
streamMetaReleaseTask(pStreamMeta, pTask);
|
||||
continue;
|
||||
|
@ -393,28 +396,26 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
|
|||
continue;
|
||||
}
|
||||
|
||||
int32_t numOfItems = streamTaskGetInputQItems(pTask);
|
||||
int32_t numOfItems = streamQueueGetNumOfItems(pTask->inputInfo.queue);
|
||||
int64_t maxVer = (pTask->info.fillHistory == 1) ? pTask->dataRange.range.maxVer : INT64_MAX;
|
||||
|
||||
taosThreadMutexLock(&pTask->lock);
|
||||
|
||||
pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
|
||||
if (pTask->status.taskStatus != TASK_STATUS__NORMAL) {
|
||||
tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pStatus);
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
streamMetaReleaseTask(pStreamMeta, pTask);
|
||||
continue;
|
||||
}
|
||||
|
||||
SStreamQueueItem* pItem = NULL;
|
||||
code = extractMsgFromWal(pTask->exec.pWalReader, (void**)&pItem, maxVer, pTask->id.idStr);
|
||||
|
||||
if ((code != TSDB_CODE_SUCCESS || pItem == NULL) && (numOfItems == 0)) { // failed, continue
|
||||
handleFillhistoryScanComplete(pTask, walReaderGetCurrentVer(pTask->exec.pWalReader));
|
||||
streamMetaReleaseTask(pStreamMeta, pTask);
|
||||
continue;
|
||||
}
|
||||
|
||||
taosThreadMutexLock(&pTask->lock);
|
||||
pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
|
||||
|
||||
if (pTask->status.taskStatus != TASK_STATUS__NORMAL) {
|
||||
tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pStatus);
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
streamMetaReleaseTask(pStreamMeta, pTask);
|
||||
if (pItem != NULL) {
|
||||
streamFreeQitem(pItem);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -423,12 +424,12 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
|
|||
code = streamTaskPutDataIntoInputQ(pTask, pItem);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
int64_t ver = walReaderGetCurrentVer(pTask->exec.pWalReader);
|
||||
pTask->chkInfo.currentVer = ver;
|
||||
pTask->chkInfo.nextProcessVer = ver;
|
||||
handleFillhistoryScanComplete(pTask, ver);
|
||||
tqDebug("s-task:%s set the ver:%" PRId64 " from WALReader after extract block from WAL", pTask->id.idStr, ver);
|
||||
} else {
|
||||
tqError("s-task:%s append input queue failed, too many in inputQ, ver:%" PRId64, pTask->id.idStr,
|
||||
pTask->chkInfo.currentVer);
|
||||
pTask->chkInfo.nextProcessVer);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -165,7 +165,6 @@ struct SStreamTaskWriter {
|
|||
STQ* pTq;
|
||||
int64_t sver;
|
||||
int64_t ever;
|
||||
TXN* txn;
|
||||
};
|
||||
|
||||
int32_t streamTaskSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, SStreamTaskWriter** ppWriter) {
|
||||
|
@ -182,12 +181,6 @@ int32_t streamTaskSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, SStreamTa
|
|||
pWriter->sver = sver;
|
||||
pWriter->ever = ever;
|
||||
|
||||
if (tdbBegin(pTq->pStreamMeta->db, &pWriter->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) {
|
||||
code = -1;
|
||||
taosMemoryFree(pWriter);
|
||||
goto _err;
|
||||
}
|
||||
|
||||
*ppWriter = pWriter;
|
||||
tqDebug("vgId:%d, vnode stream-task snapshot writer opened", TD_VID(pTq->pVnode));
|
||||
return code;
|
||||
|
@ -203,30 +196,33 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) {
|
|||
int32_t code = 0;
|
||||
STQ* pTq = pWriter->pTq;
|
||||
|
||||
taosWLockLatch(&pTq->pStreamMeta->lock);
|
||||
tqDebug("vgId:%d, vnode stream-task snapshot writer closed", TD_VID(pTq->pVnode));
|
||||
if (rollback) {
|
||||
tdbAbort(pWriter->pTq->pStreamMeta->db, pWriter->txn);
|
||||
tdbAbort(pTq->pStreamMeta->db, pTq->pStreamMeta->txn);
|
||||
} else {
|
||||
code = tdbCommit(pWriter->pTq->pStreamMeta->db, pWriter->txn);
|
||||
code = tdbCommit(pTq->pStreamMeta->db, pTq->pStreamMeta->txn);
|
||||
if (code) goto _err;
|
||||
code = tdbPostCommit(pWriter->pTq->pStreamMeta->db, pWriter->txn);
|
||||
code = tdbPostCommit(pTq->pStreamMeta->db, pTq->pStreamMeta->txn);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
if (tdbBegin(pTq->pStreamMeta->db, &pTq->pStreamMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) {
|
||||
code = -1;
|
||||
taosMemoryFree(pWriter);
|
||||
goto _err;
|
||||
}
|
||||
|
||||
taosWUnLockLatch(&pTq->pStreamMeta->lock);
|
||||
|
||||
taosMemoryFree(pWriter);
|
||||
|
||||
// restore from metastore
|
||||
// if (tqMetaRestoreHandle(pTq) < 0) {
|
||||
// goto _err;
|
||||
// }
|
||||
|
||||
return code;
|
||||
|
||||
_err:
|
||||
tqError("vgId:%d, vnode stream-task snapshot writer failed to close since %s", TD_VID(pWriter->pTq->pVnode),
|
||||
tstrerror(code));
|
||||
taosWUnLockLatch(&pTq->pStreamMeta->lock);
|
||||
return code;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamTaskSnapWrite(SStreamTaskWriter* pWriter, uint8_t* pData, uint32_t nData) {
|
||||
|
@ -247,11 +243,17 @@ int32_t streamTaskSnapWrite(SStreamTaskWriter* pWriter, uint8_t* pData, uint32_t
|
|||
}
|
||||
tDecoderClear(&decoder);
|
||||
// tdbTbInsert(TTB *pTb, const void *pKey, int keyLen, const void *pVal, int valLen, TXN *pTxn)
|
||||
|
||||
taosWLockLatch(&pTq->pStreamMeta->lock);
|
||||
int64_t key[2] = {task.streamId, task.taskId};
|
||||
|
||||
taosWLockLatch(&pTq->pStreamMeta->lock);
|
||||
if (tdbTbUpsert(pTq->pStreamMeta->pTaskDb, key, sizeof(int64_t) << 1, (uint8_t*)pData + sizeof(SSnapDataHdr),
|
||||
nData - sizeof(SSnapDataHdr), pWriter->txn) < 0) {
|
||||
nData - sizeof(SSnapDataHdr), pTq->pStreamMeta->txn) < 0) {
|
||||
taosWUnLockLatch(&pTq->pStreamMeta->lock);
|
||||
return -1;
|
||||
}
|
||||
taosWUnLockLatch(&pTq->pStreamMeta->lock);
|
||||
} else if (pHdr->type == SNAP_DATA_STREAM_TASK_CHECKPOINT) {
|
||||
// do nothing
|
||||
}
|
||||
|
|
|
@ -436,6 +436,8 @@ int32_t extractDelDataBlock(const void* pData, int32_t len, int64_t ver, SStream
|
|||
taosArrayDestroy(pRes->uidList);
|
||||
*pRefBlock = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);
|
||||
if (*pRefBlock == NULL) {
|
||||
blockDataCleanup(pDelBlock);
|
||||
taosMemoryFree(pDelBlock);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
|
|
@ -980,9 +980,7 @@ static int32_t tsdbDataFileDoWriteTableOldData(SDataFileWriter *writer, const TS
|
|||
writer->ctx->brinBlkArray = NULL;
|
||||
writer->ctx->tbHasOldData = false;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
for (; writer->ctx->brinBlkArrayIdx < TARRAY2_SIZE(writer->ctx->brinBlkArray); writer->ctx->brinBlkArrayIdx++) {
|
||||
} else {
|
||||
const SBrinBlk *brinBlk = TARRAY2_GET_PTR(writer->ctx->brinBlkArray, writer->ctx->brinBlkArrayIdx);
|
||||
|
||||
if (brinBlk->minTbid.uid != writer->ctx->tbid->uid) {
|
||||
|
@ -995,7 +993,6 @@ static int32_t tsdbDataFileDoWriteTableOldData(SDataFileWriter *writer, const TS
|
|||
|
||||
writer->ctx->brinBlockIdx = 0;
|
||||
writer->ctx->brinBlkArrayIdx++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1110,9 +1107,7 @@ static int32_t tsdbDataFileWriteTableDataBegin(SDataFileWriter *writer, const TA
|
|||
if (writer->ctx->brinBlkArrayIdx >= TARRAY2_SIZE(writer->ctx->brinBlkArray)) {
|
||||
writer->ctx->brinBlkArray = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
for (; writer->ctx->brinBlkArrayIdx < TARRAY2_SIZE(writer->ctx->brinBlkArray); writer->ctx->brinBlkArrayIdx++) {
|
||||
} else {
|
||||
const SBrinBlk *brinBlk = TARRAY2_GET_PTR(writer->ctx->brinBlkArray, writer->ctx->brinBlkArrayIdx);
|
||||
|
||||
code = tsdbDataFileReadBrinBlock(writer->ctx->reader, brinBlk, writer->ctx->brinBlock);
|
||||
|
@ -1120,7 +1115,6 @@ static int32_t tsdbDataFileWriteTableDataBegin(SDataFileWriter *writer, const TA
|
|||
|
||||
writer->ctx->brinBlockIdx = 0;
|
||||
writer->ctx->brinBlkArrayIdx++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1251,9 +1245,7 @@ static int32_t tsdbDataFileDoWriteTombRecord(SDataFileWriter *writer, const STom
|
|||
if (writer->ctx->tombBlkArrayIdx >= TARRAY2_SIZE(writer->ctx->tombBlkArray)) {
|
||||
writer->ctx->hasOldTomb = false;
|
||||
break;
|
||||
}
|
||||
|
||||
for (; writer->ctx->tombBlkArrayIdx < TARRAY2_SIZE(writer->ctx->tombBlkArray); ++writer->ctx->tombBlkArrayIdx) {
|
||||
} else {
|
||||
const STombBlk *tombBlk = TARRAY2_GET_PTR(writer->ctx->tombBlkArray, writer->ctx->tombBlkArrayIdx);
|
||||
|
||||
code = tsdbDataFileReadTombBlock(writer->ctx->reader, tombBlk, writer->ctx->tombBlock);
|
||||
|
@ -1261,7 +1253,6 @@ static int32_t tsdbDataFileDoWriteTombRecord(SDataFileWriter *writer, const STom
|
|||
|
||||
writer->ctx->tombBlockIdx = 0;
|
||||
writer->ctx->tombBlkArrayIdx++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -174,11 +174,17 @@ int32_t save_fs(const TFileSetArray *arr, const char *fname) {
|
|||
|
||||
// fset
|
||||
cJSON *ajson = cJSON_AddArrayToObject(json, "fset");
|
||||
if (!ajson) TSDB_CHECK_CODE(code = TSDB_CODE_OUT_OF_MEMORY, lino, _exit);
|
||||
if (!ajson) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
const STFileSet *fset;
|
||||
TARRAY2_FOREACH(arr, fset) {
|
||||
cJSON *item = cJSON_CreateObject();
|
||||
if (!item) TSDB_CHECK_CODE(code = TSDB_CODE_OUT_OF_MEMORY, lino, _exit);
|
||||
if (!item) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
cJSON_AddItemToArray(ajson, item);
|
||||
|
||||
code = tsdbTFileSetToJson(fset, item);
|
||||
|
@ -231,7 +237,8 @@ static int32_t load_fs(STsdb *pTsdb, const char *fname, TFileSetArray *arr) {
|
|||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
} else {
|
||||
TSDB_CHECK_CODE(code = TSDB_CODE_FILE_CORRUPTED, lino, _exit);
|
||||
code = TSDB_CODE_FILE_CORRUPTED;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
_exit:
|
||||
|
@ -312,7 +319,8 @@ static int32_t commit_edit(STFileSystem *fs) {
|
|||
int32_t code;
|
||||
int32_t lino;
|
||||
if ((code = taosRenameFile(current_t, current))) {
|
||||
TSDB_CHECK_CODE(code = TAOS_SYSTEM_ERROR(code), lino, _exit);
|
||||
code = TAOS_SYSTEM_ERROR(code);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
code = apply_commit(fs);
|
||||
|
@ -345,7 +353,8 @@ static int32_t abort_edit(STFileSystem *fs) {
|
|||
int32_t code;
|
||||
int32_t lino;
|
||||
if ((code = taosRemoveFile(fname))) {
|
||||
TSDB_CHECK_CODE(code = TAOS_SYSTEM_ERROR(code), lino, _exit);
|
||||
code = TAOS_SYSTEM_ERROR(code);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
code = apply_abort(fs);
|
||||
|
@ -398,7 +407,7 @@ static int32_t tsdbFSAddEntryToFileObjHash(STFileHash *hash, const char *fname)
|
|||
STFileHashEntry *entry = taosMemoryMalloc(sizeof(*entry));
|
||||
if (entry == NULL) return TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
||||
strcpy(entry->fname, fname);
|
||||
strncpy(entry->fname, fname, TSDB_FILENAME_LEN);
|
||||
|
||||
uint32_t idx = MurmurHash3_32(fname, strlen(fname)) % hash->numBucket;
|
||||
|
||||
|
@ -873,7 +882,7 @@ int32_t tsdbFSCreateCopySnapshot(STFileSystem *fs, TFileSetArray **fsetArr) {
|
|||
STFileSet *fset1;
|
||||
|
||||
fsetArr[0] = taosMemoryMalloc(sizeof(TFileSetArray));
|
||||
if (fsetArr == NULL) return TSDB_CODE_OUT_OF_MEMORY;
|
||||
if (fsetArr[0] == NULL) return TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
||||
TARRAY2_INIT(fsetArr[0]);
|
||||
|
||||
|
|
|
@ -46,7 +46,8 @@ static int32_t tsdbSttLvlInitEx(STsdb *pTsdb, const SSttLvl *lvl1, SSttLvl **lvl
|
|||
return code;
|
||||
}
|
||||
|
||||
TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
|
||||
code = TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
|
||||
if (code) return code;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -185,7 +186,8 @@ static int32_t tsdbJsonToSttLvl(STsdb *pTsdb, const cJSON *json, SSttLvl **lvl)
|
|||
return code;
|
||||
}
|
||||
|
||||
TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
|
||||
code = TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
|
||||
if (code) return code;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -263,7 +265,8 @@ int32_t tsdbJsonToTFileSet(STsdb *pTsdb, const cJSON *json, STFileSet **fset) {
|
|||
return code;
|
||||
}
|
||||
|
||||
TARRAY2_APPEND((*fset)->lvlArr, lvl);
|
||||
code = TARRAY2_APPEND((*fset)->lvlArr, lvl);
|
||||
if (code) return code;
|
||||
}
|
||||
} else {
|
||||
return TSDB_CODE_FILE_CORRUPTED;
|
||||
|
@ -326,11 +329,12 @@ int32_t tsdbTFileSetEdit(STsdb *pTsdb, STFileSet *fset, const STFileOp *op) {
|
|||
|
||||
STFileObj tfobj = {.f[0] = {.cid = op->of.cid}}, *tfobjp = &tfobj;
|
||||
STFileObj **fobjPtr = TARRAY2_SEARCH(lvl->fobjArr, &tfobjp, tsdbTFileObjCmpr, TD_EQ);
|
||||
tfobjp = (fobjPtr ? *fobjPtr : NULL);
|
||||
|
||||
ASSERT(tfobjp);
|
||||
|
||||
tfobjp->f[0] = op->nf;
|
||||
if (fobjPtr) {
|
||||
tfobjp = *fobjPtr;
|
||||
tfobjp->f[0] = op->nf;
|
||||
} else {
|
||||
tsdbError("file not found, cid:%" PRId64, op->of.cid);
|
||||
}
|
||||
} else {
|
||||
fset->farr[op->nf.type]->f[0] = op->nf;
|
||||
}
|
||||
|
|
|
@ -42,8 +42,12 @@ static const struct {
|
|||
};
|
||||
|
||||
void remove_file(const char *fname) {
|
||||
taosRemoveFile(fname);
|
||||
tsdbInfo("file:%s is removed", fname);
|
||||
int32_t code = taosRemoveFile(fname);
|
||||
if (code) {
|
||||
tsdbError("file:%s remove failed", fname);
|
||||
} else {
|
||||
tsdbInfo("file:%s is removed", fname);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t tfile_to_json(const STFile *file, cJSON *json) {
|
||||
|
|
|
@ -356,7 +356,8 @@ static int32_t tsdbSttIterOpen(STsdbIter *iter) {
|
|||
}
|
||||
|
||||
iter->sttData->sttBlkArrayIdx = 0;
|
||||
tBlockDataCreate(iter->sttData->blockData);
|
||||
code = tBlockDataCreate(iter->sttData->blockData);
|
||||
if (code) return code;
|
||||
iter->sttData->blockDataIdx = 0;
|
||||
|
||||
return tsdbSttIterNext(iter, NULL);
|
||||
|
@ -381,7 +382,8 @@ static int32_t tsdbDataIterOpen(STsdbIter *iter) {
|
|||
iter->dataData->brinBlockIdx = 0;
|
||||
|
||||
// SBlockData
|
||||
tBlockDataCreate(iter->dataData->blockData);
|
||||
code = tBlockDataCreate(iter->dataData->blockData);
|
||||
if (code) return code;
|
||||
iter->dataData->blockDataIdx = 0;
|
||||
|
||||
return tsdbDataIterNext(iter, NULL);
|
||||
|
|
|
@ -1504,6 +1504,12 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
if (ps == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
int32_t code = tsdbRowMergerInit(pMerger, ps);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tsdbError("failed to init row merger, code:%s", tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
int64_t minKey = 0;
|
||||
|
@ -1535,15 +1541,11 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
}
|
||||
|
||||
// todo remove init
|
||||
bool init = false;
|
||||
|
||||
// ASC: file block ---> last block -----> imem -----> mem
|
||||
// DESC: mem -----> imem -----> last block -----> file block
|
||||
if (pReader->info.order == TSDB_ORDER_ASC) {
|
||||
if (minKey == key) {
|
||||
init = true;
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -1552,47 +1554,44 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
|
||||
if (minKey == tsLast) {
|
||||
TSDBROW* fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
|
||||
if (init) {
|
||||
tsdbRowMergerAdd(pMerger, fRow1, NULL);
|
||||
} else {
|
||||
init = true;
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, fRow1, pReader->info.pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, fRow1, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange,
|
||||
pReader->idStr);
|
||||
}
|
||||
|
||||
if (minKey == k.ts) {
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
if (pSchema == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
if (init) {
|
||||
tsdbRowMergerAdd(pMerger, pRow, pSchema);
|
||||
} else {
|
||||
init = true;
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
STSchema* pTSchema = NULL;
|
||||
if (pRow->type == TSDBROW_ROW_FMT) {
|
||||
pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
if (pTSchema == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
}
|
||||
int32_t code = doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, pReader);
|
||||
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, pRow, pTSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, pReader);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (minKey == k.ts) {
|
||||
init = true;
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
if (pSchema == NULL) {
|
||||
return terrno;
|
||||
STSchema* pTSchema = NULL;
|
||||
if (pRow->type == TSDBROW_ROW_FMT) {
|
||||
pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
if (pTSchema == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, pRow, pTSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -1605,28 +1604,18 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
|
||||
if (minKey == tsLast) {
|
||||
TSDBROW* fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
|
||||
if (init) {
|
||||
tsdbRowMergerAdd(pMerger, fRow1, NULL);
|
||||
} else {
|
||||
init = true;
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, fRow1, pReader->info.pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, fRow1, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange,
|
||||
pReader->idStr);
|
||||
}
|
||||
|
||||
if (minKey == key) {
|
||||
if (init) {
|
||||
tsdbRowMergerAdd(pMerger, &fRow, NULL);
|
||||
} else {
|
||||
init = true;
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
|
||||
}
|
||||
|
@ -1674,7 +1663,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
pBlockScanInfo->lastKey = tsLastBlock;
return TSDB_CODE_SUCCESS;
} else {
code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

@ -1699,7 +1688,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
}
}
} else { // not merge block data
code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

@ -1742,6 +1731,12 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
if (ps == NULL) {
return terrno;
}

int32_t code = tsdbRowMergerInit(pMerger, ps);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("failed to init row merger, code:%s", tstrerror(code));
return code;
}
}

if (hasDataInFileBlock(pBlockData, pDumpInfo)) {

@ -1768,7 +1763,7 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
}
// the following for key == tsLast
SRow* pTSRow = NULL;
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

@ -1776,7 +1771,10 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);

TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
tsdbRowMergerAdd(pMerger, pRow1, NULL);
code = tsdbRowMergerAdd(pMerger, pRow1, NULL);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, pReader->idStr);


@ -1816,14 +1814,21 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*

TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
if (pSchema == NULL) {
return code;

STSchema* pSchema = NULL;
if (pRow->type == TSDBROW_ROW_FMT) {
pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
if (pSchema == NULL) {
return terrno;
}
}

STSchema* piSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
if (piSchema == NULL) {
return code;
STSchema* piSchema = NULL;
if (piRow->type == TSDBROW_ROW_FMT) {
piSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
if (piSchema == NULL) {
return code;
}
}

// merge is not initialized yet, due to the fact that the pReader->info.pSchema is not initialized

@ -1833,6 +1838,12 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
if (ps == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
code = tsdbRowMergerInit(pMerger, ps);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tsdbError("failed to init row merger, code:%s", tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
int64_t minKey = 0;
|
||||
|
@ -1872,15 +1883,12 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
}
|
||||
|
||||
bool init = false;
|
||||
|
||||
// ASC: file block -----> last block -----> imem -----> mem
|
||||
// DESC: mem -----> imem -----> last block -----> file block
|
||||
if (ASCENDING_TRAVERSE(pReader->info.order)) {
|
||||
if (minKey == key) {
|
||||
init = true;
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
|
||||
code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -1890,14 +1898,9 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
|
||||
if (minKey == tsLast) {
|
||||
TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
|
||||
if (init) {
|
||||
tsdbRowMergerAdd(pMerger, pRow1, NULL);
|
||||
} else {
|
||||
init = true;
|
||||
code = tsdbRowMergerAdd(pMerger, pRow1, pReader->info.pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
code = tsdbRowMergerAdd(pMerger, pRow1, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange,
|
||||
|
@ -1905,14 +1908,9 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
|
||||
if (minKey == ik.ts) {
|
||||
if (init) {
|
||||
tsdbRowMergerAdd(pMerger, piRow, piSchema);
|
||||
} else {
|
||||
init = true;
|
||||
code = tsdbRowMergerAdd(pMerger, piRow, piSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
code = tsdbRowMergerAdd(pMerger, piRow, piSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, pReader);
|
||||
|
@ -1922,15 +1920,11 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
|
||||
if (minKey == k.ts) {
|
||||
if (init) {
|
||||
tsdbRowMergerAdd(pMerger, pRow, pSchema);
|
||||
} else {
|
||||
// STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, pReader);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -1938,7 +1932,6 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
} else {
|
||||
if (minKey == k.ts) {
|
||||
init = true;
|
||||
code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -1951,15 +1944,11 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
|
||||
if (minKey == ik.ts) {
|
||||
if (init) {
|
||||
tsdbRowMergerAdd(pMerger, piRow, piSchema);
|
||||
} else {
|
||||
init = true;
|
||||
code = tsdbRowMergerAdd(pMerger, piRow, piSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
code = tsdbRowMergerAdd(pMerger, piRow, piSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, pReader);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -1968,29 +1957,22 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
|
||||
if (minKey == tsLast) {
|
||||
TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
|
||||
if (init) {
|
||||
tsdbRowMergerAdd(pMerger, pRow1, NULL);
|
||||
} else {
|
||||
init = true;
|
||||
code = tsdbRowMergerAdd(pMerger, pRow1, pReader->info.pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
code = tsdbRowMergerAdd(pMerger, pRow1, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange,
|
||||
pReader->idStr);
|
||||
}
|
||||
|
||||
if (minKey == key) {
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
if (!init) {
|
||||
code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
} else {
|
||||
tsdbRowMergerAdd(pMerger, &fRow, NULL);
|
||||
code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
|
||||
}
|
||||
}
|
||||
|
@ -2184,6 +2166,12 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc
|
|||
if (ps == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
code = tsdbRowMergerInit(pMerger, ps);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tsdbError("failed to init row merger, code:%s", tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
if (copied) {
|
||||
|
@ -2193,7 +2181,7 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc
|
|||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
|
||||
SRow* pTSRow = NULL;
|
||||
code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
|
||||
code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -3328,16 +3316,15 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDe
break;
}

STSchema* pTSchema = NULL;
if (pRow->type == TSDBROW_ROW_FMT) {
STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, uid);
pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, uid);
if (pTSchema == NULL) {
return terrno;
}

tsdbRowMergerAdd(pMerger, pRow, pTSchema);
} else { // column format
tsdbRowMergerAdd(pMerger, pRow, NULL);
}

tsdbRowMergerAdd(pMerger, pRow, pTSchema);
}

return TSDB_CODE_SUCCESS;
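
The hunks above all apply the same refactor: an STSchema is looked up only for row-format (TSDBROW_ROW_FMT) entries, column-format rows are handed to the merger with a NULL schema, and the return value of tsdbRowMergerAdd is propagated instead of being ignored. A minimal sketch of that pattern, reusing the helper names from this file; the wrapper function itself is hypothetical and not part of the commit:

```c
// Hypothetical helper illustrating the schema-selection pattern used throughout this commit.
static int32_t addRowToMerger(SRowMerger* pMerger, TSDBROW* pRow, STsdbReader* pReader, uint64_t uid) {
  STSchema* pTSchema = NULL;
  if (pRow->type == TSDBROW_ROW_FMT) {
    // row-format entries carry only a schema version, so resolve the schema explicitly
    pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, uid);
    if (pTSchema == NULL) {
      return terrno;  // lookup failed, bubble the error up
    }
  }
  // column-format rows are merged with a NULL schema; the return code is always checked
  return tsdbRowMergerAdd(pMerger, pRow, pTSchema);
}
```
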
@ -3473,31 +3460,30 @@ int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter,
|
|||
int32_t code = 0;
|
||||
|
||||
// start to merge duplicated rows
|
||||
if (current.type == TSDBROW_ROW_FMT) {
|
||||
// get the correct schema for data in memory
|
||||
STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(¤t), pReader, uid);
|
||||
STSchema* pTSchema = NULL;
|
||||
if (current.type == TSDBROW_ROW_FMT) { // get the correct schema for row-wise data in memory
|
||||
pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(¤t), pReader, uid);
|
||||
if (pTSchema == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
}
|
||||
|
||||
code = tsdbRowMergerAdd(&pReader->status.merger, ¤t, pTSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
code = tsdbRowMergerAdd(&pReader->status.merger, ¤t, pTSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
STSchema* pTSchema1 = doGetSchemaForTSRow(TSDBROW_SVERSION(pNextRow), pReader, uid);
|
||||
STSchema* pTSchema1 = NULL;
|
||||
if (pNextRow->type == TSDBROW_ROW_FMT) { // get the correct schema for row-wise data in memory
|
||||
pTSchema1 = doGetSchemaForTSRow(TSDBROW_SVERSION(pNextRow), pReader, uid);
|
||||
if (pTSchema1 == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
}
|
||||
|
||||
tsdbRowMergerAdd(&pReader->status.merger, pNextRow, pTSchema1);
|
||||
} else { // let's merge rows in file block
|
||||
code = tsdbRowMergerAdd(&pReader->status.merger, ¤t, pReader->info.pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
tsdbRowMergerAdd(&pReader->status.merger, pNextRow, NULL);
|
||||
code = tsdbRowMergerAdd(&pReader->status.merger, pNextRow, pTSchema1);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = doMergeRowsInBuf(pIter, uid, TSDBROW_TS(¤t), pDelList, pReader);
|
||||
|
@ -3523,14 +3509,21 @@ int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* p
|
|||
|
||||
TSDBKEY k = TSDBROW_KEY(pRow);
|
||||
TSDBKEY ik = TSDBROW_KEY(piRow);
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
if (pSchema == NULL) {
|
||||
return terrno;
|
||||
|
||||
STSchema* pSchema = NULL;
|
||||
if (pRow->type == TSDBROW_ROW_FMT) {
|
||||
pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
if (pSchema == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
}
|
||||
|
||||
STSchema* piSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
|
||||
if (piSchema == NULL) {
|
||||
return terrno;
|
||||
STSchema* piSchema = NULL;
|
||||
if (piRow->type == TSDBROW_ROW_FMT) {
|
||||
piSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
|
||||
if (piSchema == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
}
|
||||
|
||||
if (ASCENDING_TRAVERSE(pReader->info.order)) { // ascending order imem --> mem
|
||||
|
@ -4898,10 +4891,10 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
|
|||
pSnap->pMem = pTsdb->mem;
|
||||
pSnap->pNode = taosMemoryMalloc(sizeof(*pSnap->pNode));
|
||||
if (pSnap->pNode == NULL) {
|
||||
taosThreadRwlockUnlock(&pTsdb->rwLock);
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
pSnap->pNode->pQHandle = pReader;
|
||||
pSnap->pNode->reseek = reseek;
|
||||
|
||||
|
@ -4912,10 +4905,10 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
|
|||
pSnap->pIMem = pTsdb->imem;
|
||||
pSnap->pINode = taosMemoryMalloc(sizeof(*pSnap->pINode));
|
||||
if (pSnap->pINode == NULL) {
|
||||
taosThreadRwlockUnlock(&pTsdb->rwLock);
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
pSnap->pINode->pQHandle = pReader;
|
||||
pSnap->pINode->reseek = reseek;
|
||||
|
||||
|
@ -4924,18 +4917,16 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
|
|||
|
||||
// fs
|
||||
code = tsdbFSCreateRefSnapshot(pTsdb->pFS, &pSnap->pfSetArray);
|
||||
if (code) {
|
||||
taosThreadRwlockUnlock(&pTsdb->rwLock);
|
||||
goto _exit;
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
tsdbTrace("vgId:%d, take read snapshot", TD_VID(pTsdb->pVnode));
|
||||
}
|
||||
|
||||
// unlock
|
||||
_exit:
|
||||
taosThreadRwlockUnlock(&pTsdb->rwLock);
|
||||
|
||||
tsdbTrace("vgId:%d, take read snapshot", TD_VID(pTsdb->pVnode));
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tsdbError("vgId:%d take read snapshot failed, code:%s", TD_VID(pTsdb->pVnode), tstrerror(code));
|
||||
|
||||
_exit:
|
||||
if (code) {
|
||||
*ppSnap = NULL;
|
||||
if (pSnap) {
|
||||
if (pSnap->pNode) taosMemoryFree(pSnap->pNode);
|
||||
|
|
|
@ -273,7 +273,12 @@ SBrinRecord* getNextBrinRecord(SBrinRecordIter* pIter) {
|
|||
pIter->pCurrentBlk = taosArrayGet(pIter->pBrinBlockList, pIter->blockIndex);
|
||||
|
||||
tBrinBlockClear(&pIter->block);
|
||||
tsdbDataFileReadBrinBlock(pIter->pReader, pIter->pCurrentBlk, &pIter->block);
|
||||
int32_t code = tsdbDataFileReadBrinBlock(pIter->pReader, pIter->pCurrentBlk, &pIter->block);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tsdbError("failed to read brinBlock from file, code:%s", tstrerror(code));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pIter->recordIndex = -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -857,6 +857,9 @@ static int32_t tsdbSnapWriteTombRecord(STsdbSnapWriter* writer, const STombRecor
|
|||
} else {
|
||||
break;
|
||||
}
|
||||
|
||||
code = tsdbIterMergerNext(writer->ctx->tombIterMerger);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
if (record->suid == INT64_MAX) {
@ -545,13 +545,10 @@ int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
}

static int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
STSchema *pTSchema = metaGetTbTSchema(pVnode->pMeta, suid, -1, 1);
// metaGetTbTSchemaEx(pVnode->pMeta, suid, suid, -1, &pTSchema);

if (pTSchema) {
*num = pTSchema->numOfCols;

taosMemoryFree(pTSchema);
SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 1);
if (pSW) {
*num = pSW->nCols;
tDeleteSchemaWrapper(pSW);
} else {
*num = 2;
}
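
Assembled from the hunk above, the rewritten vnodeGetStbColumnNum reads roughly as below; the trailing return statement sits outside the hunk and is assumed here:

```c
static int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
  // fetch the schema wrapper of the super table instead of building a full STSchema
  SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 1);
  if (pSW) {
    *num = pSW->nCols;
    tDeleteSchemaWrapper(pSW);  // releases the wrapper together with its column array
  } else {
    *num = 2;  // fall back to the minimal column count when no schema is found
  }
  return TSDB_CODE_SUCCESS;  // assumed: not shown in the hunk
}
```

tDeleteSchemaWrapper replaces the plain taosMemoryFree because the wrapper owns its column array as well, so freeing only the outer struct would leak it.
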
@ -425,20 +425,6 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
|
|||
if (code) goto _exit;
|
||||
}
|
||||
|
||||
if (pWriter->pStreamTaskWriter) {
|
||||
code = streamTaskSnapWriterClose(pWriter->pStreamTaskWriter, rollback);
|
||||
if (code) goto _exit;
|
||||
}
|
||||
|
||||
if (pWriter->pStreamStateWriter) {
|
||||
code = streamStateSnapWriterClose(pWriter->pStreamStateWriter, rollback);
|
||||
if (code) goto _exit;
|
||||
|
||||
code = streamStateRebuildFromSnap(pWriter->pStreamStateWriter, 0);
|
||||
pWriter->pStreamStateWriter = NULL;
|
||||
if (code) goto _exit;
|
||||
}
|
||||
|
||||
if (pWriter->pRsmaSnapWriter) {
|
||||
code = rsmaSnapWriterClose(&pWriter->pRsmaSnapWriter, rollback);
|
||||
if (code) goto _exit;
|
||||
|
|
|
@ -756,7 +756,7 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
case TDMT_VND_STREAM_TASK_CHECK:
return tqProcessStreamTaskCheckReq(pVnode->pTq, pMsg);
case TDMT_VND_STREAM_TASK_CHECK_RSP:
return tqProcessStreamTaskCheckRsp(pVnode->pTq, 0, pMsg);
return tqProcessStreamTaskCheckRsp(pVnode->pTq, pMsg);
case TDMT_STREAM_RETRIEVE:
return tqProcessTaskRetrieveReq(pVnode->pTq, pMsg);
case TDMT_STREAM_RETRIEVE_RSP:

@ -1443,8 +1443,11 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in
|
|||
|
||||
SColData *pColData = (SColData *)taosArrayGet(pSubmitTbData->aCol, 0);
|
||||
TSKEY *aKey = (TSKEY *)(pColData->pData);
|
||||
vDebug("vgId:%d submit %d rows data, uid:%"PRId64, TD_VID(pVnode), pColData->nVal, pSubmitTbData->uid);
|
||||
|
||||
for (int32_t iRow = 0; iRow < pColData->nVal; iRow++) {
|
||||
vDebug("vgId:%d uid:%"PRId64" ts:%"PRId64, TD_VID(pVnode), pSubmitTbData->uid, aKey[iRow]);
|
||||
|
||||
if (aKey[iRow] < minKey || aKey[iRow] > maxKey || (iRow > 0 && aKey[iRow] <= aKey[iRow - 1])) {
|
||||
code = TSDB_CODE_INVALID_MSG;
|
||||
vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(terrno), ver);
|
||||
|
@ -1456,10 +1459,13 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in
|
|||
int32_t nRow = TARRAY_SIZE(pSubmitTbData->aRowP);
|
||||
SRow **aRow = (SRow **)TARRAY_DATA(pSubmitTbData->aRowP);
|
||||
|
||||
vDebug("vgId:%d submit %d rows data, uid:%"PRId64, TD_VID(pVnode), nRow, pSubmitTbData->uid);
|
||||
for (int32_t iRow = 0; iRow < nRow; ++iRow) {
|
||||
vDebug("vgId:%d uid:%"PRId64" ts:%"PRId64, TD_VID(pVnode), pSubmitTbData->uid, aRow[iRow]->ts);
|
||||
|
||||
if (aRow[iRow]->ts < minKey || aRow[iRow]->ts > maxKey || (iRow > 0 && aRow[iRow]->ts <= aRow[iRow - 1]->ts)) {
|
||||
code = TSDB_CODE_INVALID_MSG;
|
||||
vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(terrno), ver);
|
||||
vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(code), ver);
|
||||
goto _exit;
|
||||
}
|
||||
}
|
||||
|
@ -1564,6 +1570,8 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in
|
|||
} else { // create table failed
|
||||
if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
|
||||
code = terrno;
|
||||
vError("vgId:%d failed to create table:%s, code:%s", TD_VID(pVnode), pSubmitTbData->pCreateTbReq->name,
|
||||
tstrerror(terrno));
|
||||
goto _exit;
|
||||
}
|
||||
terrno = 0;
|
||||
|
|
|
@ -308,10 +308,11 @@ static void setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbName, ch
|
|||
|
||||
if (retentions) {
|
||||
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, " RETENTIONS %s", retentions);
|
||||
taosMemoryFree(retentions);
|
||||
}
|
||||
}
|
||||
|
||||
taosMemoryFree(retentions);
|
||||
|
||||
(varDataLen(buf2)) = len;
|
||||
|
||||
colDataSetVal(pCol2, 0, buf2, false);
|
||||
|
|
|
@ -446,7 +446,7 @@ int32_t createDataInserter(SDataSinkManager* pManager, const SDataSinkNode* pDat
|
|||
taosThreadMutexInit(&inserter->mutex, NULL);
|
||||
if (NULL == inserter->pDataBlocks) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _return;
|
||||
}
|
||||
|
||||
inserter->fullOrderColList = pInserterNode->pCols->length == inserter->pSchema->numOfCols;
|
||||
|
|
|
@ -151,14 +151,21 @@ static void updatePostJoinCurrTableInfo(SStbJoinDynCtrlInfo* pStbJoin)
|
|||
static int32_t buildGroupCacheOperatorParam(SOperatorParam** ppRes, int32_t downstreamIdx, int32_t vgId, int64_t tbUid, bool needCache, SOperatorParam* pChild) {
|
||||
*ppRes = taosMemoryMalloc(sizeof(SOperatorParam));
|
||||
if (NULL == *ppRes) {
|
||||
freeOperatorParam(pChild, OP_GET_PARAM);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
if (pChild) {
|
||||
(*ppRes)->pChildren = taosArrayInit(1, POINTER_BYTES);
|
||||
if (NULL == *ppRes) {
|
||||
if (NULL == (*ppRes)->pChildren) {
|
||||
freeOperatorParam(pChild, OP_GET_PARAM);
|
||||
freeOperatorParam(*ppRes, OP_GET_PARAM);
|
||||
*ppRes = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
if (NULL == taosArrayPush((*ppRes)->pChildren, &pChild)) {
|
||||
freeOperatorParam(pChild, OP_GET_PARAM);
|
||||
freeOperatorParam(*ppRes, OP_GET_PARAM);
|
||||
*ppRes = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
} else {
|
||||
|
@ -167,6 +174,8 @@ static int32_t buildGroupCacheOperatorParam(SOperatorParam** ppRes, int32_t down
|
|||
|
||||
SGcOperatorParam* pGc = taosMemoryMalloc(sizeof(SGcOperatorParam));
|
||||
if (NULL == pGc) {
|
||||
freeOperatorParam(*ppRes, OP_GET_PARAM);
|
||||
*ppRes = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -193,6 +202,7 @@ static int32_t buildGroupCacheNotifyOperatorParam(SOperatorParam** ppRes, int32_
|
|||
|
||||
SGcNotifyOperatorParam* pGc = taosMemoryMalloc(sizeof(SGcNotifyOperatorParam));
|
||||
if (NULL == pGc) {
|
||||
freeOperatorParam(*ppRes, OP_NOTIFY_PARAM);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -248,6 +258,7 @@ static int32_t buildBatchExchangeOperatorParam(SOperatorParam** ppRes, int32_t d
|
|||
|
||||
SExchangeOperatorBatchParam* pExc = taosMemoryMalloc(sizeof(SExchangeOperatorBatchParam));
|
||||
if (NULL == pExc) {
|
||||
taosMemoryFreeClear(*ppRes);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -255,6 +266,7 @@ static int32_t buildBatchExchangeOperatorParam(SOperatorParam** ppRes, int32_t d
|
|||
pExc->pBatchs = tSimpleHashInit(tSimpleHashGetSize(pVg), taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT));
|
||||
if (NULL == pExc->pBatchs) {
|
||||
taosMemoryFree(pExc);
|
||||
taosMemoryFreeClear(*ppRes);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
tSimpleHashSetFreeFp(pExc->pBatchs, freeExchangeGetBasicOperatorParam);
|
||||
|
@ -288,21 +300,36 @@ static int32_t buildBatchExchangeOperatorParam(SOperatorParam** ppRes, int32_t d
|
|||
static int32_t buildMergeJoinOperatorParam(SOperatorParam** ppRes, bool initParam, SOperatorParam* pChild0, SOperatorParam* pChild1) {
|
||||
*ppRes = taosMemoryMalloc(sizeof(SOperatorParam));
|
||||
if (NULL == *ppRes) {
|
||||
freeOperatorParam(pChild0, OP_GET_PARAM);
|
||||
freeOperatorParam(pChild1, OP_GET_PARAM);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
(*ppRes)->pChildren = taosArrayInit(2, POINTER_BYTES);
|
||||
if (NULL == *ppRes) {
|
||||
freeOperatorParam(pChild0, OP_GET_PARAM);
|
||||
freeOperatorParam(pChild1, OP_GET_PARAM);
|
||||
freeOperatorParam(*ppRes, OP_GET_PARAM);
|
||||
*ppRes = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
if (NULL == taosArrayPush((*ppRes)->pChildren, &pChild0)) {
|
||||
freeOperatorParam(pChild0, OP_GET_PARAM);
|
||||
freeOperatorParam(pChild1, OP_GET_PARAM);
|
||||
freeOperatorParam(*ppRes, OP_GET_PARAM);
|
||||
*ppRes = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
if (NULL == taosArrayPush((*ppRes)->pChildren, &pChild1)) {
|
||||
freeOperatorParam(pChild1, OP_GET_PARAM);
|
||||
freeOperatorParam(*ppRes, OP_GET_PARAM);
|
||||
*ppRes = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
SSortMergeJoinOperatorParam* pJoin = taosMemoryMalloc(sizeof(SSortMergeJoinOperatorParam));
|
||||
if (NULL == pJoin) {
|
||||
freeOperatorParam(*ppRes, OP_GET_PARAM);
|
||||
*ppRes = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -318,16 +345,28 @@ static int32_t buildMergeJoinOperatorParam(SOperatorParam** ppRes, bool initPara
|
|||
static int32_t buildMergeJoinNotifyOperatorParam(SOperatorParam** ppRes, SOperatorParam* pChild0, SOperatorParam* pChild1) {
|
||||
*ppRes = taosMemoryMalloc(sizeof(SOperatorParam));
|
||||
if (NULL == *ppRes) {
|
||||
freeOperatorParam(pChild0, OP_NOTIFY_PARAM);
|
||||
freeOperatorParam(pChild1, OP_NOTIFY_PARAM);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
(*ppRes)->pChildren = taosArrayInit(2, POINTER_BYTES);
|
||||
if (NULL == *ppRes) {
|
||||
taosMemoryFreeClear(*ppRes);
|
||||
freeOperatorParam(pChild0, OP_NOTIFY_PARAM);
|
||||
freeOperatorParam(pChild1, OP_NOTIFY_PARAM);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
if (pChild0 && NULL == taosArrayPush((*ppRes)->pChildren, &pChild0)) {
|
||||
freeOperatorParam(*ppRes, OP_NOTIFY_PARAM);
|
||||
freeOperatorParam(pChild0, OP_NOTIFY_PARAM);
|
||||
freeOperatorParam(pChild1, OP_NOTIFY_PARAM);
|
||||
*ppRes = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
if (pChild1 && NULL == taosArrayPush((*ppRes)->pChildren, &pChild1)) {
|
||||
freeOperatorParam(*ppRes, OP_NOTIFY_PARAM);
|
||||
freeOperatorParam(pChild1, OP_NOTIFY_PARAM);
|
||||
*ppRes = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -420,13 +459,34 @@ static int32_t buildSeqStbJoinOperatorParam(SDynQueryCtrlOperatorInfo* pInfo, SS
|
|||
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = buildGroupCacheOperatorParam(&pGcParam0, 0, *leftVg, *leftUid, pPost->leftNeedCache, pSrcParam0);
|
||||
pSrcParam0 = NULL;
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = buildGroupCacheOperatorParam(&pGcParam1, 1, *rightVg, *rightUid, pPost->rightNeedCache, pSrcParam1);
|
||||
pSrcParam1 = NULL;
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = buildMergeJoinOperatorParam(ppParam, pSrcParam0 ? true : false, pGcParam0, pGcParam1);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
if (pSrcParam0) {
|
||||
freeOperatorParam(pSrcParam0, OP_GET_PARAM);
|
||||
}
|
||||
if (pSrcParam1) {
|
||||
freeOperatorParam(pSrcParam1, OP_GET_PARAM);
|
||||
}
|
||||
if (pGcParam0) {
|
||||
freeOperatorParam(pGcParam0, OP_GET_PARAM);
|
||||
}
|
||||
if (pGcParam1) {
|
||||
freeOperatorParam(pGcParam1, OP_GET_PARAM);
|
||||
}
|
||||
if (*ppParam) {
|
||||
freeOperatorParam(*ppParam, OP_GET_PARAM);
|
||||
*ppParam = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -488,7 +548,7 @@ static void handleSeqJoinCurrRetrieveEnd(SOperatorInfo* pOperator, SStbJoinDynCt
|
|||
|
||||
if (pPost->leftNeedCache) {
|
||||
uint32_t* num = tSimpleHashGet(pStbJoin->ctx.prev.leftCache, &pPost->leftCurrUid, sizeof(pPost->leftCurrUid));
|
||||
if (--(*num) <= 0) {
|
||||
if (num && --(*num) <= 0) {
|
||||
tSimpleHashRemove(pStbJoin->ctx.prev.leftCache, &pPost->leftCurrUid, sizeof(pPost->leftCurrUid));
|
||||
notifySeqJoinTableCacheEnd(pOperator, pPost, true);
|
||||
}
|
||||
|
|
|
@ -277,7 +277,7 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p
|
|||
SFilterColumnParam param2 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock};
|
||||
code = filterSetDataFromSlotId(pInfo->pEndCondInfo, ¶m2);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
goto _return;
|
||||
}
|
||||
|
||||
int32_t status2 = 0;
|
||||
|
@ -331,10 +331,12 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p
|
|||
}
|
||||
}
|
||||
|
||||
_return:
|
||||
|
||||
colDataDestroy(ps);
|
||||
taosMemoryFree(ps);
|
||||
colDataDestroy(pe);
|
||||
taosMemoryFree(pe);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -223,6 +223,9 @@ static int32_t acquireFdFromFileCtx(SGcFileCacheCtx* pFileCtx, int32_t fileId, S
|
|||
SGroupCacheFileInfo newFile = {0};
|
||||
taosHashPut(pFileCtx->pCacheFile, &fileId, sizeof(fileId), &newFile, sizeof(newFile));
|
||||
pTmp = taosHashGet(pFileCtx->pCacheFile, &fileId, sizeof(fileId));
|
||||
if (NULL == pTmp) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
if (pTmp->deleted) {
|
||||
|
@ -287,7 +290,7 @@ static int32_t saveBlocksToDisk(SGroupCacheOperatorInfo* pGCache, SGcDownstreamC
|
|||
|
||||
if (deleted) {
|
||||
qTrace("FileId:%d-%d-%d already be deleted, skip write",
|
||||
pCtx->id, pGroup->vgId, pHead->basic.fileId);
|
||||
pCtx->id, pGroup ? pGroup->vgId : GROUP_CACHE_DEFAULT_VGID, pHead->basic.fileId);
|
||||
|
||||
int64_t blkId = pHead->basic.blkId;
|
||||
pHead = pHead->next;
|
||||
|
@ -337,7 +340,9 @@ static int32_t addBlkToDirtyBufList(SGroupCacheOperatorInfo* pGCache, SGcDownstr
|
|||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pBufInfo = taosHashGet(pCache->pDirtyBlk, &pBufInfo->basic.blkId, sizeof(pBufInfo->basic.blkId));
|
||||
|
||||
if (NULL == pBufInfo) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SGcBlkBufInfo* pWriteHead = NULL;
|
||||
|
||||
|
@ -378,6 +383,10 @@ static int32_t addBlkToDirtyBufList(SGroupCacheOperatorInfo* pGCache, SGcDownstr
|
|||
|
||||
static FORCE_INLINE void chkRemoveVgroupCurrFile(SGcFileCacheCtx* pFileCtx, int32_t downstreamIdx, int32_t vgId) {
|
||||
SGroupCacheFileInfo* pFileInfo = taosHashGet(pFileCtx->pCacheFile, &pFileCtx->fileId, sizeof(pFileCtx->fileId));
|
||||
if (NULL == pFileInfo) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (0 == pFileInfo->groupNum) {
|
||||
removeGroupCacheFile(pFileInfo);
|
||||
|
||||
|
@ -711,6 +720,9 @@ static int32_t addFileRefTableNum(SGcFileCacheCtx* pFileCtx, int32_t fileId, int
|
|||
newFile.groupNum = 1;
|
||||
taosHashPut(pFileCtx->pCacheFile, &fileId, sizeof(fileId), &newFile, sizeof(newFile));
|
||||
pTmp = taosHashGet(pFileCtx->pCacheFile, &fileId, sizeof(fileId));
|
||||
if (NULL == pTmp) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
} else {
|
||||
pTmp->groupNum++;
|
||||
}
|
||||
|
@ -786,6 +798,9 @@ static int32_t addNewGroupData(struct SOperatorInfo* pOperator, SOperatorParam*
|
|||
}
|
||||
|
||||
*ppGrp = taosHashGet(pGrpHash, &uid, sizeof(uid));
|
||||
if (NULL == *ppGrp) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
initNewGroupData(pCtx, *ppGrp, pParam->downstreamIdx, vgId, pGCache->batchFetch, pGcParam->needCache);
|
||||
|
||||
qError("new group %" PRIu64 " initialized, downstreamIdx:%d, vgId:%d, needCache:%d", uid, pParam->downstreamIdx, vgId, pGcParam->needCache);
|
||||
|
|
|
@ -636,12 +636,14 @@ static int32_t addRowToHashImpl(SHJoinOperatorInfo* pJoin, SGroupData* pGroup, S
|
|||
|
||||
int32_t code = getValBufFromPages(pJoin->pRowBufs, getHJoinValBufSize(pTable, rowIdx), &pTable->valData, pRow);
|
||||
if (code) {
|
||||
taosMemoryFree(pRow);
|
||||
return code;
|
||||
}
|
||||
|
||||
if (NULL == pGroup) {
|
||||
pRow->next = NULL;
|
||||
if (tSimpleHashPut(pJoin->pKeyHash, pTable->keyData, keyLen, &group, sizeof(group))) {
|
||||
taosMemoryFree(pRow);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -711,6 +711,11 @@ static bool mergeJoinGetNextTimestamp(SOperatorInfo* pOperator, int64_t* pLeftTs
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (NULL == pJoinInfo->pLeft || NULL == pJoinInfo->pRight) {
|
||||
setMergeJoinDone(pOperator);
|
||||
return false;
|
||||
}
|
||||
|
||||
// only the timestamp match support for ordinary table
|
||||
SColumnInfoData* pLeftCol = taosArrayGet(pJoinInfo->pLeft->pDataBlock, pJoinInfo->leftCol.slotId);
|
||||
|
|
|
@ -2257,7 +2257,7 @@ FETCH_NEXT_BLOCK:
|
|||
int32_t current = pInfo->validBlockIndex++;
|
||||
SPackedData* pSubmit = taosArrayGet(pInfo->pBlockLists, current);
|
||||
|
||||
qDebug("set %d/%d as the input submit block, %s", current, totalBlocks, id);
|
||||
qDebug("set %d/%d as the input submit block, %s", current + 1, totalBlocks, id);
|
||||
if (pAPI->tqReaderFn.tqReaderSetSubmitMsg(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver) < 0) {
|
||||
qError("submit msg messed up when initializing stream submit block %p, current %d/%d, %s", pSubmit, current, totalBlocks, id);
|
||||
continue;
|
||||
|
@ -2883,7 +2883,7 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) {
|
|||
}
|
||||
|
||||
|
||||
static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI, STagScanInfo* pInfo) {
|
||||
static int32_t tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI, STagScanInfo* pInfo) {
|
||||
int32_t code = 0;
|
||||
int32_t numOfTables = taosArrayGetSize(aUidTags);
|
||||
|
||||
|
@ -2894,9 +2894,15 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aF
|
|||
SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
|
||||
|
||||
SScalarParam output = {0};
|
||||
tagScanCreateResultData(&type, numOfTables, &output);
|
||||
code = tagScanCreateResultData(&type, numOfTables, &output);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
scalarCalculate(pTagCond, pBlockList, &output);
|
||||
code = scalarCalculate(pTagCond, pBlockList, &output);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
bool* result = (bool*)output.columnData->pData;
|
||||
for (int32_t i = 0 ; i < numOfTables; ++i) {
|
||||
|
@ -2911,7 +2917,7 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aF
|
|||
blockDataDestroy(pResBlock);
|
||||
taosArrayDestroy(pBlockList);
|
||||
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void tagScanFillOneCellWithTag(SOperatorInfo* pOperator, const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) {
|
||||
|
@ -3024,7 +3030,11 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) {
|
|||
bool ignoreFilterIdx = true;
|
||||
if (pInfo->pTagCond != NULL) {
|
||||
ignoreFilterIdx = false;
|
||||
tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI, pInfo);
|
||||
int32_t code = tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI, pInfo);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
pOperator->pTaskInfo->code = code;
|
||||
T_LONG_JMP(pOperator->pTaskInfo->env, code);
|
||||
}
|
||||
} else {
|
||||
ignoreFilterIdx = true;
|
||||
}
|
||||
|
|
|
@ -669,8 +669,13 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle*
|
|||
p->info.id.groupId = tupleGroupId;
|
||||
pInfo->groupId = tupleGroupId;
|
||||
} else {
|
||||
pInfo->prefetchedTuple = pTupleHandle;
|
||||
break;
|
||||
if (p->info.rows == 0) {
|
||||
appendOneRowToDataBlock(p, pTupleHandle);
|
||||
p->info.id.groupId = pInfo->groupId = tupleGroupId;
|
||||
} else {
|
||||
pInfo->prefetchedTuple = pTupleHandle;
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
appendOneRowToDataBlock(p, pTupleHandle);
|
||||
|
@ -715,14 +720,9 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
|
|||
resetLimitInfoForNextGroup(&pInfo->limitInfo);
|
||||
}
|
||||
|
||||
bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo);
|
||||
// if limit is reached within a group, do not clear limiInfo otherwise the next block
|
||||
// will be processed.
|
||||
if (newgroup && limitReached) {
|
||||
resetLimitInfoForNextGroup(&pInfo->limitInfo);
|
||||
}
|
||||
applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo);
|
||||
|
||||
if (p->info.rows > 0 || limitReached) {
|
||||
if (p->info.rows > 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -786,6 +786,8 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDat
|
|||
if (startPos < 0) {
|
||||
break;
|
||||
}
|
||||
qDebug("===stream===ignore expired data, window end ts:%" PRId64 ", maxts - wartermak:%" PRId64, nextWin.ekey,
|
||||
pInfo->twAggSup.maxTs - pInfo->twAggSup.waterMark);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -1552,6 +1554,7 @@ void destroyStreamSessionAggOperatorInfo(void* param) {
|
|||
tSimpleHashCleanup(pInfo->pStUpdated);
|
||||
tSimpleHashCleanup(pInfo->pStDeleted);
|
||||
pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated);
|
||||
cleanupGroupResInfo(&pInfo->groupResInfo);
|
||||
|
||||
taosArrayDestroy(pInfo->historyWins);
|
||||
blockDataDestroy(pInfo->pCheckpointRes);
|
||||
|
@ -2197,6 +2200,7 @@ void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL
|
|||
pGroupResInfo->pRows = pArrayList;
|
||||
pGroupResInfo->index = 0;
|
||||
pGroupResInfo->pBuf = NULL;
|
||||
pGroupResInfo->freeItem = false;
|
||||
}
|
||||
|
||||
void doBuildSessionResult(SOperatorInfo* pOperator, void* pState, SGroupResInfo* pGroupResInfo, SSDataBlock* pBlock) {
|
||||
|
@ -2874,10 +2878,12 @@ void destroyStreamStateOperatorInfo(void* param) {
|
|||
}
|
||||
colDataDestroy(&pInfo->twAggSup.timeWindowData);
|
||||
blockDataDestroy(pInfo->pDelRes);
|
||||
taosArrayDestroy(pInfo->historyWins);
|
||||
tSimpleHashCleanup(pInfo->pSeUpdated);
|
||||
tSimpleHashCleanup(pInfo->pSeDeleted);
|
||||
pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated);
|
||||
cleanupGroupResInfo(&pInfo->groupResInfo);
|
||||
|
||||
taosArrayDestroy(pInfo->historyWins);
|
||||
blockDataDestroy(pInfo->pCheckpointRes);
|
||||
|
||||
taosMemoryFreeClear(param);
|
||||
|
|
|
@ -578,9 +578,8 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma
|
|||
SColumnInfoData* pCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, pFillInfo->srcTsSlotId);
|
||||
int64_t* tsList = (int64_t*)pCol->pData;
|
||||
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
|
||||
numOfRes = taosTimeCountInterval(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding,
|
||||
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision);
|
||||
numOfRes += 1;
|
||||
numOfRes = taosTimeCountIntervalForFill(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding,
|
||||
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision, pFillInfo->order);
|
||||
ASSERT(numOfRes >= numOfRows);
|
||||
} else { // reach the end of data
|
||||
if ((ekey1 < pFillInfo->currentKey && FILL_IS_ASC_FILL(pFillInfo)) ||
|
||||
|
@ -588,9 +587,8 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma
|
|||
return 0;
|
||||
}
|
||||
|
||||
numOfRes = taosTimeCountInterval(ekey1, pFillInfo->currentKey, pFillInfo->interval.sliding,
|
||||
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision);
|
||||
numOfRes += 1;
|
||||
numOfRes = taosTimeCountIntervalForFill(ekey1, pFillInfo->currentKey, pFillInfo->interval.sliding,
|
||||
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision, pFillInfo->order);
|
||||
}
|
||||
|
||||
return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
|
||||
|
|
|
@ -904,6 +904,7 @@ static int32_t getPageBufIncForRow(SSDataBlock* blk, int32_t row, int32_t rowIdx
|
|||
}
|
||||
|
||||
static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockOrderInfo* order, SArray* aExtSrc) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int pgHeaderSz = sizeof(int32_t) + sizeof(int32_t) * taosArrayGetSize(pHandle->pDataBlock->pDataBlock);
|
||||
int32_t rowCap = blockDataGetCapacityInRow(pHandle->pDataBlock, pHandle->pageSize, pgHeaderSz);
|
||||
blockDataEnsureCapacity(pHandle->pDataBlock, rowCap);
|
||||
|
@ -930,7 +931,13 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO
|
|||
SArray* aPgId = taosArrayInit(8, sizeof(int32_t));
|
||||
|
||||
SMultiwayMergeTreeInfo* pTree = NULL;
|
||||
tMergeTreeCreate(&pTree, taosArrayGetSize(aBlk), &sup, blockCompareTsFn);
|
||||
code = tMergeTreeCreate(&pTree, taosArrayGetSize(aBlk), &sup, blockCompareTsFn);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
taosMemoryFree(sup.aRowIdx);
|
||||
taosMemoryFree(sup.aTs);
|
||||
|
||||
return code;
|
||||
}
|
||||
int32_t nRows = 0;
|
||||
int32_t nMergedRows = 0;
|
||||
bool mergeLimitReached = false;
|
||||
|
@ -1054,7 +1061,14 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
|
|||
tSimpleHashClear(mUidBlk);
|
||||
|
||||
int64_t p = taosGetTimestampUs();
|
||||
sortBlocksToExtSource(pHandle, aBlkSort, pOrder, aExtSrc);
|
||||
code = sortBlocksToExtSource(pHandle, aBlkSort, pOrder, aExtSrc);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tSimpleHashCleanup(mUidBlk);
|
||||
taosArrayDestroy(aBlkSort);
|
||||
taosArrayDestroy(aExtSrc);
|
||||
return code;
|
||||
}
|
||||
|
||||
int64_t el = taosGetTimestampUs() - p;
|
||||
pHandle->sortElapsed += el;
|
||||
|
||||
|
|
|
@ -1839,10 +1839,6 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
|
|||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
}
|
||||
|
||||
if (TSDB_DATA_TYPE_VARBINARY == ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type) {
|
||||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
}
|
||||
|
||||
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -121,7 +121,8 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
|
|||
snprintf(dnodeIdEnvItem, 32, "%s=%d", "DNODE_ID", pData->dnodeId);
|
||||
|
||||
float numCpuCores = 4;
|
||||
taosGetCpuCores(&numCpuCores);
|
||||
taosGetCpuCores(&numCpuCores, false);
|
||||
numCpuCores = TMAX(numCpuCores, 2);
|
||||
snprintf(thrdPoolSizeEnvItem, 32, "%s=%d", "UV_THREADPOOL_SIZE", (int)numCpuCores * 2);
|
||||
|
||||
char pathTaosdLdLib[512] = {0};
|
||||
|
|
|
@ -69,7 +69,7 @@ const char *udfdCPluginUdfInitLoadInitDestoryFuncs(SUdfCPluginCtx *udfCtx, const
|
|||
|
||||
void udfdCPluginUdfInitLoadAggFuncs(SUdfCPluginCtx *udfCtx, const char *udfName) {
|
||||
char processFuncName[TSDB_FUNC_NAME_LEN] = {0};
|
||||
strncpy(processFuncName, udfName, sizeof(processFuncName));
|
||||
snprintf(processFuncName, sizeof(processFuncName), "%s", udfName);
|
||||
uv_dlsym(&udfCtx->lib, processFuncName, (void **)(&udfCtx->aggProcFunc));
|
||||
|
||||
char startFuncName[TSDB_FUNC_NAME_LEN + 7] = {0};
|
||||
|
@ -103,7 +103,7 @@ int32_t udfdCPluginUdfInit(SScriptUdfInfo *udf, void **pUdfCtx) {
|
|||
|
||||
if (udf->funcType == UDF_FUNC_TYPE_SCALAR) {
|
||||
char processFuncName[TSDB_FUNC_NAME_LEN] = {0};
|
||||
strncpy(processFuncName, udfName, sizeof(processFuncName));
|
||||
snprintf(processFuncName, sizeof(processFuncName), "%s", udfName);
|
||||
uv_dlsym(&udfCtx->lib, processFuncName, (void **)(&udfCtx->scalarProcFunc));
|
||||
} else if (udf->funcType == UDF_FUNC_TYPE_AGG) {
|
||||
udfdCPluginUdfInitLoadAggFuncs(udfCtx, udfName);
|
||||
|
|
|
@ -683,8 +683,10 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
|
|||
pStart += BitmapLen(numOfRows);
|
||||
}
|
||||
char* pData = pStart;
|
||||
|
||||
tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
|
||||
ret = tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
|
||||
if(ret != 0){
|
||||
goto end;
|
||||
}
|
||||
fields += sizeof(int8_t) + sizeof(int32_t);
|
||||
if (needChangeLength) {
|
||||
pStart += htonl(colLength[j]);
|
||||
|
@ -712,7 +714,10 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
|
|||
char* pData = pStart;
|
||||
|
||||
SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, j);
|
||||
tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
|
||||
ret = tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
|
||||
if(ret != 0){
|
||||
goto end;
|
||||
}
|
||||
fields += sizeof(int8_t) + sizeof(int32_t);
|
||||
if (needChangeLength) {
|
||||
pStart += htonl(colLength[i]);
|
||||
|
@ -729,7 +734,10 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
|
|||
for (int c = 0; c < boundInfo->numOfBound; ++c) {
|
||||
if( boundInfo->pColIndex[c] != -1){
|
||||
SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, c);
|
||||
tColDataAddValueByDataBlock(pCol, 0, 0, numOfRows, NULL, NULL);
|
||||
ret = tColDataAddValueByDataBlock(pCol, 0, 0, numOfRows, NULL, NULL);
|
||||
if(ret != 0){
|
||||
goto end;
|
||||
}
|
||||
}else{
|
||||
boundInfo->pColIndex[c] = c; // restore for next block
|
||||
}
|
||||
|
|
|
@ -2902,7 +2902,7 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
|
|||
taosCreateMD5Hash(buf, len);
|
||||
strncpy(pFunc->node.aliasName, buf, TSDB_COL_NAME_LEN - 1);
|
||||
len = snprintf(buf, sizeof(buf) - 1, "%s(%s)", pSrcFunc->functionName, pCol->colName);
|
||||
taosCreateMD5Hash(buf, len);
|
||||
// note: userAlias could be truncated here
|
||||
strncpy(pFunc->node.userAlias, buf, TSDB_COL_NAME_LEN - 1);
|
||||
}
|
||||
} else {
|
||||
|
@ -2910,7 +2910,7 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
|
|||
taosCreateMD5Hash(buf, len);
|
||||
strncpy(pFunc->node.aliasName, buf, TSDB_COL_NAME_LEN - 1);
|
||||
len = snprintf(buf, sizeof(buf) - 1, "%s(%s)", pSrcFunc->functionName, pExpr->userAlias);
|
||||
taosCreateMD5Hash(buf, len);
|
||||
// note: userAlias could be truncated here
|
||||
strncpy(pFunc->node.userAlias, buf, TSDB_COL_NAME_LEN - 1);
|
||||
}
|
||||
|
||||
|
@ -7021,7 +7021,6 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta
|
|||
|
||||
int32_t code = nodesListStrictAppend(pParamterList, (SNode*)col);
|
||||
if (code) {
|
||||
nodesDestroyNode((SNode*)col);
|
||||
nodesDestroyList(pParamterList);
|
||||
return code;
|
||||
}
|
||||
|
@ -7039,7 +7038,6 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta
|
|||
}
|
||||
code = nodesListStrictAppend(pProjectionList, pFunc);
|
||||
if (code) {
|
||||
nodesDestroyNode(pFunc);
|
||||
nodesDestroyList(pProjectionList);
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -46,8 +46,8 @@ static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol, bool isPartit
|
|||
pCol->colType = COLUMN_TYPE_TBNAME;
|
||||
SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 0);
|
||||
if (pVal) {
|
||||
strcpy(pCol->tableName, pVal->literal);
|
||||
strcpy(pCol->tableAlias, pVal->literal);
|
||||
snprintf(pCol->tableName, sizeof(pCol->tableName), "%s", pVal->literal);
|
||||
snprintf(pCol->tableAlias, sizeof(pCol->tableAlias), "%s", pVal->literal);
|
||||
}
|
||||
break;
|
||||
case FUNCTION_TYPE_WSTART:
|
||||
|
@ -531,6 +531,9 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesListStrictAppend(pJoin->node.pChildren, (SNode*)pLeft);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
pLeft = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
SLogicNode* pRight = NULL;
|
||||
|
@ -584,7 +587,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
|
|||
}
|
||||
}
|
||||
|
||||
if (NULL == pJoin->node.pTargets) {
|
||||
if (NULL == pJoin->node.pTargets && NULL != pLeft) {
|
||||
pJoin->node.pTargets = nodesCloneList(pLeft->pTargets);
|
||||
if (NULL == pJoin->node.pTargets) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
|
|
@ -77,6 +77,8 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask);
|
|||
int32_t streamTaskFillHistoryFinished(SStreamTask* pTask);
|
||||
int32_t streamTransferStateToStreamTask(SStreamTask* pTask);
|
||||
|
||||
int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t cap, int32_t rate);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -83,7 +83,6 @@ static void streamSchedByTimer(void* param, void* tmrId) {
|
|||
atomic_store_8(&pTask->schedInfo.status, TASK_TRIGGER_STATUS__INACTIVE);
|
||||
pTrigger->pBlock->info.type = STREAM_GET_ALL;
|
||||
if (streamTaskPutDataIntoInputQ(pTask, (SStreamQueueItem*)pTrigger) < 0) {
|
||||
taosFreeQitem(pTrigger);
|
||||
taosTmrReset(streamSchedByTimer, (int32_t)pTask->info.triggerParam, pTask, streamEnv.timer, &pTask->schedInfo.pTimer);
|
||||
return;
|
||||
}
|
||||
|
@ -178,7 +177,7 @@ static int32_t streamTaskAppendInputBlocks(SStreamTask* pTask, const SStreamDisp
|
|||
}
|
||||
|
||||
int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pRsp) {
|
||||
SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
|
||||
SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, sizeof(SStreamDataBlock));
|
||||
int8_t status = TASK_INPUT_STATUS__NORMAL;
|
||||
|
||||
// enqueue
|
||||
|
@ -213,29 +212,6 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq,
|
|||
return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1;
|
||||
}
|
||||
|
||||
int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) {
|
||||
int32_t code = 0;
|
||||
int32_t type = pTask->outputInfo.type;
|
||||
if (type == TASK_OUTPUT__TABLE) {
|
||||
pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, pBlock->blocks);
|
||||
destroyStreamDataBlock(pBlock);
|
||||
} else if (type == TASK_OUTPUT__SMA) {
|
||||
pTask->smaSink.smaSink(pTask->smaSink.vnode, pTask->smaSink.smaId, pBlock->blocks);
|
||||
destroyStreamDataBlock(pBlock);
|
||||
} else {
|
||||
ASSERT(type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH);
|
||||
code = taosWriteQitem(pTask->outputInfo.queue->pQueue, pBlock);
|
||||
if (code != 0) {
|
||||
qError("s-task:%s failed to put res into outputQ", pTask->id.idStr);
|
||||
}
|
||||
|
||||
streamDispatchStreamBlock(pTask);
|
||||
return code;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) {
|
||||
qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr,
|
||||
pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen);
|
||||
|
|
|
@ -1591,7 +1591,6 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
|
|||
return 0;
|
||||
}
|
||||
int streamStateOpenBackend(void* backend, SStreamState* pState) {
|
||||
// qInfo("start to open state %p on backend %p 0x%" PRIx64 "-%d", pState, backend, pState->streamId, pState->taskId);
|
||||
taosAcquireRef(streamBackendId, pState->streamBackendRid);
|
||||
SBackendWrapper* handle = backend;
|
||||
SBackendCfWrapper* pBackendCfWrapper = taosMemoryCalloc(1, sizeof(SBackendCfWrapper));
|
||||
|
|
|
@ -125,7 +125,6 @@ static int32_t appendCheckpointIntoInputQ(SStreamTask* pTask, int32_t checkpoint
|
|||
|
||||
taosMemoryFree(pBlock);
|
||||
if (streamTaskPutDataIntoInputQ(pTask, (SStreamQueueItem*)pChkpoint) < 0) {
|
||||
taosFreeQitem(pChkpoint);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -271,7 +270,12 @@ int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId) {
|
|||
keys[0] = pId->streamId;
|
||||
keys[1] = pId->taskId;
|
||||
|
||||
SStreamTask* p = *(SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys));
|
||||
SStreamTask** ppTask = taosHashGet(pMeta->pTasks, keys, sizeof(keys));
|
||||
if (ppTask == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
SStreamTask* p = *ppTask;
|
||||
if (p->info.fillHistory == 1) {
|
||||
continue;
|
||||
}
|
||||
|
@ -287,7 +291,7 @@ int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId) {
|
|||
streamTaskOpenAllUpstreamInput(p); // open inputQ for all upstream tasks
|
||||
qDebug("vgId:%d s-task:%s level:%d commit task status after checkpoint completed, checkpointId:%" PRId64
|
||||
", Ver(saved):%" PRId64 " currentVer:%" PRId64 ", status to be normal, prev:%s",
|
||||
pMeta->vgId, p->id.idStr, p->info.taskLevel, checkpointId, p->chkInfo.checkpointVer, p->chkInfo.currentVer,
|
||||
pMeta->vgId, p->id.idStr, p->info.taskLevel, checkpointId, p->chkInfo.checkpointVer, p->chkInfo.nextProcessVer,
|
||||
streamGetTaskStatusStr(prev));
|
||||
}
|
||||
|
||||
|
|
|
@ -115,28 +115,16 @@ SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
pDataSubmit->dataRef = (int32_t*)taosMemoryMalloc(sizeof(int32_t));
|
||||
if (pDataSubmit->dataRef == NULL) {
|
||||
taosFreeQitem(pDataSubmit);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pDataSubmit->ver = pData->ver;
|
||||
pDataSubmit->submit = *pData;
|
||||
*pDataSubmit->dataRef = 1; // initialize the reference count to be 1
|
||||
pDataSubmit->type = type;
|
||||
|
||||
return pDataSubmit;
|
||||
}
|
||||
|
||||
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit) {
|
||||
int32_t ref = atomic_sub_fetch_32(pDataSubmit->dataRef, 1);
|
||||
ASSERT(ref >= 0 && pDataSubmit->type == STREAM_INPUT__DATA_SUBMIT);
|
||||
|
||||
if (ref == 0) {
|
||||
taosMemoryFree(pDataSubmit->submit.msgStr);
|
||||
taosMemoryFree(pDataSubmit->dataRef);
|
||||
}
|
||||
ASSERT(pDataSubmit->type == STREAM_INPUT__DATA_SUBMIT);
|
||||
taosMemoryFree(pDataSubmit->submit.msgStr);
|
||||
}
|
||||
|
||||
SStreamMergedSubmit* streamMergedSubmitNew() {
|
||||
|
@ -146,11 +134,8 @@ SStreamMergedSubmit* streamMergedSubmitNew() {
  }

  pMerged->submits = taosArrayInit(0, sizeof(SPackedData));
  pMerged->dataRefs = taosArrayInit(0, sizeof(void*));

  if (pMerged->dataRefs == NULL || pMerged->submits == NULL) {
  if (pMerged->submits == NULL) {
    taosArrayDestroy(pMerged->submits);
    taosArrayDestroy(pMerged->dataRefs);
    taosFreeQitem(pMerged);
    return NULL;
  }

@ -160,9 +145,10 @@ SStreamMergedSubmit* streamMergedSubmitNew() {
  }

int32_t streamMergeSubmit(SStreamMergedSubmit* pMerged, SStreamDataSubmit* pSubmit) {
  taosArrayPush(pMerged->dataRefs, &pSubmit->dataRef);
  taosArrayPush(pMerged->submits, &pSubmit->submit);
  pMerged->ver = pSubmit->ver;
  if (pSubmit->ver > pMerged->ver) {
    pMerged->ver = pSubmit->ver;
  }
  return 0;
}

@ -222,18 +208,10 @@ void streamFreeQitem(SStreamQueueItem* data) {

    int32_t sz = taosArrayGetSize(pMerge->submits);
    for (int32_t i = 0; i < sz; i++) {
      int32_t* pRef = taosArrayGetP(pMerge->dataRefs, i);
      int32_t  ref = atomic_sub_fetch_32(pRef, 1);
      ASSERT(ref >= 0);

      if (ref == 0) {
        SPackedData* pSubmit = (SPackedData*)taosArrayGet(pMerge->submits, i);
        taosMemoryFree(pSubmit->msgStr);
        taosMemoryFree(pRef);
      }
      SPackedData* pSubmit = (SPackedData*)taosArrayGet(pMerge->submits, i);
      taosMemoryFree(pSubmit->msgStr);
    }
    taosArrayDestroy(pMerge->submits);
    taosArrayDestroy(pMerge->dataRefs);
    taosFreeQitem(pMerge);
  } else if (type == STREAM_INPUT__REF_DATA_BLOCK) {
    SStreamRefDataBlock* pRefBlock = (SStreamRefDataBlock*)data;

@ -498,9 +498,10 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
  ASSERT((pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH));

  const char* id = pTask->id.idStr;
  int32_t numOfElems = taosQueueItemSize(pTask->outputInfo.queue->pQueue);
  int32_t numOfElems = streamQueueGetNumOfItems(pTask->outputInfo.queue);
  if (numOfElems > 0) {
    qDebug("s-task:%s try to dispatch intermediate block to downstream, elem in outputQ:%d", id, numOfElems);
    double size = SIZE_IN_MB(taosQueueMemorySize(pTask->outputInfo.queue->pQueue));
    qDebug("s-task:%s start to dispatch intermediate block to downstream, elem in outputQ:%d, size:%.2fMiB", id, numOfElems, size);
  }

  // to make sure only one dispatch is running

@ -1001,10 +1002,17 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
    // so the TASK_INPUT_STATUS_BLOCKED is rsp
    if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
      pTask->inputInfo.status = TASK_INPUT_STATUS__BLOCKED;  // block the input of current task, to push pressure to upstream
      pTask->msgInfo.blockingTs = taosGetTimestampMs();      // record the blocking start time
      double el = 0;
      if (pTask->msgInfo.blockingTs == 0) {
        pTask->msgInfo.blockingTs = taosGetTimestampMs();    // record the blocking start time
      } else {
        el = (taosGetTimestampMs() - pTask->msgInfo.blockingTs) / 1000.0;
      }

      int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);
      qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data, ref:%d",
             id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS, ref);
      qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64
             " wait for %dms and retry dispatch data, total wait:%.2fSec ref:%d",
             id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS, el, ref);
      streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
    } else {  // pipeline send data in output queue
      // this message has been sent successfully, let's try next one.

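      // note on the change above: only the first BLOCKED response records blockingTs (el stays 0);
      // every later retry computes el as the total seconds the downstream inputQ has stayed full,
      // which is what the updated qError message reports.
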
@ -32,33 +32,58 @@ bool streamTaskShouldPause(const SStreamStatus* pStatus) {
|
|||
return (status == TASK_STATUS__PAUSE);
|
||||
}
|
||||
|
||||
static int32_t doOutputResultBlockImpl(SStreamTask* pTask, SStreamDataBlock* pBlock) {
|
||||
int32_t code = 0;
|
||||
int32_t type = pTask->outputInfo.type;
|
||||
if (type == TASK_OUTPUT__TABLE) {
|
||||
pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, pBlock->blocks);
|
||||
destroyStreamDataBlock(pBlock);
|
||||
} else if (type == TASK_OUTPUT__SMA) {
|
||||
pTask->smaSink.smaSink(pTask->smaSink.vnode, pTask->smaSink.smaId, pBlock->blocks);
|
||||
destroyStreamDataBlock(pBlock);
|
||||
} else {
|
||||
ASSERT(type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH);
|
||||
code = streamTaskPutDataIntoOutputQ(pTask, pBlock);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
streamDispatchStreamBlock(pTask);
|
||||
return code;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* pRes, int32_t size, int64_t* totalSize,
|
||||
int32_t* totalBlocks) {
|
||||
int32_t numOfBlocks = taosArrayGetSize(pRes);
|
||||
if (numOfBlocks > 0) {
|
||||
SStreamDataBlock* pStreamBlocks = createStreamBlockFromResults(pItem, pTask, size, pRes);
|
||||
if (pStreamBlocks == NULL) {
|
||||
qError("s-task:%s failed to create result stream data block, code:%s", pTask->id.idStr, tstrerror(terrno));
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
return -1;
|
||||
}
|
||||
|
||||
qDebug("s-task:%s dump stream result data blocks, num:%d, size:%.2fMiB", pTask->id.idStr, numOfBlocks,
|
||||
SIZE_IN_MB(size));
|
||||
|
||||
int32_t code = streamTaskOutputResultBlock(pTask, pStreamBlocks);
|
||||
if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { // back pressure and record position
|
||||
destroyStreamDataBlock(pStreamBlocks);
|
||||
return -1;
|
||||
}
|
||||
|
||||
*totalSize += size;
|
||||
*totalBlocks += numOfBlocks;
|
||||
} else {
|
||||
if (numOfBlocks == 0) {
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
SStreamDataBlock* pStreamBlocks = createStreamBlockFromResults(pItem, pTask, size, pRes);
|
||||
if (pStreamBlocks == NULL) {
|
||||
qError("s-task:%s failed to create result stream data block, code:%s", pTask->id.idStr, tstrerror(terrno));
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
qDebug("s-task:%s dump stream result data blocks, num:%d, size:%.2fMiB", pTask->id.idStr, numOfBlocks,
|
||||
SIZE_IN_MB(size));
|
||||
|
||||
int32_t code = doOutputResultBlockImpl(pTask, pStreamBlocks);
|
||||
if (code != TSDB_CODE_SUCCESS) { // back pressure and record position
|
||||
//code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY
|
||||
destroyStreamDataBlock(pStreamBlocks);
|
||||
return code;
|
||||
}
|
||||
|
||||
*totalSize += size;
|
||||
*totalBlocks += numOfBlocks;
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, int64_t* totalSize,
|
||||
|
@ -84,7 +109,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
|
|||
}
|
||||
|
||||
if (pTask->inputInfo.status == TASK_INPUT_STATUS__BLOCKED) {
|
||||
qWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry", pTask->id.idStr);
|
||||
qWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry exec task", pTask->id.idStr);
|
||||
taosMsleep(1000);
|
||||
continue;
|
||||
}
|
||||
|
@ -164,11 +189,13 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize) {
|
||||
int32_t streamScanHistoryData(SStreamTask* pTask) {
|
||||
ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE);
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
void* exec = pTask->exec.pExecutor;
|
||||
bool finished = false;
|
||||
int32_t outputBatchSize = 100;
|
||||
|
||||
qSetStreamOpOpen(exec);
|
||||
|
||||
|
@ -217,8 +244,8 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize) {
|
|||
block.info.childId = pTask->info.selfChildId;
|
||||
taosArrayPush(pRes, &block);
|
||||
|
||||
if ((++numOfBlocks) >= batchSize) {
|
||||
qDebug("s-task:%s scan exec numOfBlocks:%d, output limit:%d reached", pTask->id.idStr, numOfBlocks, batchSize);
|
||||
if ((++numOfBlocks) >= outputBatchSize) {
|
||||
qDebug("s-task:%s scan exec numOfBlocks:%d, output limit:%d reached", pTask->id.idStr, numOfBlocks, outputBatchSize);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -234,7 +261,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize) {
|
|||
qRes->type = STREAM_INPUT__DATA_BLOCK;
|
||||
qRes->blocks = pRes;
|
||||
|
||||
code = streamTaskOutputResultBlock(pTask, qRes);
|
||||
code = doOutputResultBlockImpl(pTask, qRes);
|
||||
if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) {
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
taosFreeQitem(qRes);
|
||||
|
@ -248,13 +275,6 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamTaskGetInputQItems(const SStreamTask* pTask) {
|
||||
int32_t numOfItems1 = taosQueueItemSize(pTask->inputInfo.queue->pQueue);
|
||||
int32_t numOfItems2 = taosQallItemSize(pTask->inputInfo.queue->qall);
|
||||
|
||||
return numOfItems1 + numOfItems2;
|
||||
}
|
||||
|
||||
// wait for the stream task to be idle
|
||||
static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) {
|
||||
const char* id = pTask->id.idStr;
|
||||
|
@ -541,7 +561,7 @@ int32_t streamExecForAll(SStreamTask* pTask) {
|
|||
|
||||
if (type == STREAM_INPUT__DATA_BLOCK) {
|
||||
qDebug("s-task:%s sink task start to sink %d blocks", id, numOfBlocks);
|
||||
streamTaskOutputResultBlock(pTask, (SStreamDataBlock*)pInput);
|
||||
doOutputResultBlockImpl(pTask, (SStreamDataBlock*)pInput);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
@ -563,11 +583,11 @@ int32_t streamExecForAll(SStreamTask* pTask) {
|
|||
SIZE_IN_MB(resSize), totalBlocks);
|
||||
|
||||
// update the currentVer if processing the submit blocks.
|
||||
ASSERT(pTask->chkInfo.checkpointVer <= pTask->chkInfo.currentVer && ver >= pTask->chkInfo.checkpointVer);
|
||||
ASSERT(pTask->chkInfo.checkpointVer <= pTask->chkInfo.nextProcessVer && ver >= pTask->chkInfo.checkpointVer);
|
||||
|
||||
if (ver != pTask->chkInfo.checkpointVer) {
|
||||
qDebug("s-task:%s update checkpointVer(unsaved) from %" PRId64 " to %" PRId64, pTask->id.idStr,
|
||||
pTask->chkInfo.checkpointVer, ver);
|
||||
qDebug("s-task:%s update checkpointVer(unsaved) from %" PRId64 " to %" PRId64 " , currentVer:%" PRId64,
|
||||
pTask->id.idStr, pTask->chkInfo.checkpointVer, ver, pTask->chkInfo.nextProcessVer);
|
||||
pTask->chkInfo.checkpointVer = ver;
|
||||
}
|
||||
|
||||
|
@ -576,7 +596,6 @@ int32_t streamExecForAll(SStreamTask* pTask) {
|
|||
// todo other thread may change the status
|
||||
// do nothing after sync executor state to storage backend, untill the vnode-level checkpoint is completed.
|
||||
if (type == STREAM_INPUT__CHECKPOINT) {
|
||||
// ASSERT(pTask->status.taskStatus == TASK_STATUS__CK);
|
||||
qDebug("s-task:%s checkpoint block received, set the status:%s", pTask->id.idStr,
|
||||
streamGetTaskStatusStr(pTask->status.taskStatus));
|
||||
streamTaskBuildCheckpoint(pTask);
|
||||
|
@ -608,8 +627,6 @@ int32_t streamTryExec(SStreamTask* pTask) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
// streamTaskBuildCheckpoint(pTask);
|
||||
|
||||
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
||||
qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus),
|
||||
pTask->status.schedStatus);
|
||||
|
|
|
@ -418,6 +418,10 @@ int32_t streamMetaGetNumOfStreamTasks(SStreamMeta* pMeta) {
|
|||
int64_t keys[2] = {pId->streamId, pId->taskId};
|
||||
|
||||
SStreamTask** p = taosHashGet(pMeta->pTasks, keys, sizeof(keys));
|
||||
if (p == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if ((*p)->info.fillHistory == 0) {
|
||||
num += 1;
|
||||
}
|
||||
|
@ -539,10 +543,13 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
|
|||
}
|
||||
|
||||
int32_t streamMetaBegin(SStreamMeta* pMeta) {
|
||||
taosWLockLatch(&pMeta->lock);
|
||||
if (tdbBegin(pMeta->db, &pMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL,
|
||||
TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
|
||||
taosWUnLockLatch(&pMeta->lock);
|
||||
return -1;
|
||||
}
|
||||
taosWUnLockLatch(&pMeta->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -15,10 +15,11 @@

#include "streamInt.h"

#define MAX_STREAM_EXEC_BATCH_NUM 32
#define MIN_STREAM_EXEC_BATCH_NUM 4
#define STREAM_TASK_INPUT_QUEUE_CAPACITY 20480
#define STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE (30)
#define MAX_STREAM_EXEC_BATCH_NUM 32
#define MIN_STREAM_EXEC_BATCH_NUM 4
#define STREAM_TASK_QUEUE_CAPACITY 20480
#define STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE (30)
#define STREAM_TASK_OUTPUT_QUEUE_CAPACITY_IN_SIZE (50)

// todo refactor:
// read data from input queue

@ -29,6 +30,8 @@ typedef struct SQueueReader {
  int32_t waitDuration;  // maximum wait time to format several block into a batch to process, unit: ms
} SQueueReader;

static bool streamTaskHasAvailableToken(STokenBucket* pBucket);

static void streamQueueCleanup(SStreamQueue* pQueue) {
  void* qItem = NULL;
  while ((qItem = streamQueueNextItem(pQueue)) != NULL) {

@ -157,10 +160,22 @@ SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue) {
}
#endif

bool streamQueueIsFull(const STaosQueue* pQueue) {
  bool isFull = taosQueueItemSize((STaosQueue*) pQueue) >= STREAM_TASK_INPUT_QUEUE_CAPACITY;
  double size = SIZE_IN_MB(taosQueueMemorySize((STaosQueue*) pQueue));
  return (isFull || size >= STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE);
bool streamQueueIsFull(const STaosQueue* pQueue, bool inputQ) {
  bool isFull = taosQueueItemSize((STaosQueue*)pQueue) >= STREAM_TASK_QUEUE_CAPACITY;
  if (isFull) {
    return true;
  }

  int32_t threahold = (inputQ) ? STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE : STREAM_TASK_OUTPUT_QUEUE_CAPACITY_IN_SIZE;
  double size = SIZE_IN_MB(taosQueueMemorySize((STaosQueue*)pQueue));
  return (size >= threahold);
}

int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue) {
  int32_t numOfItems1 = taosQueueItemSize(pQueue->pQueue);
  int32_t numOfItems2 = taosQallItemSize(pQueue->qall);

  return numOfItems1 + numOfItems2;
}

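// note on streamQueueIsFull: a queue is now considered full when it either reaches
// STREAM_TASK_QUEUE_CAPACITY (20480) items, or its buffered data exceeds 30 MiB for an input
// queue / 50 MiB for an output queue -- whichever limit is hit first.
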
int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks) {
|
||||
|
@ -175,6 +190,14 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu
    return TSDB_CODE_SUCCESS;
  }

  STokenBucket* pBucket = &pTask->tokenBucket;
  bool has = streamTaskHasAvailableToken(pBucket);
  if (!has) { // no available token in th bucket, ignore this execution
    // qInfo("s-task:%s no available token for sink, capacity:%d, rate:%d token/sec, quit", pTask->id.idStr,
    //       pBucket->capacity, pBucket->rate);
    return TSDB_CODE_SUCCESS;
  }

  SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputInfo.queue);
  if (qItem == NULL) {
    qDebug("===stream===break batchSize:%d, %s", *numOfBlocks, id);

@ -258,15 +281,15 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu
|
|||
int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem) {
|
||||
int8_t type = pItem->type;
|
||||
STaosQueue* pQueue = pTask->inputInfo.queue->pQueue;
|
||||
int32_t total = taosQueueItemSize(pQueue) + 1;
|
||||
double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
|
||||
int32_t total = streamQueueGetNumOfItems(pTask->inputInfo.queue) + 1;
|
||||
|
||||
if (type == STREAM_INPUT__DATA_SUBMIT) {
|
||||
SStreamDataSubmit* px = (SStreamDataSubmit*)pItem;
|
||||
if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && streamQueueIsFull(pQueue)) {
|
||||
qError(
|
||||
if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && streamQueueIsFull(pQueue, true)) {
|
||||
double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
|
||||
qTrace(
|
||||
"s-task:%s inputQ is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) stop to push data",
|
||||
pTask->id.idStr, STREAM_TASK_INPUT_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
|
||||
pTask->id.idStr, STREAM_TASK_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
|
||||
streamDataSubmitDestroy(px);
|
||||
taosFreeQitem(pItem);
|
||||
return -1;
|
||||
|
@ -282,32 +305,50 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
|
|||
return code;
|
||||
}
|
||||
|
||||
double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
|
||||
|
||||
// use the local variable to avoid the pItem be freed by other threads, since it has been put into queue already.
|
||||
qDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr,
|
||||
msgLen, ver, total, size + SIZE_IN_MB(msgLen));
|
||||
} else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
|
||||
type == STREAM_INPUT__REF_DATA_BLOCK) {
|
||||
if (streamQueueIsFull(pQueue)) {
|
||||
qError("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
|
||||
pTask->id.idStr, STREAM_TASK_INPUT_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
|
||||
if (streamQueueIsFull(pQueue, true)) {
|
||||
double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
|
||||
|
||||
qTrace("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
|
||||
pTask->id.idStr, STREAM_TASK_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
|
||||
destroyStreamDataBlock((SStreamDataBlock*)pItem);
|
||||
return -1;
|
||||
}
|
||||
|
||||
qDebug("s-task:%s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size);
|
||||
int32_t code = taosWriteQitem(pQueue, pItem);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
destroyStreamDataBlock((SStreamDataBlock*)pItem);
|
||||
return code;
|
||||
}
|
||||
|
||||
double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
|
||||
qDebug("s-task:%s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size);
|
||||
} else if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__CHECKPOINT_TRIGGER ||
|
||||
type == STREAM_INPUT__TRANS_STATE) {
|
||||
taosWriteQitem(pQueue, pItem);
|
||||
int32_t code = taosWriteQitem(pQueue, pItem);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
taosFreeQitem(pItem);
|
||||
return code;
|
||||
}
|
||||
|
||||
double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
|
||||
qDebug("s-task:%s level:%d %s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr,
|
||||
pTask->info.taskLevel, streamGetBlockTypeStr(type), total, size);
|
||||
} else if (type == STREAM_INPUT__GET_RES) {
|
||||
// use the default memory limit, refactor later.
|
||||
taosWriteQitem(pQueue, pItem);
|
||||
int32_t code = taosWriteQitem(pQueue, pItem);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
taosFreeQitem(pItem);
|
||||
return code;
|
||||
}
|
||||
|
||||
double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
|
||||
qDebug("s-task:%s data res enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size);
|
||||
} else {
|
||||
ASSERT(0);
|
||||
|
@ -320,3 +361,76 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)

  return 0;
}

// the result should be put into the outputQ in any cases, otherwise, the result may be lost
int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBlock) {
  STaosQueue* pQueue = pTask->outputInfo.queue->pQueue;

  while (streamQueueIsFull(pQueue, false)) {
    if (streamTaskShouldStop(&pTask->status)) {
      qInfo("s-task:%s discard result block due to task stop", pTask->id.idStr);
      return TSDB_CODE_STREAM_EXEC_CANCELLED;
    }

    int32_t total = streamQueueGetNumOfItems(pTask->outputInfo.queue);
    double  size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
    // let's wait for there are enough space to hold this result pBlock
    qDebug("s-task:%s outputQ is full, wait for 500ms and retry, outputQ items:%d, size:%.2fMiB", pTask->id.idStr,
           total, size);
    taosMsleep(500);
  }

  int32_t code = taosWriteQitem(pQueue, pBlock);

  int32_t total = streamQueueGetNumOfItems(pTask->outputInfo.queue);
  double  size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
  if (code != 0) {
    qError("s-task:%s failed to put res into outputQ, outputQ items:%d, size:%.2fMiB code:%s, result lost",
           pTask->id.idStr, total + 1, size, tstrerror(code));
  } else {
    qInfo("s-task:%s data put into outputQ, outputQ items:%d, size:%.2fMiB", pTask->id.idStr, total, size);
  }

  return TSDB_CODE_SUCCESS;
}

int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t cap, int32_t rate) {
  if (cap < 100 || rate < 50 || pBucket == NULL) {
    qError("failed to init sink task bucket, cap:%d, rate:%d", cap, rate);
    return TSDB_CODE_INVALID_PARA;
  }

  pBucket->capacity = cap;
  pBucket->rate = rate;
  pBucket->numOfToken = cap;
  pBucket->fillTimestamp = taosGetTimestampMs();
  return TSDB_CODE_SUCCESS;
}

static void fillBucket(STokenBucket* pBucket) {
  int64_t now = taosGetTimestampMs();
  int64_t delta = now - pBucket->fillTimestamp;
  ASSERT(pBucket->numOfToken >= 0);

  int32_t inc = (delta / 1000.0) * pBucket->rate;
  if (inc > 0) {
    if ((pBucket->numOfToken + inc) < pBucket->capacity) {
      pBucket->numOfToken += inc;
    } else {
      pBucket->numOfToken = pBucket->capacity;
    }

    pBucket->fillTimestamp = now;
    qDebug("new token available, current:%d, inc:%d ts:%"PRId64, pBucket->numOfToken, inc, now);
  }
}

bool streamTaskHasAvailableToken(STokenBucket* pBucket) {
  fillBucket(pBucket);
  if (pBucket->numOfToken > 0) {
    --pBucket->numOfToken;
    return true;
  } else {
    return false;
  }
}
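
// how the token bucket above behaves: streamTaskInit (later in this change) calls
// streamTaskInitTokenBucket(&pTask->tokenBucket, 150, 100), so a task starts with 150 tokens and
// fillBucket() restores 100 tokens per elapsed second, capped at the 150-token capacity. Each pass of
// streamTaskGetDataFromInputQ consumes one token via streamTaskHasAvailableToken() and returns early
// when the bucket is empty, so input batches are extracted at most about 100 times per second once the
// initial tokens are spent.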
@ -30,12 +30,19 @@ static void streamTaskSetRangeStreamCalc(SStreamTask* pTask);
|
|||
static int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated);
|
||||
|
||||
static void streamTaskSetReady(SStreamTask* pTask, int32_t numOfReqs) {
|
||||
if (pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY && pTask->info.taskLevel != TASK_LEVEL__SOURCE) {
|
||||
pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->pUpstreamInfoList);
|
||||
qDebug("s-task:%s level:%d task wait for %d upstream tasks complete scan-history procedure, status:%s",
|
||||
pTask->id.idStr, pTask->info.taskLevel, pTask->numOfWaitingUpstream,
|
||||
streamGetTaskStatusStr(pTask->status.taskStatus));
|
||||
}
|
||||
|
||||
ASSERT(pTask->status.downstreamReady == 0);
|
||||
pTask->status.downstreamReady = 1;
|
||||
|
||||
int64_t el = (taosGetTimestampMs() - pTask->tsInfo.init);
|
||||
qDebug("s-task:%s all %d downstream ready, init completed, elapsed time:%dms, task status:%s",
|
||||
pTask->id.idStr, numOfReqs, (int32_t) el, streamGetTaskStatusStr(pTask->status.taskStatus));
|
||||
qDebug("s-task:%s all %d downstream ready, init completed, elapsed time:%"PRId64"ms, task status:%s",
|
||||
pTask->id.idStr, numOfReqs, el, streamGetTaskStatusStr(pTask->status.taskStatus));
|
||||
}
|
||||
|
||||
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated) {
|
||||
|
@ -97,11 +104,8 @@ int32_t streamTaskLaunchScanHistory(SStreamTask* pTask) {
|
|||
streamSetParamForScanHistory(pTask);
|
||||
streamTaskEnablePause(pTask);
|
||||
}
|
||||
|
||||
streamTaskScanHistoryPrepare(pTask);
|
||||
} else if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
|
||||
qDebug("s-task:%s sink task do nothing to handle scan-history", pTask->id.idStr);
|
||||
streamTaskScanHistoryPrepare(pTask);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -367,10 +371,6 @@ int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamSourceScanHistoryData(SStreamTask* pTask) {
|
||||
return streamScanExec(pTask, 100);
|
||||
}
|
||||
|
||||
int32_t streamTaskPutTranstateIntoInputQ(SStreamTask* pTask) {
|
||||
SStreamDataBlock* pTranstate = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, sizeof(SSDataBlock));
|
||||
if (pTranstate == NULL) {
|
||||
|
@ -402,15 +402,6 @@ int32_t streamTaskPutTranstateIntoInputQ(SStreamTask* pTask) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// agg
|
||||
int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask) {
|
||||
pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->pUpstreamInfoList);
|
||||
qDebug("s-task:%s level:%d task wait for %d upstream tasks complete scan-history procedure, status:%s",
|
||||
pTask->id.idStr, pTask->info.taskLevel, pTask->numOfWaitingUpstream,
|
||||
streamGetTaskStatusStr(pTask->status.taskStatus));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamAggUpstreamScanHistoryFinish(SStreamTask* pTask) {
|
||||
void* exec = pTask->exec.pExecutor;
|
||||
if (pTask->info.fillHistory && qRestoreStreamOperatorOption(exec) < 0) {
|
||||
|
@ -509,7 +500,8 @@ int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask) {
|
|||
|
||||
static void checkFillhistoryTaskStatus(SStreamTask* pTask, SStreamTask* pHTask) {
|
||||
pHTask->dataRange.range.minVer = 0;
|
||||
pHTask->dataRange.range.maxVer = pTask->chkInfo.currentVer;
|
||||
// the query version range should be limited to the already processed data
|
||||
pHTask->dataRange.range.maxVer = pTask->chkInfo.nextProcessVer - 1;
|
||||
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||
qDebug("s-task:%s set the launch condition for fill-history s-task:%s, window:%" PRId64 " - %" PRId64
|
||||
|
|
|
@ -345,7 +345,7 @@ bool streamStateCheck(SStreamState* pState, const SWinKey* key) {
|
|||
return hasRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey));
|
||||
#else
|
||||
SStateKey sKey = {.key = *key, .opNum = pState->number};
|
||||
return tdbTbGet(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), pVal, pVLen);
|
||||
return tdbTbGet(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), NULL, NULL);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -375,16 +375,17 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i
|
|||
return -1;
|
||||
}
|
||||
|
||||
pTask->tsInfo.init = taosGetTimestampMs();
|
||||
pTask->tsInfo.created = taosGetTimestampMs();
|
||||
pTask->inputInfo.status = TASK_INPUT_STATUS__NORMAL;
|
||||
pTask->outputInfo.status = TASK_OUTPUT_STATUS__NORMAL;
|
||||
pTask->pMeta = pMeta;
|
||||
|
||||
pTask->chkInfo.currentVer = ver;
|
||||
pTask->chkInfo.nextProcessVer = ver;
|
||||
pTask->dataRange.range.maxVer = ver;
|
||||
pTask->dataRange.range.minVer = ver;
|
||||
pTask->pMsgCb = pMsgCb;
|
||||
|
||||
streamTaskInitTokenBucket(&pTask->tokenBucket, 150, 100);
|
||||
taosThreadMutexInit(&pTask->lock, NULL);
|
||||
streamTaskOpenAllUpstreamInput(pTask);
|
||||
|
||||
|
@ -502,8 +503,9 @@ int32_t streamTaskStop(SStreamTask* pTask) {
|
|||
taosMsleep(100);
|
||||
}
|
||||
|
||||
pTask->tsInfo.init = 0;
|
||||
int64_t el = taosGetTimestampMs() - st;
|
||||
qDebug("vgId:%d s-task:%s is closed in %" PRId64 " ms", pMeta->vgId, pTask->id.idStr, el);
|
||||
qDebug("vgId:%d s-task:%s is closed in %" PRId64 " ms, and reset init ts", pMeta->vgId, pTask->id.idStr, el);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -25,7 +25,8 @@

#define FLUSH_RATIO 0.5
#define FLUSH_NUM 4
#define DEFAULT_MAX_STREAM_BUFFER_SIZE (128 * 1024 * 1024);
#define DEFAULT_MAX_STREAM_BUFFER_SIZE (128 * 1024 * 1024)
#define MIN_NUM_OF_ROW_BUFF 10240

struct SStreamFileState {
  SList* usedBuffs;

@ -67,7 +68,7 @@ SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_
|
|||
pFileState->usedBuffs = tdListNew(POINTER_BYTES);
|
||||
pFileState->freeBuffs = tdListNew(POINTER_BYTES);
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
int32_t cap = TMIN(10240, pFileState->maxRowCount);
|
||||
int32_t cap = TMIN(MIN_NUM_OF_ROW_BUFF, pFileState->maxRowCount);
|
||||
pFileState->rowBuffMap = tSimpleHashInit(cap, hashFn);
|
||||
if (!pFileState->usedBuffs || !pFileState->freeBuffs || !pFileState->rowBuffMap) {
|
||||
goto _error;
|
||||
|
@ -272,10 +273,12 @@ int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, voi
|
|||
*pVLen = pFileState->rowSize;
|
||||
*pVal = *pos;
|
||||
(*pos)->beUsed = true;
|
||||
(*pos)->beFlushed = false;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SRowBuffPos* pNewPos = getNewRowPos(pFileState);
|
||||
pNewPos->beUsed = true;
|
||||
pNewPos->beFlushed = false;
|
||||
ASSERT(pNewPos->pRowBuff);
|
||||
memcpy(pNewPos->pKey, pKey, keyLen);
|
||||
|
||||
|
@ -375,6 +378,10 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
|
|||
while ((pNode = tdListNext(&iter)) != NULL && code == TSDB_CODE_SUCCESS) {
|
||||
SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data;
|
||||
ASSERT(pPos->pRowBuff && pFileState->rowSize > 0);
|
||||
if (pPos->beFlushed) {
|
||||
continue;
|
||||
}
|
||||
pPos->beFlushed = true;
|
||||
|
||||
if (streamStateGetBatchSize(batch) >= BATCH_LIMIT) {
|
||||
streamStatePutBatch_rocksdb(pFileState->pFileStore, batch);
|
||||
|
@ -513,6 +520,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) {
|
|||
ASSERT(pVLen == pFileState->rowSize);
|
||||
memcpy(pNewPos->pRowBuff, pVal, pVLen);
|
||||
taosMemoryFreeClear(pVal);
|
||||
pNewPos->beFlushed = true;
|
||||
code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->keyLen, &pNewPos, POINTER_BYTES);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
destroyRowBuffPos(pNewPos);
|
||||
|
|
|
@ -2562,7 +2562,11 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum
            ths->logReplMgrs[i]->matchIndex, ths->logReplMgrs[i]->endIndex);
  }

  SSyncLogReplMgr oldLogReplMgrs[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA] = {0};
  SSyncLogReplMgr* oldLogReplMgrs = NULL;
  int64_t length = sizeof(SSyncLogReplMgr) * (TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA);
  oldLogReplMgrs = taosMemoryMalloc(length);
  if (NULL == oldLogReplMgrs) return -1;
  memset(oldLogReplMgrs, 0, length);

  for(int i = 0; i < oldtotalReplicaNum; i++){
    oldLogReplMgrs[i] = *(ths->logReplMgrs[i]);

@ -2643,6 +2647,8 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum
    }
  }

  taosMemoryFree(oldLogReplMgrs);

  return 0;
}

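// note: oldLogReplMgrs used to be a (TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA)-sized array on the
// stack; it is now allocated with taosMemoryMalloc, zeroed with memset, and released with
// taosMemoryFree before returning -- presumably to keep this large scratch buffer off the stack.
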
@ -59,7 +59,7 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
  ASSERT(pData->pWal != NULL);

  taosThreadMutexInit(&(pData->mutex), NULL);
  pData->pWalHandle = walOpenReader(pData->pWal, NULL);
  pData->pWalHandle = walOpenReader(pData->pWal, NULL, 0);
  ASSERT(pData->pWalHandle != NULL);

  pLogStore->syncLogUpdateCommitIndex = raftLogUpdateCommitIndex;

@ -2200,10 +2200,15 @@ int tdbBtcDelete(SBTC *pBtc) {
    tdbOsFree(pCell);

    if (pPage->nOverflow > 0) {
      tdbDebug("tdb/btc-delete: btree balance after update cell, pPage/nOverflow: %p/%d.", pPage,
               pPage->nOverflow);
      tdbDebug("tdb/btc-delete: btree balance after update cell, pPage/nOverflow/pgno: %p/%d/%" PRIu32 ".", pPage,
               pPage->nOverflow, TDB_PAGE_PGNO(pPage));

      pBtc->iPage = iPage;
      tdbPagerReturnPage(pBtc->pBt->pPager, pBtc->pPage, pBtc->pTxn);
      while (--pBtc->iPage != iPage) {
        tdbPagerReturnPage(pBtc->pBt->pPager, pBtc->pgStack[pBtc->iPage], pBtc->pTxn);
      }

      // pBtc->iPage = iPage;
      pBtc->pPage = pPage;
      ret = tdbBtreeBalance(pBtc);
      if (ret < 0) {

@ -229,7 +229,15 @@ void tdbPCacheInvalidatePage(SPCache *pCache, SPager *pPager, SPgno pgno) {
|
|||
}
|
||||
|
||||
if (pPage) {
|
||||
bool moveToFreeList = false;
|
||||
if (pPage->pLruNext) {
|
||||
tdbPCachePinPage(pCache, pPage);
|
||||
moveToFreeList = true;
|
||||
}
|
||||
tdbPCacheRemovePageFromHash(pCache, pPage);
|
||||
if (moveToFreeList) {
|
||||
tdbPCacheFreePage(pCache, pPage);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -295,7 +303,7 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn)
|
|||
}
|
||||
|
||||
// 1. pPage == NULL
|
||||
// 2. pPage && pPage->isLocal == 0 && !TDB_TXN_IS_WRITE(pTxn)
|
||||
// 2. pPage && !pPage->isLocal == 0 && !TDB_TXN_IS_WRITE(pTxn)
|
||||
pPageH = pPage;
|
||||
pPage = NULL;
|
||||
|
||||
|
|
|
@ -296,7 +296,7 @@ int32_t tfsMkdirRecurAt(STfs *pTfs, const char *rname, SDiskID diskId) {
    // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man3/dirname.3.html
    char *dir = taosStrdup(taosDirName(s));

    if (tfsMkdirRecurAt(pTfs, dir, diskId) < 0) {
    if (strlen(dir) >= strlen(rname) || tfsMkdirRecurAt(pTfs, dir, diskId) < 0) {
      taosMemoryFree(s);
      taosMemoryFree(dir);
      return -1;

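    // note: the added strlen(dir) >= strlen(rname) guard stops the recursion once taosDirName() no
    // longer shortens the path (e.g. "/" or "."), so a missing root component makes the call fail
    // instead of recursing indefinitely.
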
@ -395,7 +395,7 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) {
  if (status == 0) {
    tTrace("success to dispatch conn to work thread");
  } else {
    tError("fail to dispatch conn to work thread");
    tError("fail to dispatch conn to work thread, code:%s", uv_strerror(status));
  }
  if (!uv_is_closing((uv_handle_t*)req->data)) {
    uv_close((uv_handle_t*)req->data, uvFreeCb);

@ -16,7 +16,7 @@
|
|||
#include "taoserror.h"
|
||||
#include "walInt.h"
|
||||
|
||||
SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond) {
|
||||
SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond, int64_t id) {
|
||||
SWalReader *pReader = taosMemoryCalloc(1, sizeof(SWalReader));
|
||||
if (pReader == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
@ -24,7 +24,7 @@ SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond) {
|
|||
}
|
||||
|
||||
pReader->pWal = pWal;
|
||||
pReader->readerId = tGenIdPI64();
|
||||
pReader->readerId = (id != 0)? id:tGenIdPI64();
|
||||
pReader->pIdxFile = NULL;
|
||||
pReader->pLogFile = NULL;
|
||||
pReader->curVersion = -1;
|
||||
|
@ -75,6 +75,7 @@ int32_t walNextValidMsg(SWalReader *pReader) {
|
|||
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
||||
while (fetchVer <= appliedVer) {
|
||||
if (walFetchHead(pReader, fetchVer) < 0) {
|
||||
return -1;
|
||||
|
@ -257,9 +258,9 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) {
|
|||
bool seeked = false;
|
||||
|
||||
wDebug("vgId:%d, try to fetch ver %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64
|
||||
", applied ver:%" PRId64,
|
||||
", applied ver:%" PRId64", 0x%"PRIx64,
|
||||
pRead->pWal->cfg.vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer,
|
||||
pRead->pWal->vers.appliedVer);
|
||||
pRead->pWal->vers.appliedVer, pRead->readerId);
|
||||
|
||||
// TODO: valid ver
|
||||
if (ver > pRead->pWal->vers.commitVer) {
|
||||
|
@ -297,7 +298,8 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) {
|
|||
code = walValidHeadCksum(pRead->pHead);
|
||||
|
||||
if (code != 0) {
|
||||
wError("vgId:%d, unexpected wal log index:%" PRId64 ", since head checksum not passed", pRead->pWal->cfg.vgId, ver);
|
||||
wError("vgId:%d, unexpected wal log index:%" PRId64 ", since head checksum not passed, 0x%"PRIx64, pRead->pWal->cfg.vgId, ver,
|
||||
pRead->readerId);
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
return -1;
|
||||
}
|
||||
|
@ -307,9 +309,9 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) {
|
|||
|
||||
int32_t walSkipFetchBody(SWalReader *pRead) {
|
||||
wDebug("vgId:%d, skip fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64
|
||||
", applied ver:%" PRId64,
|
||||
", applied ver:%" PRId64", 0x%"PRIx64,
|
||||
pRead->pWal->cfg.vgId, pRead->pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer,
|
||||
pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer);
|
||||
pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer, pRead->readerId);
|
||||
|
||||
int64_t code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR);
|
||||
if (code < 0) {
|
||||
|
@ -324,11 +326,13 @@ int32_t walSkipFetchBody(SWalReader *pRead) {
|
|||
int32_t walFetchBody(SWalReader *pRead) {
|
||||
SWalCont *pReadHead = &pRead->pHead->head;
|
||||
int64_t ver = pReadHead->version;
|
||||
int32_t vgId = pRead->pWal->cfg.vgId;
|
||||
int64_t id = pRead->readerId;
|
||||
|
||||
wDebug("vgId:%d, fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64
|
||||
", applied ver:%" PRId64,
|
||||
pRead->pWal->cfg.vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer,
|
||||
pRead->pWal->vers.appliedVer);
|
||||
", applied ver:%" PRId64 ", 0x%" PRIx64,
|
||||
vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer,
|
||||
pRead->pWal->vers.appliedVer, id);
|
||||
|
||||
if (pRead->capacity < pReadHead->bodyLen) {
|
||||
SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen);
|
||||
|
@ -344,26 +348,25 @@ int32_t walFetchBody(SWalReader *pRead) {
|
|||
if (pReadHead->bodyLen != taosReadFile(pRead->pLogFile, pReadHead->body, pReadHead->bodyLen)) {
|
||||
if (pReadHead->bodyLen < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since %s",
|
||||
pRead->pWal->cfg.vgId, pReadHead->version, ver, tstrerror(terrno));
|
||||
wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since %s, 0x%"PRIx64,
|
||||
vgId, pReadHead->version, ver, tstrerror(terrno), id);
|
||||
} else {
|
||||
wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since file corrupted",
|
||||
pRead->pWal->cfg.vgId, pReadHead->version, ver);
|
||||
wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since file corrupted, 0x%"PRIx64,
|
||||
vgId, pReadHead->version, ver, id);
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pReadHead->version != ver) {
|
||||
wError("vgId:%d, wal fetch body error, index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId,
|
||||
pReadHead->version, ver);
|
||||
wError("vgId:%d, wal fetch body error, index:%" PRId64 ", read request index:%" PRId64", 0x%"PRIx64, vgId,
|
||||
pReadHead->version, ver, id);
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (walValidBodyCksum(pRead->pHead) != 0) {
|
||||
wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId,
|
||||
ver);
|
||||
wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed, 0x%" PRIx64, vgId, ver, id);
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -326,7 +326,7 @@ TEST_F(WalCleanDeleteEnv, roll) {
TEST_F(WalKeepEnv, readHandleRead) {
  walResetEnv();
  int code;
  SWalReader* pRead = walOpenReader(pWal, NULL);
  SWalReader* pRead = walOpenReader(pWal, NULL, 0);
  ASSERT(pRead != NULL);

  int i;

@ -387,7 +387,7 @@ TEST_F(WalRetentionEnv, repairMeta1) {

  ASSERT_EQ(pWal->vers.lastVer, 99);

  SWalReader* pRead = walOpenReader(pWal, NULL);
  SWalReader* pRead = walOpenReader(pWal, NULL, 0);
  ASSERT(pRead != NULL);

  for (int i = 0; i < 1000; i++) {