Merge branch '3.0' into fix/TD-31899
|
@ -5,8 +5,9 @@ ExternalProject_Add(addr2line
|
|||
GIT_TAG master
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/addr2line"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/addr2line"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -11,9 +11,10 @@ ExternalProject_Add(aprutil-1
|
|||
BUILD_IN_SOURCE TRUE
|
||||
BUILD_ALWAYS 1
|
||||
#UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
|
||||
#CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
|
||||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -16,4 +16,5 @@ ExternalProject_Add(apr-1
|
|||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -9,4 +9,5 @@ ExternalProject_Add(cjson
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -9,4 +9,5 @@ ExternalProject_Add(cos
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -5,8 +5,9 @@ ExternalProject_Add(crashdump
|
|||
GIT_TAG master
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/crashdump"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/crashdump"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -16,4 +16,5 @@ ExternalProject_Add(curl2
|
|||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -9,4 +9,5 @@ ExternalProject_Add(geos
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -10,4 +10,5 @@ ExternalProject_Add(gnuregex
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -9,4 +9,5 @@ ExternalProject_Add(googletest
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -9,4 +9,5 @@ ExternalProject_Add(iconv
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -5,8 +5,9 @@ ExternalProject_Add(libdwarf
|
|||
GIT_TAG libdwarf-0.3.1
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/libdwarf"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/libdwarf"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -13,4 +13,5 @@ ExternalProject_Add(libs3
|
|||
BUILD_COMMAND make build/lib/libs3.a
|
||||
INSTALL_COMMAND make install_static
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -5,8 +5,9 @@ ExternalProject_Add(libuv
|
|||
GIT_TAG v1.48.0
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -9,4 +9,5 @@ ExternalProject_Add(lz4
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
|
||||
# xz
|
||||
# xz
|
||||
|
||||
if (${TD_LINUX})
|
||||
ExternalProject_Add(lzma2
|
||||
GIT_REPOSITORY https://github.com/conor42/fast-lzma2.git
|
||||
GIT_REPOSITORY https://github.com/conor42/fast-lzma2.git
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/lzma2"
|
||||
#BINARY_DIR ""
|
||||
BUILD_IN_SOURCE TRUE
|
||||
|
@ -11,5 +11,6 @@ ExternalProject_Add(lzma2
|
|||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
|
|
|
@ -10,4 +10,5 @@ ExternalProject_Add(msvcregex
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -11,4 +11,5 @@ ExternalProject_Add(mxml
|
|||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -10,4 +10,5 @@ ExternalProject_Add(pcre2
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -10,4 +10,5 @@ ExternalProject_Add(pthread
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -11,6 +11,7 @@ if (${BUILD_CONTRIB})
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
else()
|
||||
if (NOT ${TD_LINUX})
|
||||
|
@ -24,6 +25,7 @@ else()
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
|
|
|
@ -10,4 +10,5 @@ ExternalProject_Add(sqlite
|
|||
BUILD_COMMAND "$(MAKE)"
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -12,4 +12,5 @@ ExternalProject_Add(openssl
|
|||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install_sw -j
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -10,4 +10,5 @@ ExternalProject_Add(stub
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -10,4 +10,5 @@ ExternalProject_Add(wcwidth
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -10,4 +10,5 @@ ExternalProject_Add(wingetopt
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -15,4 +15,5 @@ ExternalProject_Add(xml2
|
|||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -1,14 +1,15 @@
|
|||
|
||||
# xz
|
||||
ExternalProject_Add(xz
|
||||
GIT_REPOSITORY https://github.com/xz-mirror/xz.git
|
||||
GIT_TAG v5.4.4
|
||||
GIT_REPOSITORY https://github.com/xz-mirror/xz.git
|
||||
GIT_TAG v5.4.4
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/xz"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
CMAKE_ARGS
|
||||
CMAKE_ARGS
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -12,4 +12,5 @@ ExternalProject_Add(zlib
|
|||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -1,15 +1,16 @@
|
|||
|
||||
# zstb
|
||||
# zstb
|
||||
|
||||
#ExternalProject_Add(zstd
|
||||
#GIT_REPOSITORY https://github.com/facebook/zstd.git
|
||||
#GIT_REPOSITORY https://github.com/facebook/zstd.git
|
||||
#GIT_TAG v1.5.5
|
||||
#SOURCE_DIR "${TD_CONTRIB_DIR}/zstd"
|
||||
#DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
#BINARY_DIR ""
|
||||
#CMAKE_COMMAND
|
||||
#CMAKE_COMMAND
|
||||
#CMAKE_ARGS ${TD_CONTRIB_DIR}/zstd/build/cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/build/lib
|
||||
#BUILD_COMMAND make -j4
|
||||
#BUILD_COMMAND make -j4
|
||||
#INSTALL_COMMAND ""
|
||||
#TEST_COMMAND ""
|
||||
#)
|
||||
#GIT_SHALLOW true
|
||||
#)
|
||||
|
|
|
@ -72,7 +72,7 @@ SELECT _wstart, count(*), avg(voltage) FROM power.meters PARTITION BY tbname INT
|
|||
- stb_name 是保存计算结果的超级表的表名,如果该超级表不存在,则会自动创建;如果已存在,则检查列的 schema 信息。详见 6.3.8 节。
|
||||
- tags 子句定义了流计算中创建标签的规则。通过 tags 字段可以为每个分区对应的子表生成自定义的标签值。
|
||||
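For reference, a minimal sketch of a stream that uses the `tags` clause described above to derive a per-partition tag value. The stream name, target super table, and tag name are placeholders, and the exact `TAGS` / `PARTITION BY ... AS` spelling is an assumption that should be checked against the SQL reference.

```c
/* Hypothetical names: stream "power_stream", target super table
 * "power.meters_summary", tag "group_name". Only the power.meters example
 * table comes from the surrounding docs. */
#include <stdio.h>
#include "taos.h"

int create_stream_with_tags(TAOS *taos) {
  const char *sql =
      "CREATE STREAM power_stream INTO power.meters_summary "
      "TAGS (group_name VARCHAR(64)) "
      "AS SELECT _wstart, count(*), avg(voltage) FROM power.meters "
      "PARTITION BY tbname AS group_name INTERVAL(10s)";
  TAOS_RES *res = taos_query(taos, sql);
  int code = taos_errno(res);
  if (code != 0) fprintf(stderr, "create stream failed: %s\n", taos_errstr(res));
  taos_free_result(res);
  return code;
}
```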
|
||||
## 流试计算的规则和策略
|
||||
## 流式计算的规则和策略
|
||||
|
||||
### 流计算的分区
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
|||
|
||||
在 **MQTT 协议** 下拉列表中选择 MQTT 协议版本。有三个选项:`3.1`、`3.1.1`、`5.0`。 默认值为 3.1。
|
||||
|
||||
在 **Client ID** 中填写客户端 ID。建多个任务时,同个 MQTT 地址下,Client ID 必须是唯一的。
|
||||
在 **Client ID** 中填写客户端标识,填写后会生成带有 `taosx` 前缀的客户端 id (例如,如果填写的标识为 `foo`,则生成的客户端 id 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 id 拼接到 `taosx` 之后,输入的标识之前(生成的客户端 id 形如 `taosx100foo`)。连接到同一个 MQTT 地址的所有客户端 id 必须保证唯一。
|
||||
|
||||
在 **Keep Alive** 中输入保持活动间隔。如果代理在保持活动间隔内没有收到来自客户端的任何消息,它将假定客户端已断开连接,并关闭连接。
|
||||
保持活动间隔是指客户端和代理之间协商的时间间隔,用于检测客户端是否活动。如果客户端在保持活动间隔内没有向代理发送消息,则代理将断开连接。
|
||||
|
|
|
@ -60,6 +60,10 @@ TDengine 可以高效地从 Kafka 读取数据并将其写入 TDengine,以实
|
|||
|
||||
在 **主题** 中填写要消费的 Topic 名称。可以配置多个 Topic , Topic 之间用逗号分隔。例如:`tp1,tp2`。
|
||||
|
||||
在 **Client ID** 中填写客户端标识,填写后会生成带有 `taosx` 前缀的客户端 ID (例如,如果填写的标识为 `foo`,则生成的客户端 ID 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 ID 拼接到 `taosx` 之后,输入的标识之前(生成的客户端 ID 形如 `taosx100foo`)。连接到同一个 Kafka 集群的所有客户端 ID 必须保证唯一。
|
||||
|
||||
在 **消费者组 ID** 中填写消费者组标识,填写后会生成带有 `taosx` 前缀的消费者组 ID (例如,如果填写的标识为 `foo`,则生成的消费者组 ID 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 ID 拼接到 `taosx` 之后,输入的标识之前(生成的消费者组 ID 形如 `taosx100foo`)。
|
||||
|
||||
在 **Offset** 的下拉列表中选择从哪个 Offset 开始消费数据。有三个选项:`Earliest`、`Latest`、`ByTime(ms)`。 默认值为Earliest。
|
||||
|
||||
* Earliest:用于请求最早的 offset。
|
||||
|
|
(two documentation screenshots were replaced with updated versions)
|
@ -229,4 +229,12 @@ toc_max_heading_level: 4
|
|||
- 权限
|
||||
- 白名单
|
||||
|
||||
。
|
||||

|
||||
|
||||
### 慢 SQL
|
||||
点击“系统管理”后,点击“慢 SQL”标签页,可以查看慢 SQL 执行语句日志统计与明细。
|
||||
|
||||
- 慢 SQL 明细:默认展示的是开始执行时间是一天内和执行耗时大于等于10秒的数据
|
||||

|
||||
- 慢 SQL 统计:默认展示所有的数据,可根据开始执行时间进行过滤
|
||||

|
||||
|
|
|
@ -69,7 +69,7 @@ dataDir /mnt/data6 2 0
|
|||
在配置文件 /etc/taos/taos.cfg 中,添加用于 S3 访问的参数:
|
||||
|
||||
|参数名称 | 参数含义 |
|
||||
|:-------------:|:-----------------------------------------------:|
|
||||
|:-------------|:-----------------------------------------------|
|
||||
|s3EndPoint | 用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 的保持一致,否则无法访问。例如:http://cos.ap-beijing.myqcloud.com |
|
||||
|s3AccessKey |冒号分隔的用户 SecretId:SecretKey。例如:AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E |
|
||||
|s3BucketName | 存储桶名称,减号后面是用户注册 COS 服务的 AppId。其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔。参数值均为字符串类型,但不需要引号。例如:test0711-1309024725 |
|
||||
|
@ -111,3 +111,27 @@ s3migrate database <db_name>;
|
|||
| 1 | s3_keeplocal | 3650 | 1 | 365000 | 数据在本地保留的天数,即 data 文件在本地磁盘保留多长时间后可以上传到 S3。默认单位:天,支持 m(分钟)、h(小时)和 d(天)三个单位 |
|
||||
| 2 | s3_chunksize | 262144 | 131072 | 1048576 | 上传对象的大小阈值,与 TSDB_PAGESIZE 参数一样,不可修改,单位为 TSDB 页 |
|
||||
| 3 | s3_compact | 0 | 0 | 1 | TSDB 文件组首次上传 S3 时,是否自动进行 compact 操作。 |
|
||||
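As a usage note, the `s3migrate database <db_name>` command referenced above can also be issued from the C client. A minimal sketch follows; the database name and connection parameters are placeholders.

```c
/* Sketch: trigger S3 migration for one database from the C client.
 * "power", host and credentials are placeholder values. */
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (taos == NULL) return 1;
  TAOS_RES *res = taos_query(taos, "s3migrate database power");
  if (taos_errno(res) != 0) fprintf(stderr, "s3migrate failed: %s\n", taos_errstr(res));
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```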
|
||||
## Azure Blob 存储
|
||||
本节介绍在 TDengine Enterprise 如何使用微软 Azure Blob 对象存储。本功能是上一小节‘对象存储’功能的扩展,需额外依赖 Flexify 服务提供的 S3 网关。通过适当的参数配置,可以把大部分较冷的时序数据存储到 Azure Blob 服务中。
|
||||
|
||||
### Flexify 服务
|
||||
Flexify 是 Azure Marketplace 中的一款应用程序,允许兼容 S3 的应用程序通过标准 S3 API 在 Azure Blob Storage 中存储数据。可使用多个 Flexify 服务对同一个 Blob 存储建立多个 S3 网关。
|
||||
|
||||
部署方式请参考 [Flexify](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/flexify.azure-s3-api?tab=Overview) 应用页面说明。
|
||||
|
||||
### 配置方式
|
||||
|
||||
在配置文件 /etc/taos/taos.cfg 中,添加用于 S3 访问的参数:
|
||||
|
||||
```
|
||||
s3EndPoint http://20.191.157.23,http://20.191.157.24,http://20.191.157.25
|
||||
s3AccessKey FLIOMMNL0:uhRNdeZMLD4wo,ABCIOMMN:uhRNdeZMD4wog,DEFOMMNL049ba:uhRNdeZMLD4wogXd
|
||||
s3BucketName td-test
|
||||
```
|
||||
|
||||
- 允许对 s3EndPoint、s3AccessKey 配置多项,但要求二者项数一致。多个配置项间使用 ',' 分隔。s3BucketName 仅允许配置一项
|
||||
- 认为每一组 `{s3EndPoint、s3AccessKey}` 配置对应一个 S3 服务,每次发起 S3 请求时将随机选择一个服务
|
||||
- 认为全部 S3 服务均指向同一数据源,对各个 S3 服务操作完全等价
|
||||
- 在某一 S3 服务上操作失败后会切换至其他服务,全部服务都失败后将返回最后产生的错误码
|
||||
- 最大支持的 S3 服务配置数为 10
|
||||
|
|
(two new documentation screenshots were added)
|
@ -34,13 +34,15 @@ TDengine 消费者的概念跟 Kafka 类似,消费者通过订阅主题来接
|
|||
| `td.connect.user` | string | 用户名 | |
|
||||
| `td.connect.pass` | string | 密码 | |
|
||||
| `td.connect.port` | integer | 服务端的端口号 | |
|
||||
| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | <br />**必填项**。最大长度:192。<br />每个topic最多可建立100个 consumer group |
|
||||
| `client.id` | string | 客户端 ID | 最大长度:192。 |
|
||||
| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | <br />**必填项**。最大长度:192。<br />每个topic最多可建立 100 个 consumer group |
|
||||
| `client.id` | string | 客户端 ID | 最大长度:192 |
|
||||
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default(version < 3.2.0.0);从头开始订阅; <br/>`latest`: default(version >= 3.2.0.0);仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
|
||||
| `enable.auto.commit` | boolean | 是否启用消费位点自动提交,true: 自动提交,客户端应用无需commit;false:客户端应用需要自行commit | 默认值为 true |
|
||||
| `auto.commit.interval.ms` | integer | 消费记录自动提交消费位点时间间隔,单位为毫秒 | 默认值为 5000 |
|
||||
| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句)(从3.2.0.0版本该参数废弃,恒为true) | 默认关闭 |
|
||||
| `enable.replay` | boolean | 是否开启数据回放功能 | 默认关闭 |
|
||||
| `session.timeout.ms` | integer | consumer 心跳丢失后超时时间,超时后会触发 rebalance 逻辑,成功后该 consumer 会被删除(从3.3.3.0版本开始支持) | 默认值为 12000,取值范围 [6000, 1800000] |
|
||||
| `max.poll.interval.ms` | integer | consumer poll 拉取数据间隔的最长时间,超过该时间,会认为该 consumer 离线,触发rebalance 逻辑,成功后该 consumer 会被删除(从3.3.3.0版本开始支持) | 默认值为 300000,[1000,INT32_MAX] |
|
||||
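A small C sketch of wiring the parameters from the table above into a consumer. The group/client identifiers and connection settings are placeholders, and error handling is abbreviated.

```c
#include <stdio.h>
#include "taos.h"

tmq_t *build_consumer(void) {
  char errstr[256] = {0};
  tmq_conf_t *conf = tmq_conf_new();
  tmq_conf_set(conf, "td.connect.ip", "localhost");
  tmq_conf_set(conf, "td.connect.user", "root");
  tmq_conf_set(conf, "td.connect.pass", "taosdata");
  tmq_conf_set(conf, "group.id", "cg1");               /* required, <= 192 bytes */
  tmq_conf_set(conf, "client.id", "c1");                /* optional, <= 192 bytes */
  tmq_conf_set(conf, "auto.offset.reset", "latest");
  tmq_conf_set(conf, "enable.auto.commit", "true");
  tmq_conf_set(conf, "auto.commit.interval.ms", "5000");
  tmq_conf_set(conf, "session.timeout.ms", "12000");
  tmq_conf_set(conf, "max.poll.interval.ms", "300000");
  tmq_t *tmq = tmq_consumer_new(conf, errstr, sizeof(errstr));
  if (tmq == NULL) fprintf(stderr, "consumer create failed: %s\n", errstr);
  tmq_conf_destroy(conf);
  return tmq;
}
```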
|
||||
|
||||
下面是各语言连接器创建参数:
|
||||
|
|
|
@ -38,7 +38,8 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在
|
|||
|enableCoreFile | crash 时是否生成 core 文件,0: 不生成, 1: 生成;缺省值:1 |
|
||||
|enableScience | 是否开启科学计数法显示浮点数; 0: 不开始, 1: 开启;缺省值:1 |
|
||||
|compressMsgSize | 是否对 RPC 消息进行压缩; -1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩; 缺省值 -1|
|
||||
|queryTableNotExistAsEmpty | 查询表不存在时是否返回空结果集; false: 返回错误; true: 返回空结果集; 缺省值 false|
|
||||
|
||||
## API
|
||||
|
||||
请参考[连接器](../../connector)
|
||||
请参考[连接器](../../connector)
|
||||
|
|
|
@ -187,8 +187,8 @@ AllowWebSockets
|
|||
- 兼容 InfluxDB v1 写接口
|
||||
[https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
|
||||
- 兼容 OpenTSDB JSON 和 telnet 格式写入
|
||||
- \<http://opentsdb.net/docs/build/html/api_http/put.html\>
|
||||
- \<http://opentsdb.net/docs/build/html/api_telnet/put.html\>
|
||||
- [http://opentsdb.net/docs/build/html/api_http/put.html](http://opentsdb.net/docs/build/html/api_http/put.html)
|
||||
- [http://opentsdb.net/docs/build/html/api_telnet/put.html](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
||||
- 与 collectd 无缝连接。
|
||||
collectd 是一个系统统计收集守护程序,请访问 [https://collectd.org/](https://collectd.org/) 了解更多信息。
|
||||
- Seamless connection with StatsD。
|
||||
|
|
|
@ -194,7 +194,7 @@ Active: inactive (dead)
|
|||
可以访问 taosKeeper 的 `check_health` 接口来判断服务是否存活,如果服务正常则会返回 HTTP 200 状态码:
|
||||
|
||||
```
|
||||
$ curl -i http://127.0.0.1:6043/check_health
|
||||
curl -i http://127.0.0.1:6043/check_health
|
||||
```
|
||||
|
||||
返回结果:
|
||||
|
|
|
@ -54,7 +54,7 @@ window_clause: {
|
|||
```
|
||||
|
||||
其中,interval_val 和 sliding_val 都表示时间段,interval_offset 表示窗口偏移量,interval_offset 必须小于 interval_val,语法上支持三种方式,举例说明如下:
|
||||
- INTERVAL(1s, 500a) SLIDING(1s), 自带时间单位的形式,其中的时间单位是单字符表示, 分别为: a (毫秒), b (纳秒), d (天), h (小时), m (分钟), n (月), s (秒), u (微妙), w (周), y (年).
|
||||
- INTERVAL(1s, 500a) SLIDING(1s), 自带时间单位的形式,其中的时间单位是单字符表示, 分别为: a (毫秒), b (纳秒), d (天), h (小时), m (分钟), n (月), s (秒), u (微秒), w (周), y (年).
|
||||
- INTERVAL(1000, 500) SLIDING(1000), 不带时间单位的形式,将使用查询库的时间精度作为默认时间单位,当存在多个库时默认采用精度更高的库.
|
||||
- INTERVAL('1s', '500a') SLIDING('1s'), 自带时间单位的字符串形式,字符串内部不能有任何空格等其它字符.
|
||||
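For illustration, the three equivalent spellings above issued from the C client against the `power.meters` example table; error handling is omitted for brevity.

```c
#include "taos.h"

void run_interval_queries(TAOS *taos) {
  const char *sqls[] = {
      "SELECT _wstart, avg(voltage) FROM power.meters INTERVAL(1s, 500a) SLIDING(1s)",
      "SELECT _wstart, avg(voltage) FROM power.meters INTERVAL(1000, 500) SLIDING(1000)",
      "SELECT _wstart, avg(voltage) FROM power.meters INTERVAL('1s', '500a') SLIDING('1s')",
  };
  for (int i = 0; i < 3; i++) {
    TAOS_RES *res = taos_query(taos, sqls[i]);  /* error handling omitted */
    taos_free_result(res);
  }
}
```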
|
||||
|
|
|
@ -296,8 +296,10 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 2 | consumer_group | VARCHAR(193) | 订阅者的消费者组 |
|
||||
| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
|
||||
| 4 | consumer_id | BIGINT | 消费者的唯一 id |
|
||||
| 5 | offset | VARCHAR(64) | 消费者的消费进度 |
|
||||
| 6 | rows | BIGINT | 消费者的消费的数据条数 |
|
||||
| 5 | user | VARCHAR(24) | 消费者的登录的用户名 |
|
||||
| 6 | fqdn | VARCHAR(128) | 消费者的所在机器的 fqdn |
|
||||
| 7 | offset | VARCHAR(64) | 消费者的消费进度 |
|
||||
| 8 | rows | BIGINT | 消费者的消费的数据条数 |
|
||||
|
||||
## INS_STREAMS
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ time_duration:
|
|||
number unit
|
||||
```
|
||||
|
||||
创建 TSMA 时需要指定 TSMA 名字, 表名字, 函数列表以及窗口大小. 当基于一个已经存在的 TSMA 创建新的 TSMA 时, 需要使用 `RECURSIVE` 关键字但不能指定 `FUNCTION()`, 新创建的 TSMA 已有 TSMA 拥有相同的函数列表, 且此种情况下所指定的 INTERVAL 必须为所基于的 TSMA 窗口长度的整数倍。
|
||||
创建 TSMA 时需要指定 TSMA 名字, 表名字, 函数列表以及窗口大小. 当基于一个已经存在的 TSMA 创建新的 TSMA 时, 需要使用 `RECURSIVE` 关键字但不能指定 `FUNCTION()`, 新创建的 TSMA 已有 TSMA 拥有相同的函数列表, 且此种情况下所指定的 INTERVAL 必须至少为所基于的 TSMA 窗口长度的整数倍, 并且天不能基于2h或3h建立, 只能基于1h建立, 月也只能基于1d而非2d,3d建立。
|
||||
|
||||
其中 TSMA 命名规则与表名字类似, 长度最大限制为表名长度限制减去输出表后缀长度, 表名长度限制为193, 输出表后缀为`_tsma_res_stb_`, TSMA 名字最大长度为178.
|
||||
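An illustrative pair of statements matching the rules above: a base TSMA on the `power.meters` example table and a recursive TSMA whose window is an integer multiple of the base window. Names and window sizes are placeholders.

```c
#include "taos.h"

void create_tsmas(TAOS *taos) {
  TAOS_RES *res;
  res = taos_query(taos,
      "CREATE TSMA tsma_1h ON power.meters "
      "FUNCTION(avg(voltage), count(current)) INTERVAL(1h)");
  taos_free_result(res);
  /* 1d is an integer multiple of 1h, so it may be built recursively on tsma_1h. */
  res = taos_query(taos, "CREATE RECURSIVE TSMA tsma_1d ON power.tsma_1h INTERVAL(1d)");
  taos_free_result(res);
}
```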
|
||||
|
@ -28,7 +28,7 @@ TSMA只能基于超级表和普通表创建, 不能基于子表创建.
|
|||
|
||||
由于TSMA输出为一张超级表, 因此输出表的行长度受最大行长度限制, 不同函数的`中间结果`大小各异, 一般都大于原始数据大小, 若输出表的行长度大于最大行长度限制, 将会报`Row length exceeds max length`错误. 此时需要减少函数个数或者将常用的函数进行分组拆分到多个TSMA中.
|
||||
|
||||
窗口大小的限制为[1m ~ 1h]. INTERVAL 的单位与查询中INTERVAL子句相同, 如 a (毫秒), b (纳秒), h (小时), m (分钟), s (秒), u (微妙).
|
||||
窗口大小的限制为[1m ~ 1y/12n]. INTERVAL 的单位与查询中INTERVAL子句相同, 如 a (毫秒), b (纳秒), h (小时), m (分钟), s (秒), u (微妙), d (天), w(周), n(月), y(年).
|
||||
|
||||
TSMA为库内对象, 但名字全局唯一. 集群内一共可创建TSMA个数受参数`maxTsmaNum`限制, 参数默认值为3, 范围: [0-3]. 注意, 由于TSMA后台计算使用流计算, 因此每创建一条TSMA, 将会创建一条流, 因此能够创建的TSMA条数也受当前已经存在的流条数和最大可创建流条数限制.
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ toc_max_heading_level: 4
|
|||
|
||||
PHP 连接器依赖 TDengine 客户端驱动。
|
||||
|
||||
项目地址:\<https://github.com/Yurunsoft/php-tdengine>
|
||||
项目地址:[https://github.com/Yurunsoft/php-tdengine](https://github.com/Yurunsoft/php-tdengine)
|
||||
|
||||
TDengine 服务端或客户端安装后,`taos.h` 位于:
|
||||
|
||||
|
|
|
@ -118,21 +118,32 @@ curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timez
|
|||
|
||||
### HTTP 响应码
|
||||
|
||||
从 `TDengine 3.0.3.0` 开始 `taosAdapter` 提供配置参数 `httpCodeServerError` 用来设置当 C 接口返回错误时是否返回非 200 的http状态码。
|
||||
无论是否设置此参数,响应 body 里都有详细的错误码和错误信息,具体请参考 [错误](../rest-api/#错误) 。
|
||||
默认情况下,`taosAdapter` 对大多数 C 接口调用出错时也会返回 200 响应码,但是 HTTP body 中包含错误信息。从 `TDengine 3.0.3.0` 开始 `taosAdapter` 提供配置参数 `httpCodeServerError` 用来设置当 C 接口返回错误时是否返回非 200 的 HTTP 响应码。无论是否设置此参数,响应 body 里都有详细的错误码和错误信息,具体请参考 [错误](../rest-api/#错误) 。
|
||||
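To make the behaviour concrete, a sketch of a REST call against taosAdapter's `/rest/sql` endpoint; libcurl is used here only as a generic HTTP client and is not part of the TDengine docs. With the default `httpCodeServerError=false`, the HTTP status stays 200 even when the SQL fails, so the JSON body must be inspected for the real error code.

```c
#include <stdio.h>
#include <curl/curl.h>

int main(void) {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if (!curl) return 1;
  curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1:6041/rest/sql");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "SELECT 1 FROM not_a_table");
  CURLcode rc = curl_easy_perform(curl);        /* body (with code/desc) goes to stdout */
  long status = 0;
  curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
  /* With httpCodeServerError=false this prints 200 although the SQL failed. */
  printf("transport rc=%d, http status=%ld\n", (int)rc, status);
  curl_easy_cleanup(curl);
  curl_global_cleanup();
  return 0;
}
```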
|
||||
| **说明** | **httpCodeServerError false** | **httpCodeServerError true** |
|
||||
|--------------------|-------------------------------|---------------------------------------|
|
||||
| taos_errno() 返回 0 | 200 | 200 |
|
||||
| taos_errno() 返回 非0 | 200(除鉴权错误) | 500 (除鉴权错误和 400/502/503 错误) |
|
||||
| 参数错误 | 400(仅处理 HTTP 请求 URL 参数错误) | 400 (处理 HTTP 请求 URL 参数错误和 taosd 返回错误) |
|
||||
| 鉴权错误 | 401 | 401 |
|
||||
| 接口不存在 | 404 | 404 |
|
||||
| 集群不可用错误 | 502 | 502 |
|
||||
| 系统资源不足 | 503 | 503 |
|
||||
**当 httpCodeServerError 为 false 时:**
|
||||
|
||||
返回 400 的 C 错误码为:
|
||||
| **分类说明** |**HTTP 响应码** |
|
||||
|--------------------|-------------------------------|
|
||||
| C 接口调用成功 | 200 |
|
||||
| C 接口调用出错,且不是鉴权错误 | 200 |
|
||||
| HTTP 请求 URL 参数错误 | 400 |
|
||||
| C 接口调用鉴权错误 | 401 |
|
||||
| 接口不存在 | 404 |
|
||||
| 系统资源不足 | 503 |
|
||||
|
||||
**当 httpCodeServerError 为 true 时:**
|
||||
|
||||
| **分类说明** | **HTTP 响应码** |
|
||||
|--------------------|-------------------------------|
|
||||
| C 接口调用成功 | 200 |
|
||||
| HTTP 请求 URL 参数错误和 C 接口调用参数解析错误 | 400 |
|
||||
| C 接口调用鉴权错误 | 401 |
|
||||
| 接口不存在 | 404 |
|
||||
| C 接口调用网络不可用错误 | 502 |
|
||||
| 系统资源不足 |503 |
|
||||
| 其他 C 接口调用错误 | 500 |
|
||||
|
||||
C 接口参数解析相关错误码:
|
||||
- TSDB_CODE_TSC_SQL_SYNTAX_ERROR (0x0216)
|
||||
- TSDB_CODE_TSC_LINE_SYNTAX_ERROR (0x021B)
|
||||
- TSDB_CODE_PAR_SYNTAX_ERROR (0x2600)
|
||||
|
@ -140,7 +151,7 @@ curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timez
|
|||
- TSDB_CODE_TSC_VALUE_OUT_OF_RANGE (0x0224)
|
||||
- TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE (0x263B)
|
||||
|
||||
返回 401 的错误码为:
|
||||
C 接口鉴权相关错误码:
|
||||
|
||||
- TSDB_CODE_MND_USER_ALREADY_EXIST (0x0350)
|
||||
- TSDB_CODE_MND_USER_NOT_EXIST (0x0351)
|
||||
|
@ -151,7 +162,7 @@ curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timez
|
|||
- TSDB_CODE_MND_INVALID_ALTER_OPER (0x0356)
|
||||
- TSDB_CODE_MND_AUTH_FAILURE (0x0357)
|
||||
|
||||
返回 502 的错误码为:
|
||||
C 接口网络不可用相关错误码:
|
||||
|
||||
- TSDB_CODE_RPC_NETWORK_UNAVAIL (0x000B)
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
|
|||
|
||||
## 安装并启动 EMQX
|
||||
|
||||
用户可以根据当前的操作系统,到 EMQX 官网下载安装包,并执行安装。下载地址如下:\<https://www.emqx.io/zh/downloads>。安装后使用 `sudo emqx start` 或 `sudo systemctl start emqx` 启动 EMQX 服务。
|
||||
用户可以根据当前的操作系统,到 [EMQX 官网下载安装包](https://www.emqx.io/zh/downloads),并执行安装。安装后使用 `sudo emqx start` 或 `sudo systemctl start emqx` 启动 EMQX 服务。
|
||||
|
||||
注意:本文基于 EMQX v4.4.5 版本,其他版本由于相关配置界面、配置方法以及功能可能随着版本升级有所区别。
|
||||
|
||||
|
|
|
@ -61,37 +61,37 @@ TDengine 会为 WAL 文件自动创建索引以支持快速随机访问。通过
|
|||
|
||||

|
||||
|
||||
在客户端成功建立与服务器的连接之后,用户须首先指定消费组和主题,以创建相应的消费者实例。随后,客户端便会向服务器提交订阅请求。此刻,消费者的状态被标记为 modify,表示正处于配置阶段。消费者随后会定期向服务器发送请求,以检索并获取待消费的 vnode 列表,直至服务器为其分配相应的 vnode。一旦分配完成,消费者的状态便更新为 ready,标志着订阅流程的成功完成。此刻,客户端便可正式启动向 vnode 发送消费数据请求的过程。
|
||||
在客户端成功建立与服务器的连接之后,用户须首先指定消费组和主题,以创建相应的消费者实例。随后,客户端便会向服务器提交订阅请求。此刻,消费者的状态被标记为 rebalancing,表示正处于 rebalance 阶段。消费者随后会定期向服务器发送请求,以检索并获取待消费的 vnode 列表,直至服务器为其分配相应的 vnode。一旦分配完成,消费者的状态便更新为 ready,标志着订阅流程的成功完成。此刻,客户端便可正式启动向 vnode 发送消费数据请求的过程。
|
||||
|
||||
在消费数据的过程中,消费者会不断地向每个分配到的 vnode 发送请求,以尝试获取新的数据。一旦收到数据,消费者在完成消费后会继续向该 vnode 发送请求,以便持续消费。若在预设时间内未收到任何数据,消费者便会在 vnode 上注册一个消费 handle。这样一来,一旦 vnode 上有新数据产生,便会立即推送给消费者,从而确保数据消费的即时性,并有效减少消费者频繁主动拉取数据所导致的性能损耗。因此,可以看出,消费者从 vnode 获取数据的方式是一种拉取(pull)与推送(push)相结合的高效模式。
|
||||
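A minimal consume loop matching the pull/push behaviour described above; the poll timeout is the window during which newly arrived data can be pushed to the blocked call.

```c
#include <stdio.h>
#include "taos.h"

void consume_loop(tmq_t *tmq) {
  while (1) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000 /* ms to wait for pushed data */);
    if (msg == NULL) continue;  /* nothing within the timeout, poll again */
    const char *topic = tmq_get_topic_name(msg);
    int32_t vgid = tmq_get_vgroup_id(msg);
    printf("got message from topic %s, vgroup %d\n", topic, vgid);
    taos_free_result(msg);
  }
}
```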
|
||||
消费者在收到数据时,会同时收到数据的版本号,并将其记录为当前在每个 vnode上的消费进度。这一进度仅在消费者内部以内存形式存储,确保仅对该消费者有效。这种设计保证了消费者在每次启动时能够从上次的消费中断处继续,避免了数据的重复处理。然而,若消费者需要退出并希望在之后恢复上次的消费进度,就必须在退出前将消费进度提交至服务器,执行所谓的 commit 操作。这一操作会将消费进度在服务器进行持久化存储,支持自动提交或手动提交两种方式。
|
||||
消费者在收到数据时,会同时收到数据的版本号,并将其记录为当前在每个 vnode 上的消费进度。这一进度仅在消费者内部以内存形式存储,确保仅对该消费者有效。这种设计保证了消费者在每次启动时能够从上次的消费中断处继续,避免了数据的重复处理。然而,若消费者需要退出并希望在之后恢复上次的消费进度,就必须在退出前将消费进度提交至服务器,执行所谓的 commit 操作。这一操作会将消费进度在服务器进行持久化存储,支持自动提交或手动提交两种方式。
|
||||
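A sketch of a manual commit before exit, so the server persists the offset and a later start resumes from it; `last_msg` is assumed to be the most recently processed message (auto-commit is the alternative mentioned above).

```c
#include <stdio.h>
#include "taos.h"

void commit_progress(tmq_t *tmq, TAOS_RES *last_msg) {
  /* Synchronously persist the offset of the last processed message. */
  int32_t code = tmq_commit_sync(tmq, last_msg);
  if (code != 0) fprintf(stderr, "commit failed: %d\n", code);
}
```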
|
||||
此外,为了维持消费者的活跃状态,客户端还实施了心跳保活机制。通过定期向服务器发送心跳信号,消费者能够向服务器证明自己仍然在线。若服务器在一定时间内未收到消费者的心跳,便会将其标记为 lost 状态,即认为消费者已离线。服务器依赖心跳机制来监控所有消费者的状态,进而有效地管理整个消费者群体。
|
||||
此外,为了维持消费者的活跃状态,客户端还实施了心跳保活机制。通过定期向服务器发送心跳信号,消费者能够向服务器证明自己仍然在线。若服务器在一定时间内未收到消费者的心跳,便会认为消费者已离线。对于一定时间(可以通过参数控制)不拉取数据的 consumer,服务器也会标记为离线,并从消费组中删除该消费者。服务器依赖心跳机制来监控所有消费者的状态,进而有效地管理整个消费者群体。
|
||||
|
||||
mnode 主要负责处理订阅过程中的控制消息,包括创建和删除主题、订阅消息、查询 endpoint 消息以及心跳消息等。vnode 则专注于处理消费消息和 commit 消息。当mnode 收到消费者的订阅消息时,如果该消费者尚未订阅过,其状态将被设置为 modify。如果消费者已经订阅过,但订阅的主题发生变更,消费者的状态同样会被设置为 modify。等待再平衡的计时器到来时,mnode 会对 modify 状态的消费者执行再平衡操作,将心跳超过固定时间的消费者设置为 lost 状态,并对关闭或 lost 状态超过一定时间的消费者进行删除操作。
|
||||
mnode 主要负责处理订阅过程中的控制消息,包括创建和删除主题、订阅消息、查询 endpoint 消息以及心跳消息等。vnode 则专注于处理消费消息和 commit 消息。当 mnode 收到消费者的订阅消息时,如果该消费者尚未订阅过,其状态将被设置为 rebalancing。如果消费者已经订阅过,但订阅的主题发生变更,消费者的状态同样会被设置为 rebalancing。然后 mnode 会对 rebalancing 状态的消费者执行 rebalance 操作。心跳超过固定时间的消费者或主动关闭的消费者将被删除。
|
||||
|
||||
消费者会定期向 mnode 发送查询 endpoint 消息,以获取再平衡后的最新 vnode 分配结果。同时,消费者还会定期发送心跳消息,通知 mnode 自身处于活跃状态。此外,消费者的一些信息也会通过心跳消息上报至 mnode,用户可以查询 mnode 上的这些信息以监测各个消费者的状态。这种设计有助于实现对消费者的有效管理和监控。
|
||||
|
||||
## 再平衡过程
|
||||
## rebalance 过程
|
||||
|
||||
每个主题的数据可能分散在多个 vnode 上。服务器通过执行再平衡过程,将这些vnode 合理地分配给各个消费者,确保数据的均匀分布和高效消费。
|
||||
每个主题的数据可能分散在多个 vnode 上。服务器通过执行 rebalance 过程,将这些 vnode 合理地分配给各个消费者,确保数据的均匀分布和高效消费。
|
||||
|
||||
如下图所示,c1 表示消费者 1,c2 表示消费者 2,g1 表示消费组 1。起初 g1 中只有 c1 消费数据,c1 发送订阅信息到 mnode,mnode 把数据所在的所有 4 个vnode 分配给 c1。当 c2 增加到 g1 后,c2 将订阅信息发送给 mnode,mnode 将在下一次再平衡计时器到来时检测到这个 g1 需要重新分配,就会启动再平衡过程,随后 c2 分得其中两个 vnode 消费。分配信息还会通过 mnode 发送给 vnode,同时 c1 和 c2 也会获取自己消费的 vnode 信息并开始消费。
|
||||
如下图所示,c1 表示消费者 1,c2 表示消费者 2,g1 表示消费组 1。起初 g1 中只有 c1 消费数据,c1 发送订阅信息到 mnode,mnode 把数据所在的所有 4 个vnode 分配给 c1。当 c2 增加到 g1 后,c2 将订阅信息发送给 mnode,mnode 检测到这个 g1 需要重新分配,就会启动 rebalance 过程,随后 c2 分得其中两个 vnode 消费。分配信息还会通过 mnode 发送给 vnode,同时 c1 和 c2 也会获取自己消费的 vnode 信息并开始消费。
|
||||
|
||||

|
||||

|
||||
|
||||
再平衡计时器每 2s 检测一次是否需要再平衡。在再平衡过程中,如果消费者获取的状态是 not ready,则不能进行消费。只有再平衡正常结束后,消费者获取分配 vnode 的offset 后才可正常消费,否则消费者会重试指定次数后报错。
|
||||
|
||||
## 消费者状态处理
|
||||
|
||||
消费者的状态转换过程如下图所示。初始状态下,刚发起订阅的消费者处于modify 状态,此时客户端获取的消费者状态为 not ready,表明消费者尚未准备好进行数据消费。一旦 mnode 在再平衡计时器中检测到处于 modify 状态的消费者,便会启动再平衡过程。再平衡成功后,消费者的状态将转变为 ready,表示消费者已准备就绪。随后,当消费者通过定时的查询 endpoint 消息获取自身的 ready 状态以及分配的 vnode 列表后,便能正式开始消费数据。
|
||||
消费者的状态转换过程如下图所示。初始状态下,刚发起订阅的消费者处于 rebalancing 状态,表明消费者尚未准备好进行数据消费。一旦 mnode 检测到处于 rebalancing 状态的消费者,便会启动 rebalance 过程,成功后,消费者的状态将转变为 ready,表示消费者已准备就绪。随后,当消费者通过定时的查询 endpoint 消息获取自身的 ready 状态以及分配的 vnode 列表后,便能正式开始消费数据。
|
||||
|
||||

|
||||
|
||||
若消费者的心跳丢失时间超过 12s,经过再平衡过程,其状态将被更新为 lost,表明消费者被认为已离线。如果心跳丢失时间超过 1 天,消费者的状态将进一步转变为 clear,此时消费者将被系统删除。然而,如果在上述过程中消费者重新发送心跳信号,其状态将恢复为 modify,并重新进入下一轮的再平衡过程。
|
||||
若消费者的心跳丢失时间超过 12s,经过 rebalance 过程,其状态将被更新为 clear,然后消费者将被系统删除。
|
||||
|
||||
当消费者主动退出时,会发送 unsubscribe 消息。该消息将清除消费者订阅的所有主题,并将消费者的状态设置为 modify。随后,在再平衡过程中,一旦消费者的状态变为ready 且无订阅的主题,mnode 将在再平衡的计时器中检测到此状态变化,并据此删除该消费者。这一系列措施确保了消费者退出的有序性和系统的稳定性。
|
||||
当消费者主动退出时,会发送 unsubscribe 消息。该消息将清除消费者订阅的所有主题,并将消费者的状态设置为 rebalancing。随后,检测到处于 rebalancing 状态的消费者,便会启动 rebalance 过程,成功后,其状态将被更新为 clear,然后消费者将被系统删除。这一系列措施确保了消费者退出的有序性和系统的稳定性。
|
||||
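A sketch of the orderly shutdown path described above, using the unsubscribe-then-close sequence.

```c
#include <stdio.h>
#include "taos.h"

void shutdown_consumer(tmq_t *tmq) {
  int32_t code = tmq_unsubscribe(tmq);   /* clears topics, lets the server rebalance and remove the consumer */
  if (code != 0) fprintf(stderr, "unsubscribe failed: %d\n", code);
  code = tmq_consumer_close(tmq);        /* releases the client-side handle */
  if (code != 0) fprintf(stderr, "close failed: %d\n", code);
}
```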
|
||||
## 消费数据
|
||||
|
||||
|
|
(six architecture diagrams were replaced with updated versions)
|
@ -2817,8 +2817,8 @@ enum {
|
|||
TOPIC_SUB_TYPE__COLUMN,
|
||||
};
|
||||
|
||||
#define DEFAULT_MAX_POLL_INTERVAL 3000000
|
||||
#define DEFAULT_SESSION_TIMEOUT 12000
|
||||
#define DEFAULT_MAX_POLL_INTERVAL 300000
|
||||
#define DEFAULT_SESSION_TIMEOUT 12000
|
||||
|
||||
typedef struct {
|
||||
char name[TSDB_TOPIC_FNAME_LEN]; // accout.topic
|
||||
|
|
|
@ -49,7 +49,7 @@ int32_t toUInteger(const char *z, int32_t n, int32_t base, uint64_t *value);
|
|||
*/
|
||||
int32_t toIntegerPure(const char *z, int32_t n, int32_t base, int64_t *value);
|
||||
|
||||
void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uint32_t type);
|
||||
int32_t taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uint32_t type);
|
||||
|
||||
void taosVariantDestroy(SVariant *pV);
|
||||
|
||||
|
|
|
@ -49,6 +49,7 @@ typedef struct {
|
|||
} SAuditRecord;
|
||||
|
||||
int32_t auditInit(const SAuditCfg *pCfg);
|
||||
void auditSetDnodeId(int32_t dnodeId);
|
||||
void auditCleanup();
|
||||
int32_t auditSend(SJson *pJson);
|
||||
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
|
||||
|
|
|
@ -197,7 +197,7 @@ typedef struct SStoreCacheReader {
|
|||
SArray *pFuncTypeList, SColumnInfo* pPkCol, int32_t numOfPks);
|
||||
void (*closeReader)(void *pReader);
|
||||
int32_t (*retrieveRows)(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds,
|
||||
SArray *pTableUidList);
|
||||
SArray *pTableUidList, bool* pGotAllRows);
|
||||
int32_t (*reuseReader)(void *pReader, void *pTableIdList, int32_t numOfTables);
|
||||
} SStoreCacheReader;
|
||||
|
||||
|
|
|
@ -218,6 +218,7 @@ typedef struct {
|
|||
} SDmNotifyHandle;
|
||||
|
||||
int32_t monInit(const SMonCfg *pCfg);
|
||||
void monSetDnodeId(int32_t dnodeId);
|
||||
void monInitVnode();
|
||||
void monCleanup();
|
||||
void monRecordLog(int64_t ts, ELogLevel level, const char *content);
|
||||
|
|
|
@ -173,6 +173,7 @@ int32_t nodesNodeToSQL(SNode* pNode, char* buf, int32_t bufSize, int32_t* len);
|
|||
char* nodesGetNameFromColumnNode(SNode* pNode);
|
||||
int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots);
|
||||
void nodesSortList(SNodeList** pList, int32_t (*)(SNode* pNode1, SNode* pNode2));
|
||||
void destroyFuncParam(void* pFuncStruct);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -494,6 +494,14 @@ typedef struct SScanWalInfo {
|
|||
tmr_h scanTimer;
|
||||
} SScanWalInfo;
|
||||
|
||||
typedef struct SFatalErrInfo {
|
||||
int32_t code;
|
||||
int64_t ts;
|
||||
int32_t threadId;
|
||||
int32_t line;
|
||||
char func[128];
|
||||
} SFatalErrInfo;
|
||||
|
||||
// meta
|
||||
typedef struct SStreamMeta {
|
||||
char* path;
|
||||
|
@ -523,14 +531,13 @@ typedef struct SStreamMeta {
|
|||
int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta
|
||||
int32_t numOfPausedTasks;
|
||||
int64_t rid;
|
||||
|
||||
int64_t chkpId;
|
||||
int32_t chkpCap;
|
||||
SArray* chkpSaved;
|
||||
SArray* chkpInUse;
|
||||
SRWLatch chkpDirLock;
|
||||
void* qHandle; // todo remove it
|
||||
void* bkdChkptMgt;
|
||||
SFatalErrInfo fatalInfo; // fatal error occurs, stream stop to execute
|
||||
int64_t chkpId;
|
||||
int32_t chkpCap;
|
||||
SArray* chkpSaved;
|
||||
SArray* chkpInUse;
|
||||
SRWLatch chkpDirLock;
|
||||
void* bkdChkptMgt;
|
||||
} SStreamMeta;
|
||||
|
||||
typedef struct STaskUpdateEntry {
|
||||
|
@ -776,6 +783,9 @@ void streamMetaRLock(SStreamMeta* pMeta);
|
|||
void streamMetaRUnLock(SStreamMeta* pMeta);
|
||||
void streamMetaWLock(SStreamMeta* pMeta);
|
||||
void streamMetaWUnLock(SStreamMeta* pMeta);
|
||||
void streamSetFatalError(SStreamMeta* pMeta, int32_t code, const char* funcName, int32_t lino);
|
||||
int32_t streamGetFatalError(const SStreamMeta* pMeta);
|
||||
|
||||
void streamMetaResetStartInfo(STaskStartInfo* pMeta, int32_t vgId);
|
||||
int32_t streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta, SArray** pTaskList);
|
||||
void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader);
|
||||
|
@ -791,7 +801,7 @@ void streamTaskSetReqConsenChkptId(SStreamTask* pTask, int64_t ts);
|
|||
|
||||
// timer
|
||||
int32_t streamTimerGetInstance(tmr_h* pTmr);
|
||||
void streamTmrReset(TAOS_TMR_CALLBACK fp, int32_t mseconds, void* param, void* handle, tmr_h* pTmrId, int32_t vgId,
|
||||
void streamTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void* pParam, void* pHandle, tmr_h* pTmrId, int32_t vgId,
|
||||
const char* pMsg);
|
||||
void streamTmrStop(tmr_h tmrId);
|
||||
|
||||
|
|
|
@ -28,9 +28,11 @@ typedef enum { HTTP_GZIP, HTTP_FLAT } EHttpCompFlag;
|
|||
int32_t taosSendHttpReport(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen,
|
||||
EHttpCompFlag flag);
|
||||
|
||||
int32_t taosSendHttpReportWithQID(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen,
|
||||
EHttpCompFlag flag, const char* qid);
|
||||
int64_t taosInitHttpChan();
|
||||
int32_t taosSendHttpReportByChan(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen,
|
||||
EHttpCompFlag flag, int64_t chanId);
|
||||
EHttpCompFlag flag, int64_t chanId, const char* qid);
|
||||
void taosDestroyHttpChan(int64_t chanId);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -165,7 +165,6 @@ int32_t taosGetFqdn(char *);
|
|||
void tinet_ntoa(char *ipstr, uint32_t ip);
|
||||
uint32_t ip2uint(const char *const ip_addr);
|
||||
int32_t taosIgnSIGPIPE();
|
||||
uint32_t taosInetAddr(const char *ipAddr);
|
||||
const char *taosInetNtoa(struct in_addr ipInt, char *dstStr, int32_t len);
|
||||
|
||||
uint64_t taosHton64(uint64_t val);
|
||||
|
|
|
@ -80,7 +80,7 @@ typedef struct {
|
|||
|
||||
SysNameInfo taosGetSysNameInfo();
|
||||
bool taosCheckCurrentInDll();
|
||||
int taosGetlocalhostname(char *hostname, size_t maxLen);
|
||||
int32_t taosGetlocalhostname(char *hostname, size_t maxLen);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -81,8 +81,11 @@ int32_t taosResetTerminalMode();
|
|||
unw_get_reg(&cursor, UNW_REG_IP, &pc); \
|
||||
fname[0] = '\0'; \
|
||||
(void)unw_get_proc_name(&cursor, fname, sizeof(fname), &offset); \
|
||||
size += 1; \
|
||||
array[size] = (char *)taosMemoryMalloc(sizeof(char) * STACKSIZE + 1); \
|
||||
if(NULL == array[size]) { \
|
||||
break; \
|
||||
} \
|
||||
size += 1; \
|
||||
snprintf(array[size], STACKSIZE, "0x%lx : (%s+0x%lx) [0x%lx]\n", (long)pc, fname, (long)offset, (long)pc); \
|
||||
} \
|
||||
if (ignoreNum < size && size > 0) { \
|
||||
|
|
|
@ -149,7 +149,7 @@ void setBufPageDirty(void* pPage, bool dirty);
|
|||
* Set the compress/ no-compress flag for paged buffer, when flushing data in disk.
|
||||
* @param pBuf
|
||||
*/
|
||||
void setBufPageCompressOnDisk(SDiskbasedBuf* pBuf, bool comp);
|
||||
int32_t setBufPageCompressOnDisk(SDiskbasedBuf* pBuf, bool comp);
|
||||
|
||||
/**
|
||||
* Set the pageId page buffer is not need
|
||||
|
|
|
@ -37,3 +37,14 @@ int32_t tGenIdPI32(void);
|
|||
* @return
|
||||
*/
|
||||
int64_t tGenIdPI64(void);
|
||||
|
||||
/**
|
||||
* Generate an qid
|
||||
*+------------+-----+-----------+---------------+
|
||||
*| nodeid| 0| serial number | 0 |
|
||||
*+------------+-----+-----------+---------------+
|
||||
*| 8bit | 16bit|32bit |8bit |
|
||||
*+------------+-----+-----------+---------------+
|
||||
* @return
|
||||
*/
|
||||
int64_t tGenQid64(int8_t dnodeId);
|
||||
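As an illustration of the layout documented in the comment above (8-bit dnode id, 16 zero bits, 32-bit serial number, 8 trailing zero bits), a hypothetical packing helper; this is not the actual `tGenQid64` implementation.

```c
#include <stdint.h>

/* Illustrative sketch only: bits 63..56 = dnode id, 55..40 = 0,
 * 39..8 = serial number, 7..0 = 0. */
static int64_t composeQid(int8_t dnodeId, uint32_t serial) {
  return ((int64_t)(uint8_t)dnodeId << 56) | ((int64_t)serial << 8);
}
```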
|
|
|
@ -552,6 +552,9 @@ int32_t createRequest(uint64_t connId, int32_t type, int64_t reqid, SRequestObj
|
|||
(*pRequest)->allocatorRefId = -1;
|
||||
|
||||
(*pRequest)->pDb = getDbOfConnection(pTscObj);
|
||||
if (NULL == (*pRequest)->pDb) {
|
||||
TSC_ERR_JRET(terrno);
|
||||
}
|
||||
(*pRequest)->pTscObj = pTscObj;
|
||||
(*pRequest)->inCallback = false;
|
||||
(*pRequest)->msgBuf = taosMemoryCalloc(1, ERROR_MSG_BUF_DEFAULT_SIZE);
|
||||
|
|
|
@ -1359,6 +1359,14 @@ static void *hbThreadFunc(void *param) {
|
|||
pInfo->msgInfo.len = tlen;
|
||||
pInfo->msgType = TDMT_MND_HEARTBEAT;
|
||||
pInfo->param = taosMemoryMalloc(sizeof(int32_t));
|
||||
if (pInfo->param == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tFreeClientHbBatchReq(pReq);
|
||||
// hbClearReqInfo(pAppHbMgr);
|
||||
taosMemoryFree(buf);
|
||||
taosMemoryFree(pInfo);
|
||||
break;
|
||||
}
|
||||
*(int32_t *)pInfo->param = i;
|
||||
pInfo->paramFreeFp = taosMemoryFree;
|
||||
pInfo->requestId = generateRequestId();
|
||||
|
|
|
@ -1291,7 +1291,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
|
|||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
pRequest->code = terrno;
|
||||
pRequest->code = code;
|
||||
}
|
||||
|
||||
if (res) {
|
||||
|
@ -1586,6 +1586,8 @@ int32_t taosConnectImpl(const char* user, const char* auth, const char* db, __ta
|
|||
pRequest->sqlstr = taosStrdup("taos_connect");
|
||||
if (pRequest->sqlstr) {
|
||||
pRequest->sqlLen = strlen(pRequest->sqlstr);
|
||||
} else {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
SMsgSendInfo* body = NULL;
|
||||
|
@ -1649,6 +1651,9 @@ static int32_t buildConnectMsg(SRequestObj* pRequest, SMsgSendInfo** pMsgSendInf
|
|||
char* db = getDbOfConnection(pObj);
|
||||
if (db != NULL) {
|
||||
tstrncpy(connectReq.db, db, sizeof(connectReq.db));
|
||||
} else if (terrno) {
|
||||
taosMemoryFree(*pMsgSendInfo);
|
||||
return terrno;
|
||||
}
|
||||
taosMemoryFreeClear(db);
|
||||
|
||||
|
@ -2043,7 +2048,7 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
|
|||
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int32_t numOfCols, int32_t* colLength) {
|
||||
int32_t idx = -1;
|
||||
iconv_t conv = taosAcquireConv(&idx, C2M);
|
||||
if (!conv) return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
if (conv == (iconv_t)-1) return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
int32_t type = pResultInfo->fields[i].type;
|
||||
|
@ -2240,6 +2245,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
|
|||
} else if (tTagIsJson(data)) {
|
||||
char* jsonString = NULL;
|
||||
parseTagDatatoJson(data, &jsonString);
|
||||
if(jsonString == NULL) {
|
||||
tscError("doConvertJson error: parseTagDatatoJson failed");
|
||||
return terrno;
|
||||
}
|
||||
STR_TO_VARSTR(dst, jsonString);
|
||||
taosMemoryFree(jsonString);
|
||||
} else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value"
|
||||
|
@ -2392,11 +2401,16 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
|
|||
}
|
||||
|
||||
char* getDbOfConnection(STscObj* pObj) {
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
char* p = NULL;
|
||||
(void)taosThreadMutexLock(&pObj->mutex);
|
||||
size_t len = strlen(pObj->db);
|
||||
if (len > 0) {
|
||||
p = strndup(pObj->db, tListLen(pObj->db));
|
||||
if (p == NULL) {
|
||||
tscError("failed to strndup db name");
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
(void)taosThreadMutexUnlock(&pObj->mutex);
|
||||
|
|
|
@ -417,6 +417,10 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
|
|||
}
|
||||
char* pJson = NULL;
|
||||
parseTagDatatoJson(pTag, &pJson);
|
||||
if(pJson == NULL) {
|
||||
uError("parseTagDatatoJson failed, pJson == NULL");
|
||||
goto end;
|
||||
}
|
||||
cJSON* tag = cJSON_CreateObject();
|
||||
RAW_NULL_CHECK(tag);
|
||||
STagVal* pTagVal = taosArrayGet(pTagVals, 0);
|
||||
|
@ -727,6 +731,10 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
|
|||
goto end;
|
||||
}
|
||||
parseTagDatatoJson(vAlterTbReq.pTagVal, &buf);
|
||||
if(buf == NULL) {
|
||||
uError("parseTagDatatoJson failed, buf == NULL");
|
||||
goto end;
|
||||
}
|
||||
} else {
|
||||
if (vAlterTbReq.tagType == TSDB_DATA_TYPE_VARBINARY) {
|
||||
buf = taosMemoryCalloc(vAlterTbReq.nTagVal * 2 + 2 + 3, 1);
|
||||
|
|
|
@ -924,6 +924,9 @@ int stmtPrepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
|||
}
|
||||
|
||||
pStmt->sql.sqlStr = strndup(sql, length);
|
||||
if (!pStmt->sql.sqlStr) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pStmt->sql.sqlLen = length;
|
||||
pStmt->sql.stbInterlaceMode = pStmt->stbInterlaceMode;
|
||||
|
||||
|
|
|
@ -194,6 +194,12 @@ static int32_t stmtUpdateBindInfo(TAOS_STMT2* stmt, STableMeta* pTableMeta, void
|
|||
pStmt->bInfo.tbSuid = pTableMeta->suid;
|
||||
pStmt->bInfo.tbVgId = pTableMeta->vgId;
|
||||
pStmt->bInfo.tbType = pTableMeta->tableType;
|
||||
|
||||
if (!pStmt->bInfo.tagsCached) {
|
||||
qDestroyBoundColInfo(pStmt->bInfo.boundTags);
|
||||
taosMemoryFreeClear(pStmt->bInfo.boundTags);
|
||||
}
|
||||
|
||||
pStmt->bInfo.boundTags = tags;
|
||||
pStmt->bInfo.tagsCached = false;
|
||||
tstrncpy(pStmt->bInfo.stbFName, sTableName, sizeof(pStmt->bInfo.stbFName));
|
||||
|
@ -863,6 +869,9 @@ int stmtPrepare2(TAOS_STMT2* stmt, const char* sql, unsigned long length) {
|
|||
}
|
||||
|
||||
pStmt->sql.sqlStr = strndup(sql, length);
|
||||
if (!pStmt->sql.sqlStr) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pStmt->sql.sqlLen = length;
|
||||
pStmt->sql.stbInterlaceMode = pStmt->stbInterlaceMode;
|
||||
|
||||
|
@ -985,10 +994,6 @@ int stmtSetTbTags2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* tags) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (pStmt->bInfo.inExecCache) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
STableDataCxt** pDataBlock =
|
||||
(STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
|
||||
if (NULL == pDataBlock) {
|
||||
|
@ -996,6 +1001,10 @@ int stmtSetTbTags2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* tags) {
|
|||
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
|
||||
}
|
||||
|
||||
if (pStmt->bInfo.inExecCache && !pStmt->sql.autoCreateTbl) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
tscDebug("start to bind stmt tag values");
|
||||
STMT_ERR_RET(qBindStmtTagsValue2(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.tbSuid, pStmt->bInfo.stbFName,
|
||||
pStmt->bInfo.sname.tname, tags, pStmt->exec.pRequest->msgBuf,
|
||||
|
|
|
@ -2620,11 +2620,15 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf
|
|||
}
|
||||
len += snprintf(dumpBuf + len, size - len, "%s |end\n", flag);
|
||||
|
||||
*pDataBuf = dumpBuf;
|
||||
dumpBuf = NULL;
|
||||
_exit:
|
||||
if (dumpBuf) {
|
||||
taosMemoryFree(dumpBuf);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
*pDataBuf = dumpBuf;
|
||||
dumpBuf = NULL;
|
||||
} else {
|
||||
uError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
|
||||
if (dumpBuf) {
|
||||
taosMemoryFree(dumpBuf);
|
||||
}
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -143,7 +143,7 @@ bool tNameIsValid(const SName* name) {
|
|||
|
||||
SName* tNameDup(const SName* name) {
|
||||
SName* p = taosMemoryMalloc(sizeof(SName));
|
||||
memcpy(p, name, sizeof(SName));
|
||||
if (p) TAOS_MEMCPY(p, name, sizeof(SName));
|
||||
return p;
|
||||
}
|
||||
|
||||
|
|
|
@ -368,7 +368,7 @@ int32_t toUInteger(const char *z, int32_t n, int32_t base, uint64_t *value) {
|
|||
* @param len
|
||||
* @param type
|
||||
*/
|
||||
void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uint32_t type) {
|
||||
int32_t taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uint32_t type) {
|
||||
switch (type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
case TSDB_DATA_TYPE_TINYINT: {
|
||||
|
@ -426,6 +426,7 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
|
|||
size_t lenInwchar = len / TSDB_NCHAR_SIZE;
|
||||
|
||||
pVar->ucs4 = taosMemoryCalloc(1, (lenInwchar + 1) * TSDB_NCHAR_SIZE);
|
||||
if(!pVar->ucs4) return terrno;
|
||||
(void)memcpy(pVar->ucs4, pz, lenInwchar * TSDB_NCHAR_SIZE);
|
||||
pVar->nLen = (int32_t)len;
|
||||
|
||||
|
@ -446,6 +447,7 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
|
|||
}
|
||||
|
||||
pVar->nType = type;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void taosVariantDestroy(SVariant *pVar) {
|
||||
|
|
|
@ -14,7 +14,9 @@
|
|||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "audit.h"
|
||||
#include "dmInt.h"
|
||||
#include "monitor.h"
|
||||
#include "systable.h"
|
||||
#include "tchecksum.h"
|
||||
|
||||
|
@ -27,6 +29,8 @@ static void dmUpdateDnodeCfg(SDnodeMgmt *pMgmt, SDnodeCfg *pCfg) {
|
|||
(void)taosThreadRwlockWrlock(&pMgmt->pData->lock);
|
||||
pMgmt->pData->dnodeId = pCfg->dnodeId;
|
||||
pMgmt->pData->clusterId = pCfg->clusterId;
|
||||
monSetDnodeId(pCfg->dnodeId);
|
||||
auditSetDnodeId(pCfg->dnodeId);
|
||||
code = dmWriteEps(pMgmt->pData);
|
||||
if (code != 0) {
|
||||
dInfo("failed to set local info, dnodeId:%d clusterId:%" PRId64 " reason:%s", pCfg->dnodeId, pCfg->clusterId,
|
||||
|
|
|
@ -360,6 +360,12 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
|
|||
int32_t vnodesPerThread = numOfVnodes / threadNum + 1;
|
||||
|
||||
SVnodeThread *threads = taosMemoryCalloc(threadNum, sizeof(SVnodeThread));
|
||||
if (threads == NULL) {
|
||||
dError("failed to allocate memory for threads since %s", terrstr());
|
||||
taosMemoryFree(pCfgs);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
for (int32_t t = 0; t < threadNum; ++t) {
|
||||
threads[t].threadIndex = t;
|
||||
threads[t].pMgmt = pMgmt;
|
||||
|
|
|
@ -15,6 +15,9 @@
|
|||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "dmMgmt.h"
|
||||
#include "dmUtil.h"
|
||||
#include "monitor.h"
|
||||
#include "audit.h"
|
||||
|
||||
int32_t dmOpenNode(SMgmtWrapper *pWrapper) {
|
||||
int32_t code = 0;
|
||||
|
@ -98,6 +101,9 @@ static int32_t dmOpenNodes(SDnode *pDnode) {
|
|||
}
|
||||
}
|
||||
|
||||
auditSetDnodeId(dmGetDnodeId(&pDnode->data));
|
||||
monSetDnodeId(dmGetDnodeId(&pDnode->data));
|
||||
|
||||
dmSetStatus(pDnode, DND_STAT_RUNNING);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -217,6 +217,7 @@ int32_t dmInitDndInfo(SDnodeData *pData);
|
|||
|
||||
// dmEps.c
|
||||
int32_t dmGetDnodeSize(SDnodeData *pData);
|
||||
int32_t dmGetDnodeId(SDnodeData *pData);
|
||||
int32_t dmReadEps(SDnodeData *pData);
|
||||
int32_t dmWriteEps(SDnodeData *pData);
|
||||
void dmUpdateEps(SDnodeData *pData, SArray *pDnodeEps);
|
||||
|
|
|
@ -88,3 +88,5 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) {
|
|||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t dmGetDnodeId(SDnodeData *pData) { return pData->dnodeId; }
|
|
@ -81,7 +81,7 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
|
|||
code = terrno;
|
||||
TAOS_RETURN(code);
|
||||
}
|
||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
||||
TAOS_CHECK_RETURN(sdbSetRawStatus(pRaw, SDB_STATUS_READY));
|
||||
|
||||
mInfo("acct:%s, will be created when deploying, raw:%p", acctObj.acct, pRaw);
|
||||
|
||||
|
|
|
@ -256,7 +256,9 @@ static int32_t mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p
|
|||
_OVER:
|
||||
(void)taosThreadMutexUnlock(&pOld->mutex);
|
||||
|
||||
(void)taosHashRemove(arbUpdateHash, &pOld->vgId, sizeof(int32_t));
|
||||
if (taosHashRemove(arbUpdateHash, &pOld->vgId, sizeof(int32_t)) != 0) {
|
||||
mError("arbgroup:%d, failed to remove from arbUpdateHash", pOld->vgId);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -451,7 +453,7 @@ static int32_t mndProcessArbHbTimer(SRpcMsg *pReq) {
|
|||
int64_t mndTerm = mndGetTerm(pMnode);
|
||||
|
||||
if (mndIsDnodeOnline(pDnode, nowMs)) {
|
||||
(void)mndSendArbHeartBeatReq(pDnode, arbToken, mndTerm, hbMembers);
|
||||
TAOS_CHECK_RETURN(mndSendArbHeartBeatReq(pDnode, arbToken, mndTerm, hbMembers));
|
||||
}
|
||||
|
||||
mndReleaseDnode(pMnode, pDnode);
|
||||
|
@ -684,7 +686,7 @@ static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) {
|
|||
sdbRelease(pSdb, pArbGroup);
|
||||
}
|
||||
|
||||
(void)mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray);
|
||||
TAOS_CHECK_RETURN(mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray));
|
||||
|
||||
taosArrayDestroy(pUpdateArray);
|
||||
return 0;
|
||||
|
@ -795,7 +797,9 @@ _OVER:
|
|||
if (ret != 0) {
|
||||
for (size_t i = 0; i < sz; i++) {
|
||||
SArbGroup *pNewGroup = taosArrayGet(newGroupArray, i);
|
||||
(void)taosHashRemove(arbUpdateHash, &pNewGroup->vgId, sizeof(pNewGroup->vgId));
|
||||
if (taosHashRemove(arbUpdateHash, &pNewGroup->vgId, sizeof(pNewGroup->vgId)) != 0) {
|
||||
mError("failed to remove vgId:%d from arbUpdateHash", pNewGroup->vgId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -839,7 +843,9 @@ static int32_t mndProcessArbUpdateGroupBatchReq(SRpcMsg *pReq) {
|
|||
SArbGroup *pOldGroup = sdbAcquire(pMnode->pSdb, SDB_ARBGROUP, &newGroup.vgId);
|
||||
if (!pOldGroup) {
|
||||
mInfo("vgId:%d, arb skip to update arbgroup, since no obj found", newGroup.vgId);
|
||||
(void)taosHashRemove(arbUpdateHash, &newGroup.vgId, sizeof(int32_t));
|
||||
if (taosHashRemove(arbUpdateHash, &newGroup.vgId, sizeof(int32_t)) != 0) {
|
||||
mError("failed to remove vgId:%d from arbUpdateHash", newGroup.vgId);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -869,7 +875,9 @@ _OVER:
|
|||
// failed to update arbgroup
|
||||
for (size_t i = 0; i < sz; i++) {
|
||||
SMArbUpdateGroup *pUpdateGroup = taosArrayGet(req.updateArray, i);
|
||||
(void)taosHashRemove(arbUpdateHash, &pUpdateGroup->vgId, sizeof(int32_t));
|
||||
if (taosHashRemove(arbUpdateHash, &pUpdateGroup->vgId, sizeof(int32_t)) != 0) {
|
||||
mError("failed to remove vgId:%d from arbUpdateHash", pUpdateGroup->vgId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1010,7 +1018,7 @@ static int32_t mndUpdateArbHeartBeat(SMnode *pMnode, int32_t dnodeId, SArray *me
|
|||
sdbRelease(pMnode->pSdb, pGroup);
|
||||
}
|
||||
|
||||
(void)mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray);
|
||||
TAOS_CHECK_RETURN(mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray));
|
||||
|
||||
taosArrayDestroy(pUpdateArray);
|
||||
return 0;
|
||||
|
@ -1102,7 +1110,7 @@ static int32_t mndProcessArbHbRsp(SRpcMsg *pRsp) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
(void)mndUpdateArbHeartBeat(pMnode, arbHbRsp.dnodeId, arbHbRsp.hbMembers);
|
||||
TAOS_CHECK_GOTO(mndUpdateArbHeartBeat(pMnode, arbHbRsp.dnodeId, arbHbRsp.hbMembers), NULL, _OVER);
|
||||
code = 0;
|
||||
|
||||
_OVER:
|
||||
|
@ -1249,6 +1257,8 @@ static int32_t mndRetrieveArbGroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
|||
int32_t numOfRows = 0;
|
||||
int32_t cols = 0;
|
||||
SArbGroup *pGroup = NULL;
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
|
||||
while (numOfRows < rows) {
|
||||
pShow->pIter = sdbFetch(pSdb, SDB_ARBGROUP, pShow->pIter, (void **)&pGroup);
|
||||
|
@ -1264,33 +1274,40 @@ static int32_t mndRetrieveArbGroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
|||
sdbRelease(pSdb, pGroup);
|
||||
continue;
|
||||
}
|
||||
char dbNameInGroup[TSDB_DB_FNAME_LEN];
|
||||
strncpy(dbNameInGroup, pVgObj->dbName, TSDB_DB_FNAME_LEN);
|
||||
sdbRelease(pSdb, pVgObj);
|
||||
|
||||
char dbname[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndGetDbStr(pVgObj->dbName), TSDB_ARB_TOKEN_SIZE + VARSTR_HEADER_SIZE);
|
||||
(void)colDataSetVal(pColInfo, numOfRows, (const char *)dbname, false);
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndGetDbStr(dbNameInGroup), TSDB_ARB_TOKEN_SIZE + VARSTR_HEADER_SIZE);
|
||||
RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)dbname, false), pGroup, &lino, _OVER);
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
(void)colDataSetVal(pColInfo, numOfRows, (const char *)&pGroup->vgId, false);
|
||||
RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pGroup->vgId, false), pGroup, &lino, _OVER);
|
||||
|
||||
for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) {
|
||||
SArbGroupMember *pMember = &pGroup->members[i];
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
(void)colDataSetVal(pColInfo, numOfRows, (const char *)&pMember->info.dnodeId, false);
|
||||
RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pMember->info.dnodeId, false), pGroup,
|
||||
&lino, _OVER);
|
||||
}
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
(void)colDataSetVal(pColInfo, numOfRows, (const char *)&pGroup->isSync, false);
|
||||
RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pGroup->isSync, false), pGroup, &lino, _OVER);
|
||||
|
||||
if (pGroup->assignedLeader.dnodeId != 0) {
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
(void)colDataSetVal(pColInfo, numOfRows, (const char *)&pGroup->assignedLeader.dnodeId, false);
|
||||
RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pGroup->assignedLeader.dnodeId, false),
|
||||
pGroup, &lino, _OVER);
|
||||
|
||||
char token[TSDB_ARB_TOKEN_SIZE + VARSTR_HEADER_SIZE] = {0};
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(token, pGroup->assignedLeader.token, TSDB_ARB_TOKEN_SIZE + VARSTR_HEADER_SIZE);
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
(void)colDataSetVal(pColInfo, numOfRows, (const char *)token, false);
|
||||
RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)token, false), pGroup, &lino, _OVER);
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
(void)colDataSetVal(pColInfo, numOfRows, (const char *)&pGroup->assignedLeader.acked, false);
|
||||
RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pGroup->assignedLeader.acked, false),
|
||||
pGroup, &lino, _OVER);
|
||||
} else {
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataSetNULL(pColInfo, numOfRows);
|
||||
|
@ -1305,10 +1322,11 @@ static int32_t mndRetrieveArbGroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
|||
(void)taosThreadMutexUnlock(&pGroup->mutex);
|
||||
|
||||
numOfRows++;
|
||||
sdbRelease(pSdb, pVgObj);
|
||||
sdbRelease(pSdb, pGroup);
|
||||
}
|
||||
|
||||
_OVER:
|
||||
if (code != 0) mError("failed to restrieve arb group at line:%d, since %s", lino, tstrerror(code));
|
||||
pShow->numOfRows += numOfRows;
|
||||
|
||||
return numOfRows;
|
||||
|
|
|
@ -1694,6 +1694,8 @@ static int32_t mndRetrieveConfigs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
|
|||
char cfgVals[TSDB_CONFIG_NUMBER][TSDB_CONFIG_VALUE_LEN + 1] = {0};
|
||||
char *pWrite = NULL;
|
||||
int32_t cols = 0;
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
|
||||
cfgOpts[totalRows] = "statusInterval";
|
||||
(void)snprintf(cfgVals[totalRows], TSDB_CONFIG_VALUE_LEN, "%d", tsStatusInterval);
|
||||
|
@ -1741,15 +1743,17 @@ static int32_t mndRetrieveConfigs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
|
|||
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(buf, cfgOpts[i], TSDB_CONFIG_OPTION_LEN);
|
||||
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
(void)colDataSetVal(pColInfo, numOfRows, (const char *)buf, false);
|
||||
TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)buf, false), &lino, _OVER);
|
||||
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(bufVal, cfgVals[i], TSDB_CONFIG_VALUE_LEN);
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
(void)colDataSetVal(pColInfo, numOfRows, (const char *)bufVal, false);
|
||||
TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)bufVal, false), &lino, _OVER);
|
||||
|
||||
numOfRows++;
|
||||
}
|
||||
|
||||
_OVER:
|
||||
if (code != 0) mError("failed to retrieve configs at line:%d since %s", lino, tstrerror(code));
|
||||
pShow->numOfRows += numOfRows;
|
||||
return numOfRows;
|
||||
}
|
||||
|
@@ -1765,6 +1769,8 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
  SDnodeObj *pDnode = NULL;
  int64_t curMs = taosGetTimestampMs();
  char buf[TSDB_EP_LEN + VARSTR_HEADER_SIZE];
  int32_t code = 0;
  int32_t lino = 0;

  while (numOfRows < rows) {
    pShow->pIter = sdbFetchAll(pSdb, SDB_DNODE, pShow->pIter, (void **)&pDnode, &objStatus, true);

@@ -1774,19 +1780,20 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
    cols = 0;

    SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->id, false);
    RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->id, false), pDnode, &lino, _OVER);

    STR_WITH_MAXSIZE_TO_VARSTR(buf, pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);

    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    (void)colDataSetVal(pColInfo, numOfRows, buf, false);
    RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, buf, false), pDnode, &lino, _OVER);

    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    int16_t id = mndGetVnodesNum(pMnode, pDnode->id);
    (void)colDataSetVal(pColInfo, numOfRows, (const char *)&id, false);
    RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&id, false), pDnode, &lino, _OVER);

    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->numOfSupportVnodes, false);
    RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->numOfSupportVnodes, false), pDnode,
                        &lino, _OVER);

    const char *status = "ready";
    if (objStatus == SDB_STATUS_CREATING) status = "creating";

@@ -1802,31 +1809,36 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB

    STR_TO_VARSTR(buf, status);
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    (void)colDataSetVal(pColInfo, numOfRows, buf, false);
    RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, buf, false), pDnode, &lino, _OVER);

    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->createdTime, false);
    RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->createdTime, false), pDnode, &lino,
                        _OVER);

    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->rebootTime, false);
    RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->rebootTime, false), pDnode, &lino,
                        _OVER);

    char *b = taosMemoryCalloc(VARSTR_HEADER_SIZE + strlen(offlineReason[pDnode->offlineReason]) + 1, 1);
    STR_TO_VARSTR(b, online ? "" : offlineReason[pDnode->offlineReason]);

    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    (void)colDataSetVal(pColInfo, numOfRows, b, false);
    RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, b, false), pDnode, &lino, _OVER);
    taosMemoryFreeClear(b);

#ifdef TD_ENTERPRISE
    STR_TO_VARSTR(buf, pDnode->machineId);
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    (void)colDataSetVal(pColInfo, numOfRows, buf, false);
    RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, buf, false), pDnode, &lino, _OVER);
#endif

    numOfRows++;
    sdbRelease(pSdb, pDnode);
  }

_OVER:
  if (code != 0) mError("failed to retrieve dnodes at line:%d since %s", lino, tstrerror(code));

  pShow->numOfRows += numOfRows;
  return numOfRows;
}
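The RETRIEVE_CHECK_GOTO calls above replace the earlier (void)colDataSetVal casts so that a failed column write is no longer ignored; judging from its arguments it presumably records the failing line, releases the dnode object fetched from the sdb iterator, and jumps to _OVER. The macro definition is not part of this diff, so the following is only a minimal self-contained sketch of that shape; all names here (RETRIEVE_CHECK_GOTO_SKETCH, sdb_release, set_col, demo_retrieve) are illustrative assumptions, not the actual TDengine definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the mnode types used in the hunk above. */
    typedef struct SSdb   SSdb;
    typedef struct SDnode SDnode;

    static void sdb_release(SSdb *sdb, void *obj) { (void)sdb; (void)obj; }

    /* Sketch of a check-and-goto macro: on error it saves the failing line in
     * *plino, releases the fetched object, and jumps to the cleanup label.
     * It relies on an int32_t `code` being visible in the enclosing scope. */
    #define RETRIEVE_CHECK_GOTO_SKETCH(expr, sdb, obj, plino, label) \
      do {                                                           \
        code = (expr);                                               \
        if (code != 0) {                                             \
          *(plino) = __LINE__;                                       \
          sdb_release((sdb), (obj));                                 \
          goto label;                                                \
        }                                                            \
      } while (0)

    static int32_t set_col(int32_t v) { return v < 0 ? -1 : 0; } /* pretend column writer */

    int32_t demo_retrieve(SSdb *sdb, SDnode *obj, int32_t value) {
      int32_t code = 0;
      int32_t lino = 0;
      RETRIEVE_CHECK_GOTO_SKETCH(set_col(value), sdb, obj, &lino, _OVER);
      return 0;
    _OVER:
      fprintf(stderr, "failed at line:%d, code:%d\n", (int)lino, (int)code);
      return code;
    }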
@@ -95,7 +95,7 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) {
    if (terrno != 0) code = terrno;
    return -1;
  }
  (void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
  TAOS_CHECK_RETURN(sdbSetRawStatus(pRaw, SDB_STATUS_READY));

  mInfo("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw);
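TAOS_CHECK_RETURN turns the previously discarded sdbSetRawStatus result into an early return to the caller. Its exact definition is not shown in this diff; the usual shape of such a macro, offered here only as a sketch under that assumption (the _SKETCH suffix and might_fail are illustrative names), is:

    #include <stdint.h>

    /* Sketch only: evaluate an expression that yields an int32_t error code and
     * propagate it to the caller immediately when it is non-zero. */
    #define TAOS_CHECK_RETURN_SKETCH(expr)   \
      do {                                   \
        int32_t _code = (expr);              \
        if (_code != 0) {                    \
          return _code;                      \
        }                                    \
      } while (0)

    static int32_t might_fail(int32_t v) { return v; }

    int32_t caller(int32_t v) {
      TAOS_CHECK_RETURN_SKETCH(might_fail(v)); /* returns early on failure */
      return 0;
    }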
@@ -846,6 +846,7 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
  if (pShow->pIter == NULL) {
    SProfileMgmt *pMgmt = &pMnode->profileMgmt;
    pShow->pIter = taosCacheCreateIter(pMgmt->connCache);
    if (!pShow->pIter) return terrno;
  }

  while (numOfRows < rows) {

@@ -1005,6 +1006,7 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
  if (pShow->pIter == NULL) {
    SProfileMgmt *pMgmt = &pMnode->profileMgmt;
    pShow->pIter = taosCacheCreateIter(pMgmt->connCache);
    if (!pShow->pIter) return terrno;
  }

  // means fetched some data last time for this conn

@@ -1042,6 +1044,7 @@ static int32_t mndRetrieveApps(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo
  if (pShow->pIter == NULL) {
    SProfileMgmt *pMgmt = &pMnode->profileMgmt;
    pShow->pIter = taosCacheCreateIter(pMgmt->appCache);
    if (!pShow->pIter) return terrno;
  }

  while (numOfRows < rows) {
@@ -651,6 +651,9 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
  streamObj.targetDbUid = pDb->uid;
  streamObj.version = 1;
  streamObj.sql = taosStrdup(pCreate->sql);
  if (!streamObj.sql) {
    return terrno;
  }
  streamObj.smaId = smaObj.uid;
  streamObj.conf.watermark = pCreate->watermark;
  streamObj.deleteMark = pCreate->deleteMark;

@@ -658,6 +661,10 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
  streamObj.conf.trigger = STREAM_TRIGGER_WINDOW_CLOSE;
  streamObj.conf.triggerParam = pCreate->maxDelay;
  streamObj.ast = taosStrdup(smaObj.ast);
  if (!streamObj.ast) {
    taosMemoryFree(streamObj.sql);
    return terrno;
  }
  streamObj.indexForMultiAggBalance = -1;

  // check the maxDelay
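The two null checks added to mndCreateSma free the already duplicated streamObj.sql before returning when the second taosStrdup fails, so the early return does not leak the first allocation. The same idea in a small standalone form (standard C; stream_cfg and the field names are illustrative, not TDengine types):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct stream_cfg {
      char *sql;
      char *ast;
    };

    /* Duplicate both strings or fail cleanly: on the second failure the first
     * allocation is released before the error is returned, mirroring the hunk above. */
    int stream_cfg_init(struct stream_cfg *cfg, const char *sql, const char *ast) {
      cfg->sql = strdup(sql);
      if (cfg->sql == NULL) return ENOMEM;

      cfg->ast = strdup(ast);
      if (cfg->ast == NULL) {
        free(cfg->sql); /* undo the earlier allocation */
        cfg->sql = NULL;
        return ENOMEM;
      }
      return 0;
    }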
@@ -1509,6 +1516,9 @@ static int32_t mndRetrieveIdx(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
  if (pShow->pIter == NULL) {
    pShow->pIter = taosMemoryCalloc(1, sizeof(SSmaAndTagIter));
  }
  if (!pShow->pIter) {
    return terrno;
  }
  int32_t read = mndRetrieveSma(pReq, pShow, pBlock, rows);
  if (read < rows) {
    read += mndRetrieveTagIdx(pReq, pShow, pBlock, rows - read);
@ -1555,27 +1565,6 @@ static void initSMAObj(SCreateTSMACxt* pCxt) {
|
|||
pCxt->pSma->ast = pCxt->pCreateSmaReq->ast;
|
||||
}
|
||||
|
||||
static void initStreamObj(SStreamObj *pStream, const char *streamName, const SMCreateSmaReq *pCreateReq,
|
||||
const SDbObj *pDb, SSmaObj *pSma) {
|
||||
tstrncpy(pStream->name, streamName, TSDB_STREAM_FNAME_LEN);
|
||||
tstrncpy(pStream->sourceDb, pDb->name, TSDB_DB_FNAME_LEN);
|
||||
tstrncpy(pStream->targetDb, pDb->name, TSDB_DB_FNAME_LEN);
|
||||
pStream->createTime = taosGetTimestampMs();
|
||||
pStream->updateTime = pStream->createTime;
|
||||
pStream->uid = mndGenerateUid(streamName, strlen(streamName));
|
||||
pStream->sourceDbUid = pDb->uid;
|
||||
pStream->targetDbUid = pDb->uid;
|
||||
pStream->version = 1;
|
||||
pStream->sql = taosStrdup(pCreateReq->sql);
|
||||
pStream->smaId = pSma->uid;
|
||||
pStream->conf.watermark = 0;
|
||||
pStream->deleteMark = 0;
|
||||
pStream->conf.fillHistory = STREAM_FILL_HISTORY_ON;
|
||||
pStream->conf.trigger = STREAM_TRIGGER_WINDOW_CLOSE;
|
||||
pStream->conf.triggerParam = 10000;
|
||||
pStream->ast = taosStrdup(pSma->ast);
|
||||
}
|
||||
|
||||
static int32_t mndCreateTSMABuildCreateStreamReq(SCreateTSMACxt *pCxt) {
|
||||
tstrncpy(pCxt->pCreateStreamReq->name, pCxt->streamName, TSDB_STREAM_FNAME_LEN);
|
||||
tstrncpy(pCxt->pCreateStreamReq->sourceDB, pCxt->pDb->name, TSDB_DB_FNAME_LEN);
|
||||
|
@@ -1653,11 +1642,15 @@ static int32_t mndCreateTSMABuildCreateStreamReq(SCreateTSMACxt *pCxt) {
  return code;
}

static void mndCreateTSMABuildDropStreamReq(SCreateTSMACxt* pCxt) {
static int32_t mndCreateTSMABuildDropStreamReq(SCreateTSMACxt* pCxt) {
  tstrncpy(pCxt->pDropStreamReq->name, pCxt->streamName, TSDB_STREAM_FNAME_LEN);
  pCxt->pDropStreamReq->igNotExists = false;
  pCxt->pDropStreamReq->sql = taosStrdup(pCxt->pDropSmaReq->name);
  if (!pCxt->pDropStreamReq->sql) {
    return terrno;
  }
  pCxt->pDropStreamReq->sqlLen = strlen(pCxt->pDropStreamReq->sql);
  return TSDB_CODE_SUCCESS;
}

static int32_t mndSetUpdateDbTsmaVersionPrepareLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pOld, SDbObj *pNew) {
@ -1815,7 +1808,10 @@ static int32_t mndCreateTSMA(SCreateTSMACxt *pCxt) {
|
|||
if (TSDB_CODE_SUCCESS != code) {
|
||||
goto _OVER;
|
||||
}
|
||||
mndCreateTSMABuildDropStreamReq(pCxt);
|
||||
code = mndCreateTSMABuildDropStreamReq(pCxt);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS != (code = mndCreateTSMATxnPrepare(pCxt))) {
|
||||
goto _OVER;
|
||||
|
@ -1985,7 +1981,10 @@ static int32_t mndDropTSMA(SCreateTSMACxt* pCxt) {
|
|||
}
|
||||
SMDropStreamReq dropStreamReq = {0};
|
||||
pCxt->pDropStreamReq = &dropStreamReq;
|
||||
mndCreateTSMABuildDropStreamReq(pCxt);
|
||||
code = mndCreateTSMABuildDropStreamReq(pCxt);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
goto _OVER;
|
||||
}
|
||||
mndTransSetDbName(pTrans, pCxt->pDb->name, NULL);
|
||||
if (mndTransCheckConflict(pCxt->pMnode, pTrans) != 0) goto _OVER;
|
||||
mndTransSetSerial(pTrans);
|
||||
|
@ -2141,12 +2140,15 @@ static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo
|
|||
SMnode * pMnode = pReq->info.node;
|
||||
int32_t code = 0;
|
||||
SColumnInfoData *pColInfo;
|
||||
if (pShow->db[0]) {
|
||||
pDb = mndAcquireDb(pMnode, pShow->db);
|
||||
}
|
||||
if (pShow->pIter == NULL) {
|
||||
pShow->pIter = taosMemoryCalloc(1, sizeof(SSmaAndTagIter));
|
||||
}
|
||||
if (!pShow->pIter) {
|
||||
return terrno;
|
||||
}
|
||||
if (pShow->db[0]) {
|
||||
pDb = mndAcquireDb(pMnode, pShow->db);
|
||||
}
|
||||
SSmaAndTagIter *pIter = pShow->pIter;
|
||||
while (numOfRows < rows) {
|
||||
pIter->pSmaIter = sdbFetch(pMnode->pSdb, SDB_SMA, pIter->pSmaIter, (void **)&pSma);
|
||||
|
@ -2273,7 +2275,7 @@ static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo
|
|||
mndReleaseDb(pMnode, pSrcDb);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
sdbCancelFetch(pMnode->pSdb, pIter->pSmaIter);
|
||||
numOfRows = -1;
|
||||
numOfRows = code;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1143,18 +1143,16 @@ int32_t extractStreamNodeList(SMnode *pMnode) {
|
|||
return taosArrayGetSize(execInfo.pNodeList);
|
||||
}
|
||||
|
||||
static bool taskNodeIsUpdated(SMnode *pMnode) {
|
||||
bool allReady = true;
|
||||
SArray *pNodeSnapshot = NULL;
|
||||
|
||||
// check if the node update happens or not
|
||||
streamMutexLock(&execInfo.lock);
|
||||
static int32_t doCheckForUpdated(SMnode *pMnode, SArray **ppNodeSnapshot) {
|
||||
bool allReady = false;
|
||||
bool nodeUpdated = false;
|
||||
SVgroupChangeInfo changeInfo = {0};
|
||||
|
||||
int32_t numOfNodes = extractStreamNodeList(pMnode);
|
||||
|
||||
if (numOfNodes == 0) {
|
||||
mDebug("stream task node change checking done, no vgroups exist, do nothing");
|
||||
execInfo.ts = taosGetTimestampSec();
|
||||
streamMutexUnlock(&execInfo.lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -1166,43 +1164,46 @@ static bool taskNodeIsUpdated(SMnode *pMnode) {
|
|||
|
||||
if (pNodeEntry->stageUpdated) {
|
||||
mDebug("stream task not ready due to node update detected, checkpoint not issued");
|
||||
streamMutexUnlock(&execInfo.lock);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t code = mndTakeVgroupSnapshot(pMnode, &allReady, &pNodeSnapshot);
|
||||
int32_t code = mndTakeVgroupSnapshot(pMnode, &allReady, ppNodeSnapshot);
|
||||
if (code) {
|
||||
mError("failed to get the vgroup snapshot, ignore it and continue");
|
||||
}
|
||||
|
||||
if (!allReady) {
|
||||
mWarn("not all vnodes ready, quit from vnodes status check");
|
||||
taosArrayDestroy(pNodeSnapshot);
|
||||
streamMutexUnlock(&execInfo.lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
SVgroupChangeInfo changeInfo = {0};
|
||||
code = mndFindChangedNodeInfo(pMnode, execInfo.pNodeList, pNodeSnapshot, &changeInfo);
|
||||
code = mndFindChangedNodeInfo(pMnode, execInfo.pNodeList, *ppNodeSnapshot, &changeInfo);
|
||||
if (code) {
|
||||
streamMutexUnlock(&execInfo.lock);
|
||||
return false;
|
||||
nodeUpdated = false;
|
||||
} else {
|
||||
nodeUpdated = (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0);
|
||||
if (nodeUpdated) {
|
||||
mDebug("stream tasks not ready due to node update");
|
||||
}
|
||||
}
|
||||
|
||||
bool nodeUpdated = (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0);
|
||||
|
||||
mndDestroyVgroupChangeInfo(&changeInfo);
|
||||
taosArrayDestroy(pNodeSnapshot);
|
||||
|
||||
if (nodeUpdated) {
|
||||
mDebug("stream tasks not ready due to node update");
|
||||
}
|
||||
|
||||
streamMutexUnlock(&execInfo.lock);
|
||||
return nodeUpdated;
|
||||
}
|
||||
|
||||
// check if the node update happens or not
|
||||
static bool taskNodeIsUpdated(SMnode *pMnode) {
|
||||
SArray *pNodeSnapshot = NULL;
|
||||
|
||||
streamMutexLock(&execInfo.lock);
|
||||
bool updated = doCheckForUpdated(pMnode, &pNodeSnapshot);
|
||||
streamMutexUnlock(&execInfo.lock);
|
||||
|
||||
taosArrayDestroy(pNodeSnapshot);
|
||||
return updated;
|
||||
}
|
||||
|
||||
static int32_t mndCheckTaskAndNodeStatus(SMnode *pMnode) {
|
||||
bool ready = true;
|
||||
if (taskNodeIsUpdated(pMnode)) {
|
||||
|
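The refactor above moves the body of taskNodeIsUpdated into doCheckForUpdated and leaves the wrapper responsible only for taking the stream mutex, calling the worker, releasing the mutex, and destroying the snapshot array, so the worker's early returns no longer each need their own unlock and cleanup. A compact illustration of that split, using plain pthread primitives instead of the TDengine wrappers (all names here are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Worker: may return from several places and never touches the lock itself. */
    static bool check_for_update(int **snapshot_out) {
      *snapshot_out = malloc(4 * sizeof(int));
      if (*snapshot_out == NULL) return false; /* early return, no unlock needed */
      (*snapshot_out)[0] = 1;
      return (*snapshot_out)[0] != 0;
    }

    /* Wrapper: owns locking and cleanup exactly once. */
    bool node_is_updated(void) {
      int *snapshot = NULL;
      pthread_mutex_lock(&g_lock);
      bool updated = check_for_update(&snapshot);
      pthread_mutex_unlock(&g_lock);
      free(snapshot);
      return updated;
    }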
@ -1993,7 +1994,7 @@ static int32_t mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pPrevNodeLis
|
|||
|
||||
if (pInfo->pUpdateNodeList == NULL || pInfo->pDBMap == NULL) {
|
||||
mndDestroyVgroupChangeInfo(pInfo);
|
||||
return terrno;
|
||||
TSDB_CHECK_NULL(NULL, code, lino, _err, terrno);
|
||||
}
|
||||
|
||||
int32_t numOfNodes = taosArrayGetSize(pPrevNodeList);
|
||||
|
@ -2048,6 +2049,7 @@ static int32_t mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pPrevNodeLis
|
|||
return code;
|
||||
|
||||
_err:
|
||||
mError("failed to find node change info, code:%s at %s line:%d", tstrerror(code), __func__, lino);
|
||||
mndDestroyVgroupChangeInfo(pInfo);
|
||||
return code;
|
||||
}
|
||||
|
@@ -2193,8 +2195,9 @@ static int32_t refreshNodeListFromExistedStreams(SMnode *pMnode, SArray *pNodeLi

    SNodeEntry entry = {.hbTimestamp = -1, .nodeId = pTask->info.nodeId, .lastHbMsgId = -1};
    epsetAssign(&entry.epset, &pTask->info.epSet);
    if (taosHashPut(pHash, &entry.nodeId, sizeof(entry.nodeId), &entry, sizeof(entry)) != 0) {
      mError("failed to put entry into hash map, nodeId:%d", entry.nodeId);
    int32_t ret = taosHashPut(pHash, &entry.nodeId, sizeof(entry.nodeId), &entry, sizeof(entry));
    if (ret != 0 && ret != TSDB_CODE_DUP_KEY) {
      mError("failed to put entry into hash map, nodeId:%d, code:%s", entry.nodeId, tstrerror(ret));
    }
  }
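The new check treats TSDB_CODE_DUP_KEY as success, presumably because several tasks can legitimately report the same node id while the node list is being rebuilt; only genuine failures are logged (with the result of this particular put, not the function-level status). A small standalone analogue of an insert helper that tolerates duplicates, with illustrative names only:

    #include <stdio.h>

    enum { PUT_OK = 0, PUT_DUP_KEY = 1, PUT_ERROR = 2 };

    /* Pretend map insert: duplicates report PUT_DUP_KEY instead of failing hard. */
    static int map_put(int key) { return (key % 2 == 0) ? PUT_OK : PUT_DUP_KEY; }

    /* Insert a node id, treating "already present" as success. */
    int put_node(int node_id) {
      int ret = map_put(node_id);
      if (ret != PUT_OK && ret != PUT_DUP_KEY) {
        fprintf(stderr, "failed to put entry, nodeId:%d, ret:%d\n", node_id, ret);
        return ret;
      }
      return PUT_OK;
    }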
@ -628,6 +628,7 @@ static int32_t doBuildStreamTaskUpdateMsg(void **pBuf, int32_t *pLen, SVgroupCha
|
|||
code = tEncodeStreamTaskUpdateMsg(&encoder, &req);
|
||||
if (code == -1) {
|
||||
tEncoderClear(&encoder);
|
||||
taosMemoryFree(buf);
|
||||
taosArrayDestroy(req.pNodeList);
|
||||
return code;
|
||||
}
|
||||
|
@ -648,21 +649,25 @@ static int32_t doBuildStreamTaskUpdateMsg(void **pBuf, int32_t *pLen, SVgroupCha
|
|||
static int32_t doSetUpdateTaskAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask, SVgroupChangeInfo *pInfo) {
|
||||
void *pBuf = NULL;
|
||||
int32_t len = 0;
|
||||
SEpSet epset = {0};
|
||||
bool hasEpset = false;
|
||||
|
||||
bool unusedRet = streamTaskUpdateEpsetInfo(pTask, pInfo->pUpdateNodeList);
|
||||
int32_t code = doBuildStreamTaskUpdateMsg(&pBuf, &len, pInfo, pTask->info.nodeId, &pTask->id, pTrans->id);
|
||||
if (code) {
|
||||
mError("failed to build stream task epset update msg, code:%s", tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
SEpSet epset = {0};
|
||||
bool hasEpset = false;
|
||||
code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId);
|
||||
if (code != TSDB_CODE_SUCCESS || !hasEpset) {
|
||||
mError("failed to extract epset during create update epset, code:%s", tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = setTransAction(pTrans, pBuf, len, TDMT_VND_STREAM_TASK_UPDATE, &epset, 0, TSDB_CODE_VND_INVALID_VGROUP_ID);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mError("failed to create update task epset trans, code:%s", tstrerror(code));
|
||||
taosMemoryFree(pBuf);
|
||||
}
|
||||
|
||||
|
|
|
@ -68,54 +68,64 @@ static void mndGetStat(SMnode* pMnode, SMnodeStat* pStat) {
|
|||
|
||||
static void mndBuildRuntimeInfo(SMnode* pMnode, SJson* pJson) {
|
||||
SMnodeStat mstat = {0};
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
mndGetStat(pMnode, &mstat);
|
||||
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfDnode", mstat.numOfDnode);
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfMnode", mstat.numOfMnode);
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfVgroup", mstat.numOfVgroup);
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfDatabase", mstat.numOfDatabase);
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfSuperTable", mstat.numOfSuperTable);
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfChildTable", mstat.numOfChildTable);
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfColumn", mstat.numOfColumn);
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfPoint", mstat.totalPoints);
|
||||
(void)tjsonAddDoubleToObject(pJson, "totalStorage", mstat.totalStorage);
|
||||
(void)tjsonAddDoubleToObject(pJson, "compStorage", mstat.compStorage);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfDnode", mstat.numOfDnode), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfMnode", mstat.numOfMnode), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfVgroup", mstat.numOfVgroup), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfDatabase", mstat.numOfDatabase), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfSuperTable", mstat.numOfSuperTable), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfChildTable", mstat.numOfChildTable), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfColumn", mstat.numOfColumn), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfPoint", mstat.totalPoints), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "totalStorage", mstat.totalStorage), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "compStorage", mstat.compStorage), &lino, _OVER);
|
||||
_OVER:
|
||||
if (code != 0) mError("failed to mndBuildRuntimeInfo at line:%d since %s", lino, tstrerror(code));
|
||||
}
|
||||
|
||||
static char* mndBuildTelemetryReport(SMnode* pMnode) {
|
||||
char tmp[4096] = {0};
|
||||
STelemMgmt* pMgmt = &pMnode->telemMgmt;
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
|
||||
SJson* pJson = tjsonCreateObject();
|
||||
if (pJson == NULL) return NULL;
|
||||
|
||||
char clusterName[64] = {0};
|
||||
if ((terrno = mndGetClusterName(pMnode, clusterName, sizeof(clusterName))) != 0) return NULL;
|
||||
(void)tjsonAddStringToObject(pJson, "instanceId", clusterName);
|
||||
(void)tjsonAddDoubleToObject(pJson, "reportVersion", 1);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "instanceId", clusterName), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "reportVersion", 1), &lino, _OVER);
|
||||
|
||||
if (taosGetOsReleaseName(tmp, NULL, NULL, sizeof(tmp)) == 0) {
|
||||
(void)tjsonAddStringToObject(pJson, "os", tmp);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "os", tmp), &lino, _OVER);
|
||||
}
|
||||
|
||||
float numOfCores = 0;
|
||||
if (taosGetCpuInfo(tmp, sizeof(tmp), &numOfCores) == 0) {
|
||||
(void)tjsonAddStringToObject(pJson, "cpuModel", tmp);
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfCpu", numOfCores);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "cpuModel", tmp), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfCpu", numOfCores), &lino, _OVER);
|
||||
} else {
|
||||
(void)tjsonAddDoubleToObject(pJson, "numOfCpu", tsNumOfCores);
|
||||
TAOS_CHECK_GOTO(tjsonAddDoubleToObject(pJson, "numOfCpu", tsNumOfCores), &lino, _OVER);
|
||||
}
|
||||
|
||||
snprintf(tmp, sizeof(tmp), "%" PRId64 " kB", tsTotalMemoryKB);
|
||||
(void)tjsonAddStringToObject(pJson, "memory", tmp);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), &lino, _OVER);
|
||||
|
||||
(void)tjsonAddStringToObject(pJson, "version", version);
|
||||
(void)tjsonAddStringToObject(pJson, "buildInfo", buildinfo);
|
||||
(void)tjsonAddStringToObject(pJson, "gitInfo", gitinfo);
|
||||
(void)tjsonAddStringToObject(pJson, "email", pMgmt->email);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "email", pMgmt->email), &lino, _OVER);
|
||||
|
||||
mndBuildRuntimeInfo(pMnode, pJson);
|
||||
|
||||
_OVER:
|
||||
if (code != 0) {
|
||||
mError("failed to build telemetry report at lino:%d, since %s", lino, tstrerror(code));
|
||||
}
|
||||
char* pCont = tjsonToString(pJson);
|
||||
tjsonDelete(pJson);
|
||||
return pCont;
|
||||
|
|
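Note that in mndBuildTelemetryReport the _OVER label does not abort the function: after the failed field is logged with its line number, the partially built JSON object is still serialized with tjsonToString and deleted, so the caller receives whatever portion of the report was assembled. The same control flow reduced to a standalone sketch with placeholder types (report_t, add_field, build_report are illustrative, not the tjson API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { char buf[128]; } report_t;

    static int add_field(report_t *r, const char *kv) { /* pretend tjsonAdd... call */
      if (strlen(r->buf) + strlen(kv) + 2 >= sizeof(r->buf)) return -1;
      strcat(r->buf, kv);
      strcat(r->buf, ";");
      return 0;
    }

    /* Build as much of the report as possible; on failure log and fall through,
     * so the partial report is still returned to the caller. */
    char *build_report(void) {
      int code = 0, lino = 0;
      report_t *r = calloc(1, sizeof(*r));
      if (r == NULL) return NULL;

      if ((code = add_field(r, "os=linux")) != 0) { lino = __LINE__; goto _OVER; }
      if ((code = add_field(r, "memory=64GB")) != 0) { lino = __LINE__; goto _OVER; }

    _OVER:
      if (code != 0) fprintf(stderr, "failed to build report at line:%d\n", lino);
      char *out = strdup(r->buf); /* serialize whatever was built */
      free(r);
      return out;
    }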
|
@@ -342,7 +342,8 @@ int32_t mndUpdateIpWhiteImpl(SHashObj *pIpWhiteTab, char *user, char *fqdn, int8
  SIpV4Range range = {.ip = 0, .mask = 32};
  int32_t code = taosGetIpv4FromFqdn(fqdn, &range.ip);
  if (code) {
    // TODO
    mError("failed to get ip from fqdn: %s at line %d since %s", fqdn, lino, tstrerror(code));
    TAOS_RETURN(TSDB_CODE_TSC_INVALID_FQDN);
  }
  mDebug("ip-white-list may update for user: %s, fqdn: %s", user, fqdn);
  SIpWhiteList **ppList = taosHashGet(pIpWhiteTab, user, strlen(user));
|
|
@ -186,7 +186,7 @@ int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList,
|
|||
SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr,
|
||||
SArray *pFuncTypeList, SColumnInfo *pkCol, int32_t numOfPks);
|
||||
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds,
|
||||
SArray *pTableUids);
|
||||
SArray *pTableUids, bool *pGotAllRows);
|
||||
void tsdbCacherowsReaderClose(void *pReader);
|
||||
|
||||
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
|
||||
|
|
|
@ -319,7 +319,7 @@ int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData);
|
|||
// STsdbSnapWriter ========================================
|
||||
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, void* pRanges, STsdbSnapWriter** ppWriter);
|
||||
int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, SSnapDataHdr* pHdr);
|
||||
int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* pWriter);
|
||||
int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* pWriter, bool rollback);
|
||||
int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback);
|
||||
// STsdbSnapRAWReader ========================================
|
||||
int32_t tsdbSnapRAWReaderOpen(STsdb* pTsdb, int64_t ever, int8_t type, STsdbSnapRAWReader** ppReader);
|
||||
|
@ -373,7 +373,7 @@ int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData);
|
|||
// SRSmaSnapWriter ========================================
|
||||
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, void** ppRanges, SRSmaSnapWriter** ppWriter);
|
||||
int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
|
||||
int32_t rsmaSnapWriterPrepareClose(SRSmaSnapWriter* pWriter);
|
||||
int32_t rsmaSnapWriterPrepareClose(SRSmaSnapWriter* pWriter, bool rollback);
|
||||
int32_t rsmaSnapWriterClose(SRSmaSnapWriter** ppWriter, int8_t rollback);
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -286,7 +286,7 @@ int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo) {
|
|||
|
||||
SMetaCacheEntry* pEntryNew = (SMetaCacheEntry*)taosMemoryMalloc(sizeof(*pEntryNew));
|
||||
if (pEntryNew == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = terrno;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
|
@ -411,7 +411,7 @@ int32_t metaStatsCacheUpsert(SMeta* pMeta, SMetaStbStats* pInfo) {
|
|||
|
||||
SMetaStbStatsEntry* pEntryNew = (SMetaStbStatsEntry*)taosMemoryMalloc(sizeof(*pEntryNew));
|
||||
if (pEntryNew == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = terrno;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
|
@ -492,7 +492,7 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv
|
|||
LRUHandle* pRes = taosLRUCacheLookup(pCache, buf, len);
|
||||
if (pRes == NULL) { // remove the item in the linked list
|
||||
if (taosArrayPush(pInvalidRes, &pNode) == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
} else {
|
||||
(void)taosLRUCacheRelease(pCache, pRes, false);
|
||||
|
@ -612,7 +612,7 @@ static void freeUidCachePayload(const void* key, size_t keyLen, void* value, voi
|
|||
static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyLen, uint64_t suid) {
|
||||
STagFilterResEntry* p = taosMemoryMalloc(sizeof(STagFilterResEntry));
|
||||
if (p == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
p->hitTimes = 0;
|
||||
|
|
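The changes in this hunk replace hard-coded TSDB_CODE_OUT_OF_MEMORY returns with the value already stored in terrno, on the assumption that taosMemoryMalloc and taosArrayPush set terrno themselves when they fail, so the caller sees whatever error the allocator actually reported. A minimal analogue of that convention using a thread-local error slot (all names illustrative):

    #include <stdlib.h>

    #define ERR_OUT_OF_MEMORY 0x0100
    #define ERR_OUT_OF_RANGE  0x0101

    static _Thread_local int t_errno = 0; /* stand-in for terrno */

    /* The allocator records the precise failure reason in the thread-local slot. */
    static void *alloc_or_set_err(size_t n) {
      if (n == 0) { t_errno = ERR_OUT_OF_RANGE; return NULL; }
      void *p = malloc(n);
      if (p == NULL) t_errno = ERR_OUT_OF_MEMORY;
      return p;
    }

    /* The caller propagates t_errno instead of guessing OUT_OF_MEMORY. */
    int make_entry(size_t n, void **out) {
      *out = alloc_or_set_err(n);
      if (*out == NULL) return t_errno; /* was: return ERR_OUT_OF_MEMORY */
      return 0;
    }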
|
@ -39,7 +39,7 @@ int meteDecodeColCmprEntry(SDecoder *pDecoder, SMetaEntry *pME) {
|
|||
uDebug("dencode cols:%d", pWrapper->nCols);
|
||||
pWrapper->pColCmpr = (SColCmpr *)tDecoderMalloc(pDecoder, pWrapper->nCols * sizeof(SColCmpr));
|
||||
if (pWrapper->pColCmpr == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
for (int i = 0; i < pWrapper->nCols; i++) {
|
||||
|
@ -53,7 +53,7 @@ static FORCE_INLINE int32_t metatInitDefaultSColCmprWrapper(SDecoder *pDecoder,
|
|||
SSchemaWrapper *pSchema) {
|
||||
pCmpr->nCols = pSchema->nCols;
|
||||
if ((pCmpr->pColCmpr = (SColCmpr *)tDecoderMalloc(pDecoder, pCmpr->nCols * sizeof(SColCmpr))) == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pCmpr->nCols; i++) {
|
||||
|
@ -149,7 +149,7 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
|
|||
} else if (pME->type == TSDB_TSMA_TABLE) {
|
||||
pME->smaEntry.tsma = tDecoderMalloc(pCoder, sizeof(STSma));
|
||||
if (!pME->smaEntry.tsma) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
TAOS_CHECK_RETURN(tDecodeTSma(pCoder, pME->smaEntry.tsma, true));
|
||||
} else {
|
||||
|
|
|
@ -27,15 +27,15 @@ static int taskIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int k
|
|||
static int btimeIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
|
||||
static int ncolIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
|
||||
|
||||
static int32_t metaInitLock(SMeta *pMeta) {
|
||||
static void metaInitLock(SMeta *pMeta) {
|
||||
TdThreadRwlockAttr attr;
|
||||
(void)taosThreadRwlockAttrInit(&attr);
|
||||
(void)taosThreadRwlockAttrSetKindNP(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
|
||||
(void)taosThreadRwlockInit(&pMeta->lock, &attr);
|
||||
(void)taosThreadRwlockAttrDestroy(&attr);
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
static int32_t metaDestroyLock(SMeta *pMeta) { return taosThreadRwlockDestroy(&pMeta->lock); }
|
||||
static void metaDestroyLock(SMeta *pMeta) { (void)taosThreadRwlockDestroy(&pMeta->lock); }
|
||||
|
||||
static void metaCleanup(SMeta **ppMeta);
|
||||
|
||||
|
@ -56,7 +56,7 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
|
|||
TSDB_CHECK_CODE(code = terrno, lino, _exit);
|
||||
}
|
||||
|
||||
(void)metaInitLock(pMeta);
|
||||
metaInitLock(pMeta);
|
||||
|
||||
pMeta->path = (char *)&pMeta[1];
|
||||
strcpy(pMeta->path, path);
|
||||
|
@ -188,17 +188,23 @@ int metaAlterCache(SMeta *pMeta, int32_t nPage) {
|
|||
|
||||
void metaRLock(SMeta *pMeta) {
|
||||
metaTrace("meta rlock %p", &pMeta->lock);
|
||||
(void)taosThreadRwlockRdlock(&pMeta->lock);
|
||||
if (taosThreadRwlockRdlock(&pMeta->lock) != 0) {
|
||||
metaError("vgId:%d failed to lock %p", TD_VID(pMeta->pVnode), &pMeta->lock);
|
||||
}
|
||||
}
|
||||
|
||||
void metaWLock(SMeta *pMeta) {
|
||||
metaTrace("meta wlock %p", &pMeta->lock);
|
||||
(void)taosThreadRwlockWrlock(&pMeta->lock);
|
||||
if (taosThreadRwlockWrlock(&pMeta->lock) != 0) {
|
||||
metaError("vgId:%d failed to lock %p", TD_VID(pMeta->pVnode), &pMeta->lock);
|
||||
}
|
||||
}
|
||||
|
||||
void metaULock(SMeta *pMeta) {
|
||||
metaTrace("meta ulock %p", &pMeta->lock);
|
||||
(void)taosThreadRwlockUnlock(&pMeta->lock);
|
||||
if (taosThreadRwlockUnlock(&pMeta->lock) != 0) {
|
||||
metaError("vgId:%d failed to unlock %p", TD_VID(pMeta->pVnode), &pMeta->lock);
|
||||
}
|
||||
}
|
||||
|
||||
static void metaCleanup(SMeta **ppMeta) {
|
||||
|
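metaRLock, metaWLock, and metaULock keep their void signatures, but the (void) casts on the rwlock calls are replaced with an if that logs the vgroup id when a lock operation fails, since the callers have no way to recover anyway. The shape of such a wrapper, shown with plain pthread calls and illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
      int              vgId;
      pthread_rwlock_t lock;
    } meta_t;

    /* Lock failures cannot be propagated through the void API, so they are logged. */
    void meta_rlock(meta_t *m) {
      if (pthread_rwlock_rdlock(&m->lock) != 0) {
        fprintf(stderr, "vgId:%d failed to lock %p\n", m->vgId, (void *)&m->lock);
      }
    }

    void meta_unlock(meta_t *m) {
      if (pthread_rwlock_unlock(&m->lock) != 0) {
        fprintf(stderr, "vgId:%d failed to unlock %p\n", m->vgId, (void *)&m->lock);
      }
    }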
@ -223,7 +229,7 @@ static void metaCleanup(SMeta **ppMeta) {
|
|||
if (pMeta->pSkmDb) tdbTbClose(pMeta->pSkmDb);
|
||||
if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb);
|
||||
if (pMeta->pEnv) tdbClose(pMeta->pEnv);
|
||||
(void)metaDestroyLock(pMeta);
|
||||
metaDestroyLock(pMeta);
|
||||
|
||||
taosMemoryFreeClear(*ppMeta);
|
||||
}
|
||||
|
|
|
@ -98,7 +98,7 @@ int metaReaderGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid) {
|
|||
SMeta *pMeta = pReader->pMeta;
|
||||
|
||||
SMetaInfo info;
|
||||
int32_t code = metaGetInfo(pMeta, uid, &info, pReader);
|
||||
int32_t code = metaGetInfo(pMeta, uid, &info, pReader);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
return terrno = (TSDB_CODE_NOT_FOUND == code ? TSDB_CODE_PAR_TABLE_NOT_EXIST : code);
|
||||
}
|
||||
|
@ -260,7 +260,7 @@ void metaCloseTbCursor(SMTbCursor *pTbCur) {
|
|||
if (!pTbCur->paused) {
|
||||
metaReaderClear(&pTbCur->mr);
|
||||
if (pTbCur->pDbc) {
|
||||
(void)tdbTbcClose((TBC *)pTbCur->pDbc);
|
||||
tdbTbcClose((TBC *)pTbCur->pDbc);
|
||||
}
|
||||
}
|
||||
taosMemoryFree(pTbCur);
|
||||
|
@ -270,7 +270,7 @@ void metaCloseTbCursor(SMTbCursor *pTbCur) {
|
|||
void metaPauseTbCursor(SMTbCursor *pTbCur) {
|
||||
if (!pTbCur->paused) {
|
||||
metaReaderClear(&pTbCur->mr);
|
||||
(void)tdbTbcClose((TBC *)pTbCur->pDbc);
|
||||
tdbTbcClose((TBC *)pTbCur->pDbc);
|
||||
pTbCur->paused = 1;
|
||||
}
|
||||
}
|
||||
|
@ -353,7 +353,11 @@ int32_t metaTbCursorPrev(SMTbCursor *pTbCur, ETableType jumpTableType) {
|
|||
|
||||
tDecoderClear(&pTbCur->mr.coder);
|
||||
|
||||
(void)metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t *)pTbCur->pKey);
|
||||
ret = metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t *)pTbCur->pKey);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (pTbCur->mr.me.type == jumpTableType) {
|
||||
continue;
|
||||
}
|
||||
|
@ -387,7 +391,10 @@ _query:
|
|||
|
||||
SMetaEntry me = {0};
|
||||
tDecoderInit(&dc, pData, nData);
|
||||
(void)metaDecodeEntry(&dc, &me);
|
||||
int32_t code = metaDecodeEntry(&dc, &me);
|
||||
if (code) {
|
||||
goto _err;
|
||||
}
|
||||
if (me.type == TSDB_SUPER_TABLE) {
|
||||
if (sver == -1 || sver == me.stbEntry.schemaRow.version) {
|
||||
pSchema = tCloneSSchemaWrapper(&me.stbEntry.schemaRow);
|
||||
|
@ -463,7 +470,7 @@ void metaCloseCtbCursor(SMCtbCursor *pCtbCur) {
|
|||
if (!pCtbCur->paused) {
|
||||
if (pCtbCur->pMeta && pCtbCur->lock) metaULock(pCtbCur->pMeta);
|
||||
if (pCtbCur->pCur) {
|
||||
(void)tdbTbcClose(pCtbCur->pCur);
|
||||
tdbTbcClose(pCtbCur->pCur);
|
||||
}
|
||||
}
|
||||
tdbFree(pCtbCur->pKey);
|
||||
|
@ -474,7 +481,7 @@ void metaCloseCtbCursor(SMCtbCursor *pCtbCur) {
|
|||
|
||||
void metaPauseCtbCursor(SMCtbCursor *pCtbCur) {
|
||||
if (!pCtbCur->paused) {
|
||||
(void)tdbTbcClose((TBC *)pCtbCur->pCur);
|
||||
tdbTbcClose((TBC *)pCtbCur->pCur);
|
||||
if (pCtbCur->lock) {
|
||||
metaULock(pCtbCur->pMeta);
|
||||
}
|
||||
|
@ -502,17 +509,17 @@ int32_t metaResumeCtbCursor(SMCtbCursor *pCtbCur, int8_t first) {
|
|||
ctbIdxKey.suid = pCtbCur->suid;
|
||||
ctbIdxKey.uid = INT64_MIN;
|
||||
int c = 0;
|
||||
(void)tdbTbcMoveTo(pCtbCur->pCur, &ctbIdxKey, sizeof(ctbIdxKey), &c);
|
||||
ret = tdbTbcMoveTo(pCtbCur->pCur, &ctbIdxKey, sizeof(ctbIdxKey), &c);
|
||||
if (c > 0) {
|
||||
(void)tdbTbcMoveToNext(pCtbCur->pCur);
|
||||
ret = tdbTbcMoveToNext(pCtbCur->pCur);
|
||||
}
|
||||
} else {
|
||||
int c = 0;
|
||||
ret = tdbTbcMoveTo(pCtbCur->pCur, pCtbCur->pKey, pCtbCur->kLen, &c);
|
||||
if (c < 0) {
|
||||
(void)tdbTbcMoveToPrev(pCtbCur->pCur);
|
||||
ret = tdbTbcMoveToPrev(pCtbCur->pCur);
|
||||
} else {
|
||||
(void)tdbTbcMoveToNext(pCtbCur->pCur);
|
||||
ret = tdbTbcMoveToNext(pCtbCur->pCur);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -570,9 +577,9 @@ SMStbCursor *metaOpenStbCursor(SMeta *pMeta, tb_uid_t suid) {
|
|||
}
|
||||
|
||||
// move to the suid
|
||||
(void)tdbTbcMoveTo(pStbCur->pCur, &suid, sizeof(suid), &c);
|
||||
ret = tdbTbcMoveTo(pStbCur->pCur, &suid, sizeof(suid), &c);
|
||||
if (c > 0) {
|
||||
(void)tdbTbcMoveToNext(pStbCur->pCur);
|
||||
ret = tdbTbcMoveToNext(pStbCur->pCur);
|
||||
}
|
||||
|
||||
return pStbCur;
|
||||
|
@ -582,7 +589,7 @@ void metaCloseStbCursor(SMStbCursor *pStbCur) {
|
|||
if (pStbCur) {
|
||||
if (pStbCur->pMeta) metaULock(pStbCur->pMeta);
|
||||
if (pStbCur->pCur) {
|
||||
(void)tdbTbcClose(pStbCur->pCur);
|
||||
tdbTbcClose(pStbCur->pCur);
|
||||
|
||||
tdbFree(pStbCur->pKey);
|
||||
tdbFree(pStbCur->pVal);
|
||||
|
@ -616,17 +623,17 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) {
|
|||
return pTSchema;
|
||||
}
|
||||
|
||||
int32_t metaGetTbTSchemaNotNull(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema) {
|
||||
int32_t metaGetTbTSchemaNotNull(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema **ppTSchema) {
|
||||
*ppTSchema = metaGetTbTSchema(pMeta, uid, sver, lock);
|
||||
if(*ppTSchema == NULL) {
|
||||
if (*ppTSchema == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t metaGetTbTSchemaMaybeNull(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema) {
|
||||
int32_t metaGetTbTSchemaMaybeNull(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema **ppTSchema) {
|
||||
*ppTSchema = metaGetTbTSchema(pMeta, uid, sver, lock);
|
||||
if(*ppTSchema == NULL && terrno == TSDB_CODE_OUT_OF_MEMORY) {
|
||||
if (*ppTSchema == NULL && terrno == TSDB_CODE_OUT_OF_MEMORY) {
|
||||
return terrno;
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -656,30 +663,30 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv
|
|||
|
||||
if (tdbTbcMoveTo(pSkmDbC, &skmDbKey, sizeof(skmDbKey), &c) < 0) {
|
||||
metaULock(pMeta);
|
||||
(void)tdbTbcClose(pSkmDbC);
|
||||
tdbTbcClose(pSkmDbC);
|
||||
code = TSDB_CODE_NOT_FOUND;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
if (c == 0) {
|
||||
metaULock(pMeta);
|
||||
(void)tdbTbcClose(pSkmDbC);
|
||||
tdbTbcClose(pSkmDbC);
|
||||
code = TSDB_CODE_FAILED;
|
||||
metaError("meta/query: incorrect c: %" PRId32 ".", c);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
if (c < 0) {
|
||||
(void)tdbTbcMoveToPrev(pSkmDbC);
|
||||
int32_t ret = tdbTbcMoveToPrev(pSkmDbC);
|
||||
}
|
||||
|
||||
const void *pKey = NULL;
|
||||
int32_t nKey = 0;
|
||||
(void)tdbTbcGet(pSkmDbC, &pKey, &nKey, NULL, NULL);
|
||||
int32_t ret = tdbTbcGet(pSkmDbC, &pKey, &nKey, NULL, NULL);
|
||||
|
||||
if (((SSkmDbKey *)pKey)->uid != skmDbKey.uid) {
|
||||
metaULock(pMeta);
|
||||
(void)tdbTbcClose(pSkmDbC);
|
||||
tdbTbcClose(pSkmDbC);
|
||||
code = TSDB_CODE_NOT_FOUND;
|
||||
goto _exit;
|
||||
}
|
||||
|
@ -687,7 +694,7 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv
|
|||
sver = ((SSkmDbKey *)pKey)->sver;
|
||||
|
||||
metaULock(pMeta);
|
||||
(void)tdbTbcClose(pSkmDbC);
|
||||
tdbTbcClose(pSkmDbC);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -805,9 +812,9 @@ SMSmaCursor *metaOpenSmaCursor(SMeta *pMeta, tb_uid_t uid) {
|
|||
// move to the suid
|
||||
smaIdxKey.uid = uid;
|
||||
smaIdxKey.smaUid = INT64_MIN;
|
||||
(void)tdbTbcMoveTo(pSmaCur->pCur, &smaIdxKey, sizeof(smaIdxKey), &c);
|
||||
ret = tdbTbcMoveTo(pSmaCur->pCur, &smaIdxKey, sizeof(smaIdxKey), &c);
|
||||
if (c > 0) {
|
||||
(void)tdbTbcMoveToNext(pSmaCur->pCur);
|
||||
ret = tdbTbcMoveToNext(pSmaCur->pCur);
|
||||
}
|
||||
|
||||
return pSmaCur;
|
||||
|
@ -817,7 +824,7 @@ void metaCloseSmaCursor(SMSmaCursor *pSmaCur) {
|
|||
if (pSmaCur) {
|
||||
if (pSmaCur->pMeta) metaULock(pSmaCur->pMeta);
|
||||
if (pSmaCur->pCur) {
|
||||
(void)tdbTbcClose(pSmaCur->pCur);
|
||||
tdbTbcClose(pSmaCur->pCur);
|
||||
pSmaCur->pCur = NULL;
|
||||
|
||||
tdbFree(pSmaCur->pKey);
|
||||
|
@ -916,7 +923,7 @@ STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) {
|
|||
_err:
|
||||
metaReaderClear(&mr);
|
||||
taosArrayDestroy(pSmaIds);
|
||||
(void)tFreeTSmaWrapper(pSW, deepCopy);
|
||||
pSW = tFreeTSmaWrapper(pSW, deepCopy);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -1036,36 +1043,6 @@ const void *metaGetTableTagVal(const void *pTag, int16_t type, STagVal *val) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef TAG_FILTER_DEBUG
|
||||
if (IS_VAR_DATA_TYPE(val->type)) {
|
||||
char *buf = taosMemoryCalloc(val->nData + 1, 1);
|
||||
memcpy(buf, val->pData, val->nData);
|
||||
metaDebug("metaTag table val varchar index:%d cid:%d type:%d value:%s", 1, val->cid, val->type, buf);
|
||||
taosMemoryFree(buf);
|
||||
} else {
|
||||
double dval = 0;
|
||||
GET_TYPED_DATA(dval, double, val->type, &val->i64);
|
||||
metaDebug("metaTag table val number index:%d cid:%d type:%d value:%f", 1, val->cid, val->type, dval);
|
||||
}
|
||||
|
||||
SArray *pTagVals = NULL;
|
||||
tTagToValArray((STag *)pTag, &pTagVals);
|
||||
for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
|
||||
STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
|
||||
char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
|
||||
memcpy(buf, pTagVal->pData, pTagVal->nData);
|
||||
metaDebug("metaTag table varchar index:%d cid:%d type:%d value:%s", i, pTagVal->cid, pTagVal->type, buf);
|
||||
taosMemoryFree(buf);
|
||||
} else {
|
||||
double dval = 0;
|
||||
GET_TYPED_DATA(dval, double, pTagVal->type, &pTagVal->i64);
|
||||
metaDebug("metaTag table number index:%d cid:%d type:%d value:%f", i, pTagVal->cid, pTagVal->type, dval);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
|
@ -1088,6 +1065,9 @@ int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
|
|||
|
||||
SIdxCursor *pCursor = NULL;
|
||||
pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));
|
||||
if (pCursor == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
pCursor->pMeta = pMeta;
|
||||
pCursor->suid = param->suid;
|
||||
pCursor->cid = param->cid;
|
||||
|
@ -1144,7 +1124,7 @@ int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
|
|||
|
||||
END:
|
||||
if (pCursor->pMeta) metaULock(pCursor->pMeta);
|
||||
if (pCursor->pCur) (void)tdbTbcClose(pCursor->pCur);
|
||||
if (pCursor->pCur) tdbTbcClose(pCursor->pCur);
|
||||
taosMemoryFree(pCursor);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1160,6 +1140,9 @@ int32_t metaFilterTableName(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
|
|||
|
||||
SIdxCursor *pCursor = NULL;
|
||||
pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));
|
||||
if (pCursor == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
pCursor->pMeta = pMeta;
|
||||
pCursor->suid = param->suid;
|
||||
pCursor->cid = param->cid;
|
||||
|
@ -1216,7 +1199,7 @@ int32_t metaFilterTableName(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
|
|||
|
||||
END:
|
||||
if (pCursor->pMeta) metaULock(pCursor->pMeta);
|
||||
if (pCursor->pCur) (void)tdbTbcClose(pCursor->pCur);
|
||||
if (pCursor->pCur) tdbTbcClose(pCursor->pCur);
|
||||
taosMemoryFree(buf);
|
||||
taosMemoryFree(pKey);
|
||||
|
||||
|
@ -1235,6 +1218,9 @@ int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
|
|||
|
||||
SIdxCursor *pCursor = NULL;
|
||||
pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));
|
||||
if (pCursor == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
pCursor->pMeta = pMeta;
|
||||
pCursor->suid = param->suid;
|
||||
pCursor->cid = param->cid;
|
||||
|
@ -1245,7 +1231,7 @@ int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
|
|||
|
||||
END:
|
||||
if (pCursor->pMeta) metaULock(pCursor->pMeta);
|
||||
if (pCursor->pCur) (void)tdbTbcClose(pCursor->pCur);
|
||||
if (pCursor->pCur) tdbTbcClose(pCursor->pCur);
|
||||
taosMemoryFree(buf);
|
||||
taosMemoryFree(pKey);
|
||||
|
||||
|
@ -1422,7 +1408,7 @@ int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
|
|||
|
||||
END:
|
||||
if (pCursor->pMeta) metaULock(pCursor->pMeta);
|
||||
if (pCursor->pCur) (void)tdbTbcClose(pCursor->pCur);
|
||||
if (pCursor->pCur) tdbTbcClose(pCursor->pCur);
|
||||
if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf);
|
||||
tDecoderClear(&dc);
|
||||
tdbFree(pData);
|
||||
|
@ -1474,7 +1460,7 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) {
|
|||
if (!p->pTagVal) {
|
||||
if (isLock) metaULock(pMeta);
|
||||
|
||||
TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY);
|
||||
TAOS_RETURN(terrno);
|
||||
}
|
||||
memcpy(p->pTagVal, val, len);
|
||||
tdbFree(val);
|
||||
|
@ -1525,13 +1511,13 @@ int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) {
|
|||
if (!info.pTagVal) {
|
||||
metaCloseCtbCursor(pCur);
|
||||
taosHashCleanup(pSepecifiedUidMap);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
memcpy(info.pTagVal, pCur->pVal, pCur->vLen);
|
||||
if (taosArrayPush(pUidTagInfo, &info) == NULL) {
|
||||
metaCloseCtbCursor(pCur);
|
||||
taosHashCleanup(pSepecifiedUidMap);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
}
|
||||
} else { // only the specified tables need to be added
|
||||
|
@ -1552,7 +1538,7 @@ int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) {
|
|||
if (!pTagInfo->pTagVal) {
|
||||
metaCloseCtbCursor(pCur);
|
||||
taosHashCleanup(pSepecifiedUidMap);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
memcpy(pTagInfo->pTagVal, pCur->pVal, pCur->vLen);
|
||||
}
|
||||
|
@ -1604,7 +1590,10 @@ int32_t metaGetInfo(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo, SMetaReader *pR
|
|||
}
|
||||
// upsert the cache
|
||||
metaWLock(pMeta);
|
||||
(void)metaCacheUpsert(pMeta, pInfo);
|
||||
int32_t ret = metaCacheUpsert(pMeta, pInfo);
|
||||
if (ret != 0) {
|
||||
metaError("vgId:%d, failed to upsert cache, uid:%" PRId64, TD_VID(pMeta->pVnode), uid);
|
||||
}
|
||||
metaULock(pMeta);
|
||||
|
||||
if (lock) {
|
||||
|
@ -1654,7 +1643,12 @@ int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables, int32_t
|
|||
|
||||
// upsert the cache
|
||||
metaWLock(pVnodeObj->pMeta);
|
||||
(void)metaStatsCacheUpsert(pVnodeObj->pMeta, &state);
|
||||
|
||||
int32_t ret = metaStatsCacheUpsert(pVnodeObj->pMeta, &state);
|
||||
if (ret) {
|
||||
metaError("failed to upsert stats, uid:%" PRId64 ", ctbNum:%" PRId64 ", colNum:%d", uid, ctbNum, colNum);
|
||||
}
|
||||
|
||||
metaULock(pVnodeObj->pMeta);
|
||||
|
||||
_exit:
|
||||
|
@ -1667,6 +1661,10 @@ void metaUpdateStbStats(SMeta *pMeta, int64_t uid, int64_t deltaCtb, int32_t del
|
|||
if (metaStatsCacheGet(pMeta, uid, &stats) == TSDB_CODE_SUCCESS) {
|
||||
stats.ctbNum += deltaCtb;
|
||||
stats.colNum += deltaCol;
|
||||
(void)metaStatsCacheUpsert(pMeta, &stats);
|
||||
int32_t code = metaStatsCacheUpsert(pMeta, &stats);
|
||||
if (code) {
|
||||
metaError("vgId:%d, failed to update stats, uid:%" PRId64 ", ctbNum:%" PRId64 ", colNum:%d",
|
||||
TD_VID(pMeta->pVnode), uid, deltaCtb, deltaCol);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -106,7 +106,7 @@ static int metaSaveSmaToDB(SMeta *pMeta, const SMetaEntry *pME) {

  pVal = taosMemoryMalloc(vLen);
  if (pVal == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    terrno = terrno;
    goto _err;
  }