Merge branch '3.0' into fix/valgrind
This commit is contained in:
commit
e23c1482da
11
README-CN.md
11
README-CN.md
|
@ -50,19 +50,12 @@ TDengine 目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后
|
|||
|
||||
## 安装工具
|
||||
|
||||
### Ubuntu 16.04 及以上版本 & Debian:
|
||||
### Ubuntu 18.04 及以上版本 & Debian:
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev
|
||||
```
|
||||
|
||||
### Ubuntu 14.04:
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake3 build-essential git binutils-2.26
|
||||
export PATH=/usr/lib/binutils-2.26/bin:$PATH
|
||||
```
|
||||
|
||||
编译或打包 JDBC 驱动源码,需安装 Java JDK 8 或以上版本和 Apache Maven 2.7 或以上版本。
|
||||
|
||||
安装 OpenJDK 8:
|
||||
|
@ -89,7 +82,7 @@ taosTools 是用于 TDengine 的辅助工具软件集合。目前它包含 taosB
|
|||
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
|
||||
```
|
||||
|
||||
### CentOS 7:
|
||||
### CentOS 7.9:
|
||||
|
||||
```bash
|
||||
sudo yum install -y gcc gcc-c++ make cmake git openssl-devel
|
||||
|
|
11
README.md
11
README.md
|
@ -53,19 +53,12 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
|
|||
|
||||
## Install build dependencies
|
||||
|
||||
### Ubuntu 16.04 and above or Debian
|
||||
### Ubuntu 18.04 and above or Debian
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev
|
||||
```
|
||||
|
||||
### Ubuntu 14.04
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake3 build-essential git binutils-2.26
|
||||
export PATH=/usr/lib/binutils-2.26/bin:$PATH
|
||||
```
|
||||
|
||||
To compile and package the JDBC driver source code, you should have a Java jdk-8 or higher and Apache Maven 2.7 or higher installed.
|
||||
|
||||
To install openjdk-8:
|
||||
|
@ -91,7 +84,7 @@ To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debia
|
|||
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
|
||||
```
|
||||
|
||||
### CentOS 7
|
||||
### CentOS 7.9
|
||||
|
||||
```bash
|
||||
sudo yum install epel-release
|
||||
|
|
|
@ -22,6 +22,7 @@ ELSEIF (TD_WINDOWS)
|
|||
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
|
||||
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .)
|
||||
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .)
|
||||
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .)
|
||||
|
||||
IF (TD_MVN_INSTALLED)
|
||||
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc)
|
||||
|
|
|
@ -97,13 +97,13 @@ IF ("${CPUTYPE}" STREQUAL "")
|
|||
ELSE ()
|
||||
# if generate ARM version:
|
||||
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
|
||||
IF (${CPUTYPE} MATCHES "aarch32")
|
||||
IF (${CPUTYPE} MATCHES "aarch32" OR ${CPUTYPE} MATCHES "arm32")
|
||||
SET(PLATFORM_ARCH_STR "arm")
|
||||
MESSAGE(STATUS "input cpuType: aarch32")
|
||||
ADD_DEFINITIONS("-D_TD_ARM_")
|
||||
ADD_DEFINITIONS("-D_TD_ARM_32")
|
||||
SET(TD_ARM_32 TRUE)
|
||||
ELSEIF (${CPUTYPE} MATCHES "aarch64")
|
||||
ELSEIF (${CPUTYPE} MATCHES "aarch64" OR ${CPUTYPE} MATCHES "arm64")
|
||||
SET(PLATFORM_ARCH_STR "arm64")
|
||||
MESSAGE(STATUS "input cpuType: aarch64")
|
||||
ADD_DEFINITIONS("-D_TD_ARM_")
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG 766dcc4
|
||||
GIT_TAG 3d21433
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG 3c7dafe
|
||||
GIT_TAG 2d68404
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosws-rs
|
||||
ExternalProject_Add(taosws-rs
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
||||
GIT_TAG 97c4bac
|
||||
GIT_TAG 7a54d21
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -47,27 +47,28 @@ If the displayed content is followed by `...` you can use this command to change
|
|||
|
||||
You can change the behavior of TDengine CLI by specifying command-line parameters. The following parameters are commonly used.
|
||||
|
||||
- -h, --host=HOST: FQDN of the server where the TDengine server is to be connected. Default is to connect to the local service
|
||||
- -P, --port=PORT: Specify the port number to be used by the server. Default is `6030`
|
||||
- -u, --user=USER: the user name to use when connecting. Default is `root`
|
||||
- -p, --password=PASSWORD: the password to use when connecting to the server. Default is `taosdata`
|
||||
- -h HOST: FQDN of the server where the TDengine server is to be connected. Default is to connect to the local service
|
||||
- -P PORT: Specify the port number to be used by the server. Default is `6030`
|
||||
- -u USER: the user name to use when connecting. Default is `root`
|
||||
- -p PASSWORD: the password to use when connecting to the server. Default is `taosdata`
|
||||
- -?, --help: print out all command-line arguments
|
||||
|
||||
And many more parameters.
|
||||
|
||||
- -c, --config-dir: Specify the directory where configuration file exists. The default is `/etc/taos`, and the default name of the configuration file in this directory is `taos.cfg`
|
||||
- -C, --dump-config: Print the configuration parameters of `taos.cfg` in the default directory or specified by -c
|
||||
- -d, --database=DATABASE: Specify the database to use when connecting to the server
|
||||
- -D, --directory=DIRECTORY: Import the SQL script file in the specified path
|
||||
- -f, --file=FILE: Execute the SQL script file in non-interactive mode
|
||||
- -k, --check=CHECK: Specify the table to be checked
|
||||
- -l, --pktlen=PKTLEN: Test package size to be used for network testing
|
||||
- -n, --netrole=NETROLE: test scope for network connection test, default is `startup`. The value can be `client`, `server`, `rpc`, `startup`, `sync`, `speed`, or `fqdn`.
|
||||
- -r, --raw-time: output the timestamp format as unsigned 64-bits integer (uint64_t in C language)
|
||||
- -s, --commands=COMMAND: execute SQL commands in non-interactive mode
|
||||
- -S, --pkttype=PKTTYPE: Specify the packet type used for network testing. The default is TCP, can be specified as either TCP or UDP when `speed` is specified to `netrole` parameter
|
||||
- -T, --thread=THREADNUM: The number of threads to import data in multi-threaded mode
|
||||
- -s, --commands: Run TDengine CLI commands without entering the terminal
|
||||
- -a AUTHSTR: The auth string to use when connecting to the server
|
||||
- -A: Generate auth string from password
|
||||
- -c CONFIGDIR: Specify the directory where configuration file exists. The default is `/etc/taos`, and the default name of the configuration file in this directory is `taos.cfg`
|
||||
- -C: Print the configuration parameters of `taos.cfg` in the default directory or specified by -c
|
||||
- -d DATABASE: Specify the database to use when connecting to the server
|
||||
- -f FILE: Execute the SQL script file in non-interactive mode
|
||||
- -k: Check the service status, 0: unavailable,1: network ok,2: service ok,3: service degraded,4: exiting
|
||||
- -l PKTLEN: Test package length to be used for network testing
|
||||
- -n NETROLE: test scope for network connection test, default is `client`. The value can be `client`, `server`
|
||||
- -N PKTNUM: Test package numbers to be used for network testing
|
||||
- -r: output the timestamp format as unsigned 64-bits integer (uint64_t in C language)
|
||||
- -s COMMAND: execute SQL commands in non-interactive mode
|
||||
- -t: Check the details of the service status,status same as -k
|
||||
- -w DISPLAYWIDTH: 客户端列显示宽度
|
||||
- -z, --timezone=TIMEZONE: Specify time zone. Default is the value of current configuration file
|
||||
- -V, --version: Print out the current version number
|
||||
|
||||
|
|
|
@ -5,12 +5,11 @@ description: "List of platforms supported by TDengine server, client, and connec
|
|||
|
||||
## List of supported platforms for TDengine server
|
||||
|
||||
| | **CentOS 7/8** | **Ubuntu 16/18/20** | **Other Linux** |
|
||||
| ------------ | -------------- | ------------------- | --------------- |
|
||||
| X64 | ● | ● | |
|
||||
| MIPS64 | | | ● |
|
||||
| ARM64 | | ○ | ○ |
|
||||
| Alpha64 | | | ○ |
|
||||
| | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **Other Linux** | **UOS** | **Kylin** | **Ningsi V60/V80** | **HUAWEI EulerOS** |
|
||||
| ------------------ | ----------------- | ---------------- | ---------------- | --------------- | ------- | --------- | ------------------ | ------------------ |
|
||||
| X64 | ● | ● | ● | | ● | ● | ● | |
|
||||
| Raspberry Pi ARM64 | | | | ● | | | | |
|
||||
| HUAWEI cloud ARM64 | | | | | | | | ● |
|
||||
|
||||
Note: ● means officially tested and verified, ○ means unofficially tested and verified.
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d
|
|||
- _taosBenchmark_: TDengine testing tool
|
||||
- _remove.sh_: script to uninstall TDengine, please execute it carefully, link to the **rmtaos** command in the /usr/bin directory. Will remove the TDengine installation directory `/usr/local/taos`, but will keep `/etc/taos`, `/var/lib/taos`, `/var/log/taos`
|
||||
- _taosadapter_: server-side executable that provides RESTful services and accepts writing requests from a variety of other softwares
|
||||
- _tarbitrator_: provides arbitration for two-node cluster deployments
|
||||
- _TDinsight.sh_: script to download TDinsight and install it
|
||||
- _set_core.sh_: script for setting up the system to generate core dump files for easy debugging
|
||||
- _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution.
|
||||
|
|
|
@ -2,6 +2,9 @@
|
|||
sidebar_label: Docker
|
||||
title: 通过 Docker 快速体验 TDengine
|
||||
---
|
||||
:::info
|
||||
如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
|
||||
:::
|
||||
|
||||
本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲
|
|||
实现 UDF 时,需要实现规定的接口函数
|
||||
- 标量函数需要实现标量接口函数 scalarfn 。
|
||||
- 聚合函数需要实现聚合接口函数 aggfn_start , aggfn , aggfn_finish。
|
||||
- 如果需要初始化,实现 udf_init;如果需要清理工作,实现udf_destory。
|
||||
- 如果需要初始化,实现 udf_init;如果需要清理工作,实现udf_destroy。
|
||||
|
||||
接口函数的名称是 UDF 名称,或者是 UDF 名称和特定后缀(_start, _finish, _init, _destroy)的连接。列表中的scalarfn,aggfn, udf需要替换成udf函数名。
|
||||
|
||||
|
|
|
@ -3,27 +3,11 @@ sidebar_label: Kubernetes
|
|||
title: 在 Kubernetes 上部署 TDengine 集群
|
||||
---
|
||||
|
||||
## 配置 ConfigMap
|
||||
以下配置文件可以从 [GitHub 仓库](https://github.com/taosdata/TDengine-Operator/tree/3.0/src/tdengine) 下载。
|
||||
|
||||
为 TDengine 创建 `taoscfg.yaml`,此文件中的配置将作为环境变量传入 TDengine 镜像,更新此配置将导致所有 TDengine POD 重启。
|
||||
## 配置 Service 服务
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: taoscfg
|
||||
labels:
|
||||
app: tdengine
|
||||
data:
|
||||
CLUSTER: "1"
|
||||
TAOS_KEEP: "3650"
|
||||
TAOS_DEBUG_FLAG: "135"
|
||||
```
|
||||
|
||||
## 配置服务
|
||||
|
||||
创建一个 service 配置文件:`taosd-service.yaml`,服务名称 `metadata.name` (此处为 "taosd") 将在下一步中使用到。添加 TDengine 所用到的所有端口:
|
||||
创建一个 Service 配置文件:`taosd-service.yaml`,服务名称 `metadata.name` (此处为 "taosd") 将在下一步中使用到。添加 TDengine 所用到的所有端口:
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
@ -38,45 +22,9 @@ spec:
|
|||
- name: tcp6030
|
||||
protocol: "TCP"
|
||||
port: 6030
|
||||
- name: tcp6035
|
||||
protocol: "TCP"
|
||||
port: 6035
|
||||
- name: tcp6041
|
||||
protocol: "TCP"
|
||||
port: 6041
|
||||
- name: udp6030
|
||||
protocol: "UDP"
|
||||
port: 6030
|
||||
- name: udp6031
|
||||
protocol: "UDP"
|
||||
port: 6031
|
||||
- name: udp6032
|
||||
protocol: "UDP"
|
||||
port: 6032
|
||||
- name: udp6033
|
||||
protocol: "UDP"
|
||||
port: 6033
|
||||
- name: udp6034
|
||||
protocol: "UDP"
|
||||
port: 6034
|
||||
- name: udp6035
|
||||
protocol: "UDP"
|
||||
port: 6035
|
||||
- name: udp6036
|
||||
protocol: "UDP"
|
||||
port: 6036
|
||||
- name: udp6037
|
||||
protocol: "UDP"
|
||||
port: 6037
|
||||
- name: udp6038
|
||||
protocol: "UDP"
|
||||
port: 6038
|
||||
- name: udp6039
|
||||
protocol: "UDP"
|
||||
port: 6039
|
||||
- name: udp6040
|
||||
protocol: "UDP"
|
||||
port: 6040
|
||||
selector:
|
||||
app: "tdengine"
|
||||
```
|
||||
|
@ -109,7 +57,7 @@ spec:
|
|||
spec:
|
||||
containers:
|
||||
- name: "tdengine"
|
||||
image: "zitsen/taosd:develop"
|
||||
image: "tdengine/tdengine:3.0.0.0"
|
||||
imagePullPolicy: "Always"
|
||||
envFrom:
|
||||
- configMapRef:
|
||||
|
@ -118,45 +66,9 @@ spec:
|
|||
- name: tcp6030
|
||||
protocol: "TCP"
|
||||
containerPort: 6030
|
||||
- name: tcp6035
|
||||
protocol: "TCP"
|
||||
containerPort: 6035
|
||||
- name: tcp6041
|
||||
protocol: "TCP"
|
||||
containerPort: 6041
|
||||
- name: udp6030
|
||||
protocol: "UDP"
|
||||
containerPort: 6030
|
||||
- name: udp6031
|
||||
protocol: "UDP"
|
||||
containerPort: 6031
|
||||
- name: udp6032
|
||||
protocol: "UDP"
|
||||
containerPort: 6032
|
||||
- name: udp6033
|
||||
protocol: "UDP"
|
||||
containerPort: 6033
|
||||
- name: udp6034
|
||||
protocol: "UDP"
|
||||
containerPort: 6034
|
||||
- name: udp6035
|
||||
protocol: "UDP"
|
||||
containerPort: 6035
|
||||
- name: udp6036
|
||||
protocol: "UDP"
|
||||
containerPort: 6036
|
||||
- name: udp6037
|
||||
protocol: "UDP"
|
||||
containerPort: 6037
|
||||
- name: udp6038
|
||||
protocol: "UDP"
|
||||
containerPort: 6038
|
||||
- name: udp6039
|
||||
protocol: "UDP"
|
||||
containerPort: 6039
|
||||
- name: udp6040
|
||||
protocol: "UDP"
|
||||
containerPort: 6040
|
||||
env:
|
||||
# POD_NAME for FQDN config
|
||||
- name: POD_NAME
|
||||
|
@ -190,14 +102,13 @@ spec:
|
|||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- taos
|
||||
- -s
|
||||
- "show mnodes"
|
||||
- taos-check
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5000
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 6030
|
||||
exec:
|
||||
command:
|
||||
- taos-check
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 20
|
||||
volumeClaimTemplates:
|
||||
|
@ -206,44 +117,78 @@ spec:
|
|||
spec:
|
||||
accessModes:
|
||||
- "ReadWriteOnce"
|
||||
storageClassName: "csi-rbd-sc"
|
||||
storageClassName: "standard"
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
```
|
||||
|
||||
## 启动集群
|
||||
## 使用 kubectl 命令部署 TDengine 集群
|
||||
|
||||
将前述三个文件添加到 Kubernetes 集群中:
|
||||
顺序执行以下命令。
|
||||
|
||||
```bash
|
||||
kubectl apply -f taoscfg.yaml
|
||||
kubectl apply -f taosd-service.yaml
|
||||
kubectl apply -f tdengine.yaml
|
||||
|
||||
```
|
||||
|
||||
上面的配置将生成一个两节点的 TDengine 集群,dnode 是自动配置的,可以使用 `show dnodes` 命令查看当前集群的节点:
|
||||
上面的配置将生成一个三节点的 TDengine 集群,dnode 是自动配置的,可以使用 show dnodes 命令查看当前集群的节点:
|
||||
|
||||
```bash
|
||||
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
|
||||
kubectl exec -i -t tdengine-1 -- taos -s "show dnodes"
|
||||
|
||||
kubectl exec -i -t tdengine-2 -- taos -s "show dnodes"
|
||||
```
|
||||
|
||||
输出如下:
|
||||
|
||||
```
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 17:13:24.181 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 17:14:09.257 | |
|
||||
Query OK, 2 row(s) in set (0.000997s)
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||
============================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
|
||||
Query OK, 3 rows in database (0.003655s)
|
||||
```
|
||||
|
||||
## 使能端口转发
|
||||
|
||||
利用 kubectl 端口转发功能可以使应用可以访问 Kubernetes 环境运行的 TDengine 集群。
|
||||
|
||||
```
|
||||
kubectl port-forward tdengine-0 6041:6041 &
|
||||
```
|
||||
|
||||
使用 curl 命令验证 TDengine REST API 使用的 6041 接口。
|
||||
|
||||
```
|
||||
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
|
||||
Handling connection for 6041
|
||||
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
|
||||
```
|
||||
|
||||
## 使用 dashboard 进行图形化管理
|
||||
|
||||
minikube 提供 dashboard 命令支持图形化管理界面。
|
||||
|
||||
```
|
||||
$ minikube dashboard
|
||||
* Verifying dashboard health ...
|
||||
* Launching proxy ...
|
||||
* Verifying proxy health ...
|
||||
* Opening http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser...
|
||||
http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
|
||||
```
|
||||
|
||||
对于某些公有云环境,minikube 绑定在 127.0.0.1 IP 地址上无法通过远程访问,需要使用 kubectl proxy 命令将端口映射到 0.0.0.0 IP 地址上,再通过浏览器访问虚拟机公网 IP 和端口以及相同的 dashboard URL 路径即可远程访问 dashboard。
|
||||
|
||||
```
|
||||
$ kubectl proxy --accept-hosts='^.*$' --address='0.0.0.0'
|
||||
```
|
||||
|
||||
## 集群扩容
|
||||
|
@ -252,14 +197,12 @@ TDengine 集群支持自动扩容:
|
|||
|
||||
```bash
|
||||
kubectl scale statefulsets tdengine --replicas=4
|
||||
|
||||
```
|
||||
|
||||
上面命令行中参数 `--replica=4` 表示要将 TDengine 集群扩容到 4 个节点,执行后首先检查 POD 的状态:
|
||||
|
||||
```bash
|
||||
kubectl get pods -l app=tdengine
|
||||
|
||||
```
|
||||
|
||||
输出如下:
|
||||
|
@ -270,102 +213,112 @@ tdengine-0 1/1 Running 0 161m
|
|||
tdengine-1 1/1 Running 0 161m
|
||||
tdengine-2 1/1 Running 0 32m
|
||||
tdengine-3 1/1 Running 0 32m
|
||||
|
||||
```
|
||||
|
||||
此时 POD 的状态仍然是 Running,TDengine 集群中的 dnode 状态要等 POD 状态为 `ready` 之后才能看到:
|
||||
|
||||
```bash
|
||||
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
|
||||
|
||||
kubectl exec -i -t tdengine-3 -- taos -s "show dnodes"
|
||||
```
|
||||
|
||||
扩容后的四节点 TDengine 集群的 dnode 列表:
|
||||
|
||||
```
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 14:07:27.078 | |
|
||||
4 | tdengine-3.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 14:07:48.362 | |
|
||||
Query OK, 4 row(s) in set (0.001293s)
|
||||
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||
============================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
|
||||
4 | tdengine-3.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:33:16.039 | |
|
||||
Query OK, 4 rows in database (0.008377s)
|
||||
```
|
||||
|
||||
## 集群缩容
|
||||
|
||||
TDengine 的缩容并没有自动化,我们尝试将一个三节点集群缩容到两节点。
|
||||
由于 TDengine 集群在扩缩容时会对数据进行节点间迁移,使用 kubectl 命令进行缩容需要首先使用 "drop dnodes" 命令,节点删除完成后再进行 Kubernetes 集群缩容。
|
||||
|
||||
首先,确认一个三节点 TDengine 集群正常工作,在 TDengine CLI 中查看 dnode 的状态:
|
||||
注意:由于 Kubernetes Statefulset 中 Pod 的只能按创建顺序逆序移除,所以 TDengine drop dnode 也需要按照创建顺序逆序移除,否则会导致 Pod 处于错误状态。
|
||||
|
||||
```
|
||||
$ kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 4"
|
||||
```
|
||||
|
||||
```bash
|
||||
$ kubectl exec -it tdengine-0 -- taos -s "show dnodes"
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:28:49.787 | |
|
||||
Query OK, 3 row(s) in set (0.001101s)
|
||||
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||
============================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
|
||||
Query OK, 3 rows in database (0.004861s)
|
||||
```
|
||||
|
||||
想要安全的缩容,首先需要将节点从 dnode 列表中移除,也即从集群中移除:
|
||||
确认移除成功后(使用 kubectl exec -i -t tdengine-0 -- taos -s "show dnodes" 查看和确认 dnode 列表),使用 kubectl 命令移除 POD:
|
||||
|
||||
```
|
||||
kubectl scale statefulsets tdengine --replicas=3
|
||||
```
|
||||
|
||||
最后一个 POD 将会被删除。使用命令 kubectl get pods -l app=tdengine 查看POD状态:
|
||||
|
||||
```
|
||||
$ kubectl get pods -l app=tdengine
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
tdengine-0 1/1 Running 0 4m7s
|
||||
tdengine-1 1/1 Running 0 3m55s
|
||||
tdengine-2 1/1 Running 0 2m28s
|
||||
```
|
||||
|
||||
POD删除后,需要手动删除PVC,否则下次扩容时会继续使用以前的数据导致无法正常加入集群。
|
||||
|
||||
```bash
|
||||
kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 'tdengine-2.taosd.default.svc.cluster.local:6030'"
|
||||
|
||||
```
|
||||
|
||||
通过 `show dondes` 命令确认移除成功后,移除相应的 POD:
|
||||
|
||||
```bash
|
||||
kubectl scale statefulsets tdengine --replicas=2
|
||||
|
||||
```
|
||||
|
||||
最后一个 POD 会被删除,使用 `kubectl get pods -l app=tdengine` 查看集群状态:
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
tdengine-0 1/1 Running 0 3h40m
|
||||
tdengine-1 1/1 Running 0 3h40m
|
||||
|
||||
```
|
||||
|
||||
POD 删除后,需要手动删除 PVC,否则下次扩容时会继续使用以前的数据导致无法正常加入集群。
|
||||
|
||||
```bash
|
||||
kubectl delete pvc taosdata-tdengine-2
|
||||
|
||||
$ kubectl delete pvc taosdata-tdengine-3
|
||||
```
|
||||
|
||||
此时的集群状态是安全的,需要时还可以再次进行扩容:
|
||||
|
||||
```bash
|
||||
kubectl scale statefulsets tdengine --replicas=3
|
||||
$ kubectl scale statefulsets tdengine --replicas=4
|
||||
statefulset.apps/tdengine scaled
|
||||
it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
tdengine-0 1/1 Running 0 35m
|
||||
tdengine-1 1/1 Running 0 34m
|
||||
tdengine-2 1/1 Running 0 12m
|
||||
tdengine-3 0/1 ContainerCreating 0 4s
|
||||
it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
tdengine-0 1/1 Running 0 35m
|
||||
tdengine-1 1/1 Running 0 34m
|
||||
tdengine-2 1/1 Running 0 12m
|
||||
tdengine-3 0/1 Running 0 7s
|
||||
it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl exec -it tdengine-0 -- taos -s "show dnodes"
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
Server is Community Edition.
|
||||
|
||||
|
||||
```
|
||||
|
||||
`show dnodes` 输出如下:
|
||||
|
||||
```
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
|
||||
4 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:40:49.177 | |
|
||||
|
||||
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | |
|
||||
5 | tdengine-2.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 18:01:36.479 | |
|
||||
6 | tdengine-3.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 18:13:54.411 | |
|
||||
Query OK, 4 row(s) in set (0.001348s)
|
||||
```
|
||||
|
||||
## 删除集群
|
||||
## 清理 TDengine 集群
|
||||
|
||||
完整移除 TDengine 集群,需要分别清理 statefulset、svc、configmap、pvc。
|
||||
|
||||
|
@ -381,26 +334,26 @@ kubectl delete configmap taoscfg
|
|||
|
||||
### 错误一
|
||||
|
||||
扩容到四节点之后缩容到两节点,删除的 POD 会进入 offline 状态:
|
||||
未进行 "drop dnode" 直接进行缩容,由于 TDengine 尚未删除节点,缩容 pod 导致 TDengine 集群中部分节点处于 offline 状态。
|
||||
|
||||
```
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
$ kubectl exec -it tdengine-0 -- taos -s "show dnodes"
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
Server is Community Edition.
|
||||
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 40 | offline | any | 2021-06-01 14:07:27.078 | status msg timeout |
|
||||
4 | tdengine-3.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 14:07:48.362 | status msg timeout |
|
||||
Query OK, 4 row(s) in set (0.001236s)
|
||||
|
||||
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | |
|
||||
5 | tdengine-2.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:01:36.479 | status msg timeout |
|
||||
6 | tdengine-3.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:13:54.411 | status msg timeout |
|
||||
Query OK, 4 row(s) in set (0.001323s)
|
||||
```
|
||||
|
||||
但 `drop dnode` 的行为按不会按照预期进行,且下次集群重启后,所有的 dnode 节点将无法启动 dropping 状态无法退出。
|
||||
|
||||
### 错误二
|
||||
|
||||
TDengine 集群会持有 replica 参数,如果缩容后的节点数小于这个值,集群将无法使用:
|
||||
|
|
|
@ -22,7 +22,7 @@ Helm 会使用 kubectl 和 kubeconfig 的配置来操作 Kubernetes,可以参
|
|||
TDengine Chart 尚未发布到 Helm 仓库,当前可以从 GitHub 直接下载:
|
||||
|
||||
```bash
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/main/helm/tdengine-0.3.0.tgz
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz
|
||||
|
||||
```
|
||||
|
||||
|
@ -38,7 +38,7 @@ kubectl get storageclass
|
|||
之后,使用 helm 命令安装:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-0.3.0.tgz \
|
||||
helm install tdengine tdengine-3.0.0.tgz \
|
||||
--set storage.className=<your storage class name>
|
||||
|
||||
```
|
||||
|
@ -46,7 +46,7 @@ helm install tdengine tdengine-0.3.0.tgz \
|
|||
在 minikube 环境下,可以设置一个较小的容量避免超出磁盘可用空间:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-0.3.0.tgz \
|
||||
helm install tdengine tdengine-3.0.0.tgz \
|
||||
--set storage.className=standard \
|
||||
--set storage.dataSize=2Gi \
|
||||
--set storage.logSize=10Mi
|
||||
|
@ -83,14 +83,14 @@ TDengine 支持 `values.yaml` 自定义。
|
|||
通过 helm show values 可以获取 TDengine Chart 支持的全部 values 列表:
|
||||
|
||||
```bash
|
||||
helm show values tdengine-0.3.0.tgz
|
||||
helm show values tdengine-3.0.0.tgz
|
||||
|
||||
```
|
||||
|
||||
你可以将结果保存为 values.yaml,之后可以修改其中的各项参数,如 replica 数量,存储类名称,容量大小,TDengine 配置等,然后使用如下命令安装 TDengine 集群:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-0.3.0.tgz -f values.yaml
|
||||
helm install tdengine tdengine-3.0.0.tgz -f values.yaml
|
||||
|
||||
```
|
||||
|
||||
|
@ -107,37 +107,17 @@ image:
|
|||
prefix: tdengine/tdengine
|
||||
#pullPolicy: Always
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
#tag: "2.4.0.5"
|
||||
# tag: "3.0.0.0"
|
||||
|
||||
service:
|
||||
# ClusterIP is the default service type, use NodeIP only if you know what you are doing.
|
||||
type: ClusterIP
|
||||
ports:
|
||||
# TCP range required
|
||||
tcp:
|
||||
[
|
||||
6030,
|
||||
6031,
|
||||
6032,
|
||||
6033,
|
||||
6034,
|
||||
6035,
|
||||
6036,
|
||||
6037,
|
||||
6038,
|
||||
6039,
|
||||
6040,
|
||||
6041,
|
||||
6042,
|
||||
6043,
|
||||
6044,
|
||||
6045,
|
||||
6060,
|
||||
]
|
||||
# UDP range 6030-6039
|
||||
udp: [6030, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039]
|
||||
tcp: [6030, 6041, 6042, 6043, 6044, 6046, 6047, 6048, 6049, 6060]
|
||||
# UDP range
|
||||
udp: [6044, 6045]
|
||||
|
||||
arbitrator: true
|
||||
|
||||
# Set timezone here, not in taoscfg
|
||||
timezone: "Asia/Shanghai"
|
||||
|
@ -182,11 +162,14 @@ clusterDomainSuffix: ""
|
|||
#
|
||||
# Btw, keep quotes "" around the value like below, even the value will be number or not.
|
||||
taoscfg:
|
||||
# Starts as cluster or not, must be 0 or 1.
|
||||
# 0: all pods will start as a seperate TDengine server
|
||||
# 1: pods will start as TDengine server cluster. [default]
|
||||
CLUSTER: "1"
|
||||
|
||||
# number of replications, for cluster only
|
||||
TAOS_REPLICA: "1"
|
||||
|
||||
# number of management nodes in the system
|
||||
TAOS_NUM_OF_MNODES: "1"
|
||||
|
||||
# number of days per DB file
|
||||
# TAOS_DAYS: "10"
|
||||
|
@ -422,7 +405,7 @@ kubectl --namespace default exec $POD_NAME -- taos -s 'drop dnode "<you dnode in
|
|||
|
||||
```
|
||||
|
||||
## 删除集群
|
||||
## 清理集群
|
||||
|
||||
Helm 管理下,清理操作也变得简单:
|
||||
|
||||
|
|
|
@ -89,10 +89,6 @@ T = 最新事件时间 - watermark
|
|||
|
||||
无论在哪种模式下,watermark 都应该被妥善设置,来得到正确结果(直接丢弃模式)或避免频繁触发重算带来的性能开销(重新计算模式)。
|
||||
|
||||
## 流式计算的数据填充策略
|
||||
|
||||
TODO
|
||||
|
||||
## 流式计算与会话窗口(session window)
|
||||
|
||||
```sql
|
||||
|
@ -105,14 +101,6 @@ window_clause: {
|
|||
|
||||
其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。
|
||||
|
||||
## 流式计算的监控与流任务分布查询
|
||||
|
||||
TODO
|
||||
|
||||
## 流式计算的内存控制与存算分离
|
||||
|
||||
TODO
|
||||
|
||||
## 流式计算的暂停与恢复
|
||||
|
||||
```sql
|
||||
|
|
|
@ -0,0 +1,129 @@
|
|||
---
|
||||
sidebar_label: 性能数据库
|
||||
title: 性能数据库
|
||||
---
|
||||
|
||||
TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其中存储了与性能有关的统计数据。本节详细介绍其中的表和详细的表结构。
|
||||
|
||||
## PERF_APP
|
||||
|
||||
提供接入集群的应用(客户端)的相关信息。也可以使用 SHOW APPS 来查询这些信息。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :----------: | ------------ | ------------------------------- |
|
||||
| 1 | app_id | UBIGINT | 客户端 ID |
|
||||
| 2 | ip | BINARY(16) | 客户端地址 |
|
||||
| 3 | pid | INT | 客户端进程 号 |
|
||||
| 4 | name | BINARY(24) | 客户端名称 |
|
||||
| 5 | start_time | TIMESTAMP | 客户端启动时间 |
|
||||
| 6 | insert_req | UBIGINT | insert 请求次数 |
|
||||
| 7 | insert_row | UBIGINT | insert 插入行数 |
|
||||
| 8 | insert_time | UBIGINT | insert 请求的处理时间,单位微秒 |
|
||||
| 9 | insert_bytes | UBIGINT | insert 请求消息字节数 |
|
||||
| 10 | fetch_bytes | UBIGINT | 查询结果字节数 |
|
||||
| 11 | query_time | UBIGINT | 查询请求处理时间 |
|
||||
| 12 | slow_query | UBIGINT | 慢查询(处理时间 >= 3 秒)个数 |
|
||||
| 13 | total_req | UBIGINT | 总请求数 |
|
||||
| 14 | current_req | UBIGINT | 当前正在处理的请求个数 |
|
||||
| 15 | last_access | TIMESTAMP | 最后更新时间 |
|
||||
|
||||
## PERF_CONNECTIONS
|
||||
|
||||
数据库的连接的相关信息。也可以使用 SHOW CONNECTIONS 来查询这些信息。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | -------------------------------------------------- |
|
||||
| 1 | conn_id | INT | 连接 ID |
|
||||
| 2 | user | BINARY(24) | 用户名 |
|
||||
| 3 | app | BINARY(24) | 客户端名称 |
|
||||
| 4 | pid | UINT | 发起此连接的客户端在自己所在服务器或主机上的进程号 |
|
||||
| 5 | end_point | BINARY(128) | 客户端地址 |
|
||||
| 6 | login_time | TIMESTAMP | 登录时间 |
|
||||
| 7 | last_access | TIMESTAMP | 最后更新时间 |
|
||||
|
||||
## PERF_QUERIES
|
||||
|
||||
提供当前正在执行的 SQL 语句的信息。也可以使用 SHOW QUERIES 来查询这些信息。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :----------: | ------------ | ---------------------------- |
|
||||
| 1 | kill_id | UBIGINT | 用来停止查询的 ID |
|
||||
| 2 | query_id | INT | 查询 ID |
|
||||
| 3 | conn_id | UINT | 连接 ID |
|
||||
| 4 | app | BINARY(24) | app 名称 |
|
||||
| 5 | pid | INT | app 在自己所在主机上的进程号 |
|
||||
| 6 | user | BINARY(24) | 用户名 |
|
||||
| 7 | end_point | BINARY(16) | 客户端地址 |
|
||||
| 8 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 9 | exec_usec | BIGINT | 已执行时间 |
|
||||
| 10 | stable_query | BOOL | 是否是超级表查询 |
|
||||
| 11 | sub_num | INT | 子查询数量 |
|
||||
| 12 | sub_status | BINARY(1000) | 子查询状态 |
|
||||
| 13 | sql | BINARY(1024) | SQL 语句 |
|
||||
|
||||
## PERF_TOPICS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------------------------ |
|
||||
| 1 | topic_name | BINARY(192) | topic 名称 |
|
||||
| 2 | db_name | BINARY(64) | topic 相关的 DB |
|
||||
| 3 | create_time | TIMESTAMP | topic 的 创建时间 |
|
||||
| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
|
||||
|
||||
## PERF_CONSUMERS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------: | ------------ | ----------------------------------------------------------- |
|
||||
| 1 | consumer_id | BIGINT | 消费者的唯一 ID |
|
||||
| 2 | consumer_group | BINARY(192) | 消费者组 |
|
||||
| 3 | client_id | BINARY(192) | 用户自定义字符串,通过创建 consumer 时指定 client_id 来展示 |
|
||||
| 4 | status | BINARY(20) | 消费者当前状态 |
|
||||
| 5 | topics | BINARY(204) | 被订阅的 topic。若订阅多个 topic,则展示为多行 |
|
||||
| 6 | up_time | TIMESTAMP | 第一次连接 taosd 的时间 |
|
||||
| 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 |
|
||||
| 8 | rebalance_time | TIMESTAMP | 上一次触发 rebalance 的时间 |
|
||||
|
||||
## PERF_SUBSCRIPTIONS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------: | ------------ | ------------------------ |
|
||||
| 1 | topic_name | BINARY(204) | 被订阅的 topic |
|
||||
| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
|
||||
| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
|
||||
| 4 | consumer_id | BIGINT | 消费者的唯一 id |
|
||||
|
||||
## PERF_TRANS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :--------------: | ------------ | -------- |
|
||||
| 1 | id | INT | |
|
||||
| 2 | create_time | TIMESTAMP | |
|
||||
| 3 | stage | BINARY(12) | |
|
||||
| 4 | db1 | BINARY(64) | |
|
||||
| 5 | db2 | BINARY(64) | |
|
||||
| 6 | failed_times | INT | |
|
||||
| 7 | last_exec_time | TIMESTAMP | |
|
||||
| 8 | last_action_info | BINARY(511) | |
|
||||
|
||||
## PERF_SMAS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------------------------------------- |
|
||||
| 1 | sma_name | BINARY(192) | 时间维度的预计算 (time-range-wise sma) 名称 |
|
||||
| 2 | create_time | TIMESTAMP | sma 创建时间 |
|
||||
| 3 | stable_name | BINARY(192) | sma 所属的超级表名称 |
|
||||
| 4 | vgroup_id | INT | sma 专属的 vgroup 名称 |
|
||||
|
||||
## PERF_STREAMS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :----------: | ------------ | --------------------------------------- |
|
||||
| 1 | stream_name | BINARY(64) | 流计算名称 |
|
||||
| 2 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
|
||||
| 4 | status | BIANRY(20) | 流当前状态 |
|
||||
| 5 | source_db | BINARY(64) | 源数据库 |
|
||||
| 6 | target_db | BIANRY(64) | 目的数据库 |
|
||||
| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
|
||||
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
|
||||
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
|
|
@ -10,14 +10,14 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
|||
|
||||
目前 TDengine 的原生接口连接器可支持的平台包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。对照矩阵如下:
|
||||
|
||||
| **CPU** | **OS** | **JDBC** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ |
|
||||
| **CPU** | **OS** | **Java** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ |
|
||||
| -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- |
|
||||
| **X86 64bit** | **Linux** | ● | ● | ● | ● | ● | ● | ● |
|
||||
| **X86 64bit** | **Win64** | ● | ● | ● | ● | ● | ● | ● |
|
||||
| **X86 64bit** | **Win32** | ● | ● | ● | ● | ○ | ○ | ● |
|
||||
| **X86 32bit** | **Win32** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
|
||||
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
|
||||
| **ARM32** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
|
||||
| **ARM32** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
|
||||
| **MIPS 龙芯** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ○ |
|
||||
| **Alpha 申威** | **Linux** | ○ | ○ | -- | -- | -- | -- | ○ |
|
||||
| **X86 海光** | **Linux** | ○ | ○ | ○ | -- | -- | -- | ○ |
|
||||
|
@ -32,6 +32,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
|
||||
| **TDengine 版本** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
||||
| --------------------- | -------- | ---------- | ------------ | ------------- | --------------- | -------- |
|
||||
| **3.0.0.0 及以上** | 3.0.0 | 当前版本 | 3.0 分支 | 3.0.0 | 3.0.0 | 当前版本 |
|
||||
| **2.4.0.14 及以上** | 2.0.38 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 |
|
||||
| **2.4.0.6 及以上** | 2.0.37 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 |
|
||||
| **2.4.0.4 - 2.4.0.5** | 2.0.37 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 |
|
||||
|
@ -50,7 +51,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **连续查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **订阅功能** | 支持 | 支持 | 支持 | 支持 | 支持 | 暂不支持 |
|
||||
| ** TMQ ** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
||||
|
||||
|
@ -58,17 +59,17 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
由于不同编程语言数据库框架规范不同,并不意味着所有 C/C++ 接口都需要对应封装支持。
|
||||
:::
|
||||
|
||||
### 使用 REST 接口
|
||||
### 使用 http (REST 或 WebSocket) 接口
|
||||
|
||||
| **功能特性** | **Java** | **Python** | **Go** | **C#(暂不支持)** | **Node.js** | **Rust** |
|
||||
| ------------------------------ | -------- | ---------- | -------- | ------------------ | ----------- | -------- |
|
||||
| **连接管理** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
|
||||
| **普通查询** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
|
||||
| **连续查询** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
|
||||
| **参数绑定** | 不支持 | 不支持 | 不支持 | N/A | 不支持 | 不支持 |
|
||||
| **订阅功能** | 不支持 | 不支持 | 不支持 | N/A | 不支持 | 不支持 |
|
||||
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | N/A | 暂不支持 | 暂不支持 |
|
||||
| **批量拉取(基于 WebSocket)** | 支持 | 暂不支持 | 暂不支持 | N/A | 暂不支持 | 暂不支持 |
|
||||
| **参数绑定** | 不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 支持 |
|
||||
| ** TMQ ** | 不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 支持 |
|
||||
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 暂不支持 |
|
||||
| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 暂不支持 | N/A | 不支持 | 支持 |
|
||||
| **DataFrame** | 不支持 | 支持 | 不支持 | N/A | 不支持 | 不支持 |
|
||||
|
||||
:::warning
|
||||
|
|
|
@ -279,7 +279,7 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
|||
2. 调用 `taos_stmt_prepare()` 解析 INSERT 语句;
|
||||
3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname()` 来设置表名;
|
||||
4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags()` 来设置表名和 TAGS 的值;
|
||||
5. 调用 `taos_stmt_bind_param_batch()` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param()` 以单行的方式设置 VALUES 的值;
|
||||
5. 调用 `taos_stmt_bind_param_batch()` 以多行的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param()` 以单行的方式设置 VALUES 的值;
|
||||
6. 调用 `taos_stmt_add_batch()` 把当前绑定的参数加入批处理;
|
||||
7. 可以重复第 3 ~ 6 步,为批处理加入更多的数据行;
|
||||
8. 调用 `taos_stmt_execute()` 执行已经准备好的批处理指令;
|
||||
|
|
|
@ -306,8 +306,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
|
|||
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | 参数绑定,一次绑定一行 |
|
||||
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB 行协议写入 |
|
||||
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | 使用 JSON 类型的标签 |
|
||||
| [subscribe-async.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-async.py) | 异步订阅 |
|
||||
| [subscribe-sync.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-sync.py) | 同步订阅 |
|
||||
| [tmq.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq.py) | tmq 订阅 |
|
||||
|
||||
## 其它说明
|
||||
|
||||
|
@ -326,23 +325,15 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
|
|||
1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
|
||||
2. https://www.python.org/dev/peps/pep-0564/
|
||||
|
||||
|
||||
## 常见问题
|
||||
|
||||
欢迎[提问或报告问题](https://github.com/taosdata/taos-connector-python/issues)。
|
||||
|
||||
## 重要更新
|
||||
|
||||
| 连接器版本 | 重要更新 | 发布日期 |
|
||||
| ---------- | --------------------------------------------------------------------------------- | ---------- |
|
||||
| 2.3.1 | 1. support TDengine REST API <br/> 2. remove support for Python version below 3.6 | 2022-04-28 |
|
||||
| 2.2.5 | support timezone option when connect | 2022-04-13 |
|
||||
| 2.2.2 | support sqlalchemy dialect plugin | 2022-03-28 |
|
||||
|
||||
|
||||
[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases)
|
||||
|
||||
## API 参考
|
||||
|
||||
- [taos](https://docs.taosdata.com/api/taospy/taos/)
|
||||
- [taosrest](https://docs.taosdata.com/api/taospy/taosrest)
|
||||
|
||||
## 常见问题
|
||||
|
||||
欢迎[提问或报告问题](https://github.com/taosdata/taos-connector-python/issues)。
|
||||
|
|
|
@ -48,29 +48,30 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;
|
|||
|
||||
您可通过配置命令行参数来改变 TDengine CLI 的行为。以下为常用的几个命令行参数:
|
||||
|
||||
- -h, --host=HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认为连接本地服务
|
||||
- -P, --port=PORT: 指定服务端所用端口号
|
||||
- -u, --user=USER: 连接时使用的用户名
|
||||
- -p, --password=PASSWORD: 连接服务端时使用的密码
|
||||
- -h HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认为连接本地服务
|
||||
- -P PORT: 指定服务端所用端口号
|
||||
- -u USER: 连接时使用的用户名
|
||||
- -p PASSWORD: 连接服务端时使用的密码
|
||||
- -?, --help: 打印出所有命令行参数
|
||||
|
||||
还有更多其他参数:
|
||||
|
||||
- -c, --config-dir: 指定配置文件目录,Linux 环境下默认为 `/etc/taos`,该目录下的配置文件默认名称为 `taos.cfg`
|
||||
- -C, --dump-config: 打印 -c 指定的目录中 `taos.cfg` 的配置参数
|
||||
- -d, --database=DATABASE: 指定连接到服务端时使用的数据库
|
||||
- -D, --directory=DIRECTORY: 导入指定路径中的 SQL 脚本文件
|
||||
- -f, --file=FILE: 以非交互模式执行 SQL 脚本文件。文件中一个 SQL 语句只能占一行
|
||||
- -k, --check=CHECK: 指定要检查的表
|
||||
- -l, --pktlen=PKTLEN: 网络测试时使用的测试包大小
|
||||
- -n, --netrole=NETROLE: 网络连接测试时的测试范围,默认为 `startup`, 可选值为 `client`、`server`、`rpc`、`startup`、`sync`、`speed` 和 `fqdn` 之一
|
||||
- -r, --raw-time: 将时间输出出无符号 64 位整数类型(即 C 语音中 uint64_t)
|
||||
- -s, --commands=COMMAND: 以非交互模式执行的 SQL 命令
|
||||
- -S, --pkttype=PKTTYPE: 指定网络测试所用的包类型,默认为 TCP。只有 netrole 为 `speed` 时既可以指定为 TCP 也可以指定为 UDP
|
||||
- -T, --thread=THREADNUM: 以多线程模式导入数据时的线程数
|
||||
- -s, --commands: 在不进入终端的情况下运行 TDengine 命令
|
||||
- -z, --timezone=TIMEZONE: 指定时区,默认为本地时区
|
||||
- -V, --version: 打印出当前版本号
|
||||
- -a AUTHSTR: 连接服务端的授权信息
|
||||
- -A: 通过用户名和密码计算授权信息
|
||||
- -c CONFIGDIR: 指定配置文件目录,Linux 环境下默认为 `/etc/taos`,该目录下的配置文件默认名称为 `taos.cfg`
|
||||
- -C: 打印 -c 指定的目录中 `taos.cfg` 的配置参数
|
||||
- -d DATABASE: 指定连接到服务端时使用的数据库
|
||||
- -f FILE: 以非交互模式执行 SQL 脚本文件。文件中一个 SQL 语句只能占一行
|
||||
- -k: 测试服务端运行状态,0: unavailable,1: network ok,2: service ok,3: service degraded,4: exiting
|
||||
- -l PKTLEN: 网络测试时使用的测试包大小
|
||||
- -n NETROLE: 网络连接测试时的测试范围,默认为 `client`, 可选值为 `client`、`server`
|
||||
- -N PKTNUM: 网络测试时使用的测试包数量
|
||||
- -r: 将时间输出出无符号 64 位整数类型(即 C 语音中 uint64_t)
|
||||
- -s COMMAND: 以非交互模式执行的 SQL 命令
|
||||
- -t: 测试服务端启动状态,状态同-k
|
||||
- -w DISPLAYWIDTH: 客户端列显示宽度
|
||||
- -z TIMEZONE: 指定时区,默认为本地时区
|
||||
- -V: 打印出当前版本号
|
||||
|
||||
示例:
|
||||
|
||||
|
|
|
@ -5,18 +5,11 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表"
|
|||
|
||||
## TDengine 服务端支持的平台列表
|
||||
|
||||
| | **CentOS 7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** |
|
||||
| ------------ | -------------- | ------------------- | --------------- | ------------ | ----------------- | ---------------- | ---------------- |
|
||||
| X64 | ● | ● | | ○ | ● | ● | ● |
|
||||
| 龙芯 MIPS64 | | | ● | | | | |
|
||||
| 鲲鹏 ARM64 | | ○ | ○ | | ● | | |
|
||||
| 申威 Alpha64 | | | ○ | ● | | | |
|
||||
| 飞腾 ARM64 | | ○ 优麒麟 | | | | | |
|
||||
| 海光 X64 | ● | ● | ● | ○ | ● | ● | |
|
||||
| 瑞芯微 ARM64 | | | ○ | | | | |
|
||||
| 全志 ARM64 | | | ○ | | | | |
|
||||
| 炬力 ARM64 | | | ○ | | | | |
|
||||
| 华为云 ARM64 | | | | | | | ● |
|
||||
| | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** |
|
||||
| ------------ | ----------------- | ---------------- | ---------------- | --------------- | ------------ | ----------------- | ---------------- | ---------------- |
|
||||
| X64 | ● | ● | ● | | ● | ● | ● | |
|
||||
| 树莓派 ARM64 | | | | ● | | | | |
|
||||
| 华为云 ARM64 | | | | | | | | ● |
|
||||
|
||||
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@ TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
|
|||
- _taosBenchmark_:TDengine 测试工具
|
||||
- _remove.sh_:卸载 TDengine 的脚本,请谨慎执行,链接到/usr/bin 目录下的**rmtaos**命令。会删除 TDengine 的安装目录/usr/local/taos,但会保留/etc/taos、/var/lib/taos、/var/log/taos
|
||||
- _taosadapter_: 提供 RESTful 服务和接受其他多种软件写入请求的服务端可执行文件
|
||||
- _tarbitrator_: 提供双节点集群部署的仲裁功能
|
||||
- _TDinsight.sh_:用于下载 TDinsight 并安装的脚本
|
||||
- _set_core.sh_:用于方便调试设置系统生成 core dump 文件的脚本
|
||||
- _taosd-dump-cfg.gdb_:用于方便调试 taosd 的 gdb 执行脚本。
|
||||
|
|
|
@ -21,17 +21,17 @@
|
|||
#include "taos.h"
|
||||
|
||||
static int running = 1;
|
||||
static char dbName[64] = "tmqdb";
|
||||
static char stbName[64] = "stb";
|
||||
static char dbName[64] = "tmqdb";
|
||||
static char stbName[64] = "stb";
|
||||
static char topicName[64] = "topicname";
|
||||
|
||||
static int32_t msg_process(TAOS_RES* msg) {
|
||||
char buf[1024];
|
||||
char buf[1024];
|
||||
int32_t rows = 0;
|
||||
|
||||
const char* topicName = tmq_get_topic_name(msg);
|
||||
const char* dbName = tmq_get_db_name(msg);
|
||||
int32_t vgroupId = tmq_get_vgroup_id(msg);
|
||||
const char* dbName = tmq_get_db_name(msg);
|
||||
int32_t vgroupId = tmq_get_vgroup_id(msg);
|
||||
|
||||
printf("topic: %s\n", topicName);
|
||||
printf("db: %s\n", dbName);
|
||||
|
@ -41,14 +41,14 @@ static int32_t msg_process(TAOS_RES* msg) {
|
|||
TAOS_ROW row = taos_fetch_row(msg);
|
||||
if (row == NULL) break;
|
||||
|
||||
TAOS_FIELD* fields = taos_fetch_fields(msg);
|
||||
TAOS_FIELD* fields = taos_fetch_fields(msg);
|
||||
int32_t numOfFields = taos_field_count(msg);
|
||||
int32_t* length = taos_fetch_lengths(msg);
|
||||
int32_t precision = taos_result_precision(msg);
|
||||
const char* tbName = tmq_get_table_name(msg);
|
||||
rows++;
|
||||
int32_t* length = taos_fetch_lengths(msg);
|
||||
int32_t precision = taos_result_precision(msg);
|
||||
const char* tbName = tmq_get_table_name(msg);
|
||||
rows++;
|
||||
taos_print_row(buf, row, fields, numOfFields);
|
||||
printf("row content from %s: %s\n", (tbName != NULL ? tbName : "null table"), buf);
|
||||
printf("row content from %s: %s\n", (tbName != NULL ? tbName : "table null"), buf);
|
||||
}
|
||||
|
||||
return rows;
|
||||
|
@ -80,7 +80,8 @@ static int32_t init_env() {
|
|||
|
||||
// create super table
|
||||
printf("create super table\n");
|
||||
pRes = taos_query(pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
|
||||
pRes = taos_query(
|
||||
pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
|
@ -166,7 +167,6 @@ int32_t create_topic() {
|
|||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
// pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
|
||||
pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
|
||||
|
@ -184,26 +184,28 @@ void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
|
|||
|
||||
tmq_t* build_consumer() {
|
||||
tmq_conf_res_t code;
|
||||
tmq_conf_t* conf = tmq_conf_new();
|
||||
tmq_conf_t* conf = tmq_conf_new();
|
||||
code = tmq_conf_set(conf, "enable.auto.commit", "true");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "group.id", "cgrpName");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "client.id", "user defined name");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "td.connect.user", "root");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "experimental.snapshot.enable", "true");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
|
||||
tmq_conf_destroy(conf);
|
||||
return tmq;
|
||||
|
@ -211,7 +213,7 @@ tmq_t* build_consumer() {
|
|||
|
||||
tmq_list_t* build_topic_list() {
|
||||
tmq_list_t* topicList = tmq_list_new();
|
||||
int32_t code = tmq_list_append(topicList, "topicname");
|
||||
int32_t code = tmq_list_append(topicList, "topicname");
|
||||
if (code) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -228,18 +230,18 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
|
|||
|
||||
int32_t totalRows = 0;
|
||||
int32_t msgCnt = 0;
|
||||
int32_t consumeDelay = 5000;
|
||||
int32_t timeout = 5000;
|
||||
while (running) {
|
||||
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, consumeDelay);
|
||||
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
|
||||
if (tmqmsg) {
|
||||
msgCnt++;
|
||||
totalRows += msg_process(tmqmsg);
|
||||
taos_free_result(tmqmsg);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
/*} else {*/
|
||||
/*break;*/
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
|
||||
}
|
||||
|
||||
|
@ -256,32 +258,30 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
tmq_t* tmq = build_consumer();
|
||||
if (NULL == tmq) {
|
||||
fprintf(stderr, "%% build_consumer() fail!\n");
|
||||
fprintf(stderr, "%% build_consumer() fail!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
tmq_list_t* topic_list = build_topic_list();
|
||||
if (NULL == topic_list) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
basic_consume_loop(tmq, topic_list);
|
||||
|
||||
code = tmq_unsubscribe(tmq);
|
||||
if (code) {
|
||||
fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code));
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
fprintf(stderr, "%% unsubscribe\n");
|
||||
}
|
||||
|
||||
code = tmq_consumer_close(tmq);
|
||||
if (code) {
|
||||
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
fprintf(stderr, "%% Consumer closed\n");
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -196,15 +196,6 @@ DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, vo
|
|||
DLL_EXPORT void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param);
|
||||
DLL_EXPORT const void *taos_get_raw_block(TAOS_RES *res);
|
||||
|
||||
// Shuduo: temporary enable for app build
|
||||
#if 1
|
||||
typedef void (*__taos_sub_fn_t)(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code);
|
||||
DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char *topic, const char *sql, __taos_sub_fn_t fp,
|
||||
void *param, int interval);
|
||||
DLL_EXPORT TAOS_RES *taos_consume(TAOS_SUB *tsub);
|
||||
DLL_EXPORT void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress);
|
||||
#endif
|
||||
|
||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
||||
DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLines, int protocol, int precision);
|
||||
|
||||
|
@ -281,10 +272,6 @@ DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
|
|||
|
||||
/* ------------------------------ TMQ END -------------------------------- */
|
||||
|
||||
#if 1 // Shuduo: temporary enable for app build
|
||||
typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code);
|
||||
#endif
|
||||
|
||||
typedef enum {
|
||||
TSDB_SRV_STATUS_UNAVAILABLE = 0,
|
||||
TSDB_SRV_STATUS_NETWORK_OK = 1,
|
||||
|
|
|
@ -239,7 +239,7 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId
|
|||
SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index);
|
||||
|
||||
void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress);
|
||||
const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData);
|
||||
const char* blockDecode(SSDataBlock* pBlock, const char* pData);
|
||||
|
||||
void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag);
|
||||
void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
|
||||
|
|
|
@@ -200,8 +200,6 @@ struct STag {
#if 1 //================================================================================================================================================
// Imported since 3.0 and use bitmap to demonstrate None/Null/Norm, while use Null/Norm below 3.0 without of bitmap.
#define TD_SUPPORT_BITMAP
#define TD_SUPPORT_READ2
#define TD_SUPPORT_BACK2  // suppport back compatibility of 2.0

#define TASSERT(x) ASSERT(x)

@@ -296,13 +296,13 @@ void tFreeSSubmitRsp(SSubmitRsp* pRsp);
#define COL_IDX_ON   ((int8_t)0x2)
#define COL_SET_NULL ((int8_t)0x10)
#define COL_SET_VAL  ((int8_t)0x20)
typedef struct SSchema {
struct SSchema {
  int8_t   type;
  int8_t   flags;
  col_id_t colId;
  int32_t  bytes;
  char     name[TSDB_COL_NAME_LEN];
} SSchema;
};

#define COL_IS_SET(FLG)  (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0)
#define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL)))

@@ -648,7 +648,7 @@ typedef struct {
  };
  bool output;  // TODO remove it later

  int16_t type;
  int8_t  type;
  int32_t bytes;
  uint8_t precision;
  uint8_t scale;

@@ -1364,12 +1364,13 @@ typedef struct {
  int8_t  compressed;
  int8_t  streamBlockType;
  int32_t compLen;
  int32_t numOfBlocks;
  int32_t numOfRows;
  int32_t numOfCols;
  int64_t skey;
  int64_t ekey;
  int64_t version;    // for stream
  TSKEY   watermark;// for stream
  int64_t version;    // for stream
  TSKEY   watermark;  // for stream
  char    data[];
} SRetrieveTableRsp;

@@ -3079,6 +3080,22 @@ typedef struct SDeleteRes {
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
int32_t tDecodeDeleteRes(SDecoder* pCoder, SDeleteRes* pRes);

typedef struct {
  int64_t uid;
  int64_t ts;
} SSingleDeleteReq;

int32_t tEncodeSSingleDeleteReq(SEncoder* pCoder, const SSingleDeleteReq* pReq);
int32_t tDecodeSSingleDeleteReq(SDecoder* pCoder, SSingleDeleteReq* pReq);

typedef struct {
  int64_t suid;
  SArray* deleteReqs;  // SArray<SSingleDeleteReq>
} SBatchDeleteReq;

int32_t tEncodeSBatchDeleteReq(SEncoder* pCoder, const SBatchDeleteReq* pReq);
int32_t tDecodeSBatchDeleteReq(SDecoder* pCoder, SBatchDeleteReq* pReq);

typedef struct {
  int32_t msgIdx;
  int32_t msgType;

@@ -202,6 +202,7 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp)
  TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp)
  TD_DEF_MSG_TYPE(TDMT_VND_BATCH_DEL, "batch-delete", SBatchDeleteReq, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIRM, "alter-confirm", NULL, NULL)

@@ -38,7 +38,8 @@ typedef struct {
      uint16_t type : 2;
      uint16_t del : 1;
      uint16_t endian : 1;
      uint16_t reserve : 12;
      uint16_t statis : 1;  // 0 all normal, 1 has null or none
      uint16_t reserve : 11;
      uint16_t sver;
    };
  };

@@ -149,6 +150,8 @@ typedef struct {
  void    *pBitmap;
  void    *pOffset;
  int32_t  extendedRowSize;
  bool     hasNone;
  bool     hasNull;
} SRowBuilder;

#define TD_ROW_HEAD_LEN (sizeof(STSRow))

@@ -287,9 +290,13 @@ int32_t tdSRowSetTpInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t flen);
int32_t tdSRowSetExtendedInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBoundCols, int32_t flen,
                              int32_t allNullLen, int32_t boundNullLen);
int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf);
static FORCE_INLINE void tdSRowEnd(SRowBuilder *pBuilder) {
  STSRow *pRow = (STSRow *)pBuilder->pBuf;
  if (pBuilder->hasNull || pBuilder->hasNone) {
    pRow->statis = 1;
  }
}
int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf);
int32_t tdSRowInitEx(SRowBuilder *pBuilder, void *pBuf, uint32_t allNullLen, uint32_t boundNullLen, int32_t nCols,
                     int32_t nBoundCols, int32_t flen);
void    tdSRowReset(SRowBuilder *pBuilder);
int32_t tdAppendColValToTpRow(SRowBuilder *pBuilder, TDRowValT valType, const void *val, bool isCopyVarData,
                              int8_t colType, int16_t colIdx, int32_t offset);

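The new tdSRowEnd() helper is meant to run once after the last column value has been appended, so that STSRow.statis reflects the hasNull/hasNone flags the builder accumulated. A hedged sketch of the call order implied by the callers changed later in this commit (taos_write_raw_block / tmqWriteRaw), with placeholder parameters and error handling elided, is:

```c
// Hedged sketch only: rb setup (tdSRowSetTpInfo/tdSRowInitEx) is assumed to have
// happened already, and values/offsets are hypothetical caller-provided arrays.
static void build_one_row(SRowBuilder* rb, void* rowBuf, const SSchema* pSchema, int32_t numOfCols,
                          const void** values, const int32_t* offsets) {
  tdSRowResetBuf(rb, rowBuf);  // point the builder at this row's buffer
  for (int32_t k = 0; k < numOfCols; ++k) {
    TDRowValT vt = (values[k] != NULL) ? TD_VTYPE_NORM : TD_VTYPE_NULL;
    // appending a NULL/None cell is what raises rb->hasNull / rb->hasNone
    tdAppendColValToRow(rb, pSchema[k].colId, pSchema[k].type, vt, values[k], true, offsets[k], k);
  }
  tdSRowEnd(rb);  // must be last: sets STSRow.statis when any null/none cell was appended
}
```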
@@ -312,21 +319,17 @@ typedef struct {
  col_id_t kvIdx;  // [0, nKvCols)
} STSRowIter;

void    tdSTSRowIterReset(STSRowIter *pIter, STSRow *pRow);
void    tdSTSRowIterInit(STSRowIter *pIter, STSchema *pSchema);
void    tdSTSRowIterInit(STSRowIter *pIter, STSchema *pSchema);
void    tdSTSRowIterReset(STSRowIter *pIter, STSRow *pRow);
bool    tdSTSRowIterFetch(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
bool    tdSTSRowIterNext(STSRowIter *pIter, SCellVal *pVal);

int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow);
bool    tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
bool    tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal);
bool    tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType, col_id_t *nIdx, SCellVal *pVal);
bool    tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
bool    tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t flen, uint32_t offset, col_id_t colIdx,
                       SCellVal *pVal);
bool    tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal);
void    tdSCellValPrint(SCellVal *pVal, int8_t colType);
void    tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag);

#ifdef __cplusplus
}
#endif

#endif /*_TD_COMMON_ROW_H_*/
#endif /*_TD_COMMON_ROW_H_*/

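For orientation, a hedged sketch of scanning one row with the reordered iterator API above; the Init-then-Reset ordering and the SCellVal member names are assumptions based on common usage, not spelled out by this hunk:

```c
// Sketch only: pSchema/pRow come from the caller, and cell.valType / cell.val
// are assumed SCellVal field names (not confirmed by this diff).
static void scan_row(STSchema* pSchema, STSRow* pRow) {
  STSRowIter iter = {0};
  tdSTSRowIterInit(&iter, pSchema);  // bind the schema once
  tdSTSRowIterReset(&iter, pRow);    // then rebind for each row to scan
  SCellVal cell = {0};
  while (tdSTSRowIterNext(&iter, &cell)) {
    // cell.valType distinguishes norm/null/none; cell.val points at the cell data
  }
}
```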
@ -67,6 +67,7 @@ typedef struct SInputData {
|
|||
} SInputData;
|
||||
|
||||
typedef struct SOutputData {
|
||||
int32_t numOfBlocks;
|
||||
int32_t numOfRows;
|
||||
int32_t numOfCols;
|
||||
int8_t compressed;
|
||||
|
|
|
@ -123,7 +123,8 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
|
|||
* @param handle
|
||||
* @return
|
||||
*/
|
||||
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds);
|
||||
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds);
|
||||
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds);
|
||||
|
||||
/**
|
||||
* kill the ongoing query and free the query handle and corresponding resources automatically
|
||||
|
|
|
@ -54,10 +54,6 @@ typedef struct SFuncExecFuncs {
|
|||
FExecCombine combine;
|
||||
} SFuncExecFuncs;
|
||||
|
||||
typedef struct SFileBlockInfo {
|
||||
int32_t numBlocksOfStep;
|
||||
} SFileBlockInfo;
|
||||
|
||||
#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
|
||||
|
||||
#define TOP_BOTTOM_QUERY_LIMIT 100
|
||||
|
@ -171,8 +167,6 @@ typedef struct tExprNode {
|
|||
};
|
||||
} tExprNode;
|
||||
|
||||
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
|
||||
|
||||
struct SScalarParam {
|
||||
bool colAlloced;
|
||||
SColumnInfoData *columnData;
|
||||
|
@ -182,14 +176,10 @@ struct SScalarParam {
|
|||
int32_t numOfRows;
|
||||
};
|
||||
|
||||
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, SResultDataInfo* pInfo, int16_t extLength,
|
||||
bool isSuperTable);
|
||||
|
||||
void resetResultRowEntryResult(SqlFunctionCtx* pCtx, int32_t num);
|
||||
void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell);
|
||||
void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell);
|
||||
int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock);
|
||||
bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry);
|
||||
bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry);
|
||||
bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry);
|
||||
bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry);
|
||||
|
||||
typedef struct SPoint {
|
||||
int64_t key;
|
||||
|
|
|
@ -380,6 +380,7 @@ typedef struct SAggPhysiNode {
|
|||
SNodeList* pExprs; // these are expression list of group_by_clause and parameter expression of aggregate function
|
||||
SNodeList* pGroupKeys;
|
||||
SNodeList* pAggFuncs;
|
||||
bool mergeDataBlock;
|
||||
} SAggPhysiNode;
|
||||
|
||||
typedef struct SDownstreamSourceNode {
|
||||
|
@ -418,6 +419,7 @@ typedef struct SWinodwPhysiNode {
|
|||
int8_t igExpired;
|
||||
EOrder inputTsOrder;
|
||||
EOrder outputTsOrder;
|
||||
bool mergeDataBlock;
|
||||
} SWinodwPhysiNode;
|
||||
|
||||
typedef struct SIntervalPhysiNode {
|
||||
|
|
|
@ -44,7 +44,7 @@ extern int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict
|
|||
extern int32_t filterConverNcharColumns(SFilterInfo *pFilterInfo, int32_t rows, bool *gotNchar);
|
||||
extern int32_t filterFreeNcharColumns(SFilterInfo *pFilterInfo);
|
||||
extern void filterFreeInfo(SFilterInfo *info);
|
||||
extern bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t numOfCols, int32_t numOfRows);
|
||||
extern bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg **pColsAgg, int32_t numOfCols, int32_t numOfRows);
|
||||
|
||||
/* condition split interface */
|
||||
int32_t filterPartitionCond(SNode **pCondition, SNode **pPrimaryKeyCond, SNode **pTagIndexCond, SNode **pTagCond,
|
||||
|
|
|
@ -118,7 +118,7 @@ typedef struct {
|
|||
int64_t sourceVer;
|
||||
int64_t reqId;
|
||||
|
||||
SArray* blocks; // SArray<SSDataBlock*>
|
||||
SArray* blocks; // SArray<SSDataBlock>
|
||||
} SStreamDataBlock;
|
||||
|
||||
typedef struct {
|
||||
|
@ -154,7 +154,10 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
|
|||
atomic_store_8(&queue->status, STREAM_QUEUE__FAILED);
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) { return queue->qItem; }
|
||||
static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) {
|
||||
//
|
||||
return queue->qItem;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) {
|
||||
int8_t dequeueFlag = atomic_exchange_8(&queue->status, STREAM_QUEUE__PROCESSING);
|
||||
|
@ -226,9 +229,7 @@ typedef struct {
|
|||
int32_t nodeId;
|
||||
int32_t childId;
|
||||
int32_t taskId;
|
||||
// int64_t checkpointVer;
|
||||
// int64_t processedVer;
|
||||
SEpSet epSet;
|
||||
SEpSet epSet;
|
||||
} SStreamChildEpInfo;
|
||||
|
||||
typedef struct {
|
||||
|
@ -372,15 +373,6 @@ static FORCE_INLINE int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBloc
|
|||
return 0;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int32_t reserved;
|
||||
} SStreamTaskDeployRsp;
|
||||
|
||||
typedef struct {
|
||||
// SMsgHead head;
|
||||
SStreamTask* task;
|
||||
} SStreamTaskDeployReq;
|
||||
|
||||
typedef struct {
|
||||
SMsgHead head;
|
||||
int64_t streamId;
|
||||
|
@ -478,7 +470,18 @@ typedef struct {
|
|||
} SStreamRecoverDownstreamRsp;
|
||||
|
||||
int32_t tEncodeSStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamRecoverDownstreamReq* pReq);
|
||||
int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, const SStreamRecoverDownstreamRsp* pRsp);
|
||||
int32_t tDecodeSStreamTaskRecoverReq(SDecoder* pDecoder, SStreamRecoverDownstreamReq* pReq);
|
||||
|
||||
int32_t tEncodeSStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamRecoverDownstreamRsp* pRsp);
|
||||
int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstreamRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int32_t taskId;
|
||||
int32_t waitingRspCnt;
|
||||
int32_t totReq;
|
||||
SArray* info; // SArray<SArray<SStreamCheckpointInfo>*>
|
||||
} SStreamRecoverStatus;
|
||||
|
||||
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
|
||||
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
|
||||
|
@ -504,7 +507,7 @@ typedef struct SStreamMeta {
|
|||
TTB* pTaskDb;
|
||||
TTB* pStateDb;
|
||||
SHashObj* pTasks;
|
||||
SHashObj* pRecoveringState;
|
||||
SHashObj* pRecoverStatus;
|
||||
void* ahandle;
|
||||
TXN txn;
|
||||
FTaskExpand* expandFunc;
|
||||
|
|
|
@ -129,6 +129,9 @@ typedef struct SSyncFSM {
|
|||
void (*FpReConfigCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SReConfigCbMeta cbMeta);
|
||||
void (*FpLeaderTransferCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
|
||||
|
||||
void (*FpBecomeLeaderCb)(struct SSyncFSM* pFsm);
|
||||
void (*FpBecomeFollowerCb)(struct SSyncFSM* pFsm);
|
||||
|
||||
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot, void* pReaderParam, void** ppReader);
|
||||
int32_t (*FpGetSnapshotInfo)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
|
||||
|
||||
|
|
|
@ -47,8 +47,6 @@ typedef struct SRpcHandleInfo {
|
|||
int8_t persistHandle; // persist handle or not
|
||||
int8_t hasEpSet;
|
||||
|
||||
STraceId traceId;
|
||||
|
||||
// app info
|
||||
void *ahandle; // app handle set by client
|
||||
void *wrapper; // wrapper handle
|
||||
|
@ -58,7 +56,8 @@ typedef struct SRpcHandleInfo {
|
|||
void *rsp;
|
||||
int32_t rspLen;
|
||||
|
||||
// conn info
|
||||
STraceId traceId;
|
||||
|
||||
SRpcConnInfo conn;
|
||||
} SRpcHandleInfo;
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@ extern "C" {
|
|||
typedef struct TdCmd *TdCmdPtr;
|
||||
|
||||
TdCmdPtr taosOpenCmd(const char* cmd);
|
||||
int64_t taosGetsCmd(TdCmdPtr pCmd, int32_t maxSize, char *__restrict buf);
|
||||
int64_t taosGetLineCmd(TdCmdPtr pCmd, char** __restrict ptrBuf);
|
||||
int32_t taosEOFCmd(TdCmdPtr pCmd);
|
||||
int64_t taosCloseCmd(TdCmdPtr* ppCmd);
|
||||
|
|
|
@@ -284,11 +284,13 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_CONSUMER_NOT_READY     TAOS_DEF_ERROR_CODE(0, 0x03EA)
#define TSDB_CODE_MND_TOPIC_SUBSCRIBED       TAOS_DEF_ERROR_CODE(0, 0x03EB)
#define TSDB_CODE_MND_CGROUP_USED            TAOS_DEF_ERROR_CODE(0, 0x03EC)
#define TSDB_CODE_MND_TOPIC_MUST_BE_DELETED  TAOS_DEF_ERROR_CODE(0, 0x03ED)

// mnode-stream
#define TSDB_CODE_MND_STREAM_ALREADY_EXIST   TAOS_DEF_ERROR_CODE(0, 0x03F0)
#define TSDB_CODE_MND_STREAM_NOT_EXIST       TAOS_DEF_ERROR_CODE(0, 0x03F1)
#define TSDB_CODE_MND_INVALID_STREAM_OPTION  TAOS_DEF_ERROR_CODE(0, 0x03F2)
#define TSDB_CODE_MND_STREAM_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03F3)

// mnode-sma
#define TSDB_CODE_MND_SMA_ALREADY_EXIST      TAOS_DEF_ERROR_CODE(0, 0x0480)

@@ -619,6 +621,7 @@ int32_t* taosGetErrno();

//tmq
#define TSDB_CODE_TMQ_INVALID_MSG            TAOS_DEF_ERROR_CODE(0, 0x4000)
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH      TAOS_DEF_ERROR_CODE(0, 0x4001)

#ifdef __cplusplus
}

@@ -396,7 +396,7 @@ typedef enum ELogicConditionType {
#ifdef WINDOWS
#define TSDB_MAX_RPC_THREADS 4  // windows pipe only support 4 connections.
#else
#define TSDB_MAX_RPC_THREADS 5
#define TSDB_MAX_RPC_THREADS 10
#endif

#define TSDB_QUERY_TYPE_NON_TYPE 0x00u  // none type

@ -60,7 +60,7 @@ cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_pat
|
|||
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
|
||||
|
||||
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
|
||||
#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
|
||||
|
||||
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
|
||||
cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
|
||||
|
|
|
@ -11,13 +11,13 @@ if !%2==! GOTO USAGE
|
|||
if "%1" == "cluster" (
|
||||
set work_dir=%internal_dir%
|
||||
set packagServerName_x64=TDengine-enterprise-server-%2-beta-Windows-x64
|
||||
set packagServerName_x86=TDengine-enterprise-server-%2-beta-Windows-x86
|
||||
@REM set packagServerName_x86=TDengine-enterprise-server-%2-beta-Windows-x86
|
||||
set packagClientName_x64=TDengine-enterprise-client-%2-beta-Windows-x64
|
||||
set packagClientName_x86=TDengine-enterprise-client-%2-beta-Windows-x86
|
||||
) else (
|
||||
set work_dir=%community_dir%
|
||||
set packagServerName_x64=TDengine-server-%2-Windows-x64
|
||||
set packagServerName_x86=TDengine-server-%2-Windows-x86
|
||||
@REM set packagServerName_x86=TDengine-server-%2-Windows-x86
|
||||
set packagClientName_x64=TDengine-client-%2-Windows-x64
|
||||
set packagClientName_x86=TDengine-client-%2-Windows-x86
|
||||
)
|
||||
|
@ -59,8 +59,8 @@ rd /s /Q C:\TDengine
|
|||
cmake --install .
|
||||
if not %errorlevel% == 0 ( call :RUNFAILED build x86 failed & exit /b 1)
|
||||
cd %package_dir%
|
||||
iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
|
||||
if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1)
|
||||
@REM iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
|
||||
@REM if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1)
|
||||
iscc /DMyAppInstallName="%packagClientName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release
|
||||
if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x86% failed & exit /b 1)
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
|
|||
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
|
||||
#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
|
||||
|
||||
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
|
||||
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
|
||||
|
|
|
@ -132,6 +132,7 @@ function install_bin() {
|
|||
[ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
|
||||
[ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || :
|
||||
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || :
|
||||
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark || :
|
||||
[ -x ${bin_dir}/TDinsight.sh ] && ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
|
||||
[ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
|
||||
|
|
|
@ -101,10 +101,6 @@ typedef struct SQueryExecMetric {
|
|||
int64_t rsp; // receive response from server, us
|
||||
} SQueryExecMetric;
|
||||
|
||||
typedef struct SHeartBeatInfo {
|
||||
void* pTimer; // timer, used to send request msg to mnode
|
||||
} SHeartBeatInfo;
|
||||
|
||||
struct SAppInstInfo {
|
||||
int64_t numOfConns;
|
||||
SCorEpSet mgmtEp;
|
||||
|
@ -256,6 +252,8 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida
|
|||
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly);
|
||||
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
|
||||
|
||||
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
|
||||
|
||||
static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) {
|
||||
SMqRspObj* msg = (SMqRspObj*)res;
|
||||
return (SReqResultInfo*)&msg->resInfo;
|
||||
|
|
|
@ -611,65 +611,6 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm
|
|||
}
|
||||
}
|
||||
|
||||
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jlong con,
|
||||
jboolean restart, jstring jtopic,
|
||||
jstring jsql, jint jinterval) {
|
||||
jlong sub = 0;
|
||||
TAOS *taos = (TAOS *)con;
|
||||
char *topic = NULL;
|
||||
char *sql = NULL;
|
||||
|
||||
jniGetGlobalMethod(env);
|
||||
jniDebug("jobj:%p, in TSDBJNIConnector_subscribeImp", jobj);
|
||||
|
||||
if (jtopic != NULL) {
|
||||
topic = (char *)(*env)->GetStringUTFChars(env, jtopic, NULL);
|
||||
}
|
||||
if (jsql != NULL) {
|
||||
sql = (char *)(*env)->GetStringUTFChars(env, jsql, NULL);
|
||||
}
|
||||
|
||||
if (topic == NULL || sql == NULL) {
|
||||
jniDebug("jobj:%p, invalid argument: topic or sql is NULL", jobj);
|
||||
return sub;
|
||||
}
|
||||
|
||||
TAOS_SUB *tsub = taos_subscribe(taos, (int)restart, topic, sql, NULL, NULL, jinterval);
|
||||
sub = (jlong)tsub;
|
||||
|
||||
if (sub == 0) {
|
||||
jniDebug("jobj:%p, failed to subscribe: topic:%s", jobj, topic);
|
||||
} else {
|
||||
jniDebug("jobj:%p, successfully subscribe: topic: %s", jobj, topic);
|
||||
}
|
||||
|
||||
(*env)->ReleaseStringUTFChars(env, jtopic, topic);
|
||||
(*env)->ReleaseStringUTFChars(env, jsql, sql);
|
||||
|
||||
return sub;
|
||||
}
|
||||
|
||||
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) {
|
||||
jniDebug("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%lld", jobj, sub);
|
||||
jniGetGlobalMethod(env);
|
||||
|
||||
TAOS_SUB *tsub = (TAOS_SUB *)sub;
|
||||
TAOS_RES *res = taos_consume(tsub);
|
||||
|
||||
if (res == NULL) {
|
||||
jniError("jobj:%p, tsub:%p, taos_consume returns NULL", jobj, tsub);
|
||||
return 0l;
|
||||
}
|
||||
|
||||
return (jlong)res;
|
||||
}
|
||||
|
||||
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub,
|
||||
jboolean keepProgress) {
|
||||
TAOS_SUB *tsub = (TAOS_SUB *)sub;
|
||||
taos_unsubscribe(tsub, keepProgress);
|
||||
}
|
||||
|
||||
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp(JNIEnv *env, jobject jobj,
|
||||
jlong con, jbyteArray jsql) {
|
||||
TAOS *tscon = (TAOS *)con;
|
||||
|
@ -1087,4 +1028,4 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInsert
|
|||
return JNI_OUT_OF_MEMORY;
|
||||
}
|
||||
return (jlong)tres;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1571,10 +1571,18 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
  return TSDB_CODE_SUCCESS;
}

int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
  int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
  ASSERT(numOfCols == cols);

  return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t)*3 + sizeof(uint64_t) + numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}

static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
  char* p = (char*)pResultInfo->pData;

  int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
  // version + length + numOfRows + numOfCol + groupId + flag_segment + column_info
  int32_t  len = getVersion1BlockMetaSize(p, numOfCols);
  int32_t* colLength = (int32_t*)(p + len);
  len += sizeof(int32_t) * numOfCols;

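As a worked example of the formula above, for a hypothetical three-column result block the version-1 header works out to:

```c
// Worked example for numOfCols == 3 (hypothetical block, sizes from the formula above):
//   version(4) + dataLen(4) + rows(4) + cols(4) + flagSeg(4) + groupId(8) = 28 bytes
//   per-column schema: 3 * (type as int8_t + bytes as int32_t)            = 15 bytes
//   => getVersion1BlockMetaSize(p, 3)                                     = 43 bytes
//   the caller then skips the per-column length array: 3 * sizeof(int32_t) = 12 bytes
//   so the column data of such a block would start 55 bytes into pData.
```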
@ -1642,7 +1650,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
|
|||
char* p1 = pResultInfo->convertJson;
|
||||
|
||||
int32_t totalLen = 0;
|
||||
int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
|
||||
int32_t len = getVersion1BlockMetaSize(p, numOfCols);
|
||||
memcpy(p1, p, len);
|
||||
|
||||
p += len;
|
||||
|
@ -1739,7 +1747,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
|
|||
pStart1 += colLen1;
|
||||
}
|
||||
|
||||
*(int32_t*)(pResultInfo->convertJson) = totalLen;
|
||||
*(int32_t*)(pResultInfo->convertJson + 4) = totalLen;
|
||||
pResultInfo->pData = pResultInfo->convertJson;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1762,16 +1770,31 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
|
|||
|
||||
char* p = (char*)pResultInfo->pData;
|
||||
|
||||
// version:
|
||||
int32_t blockVersion = *(int32_t*)p;
|
||||
p += sizeof(int32_t);
|
||||
|
||||
int32_t dataLen = *(int32_t*)p;
|
||||
p += sizeof(int32_t);
|
||||
|
||||
int32_t rows = *(int32_t*)p;
|
||||
p += sizeof(int32_t);
|
||||
|
||||
int32_t cols = *(int32_t*)p;
|
||||
p += sizeof(int32_t);
|
||||
|
||||
ASSERT(rows == numOfRows && cols == numOfCols);
|
||||
|
||||
int32_t hasColumnSeg = *(int32_t*)p;
|
||||
p += sizeof(int32_t);
|
||||
|
||||
uint64_t groupId = *(uint64_t*)p;
|
||||
p += sizeof(uint64_t);
|
||||
|
||||
// check fields
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
int16_t type = *(int16_t*)p;
|
||||
p += sizeof(int16_t);
|
||||
p += sizeof(int8_t);
|
||||
|
||||
int32_t bytes = *(int32_t*)p;
|
||||
p += sizeof(int32_t);
|
||||
|
|
|
@ -939,21 +939,6 @@ const void *taos_get_raw_block(TAOS_RES *res) {
|
|||
return pRequest->body.resInfo.pData;
|
||||
}
|
||||
|
||||
TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char *topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp,
|
||||
void *param, int interval) {
|
||||
// TODO
|
||||
return NULL;
|
||||
}
|
||||
|
||||
TAOS_RES *taos_consume(TAOS_SUB *tsub) {
|
||||
// TODO
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) {
|
||||
// TODO
|
||||
}
|
||||
|
||||
int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
||||
if (NULL == taos) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
|
|
|
@ -122,6 +122,7 @@ enum {
|
|||
TMQ_CONSUMER_STATUS__INIT = 0,
|
||||
TMQ_CONSUMER_STATUS__READY,
|
||||
TMQ_CONSUMER_STATUS__NO_TOPIC,
|
||||
TMQ_CONSUMER_STATUS__RECOVER,
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@ -134,10 +135,8 @@ typedef struct {
|
|||
// statistics
|
||||
int64_t pollCnt;
|
||||
// offset
|
||||
/*int64_t committedOffset;*/
|
||||
/*int64_t currentOffset;*/
|
||||
STqOffsetVal committedOffsetNew;
|
||||
STqOffsetVal currentOffsetNew;
|
||||
STqOffsetVal committedOffset;
|
||||
STqOffsetVal currentOffset;
|
||||
// connection info
|
||||
int32_t vgId;
|
||||
int32_t vgStatus;
|
||||
|
@ -152,7 +151,6 @@ typedef struct {
|
|||
|
||||
SArray* vgs; // SArray<SMqClientVg>
|
||||
|
||||
int8_t isSchemaAdaptive;
|
||||
SSchemaWrapper schema;
|
||||
} SMqClientTopic;
|
||||
|
||||
|
@ -190,10 +188,9 @@ typedef struct {
|
|||
} SMqPollCbParam;
|
||||
|
||||
typedef struct {
|
||||
tmq_t* tmq;
|
||||
int8_t automatic;
|
||||
int8_t async;
|
||||
/*int8_t freeOffsets;*/
|
||||
tmq_t* tmq;
|
||||
int8_t automatic;
|
||||
int8_t async;
|
||||
int32_t waitingRspNum;
|
||||
int32_t totalRspNum;
|
||||
int32_t rspErr;
|
||||
|
@ -207,7 +204,7 @@ typedef struct {
|
|||
typedef struct {
|
||||
SMqCommitCbParamSet* params;
|
||||
STqOffset* pOffset;
|
||||
} SMqCommitCbParam2;
|
||||
} SMqCommitCbParam;
|
||||
|
||||
tmq_conf_t* tmq_conf_new() {
|
||||
tmq_conf_t* conf = taosMemoryCalloc(1, sizeof(tmq_conf_t));
|
||||
|
@ -215,6 +212,7 @@ tmq_conf_t* tmq_conf_new() {
|
|||
conf->autoCommit = true;
|
||||
conf->autoCommitInterval = 5000;
|
||||
conf->resetOffset = TMQ_CONF__RESET_OFFSET__EARLIEAST;
|
||||
conf->hbBgEnable = true;
|
||||
return conf;
|
||||
}
|
||||
|
||||
|
@ -371,8 +369,8 @@ static int32_t tmqMakeTopicVgKey(char* dst, const char* topicName, int32_t vg) {
|
|||
return sprintf(dst, "%s:%d", topicName, vg);
|
||||
}
|
||||
|
||||
int32_t tmqCommitCb2(void* param, SDataBuf* pBuf, int32_t code) {
|
||||
SMqCommitCbParam2* pParam = (SMqCommitCbParam2*)param;
|
||||
int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
|
||||
SMqCommitCbParam* pParam = (SMqCommitCbParam*)param;
|
||||
SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params;
|
||||
// push into array
|
||||
#if 0
|
||||
|
@ -418,7 +416,7 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT
|
|||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
pOffset->val = pVg->currentOffsetNew;
|
||||
pOffset->val = pVg->currentOffset;
|
||||
|
||||
int32_t groupLen = strlen(tmq->groupId);
|
||||
memcpy(pOffset->subKey, tmq->groupId, groupLen);
|
||||
|
@ -443,7 +441,7 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT
|
|||
tEncodeSTqOffset(&encoder, pOffset);
|
||||
|
||||
// build param
|
||||
SMqCommitCbParam2* pParam = taosMemoryCalloc(1, sizeof(SMqCommitCbParam2));
|
||||
SMqCommitCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqCommitCbParam));
|
||||
pParam->params = pParamSet;
|
||||
pParam->pOffset = pOffset;
|
||||
|
||||
|
@ -462,13 +460,13 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT
|
|||
pVg->vgId, pOffset->val.version);
|
||||
|
||||
// TODO: put into cb
|
||||
pVg->committedOffsetNew = pVg->currentOffsetNew;
|
||||
pVg->committedOffset = pVg->currentOffset;
|
||||
|
||||
pMsgSendInfo->requestId = generateRequestId();
|
||||
pMsgSendInfo->requestObjRefId = 0;
|
||||
pMsgSendInfo->param = pParam;
|
||||
pMsgSendInfo->paramFreeFp = taosMemoryFree;
|
||||
pMsgSendInfo->fp = tmqCommitCb2;
|
||||
pMsgSendInfo->fp = tmqCommitCb;
|
||||
pMsgSendInfo->msgType = TDMT_VND_MQ_COMMIT_OFFSET;
|
||||
// send msg
|
||||
|
||||
|
@ -504,7 +502,6 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm
|
|||
pParamSet->tmq = tmq;
|
||||
pParamSet->automatic = 0;
|
||||
pParamSet->async = async;
|
||||
/*pParamSet->freeOffsets = 1;*/
|
||||
pParamSet->userCb = userCb;
|
||||
pParamSet->userParam = userParam;
|
||||
tsem_init(&pParamSet->rspSem, 0, 0);
|
||||
|
@ -518,7 +515,7 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm
|
|||
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
|
||||
if (pVg->vgId != vgId) continue;
|
||||
|
||||
if (pVg->currentOffsetNew.type > 0 && !tOffsetEqual(&pVg->currentOffsetNew, &pVg->committedOffsetNew)) {
|
||||
if (pVg->currentOffset.type > 0 && !tOffsetEqual(&pVg->currentOffset, &pVg->committedOffset)) {
|
||||
if (tmqSendCommitReq(tmq, pVg, pTopic, pParamSet) < 0) {
|
||||
goto FAIL;
|
||||
}
|
||||
|
@ -550,8 +547,8 @@ FAIL:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t tmqCommitInner2(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t async, tmq_commit_cb* userCb,
|
||||
void* userParam) {
|
||||
int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t async, tmq_commit_cb* userCb,
|
||||
void* userParam) {
|
||||
int32_t code = -1;
|
||||
|
||||
if (msg != NULL) {
|
||||
|
@ -566,7 +563,6 @@ int32_t tmqCommitInner2(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_
|
|||
pParamSet->tmq = tmq;
|
||||
pParamSet->automatic = automatic;
|
||||
pParamSet->async = async;
|
||||
/*pParamSet->freeOffsets = 1;*/
|
||||
pParamSet->userCb = userCb;
|
||||
pParamSet->userParam = userParam;
|
||||
tsem_init(&pParamSet->rspSem, 0, 0);
|
||||
|
@ -583,7 +579,9 @@ int32_t tmqCommitInner2(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_
|
|||
tscDebug("consumer:%" PRId64 ", begin commit for topic %s, vgId:%d", tmq->consumerId, pTopic->topicName,
|
||||
pVg->vgId);
|
||||
|
||||
if (pVg->currentOffsetNew.type > 0 && !tOffsetEqual(&pVg->currentOffsetNew, &pVg->committedOffsetNew)) {
|
||||
if (pVg->currentOffset.type > 0 && !tOffsetEqual(&pVg->currentOffset, &pVg->committedOffset)) {
|
||||
tscDebug("consumer: %ld, vg:%d, current %ld, committed %ld", tmq->consumerId, pVg->vgId,
|
||||
pVg->currentOffset.version, pVg->committedOffset.version);
|
||||
if (tmqSendCommitReq(tmq, pVg, pTopic, pParamSet) < 0) {
|
||||
continue;
|
||||
}
|
||||
|
@ -699,7 +697,7 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
|
|||
tmqAskEp(tmq, true);
|
||||
taosTmrReset(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer, &tmq->epTimer);
|
||||
} else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
|
||||
tmqCommitInner2(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
|
||||
tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
|
||||
taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer, &tmq->commitTimer);
|
||||
} else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
|
||||
} else {
|
||||
|
@ -888,12 +886,6 @@ FAIL:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#if 0
|
||||
int32_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int32_t async) {
|
||||
return tmqCommitInner2(tmq, offsets, 0, async, tmq->commitCb, tmq->commitCbUserParam);
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
|
||||
const SArray* container = &topic_list->container;
|
||||
int32_t sz = taosArrayGetSize(container);
|
||||
|
@ -967,7 +959,11 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
|
|||
code = param.rspErr;
|
||||
if (code != 0) goto FAIL;
|
||||
|
||||
int32_t retryCnt = 0;
|
||||
while (TSDB_CODE_MND_CONSUMER_NOT_READY == tmqAskEp(tmq, false)) {
|
||||
if (retryCnt++ > 10) {
|
||||
goto FAIL;
|
||||
}
|
||||
tscDebug("consumer not ready, retry");
|
||||
taosMsleep(500);
|
||||
}
|
||||
|
@ -1006,8 +1002,12 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
int32_t epoch = pParam->epoch;
|
||||
taosMemoryFree(pParam);
|
||||
if (code != 0) {
|
||||
tscWarn("msg discard from vgId:%d, epoch %d, code:%x", vgId, epoch, code);
|
||||
if (pMsg->pData) taosMemoryFreeClear(pMsg->pData);
|
||||
tscWarn("msg discard from vgId:%d, epoch %d, since %s", vgId, epoch, terrstr());
|
||||
if (pMsg->pData) taosMemoryFree(pMsg->pData);
|
||||
if (code == TSDB_CODE_TMQ_CONSUMER_MISMATCH) {
|
||||
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__RECOVER);
|
||||
goto CREATE_MSG_FAIL;
|
||||
}
|
||||
if (code == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
|
||||
SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM);
|
||||
if (pRspWrapper == NULL) {
|
||||
|
@ -1083,7 +1083,7 @@ CREATE_MSG_FAIL:
|
|||
return -1;
|
||||
}
|
||||
|
||||
bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
||||
bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
||||
bool set = false;
|
||||
|
||||
int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
|
||||
|
@ -1112,10 +1112,10 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
|||
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j);
|
||||
sprintf(vgKey, "%s:%d", pTopicCur->topicName, pVgCur->vgId);
|
||||
char buf[80];
|
||||
tFormatOffset(buf, 80, &pVgCur->currentOffsetNew);
|
||||
tFormatOffset(buf, 80, &pVgCur->currentOffset);
|
||||
tscDebug("consumer:%" PRId64 ", epoch %d vgId:%d vgKey is %s, offset is %s", tmq->consumerId, epoch,
|
||||
pVgCur->vgId, vgKey, buf);
|
||||
taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffsetNew, sizeof(STqOffsetVal));
|
||||
taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffset, sizeof(STqOffsetVal));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1142,7 +1142,7 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
|||
|
||||
SMqClientVg clientVg = {
|
||||
.pollCnt = 0,
|
||||
.currentOffsetNew = offsetNew,
|
||||
.currentOffset = offsetNew,
|
||||
.vgId = pVgEp->vgId,
|
||||
.epSet = pVgEp->epSet,
|
||||
.vgStatus = TMQ_VG_STATUS__IDLE,
|
||||
|
@ -1166,93 +1166,6 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
|||
return set;
|
||||
}
|
||||
|
||||
#if 0
|
||||
bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
||||
/*printf("call update ep %d\n", epoch);*/
|
||||
bool set = false;
|
||||
int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
|
||||
char vgKey[TSDB_TOPIC_FNAME_LEN + 22];
|
||||
tscDebug("consumer:%" PRId64 ", update ep epoch %d to epoch %d, topic num: %d", tmq->consumerId, tmq->epoch, epoch,
|
||||
topicNumGet);
|
||||
SArray* newTopics = taosArrayInit(topicNumGet, sizeof(SMqClientTopic));
|
||||
if (newTopics == NULL) {
|
||||
return false;
|
||||
}
|
||||
SHashObj* pHash = taosHashInit(64, MurmurHash3_32, false, HASH_NO_LOCK);
|
||||
if (pHash == NULL) {
|
||||
taosArrayDestroy(newTopics);
|
||||
return false;
|
||||
}
|
||||
|
||||
// find topic, build hash
|
||||
for (int32_t i = 0; i < topicNumGet; i++) {
|
||||
SMqClientTopic topic = {0};
|
||||
SMqSubTopicEp* pTopicEp = taosArrayGet(pRsp->topics, i);
|
||||
topic.schema = pTopicEp->schema;
|
||||
taosHashClear(pHash);
|
||||
topic.topicName = strdup(pTopicEp->topic);
|
||||
tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN);
|
||||
|
||||
tscDebug("consumer:%" PRId64 ", update topic: %s", tmq->consumerId, topic.topicName);
|
||||
int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
|
||||
for (int32_t j = 0; j < topicNumCur; j++) {
|
||||
// find old topic
|
||||
SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, j);
|
||||
if (pTopicCur->vgs && strcmp(pTopicCur->topicName, pTopicEp->topic) == 0) {
|
||||
int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs);
|
||||
tscDebug("consumer:%" PRId64 ", new vg num: %d", tmq->consumerId, vgNumCur);
|
||||
if (vgNumCur == 0) break;
|
||||
for (int32_t k = 0; k < vgNumCur; k++) {
|
||||
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, k);
|
||||
sprintf(vgKey, "%s:%d", topic.topicName, pVgCur->vgId);
|
||||
tscDebug("consumer:%" PRId64 ", epoch %d vgId:%d build %s", tmq->consumerId, epoch, pVgCur->vgId, vgKey);
|
||||
taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffset, sizeof(int64_t));
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t vgNumGet = taosArrayGetSize(pTopicEp->vgs);
|
||||
topic.vgs = taosArrayInit(vgNumGet, sizeof(SMqClientVg));
|
||||
for (int32_t j = 0; j < vgNumGet; j++) {
|
||||
SMqSubVgEp* pVgEp = taosArrayGet(pTopicEp->vgs, j);
|
||||
sprintf(vgKey, "%s:%d", topic.topicName, pVgEp->vgId);
|
||||
int64_t* pOffset = taosHashGet(pHash, vgKey, strlen(vgKey));
|
||||
int64_t offset = pVgEp->offset;
|
||||
tscDebug("consumer:%" PRId64 ", (epoch %d) original offset of vgId:%d is %" PRId64, tmq->consumerId, epoch, pVgEp->vgId, offset);
|
||||
if (pOffset != NULL) {
|
||||
offset = *pOffset;
|
||||
tscDebug("consumer:%" PRId64 ", (epoch %d) receive offset of vgId:%d, full key is %s", tmq->consumerId, epoch, pVgEp->vgId,
|
||||
vgKey);
|
||||
}
|
||||
tscDebug("consumer:%" PRId64 ", (epoch %d) offset of vgId:%d updated to %" PRId64, tmq->consumerId, epoch, pVgEp->vgId, offset);
|
||||
SMqClientVg clientVg = {
|
||||
.pollCnt = 0,
|
||||
.currentOffset = offset,
|
||||
.vgId = pVgEp->vgId,
|
||||
.epSet = pVgEp->epSet,
|
||||
.vgStatus = TMQ_VG_STATUS__IDLE,
|
||||
.vgSkipCnt = 0,
|
||||
};
|
||||
taosArrayPush(topic.vgs, &clientVg);
|
||||
set = true;
|
||||
}
|
||||
taosArrayPush(newTopics, &topic);
|
||||
}
|
||||
if (tmq->clientTopics) taosArrayDestroy(tmq->clientTopics);
|
||||
taosHashCleanup(pHash);
|
||||
tmq->clientTopics = newTopics;
|
||||
|
||||
if (taosArrayGetSize(tmq->clientTopics) == 0)
|
||||
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__NO_TOPIC);
|
||||
else
|
||||
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__READY);
|
||||
|
||||
atomic_store_32(&tmq->epoch, epoch);
|
||||
return set;
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
|
||||
SMqAskEpCbParam* pParam = (SMqAskEpCbParam*)param;
|
||||
tmq_t* tmq = pParam->tmq;
|
||||
|
@ -1278,7 +1191,7 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
tDecodeSMqAskEpRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &rsp);
|
||||
/*printf("rsp epoch %" PRId64 " sz %" PRId64 "\n", rsp.epoch, rsp.topics->size);*/
|
||||
/*printf("tmq epoch %" PRId64 " sz %" PRId64 "\n", tmq->epoch, tmq->clientTopics->size);*/
|
||||
tmqUpdateEp2(tmq, head->epoch, &rsp);
|
||||
tmqUpdateEp(tmq, head->epoch, &rsp);
|
||||
tDeleteSMqAskEpRsp(&rsp);
|
||||
} else {
|
||||
SMqAskEpRspWrapper* pWrapper = taosAllocateQitem(sizeof(SMqAskEpRspWrapper), DEF_QITEM);
|
||||
|
@ -1401,17 +1314,6 @@ int32_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
|
|||
#endif
|
||||
|
||||
SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) {
|
||||
/*int64_t reqOffset;*/
|
||||
/*if (pVg->currentOffset >= 0) {*/
|
||||
/*reqOffset = pVg->currentOffset;*/
|
||||
/*} else {*/
|
||||
/*if (tmq->resetOffsetCfg == TMQ_CONF__RESET_OFFSET__NONE) {*/
|
||||
/*tscError("unable to poll since no committed offset but reset offset is set to none");*/
|
||||
/*return NULL;*/
|
||||
/*}*/
|
||||
/*reqOffset = tmq->resetOffsetCfg;*/
|
||||
/*}*/
|
||||
|
||||
SMqPollReq* pReq = taosMemoryCalloc(1, sizeof(SMqPollReq));
|
||||
if (pReq == NULL) {
|
||||
return NULL;
|
||||
|
@ -1430,7 +1332,7 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic*
|
|||
pReq->consumerId = tmq->consumerId;
|
||||
pReq->epoch = tmq->epoch;
|
||||
/*pReq->currentOffset = reqOffset;*/
|
||||
pReq->reqOffset = pVg->currentOffsetNew;
|
||||
pReq->reqOffset = pVg->currentOffset;
|
||||
pReq->reqId = generateRequestId();
|
||||
|
||||
pReq->useSnapshot = tmq->useSnapshot;
|
||||
|
@ -1534,7 +1436,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
|
|||
/*printf("send poll\n");*/
|
||||
|
||||
char offsetFormatBuf[80];
|
||||
tFormatOffset(offsetFormatBuf, 80, &pVg->currentOffsetNew);
|
||||
tFormatOffset(offsetFormatBuf, 80, &pVg->currentOffset);
|
||||
tscDebug("consumer:%" PRId64 ", send poll to %s vgId:%d, epoch %d, req offset:%s, reqId:%" PRIu64,
|
||||
tmq->consumerId, pTopic->topicName, pVg->vgId, tmq->epoch, offsetFormatBuf, pReq->reqId);
|
||||
/*printf("send vgId:%d %" PRId64 "\n", pVg->vgId, pVg->currentOffset);*/
|
||||
|
@ -1552,7 +1454,7 @@ int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset)
|
|||
if (rspWrapper->epoch > atomic_load_32(&tmq->epoch)) {
|
||||
SMqAskEpRspWrapper* pEpRspWrapper = (SMqAskEpRspWrapper*)rspWrapper;
|
||||
SMqAskEpRsp* rspMsg = &pEpRspWrapper->msg;
|
||||
tmqUpdateEp2(tmq, rspWrapper->epoch, rspMsg);
|
||||
tmqUpdateEp(tmq, rspWrapper->epoch, rspMsg);
|
||||
/*tmqClearUnhandleMsg(tmq);*/
|
||||
*pReset = true;
|
||||
} else {
|
||||
|
@ -1586,7 +1488,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
SMqClientVg* pVg = pollRspWrapper->vgHandle;
|
||||
/*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
|
||||
* rspMsg->msg.rspOffset);*/
|
||||
pVg->currentOffsetNew = pollRspWrapper->dataRsp.rspOffset;
|
||||
pVg->currentOffset = pollRspWrapper->dataRsp.rspOffset;
|
||||
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
|
||||
if (pollRspWrapper->dataRsp.blockNum == 0) {
|
||||
taosFreeQitem(pollRspWrapper);
|
||||
|
@ -1609,8 +1511,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
SMqClientVg* pVg = pollRspWrapper->vgHandle;
|
||||
/*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
|
||||
* rspMsg->msg.rspOffset);*/
|
||||
pVg->currentOffsetNew.version = pollRspWrapper->metaRsp.rspOffset;
|
||||
pVg->currentOffsetNew.type = TMQ_OFFSET__LOG;
|
||||
pVg->currentOffset.version = pollRspWrapper->metaRsp.rspOffset;
|
||||
pVg->currentOffset.type = TMQ_OFFSET__LOG;
|
||||
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
|
||||
// build rsp
|
||||
SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper);
|
||||
|
@ -1653,6 +1555,17 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__RECOVER) {
|
||||
int32_t retryCnt = 0;
|
||||
while (TSDB_CODE_MND_CONSUMER_NOT_READY == tmqAskEp(tmq, false)) {
|
||||
if (retryCnt++ > 10) {
|
||||
return NULL;
|
||||
}
|
||||
tscDebug("consumer not ready, retry");
|
||||
taosMsleep(500);
|
||||
}
|
||||
}
|
||||
|
||||
while (1) {
|
||||
tmqHandleAllDelayedTask(tmq);
|
||||
if (tmqPollImpl(tmq, timeout) < 0) return NULL;
|
||||
|
@ -2947,7 +2860,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
tdSRowSetTpInfo(&rb, numOfCols, fLen);
|
||||
int32_t dataLen = 0;
|
||||
|
||||
char* pStart = pData + sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
|
||||
char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
|
||||
int32_t* colLength = (int32_t*)pStart;
|
||||
pStart += sizeof(int32_t) * numOfCols;
|
||||
|
||||
|
@ -2977,6 +2890,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
char* data = pCol[k].pData + pCol[k].offset[j];
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
|
||||
} else {
|
||||
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||
}
|
||||
} else {
|
||||
|
@ -2990,6 +2904,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
|
||||
offset += TYPE_BYTES[pColumn->type];
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
int32_t rowLen = TD_ROW_LEN(rowData);
|
||||
rowData = POINTER_SHIFT(rowData, rowLen);
|
||||
dataLen += rowLen;
|
||||
|
@ -3220,6 +3135,7 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
|
|||
}
|
||||
offset += TYPE_BYTES[pColumn->type];
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
int32_t rowLen = TD_ROW_LEN(rowData);
|
||||
rowData = POINTER_SHIFT(rowData, rowLen);
|
||||
dataLen += rowLen;
|
||||
|
@ -3381,10 +3297,10 @@ int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
|
|||
|
||||
void tmq_commit_async(tmq_t* tmq, const TAOS_RES* msg, tmq_commit_cb* cb, void* param) {
|
||||
//
|
||||
tmqCommitInner2(tmq, msg, 0, 1, cb, param);
|
||||
tmqCommitInner(tmq, msg, 0, 1, cb, param);
|
||||
}
|
||||
|
||||
int32_t tmq_commit_sync(tmq_t* tmq, const TAOS_RES* msg) {
|
||||
//
|
||||
return tmqCommitInner2(tmq, msg, 0, 0, NULL, NULL);
|
||||
return tmqCommitInner(tmq, msg, 0, 0, NULL, NULL);
|
||||
}
|
||||
|
|
|
@ -154,7 +154,7 @@ TEST(testCase, driverInit_Test) {
|
|||
}
|
||||
|
||||
TEST(testCase, connect_Test) {
|
||||
taos_options(TSDB_OPTION_CONFIGDIR, "/home/lisa/Documents/workspace/tdengine/sim/dnode1/cfg");
|
||||
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
|
||||
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
if (pConn == NULL) {
|
||||
|
@ -670,54 +670,56 @@ TEST(testCase, projection_query_tables) {
|
|||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(pConn, nullptr);
|
||||
|
||||
TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in create db, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
// TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("error in create db, reason:%s\n", taos_errstr(pRes));
|
||||
// }
|
||||
// taos_free_result(pRes);
|
||||
|
||||
TAOS_RES* pRes = taos_query(pConn, "use abc1");
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "use abc1");
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table tu using st1 tags(1)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
for(int32_t i = 0; i < 1; ++i) {
|
||||
printf("create table :%d\n", i);
|
||||
createNewTable(pConn, i);
|
||||
}
|
||||
|
||||
pRes = taos_query(pConn, "select * from tu");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
|
||||
taos_free_result(pRes);
|
||||
ASSERT_TRUE(false);
|
||||
}
|
||||
|
||||
TAOS_ROW pRow = NULL;
|
||||
TAOS_FIELD* pFields = taos_fetch_fields(pRes);
|
||||
int32_t numOfFields = taos_num_fields(pRes);
|
||||
|
||||
char str[512] = {0};
|
||||
while ((pRow = taos_fetch_row(pRes)) != NULL) {
|
||||
int32_t code = taos_print_row(str, pRow, pFields, numOfFields);
|
||||
printf("%s\n", str);
|
||||
}
|
||||
pRes = taos_query(pConn, "explain verbose true select _wstart,count(*),a from st1 partition by a interval(1s)");
|
||||
printResult(pRes);
|
||||
// pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
// }
|
||||
// taos_free_result(pRes);
|
||||
//
|
||||
// pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
// }
|
||||
// taos_free_result(pRes);
|
||||
//
|
||||
// pRes = taos_query(pConn, "create table tu using st1 tags(1)");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
// }
|
||||
// taos_free_result(pRes);
|
||||
//
|
||||
// for(int32_t i = 0; i < 1; ++i) {
|
||||
// printf("create table :%d\n", i);
|
||||
// createNewTable(pConn, i);
|
||||
// }
|
||||
//
|
||||
// pRes = taos_query(pConn, "select * from tu");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
|
||||
// taos_free_result(pRes);
|
||||
// ASSERT_TRUE(false);
|
||||
// }
|
||||
//
|
||||
// TAOS_ROW pRow = NULL;
|
||||
// TAOS_FIELD* pFields = taos_fetch_fields(pRes);
|
||||
// int32_t numOfFields = taos_num_fields(pRes);
|
||||
//
|
||||
// char str[512] = {0};
|
||||
// while ((pRow = taos_fetch_row(pRes)) != NULL) {
|
||||
// int32_t code = taos_print_row(str, pRow, pFields, numOfFields);
|
||||
// printf("%s\n", str);
|
||||
// }
|
||||
|
||||
taos_free_result(pRes);
|
||||
taos_close(pConn);
|
||||
|
|
|
@ -74,7 +74,7 @@ static const SSysDbTableSchema clusterSchema[] = {
|
|||
static const SSysDbTableSchema userDBSchema[] = {
|
||||
{.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
{.name = "vgroups", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
|
||||
{.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
{.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
|
||||
{.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
|
||||
{.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
|
@ -284,8 +284,7 @@ static const SSysDbTableSchema consumerSchema[] = {
|
|||
{.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
|
||||
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
|
||||
{.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
|
||||
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
{.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
|
||||
/*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},*/
|
||||
{.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
{.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
|
|
|
@@ -676,9 +676,9 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
 * @return
 */
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
  // | total rows/total length | block group id | column schema | each column length |
  return sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t)) +
         numOfCols * sizeof(int32_t);
  // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
  return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
         numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}

double blockDataGetSerialRowSize(const SSDataBlock* pBlock) {

@ -1329,10 +1329,6 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
|
|||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
|
||||
SColumnInfoData* pSrc = taosArrayGet(pDataBlock->pDataBlock, i);
|
||||
if (pSrc->pData == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
colDataAssign(pDst, pSrc, pDataBlock->info.rows, &pDataBlock->info);
|
||||
}
|
||||
|
||||
|
@ -1581,7 +1577,7 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
|
|||
for (int32_t i = 0; i < sz; i++) {
|
||||
SColumnInfoData* pColData = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, i);
|
||||
tlen += taosEncodeFixedI16(buf, pColData->info.colId);
|
||||
tlen += taosEncodeFixedI16(buf, pColData->info.type);
|
||||
tlen += taosEncodeFixedI8(buf, pColData->info.type);
|
||||
tlen += taosEncodeFixedI32(buf, pColData->info.bytes);
|
||||
tlen += taosEncodeFixedBool(buf, pColData->hasNull);
|
||||
|
||||
|
@ -1613,7 +1609,7 @@ void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock) {
|
|||
for (int32_t i = 0; i < sz; i++) {
|
||||
SColumnInfoData data = {0};
|
||||
buf = taosDecodeFixedI16(buf, &data.info.colId);
|
||||
buf = taosDecodeFixedI16(buf, &data.info.type);
|
||||
buf = taosDecodeFixedI8(buf, &data.info.type);
|
||||
buf = taosDecodeFixedI32(buf, &data.info.bytes);
|
||||
buf = taosDecodeFixedBool(buf, &data.hasNull);
|
||||
|
||||
|
@ -2001,6 +1997,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
|
|||
}
|
||||
offset += TYPE_BYTES[pCol->type]; // sum/avg would convert to int64_t/uint64_t/double during aggregation
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
dataLen += TD_ROW_LEN(rb.pBuf);
|
||||
#ifdef TD_DEBUG_PRINT_ROW
|
||||
tdSRowPrint(rb.pBuf, pTSchema, __func__);
|
||||
|
@@ -2070,17 +2067,36 @@ char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId) {
void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress) {
  // todo extract method
  int32_t* version = (int32_t*)data;
  *version = 1;
  data += sizeof(int32_t);

  int32_t* actualLen = (int32_t*)data;
  data += sizeof(int32_t);

  int32_t* rows = (int32_t*)data;
  *rows = pBlock->info.rows;
  data += sizeof(int32_t);

  int32_t* cols = (int32_t*)data;
  *cols = numOfCols;
  data += sizeof(int32_t);

  // flag segment.
  // the inital bit is for column info
  int32_t* flagSegment = (int32_t*)data;
  *flagSegment = (1<<31);

  data += sizeof(int32_t);

  uint64_t* groupId = (uint64_t*)data;
  data += sizeof(uint64_t);

  for (int32_t i = 0; i < numOfCols; ++i) {
    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);

    *((int16_t*)data) = pColInfoData->info.type;
    data += sizeof(int16_t);
    *((int8_t*)data) = pColInfoData->info.type;
    data += sizeof(int8_t);

    *((int32_t*)data) = pColInfoData->info.bytes;
    data += sizeof(int32_t);

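For readability, the version-1 layout that blockEncode() writes (and that blockDecode() and getVersion1BlockMetaSize() read back) can be summarized as the informal struct below. This is a reading aid only: the real code walks the buffer with raw pointer arithmetic, the struct name is hypothetical, and a packed C struct would not be byte-identical because of alignment padding.

```c
// Informal sketch of the version-1 serialized block header (not a real type
// in this commit; actualLen is assumed to be filled in later by the encoder).
typedef struct {
  int32_t  version;      // currently 1
  int32_t  actualLen;    // total payload length
  int32_t  rows;         // number of rows in the block
  int32_t  cols;         // number of columns in the block
  int32_t  flagSegment;  // bit 31 set => per-column schema segment follows
  uint64_t groupId;      // block group id
  // then per column: int8_t type, int32_t bytes
  // then per column: int32_t column length
  // then the column data itself
} SBlockHeadV1Sketch;
```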
@ -2126,12 +2142,31 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
|
|||
*groupId = pBlock->info.groupId;
|
||||
}
|
||||
|
||||
const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData) {
|
||||
const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
|
||||
const char* pStart = pData;
|
||||
|
||||
int32_t version = *(int32_t*) pStart;
|
||||
pStart += sizeof(int32_t);
|
||||
ASSERT(version == 1);
|
||||
|
||||
// total length sizeof(int32_t)
|
||||
int32_t dataLen = *(int32_t*)pStart;
|
||||
pStart += sizeof(int32_t);
|
||||
|
||||
// total rows sizeof(int32_t)
|
||||
int32_t numOfRows = *(int32_t*)pStart;
|
||||
pStart += sizeof(int32_t);
|
||||
|
||||
// total columns sizeof(int32_t)
|
||||
int32_t numOfCols = *(int32_t*)pStart;
|
||||
pStart += sizeof(int32_t);
|
||||
|
||||
// has column info segment
|
||||
int32_t flagSeg = *(int32_t*)pStart;
|
||||
int32_t hasColumnInfo = (flagSeg >> 31);
|
||||
pStart += sizeof(int32_t);
|
||||
|
||||
// group id sizeof(uint64_t)
|
||||
pBlock->info.groupId = *(uint64_t*)pStart;
|
||||
pStart += sizeof(uint64_t);
|
||||
|
||||
|
@ -2143,7 +2178,7 @@ const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRow
|
|||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
|
||||
pColInfoData->info.type = *(int16_t*)pStart;
|
||||
pStart += sizeof(int16_t);
|
||||
pStart += sizeof(int8_t);
|
||||
|
||||
pColInfoData->info.bytes = *(int32_t*)pStart;
|
||||
pStart += sizeof(int32_t);
|
||||
|
|
|
@@ -305,7 +305,7 @@ static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq)
taosArrayPush(desc.subDesc, &sDesc);
}
}

ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc));

taosArrayPush(pReq->query->queryDesc, &desc);
@@ -5770,3 +5770,40 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
}
return 0;
}

int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) {
if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;
return 0;
}

int32_t tDecodeSSingleDeleteReq(SDecoder *pDecoder, SSingleDeleteReq *pReq) {
if (tDecodeI64(pDecoder, &pReq->uid) < 0) return -1;
if (tDecodeI64(pDecoder, &pReq->ts) < 0) return -1;
return 0;
}

int32_t tEncodeSBatchDeleteReq(SEncoder *pEncoder, const SBatchDeleteReq *pReq) {
if (tEncodeI64(pEncoder, pReq->suid) < 0) return -1;
int32_t sz = taosArrayGetSize(pReq->deleteReqs);
if (tEncodeI32(pEncoder, sz) < 0) return -1;
for (int32_t i = 0; i < sz; i++) {
SSingleDeleteReq *pOneReq = taosArrayGet(pReq->deleteReqs, i);
if (tEncodeSSingleDeleteReq(pEncoder, pOneReq) < 0) return -1;
}
return 0;
}

int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
if (tDecodeI64(pDecoder, &pReq->suid) < 0) return -1;
int32_t sz;
if (tDecodeI32(pDecoder, &sz) < 0) return -1;
pReq->deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
if (pReq->deleteReqs == NULL) return -1;
for (int32_t i = 0; i < sz; i++) {
SSingleDeleteReq deleteReq;
if (tDecodeSSingleDeleteReq(pDecoder, &deleteReq) < 0) return -1;
taosArrayPush(pReq->deleteReqs, &deleteReq);
}
return 0;
}
@@ -32,9 +32,13 @@ const uint8_t tdVTypeByte[2][3] = {{
};

// declaration
static uint8_t tdGetBitmapByte(uint8_t byte);
static int32_t tdCompareColId(const void *arg1, const void *arg2);
static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2);
static uint8_t tdGetBitmapByte(uint8_t byte);
static bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal);
static bool tdSTSRowIterGetKvVal(STSRowIter *pIter, col_id_t colId, col_id_t *nIdx, SCellVal *pVal);
static bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t flen, uint32_t offset,
col_id_t colIdx, SCellVal *pVal);
static bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal);
static void tdSCellValPrint(SCellVal *pVal, int8_t colType);

// implementation
/**
@@ -330,14 +334,14 @@ void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag) {
tdSTSRowIterInit(&iter, pSchema);
tdSTSRowIterReset(&iter, row);
printf("%s >>>type:%d,sver:%d ", tag, (int32_t)TD_ROW_TYPE(row), (int32_t)TD_ROW_SVER(row));
for (int i = 0; i < pSchema->numOfCols; ++i) {
STColumn *stCol = pSchema->columns + i;
SCellVal sVal = {255, NULL};
if (!tdSTSRowIterNext(&iter, stCol->colId, stCol->type, &sVal)) {
STColumn *cols = (STColumn *)&iter.pSchema->columns;
while (true) {
SCellVal sVal = {.valType = 255, NULL};
if (!tdSTSRowIterNext(&iter, &sVal)) {
break;
}
ASSERT(sVal.valType == 0 || sVal.valType == 1 || sVal.valType == 2);
tdSCellValPrint(&sVal, stCol->type);
tdSCellValPrint(&sVal, cols[iter.colIdx - 1].type);
}
printf("\n");
}
@@ -420,6 +424,16 @@ void tdSCellValPrint(SCellVal *pVal, int8_t colType) {
}
}

static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2) {
if (*(col_id_t *)key1 > ((SKvRowIdx *)key2)->colId) {
return 1;
} else if (*(col_id_t *)key1 < ((SKvRowIdx *)key2)->colId) {
return -1;
} else {
return 0;
}
}

bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal) {
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
tdRowSetVal(pVal, TD_VTYPE_NORM, TD_ROW_KEY_ADDR(pRow));
@@ -456,7 +470,7 @@ bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t fl
return true;
}

bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) {
bool tdSTSRowIterFetch(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) {
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
pVal->val = &pIter->pRow->ts;
pVal->valType = TD_VTYPE_NORM;
@@ -477,10 +491,10 @@ bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCe
return false;
}
}
tdGetTpRowDataOfCol(pIter, pCol->type, pCol->offset - sizeof(TSKEY), pVal);
tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset - sizeof(TSKEY), pVal);
++pIter->colIdx;
} else if (TD_IS_KV_ROW(pIter->pRow)) {
return tdGetKvRowValOfColEx(pIter, colId, colType, &pIter->kvIdx, pVal);
return tdSTSRowIterGetKvVal(pIter, colId, &pIter->kvIdx, pVal);
} else {
pVal->valType = TD_VTYPE_NONE;
terrno = TSDB_CODE_INVALID_PARA;
@@ -489,13 +503,69 @@ bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCe
return true;
}

bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType, col_id_t *nIdx, SCellVal *pVal) {
bool tdSTSRowIterNext(STSRowIter *pIter, SCellVal *pVal) {
if (pIter->colIdx >= pIter->pSchema->numOfCols) {
return false;
}

STColumn *pCol = &pIter->pSchema->columns[pIter->colIdx];

if (pCol->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
pVal->val = &pIter->pRow->ts;
pVal->valType = TD_VTYPE_NORM;
++pIter->colIdx;
return true;
}

if (TD_IS_TP_ROW(pIter->pRow)) {
tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset - sizeof(TSKEY), pVal);
} else if (TD_IS_KV_ROW(pIter->pRow)) {
tdSTSRowIterGetKvVal(pIter, pCol->colId, &pIter->kvIdx, pVal);
ASSERT(0);
} else {
ASSERT(0);
}
++pIter->colIdx;

return true;
}

bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal) {
STSRow *pRow = pIter->pRow;
if (pRow->statis == 0) {
pVal->valType = TD_VTYPE_NORM;
if (IS_VAR_DATA_TYPE(colType)) {
pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
} else {
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
return TSDB_CODE_SUCCESS;
}

if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
pVal->valType = TD_VTYPE_NONE;
return terrno;
}

if (pVal->valType == TD_VTYPE_NORM) {
if (IS_VAR_DATA_TYPE(colType)) {
pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
} else {
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
}

return true;
}

bool tdSTSRowIterGetKvVal(STSRowIter *pIter, col_id_t colId, col_id_t *nIdx, SCellVal *pVal) {
STSRow *pRow = pIter->pRow;
SKvRowIdx *pKvIdx = NULL;
bool colFound = false;
col_id_t kvNCols = tdRowGetNCols(pRow) - 1;
void *pColIdx = TD_ROW_COL_IDX(pRow);
while (*nIdx < kvNCols) {
pKvIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(pRow), *nIdx * sizeof(SKvRowIdx));
pKvIdx = (SKvRowIdx *)POINTER_SHIFT(pColIdx, *nIdx * sizeof(SKvRowIdx));
if (pKvIdx->colId == colId) {
++(*nIdx);
pVal->val = POINTER_SHIFT(pRow, pKvIdx->offset);
@@ -518,48 +588,13 @@ bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType,
}
}

#ifdef TD_SUPPORT_BITMAP
int16_t colIdx = -1;
if (pKvIdx) colIdx = POINTER_DISTANCE(pKvIdx, TD_ROW_COL_IDX(pRow)) / sizeof(SKvRowIdx);
if (tdGetBitmapValType(pIter->pBitmap, colIdx, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
if (tdGetBitmapValType(pIter->pBitmap, pIter->kvIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
pVal->valType = TD_VTYPE_NONE;
}
#else
pVal->valType = isNull(pVal->val, colType) ? TD_VTYPE_NULL : TD_VTYPE_NORM;
#endif

return true;
}

bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal) {
STSRow *pRow = pIter->pRow;
if (IS_VAR_DATA_TYPE(colType)) {
pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
} else {
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}

#ifdef TD_SUPPORT_BITMAP
if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
pVal->valType = TD_VTYPE_NONE;
}
#else
pVal->valType = isNull(pVal->val, colType) ? TD_VTYPE_NULL : TD_VTYPE_NORM;
#endif

return true;
}

static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2) {
if (*(col_id_t *)key1 > ((SKvRowIdx *)key2)->colId) {
return 1;
} else if (*(col_id_t *)key1 < ((SKvRowIdx *)key2)->colId) {
return -1;
} else {
return 0;
}
}

int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
STColumn *pTColumn;
SColVal *pColVal;
@@ -625,7 +660,7 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
if (maxVarDataLen > 0) {
varBuf = taosMemoryMalloc(maxVarDataLen);
if (!varBuf) {
if(isAlloc) {
if (isAlloc) {
taosMemoryFreeClear(*ppRow);
}
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -666,12 +701,26 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {

++iColVal;
}
tdSRowEnd(&rb);

taosMemoryFreeClear(varBuf);

return 0;
}

static FORCE_INLINE int32_t tdCompareColId(const void *arg1, const void *arg2) {
int32_t colId = *(int32_t *)arg1;
STColumn *pCol = (STColumn *)arg2;

if (colId < pCol->colId) {
return -1;
} else if (colId == pCol->colId) {
return 0;
} else {
return 1;
}
}

bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) {
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
pVal->val = &pIter->pRow->ts;
@@ -711,19 +760,6 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCell
return true;
}

static int32_t tdCompareColId(const void *arg1, const void *arg2) {
int32_t colId = *(int32_t *)arg1;
STColumn *pCol = (STColumn *)arg2;

if (colId < pCol->colId) {
return -1;
} else if (colId == pCol->colId) {
return 0;
} else {
return 1;
}
}

int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) {
if (!pBitmap || colIdx < 0) {
TASSERT(0);
@@ -880,26 +916,29 @@ int32_t tdGetKvRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int32_

int32_t tdGetTpRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int8_t colType, int32_t offset,
int16_t colIdx) {
#ifdef TD_SUPPORT_BITMAP
if (pRow->statis == 0) {
output->valType = TD_VTYPE_NORM;
if (IS_VAR_DATA_TYPE(colType)) {
output->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
} else {
output->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
return TSDB_CODE_SUCCESS;
}

if (tdGetBitmapValType(pBitmap, colIdx, &output->valType, 0) != TSDB_CODE_SUCCESS) {
output->valType = TD_VTYPE_NONE;
return terrno;
}
if (tdValTypeIsNorm(output->valType)) {

if (output->valType == TD_VTYPE_NORM) {
if (IS_VAR_DATA_TYPE(colType)) {
output->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
} else {
output->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
}
#else
if (IS_VAR_DATA_TYPE(colType)) {
output->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
} else {
output->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
output->valType = isNull(output->val, colType) ? TD_VTYPE_NULL : TD_VTYPE_NORM;
#endif

return TSDB_CODE_SUCCESS;
}

@@ -908,7 +947,7 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
STSRow *pRow = pBuilder->pBuf;
if (!val) {
#ifdef TD_SUPPORT_BITMAP
if (tdValTypeIsNorm(valType)) {
if (valType == TD_VTYPE_NORM) {
terrno = TSDB_CODE_INVALID_PTR;
return terrno;
}
@@ -925,6 +964,21 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
return TSDB_CODE_SUCCESS;
}
// TODO: We can avoid the type judegement by FP, but would prevent the inline scheme.

switch (valType) {
case TD_VTYPE_NORM:
break;
case TD_VTYPE_NULL:
if (!pBuilder->hasNull) pBuilder->hasNull = true;
break;
case TD_VTYPE_NONE:
if (!pBuilder->hasNone) pBuilder->hasNone = true;
return TSDB_CODE_SUCCESS;
default:
ASSERT(0);
break;
}

if (TD_IS_TP_ROW(pRow)) {
tdAppendColValToTpRow(pBuilder, valType, val, isCopyVarData, colType, colIdx, offset);
} else {
@@ -951,13 +1005,11 @@ int32_t tdAppendColValToKvRow(SRowBuilder *pBuilder, TDRowValT valType, const vo

STSRow *row = pBuilder->pBuf;
// No need to store None/Null values.
if (tdValIsNorm(valType, val, colType)) {
// ts key stored in STSRow.ts
SKvRowIdx *pColIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(row), offset);
char *ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
pColIdx->colId = colId;
pColIdx->offset = TD_ROW_LEN(row); // the offset include the TD_ROW_HEAD_LEN

SKvRowIdx *pColIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(row), offset);
pColIdx->colId = colId;
pColIdx->offset = TD_ROW_LEN(row); // the offset include the TD_ROW_HEAD_LEN
if (valType == TD_VTYPE_NORM) {
char *ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
if (IS_VAR_DATA_TYPE(colType)) {
if (isCopyVarData) {
memcpy(ptr, val, varDataTLen(val));
@@ -968,26 +1020,6 @@ int32_t tdAppendColValToKvRow(SRowBuilder *pBuilder, TDRowValT valType, const vo
TD_ROW_LEN(row) += TYPE_BYTES[colType];
}
}
#ifdef TD_SUPPORT_BACK2
// NULL/None value
else {
SKvRowIdx *pColIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(row), offset);
char *ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
pColIdx->colId = colId;
pColIdx->offset = TD_ROW_LEN(row); // the offset include the TD_ROW_HEAD_LEN
const void *nullVal = getNullValue(colType);

if (IS_VAR_DATA_TYPE(colType)) {
if (isCopyVarData) {
memcpy(ptr, nullVal, varDataTLen(nullVal));
}
TD_ROW_LEN(row) += varDataTLen(nullVal);
} else {
memcpy(ptr, nullVal, TYPE_BYTES[colType]);
TD_ROW_LEN(row) += TYPE_BYTES[colType];
}
}
#endif

return 0;
}

@@ -1012,7 +1044,7 @@ int32_t tdAppendColValToTpRow(SRowBuilder *pBuilder, TDRowValT valType, const vo
// 1. No need to set flen part for Null/None, just use bitmap. When upsert for the same primary TS key, the bitmap
// should be updated simultaneously if Norm val overwrite Null/None cols.
// 2. When consume STSRow in memory by taos client/tq, the output of Null/None cols should both be Null.
if (tdValIsNorm(valType, val, colType)) {
if (valType == TD_VTYPE_NORM) {
// TODO: The layout of new data types imported since 3.0 like blob/medium blob is the same with binary/nchar.
if (IS_VAR_DATA_TYPE(colType)) {
// ts key stored in STSRow.ts
@@ -1025,24 +1057,6 @@ int32_t tdAppendColValToTpRow(SRowBuilder *pBuilder, TDRowValT valType, const vo
memcpy(POINTER_SHIFT(TD_ROW_DATA(row), offset), val, TYPE_BYTES[colType]);
}
}
#ifdef TD_SUPPORT_BACK2
// NULL/None value
else {
// TODO: Null value for new data types imported since 3.0 need to be defined.
const void *nullVal = getNullValue(colType);
if (IS_VAR_DATA_TYPE(colType)) {
// ts key stored in STSRow.ts
*(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(row), offset) = TD_ROW_LEN(row);

if (isCopyVarData) {
memcpy(POINTER_SHIFT(row, TD_ROW_LEN(row)), nullVal, varDataTLen(nullVal));
}
TD_ROW_LEN(row) += varDataTLen(nullVal);
} else {
memcpy(POINTER_SHIFT(TD_ROW_DATA(row), offset), nullVal, TYPE_BYTES[colType]);
}
}
#endif

return 0;
}

@@ -1092,6 +1106,9 @@ int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf) {
return terrno;
}

if (pBuilder->hasNone) pBuilder->hasNone = false;
if (pBuilder->hasNull) pBuilder->hasNull = false;

TD_ROW_SET_INFO(pBuilder->pBuf, 0);
TD_ROW_SET_TYPE(pBuilder->pBuf, pBuilder->rowType);

@@ -1159,14 +1176,6 @@ int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf) {
return TSDB_CODE_SUCCESS;
}

int32_t tdSRowInitEx(SRowBuilder *pBuilder, void *pBuf, uint32_t allNullLen, uint32_t boundNullLen, int32_t nCols,
int32_t nBoundCols, int32_t flen) {
if (tdSRowSetExtendedInfo(pBuilder, allNullLen, boundNullLen, nCols, nBoundCols, flen) < 0) {
return terrno;
}
return tdSRowResetBuf(pBuilder, pBuf);
}

void tdSRowReset(SRowBuilder *pBuilder) {
pBuilder->rowType = TD_ROW_TP;
pBuilder->pBuf = NULL;
@@ -1315,7 +1324,7 @@ void tdSTSRowIterReset(STSRowIter *pIter, STSRow *pRow) {
pIter->pRow = pRow;
pIter->pBitmap = tdGetBitmapAddr(pRow, pRow->type, pIter->pSchema->flen, tdRowGetNCols(pRow));
pIter->offset = 0;
pIter->colIdx = PRIMARYKEY_TIMESTAMP_COL_ID;
pIter->colIdx = 0; // PRIMARYKEY_TIMESTAMP_COL_ID;
pIter->kvIdx = 0;
}

@@ -1353,4 +1362,4 @@ void tTSRowGetVal(STSRow *pRow, STSchema *pTSchema, int16_t iCol, SColVal *pColV

*pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, value);
}
}
}
@@ -364,6 +364,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_DEL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_COMMIT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_HEARTBEAT, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;

@@ -32,6 +32,7 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic);
SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw);

int32_t mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
int32_t mndCheckTopicExist(SMnode *pMnode, SDbObj *pDb);

const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]);
@@ -131,8 +131,9 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) {
mInfo("receive consumer recover msg, consumer id %" PRId64 ", status %s", pRecoverMsg->consumerId,
mndConsumerStatusName(pConsumer->status));

if (pConsumer->status != MQ_CONSUMER_STATUS__READY) {
if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) {
mndReleaseConsumer(pMnode, pConsumer);
terrno = TSDB_CODE_MND_CONSUMER_NOT_READY;
return -1;
}

@@ -275,6 +276,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
int32_t status = atomic_load_32(&pConsumer->status);

if (status == MQ_CONSUMER_STATUS__LOST_REBD) {
mInfo("try to recover consumer %ld", consumerId);
SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg));

pRecoverMsg->consumerId = consumerId;
@@ -305,15 +307,14 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {

ASSERT(strcmp(pReq->cgroup, pConsumer->cgroup) == 0);

#if 1
atomic_store_32(&pConsumer->hbStatus, 0);
#endif

// 1. check consumer status
int32_t status = atomic_load_32(&pConsumer->status);

#if 1
if (status == MQ_CONSUMER_STATUS__LOST_REBD) {
mInfo("try to recover consumer %ld", consumerId);
SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg));

pRecoverMsg->consumerId = consumerId;
@@ -326,6 +327,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
#endif

if (status != MQ_CONSUMER_STATUS__READY) {
mInfo("consumer %ld not ready, status: %s", consumerId, mndConsumerStatusName(status));
terrno = TSDB_CODE_MND_CONSUMER_NOT_READY;
return -1;
}
@@ -939,13 +941,9 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
colDataAppend(pColInfo, numOfRows, NULL, true);
}

// pid
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pConsumer->pid, true);

// end point
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pConsumer->ep, true);
/*pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);*/
/*colDataAppend(pColInfo, numOfRows, (const char *)&pConsumer->ep, true);*/

// up time
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
@@ -995,11 +995,13 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
mDebug("trans:%d, used to drop db:%s", pTrans->id, pDb->name);
mndTransSetDbName(pTrans, pDb->name, NULL);

if (mndCheckTopicExist(pMnode, pDb) < 0) goto _OVER;

if (mndSetDropDbRedoLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
if (mndSetDropDbCommitLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
/*if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/
/*if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/
/*if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/
if (mndDropStreamByDb(pMnode, pTrans, pDb) != 0) goto _OVER;
if (mndDropSmasByDb(pMnode, pTrans, pDb) != 0) goto _OVER;
if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER;
@@ -1706,7 +1708,10 @@ static void setPerfSchemaDbCfg(SDbObj *pDbObj) {
static bool mndGetTablesOfDbFp(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3) {
SVgObj *pVgroup = pObj;
int32_t *numOfTables = p1;
*numOfTables += pVgroup->numOfTables;
int64_t uid = *(int64_t *)p2;
if (pVgroup->dbUid == uid) {
*numOfTables += pVgroup->numOfTables;
}
return true;
}

@@ -1747,7 +1752,7 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc

if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_READ_OR_WRITE_DB, pDb) == 0) {
int32_t numOfTables = 0;
sdbTraverse(pSdb, SDB_VGROUP, mndGetTablesOfDbFp, &numOfTables, NULL, NULL);
sdbTraverse(pSdb, SDB_VGROUP, mndGetTablesOfDbFp, &numOfTables, &pDb->uid, NULL);
mndDumpDbInfoData(pMnode, pBlock, pDb, pShow, numOfRows, numOfTables, false, objStatus, sysinfo);
numOfRows++;
}
@@ -837,7 +837,7 @@ int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
sdbCancelFetch(pSdb, pIter);
mError("db:%s, failed to drop stream:%s since sourceDbUid:%" PRId64 " not match with targetDbUid:%" PRId64,
pDb->name, pStream->name, pStream->sourceDbUid, pStream->targetDbUid);
terrno = TSDB_CODE_MND_STREAM_ALREADY_EXIST;
terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED;
return -1;
} else {
#if 0
@@ -929,14 +929,31 @@ static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->status, true);

char sourceDB[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&n, pStream->sourceDb, T_NAME_ACCT | T_NAME_DB);
tNameGetDbName(&n, varDataVal(sourceDB));
varDataSetLen(sourceDB, strlen(varDataVal(sourceDB)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->sourceDb, true);
colDataAppend(pColInfo, numOfRows, (const char *)&sourceDB, false);

char targetDB[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&n, pStream->targetDb, T_NAME_ACCT | T_NAME_DB);
tNameGetDbName(&n, varDataVal(targetDB));
varDataSetLen(targetDB, strlen(varDataVal(targetDB)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->targetDb, true);
colDataAppend(pColInfo, numOfRows, (const char *)&targetDB, false);

pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->targetSTbName, true);
if (pStream->targetSTbName[0] == 0) {
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, NULL, true);
} else {
char targetSTB[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&n, pStream->targetSTbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
strcpy(&targetSTB[VARSTR_HEADER_SIZE], tNameGetTableName(&n));
varDataSetLen(targetSTB, strlen(varDataVal(targetSTB)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&targetSTB, false);
}

pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->watermark, false);
@@ -398,13 +398,27 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
}
}

// 8. TODO generate logs
mInfo("rebalance calculation completed, rebalanced vg:");
// 8. generate logs
mInfo("mq rebalance: calculation completed, rebalanced vg:");
for (int32_t i = 0; i < taosArrayGetSize(pOutput->rebVgs); i++) {
SMqRebOutputVg *pOutputRebVg = taosArrayGet(pOutput->rebVgs, i);
mInfo("vgId:%d, moved from consumer:%" PRId64 ", to consumer:%" PRId64, pOutputRebVg->pVgEp->vgId,
mInfo("mq rebalance: vgId:%d, moved from consumer:%" PRId64 ", to consumer:%" PRId64, pOutputRebVg->pVgEp->vgId,
pOutputRebVg->oldConsumerId, pOutputRebVg->newConsumerId);
}
{
void *pIter = NULL;
while (1) {
pIter = taosHashIterate(pOutput->pSub->consumerHash, pIter);
if (pIter == NULL) break;
SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
mInfo("mq rebalance: final cfg: consumer %ld has %d vg", pConsumerEp->consumerId, sz);
for (int32_t i = 0; i < sz; i++) {
SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i);
mInfo("mq rebalance: final cfg: vg %d to consumer %ld", pVgEp->vgId, pConsumerEp->consumerId);
}
}
}

// 9. clear
taosHashCleanup(pHash);
@@ -166,6 +166,18 @@ void mndLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cb
mDebug("vgId:1, mnode leader transfer finish");
}

static void mndBecomeFollower(struct SSyncFSM *pFsm) {
SMnode *pMnode = pFsm->data;
mDebug("vgId:1, become follower");

// clear old leader resource
}

static void mndBecomeLeader(struct SSyncFSM *pFsm) {
SMnode *pMnode = pFsm->data;
mDebug("vgId:1, become leader");
}

SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
pFsm->data = pMnode;
@@ -175,6 +187,8 @@ SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
pFsm->FpRestoreFinishCb = mndRestoreFinish;
pFsm->FpLeaderTransferCb = mndLeaderTransfer;
pFsm->FpReConfigCb = mndReConfig;
pFsm->FpBecomeLeaderCb = mndBecomeLeader;
pFsm->FpBecomeFollowerCb = mndBecomeFollower;
pFsm->FpGetSnapshot = mndSyncGetSnapshot;
pFsm->FpGetSnapshotInfo = mndSyncGetSnapshotInfo;
pFsm->FpSnapshotStartRead = mndSnapshotStartRead;
@@ -782,6 +782,26 @@ static void mndCancelGetNextTopic(SMnode *pMnode, void *pIter) {
sdbCancelFetch(pSdb, pIter);
}

int32_t mndCheckTopicExist(SMnode *pMnode, SDbObj *pDb) {
SSdb *pSdb = pMnode->pSdb;

void *pIter = NULL;
SMqTopicObj *pTopic = NULL;
while (1) {
pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic);
if (pIter == NULL) break;

if (pTopic->dbUid == pDb->uid) {
sdbRelease(pSdb, pTopic);
terrno = TSDB_CODE_MND_TOPIC_MUST_BE_DELETED;
return -1;
}

sdbRelease(pSdb, pTopic);
}
return 0;
}

int32_t mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
int32_t code = 0;
SSdb *pSdb = pMnode->pSdb;
@@ -97,7 +97,7 @@ typedef struct STbUidStore STbUidStore;

int metaOpen(SVnode* pVnode, SMeta** ppMeta);
int metaClose(SMeta* pMeta);
int metaBegin(SMeta* pMeta);
int metaBegin(SMeta* pMeta, int8_t fromSys);
int metaCommit(SMeta* pMeta);
int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
@@ -157,7 +157,7 @@ int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId);
int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen);
@@ -172,7 +172,7 @@ int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list);

SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
const char* stbFullName, int32_t vgId);
const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq);

// sma
int32_t smaInit();
@@ -187,7 +187,7 @@ int32_t smaAsyncPreCommit(SSma* pSma);
int32_t smaAsyncCommit(SSma* pSma);
int32_t smaAsyncPostCommit(SSma* pSma);
int32_t smaDoRetention(SSma* pSma, int64_t now);
int32_t smaProcessFetch(SSma *pSma, void* pMsg);
int32_t smaProcessFetch(SSma* pSma, void* pMsg);

int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
@@ -19,9 +19,12 @@ static FORCE_INLINE void *metaMalloc(void *pPool, size_t size) { return vnodeBuf
static FORCE_INLINE void metaFree(void *pPool, void *p) { vnodeBufPoolFree((SVBufPool *)pPool, p); }

// begin a meta txn
int metaBegin(SMeta *pMeta) {
tdbTxnOpen(&pMeta->txn, 0, metaMalloc, metaFree, pMeta->pVnode->inUse, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);

int metaBegin(SMeta *pMeta, int8_t fromSys) {
if (fromSys) {
tdbTxnOpen(&pMeta->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
} else {
tdbTxnOpen(&pMeta->txn, 0, metaMalloc, metaFree, pMeta->pVnode->inUse, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
}
if (tdbBegin(pMeta->pEnv, &pMeta->txn) < 0) {
return -1;
}
@@ -145,7 +145,7 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr
pWriter->sver = sver;
pWriter->ever = ever;

metaBegin(pMeta);
metaBegin(pMeta, 1);

*ppWriter = pWriter;
return code;
@@ -604,28 +604,44 @@ _end:

static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
int64_t suid, int8_t blkType) {
while (1) {
SSDataBlock *output = NULL;
uint64_t ts;
SArray *pResList = taosArrayInit(1, POINTER_BYTES);
if (pResList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}

int32_t code = qExecTask(taskInfo, &output, &ts);
while (1) {
uint64_t ts;
int32_t code = qExecTaskOpt(taskInfo, pResList, &ts);
if (code < 0) {
smaError("vgId:%d, qExecTask for rsma table %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma), suid,
pItem->level, terrstr(code));
goto _err;
}

if (output) {
#if 0
if (taosArrayGetSize(pResList) == 0) {
if (terrno == 0) {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
} else {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
goto _err;
}

break;
}

for (int32_t i = 0; i < taosArrayGetSize(pResList); ++i) {
SSDataBlock *output = taosArrayGetP(pResList, i);

#if 1
char flag[10] = {0};
snprintf(flag, 10, "level %" PRIi8, pItem->level);
SArray *pResult = taosArrayInit(1, sizeof(SSDataBlock));
taosArrayPush(pResult, output);
blockDebugShowDataBlocks(pResult, flag);
taosArrayDestroy(pResult);
// blockDebugShowDataBlocks(output, flag);
// taosArrayDestroy(pResult);
#endif
STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
STsdb * sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
SSubmitReq *pReq = NULL;

// TODO: the schema update should be handled later(TD-17965)
if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) {
smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
@@ -644,17 +660,14 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
SMA_VID(pSma), suid, pItem->level, output->info.version);

taosMemoryFreeClear(pReq);
} else if (terrno == 0) {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
break;
} else {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
goto _err;
}
}

taosArrayDestroy(pResList);
return TSDB_CODE_SUCCESS;

_err:
taosArrayDestroy(pResList);
return TSDB_CODE_FAILED;
}

@@ -1407,7 +1420,7 @@ int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
tEncoderClear(&encoder);
}
tEncoderClear(&encoder);

((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead);

@@ -1434,10 +1447,10 @@ _err:

/**
* @brief fetch rsma data of level 2/3 and submit
*
* @param pSma
* @param pMsg
* @return int32_t
*
* @param pSma
* @param pMsg
* @return int32_t
*/
int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
@@ -119,7 +119,7 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
SName stbFullName = {0};
tNameFromString(&stbFullName, pCfg->dstTbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
SVCreateStbReq pReq = {0};
pReq.name = (char*)tNameGetTableName(&stbFullName);
pReq.name = (char *)tNameGetTableName(&stbFullName);
pReq.suid = pCfg->dstTbUid;
pReq.schemaRow = pCfg->schemaRow;
pReq.schemaTag = pCfg->schemaTag;
@@ -200,8 +200,9 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
goto _err;
}

SBatchDeleteReq deleteReq;
SSubmitReq *pSubmitReq = tdBlockToSubmit((const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid,
pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId);
pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId, &deleteReq);

if (!pSubmitReq) {
smaError("vgId:%d, failed to gen submit blk while tsma insert for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),
@@ -230,4 +231,4 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
_err:
tdUnRefSmaStat(pSma, pStat);
return TSDB_CODE_FAILED;
}
}
@@ -183,7 +183,7 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
return 0;
}

int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen) {
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver) {
STqOffset offset = {0};
SDecoder decoder;
tDecoderInit(&decoder, msg, msgLen);
@@ -302,6 +302,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
tqError("tmq poll: consumer handle mismatch for consumer:%" PRId64
", in vgId:%d, subkey %s, handle consumer id %" PRId64,
consumerId, TD_VID(pTq->pVnode), pReq->subKey, pHandle->consumerId);
terrno = TSDB_CODE_TMQ_CONSUMER_MISMATCH;
return -1;
}

@@ -325,7 +325,7 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReader* pReader) {
for (int32_t i = 0; i < colActual; i++) {
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
SCellVal sVal = {0};
if (!tdSTSRowIterNext(&iter, pColData->info.colId, pColData->info.type, &sVal)) {
if (!tdSTSRowIterFetch(&iter, pColData->info.colId, pColData->info.type, &sVal)) {
break;
}
if (colDataAppend(pColData, curRow, sVal.val, sVal.valType != TD_VTYPE_NORM) < 0) {
@@ -16,7 +16,7 @@
#include "tq.h"

SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid,
const char* stbFullName, int32_t vgId) {
const char* stbFullName, int32_t vgId, SBatchDeleteReq* deleteReq) {
SSubmitReq* ret = NULL;
SArray* schemaReqs = NULL;
SArray* schemaReqSz = NULL;
@@ -33,10 +33,13 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
schemaReqSz = taosArrayInit(sz, sizeof(int32_t));
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
STagVal tagVal = {
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
.type = TSDB_DATA_TYPE_UBIGINT,
.i64 = (int64_t)pDataBlock->info.groupId,
if (pDataBlock->info.type == STREAM_DELETE_DATA) {
//
}
STagVal tagVal = {
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
.type = TSDB_DATA_TYPE_UBIGINT,
.i64 = (int64_t)pDataBlock->info.groupId,
};
STag* pTag = NULL;
taosArrayClear(tagArray);
@@ -157,6 +160,7 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k);
}
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
dataLen += rowLen;
@@ -176,17 +180,45 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
}

void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
const SArray* pRes = (const SArray*)data;
SVnode* pVnode = (SVnode*)vnode;
const SArray* pRes = (const SArray*)data;
SVnode* pVnode = (SVnode*)vnode;
SBatchDeleteReq deleteReq = {0};

tqDebug("vgId:%d, task %d write into table, block num: %d", TD_VID(pVnode), pTask->taskId, (int32_t)pRes->size);

ASSERT(pTask->tbSink.pTSchema);
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
pTask->tbSink.stbFullName, pVnode->config.vgId);
pTask->tbSink.stbFullName, pVnode->config.vgId, &deleteReq);

tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId);

int32_t code;
int32_t len;
tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
if (code < 0) {
//
ASSERT(0);
}
SEncoder encoder;
void* buf = taosMemoryCalloc(1, len + sizeof(SMsgHead));
void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
tEncoderInit(&encoder, abuf, len);
tEncodeSBatchDeleteReq(&encoder, &deleteReq);
tEncoderClear(&encoder);

if (taosArrayGetSize(deleteReq.deleteReqs) != 0) {
SRpcMsg msg = {
.msgType = TDMT_VND_BATCH_DEL,
.pCont = buf,
.contLen = len + sizeof(SMsgHead),
};
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
tqDebug("failed to put into write-queue since %s", terrstr());
}
}
taosArrayDestroy(deleteReq.deleteReqs);

/*tPrintFixedSchemaSubmitReq(pReq, pTask->tbSink.pTSchema);*/
// build write msg
SRpcMsg msg = {
@@ -393,15 +393,16 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
SMemSkipListNode *px;
SMemSkipListNode *pn;
TSDBKEY *pTKey;
int c;
int backward = flags & SL_MOVE_BACKWARD;
int fromPos = flags & SL_MOVE_FROM_POS;
int32_t backward = flags & SL_MOVE_BACKWARD;
int32_t fromPos = flags & SL_MOVE_FROM_POS;

if (backward) {
px = pTbData->sl.pTail;

for (int8_t iLevel = pTbData->sl.maxLevel - 1; iLevel >= pTbData->sl.level; iLevel--) {
pos[iLevel] = px;
if (!fromPos) {
for (int8_t iLevel = pTbData->sl.level; iLevel < pTbData->sl.maxLevel; iLevel++) {
pos[iLevel] = px;
}
}

if (pTbData->sl.level) {
@@ -412,7 +413,7 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
while (pn != pTbData->sl.pHead) {
pTKey = (TSDBKEY *)SL_NODE_DATA(pn);

c = tsdbKeyCmprFn(pTKey, pKey);
int32_t c = tsdbKeyCmprFn(pTKey, pKey);
if (c <= 0) {
break;
} else {
@@ -427,8 +428,10 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
} else {
px = pTbData->sl.pHead;

for (int8_t iLevel = pTbData->sl.maxLevel - 1; iLevel >= pTbData->sl.level; iLevel--) {
pos[iLevel] = px;
if (!fromPos) {
for (int8_t iLevel = pTbData->sl.level; iLevel < pTbData->sl.maxLevel; iLevel++) {
pos[iLevel] = px;
}
}

if (pTbData->sl.level) {
@@ -437,9 +440,7 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
for (int8_t iLevel = pTbData->sl.level - 1; iLevel >= 0; iLevel--) {
pn = SL_NODE_FORWARD(px, iLevel);
while (pn != pTbData->sl.pTail) {
pTKey = (TSDBKEY *)SL_NODE_DATA(pn);

c = tsdbKeyCmprFn(pTKey, pKey);
int32_t c = tsdbKeyCmprFn(SL_NODE_DATA(pn), pKey);
if (c >= 0) {
break;
} else {
@@ -480,36 +481,44 @@ static int32_t tbDataDoPut(SMemTable *pMemTable, STbData *pTbData, SMemSkipListN
goto _exit;
}
pNode->level = level;
for (int8_t iLevel = 0; iLevel < level; iLevel++) {
SL_NODE_FORWARD(pNode, iLevel) = NULL;
SL_NODE_BACKWARD(pNode, iLevel) = NULL;
}

tPutTSDBRow((uint8_t *)SL_NODE_DATA(pNode), pRow);

// put
for (int8_t iLevel = 0; iLevel < pNode->level; iLevel++) {
SMemSkipListNode *px = pos[iLevel];
for (int8_t iLevel = level - 1; iLevel >= 0; iLevel--) {
SMemSkipListNode *pn = pos[iLevel];
SMemSkipListNode *px;

if (forward) {
SMemSkipListNode *pNext = SL_NODE_FORWARD(px, iLevel);

SL_NODE_FORWARD(pNode, iLevel) = pNext;
SL_NODE_BACKWARD(pNode, iLevel) = px;

SL_NODE_BACKWARD(pNext, iLevel) = pNode;
SL_NODE_FORWARD(px, iLevel) = pNode;
} else {
SMemSkipListNode *pPrev = SL_NODE_BACKWARD(px, iLevel);
px = SL_NODE_FORWARD(pn, iLevel);

SL_NODE_BACKWARD(pNode, iLevel) = pn;
SL_NODE_FORWARD(pNode, iLevel) = px;
SL_NODE_BACKWARD(pNode, iLevel) = pPrev;
} else {
px = SL_NODE_BACKWARD(pn, iLevel);

SL_NODE_FORWARD(pPrev, iLevel) = pNode;
SL_NODE_BACKWARD(px, iLevel) = pNode;
SL_NODE_BACKWARD(pNode, iLevel) = px;
SL_NODE_FORWARD(pNode, iLevel) = pn;
}
}

for (int8_t iLevel = level - 1; iLevel >= 0; iLevel--) {
SMemSkipListNode *pn = pos[iLevel];
SMemSkipListNode *px;

if (forward) {
px = SL_NODE_FORWARD(pn, iLevel);

SL_NODE_FORWARD(pn, iLevel) = pNode;
SL_NODE_BACKWARD(px, iLevel) = pNode;
} else {
px = SL_NODE_BACKWARD(pn, iLevel);

SL_NODE_FORWARD(px, iLevel) = pNode;
SL_NODE_BACKWARD(pn, iLevel) = pNode;
}

pos[iLevel] = pNode;
}

pTbData->sl.size++;
if (pTbData->sl.level < pNode->level) {
pTbData->sl.level = pNode->level;
@@ -548,7 +557,7 @@ static int32_t tsdbInsertTableDataImpl(SMemTable *pMemTable, STbData *pTbData, i
// forward put rest data
row.pTSRow = tGetSubmitBlkNext(&blkIter);
if (row.pTSRow) {
for (int8_t iLevel = 0; iLevel < pTbData->sl.maxLevel; iLevel++) {
for (int8_t iLevel = pos[0]->level; iLevel < pTbData->sl.maxLevel; iLevel++) {
pos[iLevel] = SL_NODE_BACKWARD(pos[iLevel], iLevel);
}
do {
@@ -14,6 +14,7 @@
*/

#include "tsdb.h"
#include "osDef.h"
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)

typedef enum {
@@ -129,7 +130,8 @@ struct STsdbReader {
SBlockLoadSuppInfo suppInfo;
STsdbReadSnap* pReadSnap;
SIOCostSummary cost;
STSchema* pSchema;
STSchema* pSchema;// the newest version schema
STSchema* pMemSchema;// the previous schema for in-memory data, to avoid load schema too many times
SDataFReader* pFileReader;
SVersionRange verRange;

@@ -145,15 +147,14 @@ static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanI
SRowMerger* pMerger);
static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader);
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow);
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid);
static int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
int32_t rowIndex);
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
static void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader);
static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);

static void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
STsdbReader* pReader);
STsdbReader* pReader, bool* freeTSRow);
static void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
STSRow** pTSRow);
static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
@@ -181,6 +182,7 @@ static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {

if (IS_VAR_DATA_TYPE(pCol->info.type)) {
pSupInfo->buildBuf[i] = taosMemoryMalloc(pCol->info.bytes);
// tsdbInfo("-------------------%d\n", pCol->info.bytes);
}
}

@@ -673,6 +675,7 @@ static void doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_
colDataAppendNULL(pColInfoData, rowIndex);
} else {
varDataSetLen(pSup->buildBuf[colIndex], pColVal->value.nData);
ASSERT(pColVal->value.nData <= pColInfoData->info.bytes);
memcpy(varDataVal(pSup->buildBuf[colIndex]), pColVal->value.pData, pColVal->value.nData);
colDataAppend(pColInfoData, rowIndex, pSup->buildBuf[colIndex], false);
}
@@ -1210,6 +1213,51 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
return code;
}

static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pBlockData, int64_t key, SFileBlockDumpInfo* pDumpInfo) {

// opt version
// 1. it is not a border point
// 2. the direct next point is not an duplicated timestamp
if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
(pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
int32_t step = pReader->order == TSDB_ORDER_ASC? 1:-1;

int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
if (nextKey != key) { // merge is not needed
doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
pDumpInfo->rowIndex += step;
return true;
}
}

return false;
}

static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* pReader, uint64_t uid) {
// always set the newest schema version in pReader->pSchema
if (pReader->pSchema == NULL) {
pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, uid, -1);
}

if (sversion == pReader->pSchema->version) {
return pReader->pSchema;
}

if (pReader->pMemSchema == NULL) {
int32_t code =
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pMemSchema);
return pReader->pMemSchema;
}

if (pReader->pMemSchema->version == sversion) {
return pReader->pMemSchema;
}

taosMemoryFree(pReader->pMemSchema);
int32_t code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pMemSchema);
return pReader->pMemSchema;
}

static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
SIterInfo* pIter, int64_t key) {
SRowMerger merge = {0};
@@ -1220,16 +1268,23 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
SArray* pDelList = pBlockScanInfo->delSkyline;
bool freeTSRow = false;
uint64_t uid = pBlockScanInfo->uid;

// ascending order traverse
if (ASCENDING_TRAVERSE(pReader->order)) {
if (key < k.ts) {
tRowMergerInit(&merge, &fRow, pReader->pSchema);

doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
// imem & mem are all empty, only file exist
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
return TSDB_CODE_SUCCESS;
} else {
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
freeTSRow = true;
}
} else if (k.ts < key) { // k.ts < key
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader);
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
} else { // k.ts == key, ascending order: file block ----> imem rows -----> mem rows
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
|||
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
}
|
||||
} else { // descending order scan
|
||||
if (key < k.ts) {
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader);
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
} else if (k.ts < key) {
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
}
|
||||
} else { // descending order: mem rows -----> imem rows ------> file block
|
||||
updateSchema(pRow, pBlockScanInfo->uid, pReader);
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
|
||||
tRowMergerInit(&merge, pRow, pReader->pSchema);
|
||||
tRowMergerInit(&merge, pRow, pSchema);
|
||||
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMerge(&merge, &fRow);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
}
|
||||
}
|
||||
|
||||
tRowMergerClear(&merge);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
}
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
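// doMergeBufAndFileRows now tracks row ownership with freeTSRow: when doMergeMultiRows hands
// back the in-memory row as-is it must not be freed here, and only rows materialized by
// tRowMergerGetRow are released. The guarded free replaces what appears to be the previous
// unconditional taosMemoryFree(pTSRow) shown above.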
|
||||
|
||||
|
@ -1280,12 +1344,12 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
ASSERT(pRow != NULL && piRow != NULL);
|
||||
|
||||
int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
|
||||
bool freeTSRow = false;
|
||||
|
||||
uint64_t uid = pBlockScanInfo->uid;
|
||||
|
||||
TSDBKEY k = TSDBROW_KEY(pRow);
|
||||
TSDBKEY ik = TSDBROW_KEY(piRow);
|
||||
|
||||
if (ASCENDING_TRAVERSE(pReader->order)) {
|
||||
// [1&2] key <= [k.ts && ik.ts]
|
||||
if (key <= k.ts && key <= ik.ts) {
|
||||
|
@ -1305,7 +1369,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else { // key > ik.ts || key > k.ts
|
||||
ASSERT(key != ik.ts);
|
||||
|
@ -1313,16 +1377,22 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
// [3] ik.ts < key <= k.ts
|
||||
// [4] ik.ts < k.ts <= key
|
||||
if (ik.ts < k.ts) {
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// [5] k.ts < key <= ik.ts
|
||||
// [6] k.ts < ik.ts <= key
|
||||
if (k.ts < ik.ts) {
|
||||
doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1331,16 +1401,17 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
ASSERT(key > ik.ts && key > k.ts);
|
||||
|
||||
doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
taosMemoryFree(pTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
} else { // descending order scan
|
||||
// [1/2] k.ts >= ik.ts && k.ts >= key
|
||||
if (k.ts >= ik.ts && k.ts >= key) {
|
||||
updateSchema(pRow, uid, pReader);
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
|
||||
tRowMergerInit(&merge, pRow, pReader->pSchema);
|
||||
tRowMergerInit(&merge, pRow, pSchema);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
if (ik.ts == k.ts) {
|
||||
|
@ -1355,7 +1426,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
ASSERT(ik.ts != k.ts); // this case has been included in the previous if branch
|
||||
|
@ -1363,8 +1434,11 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
// [3] ik.ts > k.ts >= Key
|
||||
// [4] ik.ts > key >= k.ts
|
||||
if (ik.ts > key) {
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1376,19 +1450,22 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
taosMemoryFree(pTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
//[7] key = ik.ts > k.ts
|
||||
if (key == ik.ts) {
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
tRowMerge(&merge, &fRow);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
@ -1443,34 +1520,23 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
|
|||
}
|
||||
|
||||
// imem & mem are both empty, only file data exists
|
||||
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
|
||||
// opt version
|
||||
// 1. it is not a border point
|
||||
// 2. the direct next point is not a duplicated timestamp
|
||||
if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
|
||||
(pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
|
||||
int32_t step = pReader->order == TSDB_ORDER_ASC ? 1 : -1;
|
||||
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
|
||||
if (nextKey != key) { // merge is not needed
|
||||
doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
|
||||
pDumpInfo->rowIndex += step;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
STSRow* pTSRow = NULL;
|
||||
SRowMerger merge = {0};
|
||||
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
tRowMergerClear(&merge);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
|
||||
STSRow* pTSRow = NULL;
|
||||
SRowMerger merge = {0};
|
||||
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
tRowMergerClear(&merge);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2083,7 +2149,7 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
|
|||
}
|
||||
|
||||
TSDBROW* pRow = tsdbTbDataIterGet(pIter->iter);
|
||||
TSDBKEY key = TSDBROW_KEY(pRow);
|
||||
TSDBKEY key = {.ts = pRow->pTSRow->ts, .version = pRow->version};
|
||||
if (outOfTimeWindow(key.ts, &pReader->window)) {
|
||||
pIter->hasVal = false;
|
||||
return NULL;
|
||||
|
@ -2116,6 +2182,7 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
|
||||
STsdbReader* pReader) {
|
||||
while (1) {
|
||||
|
@ -2136,22 +2203,8 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDe
|
|||
break;
|
||||
}
|
||||
|
||||
int32_t sversion = TSDBROW_SVERSION(pRow);
|
||||
STSchema* pTSchema = NULL;
|
||||
if (pReader->pSchema == NULL || sversion != pReader->pSchema->version) {
|
||||
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema);
|
||||
if (pReader->pSchema == NULL) {
|
||||
pReader->pSchema = pTSchema;
|
||||
}
|
||||
} else {
|
||||
pTSchema = pReader->pSchema;
|
||||
}
|
||||
|
||||
STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, uid);
|
||||
tRowMergerAdd(pMerger, pRow, pTSchema);
|
||||
|
||||
if (pTSchema != pReader->pSchema) {
|
||||
taosMemoryFree(pTSchema);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -2259,42 +2312,50 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader) {
|
||||
int32_t sversion = TSDBROW_SVERSION(pRow);
|
||||
|
||||
if (pReader->pSchema == NULL) {
|
||||
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema);
|
||||
} else if (pReader->pSchema->version != sversion) {
|
||||
taosMemoryFreeClear(pReader->pSchema);
|
||||
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema);
|
||||
}
|
||||
}
|
||||
|
||||
void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
|
||||
STsdbReader* pReader) {
|
||||
STsdbReader* pReader, bool* freeTSRow) {
|
||||
|
||||
TSDBROW* pNextRow = NULL;
|
||||
TSDBROW current = *pRow;
|
||||
|
||||
{  // if the next valid row has a different timestamp, return the current row directly
|
||||
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);
|
||||
|
||||
if (!pIter->hasVal) {
|
||||
*pTSRow = current.pTSRow;
|
||||
*freeTSRow = false;
|
||||
return;
|
||||
} else { // has next point in mem/imem
|
||||
pNextRow = getValidRow(pIter, pDelList, pReader);
|
||||
if (pNextRow == NULL) {
|
||||
*pTSRow = current.pTSRow;
|
||||
*freeTSRow = false;
|
||||
return;
|
||||
}
|
||||
|
||||
if (current.pTSRow->ts != pNextRow->pTSRow->ts) {
|
||||
*pTSRow = current.pTSRow;
|
||||
*freeTSRow = false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
SRowMerger merge = {0};
|
||||
|
||||
TSDBKEY k = TSDBROW_KEY(pRow);
|
||||
// updateSchema(pRow, uid, pReader);
|
||||
int32_t sversion = TSDBROW_SVERSION(pRow);
|
||||
STSchema* pTSchema = NULL;
|
||||
if (pReader->pSchema == NULL || sversion != pReader->pSchema->version) {
|
||||
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema);
|
||||
if (pReader->pSchema == NULL) {
|
||||
pReader->pSchema = pTSchema;
|
||||
}
|
||||
} else {
|
||||
pTSchema = pReader->pSchema;
|
||||
}
|
||||
// get the correct schema for data in memory
|
||||
STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(¤t), pReader, uid);
|
||||
|
||||
tRowMergerInit2(&merge, pReader->pSchema, pRow, pTSchema);
|
||||
doMergeRowsInBuf(pIter, uid, k.ts, pDelList, &merge, pReader);
|
||||
tRowMergerInit2(&merge, pReader->pSchema, ¤t, pTSchema);
|
||||
|
||||
STSchema* pTSchema1 = doGetSchemaForTSRow(TSDBROW_SVERSION(pNextRow), pReader, uid);
|
||||
tRowMergerAdd(&merge, pNextRow, pTSchema1);
|
||||
|
||||
doMergeRowsInBuf(pIter, uid, current.pTSRow->ts, pDelList, &merge, pReader);
|
||||
tRowMergerGetRow(&merge, pTSRow);
|
||||
tRowMergerClear(&merge);
|
||||
|
||||
if (pTSchema != pReader->pSchema) {
|
||||
taosMemoryFree(pTSchema);
|
||||
}
|
||||
*freeTSRow = true;
|
||||
}
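// doMergeMultiRows above returns the current in-memory row directly (setting *freeTSRow to
// false) when the iterator is exhausted or the next valid row carries a different timestamp.
// Otherwise it merges all buffered rows sharing that timestamp, using per-version schemas via
// tRowMergerInit2/tRowMergerAdd, and hands back a newly allocated row with *freeTSRow set to
// true so the caller knows to free it.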
|
||||
|
||||
void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
|
||||
|
@ -2305,17 +2366,17 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
|
|||
TSDBKEY ik = TSDBROW_KEY(piRow);
|
||||
|
||||
if (ASCENDING_TRAVERSE(pReader->order)) { // ascending order imem --> mem
|
||||
updateSchema(piRow, pBlockScanInfo->uid, pReader);
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
|
||||
tRowMergerInit(&merge, piRow, pReader->pSchema);
|
||||
tRowMergerInit(&merge, piRow, pSchema);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMerge(&merge, pRow);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
} else {
|
||||
updateSchema(pRow, pBlockScanInfo->uid, pReader);
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
|
||||
tRowMergerInit(&merge, pRow, pReader->pSchema);
|
||||
tRowMergerInit(&merge, pRow, pSchema);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMerge(&merge, piRow);
|
||||
|
@ -2326,7 +2387,7 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
|
|||
}
|
||||
|
||||
int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow,
|
||||
int64_t endKey) {
|
||||
int64_t endKey, bool* freeTSRow) {
|
||||
TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
|
||||
TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
|
||||
SArray* pDelList = pBlockScanInfo->delSkyline;
|
||||
|
@ -2352,35 +2413,36 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
|
|||
TSDBKEY ik = TSDBROW_KEY(piRow);
|
||||
|
||||
if (ik.ts < k.ts) { // ik.ts < k.ts
|
||||
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader);
|
||||
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
|
||||
} else if (k.ts < ik.ts) {
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader);
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
|
||||
} else { // ik.ts == k.ts
|
||||
doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
|
||||
*freeTSRow = true;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (pBlockScanInfo->iter.hasVal && pRow != NULL) {
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader);
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (pBlockScanInfo->iiter.hasVal && piRow != NULL) {
|
||||
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader);
|
||||
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow) {
|
||||
int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid) {
|
||||
int32_t numOfRows = pBlock->info.rows;
|
||||
int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock);
|
||||
|
||||
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
|
||||
STSchema* pSchema = pReader->pSchema;
|
||||
STSchema* pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);
|
||||
|
||||
SColVal colVal = {0};
|
||||
int32_t i = 0, j = 0;
|
||||
|
@ -2396,7 +2458,7 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow*
|
|||
col_id_t colId = pColInfoData->info.colId;
|
||||
|
||||
if (colId == pSchema->columns[j].colId) {
|
||||
tTSRowGetVal(pTSRow, pReader->pSchema, j, &colVal);
|
||||
tTSRowGetVal(pTSRow, pSchema, j, &colVal);
|
||||
doCopyColVal(pColInfoData, numOfRows, i, &colVal, pSupInfo);
|
||||
i += 1;
|
||||
j += 1;
|
||||
|
@ -2466,13 +2528,16 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
|
|||
|
||||
do {
|
||||
STSRow* pTSRow = NULL;
|
||||
tsdbGetNextRowInMem(pBlockScanInfo, pReader, &pTSRow, endKey);
|
||||
bool freeTSRow = false;
|
||||
tsdbGetNextRowInMem(pBlockScanInfo, pReader, &pTSRow, endKey, &freeTSRow);
|
||||
if (pTSRow == NULL) {
|
||||
break;
|
||||
}
|
||||
|
||||
doAppendRowFromTSRow(pBlock, pReader, pTSRow);
|
||||
taosMemoryFree(pTSRow);
|
||||
doAppendRowFromTSRow(pBlock, pReader, pTSRow, pBlockScanInfo->uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
}
|
||||
|
||||
// no data in buffer, return immediately
|
||||
if (!(pBlockScanInfo->iter.hasVal || pBlockScanInfo->iiter.hasVal)) {
|
||||
|
@ -2631,6 +2696,11 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
|
|||
STsdbReader* pPrevReader = pReader->innerReader[0];
|
||||
SDataBlockIter* pBlockIter = &pPrevReader->status.blockIter;
|
||||
|
||||
code = tsdbTakeReadSnap(pPrevReader->pTsdb, &pPrevReader->pReadSnap);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _err;
|
||||
}
|
||||
|
||||
initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order,
|
||||
pPrevReader->idStr);
|
||||
resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap);
|
||||
|
@ -2660,7 +2730,6 @@ void tsdbReaderClose(STsdbReader* pReader) {
|
|||
}
|
||||
|
||||
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
|
||||
|
||||
tsdbUntakeReadSnap(pReader->pTsdb, pReader->pReadSnap);
|
||||
|
||||
taosMemoryFreeClear(pSupInfo->plist);
|
||||
|
@ -2688,16 +2757,15 @@ void tsdbReaderClose(STsdbReader* pReader) {
|
|||
SIOCostSummary* pCost = &pReader->cost;
|
||||
|
||||
tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
|
||||
" SMA-time:%.2f ms, "
|
||||
"fileBlocks:%" PRId64
|
||||
", fileBlocks-time:%.2f ms, build in-memory-block-time:%.2f ms, STableBlockScanInfo "
|
||||
"size:%.2f Kb %s",
|
||||
" SMA-time:%.2f ms, fileBlocks:%" PRId64 ", fileBlocks-time:%.2f ms, "
|
||||
"build in-memory-block-time:%.2f ms, STableBlockScanInfo size:%.2f Kb %s",
|
||||
pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaData, pCost->smaLoadTime,
|
||||
pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock,
|
||||
numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr);
|
||||
|
||||
taosMemoryFree(pReader->idStr);
|
||||
taosMemoryFree(pReader->pSchema);
|
||||
taosMemoryFree(pReader->pMemSchema);
|
||||
taosMemoryFreeClear(pReader);
|
||||
}
|
||||
|
||||
|
|
|
@@ -1520,22 +1520,22 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
        break;
      }
      case TSDB_DATA_TYPE_FLOAT: {
        pColAgg->sum += colVal.value.f;
        if (pColAgg->min > colVal.value.f) {
          pColAgg->min = colVal.value.f;
        *(double*)(&pColAgg->sum) += colVal.value.f;
        if (*(double*)(&pColAgg->min) > colVal.value.f) {
          *(double*)(&pColAgg->min) = colVal.value.f;
        }
        if (pColAgg->max < colVal.value.f) {
          pColAgg->max = colVal.value.f;
        if (*(double*)(&pColAgg->max) < colVal.value.f) {
          *(double*)(&pColAgg->max) = colVal.value.f;
        }
        break;
      }
      case TSDB_DATA_TYPE_DOUBLE: {
        pColAgg->sum += colVal.value.d;
        if (pColAgg->min > colVal.value.d) {
          pColAgg->min = colVal.value.d;
        *(double*)(&pColAgg->sum) += colVal.value.d;
        if (*(double*)(&pColAgg->min) > colVal.value.d) {
          *(double*)(&pColAgg->min) = colVal.value.d;
        }
        if (pColAgg->max < colVal.value.d) {
          pColAgg->max = colVal.value.d;
        if (*(double*)(&pColAgg->max) < colVal.value.d) {
          *(double*)(&pColAgg->max) = colVal.value.d;
        }
        break;
      }
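// The FLOAT and DOUBLE cases above switch to *(double*)(&pColAgg->sum/min/max) casts: the
// aggregate slots in SColumnDataAgg are 8-byte fields that are not declared as double (an
// assumption based on the casts introduced in this hunk), so floating-point statistics are
// accumulated by reinterpreting that storage as a double rather than assigning through the
// integer-typed fields directly.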
@ -42,7 +42,7 @@ int vnodeBegin(SVnode *pVnode) {
|
|||
|
||||
pVnode->state.commitID++;
|
||||
// begin meta
|
||||
if (metaBegin(pVnode->pMeta) < 0) {
|
||||
if (metaBegin(pVnode->pMeta, 0) < 0) {
|
||||
vError("vgId:%d, failed to begin meta since %s", TD_VID(pVnode), tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -29,6 +29,7 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void
|
|||
static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
|
||||
int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
|
||||
int32_t code = 0;
|
||||
|
@ -190,6 +191,9 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
|
|||
case TDMT_VND_DELETE:
|
||||
if (vnodeProcessDeleteReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
|
||||
break;
|
||||
case TDMT_VND_BATCH_DEL:
|
||||
if (vnodeProcessBatchDeleteReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
|
||||
break;
|
||||
/* TQ */
|
||||
case TDMT_VND_MQ_VG_CHANGE:
|
||||
if (tqProcessVgChangeReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
|
||||
|
@ -204,7 +208,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
|
|||
break;
|
||||
case TDMT_VND_MQ_COMMIT_OFFSET:
|
||||
if (tqProcessOffsetCommitReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
|
||||
pMsg->contLen - sizeof(SMsgHead)) < 0) {
|
||||
pMsg->contLen - sizeof(SMsgHead), version) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
break;
|
||||
|
@ -1053,6 +1057,23 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
  SBatchDeleteReq deleteReq;
  SDecoder        decoder;
  tDecoderInit(&decoder, pReq, len);
  tDecodeSBatchDeleteReq(&decoder, &deleteReq);

  int32_t sz = taosArrayGetSize(deleteReq.deleteReqs);
  for (int32_t i = 0; i < sz; i++) {
    SSingleDeleteReq *pOneReq = taosArrayGet(deleteReq.deleteReqs, i);
    int32_t code = tsdbDeleteTableData(pVnode->pTsdb, version, deleteReq.suid, pOneReq->uid, pOneReq->ts, pOneReq->ts);
    if (code) {
      // TODO
    }
  }
  return 0;
}

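// vnodeProcessBatchDeleteReq above decodes an SBatchDeleteReq and applies each
// SSingleDeleteReq as a single-timestamp delete (ts..ts) on the tsdb for the request's suid.
// Neither the decode result nor per-row tsdbDeleteTableData failures are propagated yet (see
// the TODO), so the handler currently always returns 0.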
static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
|
||||
int32_t code = 0;
|
||||
SDecoder *pCoder = &(SDecoder){0};
|
||||
|
|
|
@ -672,6 +672,18 @@ static void vnodeRestoreFinish(struct SSyncFSM *pFsm) {
|
|||
vDebug("vgId:%d, sync restore finished", pVnode->config.vgId);
|
||||
}
|
||||
|
||||
static void vnodeBecomeFollower(struct SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;
  vDebug("vgId:%d, become follower", pVnode->config.vgId);

  // clear old leader resource
}

static void vnodeBecomeLeader(struct SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;
  vDebug("vgId:%d, become leader", pVnode->config.vgId);
}

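// vnodeBecomeFollower and vnodeBecomeLeader are wired into the sync FSM below via
// FpBecomeFollowerCb and FpBecomeLeaderCb; for now they only log the role change, with the
// follower hook reserved for releasing leader-only resources.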
static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
|
||||
SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
|
||||
pFsm->data = pVnode;
|
||||
|
@ -681,6 +693,8 @@ static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
|
|||
pFsm->FpGetSnapshotInfo = vnodeSyncGetSnapshot;
|
||||
pFsm->FpRestoreFinishCb = vnodeRestoreFinish;
|
||||
pFsm->FpLeaderTransferCb = vnodeLeaderTransfer;
|
||||
pFsm->FpBecomeLeaderCb = vnodeBecomeLeader;
|
||||
pFsm->FpBecomeFollowerCb = vnodeBecomeFollower;
|
||||
pFsm->FpReConfigCb = vnodeSyncReconfig;
|
||||
pFsm->FpSnapshotStartRead = vnodeSnapshotStartRead;
|
||||
pFsm->FpSnapshotStopRead = vnodeSnapshotStopRead;
|
||||
|
|
|
@ -43,12 +43,14 @@ extern "C" {
|
|||
#define EXPLAIN_GROUP_SORT_FORMAT "Group Sort"
|
||||
#define EXPLAIN_INTERVAL_FORMAT "Interval on Column %s"
|
||||
#define EXPLAIN_MERGE_INTERVAL_FORMAT "Merge Interval on Column %s"
|
||||
#define EXPLAIN_MERGE_ALIGNED_INTERVAL_FORMAT "Merge Aligned Interval on Column %s"
|
||||
#define EXPLAIN_FILL_FORMAT "Fill"
|
||||
#define EXPLAIN_SESSION_FORMAT "Session"
|
||||
#define EXPLAIN_STATE_WINDOW_FORMAT "StateWindow on Column %s"
|
||||
#define EXPLAIN_PARITION_FORMAT "Partition on Column %s"
|
||||
#define EXPLAIN_ORDER_FORMAT "Order: %s"
|
||||
#define EXPLAIN_FILTER_FORMAT "Filter: "
|
||||
#define EXPLAIN_MERGEBLOCKS_FORMAT "Merge ResBlocks: %s"
|
||||
#define EXPLAIN_FILL_VALUE_FORMAT "Fill Values: "
|
||||
#define EXPLAIN_ON_CONDITIONS_FORMAT "Join Cond: "
|
||||
#define EXPLAIN_TIMERANGE_FORMAT "Time Range: [%" PRId64 ", %" PRId64 "]"
|
||||
|
@ -56,9 +58,11 @@ extern "C" {
|
|||
#define EXPLAIN_TIME_WINDOWS_FORMAT "Time Window: interval=%" PRId64 "%c offset=%" PRId64 "%c sliding=%" PRId64 "%c"
|
||||
#define EXPLAIN_WINDOW_FORMAT "Window: gap=%" PRId64
|
||||
#define EXPLAIN_RATIO_TIME_FORMAT "Ratio: %f"
|
||||
#define EXPLAIN_MERGE_FORMAT "Merge"
|
||||
#define EXPLAIN_MERGE_FORMAT "SortMerge"
|
||||
#define EXPLAIN_MERGE_KEYS_FORMAT "Merge Key: "
|
||||
#define EXPLAIN_IGNORE_GROUPID_FORMAT "Ignore Group Id: %s"
|
||||
#define EXPLAIN_PARTITION_KETS_FORMAT "Partition Key: "
|
||||
#define EXPLAIN_INTERP_FORMAT "Interp"
|
||||
|
||||
#define EXPLAIN_PLANNING_TIME_FORMAT "Planning Time: %.3f ms"
|
||||
#define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms"
|
||||
|
@ -69,6 +73,7 @@ extern "C" {
|
|||
#define EXPLAIN_LEFT_PARENTHESIS_FORMAT " ("
|
||||
#define EXPLAIN_RIGHT_PARENTHESIS_FORMAT ")"
|
||||
#define EXPLAIN_BLANK_FORMAT " "
|
||||
#define EXPLAIN_COMMA_FORMAT ", "
|
||||
#define EXPLAIN_COST_FORMAT "cost=%.2f..%.2f"
|
||||
#define EXPLAIN_ROWS_FORMAT "rows=%" PRIu64
|
||||
#define EXPLAIN_COLUMNS_FORMAT "columns=%d"
|
||||
|
@ -86,6 +91,7 @@ extern "C" {
|
|||
#define EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT "output_order=%s"
|
||||
#define EXPLAIN_OFFSET_FORMAT "offset=%d"
|
||||
#define EXPLAIN_SOFFSET_FORMAT "soffset=%d"
|
||||
#define EXPLAIN_PARTITIONS_FORMAT "partitions=%d"
|
||||
|
||||
#define COMMAND_RESET_LOG "resetLog"
|
||||
#define COMMAND_SCHEDULE_POLICY "schedulePolicy"
|
||||
|
|
|
@ -535,6 +535,13 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
if (NULL != pTblScanNode->pGroupTags) {
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_PARTITION_KETS_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_PARTITIONS_FORMAT, pTblScanNode->pGroupTags->length);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
|
||||
if (pTblScanNode->scan.node.pConditions) {
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
|
||||
QRY_ERR_RET(nodesNodeToSQL(pTblScanNode->scan.node.pConditions, tbuf + VARSTR_HEADER_SIZE,
|
||||
|
@ -617,13 +624,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pPrjNode->mergeDataBlock? "True":"False");
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
if (pPrjNode->node.pConditions) {
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
|
||||
QRY_ERR_RET(nodesNodeToSQL(pPrjNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE,
|
||||
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -707,7 +718,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
}
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pAggNode->mergeDataBlock? "True":"False");
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -906,13 +921,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
}
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pIntNode->window.mergeDataBlock? "True":"False");
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL: {
|
||||
SMergeAlignedIntervalPhysiNode *pIntNode = (SMergeAlignedIntervalPhysiNode *)pNode;
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_INTERVAL_FORMAT, nodesGetNameFromColumnNode(pIntNode->window.pTspk));
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_MERGE_ALIGNED_INTERVAL_FORMAT, nodesGetNameFromColumnNode(pIntNode->window.pTspk));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
||||
if (pResNode->pExecInfo) {
|
||||
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
|
||||
|
@ -950,7 +969,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
}
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pIntNode->window.mergeDataBlock? "True":"False");
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1122,6 +1145,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_PARTITION_KETS_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_PARTITIONS_FORMAT, pPartNode->pPartitionKeys->length);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
if (pPartNode->node.pConditions) {
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
|
||||
QRY_ERR_RET(nodesNodeToSQL(pPartNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE,
|
||||
|
@ -1194,11 +1222,20 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGE_KEYS_FORMAT);
|
||||
if (pMergeNode->groupSort) {
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, "_group_id asc");
|
||||
if (LIST_LENGTH(pMergeNode->pMergeKeys) > 0) {
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_COMMA_FORMAT);
|
||||
}
|
||||
}
|
||||
for (int32_t i = 0; i < LIST_LENGTH(pMergeNode->pMergeKeys); ++i) {
|
||||
SOrderByExprNode *ptn = (SOrderByExprNode *)nodesListGetNode(pMergeNode->pMergeKeys, i);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, nodesGetNameFromColumnNode(ptn->pExpr));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, EXPLAIN_ORDER_STRING(ptn->order));
|
||||
if (i != LIST_LENGTH(pMergeNode->pMergeKeys) - 1) {
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_COMMA_FORMAT);
|
||||
}
|
||||
}
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
@ -1410,7 +1447,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
}
|
||||
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: {
|
||||
SInterpFuncPhysiNode *pInterpNode = (SInterpFuncPhysiNode *)pNode;
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT);
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_INTERP_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
||||
if (pResNode->pExecInfo) {
|
||||
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
|
||||
|
|
|
@ -266,7 +266,11 @@ typedef struct SExchangeInfo {
|
|||
SArray* pSourceDataInfo;
|
||||
tsem_t ready;
|
||||
void* pTransporter;
|
||||
SSDataBlock* pResult;
|
||||
// SArray<SSDataBlock*>, result block list, used to keep the multi-block that
|
||||
// passed by downstream operator
|
||||
SArray* pResultBlockList;
|
||||
int32_t rspBlockIndex; // indicate the return block index in pResultBlockList
|
||||
SSDataBlock* pDummyBlock; // dummy block, not keep data
|
||||
bool seqLoadData; // sequential load data or not, false by default
|
||||
int32_t current;
|
||||
SLoadRemoteDataInfo loadInfo;
|
||||
|
@ -520,6 +524,7 @@ typedef struct SBlockDistInfo {
|
|||
typedef struct SOptrBasicInfo {
|
||||
SResultRowInfo resultRowInfo;
|
||||
SSDataBlock* pRes;
|
||||
bool mergeResultBlock;
|
||||
} SOptrBasicInfo;
|
||||
|
||||
typedef struct SIntervalAggOperatorInfo {
|
||||
|
@ -554,8 +559,6 @@ typedef struct SMergeAlignedIntervalAggOperatorInfo {
|
|||
uint64_t groupId; // current groupId
|
||||
int64_t curTs; // current ts
|
||||
SSDataBlock* prefetchedBlock;
|
||||
bool inputBlocksFinished;
|
||||
|
||||
SNode* pCondition;
|
||||
} SMergeAlignedIntervalAggOperatorInfo;
|
||||
|
||||
|
@ -594,7 +597,6 @@ typedef struct SAggOperatorInfo {
|
|||
uint64_t groupId;
|
||||
SGroupResInfo groupResInfo;
|
||||
SExprSupp scalarExprSup;
|
||||
|
||||
SNode *pCondition;
|
||||
} SAggOperatorInfo;
|
||||
|
||||
|
@ -739,6 +741,7 @@ typedef struct STimeSliceOperatorInfo {
|
|||
SArray* pPrevRow; // SArray<SGroupValue>
|
||||
SArray* pNextRow; // SArray<SGroupValue>
|
||||
SArray* pLinearInfo; // SArray<SFillLinearInfo>
|
||||
bool fillLastPoint;
|
||||
bool isPrevRowSet;
|
||||
bool isNextRowSet;
|
||||
int32_t fillType; // fill type
|
||||
|
@ -860,9 +863,10 @@ void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLi
|
|||
void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset,
|
||||
int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput, int32_t order);
|
||||
|
||||
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
|
||||
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
|
||||
SArray* pColList);
|
||||
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList, char** pNextStart);
|
||||
void updateLoadRemoteInfo(SLoadRemoteDataInfo *pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
|
||||
SOperatorInfo* pOperator);
|
||||
|
||||
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
|
||||
|
||||
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
|
||||
|
@ -897,7 +901,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
|
|||
SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode *pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
|
||||
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
|
||||
int32_t numOfScalarExpr, bool mergeResult, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode *pNode, SExecTaskInfo* pTaskInfo);
|
||||
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
|
||||
|
@ -912,14 +916,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
|
|||
|
||||
SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
|
||||
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
|
||||
SExecTaskInfo* pTaskInfo);
|
||||
bool mergeResultBlock, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
|
||||
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
|
||||
SNode* pCondition, SExecTaskInfo* pTaskInfo);
|
||||
SNode* pCondition, bool mergeResultBlocks, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
|
||||
SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
|
||||
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
|
||||
SExecTaskInfo* pTaskInfo, int32_t numOfChild);
|
||||
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
|
||||
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
|
||||
STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
|
||||
|
|
|
@ -37,7 +37,6 @@ typedef struct SFillLinearInfo {
|
|||
SPoint start;
|
||||
SPoint end;
|
||||
bool hasNull;
|
||||
bool fillLastPoint;
|
||||
int16_t type;
|
||||
int32_t bytes;
|
||||
} SFillLinearInfo;
|
||||
|
|
|
@ -50,7 +50,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
|
|||
|
||||
STableListInfo* pTableList = &pTaskInfo->tableqinfoList;
|
||||
|
||||
initResultSizeInfo(&pOperator->resultInfo, 1024);
|
||||
initResultSizeInfo(&pOperator->resultInfo, 4096);
|
||||
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
|
||||
pInfo->pUidList = taosArrayInit(4, sizeof(int64_t));
|
||||
|
||||
|
|
|
@ -69,10 +69,10 @@ static bool needCompress(const SSDataBlock* pData, int32_t numOfCols) {
|
|||
|
||||
// clang-format off
|
||||
// data format:
|
||||
// +----------------+--------------+-----------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||
// |SDataCacheEntry | total length | group id | col1_schema | col2_schema | col3_schema... | column#1 length, column#2 length...| col1 bitmap | col1 data | col2 bitmap | col2 data | .... | | (4 bytes) |(8 bytes)
|
||||
// | |sizeof(int32) |sizeof(uint64_t) |(sizeof(int16_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | |
|
||||
// +----------------+--------------+-----------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||
// |SDataCacheEntry | version | total length | numOfRows | group id | col1_schema | col2_schema | col3_schema... | column#1 length, column#2 length...| col1 bitmap | col1 data | col2 bitmap | col2 data | .... | | (4 bytes) |(8 bytes)
|
||||
// | | sizeof(int32_t) |sizeof(int32) | sizeof(int32)| sizeof(uint64_t) | (sizeof(int8_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | |
|
||||
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||
// The length of bitmap is decided by number of rows of this data block, and the length of each column data is
|
||||
// recorded in the first segment, next to the struct header
|
||||
// clang-format on
|
||||
|
|
|
@ -213,6 +213,10 @@ int32_t dataBlockToSubmit(SDataInserterHandle* pInserter, SSubmitReq** pReq) {
|
|||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k);
|
||||
}
|
||||
}
|
||||
if(!fullCol) {
|
||||
rb.hasNone = true;
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
|
||||
if (ignoreRow) {
|
||||
continue;
|
||||
|
|
|
@ -348,12 +348,14 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
|
|||
taosThreadOnce(&initPoolOnce, initRefPool);
|
||||
atexit(cleanupRefPool);
|
||||
|
||||
qDebug("start to create subplan task, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId);
|
||||
|
||||
int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
|
||||
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 1000, .maxDataBlockNumPerQuery = 100};
|
||||
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 10000, .maxDataBlockNumPerQuery = 5000};
|
||||
code = dsDataSinkMgtInit(&cfg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
|
@ -372,6 +374,8 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
|
|||
}
|
||||
}
|
||||
|
||||
qDebug("subplan task create completed, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId);
|
||||
|
||||
_error:
|
||||
// if failed to add ref for all tables in this query, abort current query
|
||||
return code;
|
||||
|
@ -422,6 +426,80 @@ int waitMoment(SQInfo* pQInfo) {
|
|||
}
|
||||
#endif
|
||||
|
||||
static void freeBlock(void* param) {
|
||||
SSDataBlock* pBlock = *(SSDataBlock**) param;
|
||||
blockDataDestroy(pBlock);
|
||||
}
|
||||
|
||||
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
int64_t threadId = taosGetSelfPthreadId();
|
||||
|
||||
taosArrayClearEx(pResList, freeBlock);
|
||||
|
||||
int64_t curOwner = 0;
|
||||
if ((curOwner = atomic_val_compare_exchange_64(&pTaskInfo->owner, 0, threadId)) != 0) {
|
||||
qError("%s-%p execTask is now executed by thread:%p", GET_TASKID(pTaskInfo), pTaskInfo, (void*)curOwner);
|
||||
pTaskInfo->code = TSDB_CODE_QRY_IN_EXEC;
|
||||
return pTaskInfo->code;
|
||||
}
|
||||
|
||||
if (pTaskInfo->cost.start == 0) {
|
||||
pTaskInfo->cost.start = taosGetTimestampMs();
|
||||
}
|
||||
|
||||
if (isTaskKilled(pTaskInfo)) {
|
||||
atomic_store_64(&pTaskInfo->owner, 0);
|
||||
qDebug("%s already killed, abort", GET_TASKID(pTaskInfo));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// error occurs, record the error code and return to client
|
||||
int32_t ret = setjmp(pTaskInfo->env);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
pTaskInfo->code = ret;
|
||||
cleanUpUdfs();
|
||||
qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
|
||||
atomic_store_64(&pTaskInfo->owner, 0);
|
||||
|
||||
return pTaskInfo->code;
|
||||
}
|
||||
|
||||
qDebug("%s execTask is launched", GET_TASKID(pTaskInfo));
|
||||
|
||||
int32_t current = 0;
|
||||
SSDataBlock* pRes = NULL;
|
||||
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
while((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) {
|
||||
SSDataBlock* p = createOneDataBlock(pRes, true);
|
||||
current += p->info.rows;
|
||||
ASSERT(p->info.rows > 0);
|
||||
taosArrayPush(pResList, &p);
|
||||
|
||||
if (current >= 4096) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t el = (taosGetTimestampUs() - st);
|
||||
|
||||
pTaskInfo->cost.elapsedTime += el;
|
||||
if (NULL == pRes) {
|
||||
*useconds = pTaskInfo->cost.elapsedTime;
|
||||
}
|
||||
|
||||
cleanUpUdfs();
|
||||
uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
|
||||
|
||||
qDebug("%s task suspended, %d rows in %d blocks returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
|
||||
GET_TASKID(pTaskInfo), current, (int32_t) taosArrayGetSize(pResList), total, 0, el / 1000.0);
|
||||
|
||||
atomic_store_64(&pTaskInfo->owner, 0);
|
||||
return pTaskInfo->code;
|
||||
}
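// qExecTaskOpt drains the operator tree into pResList: each block returned by getNextFn is
// deep-copied with createOneDataBlock(pRes, true) and appended until about 4096 rows are
// buffered or the tree is exhausted, so ownership of the copies passes to the caller.
// taosArrayClearEx(pResList, freeBlock) releases blocks left over from the previous call, and
// the owner field acts as a compare-and-swap lock so only one thread runs the task at a time.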
|
||||
|
||||
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
int64_t threadId = taosGetSelfPthreadId();
|
||||
|
|
|
@ -88,7 +88,7 @@ static int32_t getExprFunctionId(SExprInfo* pExprInfo) {
|
|||
|
||||
static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes);
|
||||
|
||||
static void setBlockStatisInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock);
|
||||
static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock);
|
||||
|
||||
static void releaseQueryBuf(size_t numOfTables);
|
||||
|
||||
|
@ -136,9 +136,8 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn,
|
|||
|
||||
void operatorDummyCloseFn(void* param, int32_t numOfCols) {}
|
||||
|
||||
static int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo,
|
||||
SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo, const int32_t* rowCellOffset,
|
||||
SqlFunctionCtx* pCtx, int32_t numOfExprs);
|
||||
static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo);
|
||||
|
||||
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
|
||||
static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
|
||||
|
@ -434,7 +433,8 @@ static void doSetInputDataBlockInfo(SOperatorInfo* pOperator, SqlFunctionCtx* pC
|
|||
for (int32_t i = 0; i < pOperator->exprSupp.numOfExprs; ++i) {
|
||||
pCtx[i].order = order;
|
||||
pCtx[i].input.numOfRows = pBlock->info.rows;
|
||||
setBlockStatisInfo(&pCtx[i], &pOperator->exprSupp.pExprInfo[i], pBlock);
|
||||
setBlockSMAInfo(&pCtx[i], &pOperator->exprSupp.pExprInfo[i], pBlock);
|
||||
pCtx[i].pSrcBlock = pBlock;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -799,7 +799,7 @@ static int32_t doCreateConstantValColumnAggInfo(SInputColumnInfoData* pInput, SF
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void setBlockStatisInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pBlock) {
|
||||
void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pBlock) {
|
||||
int32_t numOfRows = pBlock->info.rows;
|
||||
|
||||
SInputColumnInfoData* pInput = &pCtx->input;
|
||||
|
@ -1012,16 +1012,6 @@ static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) {
|
|||
// }
|
||||
//}
|
||||
|
||||
// static FORCE_INLINE bool doFilterByBlockStatistics(STaskRuntimeEnv* pRuntimeEnv, SDataStatis *pDataStatis,
|
||||
// SqlFunctionCtx *pCtx, int32_t numOfRows) {
|
||||
// STaskAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
//
|
||||
// if (pDataStatis == NULL || pQueryAttr->pFilters == NULL) {
|
||||
// return true;
|
||||
// }
|
||||
//
|
||||
// return filterRangeExecute(pQueryAttr->pFilters, pDataStatis, pQueryAttr->numOfCols, numOfRows);
|
||||
// }
|
||||
#if 0
|
||||
static bool overlapWithTimeWindow(STaskAttr* pQueryAttr, SDataBlockInfo* pBlockInfo) {
|
||||
STimeWindow w = {0};
|
||||
|
@ -1216,7 +1206,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
|
|||
}
|
||||
|
||||
// current block has been discard due to filter applied
|
||||
// if (!doFilterByBlockStatistics(pRuntimeEnv, pBlock->pBlockAgg, pTableScanInfo->pCtx, pBlockInfo->rows)) {
|
||||
// if (!doFilterByBlockSMA(pRuntimeEnv, pBlock->pBlockAgg, pTableScanInfo->pCtx, pBlockInfo->rows)) {
|
||||
// pCost->skipBlocks += 1;
|
||||
// qDebug("QInfo:0x%"PRIx64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey,
|
||||
// pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
|
@ -1512,19 +1502,24 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo, const int32_t* rowCellOffset, SqlFunctionCtx* pCtx,
|
||||
int32_t numOfExprs) {
|
||||
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
|
||||
int32_t start = pGroupResInfo->index;
|
||||
int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo) {
|
||||
SExprInfo* pExprInfo = pSup->pExprInfo;
|
||||
int32_t numOfExprs = pSup->numOfExprs;
|
||||
int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
|
||||
SqlFunctionCtx* pCtx = pSup->pCtx;
|
||||
|
||||
for (int32_t i = start; i < numOfRows; i += 1) {
|
||||
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
|
||||
|
||||
for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) {
|
||||
SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
|
||||
SFilePage* page = getBufPage(pBuf, pPos->pos.pageId);
|
||||
|
||||
SResultRow* pRow = (SResultRow*)((char*)page + pPos->pos.offset);
|
||||
|
||||
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowCellOffset);
|
||||
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowEntryOffset);
|
||||
|
||||
// no results, continue to check the next one
|
||||
if (pRow->numOfRows == 0) {
|
||||
pGroupResInfo->index += 1;
|
||||
releaseBufPage(pBuf, page);
|
||||
|
@ -1542,6 +1537,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
|
|||
}
|
||||
|
||||
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
|
||||
ASSERT(pBlock->info.rows > 0);
|
||||
releaseBufPage(pBuf, page);
|
||||
break;
|
||||
}
|
||||
|
@ -1551,7 +1547,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
|
|||
for (int32_t j = 0; j < numOfExprs; ++j) {
|
||||
int32_t slotId = pExprInfo[j].base.resSchema.slotId;
|
||||
|
||||
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
|
||||
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset);
|
||||
if (pCtx[j].fpSet.finalize) {
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("\npage_finalize %d", numOfExprs);
|
||||
|
@ -1584,26 +1580,19 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
|
|||
|
||||
releaseBufPage(pBuf, page);
|
||||
pBlock->info.rows += pRow->numOfRows;
|
||||
// if (pBlock->info.rows >= pBlock->info.capacity) { // output buffer is full
|
||||
// break;
|
||||
// }
|
||||
}
|
||||
|
||||
qDebug("%s result generated, rows:%d, groupId:%" PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows,
|
||||
pBlock->info.groupId);
|
||||
|
||||
blockDataUpdateTsWindow(pBlock, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
|
||||
SDiskbasedBuf* pBuf) {
|
||||
SExprInfo* pExprInfo = pOperator->exprSupp.pExprInfo;
|
||||
int32_t numOfExprs = pOperator->exprSupp.numOfExprs;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
int32_t* rowCellOffset = pOperator->exprSupp.rowEntryInfoOffset;
|
||||
SSDataBlock* pBlock = pbInfo->pRes;
|
||||
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
|
||||
SSDataBlock* pBlock = pbInfo->pRes;
|
||||
|
||||
// set output datablock version
|
||||
pBlock->info.version = pTaskInfo->version;
|
||||
|
@ -1615,30 +1604,22 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
|
|||
|
||||
// clear the existed group id
|
||||
pBlock->info.groupId = 0;
|
||||
doCopyToSDataBlock(pTaskInfo, pBlock, pExprInfo, pBuf, pGroupResInfo, rowCellOffset, pCtx, numOfExprs);
|
||||
}
|
||||
|
||||
static void updateNumOfRowsInResultRows(SqlFunctionCtx* pCtx, int32_t numOfOutput, SResultRowInfo* pResultRowInfo,
|
||||
int32_t* rowEntryInfoOffset) {
|
||||
// update the number of result for each, only update the number of rows for the corresponding window result.
|
||||
// if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) {
|
||||
// return;
|
||||
// }
|
||||
#if 0
|
||||
for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
|
||||
SResultRow* pResult = pResultRowInfo->pResult[i];
|
||||
|
||||
for (int32_t j = 0; j < numOfOutput; ++j) {
|
||||
int32_t functionId = pCtx[j].functionId;
|
||||
if (functionId == FUNCTION_TS || functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ) {
|
||||
continue;
|
||||
if (!pbInfo->mergeResultBlock) {
|
||||
doCopyToSDataBlock(pTaskInfo, pBlock, &pOperator->exprSupp, pBuf, pGroupResInfo);
|
||||
} else {
|
||||
while(hasRemainResults(pGroupResInfo)) {
|
||||
doCopyToSDataBlock(pTaskInfo, pBlock, &pOperator->exprSupp, pBuf, pGroupResInfo);
|
||||
if (pBlock->info.rows >= pOperator->resultInfo.threshold) {
|
||||
break;
|
||||
}
|
||||
|
||||
SResultRowEntryInfo* pCell = getResultEntryInfo(pResult, j, rowEntryInfoOffset);
|
||||
pResult->numOfRows = (uint16_t)(TMAX(pResult->numOfRows, pCell->numOfRes));
|
||||
// clearing group id to continue to merge data that belong to different groups
|
||||
pBlock->info.groupId = 0;
|
||||
}
|
||||
|
||||
// clear the group id info in SSDataBlock, since the client does not need it
|
||||
pBlock->info.groupId = 0;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static int32_t compressQueryColData(SColumnInfoData* pColRes, int32_t numOfRows, char* data, int8_t compressed) {
|
||||
|
@ -1976,9 +1957,11 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
pRsp->compLen = htonl(pRsp->compLen);
|
||||
pRsp->numOfCols = htonl(pRsp->numOfCols);
|
||||
pRsp->useconds = htobe64(pRsp->useconds);
|
||||
pRsp->numOfBlocks = htonl(pRsp->numOfBlocks);
|
||||
|
||||
ASSERT(pRsp != NULL);
|
||||
qDebug("%s fetch rsp received, index:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfRows);
|
||||
qDebug("%s fetch rsp received, index:%d, blocks:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfBlocks,
|
||||
pRsp->numOfRows);
|
||||
} else {
|
||||
pSourceDataInfo->code = code;
|
||||
qDebug("%s fetch rsp received, index:%d, error:%d", pSourceDataInfo->taskId, index, tstrerror(code));
|
||||
|
@@ -2062,13 +2045,19 @@ static int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInf
return TSDB_CODE_SUCCESS;
}

int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
SArray* pColList) {
void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
SOperatorInfo* pOperator) {
pInfo->totalRows += numOfRows;
pInfo->totalSize += dataLen;
pInfo->totalElapsed += (taosGetTimestampUs() - startTs);
pOperator->resultInfo.totalRows += numOfRows;
}

int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList,
char** pNextStart) {
if (pColList == NULL) { // data from other sources
blockDataCleanup(pRes);
// blockDataEnsureCapacity(pRes, numOfRows);
blockDecode(pRes, numOfOutput, numOfRows, pData);
*pNextStart = (char*) blockDecode(pRes, pData);
} else { // extract data according to pColList
ASSERT(numOfOutput == taosArrayGetSize(pColList));
char* pStart = pData;
@@ -2092,28 +2081,17 @@ int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLo
blockDataAppendColInfo(pBlock, &idata);
}

blockDecode(pBlock, numOfCols, numOfRows, pStart);
blockDataEnsureCapacity(pRes, numOfRows);
blockDecode(pBlock, pStart);
blockDataEnsureCapacity(pRes, pBlock->info.rows);

// data from mnode
pRes->info.rows = numOfRows;
pRes->info.rows = pBlock->info.rows;
relocateColumnData(pRes, pColList, pBlock->pDataBlock, false);
blockDataDestroy(pBlock);
}

// todo move this to time window aggregator, since the primary timestamp may not be known by exchange operator.
blockDataUpdateTsWindow(pRes, 0);

int64_t el = taosGetTimestampUs() - startTs;

pLoadInfo->totalRows += numOfRows;
pLoadInfo->totalSize += compLen;

if (total != NULL) {
*total += numOfRows;
}

pLoadInfo->totalElapsed += el;
return TSDB_CODE_SUCCESS;
}
@@ -2135,8 +2113,8 @@ static void* setAllSourcesCompleted(SOperatorInfo* pOperator, int64_t startTs) {
return NULL;
}

static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeInfo* pExchangeInfo,
SExecTaskInfo* pTaskInfo) {
static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeInfo* pExchangeInfo,
SExecTaskInfo* pTaskInfo) {
int32_t code = 0;
int64_t startTs = taosGetTimestampUs();
size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);
@@ -2162,7 +2140,6 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
SRetrieveTableRsp* pRsp = pDataInfo->pRsp;
SDownstreamSourceNode* pSource = taosArrayGet(pExchangeInfo->pSources, i);

SSDataBlock* pRes = pExchangeInfo->pResult;
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
if (pRsp->numOfRows == 0) {
qDebug("%s vgId:%d, taskId:0x%" PRIx64 " execId:%d index:%d completed, rowsOfSource:%" PRIu64
@@ -2175,29 +2152,35 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
continue;
}

SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
code =
extractDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
if (code != 0) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;
int32_t index = 0;
char* pStart = pRetrieveRsp->data;
while(index++ < pRetrieveRsp->numOfBlocks) {
SSDataBlock* pb = createOneDataBlock(pExchangeInfo->pDummyBlock, false);
code = extractDataBlockFromFetchRsp(pb, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);
if (code != 0) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
}

taosArrayPush(pExchangeInfo->pResultBlockList, &pb);
}

updateLoadRemoteInfo(pLoadInfo, pRetrieveRsp->numOfRows, pRetrieveRsp->compLen, startTs, pOperator);

if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64
" execId:%d"
" index:%d completed, numOfRows:%d, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64
", completed:%d try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRes->info.rows,
pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, completed + 1, i + 1, totalSources);
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d"
" index:%d completed, blocks:%d, numOfRows:%d, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", total:%.2f Kb,"
" completed:%d try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRsp->numOfBlocks,
pRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0,
completed + 1, i + 1, totalSources);
completed += 1;
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
} else {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, totalRows:%" PRIu64
", totalBytes:%" PRIu64,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows,
pLoadInfo->totalRows, pLoadInfo->totalSize);
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d blocks:%d, numOfRows:%d, totalRows:%" PRIu64
", total:%.2f Kb", GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId,
pRsp->numOfBlocks, pRsp->numOfRows, pLoadInfo->totalRows, pLoadInfo->totalSize/1024.0);
}

taosMemoryFreeClear(pDataInfo->pRsp);
@@ -2211,11 +2194,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
}
}

return pExchangeInfo->pResult;
return;
}

if (completed == totalSources) {
return setAllSourcesCompleted(pOperator, startTs);
setAllSourcesCompleted(pOperator, startTs);
return;
}

sched_yield();
@@ -2223,7 +2207,6 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx

_error:
pTaskInfo->code = code;
return NULL;
}

static int32_t prepareConcurrentlyLoad(SOperatorInfo* pOperator) {
@@ -2253,7 +2236,7 @@ static int32_t prepareConcurrentlyLoad(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}

static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
static int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
SExchangeInfo* pExchangeInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -2262,7 +2245,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {

while (1) {
if (pExchangeInfo->current >= totalSources) {
return setAllSourcesCompleted(pOperator, startTs);
setAllSourcesCompleted(pOperator, startTs);
return TSDB_CODE_SUCCESS;
}

doSendFetchDataRequest(pExchangeInfo, pTaskInfo, pExchangeInfo->current);
@@ -2275,7 +2259,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
qError("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d error happens, code:%s", GET_TASKID(pTaskInfo),
pSource->addr.nodeId, pSource->taskId, pSource->execId, tstrerror(pDataInfo->code));
pOperator->pTaskInfo->code = pDataInfo->code;
return NULL;
return pOperator->pTaskInfo->code;
}

SRetrieveTableRsp* pRsp = pDataInfo->pRsp;
@@ -2292,16 +2276,15 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
continue;
}

SSDataBlock* pRes = pExchangeInfo->pResult;
SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
int32_t code =
extractDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;

char* pStart = pRetrieveRsp->data;
int32_t code = extractDataBlockFromFetchRsp(NULL, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);

if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, rowsOfSource:%" PRIu64
", totalRows:%" PRIu64 ", totalBytes:%" PRIu64 " try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, pExchangeInfo->current + 1,
totalSources);
@@ -2310,13 +2293,15 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
} else {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, totalRows:%" PRIu64
", totalBytes:%" PRIu64,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
pLoadInfo->totalRows, pLoadInfo->totalSize);
}

pOperator->resultInfo.totalRows += pRes->info.rows;
updateLoadRemoteInfo(pLoadInfo, pRetrieveRsp->numOfRows, pRetrieveRsp->compLen, startTs, pOperator);
pDataInfo->totalRows += pRetrieveRsp->numOfRows;

taosMemoryFreeClear(pDataInfo->pRsp);
return pExchangeInfo->pResult;
return TSDB_CODE_SUCCESS;
}
}
@@ -2340,6 +2325,11 @@ static int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}

static void freeBlock(void* pParam) {
SSDataBlock* pBlock = *(SSDataBlock**)pParam;
blockDataDestroy(pBlock);
}

static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) {
SExchangeInfo* pExchangeInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -2349,9 +2339,9 @@ static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) {
return NULL;
}

size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);

SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
if (pOperator->status == OP_EXEC_DONE) {
qDebug("%s all %" PRIzu " source(s) are exhausted, total rows:%" PRIu64 " bytes:%" PRIu64 ", elapsed:%.2f ms",
GET_TASKID(pTaskInfo), totalSources, pLoadInfo->totalRows, pLoadInfo->totalSize,
@@ -2359,11 +2349,23 @@ static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) {
return NULL;
}

if (pExchangeInfo->seqLoadData) {
return seqLoadRemoteData(pOperator);
} else {
return concurrentlyLoadRemoteDataImpl(pOperator, pExchangeInfo, pTaskInfo);
size_t size = taosArrayGetSize(pExchangeInfo->pResultBlockList);
if (size == 0 || pExchangeInfo->rspBlockIndex >= size) {
pExchangeInfo->rspBlockIndex = 0;
taosArrayClearEx(pExchangeInfo->pResultBlockList, freeBlock);
if (pExchangeInfo->seqLoadData) {
seqLoadRemoteData(pOperator);
} else {
concurrentlyLoadRemoteDataImpl(pOperator, pExchangeInfo, pTaskInfo);
}

if (taosArrayGetSize(pExchangeInfo->pResultBlockList) == 0) {
return NULL;
}
}

// we have buffered retrieved datablock, return it directly
return taosArrayGetP(pExchangeInfo->pResultBlockList, pExchangeInfo->rspBlockIndex++);
}

static SSDataBlock* doLoadRemoteData(SOperatorInfo* pOperator) {
@@ -2380,26 +2382,24 @@ static SSDataBlock* doLoadRemoteData(SOperatorInfo* pOperator) {
return NULL;
}

ASSERT(pBlock == pExchangeInfo->pResult);

SLimitInfo* pLimitInfo = &pExchangeInfo->limitInfo;
if (hasLimitOffsetInfo(pLimitInfo)) {
int32_t status = handleLimitOffset(pOperator, pLimitInfo, pExchangeInfo->pResult, false);
int32_t status = handleLimitOffset(pOperator, pLimitInfo, pBlock, false);
if (status == PROJECT_RETRIEVE_CONTINUE) {
continue;
} else if (status == PROJECT_RETRIEVE_DONE) {
size_t rows = pExchangeInfo->pResult->info.rows;
size_t rows = pBlock->info.rows;
pExchangeInfo->limitInfo.numOfOutputRows += rows;

if (rows == 0) {
doSetOperatorCompleted(pOperator);
return NULL;
} else {
return pExchangeInfo->pResult;
return pBlock;
}
}
} else {
return pExchangeInfo->pResult;
return pBlock;
}
}
}
@@ -2462,16 +2462,18 @@ SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode
}

tsem_init(&pInfo->ready, 0, 0);
pInfo->pDummyBlock = createResDataBlock(pExNode->node.pOutputDataBlockDesc);
pInfo->pResultBlockList = taosArrayInit(1, POINTER_BYTES);

pInfo->seqLoadData = false;
pInfo->pTransporter = pTransporter;
pInfo->pResult = createResDataBlock(pExNode->node.pOutputDataBlockDesc);

pOperator->name = "ExchangeOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_EXCHANGE;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pResult->pDataBlock);
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pDummyBlock->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;

pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL,
@@ -2990,6 +2992,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
break;
}
}

size_t rows = blockDataGetNumOfRows(pInfo->pRes);
pOperator->resultInfo.totalRows += rows;
@@ -3413,6 +3416,7 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
}
int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
qError("Create agg result buf failed since %s", tstrerror(code));
return code;
}
@@ -3432,7 +3436,11 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
return code;
}

doInitAggInfoSup(pAggSup, pSup->pCtx, numOfCols, keyBufSize, pkey);
code = doInitAggInfoSup(pAggSup, pSup->pCtx, numOfCols, keyBufSize, pkey);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

for (int32_t i = 0; i < numOfCols; ++i) {
pSup->pCtx[i].pBuf = pAggSup->pResultBuf;
}
@@ -3498,17 +3506,16 @@ void cleanupExprSupp(SExprSupp* pSupp) {

SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) {
int32_t numOfScalarExpr, bool mergeResult, SExecTaskInfo* pTaskInfo) {
SAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}

int32_t numOfRows = 1024;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;

initResultSizeInfo(&pOperator->resultInfo, numOfRows);
initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@@ -3520,6 +3527,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
goto _error;
}

pInfo->binfo.mergeResultBlock = mergeResult;
pInfo->groupId = UINT64_MAX;
pInfo->pCondition = pCondition;
pOperator->name = "TableAggregate";
@@ -3601,12 +3609,15 @@ void doDestroyExchangeOperatorInfo(void* param) {

taosArrayDestroy(pExInfo->pSources);
taosArrayDestroy(pExInfo->pSourceDataInfo);
if (pExInfo->pResult != NULL) {
pExInfo->pResult = blockDataDestroy(pExInfo->pResult);

if (pExInfo->pResultBlockList != NULL) {
taosArrayDestroyEx(pExInfo->pResultBlockList, freeBlock);
pExInfo->pResultBlockList = NULL;
}

tsem_destroy(&pExInfo->ready);
blockDataDestroy(pExInfo->pDummyBlock);

tsem_destroy(&pExInfo->ready);
taosMemoryFreeClear(param);
}
@@ -4071,7 +4082,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else {
ASSERT(0);
}
pOperator->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;

if (pOperator != NULL) {
pOperator->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
}

return pOperator;
}
@@ -4110,7 +4125,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pScalarExprInfo, numOfScalarExpr, pTaskInfo);
} else {
pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pAggNode->node.pConditions,
pScalarExprInfo, numOfScalarExpr, pTaskInfo);
pScalarExprInfo, numOfScalarExpr, pAggNode->mergeDataBlock, pTaskInfo);
}
} else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
@@ -4152,7 +4167,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo

int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId,
pPhyNode->pConditions, pTaskInfo);
pPhyNode->pConditions, pIntervalPhyNode->window.mergeDataBlock, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL == type) {
SMergeIntervalPhysiNode* pIntervalPhyNode = (SMergeIntervalPhysiNode*)pPhyNode;
@@ -4167,7 +4182,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
.precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};

int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, pTaskInfo);
pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, pIntervalPhyNode->window.mergeDataBlock, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
int32_t children = 0;
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
@@ -727,7 +727,7 @@ static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFree(pInfo->columnOffset);

cleanupExprSupp(&pInfo->scalarSup);

destroyDiskbasedBuf(pInfo->pBuf);
taosMemoryFreeClear(param);
}
@@ -132,6 +132,8 @@ void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param;
nodesDestroyNode(pJoinOperator->pCondAfterMerge);

pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes);

taosMemoryFreeClear(param);
}
@@ -13,10 +13,11 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "os.h"
#include "executorimpl.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
#include "os.h"
#include "querynodes.h"
#include "systable.h"
#include "tname.h"
@@ -227,6 +228,57 @@ static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo*
return TSDB_CODE_SUCCESS;
}

static FORCE_INLINE bool doFilterByBlockSMA(const SNode* pFilterNode, SColumnDataAgg** pColsAgg, int32_t numOfCols,
int32_t numOfRows) {
if (pColsAgg == NULL || pFilterNode == NULL) {
return true;
}

SFilterInfo* filter = NULL;

// todo move to the initialization function
int32_t code = filterInitFromNode((SNode*)pFilterNode, &filter, 0);
bool keep = filterRangeExecute(filter, pColsAgg, numOfCols, numOfRows);

filterFreeInfo(filter);
return keep;
}

static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
bool allColumnsHaveAgg = true;
SColumnDataAgg** pColAgg = NULL;

int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, &pColAgg, &allColumnsHaveAgg);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}

if (!allColumnsHaveAgg) {
return false;
}

// if (allColumnsHaveAgg == true) {
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);

// todo create this buffer during creating operator
if (pBlock->pBlockAgg == NULL) {
pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
if (pBlock->pBlockAgg == NULL) {
longjmp(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
}
}

for (int32_t i = 0; i < taosArrayGetSize(pTableScanInfo->pColMatchInfo); ++i) {
SColMatchInfo* pColMatchInfo = taosArrayGet(pTableScanInfo->pColMatchInfo, i);
if (!pColMatchInfo->output) {
continue;
}
pBlock->pBlockAgg[pColMatchInfo->targetSlotId] = pColAgg[i];
}

return true;
}

static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -236,6 +288,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca

pCost->totalBlocks += 1;
pCost->totalRows += pBlock->info.rows;
bool loadSMA = false;

*status = pInfo->dataBlockLoadFlag;
if (pTableScanInfo->pFilterNode != NULL ||
@ -259,60 +312,50 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
|
|||
return TSDB_CODE_SUCCESS;
|
||||
} else if (*status == FUNC_DATA_REQUIRED_STATIS_LOAD) {
|
||||
pCost->loadBlockStatis += 1;
|
||||
|
||||
bool allColumnsHaveAgg = true;
|
||||
SColumnDataAgg** pColAgg = NULL;
|
||||
|
||||
int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, &pColAgg, &allColumnsHaveAgg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
longjmp(pTaskInfo->env, code);
|
||||
}
|
||||
|
||||
if (allColumnsHaveAgg == true) {
|
||||
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
|
||||
// todo create this buffer during creating operator
|
||||
if (pBlock->pBlockAgg == NULL) {
|
||||
pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pTableScanInfo->pColMatchInfo); ++i) {
|
||||
SColMatchInfo* pColMatchInfo = taosArrayGet(pTableScanInfo->pColMatchInfo, i);
|
||||
if (!pColMatchInfo->output) {
|
||||
continue;
|
||||
}
|
||||
pBlock->pBlockAgg[pColMatchInfo->targetSlotId] = pColAgg[i];
|
||||
}
|
||||
|
||||
loadSMA = true; // mark the operation of load sma;
|
||||
bool success = doLoadBlockSMA(pTableScanInfo, pBlock, pTaskInfo);
|
||||
if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead
|
||||
qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
|
||||
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else { // failed to load the block sma data, data block statistics does not exist, load data block instead
|
||||
} else {
|
||||
qDebug("%s failed to load SMA, since not all columns have SMA", GET_TASKID(pTaskInfo));
|
||||
*status = FUNC_DATA_REQUIRED_DATA_LOAD;
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT(*status == FUNC_DATA_REQUIRED_DATA_LOAD);
|
||||
|
||||
// todo filter data block according to the block sma data firstly
|
||||
// try to filter data block according to sma info
|
||||
if (pTableScanInfo->pFilterNode != NULL && (!loadSMA)) {
|
||||
bool success = doLoadBlockSMA(pTableScanInfo, pBlock, pTaskInfo);
|
||||
if (success) {
|
||||
size_t size = taosArrayGetSize(pBlock->pDataBlock);
|
||||
bool keep = doFilterByBlockSMA(pTableScanInfo->pFilterNode, pBlock->pBlockAgg, size, pBlockInfo->rows);
|
||||
if (!keep) {
|
||||
qDebug("%s data block filter out by block SMA, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
|
||||
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
pCost->filterOutBlocks += 1;
|
||||
(*status) = FUNC_DATA_REQUIRED_FILTEROUT;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// free the sma info, since it should not be involved in later computing process.
|
||||
taosMemoryFreeClear(pBlock->pBlockAgg);
|
||||
|
||||
// try to filter data block according to current results
|
||||
doDynamicPruneDataBlock(pOperator, pBlockInfo, status);
|
||||
if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
|
||||
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
|
||||
qDebug("%s data block skipped due to dynamic prune, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
|
||||
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
pCost->skipBlocks += 1;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
#if 0
|
||||
if (!doFilterByBlockStatistics(pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
|
||||
pCost->filterOutBlocks += 1;
|
||||
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), pBlockInfo->window.skey,
|
||||
pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
(*status) = FUNC_DATA_REQUIRED_FILTEROUT;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
pCost->totalCheckedRows += pBlock->info.rows;
|
||||
pCost->loadBlocks += 1;
|
||||
|
||||
|
@ -470,6 +513,11 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
|
|||
pBlock->info = binfo;
|
||||
ASSERT(binfo.uid != 0);
|
||||
|
||||
uint64_t* groupId = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
|
||||
if (groupId) {
|
||||
pBlock->info.groupId = *groupId;
|
||||
}
|
||||
|
||||
uint32_t status = 0;
|
||||
int32_t code = loadDataBlock(pOperator, pTableScanInfo, pBlock, &status);
|
||||
// int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status);
|
||||
|
@ -482,11 +530,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
|
|||
continue;
|
||||
}
|
||||
|
||||
uint64_t* groupId = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
|
||||
if (groupId) {
|
||||
pBlock->info.groupId = *groupId;
|
||||
}
|
||||
|
||||
pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
|
||||
pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
|
||||
|
||||
|
@ -527,7 +570,7 @@ static SSDataBlock* doTableScanGroup(SOperatorInfo* pOperator) {
|
|||
if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
|
||||
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
|
||||
pTableScanInfo->scanFlag = REPEAT_SCAN;
|
||||
qDebug("%s start to repeat ascending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
|
||||
qDebug("%s start to repeat ascending order scan data SELECT last_row(*),hostname from cpu group by hostname;blocks due to query func required", GET_TASKID(pTaskInfo));
|
||||
|
||||
// do prepare for the next round table scan operation
|
||||
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
|
||||
|
@ -539,10 +582,9 @@ static SSDataBlock* doTableScanGroup(SOperatorInfo* pOperator) {
|
|||
if (pTableScanInfo->cond.order == TSDB_ORDER_ASC) {
|
||||
prepareForDescendingScan(pTableScanInfo, pOperator->exprSupp.pCtx, 0);
|
||||
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
|
||||
qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
|
||||
}
|
||||
|
||||
qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
|
||||
|
||||
while (pTableScanInfo->scanTimes < total) {
|
||||
SSDataBlock* p = doTableScanImpl(pOperator);
|
||||
if (p != NULL) {
|
||||
|
@ -682,9 +724,6 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
|
|||
}
|
||||
|
||||
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
|
||||
// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose
|
||||
// pInfo->cond.order = TSDB_ORDER_DESC;
|
||||
|
||||
pInfo->pdInfo.interval = extractIntervalInfo(pTableScanNode);
|
||||
pInfo->readHandle = *readHandle;
|
||||
pInfo->sample.sampleRatio = pTableScanNode->ratio;
|
||||
|
@ -2276,8 +2315,9 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
|
|||
}
|
||||
}
|
||||
|
||||
extractDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pRsp->numOfRows, pRsp->data, pRsp->compLen,
|
||||
pOperator->exprSupp.numOfExprs, startTs, NULL, pInfo->scanCols);
|
||||
char* pStart = pRsp->data;
|
||||
extractDataBlockFromFetchRsp(pInfo->pRes, pRsp->data, pOperator->exprSupp.numOfExprs, pInfo->scanCols, &pStart);
|
||||
updateLoadRemoteInfo(&pInfo->loadInfo, pRsp->numOfRows, pRsp->compLen, startTs, pOperator);
|
||||
|
||||
// todo log the filter info
|
||||
doFilterResult(pInfo);
|
||||
|
@ -2418,81 +2458,6 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
|
|||
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
#if 0
|
||||
int32_t maxNumOfTables = (int32_t)pResultInfo->capacity;
|
||||
|
||||
STagScanInfo *pInfo = pOperator->info;
|
||||
SSDataBlock *pRes = pInfo->pRes;
|
||||
|
||||
int32_t count = 0;
|
||||
SArray* pa = GET_TABLEGROUP(pRuntimeEnv, 0);
|
||||
|
||||
int32_t functionId = getExprFunctionId(&pOperator->exprSupp.pExprInfo[0]);
|
||||
if (functionId == FUNCTION_TID_TAG) { // return the tags & table Id
|
||||
assert(pQueryAttr->numOfOutput == 1);
|
||||
|
||||
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0];
|
||||
int32_t rsize = pExprInfo->base.resSchema.bytes;
|
||||
|
||||
count = 0;
|
||||
|
||||
int16_t bytes = pExprInfo->base.resSchema.bytes;
|
||||
int16_t type = pExprInfo->base.resSchema.type;
|
||||
|
||||
for(int32_t i = 0; i < pQueryAttr->numOfTags; ++i) {
|
||||
if (pQueryAttr->tagColList[i].colId == pExprInfo->base.pColumns->info.colId) {
|
||||
bytes = pQueryAttr->tagColList[i].bytes;
|
||||
type = pQueryAttr->tagColList[i].type;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
SColumnInfoData* pColInfo = taosArrayGet(pRes->pDataBlock, 0);
|
||||
|
||||
while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) {
|
||||
int32_t i = pInfo->curPos++;
|
||||
STableQueryInfo *item = taosArrayGetP(pa, i);
|
||||
|
||||
char *output = pColInfo->pData + count * rsize;
|
||||
varDataSetLen(output, rsize - VARSTR_HEADER_SIZE);
|
||||
|
||||
output = varDataVal(output);
|
||||
STableId* id = TSDB_TABLEID(item->pTable);
|
||||
|
||||
*(int16_t *)output = 0;
|
||||
output += sizeof(int16_t);
|
||||
|
||||
*(int64_t *)output = id->uid; // memory align problem, todo serialize
|
||||
output += sizeof(id->uid);
|
||||
|
||||
*(int32_t *)output = id->tid;
|
||||
output += sizeof(id->tid);
|
||||
|
||||
*(int32_t *)output = pQueryAttr->vgId;
|
||||
output += sizeof(pQueryAttr->vgId);
|
||||
|
||||
char* data = NULL;
|
||||
if (pExprInfo->base.pColumns->info.colId == TSDB_TBNAME_COLUMN_INDEX) {
|
||||
data = tsdbGetTableName(item->pTable);
|
||||
} else {
|
||||
data = tsdbGetTableTagVal(item->pTable, pExprInfo->base.pColumns->info.colId, type, bytes);
|
||||
}
|
||||
|
||||
doSetTagValueToResultBuf(output, data, type, bytes);
|
||||
count += 1;
|
||||
}
|
||||
|
||||
//qDebug("QInfo:0x%"PRIx64" create (tableId, tag) info completed, rows:%d", GET_TASKID(pRuntimeEnv), count);
|
||||
} else if (functionId == FUNCTION_COUNT) {// handle the "count(tbname)" query
|
||||
SColumnInfoData* pColInfo = taosArrayGet(pRes->pDataBlock, 0);
|
||||
*(int64_t*)pColInfo->pData = pInfo->totalTables;
|
||||
count = 1;
|
||||
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
//qDebug("QInfo:0x%"PRIx64" create count(tbname) query, res:%d rows:1", GET_TASKID(pRuntimeEnv), count);
|
||||
} else { // return only the tags|table name etc.
|
||||
#endif
|
||||
|
||||
STagScanInfo* pInfo = pOperator->info;
|
||||
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0];
|
||||
SSDataBlock* pRes = pInfo->pRes;
|
||||
|
@ -2570,6 +2535,8 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
|
|||
STagScanInfo* pInfo = (STagScanInfo*)param;
|
||||
pInfo->pRes = blockDataDestroy(pInfo->pRes);
|
||||
|
||||
taosArrayDestroy(pInfo->pColMatchInfo);
|
||||
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
|
@ -2625,11 +2592,17 @@ _error:
|
|||
int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
|
||||
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
|
||||
const char* idStr) {
|
||||
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
int64_t st1 = taosGetTimestampUs();
|
||||
qDebug("generate queried table list completed, elapsed time:%.2f ms %s", (st1-st)/1000.0, idStr);
|
||||
|
||||
if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
|
||||
qDebug("no table qualified for query, %s" PRIx64, idStr);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -2641,6 +2614,9 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
|
|||
return code;
|
||||
}
|
||||
|
||||
int64_t st2 = taosGetTimestampUs();
|
||||
qDebug("generate group id map completed, elapsed time:%.2f ms %s", (st2-st1)/1000.0, idStr);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -2731,7 +2707,7 @@ static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeSc
|
|||
|
||||
// todo filter data block according to the block sma data firstly
|
||||
#if 0
|
||||
if (!doFilterByBlockStatistics(pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
|
||||
if (!doFilterByBlockSMA(pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
|
||||
pCost->filterOutBlocks += 1;
|
||||
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), pBlockInfo->window.skey,
|
||||
pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
|
|
|
@ -658,7 +658,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
|
|||
|
||||
blockDataDestroy(p);
|
||||
|
||||
qDebug("%s get sorted row blocks, rows:%d", GET_TASKID(pTaskInfo), pDataBlock->info.rows);
|
||||
qDebug("%s get sorted block, groupId:%0x"PRIx64" rows:%d", GET_TASKID(pTaskInfo), pDataBlock->info.groupId, pDataBlock->info.rows);
|
||||
return (pDataBlock->info.rows > 0) ? pDataBlock : NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -1695,7 +1695,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
|
|||
|
||||
static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) {
|
||||
for (int32_t i = 0; i < numOfCols; i++) {
|
||||
if (!fmIsInvertible(pFCtx[i].functionId)) {
|
||||
if (fmIsUserDefinedFunc(pFCtx[i].functionId) || !fmIsInvertible(pFCtx[i].functionId)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -1781,6 +1781,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
|
|||
pInfo->twAggSup = *pTwAggSupp;
|
||||
pInfo->ignoreExpiredData = pPhyNode->window.igExpired;
|
||||
pInfo->pCondition = pPhyNode->window.node.pConditions;
|
||||
pInfo->binfo.mergeResultBlock = pPhyNode->window.mergeDataBlock;
|
||||
|
||||
if (pPhyNode->window.pExprs != NULL) {
|
||||
int32_t numOfScalar = 0;
|
||||
|
@ -2087,8 +2088,10 @@ static void doKeepNextRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
|
|||
pSliceInfo->isNextRowSet = true;
|
||||
}
|
||||
|
||||
static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock* pBlock, int32_t rowIndex) {
|
||||
static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock* pBlock, int32_t rowIndex,
|
||||
bool isLastRow) {
|
||||
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
bool fillLastPoint = pSliceInfo->fillLastPoint;
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
|
||||
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, pSliceInfo->tsCol.slotId);
|
||||
|
@ -2096,16 +2099,22 @@ static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlo
|
|||
|
||||
// null data should not be kept since it can not be used to perform interpolation
|
||||
if (!colDataIsNull_s(pColInfoData, i)) {
|
||||
int64_t startKey = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
int64_t endKey = *(int64_t*)colDataGetData(pTsCol, rowIndex + 1);
|
||||
pLinearInfo->start.key = startKey;
|
||||
pLinearInfo->end.key = endKey;
|
||||
if (isLastRow) {
|
||||
pLinearInfo->start.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
memcpy(pLinearInfo->start.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
|
||||
} else if (fillLastPoint) {
|
||||
pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
memcpy(pLinearInfo->end.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
|
||||
} else {
|
||||
pLinearInfo->start.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex + 1);
|
||||
|
||||
char* val;
|
||||
val = colDataGetData(pColInfoData, rowIndex);
|
||||
memcpy(pLinearInfo->start.val, val, pLinearInfo->bytes);
|
||||
val = colDataGetData(pColInfoData, rowIndex + 1);
|
||||
memcpy(pLinearInfo->end.val, val, pLinearInfo->bytes);
|
||||
char* val;
|
||||
val = colDataGetData(pColInfoData, rowIndex);
|
||||
memcpy(pLinearInfo->start.val, val, pLinearInfo->bytes);
|
||||
val = colDataGetData(pColInfoData, rowIndex + 1);
|
||||
memcpy(pLinearInfo->end.val, val, pLinearInfo->bytes);
|
||||
}
|
||||
|
||||
pLinearInfo->hasNull = false;
|
||||
} else {
|
||||
|
@ -2113,9 +2122,11 @@ static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlo
|
|||
}
|
||||
}
|
||||
|
||||
pSliceInfo->fillLastPoint = isLastRow ? true : false;
|
||||
|
||||
}
|
||||
|
||||
static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pBlock,
|
||||
static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup,
|
||||
SSDataBlock* pResBlock) {
|
||||
int32_t rows = pResBlock->info.rows;
|
||||
|
||||
|
@ -2124,10 +2135,10 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
|
|||
// output the result
|
||||
for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
|
||||
SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
|
||||
int32_t dstSlot = pExprInfo->base.resSchema.slotId;
|
||||
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
|
||||
int32_t dstSlot = pExprInfo->base.resSchema.slotId;
|
||||
|
||||
SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
|
||||
//SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
|
||||
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
|
||||
|
||||
switch (pSliceInfo->fillType) {
|
||||
|
@ -2269,7 +2280,7 @@ static int32_t initFillLinearInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pB
|
|||
}
|
||||
|
||||
pInfo->pLinearInfo = taosArrayInit(4, sizeof(SFillLinearInfo));
|
||||
if (pInfo->pNextRow == NULL) {
|
||||
if (pInfo->pLinearInfo == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -2283,15 +2294,20 @@ static int32_t initFillLinearInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pB
|
|||
linearInfo.start.val = taosMemoryCalloc(1, pColInfo->info.bytes);
|
||||
linearInfo.end.val = taosMemoryCalloc(1, pColInfo->info.bytes);
|
||||
linearInfo.hasNull = false;
|
||||
linearInfo.fillLastPoint = false;
|
||||
linearInfo.type = pColInfo->info.type;
|
||||
linearInfo.bytes = pColInfo->info.bytes;
|
||||
taosArrayPush(pInfo->pLinearInfo, &linearInfo);
|
||||
}
|
||||
|
||||
pInfo->fillLastPoint = false;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static bool needToFillLastPoint(STimeSliceOperatorInfo* pSliceInfo) {
|
||||
return (pSliceInfo->fillLastPoint == true && pSliceInfo->fillType == TSDB_FILL_LINEAR);
|
||||
}
|
||||
|
||||
static int32_t initKeeperInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
|
||||
int32_t code;
|
||||
code = initPrevRowsKeeper(pInfo, pBlock);
|
||||
|
@ -2356,6 +2372,23 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
|
||||
int64_t ts = *(int64_t*)colDataGetData(pTsCol, i);
|
||||
|
||||
if (i == 0 && needToFillLastPoint(pSliceInfo)) { // first row in current block
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, false);
|
||||
while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (pResBlock->info.rows >= pResBlock->info.capacity) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pSliceInfo->current > pSliceInfo->win.ekey) {
|
||||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (ts == pSliceInfo->current) {
|
||||
for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
|
||||
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[j];
|
||||
|
@ -2374,16 +2407,17 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
|
||||
// for linear interpolation, always fill value between this and next points;
|
||||
// if its the first point in data block, also fill values between previous(if there's any) and this point;
|
||||
// if its the last point in data block, no need to fill, but reserve this point as the start value for next data block.
|
||||
// if its the last point in data block, no need to fill, but reserve this point as the start value and do
|
||||
// the interpolation when processing next data block.
|
||||
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i);
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, false);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (i < pBlock->info.rows - 1) {
|
||||
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
|
||||
if (nextTs > pSliceInfo->current) {
|
||||
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (pResBlock->info.rows >= pResBlock->info.capacity) {
|
||||
|
@ -2395,10 +2429,10 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
// ignore current row, and do nothing
|
||||
}
|
||||
} else { // it is the last row of current block
|
||||
} else {// it is the last row of current block
|
||||
//store ts value as start, and calculate interp value when processing next block
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, true);
|
||||
}
|
||||
} else { // non-linear interpolation
|
||||
pSliceInfo->current =
|
||||
|
@ -2417,14 +2451,15 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
doKeepPrevRows(pSliceInfo, pBlock, i);
|
||||
|
||||
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i);
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, false);
|
||||
// no need to increate pSliceInfo->current here
|
||||
//pSliceInfo->current =
|
||||
// taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (i < pBlock->info.rows - 1) {
|
||||
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
|
||||
if (nextTs > pSliceInfo->current) {
|
||||
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (pResBlock->info.rows >= pResBlock->info.capacity) {
|
||||
|
@ -2436,10 +2471,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
// ignore current row, and do nothing
|
||||
}
|
||||
} else { // it is the last row of current block
|
||||
}
|
||||
} else { // non-linear interpolation
|
||||
if (i < pBlock->info.rows - 1) {
|
||||
|
@ -2448,7 +2480,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
|
||||
if (nextTs > pSliceInfo->current) {
|
||||
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (pResBlock->info.rows >= pResBlock->info.capacity) {
|
||||
|
@ -2472,7 +2504,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
doKeepNextRows(pSliceInfo, pBlock, i);
|
||||
|
||||
while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (pResBlock->info.rows >= pResBlock->info.capacity) {
|
||||
|
@ -2499,14 +2531,14 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
|
||||
|
||||
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i);
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, false);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (i < pBlock->info.rows - 1) {
|
||||
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
|
||||
if (nextTs > pSliceInfo->current) {
|
||||
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (pResBlock->info.rows >= pResBlock->info.capacity) {
|
||||
|
@ -2518,10 +2550,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
// ignore current row, and do nothing
|
||||
}
|
||||
} else { // it is the last row of current block
|
||||
}
|
||||
} else { // non-linear interpolation
|
||||
pSliceInfo->current =
|
||||
|
@ -2540,15 +2569,16 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
}
|
||||
}
|
||||
|
||||
// check if need to interpolate after ts range
|
||||
// except for fill(next)
|
||||
while (pSliceInfo->current <= pSliceInfo->win.ekey && pSliceInfo->fillType != TSDB_FILL_NEXT) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (pResBlock->info.rows >= pResBlock->info.capacity) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// check if need to interpolate after last datablock
|
||||
// except for fill(next), fill(linear)
|
||||
while (pSliceInfo->current <= pSliceInfo->win.ekey && pSliceInfo->fillType != TSDB_FILL_NEXT && pSliceInfo->fillType != TSDB_FILL_LINEAR) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (pResBlock->info.rows >= pResBlock->info.capacity) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4865,20 +4895,25 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
|
|||
int32_t numOfOutput = pSup->numOfExprs;
|
||||
int64_t* tsCols = extractTsCol(pBlock, iaInfo);
|
||||
uint64_t tableGroupId = pBlock->info.groupId;
|
||||
TSKEY currTs = getStartTsKey(&pBlock->info.window, tsCols);
|
||||
SResultRow* pResult = NULL;
|
||||
|
||||
TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
|
||||
|
||||
// there is an result exists
|
||||
if (miaInfo->curTs != INT64_MIN) {
|
||||
ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
|
||||
if (currTs != miaInfo->curTs) {
|
||||
|
||||
if (ts != miaInfo->curTs) {
|
||||
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
|
||||
miaInfo->curTs = INT64_MIN;
|
||||
miaInfo->curTs = ts;
|
||||
}
|
||||
} else {
|
||||
miaInfo->curTs = ts;
|
||||
ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
|
||||
}
|
||||
|
||||
STimeWindow win = {0};
|
||||
win.skey = currTs;
|
||||
win.skey = miaInfo->curTs;
|
||||
win.ekey =
|
||||
taosTimeAdd(win.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit, iaInfo->interval.precision) - 1;
|
||||
|
||||
|
@ -4889,12 +4924,11 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
|
|||
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
miaInfo->curTs = win.skey;
|
||||
int32_t currPos = startPos;
|
||||
|
||||
STimeWindow currWin = win;
|
||||
while (++currPos < pBlock->info.rows) {
|
||||
if (tsCols[currPos] == currTs) {
|
||||
if (tsCols[currPos] == miaInfo->curTs) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -4902,11 +4936,10 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
|
|||
doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
|
||||
tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder);
|
||||
|
||||
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs);
|
||||
miaInfo->curTs = INT64_MIN;
|
||||
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
|
||||
miaInfo->curTs = tsCols[currPos];
|
||||
|
||||
currTs = tsCols[currPos];
|
||||
currWin.skey = currTs;
|
||||
currWin.skey = miaInfo->curTs;
|
||||
currWin.ekey = taosTimeAdd(currWin.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit,
|
||||
iaInfo->interval.precision) - 1;

@@ -4924,83 +4957,97 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder);

if (currPos >= pBlock->info.rows) {
// we need to see next block if exists
} else {
ASSERT(0);
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs);
miaInfo->curTs = INT64_MIN;
}
}

static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;

SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperator->info;
SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}

SExprSupp* pSup = &pOperator->exprSupp;
SSDataBlock* pRes = iaInfo->binfo.pRes;

blockDataCleanup(pRes);
blockDataEnsureCapacity(pRes, pOperator->resultInfo.capacity);
SOperatorInfo* downstream = pOperator->pDownstream[0];
int32_t scanFlag = MAIN_SCAN;

if (!miaInfo->inputBlocksFinished) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
int32_t scanFlag = MAIN_SCAN;
while (1) {
SSDataBlock* pBlock = NULL;
if (miaInfo->prefetchedBlock == NULL) {
pBlock = downstream->fpSet.getNextFn(downstream);
} else {
pBlock = miaInfo->prefetchedBlock;
miaInfo->prefetchedBlock = NULL;

while (1) {
SSDataBlock* pBlock = NULL;
if (miaInfo->prefetchedBlock == NULL) {
pBlock = downstream->fpSet.getNextFn(downstream);
} else {
pBlock = miaInfo->prefetchedBlock;
miaInfo->groupId = pBlock->info.groupId;
miaInfo->prefetchedBlock = NULL;
}

if (pBlock == NULL) {
// close last unfinalized time window
if (miaInfo->curTs != INT64_MIN) {
ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->curTs = INT64_MIN;
}

doSetOperatorCompleted(pOperator);
miaInfo->inputBlocksFinished = true;
break;
}

if (!miaInfo->hasGroupId) {
miaInfo->hasGroupId = true;
miaInfo->groupId = pBlock->info.groupId;
} else if (miaInfo->groupId != pBlock->info.groupId) {
// if there are unclosed time window, close it firstly.
ASSERT(miaInfo->curTs != INT64_MIN);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->prefetchedBlock = pBlock;
miaInfo->curTs = INT64_MIN;
break;
}

getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag);
setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->inputOrder, scanFlag, true);
doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);
doFilter(miaInfo->pCondition, pRes, NULL);
if (pRes->info.rows >= pOperator->resultInfo.capacity) {
break;
}
miaInfo->groupId = pBlock->info.groupId;
}

pRes->info.groupId = miaInfo->groupId;
if (pBlock == NULL) {
// close last unfinalized time window
if (miaInfo->curTs != INT64_MIN) {
ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->curTs = INT64_MIN;
}

doSetOperatorCompleted(pOperator);
break;
}

if (!miaInfo->hasGroupId) {
miaInfo->hasGroupId = true;
miaInfo->groupId = pBlock->info.groupId;
} else if (miaInfo->groupId != pBlock->info.groupId) {
// if there are unclosed time window, close it firstly.
ASSERT(miaInfo->curTs != INT64_MIN);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->prefetchedBlock = pBlock;
miaInfo->curTs = INT64_MIN;
break;
}

getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag);
setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->inputOrder, scanFlag, true);
doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);

doFilter(miaInfo->pCondition, pRes, NULL);
if (pRes->info.rows >= pOperator->resultInfo.capacity) {
break;
}
}

pRes->info.groupId = miaInfo->groupId;
miaInfo->hasGroupId = false;
}

static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;

SMergeAlignedIntervalAggOperatorInfo* pMiaInfo = pOperator->info;
SIntervalAggOperatorInfo* iaInfo = pMiaInfo->intervalAggOperatorInfo;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}

SSDataBlock* pRes = iaInfo->binfo.pRes;
blockDataCleanup(pRes);

if (iaInfo->binfo.mergeResultBlock) {
while (1) {
if (pOperator->status == OP_EXEC_DONE) {
break;
}

if (pRes->info.rows >= pOperator->resultInfo.threshold) {
break;
}

doMergeAlignedIntervalAgg(pOperator);
}
} else {
doMergeAlignedIntervalAgg(pOperator);
}

size_t rows = pRes->info.rows;
pOperator->resultInfo.totalRows += rows;

@@ -5009,7 +5056,7 @@ static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {

SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo,
int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval,
int32_t primaryTsSlotId, SNode* pCondition,
int32_t primaryTsSlotId, SNode* pCondition, bool mergeResultBlock,
SExecTaskInfo* pTaskInfo) {
SMergeAlignedIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeAlignedIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));

@@ -5033,6 +5080,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
iaInfo->interval = *pInterval;
iaInfo->execModel = pTaskInfo->execModel;
iaInfo->primaryTsIndex = primaryTsSlotId;
iaInfo->binfo.mergeResultBlock = mergeResultBlock;

size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);

@@ -5053,6 +5101,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
}

initResultRowInfo(&iaInfo->binfo.resultRowInfo);
blockDataEnsureCapacity(iaInfo->binfo.pRes, pOperator->resultInfo.capacity);

pOperator->name = "TimeMergeAlignedIntervalAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL;

@@ -5063,7 +5112,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = miaInfo;

pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doMergeAlignedIntervalAgg, NULL, NULL,
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL,
destroyMergeAlignedIntervalOperatorInfo, NULL, NULL, NULL);

code = appendDownstream(pOperator, &downstream, 1);

@@ -5321,7 +5370,7 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {

SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
SExecTaskInfo* pTaskInfo) {
bool mergeBlock, SExecTaskInfo* pTaskInfo) {
SMergeIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (miaInfo == NULL || pOperator == NULL) {

@@ -5335,6 +5384,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
iaInfo->inputOrder = TSDB_ORDER_ASC;
iaInfo->interval = *pInterval;
iaInfo->execModel = pTaskInfo->execModel;
iaInfo->binfo.mergeResultBlock = mergeBlock;

iaInfo->primaryTsIndex = primaryTsSlotId;
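The hunks above split the operator into a worker (doMergeAlignedIntervalAgg, now returning void) that fills the result block once, and a new entry point (mergeAlignedIntervalAgg) that, when the mergeResultBlock flag is set, keeps invoking the worker until the block reaches resultInfo.threshold or the operator completes. A minimal standalone C sketch of that accumulate-until-threshold driver pattern; ToyOperator, fillOnce, and drainMerged are hypothetical names, not the real executor types:

```c
#include <stdbool.h>
#include <stdio.h>

// Hypothetical stand-in for the operator / result-block state.
typedef struct ToyOperator {
  int  rows;       // rows buffered in the current result block
  int  threshold;  // return the block once this many rows are buffered
  bool done;       // set when the downstream source is exhausted
  int  remaining;  // toy "input" left to consume
} ToyOperator;

// Worker: appends one batch to the result block (role of doMergeAlignedIntervalAgg).
static void fillOnce(ToyOperator *op) {
  int batch = op->remaining < 3 ? op->remaining : 3;
  op->rows += batch;
  op->remaining -= batch;
  if (op->remaining == 0) op->done = true;
}

// Entry point: merge batches into one block until threshold or completion
// (role of mergeAlignedIntervalAgg when mergeResultBlock is enabled).
static int drainMerged(ToyOperator *op) {
  while (!op->done && op->rows < op->threshold) {
    fillOnce(op);
  }
  return op->rows;
}

int main(void) {
  ToyOperator op = {.rows = 0, .threshold = 8, .done = false, .remaining = 10};
  printf("returned %d rows in one block\n", drainMerged(&op));
  return 0;
}
```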
@@ -13,38 +13,37 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "os.h"
#include "tsimplehash.h"
#include "os.h"
#include "taoserror.h"

#define SHASH_DEFAULT_LOAD_FACTOR 0.75
#define HASH_MAX_CAPACITY (1024*1024*16)
#define HASH_MAX_CAPACITY (1024 * 1024 * 16)
#define SHASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * SHASH_DEFAULT_LOAD_FACTOR)

#define GET_SHASH_NODE_KEY(_n, _dl) ((char*)(_n) + sizeof(SHNode) + (_dl))
#define GET_SHASH_NODE_DATA(_n) ((char*)(_n) + sizeof(SHNode))
#define GET_SHASH_NODE_KEY(_n, _dl) ((char *)(_n) + sizeof(SHNode) + (_dl))
#define GET_SHASH_NODE_DATA(_n) ((char *)(_n) + sizeof(SHNode))

#define HASH_INDEX(v, c) ((v) & ((c)-1))
#define HASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * SHASH_DEFAULT_LOAD_FACTOR)
#define HASH_INDEX(v, c) ((v) & ((c)-1))

#define FREE_HASH_NODE(_n) \
do { \
taosMemoryFreeClear(_n); \
#define FREE_HASH_NODE(_n) \
do { \
taosMemoryFreeClear(_n); \
} while (0);

typedef struct SHNode {
struct SHNode *next;
char data[];
struct SHNode *next;
char data[];
} SHNode;

struct SSHashObj {
SHNode **hashList;
size_t capacity; // number of slots
int64_t size; // number of elements in hash table
_hash_fn_t hashFp; // hash function
_equal_fn_t equalFp; // equal function
int32_t keyLen;
int32_t dataLen;
SHNode **hashList;
size_t capacity; // number of slots
int64_t size; // number of elements in hash table
_hash_fn_t hashFp; // hash function
_equal_fn_t equalFp; // equal function
int32_t keyLen;
int32_t dataLen;
};

static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {

@@ -62,7 +61,7 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t
capacity = 4;
}

SSHashObj* pHashObj = (SSHashObj*) taosMemoryCalloc(1, sizeof(SSHashObj));
SSHashObj *pHashObj = (SSHashObj *)taosMemoryCalloc(1, sizeof(SSHashObj));
if (pHashObj == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;

@@ -72,7 +71,7 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t
pHashObj->capacity = taosHashCapacity((int32_t)capacity);

pHashObj->equalFp = memcmp;
pHashObj->hashFp = fn;
pHashObj->hashFp = fn;
ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0);

pHashObj->keyLen = keyLen;

@@ -91,7 +90,7 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) {
if (pHashObj == NULL) {
return 0;
}
return (int32_t)atomic_load_64((int64_t*)&pHashObj->size);
return (int32_t)atomic_load_64((int64_t *)&pHashObj->size);
}

static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) {

@@ -108,41 +107,42 @@ static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *pDat
}

static void taosHashTableResize(SSHashObj *pHashObj) {
if (!HASH_NEED_RESIZE(pHashObj)) {
if (!SHASH_NEED_RESIZE(pHashObj)) {
return;
}

int32_t newCapacity = (int32_t)(pHashObj->capacity << 1u);
if (newCapacity > HASH_MAX_CAPACITY) {
// uDebug("current capacity:%zu, maximum capacity:%d, no resize applied due to limitation is reached",
// pHashObj->capacity, HASH_MAX_CAPACITY);
// uDebug("current capacity:%zu, maximum capacity:%d, no resize applied due to limitation is reached",
// pHashObj->capacity, HASH_MAX_CAPACITY);
return;
}

int64_t st = taosGetTimestampUs();
void *pNewEntryList = taosMemoryRealloc(pHashObj->hashList, sizeof(void *) * newCapacity);
void *pNewEntryList = taosMemoryRealloc(pHashObj->hashList, sizeof(void *) * newCapacity);
if (pNewEntryList == NULL) {
// qWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity);
// qWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity);
return;
}

size_t inc = newCapacity - pHashObj->capacity;
memset((char*)pNewEntryList + pHashObj->capacity * sizeof(void*), 0, inc);
memset((char *)pNewEntryList + pHashObj->capacity * sizeof(void *), 0, inc);

pHashObj->hashList = pNewEntryList;
pHashObj->capacity = newCapacity;

for (int32_t idx = 0; idx < pHashObj->capacity; ++idx) {
SHNode* pNode = pHashObj->hashList[idx];
SHNode *pNext;
SHNode *pPrev = NULL;

SHNode *pNode = pHashObj->hashList[idx];
if (pNode == NULL) {
continue;
}

SHNode *pNext;
SHNode *pPrev = NULL;

while (pNode != NULL) {
void* key = GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen);
void *key = GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen);
uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->dataLen);

int32_t newIdx = HASH_INDEX(hashVal, pHashObj->capacity);

@@ -166,8 +166,9 @@ static void taosHashTableResize(SSHashObj *pHashObj) {

int64_t et = taosGetTimestampUs();

// uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", (int32_t)pHashObj->capacity,
// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
// uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms",
// (int32_t)pHashObj->capacity,
// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
}

int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) {

@@ -210,7 +211,7 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) {
pNewNode->next = pHashObj->hashList[slot];
pHashObj->hashList[slot] = pNewNode;
atomic_add_fetch_64(&pHashObj->size, 1);
} else { //update data
} else { // update data
memcpy(GET_SHASH_NODE_DATA(pNode), data, pHashObj->dataLen);
}

@@ -230,9 +231,7 @@ static FORCE_INLINE SHNode *doSearchInEntryList(SSHashObj *pHashObj, const void
return pNode;
}

static FORCE_INLINE bool taosHashTableEmpty(const SSHashObj *pHashObj) {
return tSimpleHashGetSize(pHashObj) == 0;
}
static FORCE_INLINE bool taosHashTableEmpty(const SSHashObj *pHashObj) { return tSimpleHashGetSize(pHashObj) == 0; }

void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) {
if (pHashObj == NULL || taosHashTableEmpty(pHashObj) || key == NULL) {

@@ -299,9 +298,9 @@ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) {
return (pHashObj->capacity * sizeof(void *)) + sizeof(SHNode) * tSimpleHashGetSize(pHashObj) + sizeof(SSHashObj);
}

void *tSimpleHashGetKey(const SSHashObj* pHashObj, void *data, size_t* keyLen) {
void *tSimpleHashGetKey(const SSHashObj *pHashObj, void *data, size_t *keyLen) {
int32_t offset = offsetof(SHNode, data);
SHNode *node = ((SHNode*)(char*)data - offset);
SHNode *node = ((SHNode *)(char *)data - offset);
if (keyLen != NULL) {
*keyLen = pHashObj->keyLen;
}
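The simple-hash hunks above store fixed-length keys and values whose sizes are set once at init time, and expose tSimpleHashInit, tSimpleHashPut, tSimpleHashGet, tSimpleHashGetSize, and tSimpleHashGetKey. A hedged usage sketch based only on the signatures visible in this diff; the header names, the hash-function getter taosGetDefaultHashFunction, the TSDB_DATA_TYPE_BIGINT constant, and the assumption that the fourth tSimpleHashInit parameter is the value length are not confirmed by the hunks themselves:

```c
// Sketch only: assumes "thash.h" and "tsimplehash.h" declare the symbols below.
#include "thash.h"        // assumed: _hash_fn_t, taosGetDefaultHashFunction()
#include "tsimplehash.h"  // assumed header for SSHashObj / tSimpleHash* APIs

static void simpleHashExample(void) {
  // Key and value lengths are fixed at init time (keyLen, dataLen).
  _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT);
  SSHashObj *pHash = tSimpleHashInit(64, fn, sizeof(int64_t), sizeof(int32_t));
  if (pHash == NULL) {
    return;  // tSimpleHashInit sets terrno to TSDB_CODE_OUT_OF_MEMORY on failure
  }

  int64_t key = 42;
  int32_t val = 7;
  tSimpleHashPut(pHash, &key, &val);  // insert, or overwrite the stored value in place

  int32_t *pVal = tSimpleHashGet(pHash, &key);
  if (pVal != NULL && tSimpleHashGetSize(pHash) == 1) {
    // *pVal == 7 here; the destroy/cleanup call is not shown in this diff,
    // so freeing the table is intentionally omitted from this sketch.
  }
}
```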
@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef TDENGINE_TAGGFUNCTION_H
#define TDENGINE_TAGGFUNCTION_H
#ifndef TDENGINE_TFUNCTIONINT_H
#define TDENGINE_TFUNCTIONINT_H

#ifdef __cplusplus
extern "C" {

@@ -28,17 +28,6 @@ extern "C" {
#include "function.h"
#include "tudf.h"

#define AVG_FUNCTION_INTER_BUFFER_SIZE 50

#define DATA_SET_FLAG ','  // to denote the output area has data, not null value
#define DATA_SET_FLAG_SIZE sizeof(DATA_SET_FLAG)

typedef struct SInterpInfoDetail {
TSKEY ts;  // interp specified timestamp
int8_t type;
int8_t primaryCol;
} SInterpInfoDetail;

bool topbot_datablock_filter(SqlFunctionCtx *pCtx, const char *minval, const char *maxval);

/**

@@ -57,4 +46,4 @@ static FORCE_INLINE void initResultRowEntry(SResultRowEntryInfo *pResInfo, int32
}
#endif

#endif  // TDENGINE_TAGGFUNCTION_H
#endif  // TDENGINE_TFUNCTIONINT_H
@@ -1575,7 +1575,8 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}

uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType) {
if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType &&
TSDB_DATA_TYPE_TIMESTAMP != colType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}

@@ -2150,7 +2151,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_apercentile_partial",
.type = FUNCTION_TYPE_APERCENTILE_PARTIAL | FUNC_MGT_TIMELINE_FUNC,
.type = FUNCTION_TYPE_APERCENTILE_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateApercentilePartial,
.getEnvFunc = getApercentileFuncEnv,

@@ -2163,7 +2164,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_apercentile_merge",
.type = FUNCTION_TYPE_APERCENTILE_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateApercentileMerge,
.getEnvFunc = getApercentileFuncEnv,
.initFunc = apercentileFunctionSetup,

@@ -2256,8 +2257,6 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.finalizeFunc = elapsedFinalize,
.invertFunc = NULL,
.combineFunc = elapsedCombine,
//.pPartialFunc = "_elapsed_partial",
//.pMergeFunc = "_elapsed_merge"
},
{
.name = "_elapsed_partial",

@@ -2324,10 +2323,13 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.type = FUNCTION_TYPE_LAST_ROW,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
.dynDataRequiredFunc = lastDynDataReq,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastRowFunction,
.sprocessFunc = firstLastScalarFunction,
.pPartialFunc = "_last_row_partial",
.pMergeFunc = "_last_row_merge",
.finalizeFunc = firstLastFinalize
},
{

@@ -2340,6 +2342,27 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.processFunc = cachedLastRowFunction,
.finalizeFunc = firstLastFinalize
},
{
.name = "_last_row_partial",
.type = FUNCTION_TYPE_LAST_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastPartial,
.dynDataRequiredFunc = lastDynDataReq,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastRowFunction,
.finalizeFunc = firstLastPartialFinalize,
},
{
.name = "_last_row_merge",
.type = FUNCTION_TYPE_LAST_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastMerge,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastFunctionMerge,
.finalizeFunc = firstLastFinalize,
},
{
.name = "first",
.type = FUNCTION_TYPE_FIRST,

@@ -2396,6 +2419,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.type = FUNCTION_TYPE_LAST_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastPartial,
.dynDataRequiredFunc = lastDynDataReq,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastFunction,

@@ -2428,7 +2452,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "histogram",
.type = FUNCTION_TYPE_HISTOGRAM,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.translateFunc = translateHistogram,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = histogramFunctionSetup,

@@ -2443,7 +2467,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_histogram_partial",
.type = FUNCTION_TYPE_HISTOGRAM_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.translateFunc = translateHistogramPartial,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = histogramFunctionSetup,

@@ -2455,7 +2479,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_histogram_merge",
.type = FUNCTION_TYPE_HISTOGRAM_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.translateFunc = translateHistogramMerge,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = functionSetup,

@@ -2467,7 +2491,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "hyperloglog",
.type = FUNCTION_TYPE_HYPERLOGLOG,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLL,
.getEnvFunc = getHLLFuncEnv,
.initFunc = functionSetup,

@@ -2481,7 +2505,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_hyperloglog_partial",
.type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL | FUNC_MGT_TIMELINE_FUNC,
.type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLLPartial,
.getEnvFunc = getHLLFuncEnv,

@@ -2493,7 +2517,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_hyperloglog_merge",
.type = FUNCTION_TYPE_HYPERLOGLOG_MERGE | FUNC_MGT_TIMELINE_FUNC,
.type = FUNCTION_TYPE_HYPERLOGLOG_MERGE,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLLMerge,
.getEnvFunc = getHLLFuncEnv,
@@ -18,10 +18,10 @@
#include "function.h"
#include "query.h"
#include "querynodes.h"
#include "taggfunction.h"
#include "tcompare.h"
#include "tdatablock.h"
#include "tdigest.h"
#include "tfunctionInt.h"
#include "tglobal.h"
#include "thistogram.h"
#include "tpercentile.h"

@@ -312,14 +312,6 @@ typedef struct SGroupKeyInfo {
#define GET_TS_LIST(x) ((TSKEY*)((x)->ptsList))
#define GET_TS_DATA(x, y) (GET_TS_LIST(x)[(y)])

#define DO_UPDATE_TAG_COLUMNS_WITHOUT_TS(ctx) \
do { \
for (int32_t _i = 0; _i < (ctx)->tagInfo.numOfTagCols; ++_i) { \
SqlFunctionCtx* __ctx = (ctx)->tagInfo.pTagCtxList[_i]; \
__ctx->fpSet.process(__ctx); \
} \
} while (0);

#define DO_UPDATE_SUBSID_RES(ctx, ts) \
do { \
for (int32_t _i = 0; _i < (ctx)->subsidiaries.num; ++_i) { \

@@ -2970,25 +2962,24 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {

static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst) {
SInputColumnInfoData* pColInfo = &pCtx->input;
int32_t start = pColInfo->startRowIndex;

pOutput->bytes = pInput->bytes;
TSKEY* tsIn = &pInput->ts;
TSKEY* tsOut = &pOutput->ts;

int32_t start = pColInfo->startRowIndex;
if (pOutput->hasResult) {
if (isFirst) {
if (*tsIn > *tsOut) {
if (pInput->ts > pOutput->ts) {
return;
}
} else {
if (*tsIn < *tsOut) {
if (pInput->ts < pOutput->ts) {
return;
}
}
}

*tsOut = *tsIn;
pOutput->isNull = pInput->isNull;
pOutput->ts = pInput->ts;
pOutput->bytes = pInput->bytes;

memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);

@@ -3015,7 +3006,6 @@ static int32_t firstLastFunctionMergeImpl(SqlFunctionCtx* pCtx, bool isFirstQuer
}

SET_VAL(GET_RES_INFO(pCtx), numOfElems, 1);

return TSDB_CODE_SUCCESS;
}

@@ -3116,9 +3106,9 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
TSKEY startKey = getRowPTs(pInput->pPTS, 0);
TSKEY endKey = getRowPTs(pInput->pPTS, pInput->totalRows - 1);

#if 0
int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;

#if 0
// the optimized version only function if all tuples in one block are monotonious increasing or descreasing.
// this is NOT always works if project operator exists in downstream.
if (blockDataOrder == TSDB_ORDER_ASC) {

@@ -3158,6 +3148,7 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
}

#endif

SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
}

@@ -3188,6 +3179,8 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
pDiffInfo->prev.i64 = *(bool*)pv? 1:0;
break;
case TSDB_DATA_TYPE_TINYINT:
pDiffInfo->prev.i64 = *(int8_t*)pv;
break;

@@ -3197,6 +3190,7 @@ static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
case TSDB_DATA_TYPE_SMALLINT:
pDiffInfo->prev.i64 = *(int16_t*)pv;
break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
pDiffInfo->prev.i64 = *(int64_t*)pv;
break;

@@ -3250,6 +3244,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo
pDiffInfo->prev.i64 = v;
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT: {
int64_t v = *(int64_t*)pv;
int64_t delta = factor * (v - pDiffInfo->prev.i64);  // direct previous may be null

@@ -4281,9 +4276,9 @@ static int32_t histogramFunctionImpl(SqlFunctionCtx* pCtx, bool isPartial) {
}

if (!isPartial) {
SET_VAL(GET_RES_INFO(pCtx), numOfElems, pInfo->numOfBins);
GET_RES_INFO(pCtx)->numOfRes = pInfo->numOfBins;
} else {
SET_VAL(GET_RES_INFO(pCtx), numOfElems, 1);
GET_RES_INFO(pCtx)->numOfRes = 1;
}
return TSDB_CODE_SUCCESS;
}

@@ -116,6 +116,11 @@ EFuncDataRequired fmFuncDynDataRequired(int32_t funcId, void* pRes, STimeWindow*
return TSDB_CODE_FAILED;
}

const char* name = funcMgtBuiltins[funcId].name;
if ((strcmp(name, "_group_key") == 0) || (strcmp(name, "_select_value") == 0)) {
return FUNC_DATA_REQUIRED_NOT_LOAD;
}

if (funcMgtBuiltins[funcId].dynDataRequiredFunc == NULL) {
return FUNC_DATA_REQUIRED_DATA_LOAD;
} else {
File diff suppressed because it is too large
@@ -1,67 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "function.h"
#include "os.h"

#include "texception.h"
#include "tmsg.h"

static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *));

void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) {
if (pNode == NULL) {
return;
}

if (pNode->nodeType == TEXPR_BINARYEXPR_NODE || pNode->nodeType == TEXPR_UNARYEXPR_NODE) {
doExprTreeDestroy(&pNode, fp);
}
taosMemoryFree(pNode);
}

static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
if (*pExpr == NULL) {
return;
}
taosMemoryFree(*pExpr);
*pExpr = NULL;
}

// TODO: these three functions should be made global
static void* exception_calloc(size_t nmemb, size_t size) {
void* p = taosMemoryCalloc(nmemb, size);
if (p == NULL) {
THROW(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
return p;
}

static void* exception_malloc(size_t size) {
void* p = taosMemoryMalloc(size);
if (p == NULL) {
THROW(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
return p;
}

static UNUSED_FUNC char* exception_strdup(const char* str) {
char* p = strdup(str);
if (p == NULL) {
THROW(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
return p;
}
@@ -0,0 +1,86 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "os.h"
#include "taosdef.h"
#include "tmsg.h"
#include "thash.h"
#include "ttypes.h"

#include "function.h"
#include "tbuffer.h"
#include "tcompression.h"
#include "tdatablock.h"
#include "tfunctionInt.h"
#include "thistogram.h"
#include "tpercentile.h"
#include "ttszip.h"
#include "tudf.h"

void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell) {
pCell->initialized = false;
}

int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock) {
int32_t maxRows = 0;

for (int32_t j = 0; j < num; ++j) {
#if 0
int32_t id = pCtx[j].functionId;

/*
* ts, tag, tagprj function can not decide the output number of current query
* the number of output result is decided by main output
*/
if (id == FUNCTION_TS || id == FUNCTION_TAG || id == FUNCTION_TAGPRJ) {
continue;
}
#endif
SResultRowEntryInfo *pResInfo = GET_RES_INFO(&pCtx[j]);
if (pResInfo != NULL && maxRows < pResInfo->numOfRes) {
maxRows = pResInfo->numOfRes;
}
}

assert(maxRows >= 0);

blockDataEnsureCapacity(pResBlock, maxRows);
for(int32_t i = 0; i < num; ++i) {
SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, i);

SResultRowEntryInfo *pResInfo = GET_RES_INFO(&pCtx[i]);
if (pResInfo->numOfRes == 0) {
for(int32_t j = 0; j < pResInfo->numOfRes; ++j) {
colDataAppend(pCol, j, NULL, true);  // TODO add set null data api
}
} else {
for (int32_t j = 0; j < pResInfo->numOfRes; ++j) {
colDataAppend(pCol, j, GET_ROWCELL_INTERBUF(pResInfo), false);
}
}
}

pResBlock->info.rows = maxRows;
return maxRows;
}

bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry) {
assert(pEntry != NULL);
return pEntry->complete;
}

bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry) {
return pEntry->initialized;
}
Some files were not shown because too many files have changed in this diff.