Merge branch '3.0' into cpwu/3.0

This commit is contained in:
cpwu 2022-08-10 20:58:52 +08:00
commit 59903896a5
81 changed files with 1515 additions and 1126 deletions

View File

@ -22,6 +22,7 @@ ELSEIF (TD_WINDOWS)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc)

View File

@ -97,13 +97,13 @@ IF ("${CPUTYPE}" STREQUAL "")
ELSE ()
# if generate ARM version:
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
IF (${CPUTYPE} MATCHES "aarch32" or ${CPUTYPE} MATCHES "arm32")
IF (${CPUTYPE} MATCHES "aarch32" OR ${CPUTYPE} MATCHES "arm32")
SET(PLATFORM_ARCH_STR "arm")
MESSAGE(STATUS "input cpuType: aarch32")
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_32")
SET(TD_ARM_32 TRUE)
ELSEIF (${CPUTYPE} MATCHES "aarch64" or ${CPUTYPE} MATCHES "arm64")
ELSEIF (${CPUTYPE} MATCHES "aarch64" OR ${CPUTYPE} MATCHES "arm64")
SET(PLATFORM_ARCH_STR "arm64")
MESSAGE(STATUS "input cpuType: aarch64")
ADD_DEFINITIONS("-D_TD_ARM_")

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 766dcc4
GIT_TAG ed6a160
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -3,27 +3,11 @@ sidebar_label: Kubernetes
title: Deploying a TDengine Cluster on Kubernetes
---
## Configure the ConfigMap
The following configuration files can be downloaded from the [GitHub repository](https://github.com/taosdata/TDengine-Operator/tree/3.0/src/tdengine).
Create `taoscfg.yaml` for TDengine. The settings in this file are passed to the TDengine image as environment variables; updating this configuration restarts all TDengine PODs.
## Configure the Service
```yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: taoscfg
labels:
app: tdengine
data:
CLUSTER: "1"
TAOS_KEEP: "3650"
TAOS_DEBUG_FLAG: "135"
```
## Configure the service
Create a service configuration file `taosd-service.yaml`. The service name `metadata.name` (here "taosd") will be used in the next step. Add all ports used by TDengine:
Create a Service configuration file `taosd-service.yaml`. The service name `metadata.name` (here "taosd") will be used in the next step. Add all ports used by TDengine:
```yaml
---
@ -38,45 +22,9 @@ spec:
- name: tcp6030
protocol: "TCP"
port: 6030
- name: tcp6035
protocol: "TCP"
port: 6035
- name: tcp6041
protocol: "TCP"
port: 6041
- name: udp6030
protocol: "UDP"
port: 6030
- name: udp6031
protocol: "UDP"
port: 6031
- name: udp6032
protocol: "UDP"
port: 6032
- name: udp6033
protocol: "UDP"
port: 6033
- name: udp6034
protocol: "UDP"
port: 6034
- name: udp6035
protocol: "UDP"
port: 6035
- name: udp6036
protocol: "UDP"
port: 6036
- name: udp6037
protocol: "UDP"
port: 6037
- name: udp6038
protocol: "UDP"
port: 6038
- name: udp6039
protocol: "UDP"
port: 6039
- name: udp6040
protocol: "UDP"
port: 6040
selector:
app: "tdengine"
```
@ -109,7 +57,7 @@ spec:
spec:
containers:
- name: "tdengine"
image: "zitsen/taosd:develop"
image: "tdengine/tdengine:3.0.0.0"
imagePullPolicy: "Always"
envFrom:
- configMapRef:
@ -118,45 +66,9 @@ spec:
- name: tcp6030
protocol: "TCP"
containerPort: 6030
- name: tcp6035
protocol: "TCP"
containerPort: 6035
- name: tcp6041
protocol: "TCP"
containerPort: 6041
- name: udp6030
protocol: "UDP"
containerPort: 6030
- name: udp6031
protocol: "UDP"
containerPort: 6031
- name: udp6032
protocol: "UDP"
containerPort: 6032
- name: udp6033
protocol: "UDP"
containerPort: 6033
- name: udp6034
protocol: "UDP"
containerPort: 6034
- name: udp6035
protocol: "UDP"
containerPort: 6035
- name: udp6036
protocol: "UDP"
containerPort: 6036
- name: udp6037
protocol: "UDP"
containerPort: 6037
- name: udp6038
protocol: "UDP"
containerPort: 6038
- name: udp6039
protocol: "UDP"
containerPort: 6039
- name: udp6040
protocol: "UDP"
containerPort: 6040
env:
# POD_NAME for FQDN config
- name: POD_NAME
@ -190,14 +102,13 @@ spec:
readinessProbe:
exec:
command:
- taos
- -s
- "show mnodes"
- taos-check
initialDelaySeconds: 5
timeoutSeconds: 5000
livenessProbe:
tcpSocket:
port: 6030
exec:
command:
- taos-check
initialDelaySeconds: 15
periodSeconds: 20
volumeClaimTemplates:
@ -206,44 +117,78 @@ spec:
spec:
accessModes:
- "ReadWriteOnce"
storageClassName: "csi-rbd-sc"
storageClassName: "standard"
resources:
requests:
storage: "10Gi"
```
## Start the Cluster
## Deploy the TDengine Cluster with kubectl
Apply the three files above to the Kubernetes cluster:
Run the following commands in order.
```bash
kubectl apply -f taoscfg.yaml
kubectl apply -f taosd-service.yaml
kubectl apply -f tdengine.yaml
```
The configuration above creates a three-node TDengine cluster; the dnodes are configured automatically. You can use the `show dnodes` command to view the nodes in the current cluster:
The configuration above creates a three-node TDengine cluster; the dnodes are configured automatically. You can use the show dnodes command to view the nodes in the current cluster:
```bash
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
kubectl exec -i -t tdengine-1 -- taos -s "show dnodes"
kubectl exec -i -t tdengine-2 -- taos -s "show dnodes"
```
The output is as follows:
```
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 17:13:24.181 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 17:14:09.257 | |
Query OK, 2 row(s) in set (0.000997s)
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
Query OK, 3 rows in database (0.003655s)
```
## Enable Port Forwarding
With kubectl port forwarding, applications can access the TDengine cluster running in the Kubernetes environment.
```
kubectl port-forward tdengine-0 6041:6041 &
```
Use the curl command to verify port 6041, which is used by the TDengine REST API.
```
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
Handling connection for 6041
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
```
## Graphical Management with the Dashboard
minikube provides the dashboard command for a graphical management UI.
```
$ minikube dashboard
* Verifying dashboard health ...
* Launching proxy ...
* Verifying proxy health ...
* Opening http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser...
http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
```
In some public cloud environments, minikube is bound to the 127.0.0.1 address and cannot be reached remotely. In that case, use the kubectl proxy command to map the port to 0.0.0.0, then access the dashboard remotely through a browser at the VM's public IP and port with the same dashboard URL path.
```
$ kubectl proxy --accept-hosts='^.*$' --address='0.0.0.0'
```
## Scale Out the Cluster
@ -252,14 +197,12 @@ TDengine 集群支持自动扩容:
```bash
kubectl scale statefulsets tdengine --replicas=4
```
The `--replicas=4` parameter in the command above scales the TDengine cluster out to 4 nodes. After running it, first check the POD status:
```bash
kubectl get pods -l app=tdengine
```
The output is as follows:
@ -270,102 +213,112 @@ tdengine-0 1/1 Running 0 161m
tdengine-1 1/1 Running 0 161m
tdengine-2 1/1 Running 0 32m
tdengine-3 1/1 Running 0 32m
```
At this point the PODs are still in the Running state; the dnodes of the TDengine cluster only become visible once the POD status is `ready`:
```bash
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
kubectl exec -i -t tdengine-3 -- taos -s "show dnodes"
```
The dnode list of the four-node TDengine cluster after scaling out:
```
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 14:07:27.078 | |
4 | tdengine-3.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 14:07:48.362 | |
Query OK, 4 row(s) in set (0.001293s)
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
4 | tdengine-3.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:33:16.039 | |
Query OK, 4 rows in database (0.008377s)
```
## Scale In the Cluster
Scaling in TDengine is not automated; here we try to scale a three-node cluster in to two nodes.
Because TDengine migrates data between nodes when scaling, scaling in with kubectl requires running the "drop dnode" command first; only after the node has been removed should the Kubernetes cluster be scaled in.
First, confirm that the three-node TDengine cluster is working normally and check the dnode status in the TDengine CLI:
Note: since Pods in a Kubernetes StatefulSet can only be removed in reverse order of creation, TDengine dnodes must also be dropped in reverse order of creation; otherwise the Pods end up in an error state.
```
$ kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 4"
```
```bash
$ kubectl exec -it tdengine-0 -- taos -s "show dnodes"
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:28:49.787 | |
Query OK, 3 row(s) in set (0.001101s)
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
Query OK, 3 rows in database (0.004861s)
```
To scale in safely, the node must first be removed from the dnode list, that is, from the cluster:
After confirming the removal (use kubectl exec -i -t tdengine-0 -- taos -s "show dnodes" to check the dnode list), remove the POD with kubectl:
```
kubectl scale statefulsets tdengine --replicas=3
```
The last POD will be deleted. Use kubectl get pods -l app=tdengine to check the POD status:
```
$ kubectl get pods -l app=tdengine
NAME READY STATUS RESTARTS AGE
tdengine-0 1/1 Running 0 4m7s
tdengine-1 1/1 Running 0 3m55s
tdengine-2 1/1 Running 0 2m28s
```
After the POD is deleted, the PVC must be deleted manually; otherwise the old data will be reused on the next scale-out and the node will fail to join the cluster.
```bash
kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 'tdengine-2.taosd.default.svc.cluster.local:6030'"
```
After confirming the removal with the `show dnodes` command, remove the corresponding POD:
```bash
kubectl scale statefulsets tdengine --replicas=2
```
The last POD is deleted; use `kubectl get pods -l app=tdengine` to check the cluster status:
```
NAME READY STATUS RESTARTS AGE
tdengine-0 1/1 Running 0 3h40m
tdengine-1 1/1 Running 0 3h40m
```
After the POD is deleted, the PVC must be deleted manually; otherwise the old data will be reused on the next scale-out and the node will fail to join the cluster.
```bash
kubectl delete pvc taosdata-tdengine-2
$ kubectl delete pvc taosdata-tdengine-3
```
The cluster is now in a safe state and can be scaled out again when needed:
```bash
kubectl scale statefulsets tdengine --replicas=3
$ kubectl scale statefulsets tdengine --replicas=4
statefulset.apps/tdengine scaled
it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine
NAME READY STATUS RESTARTS AGE
tdengine-0 1/1 Running 0 35m
tdengine-1 1/1 Running 0 34m
tdengine-2 1/1 Running 0 12m
tdengine-3 0/1 ContainerCreating 0 4s
it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine
NAME READY STATUS RESTARTS AGE
tdengine-0 1/1 Running 0 35m
tdengine-1 1/1 Running 0 34m
tdengine-2 1/1 Running 0 12m
tdengine-3 0/1 Running 0 7s
it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl exec -it tdengine-0 -- taos -s "show dnodes"
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
Server is Community Edition.
```
The output of `show dnodes` is as follows:
```
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
id | endpoint | vnodes | support_vnodes | status | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
4 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:40:49.177 | |
1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | |
2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | |
5 | tdengine-2.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 18:01:36.479 | |
6 | tdengine-3.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 18:13:54.411 | |
Query OK, 4 row(s) in set (0.001348s)
```
## Delete the Cluster
## Clean Up the TDengine Cluster
To completely remove the TDengine cluster, you need to clean up the statefulset, svc, configmap, and pvc separately.
@ -381,26 +334,26 @@ kubectl delete configmap taoscfg
### Error 1
After scaling out to four nodes and then scaling in to two nodes, the deleted PODs enter the offline state:
If you scale in directly without running "drop dnode", TDengine has not yet removed the nodes, so scaling in the pods leaves some nodes of the TDengine cluster in the offline state.
```
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
$ kubectl exec -it tdengine-0 -- taos -s "show dnodes"
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
Server is Community Edition.
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
id | endpoint | vnodes | support_vnodes | status | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
3 | tdengine-2.taosd.default.sv... | 0 | 40 | offline | any | 2021-06-01 14:07:27.078 | status msg timeout |
4 | tdengine-3.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 14:07:48.362 | status msg timeout |
Query OK, 4 row(s) in set (0.001236s)
1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | |
2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | |
5 | tdengine-2.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:01:36.479 | status msg timeout |
6 | tdengine-3.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:13:54.411 | status msg timeout |
Query OK, 4 row(s) in set (0.001323s)
```
`drop dnode` will not behave as expected, and after the next cluster restart all dnodes will fail to start because the dropping state cannot be exited.
### Error 2
The TDengine cluster keeps a replica parameter; if the number of nodes after scaling in is smaller than this value, the cluster becomes unusable:

View File

@ -22,7 +22,7 @@ Helm 会使用 kubectl 和 kubeconfig 的配置来操作 Kubernetes可以参
The TDengine Chart has not been published to a Helm repository yet; for now it can be downloaded directly from GitHub:
```bash
wget https://github.com/taosdata/TDengine-Operator/raw/main/helm/tdengine-0.3.0.tgz
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz
```
@ -38,7 +38,7 @@ kubectl get storageclass
Then install it with the helm command:
```bash
helm install tdengine tdengine-0.3.0.tgz \
helm install tdengine tdengine-3.0.0.tgz \
--set storage.className=<your storage class name>
```
@ -46,7 +46,7 @@ helm install tdengine tdengine-0.3.0.tgz \
In a minikube environment, you can set a smaller capacity to avoid exceeding the available disk space:
```bash
helm install tdengine tdengine-0.3.0.tgz \
helm install tdengine tdengine-3.0.0.tgz \
--set storage.className=standard \
--set storage.dataSize=2Gi \
--set storage.logSize=10Mi
@ -83,14 +83,14 @@ TDengine 支持 `values.yaml` 自定义。
Run helm show values to get the full list of values supported by the TDengine Chart:
```bash
helm show values tdengine-0.3.0.tgz
helm show values tdengine-3.0.0.tgz
```
You can save the output as values.yaml, modify parameters such as the number of replicas, the storage class name, the capacity, and the TDengine configuration, and then install the TDengine cluster with the following command:
```bash
helm install tdengine tdengine-0.3.0.tgz -f values.yaml
helm install tdengine tdengine-3.0.0.tgz -f values.yaml
```
@ -107,37 +107,17 @@ image:
prefix: tdengine/tdengine
#pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
#tag: "2.4.0.5"
# tag: "3.0.0.0"
service:
# ClusterIP is the default service type, use NodeIP only if you know what you are doing.
type: ClusterIP
ports:
# TCP range required
tcp:
[
6030,
6031,
6032,
6033,
6034,
6035,
6036,
6037,
6038,
6039,
6040,
6041,
6042,
6043,
6044,
6045,
6060,
]
# UDP range 6030-6039
udp: [6030, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039]
tcp: [6030, 6041, 6042, 6043, 6044, 6046, 6047, 6048, 6049, 6060]
# UDP range
udp: [6044, 6045]
arbitrator: true
# Set timezone here, not in taoscfg
timezone: "Asia/Shanghai"
@ -182,11 +162,14 @@ clusterDomainSuffix: ""
#
# Btw, keep quotes "" around the value like below, even the value will be number or not.
taoscfg:
# Starts as cluster or not, must be 0 or 1.
# 0: all pods will start as a seperate TDengine server
# 1: pods will start as TDengine server cluster. [default]
CLUSTER: "1"
# number of replications, for cluster only
TAOS_REPLICA: "1"
# number of management nodes in the system
TAOS_NUM_OF_MNODES: "1"
# number of days per DB file
# TAOS_DAYS: "10"
@ -422,7 +405,7 @@ kubectl --namespace default exec $POD_NAME -- taos -s 'drop dnode "<you dnode in
```
## Delete the Cluster
## Clean Up the Cluster
Under Helm management, cleanup is also simple:

View File

@ -279,7 +279,7 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
2. Call `taos_stmt_prepare()` to parse the INSERT statement;
3. If the INSERT statement reserves the table name but not the TAGS, call `taos_stmt_set_tbname()` to set the table name;
4. If the INSERT statement reserves both the table name and the TAGS (for example, when it uses automatic table creation), call `taos_stmt_set_tbname_tags()` to set the table name and the TAG values;
5. Call `taos_stmt_bind_param_batch()` to set the VALUES for multiple rows at a time, or call `taos_stmt_bind_param()` to set the VALUES for a single row;
6. Call `taos_stmt_add_batch()` to add the currently bound parameters to the batch;
7. Repeat steps 3 to 6 to add more rows to the batch;
8. Call `taos_stmt_execute()` to execute the prepared batch;
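A minimal C sketch of the call sequence above follows. It is an illustration only, not code from this commit: the subtable name `d1001`, its schema `(ts TIMESTAMP, current FLOAT)`, the sample values, and the helper function name are assumptions, and error handling is omitted.
```c
#include <string.h>
#include "taos.h"

// Illustrative sketch of the batched bind flow above (error checks omitted).
// Assumes subtable d1001 with schema (ts TIMESTAMP, current FLOAT) already exists.
void stmt_insert_sketch(TAOS *taos) {
  TAOS_STMT  *stmt = taos_stmt_init(taos);                  // step 1: create the statement object
  const char *sql  = "INSERT INTO ? VALUES(?, ?)";
  taos_stmt_prepare(stmt, sql, strlen(sql));                // step 2: parse the INSERT statement
  taos_stmt_set_tbname(stmt, "d1001");                      // step 3: table name reserved, no TAGS

  int64_t ts[2]      = {1660000000000LL, 1660000001000LL};  // timestamps in milliseconds
  float   current[2] = {10.3f, 10.8f};
  char    is_null[2] = {0, 0};

  TAOS_MULTI_BIND params[2] = {0};
  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = ts;
  params[0].buffer_length = sizeof(int64_t);
  params[0].is_null       = is_null;
  params[0].num           = 2;
  params[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
  params[1].buffer        = current;
  params[1].buffer_length = sizeof(float);
  params[1].is_null       = is_null;
  params[1].num           = 2;

  taos_stmt_bind_param_batch(stmt, params);                 // step 5: bind two rows at once
  taos_stmt_add_batch(stmt);                                // step 6: add them to the batch
  taos_stmt_execute(stmt);                                  // step 8: run the prepared batch
  taos_stmt_close(stmt);
}
```
In a real program, check the return value of every `taos_stmt_*` call before proceeding to the next step.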

View File

@ -306,8 +306,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | Parameter binding, one row at a time |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writes |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Using JSON type tags |
| [subscribe-async.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-async.py) | Asynchronous subscription |
| [subscribe-sync.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-sync.py) | Synchronous subscription |
| [tmq.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq.py) | TMQ subscription |
## Other Notes
@ -326,23 +325,15 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
2. https://www.python.org/dev/peps/pep-0564/
## Frequently Asked Questions
You are welcome to [ask questions or report issues](https://github.com/taosdata/taos-connector-python/issues).
## Important Updates
| Connector version | Important updates | Release date |
| ---------- | --------------------------------------------------------------------------------- | ---------- |
| 2.3.1 | 1. support TDengine REST API <br/> 2. remove support for Python version below 3.6 | 2022-04-28 |
| 2.2.5 | support timezone option when connect | 2022-04-13 |
| 2.2.2 | support sqlalchemy dialect plugin | 2022-03-28 |
[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases)
## API Reference
- [taos](https://docs.taosdata.com/api/taospy/taos/)
- [taosrest](https://docs.taosdata.com/api/taospy/taosrest)
## Frequently Asked Questions
You are welcome to [ask questions or report issues](https://github.com/taosdata/taos-connector-python/issues).

View File

@ -239,7 +239,7 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId
SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index);
void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress);
const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData);
const char* blockDecode(SSDataBlock* pBlock, const char* pData);
void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag);
void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);

View File

@ -296,13 +296,13 @@ void tFreeSSubmitRsp(SSubmitRsp* pRsp);
#define COL_IDX_ON ((int8_t)0x2)
#define COL_SET_NULL ((int8_t)0x10)
#define COL_SET_VAL ((int8_t)0x20)
typedef struct SSchema {
struct SSchema {
int8_t type;
int8_t flags;
col_id_t colId;
int32_t bytes;
char name[TSDB_COL_NAME_LEN];
} SSchema;
};
#define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0)
#define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL)))
@ -648,7 +648,7 @@ typedef struct {
};
bool output; // TODO remove it later
int16_t type;
int8_t type;
int32_t bytes;
uint8_t precision;
uint8_t scale;
@ -1364,6 +1364,7 @@ typedef struct {
int8_t compressed;
int8_t streamBlockType;
int32_t compLen;
int32_t numOfBlocks;
int32_t numOfRows;
int32_t numOfCols;
int64_t skey;

View File

@ -38,7 +38,8 @@ typedef struct {
uint16_t type : 2;
uint16_t del : 1;
uint16_t endian : 1;
uint16_t reserve : 12;
uint16_t statis : 1; // 0 all normal, 1 has null or none
uint16_t reserve : 11;
uint16_t sver;
};
};
@ -149,6 +150,8 @@ typedef struct {
void *pBitmap;
void *pOffset;
int32_t extendedRowSize;
bool hasNone;
bool hasNull;
} SRowBuilder;
#define TD_ROW_HEAD_LEN (sizeof(STSRow))
@ -287,9 +290,13 @@ int32_t tdSRowSetTpInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t flen);
int32_t tdSRowSetExtendedInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBoundCols, int32_t flen,
int32_t allNullLen, int32_t boundNullLen);
int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf);
static FORCE_INLINE void tdSRowEnd(SRowBuilder *pBuilder) {
STSRow *pRow = (STSRow *)pBuilder->pBuf;
if (pBuilder->hasNull || pBuilder->hasNone) {
pRow->statis = 1;
}
}
int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf);
int32_t tdSRowInitEx(SRowBuilder *pBuilder, void *pBuf, uint32_t allNullLen, uint32_t boundNullLen, int32_t nCols,
int32_t nBoundCols, int32_t flen);
void tdSRowReset(SRowBuilder *pBuilder);
int32_t tdAppendColValToTpRow(SRowBuilder *pBuilder, TDRowValT valType, const void *val, bool isCopyVarData,
int8_t colType, int16_t colIdx, int32_t offset);

View File

@ -67,6 +67,7 @@ typedef struct SInputData {
} SInputData;
typedef struct SOutputData {
int32_t numOfBlocks;
int32_t numOfRows;
int32_t numOfCols;
int8_t compressed;

View File

@ -123,7 +123,8 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
* @param handle
* @return
*/
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds);
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds);
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds);
/**
* kill the ongoing query and free the query handle and corresponding resources automatically

View File

@ -380,6 +380,7 @@ typedef struct SAggPhysiNode {
SNodeList* pExprs; // these are expression list of group_by_clause and parameter expression of aggregate function
SNodeList* pGroupKeys;
SNodeList* pAggFuncs;
bool mergeDataBlock;
} SAggPhysiNode;
typedef struct SDownstreamSourceNode {
@ -418,6 +419,7 @@ typedef struct SWinodwPhysiNode {
int8_t igExpired;
EOrder inputTsOrder;
EOrder outputTsOrder;
bool mergeDataBlock;
} SWinodwPhysiNode;
typedef struct SIntervalPhysiNode {

View File

@ -44,7 +44,7 @@ extern int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict
extern int32_t filterConverNcharColumns(SFilterInfo *pFilterInfo, int32_t rows, bool *gotNchar);
extern int32_t filterFreeNcharColumns(SFilterInfo *pFilterInfo);
extern void filterFreeInfo(SFilterInfo *info);
extern bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t numOfCols, int32_t numOfRows);
extern bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg **pColsAgg, int32_t numOfCols, int32_t numOfRows);
/* condition split interface */
int32_t filterPartitionCond(SNode **pCondition, SNode **pPrimaryKeyCond, SNode **pTagIndexCond, SNode **pTagCond,

View File

@ -47,8 +47,6 @@ typedef struct SRpcHandleInfo {
int8_t persistHandle; // persist handle or not
int8_t hasEpSet;
STraceId traceId;
// app info
void *ahandle; // app handle set by client
void *wrapper; // wrapper handle
@ -58,7 +56,8 @@ typedef struct SRpcHandleInfo {
void *rsp;
int32_t rspLen;
// conn info
STraceId traceId;
SRpcConnInfo conn;
} SRpcHandleInfo;

View File

@ -32,6 +32,7 @@ extern "C" {
typedef struct TdCmd *TdCmdPtr;
TdCmdPtr taosOpenCmd(const char* cmd);
int64_t taosGetsCmd(TdCmdPtr pCmd, int32_t maxSize, char *__restrict buf);
int64_t taosGetLineCmd(TdCmdPtr pCmd, char** __restrict ptrBuf);
int32_t taosEOFCmd(TdCmdPtr pCmd);
int64_t taosCloseCmd(TdCmdPtr* ppCmd);

View File

@ -396,7 +396,7 @@ typedef enum ELogicConditionType {
#ifdef WINDOWS
#define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections.
#else
#define TSDB_MAX_RPC_THREADS 5
#define TSDB_MAX_RPC_THREADS 10
#endif
#define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type

View File

@ -11,13 +11,13 @@ if !%2==! GOTO USAGE
if "%1" == "cluster" (
set work_dir=%internal_dir%
set packagServerName_x64=TDengine-enterprise-server-%2-beta-Windows-x64
set packagServerName_x86=TDengine-enterprise-server-%2-beta-Windows-x86
@REM set packagServerName_x86=TDengine-enterprise-server-%2-beta-Windows-x86
set packagClientName_x64=TDengine-enterprise-client-%2-beta-Windows-x64
set packagClientName_x86=TDengine-enterprise-client-%2-beta-Windows-x86
) else (
set work_dir=%community_dir%
set packagServerName_x64=TDengine-server-%2-Windows-x64
set packagServerName_x86=TDengine-server-%2-Windows-x86
@REM set packagServerName_x86=TDengine-server-%2-Windows-x86
set packagClientName_x64=TDengine-client-%2-Windows-x64
set packagClientName_x86=TDengine-client-%2-Windows-x86
)
@ -59,8 +59,8 @@ rd /s /Q C:\TDengine
cmake --install .
if not %errorlevel% == 0 ( call :RUNFAILED build x86 failed & exit /b 1)
cd %package_dir%
iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1)
@REM iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
@REM if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1)
iscc /DMyAppInstallName="%packagClientName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release
if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x86% failed & exit /b 1)

View File

@ -101,10 +101,6 @@ typedef struct SQueryExecMetric {
int64_t rsp; // receive response from server, us
} SQueryExecMetric;
typedef struct SHeartBeatInfo {
void* pTimer; // timer, used to send request msg to mnode
} SHeartBeatInfo;
struct SAppInstInfo {
int64_t numOfConns;
SCorEpSet mgmtEp;
@ -256,6 +252,8 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly);
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) {
SMqRspObj* msg = (SMqRspObj*)res;
return (SReqResultInfo*)&msg->resInfo;

View File

@ -1571,10 +1571,18 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
return TSDB_CODE_SUCCESS;
}
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
int32_t cols = *(int32_t*) (p + sizeof(int32_t) * 3);
ASSERT(numOfCols == cols);
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t)*3 + sizeof(uint64_t) + numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
char* p = (char*)pResultInfo->pData;
int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
// version + length + numOfRows + numOfCol + groupId + flag_segment + column_info
int32_t len = getVersion1BlockMetaSize(p, numOfCols);
int32_t* colLength = (int32_t*)(p + len);
len += sizeof(int32_t) * numOfCols;
@ -1642,7 +1650,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
char* p1 = pResultInfo->convertJson;
int32_t totalLen = 0;
int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
int32_t len = getVersion1BlockMetaSize(p, numOfCols);
memcpy(p1, p, len);
p += len;
@ -1739,7 +1747,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
pStart1 += colLen1;
}
*(int32_t*)(pResultInfo->convertJson) = totalLen;
*(int32_t*)(pResultInfo->convertJson + 4) = totalLen;
pResultInfo->pData = pResultInfo->convertJson;
return TSDB_CODE_SUCCESS;
}
@ -1762,16 +1770,31 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
char* p = (char*)pResultInfo->pData;
// version:
int32_t blockVersion = *(int32_t*)p;
p += sizeof(int32_t);
int32_t dataLen = *(int32_t*)p;
p += sizeof(int32_t);
int32_t rows = *(int32_t*)p;
p += sizeof(int32_t);
int32_t cols = *(int32_t*)p;
p += sizeof(int32_t);
ASSERT(rows == numOfRows && cols == numOfCols);
int32_t hasColumnSeg = *(int32_t*)p;
p += sizeof(int32_t);
uint64_t groupId = *(uint64_t*)p;
p += sizeof(uint64_t);
// check fields
for (int32_t i = 0; i < numOfCols; ++i) {
int16_t type = *(int16_t*)p;
p += sizeof(int16_t);
p += sizeof(int8_t);
int32_t bytes = *(int32_t*)p;
p += sizeof(int32_t);

View File

@ -2947,7 +2947,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
tdSRowSetTpInfo(&rb, numOfCols, fLen);
int32_t dataLen = 0;
char* pStart = pData + sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
int32_t* colLength = (int32_t*)pStart;
pStart += sizeof(int32_t) * numOfCols;
@ -2977,6 +2977,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
char* data = pCol[k].pData + pCol[k].offset[j];
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
} else {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
}
} else {
@ -2990,6 +2991,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
offset += TYPE_BYTES[pColumn->type];
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
dataLen += rowLen;
@ -3220,6 +3222,7 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
}
offset += TYPE_BYTES[pColumn->type];
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
dataLen += rowLen;

View File

@ -154,7 +154,7 @@ TEST(testCase, driverInit_Test) {
}
TEST(testCase, connect_Test) {
taos_options(TSDB_OPTION_CONFIGDIR, "/home/lisa/Documents/workspace/tdengine/sim/dnode1/cfg");
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
@ -670,54 +670,56 @@ TEST(testCase, projection_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
if (taos_errno(pRes) != 0) {
printf("error in create db, reason:%s\n", taos_errstr(pRes));
}
// TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
// if (taos_errno(pRes) != 0) {
// printf("error in create db, reason:%s\n", taos_errstr(pRes));
// }
// taos_free_result(pRes);
TAOS_RES* pRes = taos_query(pConn, "use abc1");
taos_free_result(pRes);
pRes = taos_query(pConn, "use abc1");
taos_free_result(pRes);
pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tu using st1 tags(1)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
for(int32_t i = 0; i < 1; ++i) {
printf("create table :%d\n", i);
createNewTable(pConn, i);
}
pRes = taos_query(pConn, "select * from tu");
if (taos_errno(pRes) != 0) {
printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
taos_free_result(pRes);
ASSERT_TRUE(false);
}
TAOS_ROW pRow = NULL;
TAOS_FIELD* pFields = taos_fetch_fields(pRes);
int32_t numOfFields = taos_num_fields(pRes);
char str[512] = {0};
while ((pRow = taos_fetch_row(pRes)) != NULL) {
int32_t code = taos_print_row(str, pRow, pFields, numOfFields);
printf("%s\n", str);
}
pRes = taos_query(pConn, "explain verbose true select _wstart,count(*),a from st1 partition by a interval(1s)");
printResult(pRes);
// pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
// if (taos_errno(pRes) != 0) {
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
// }
// taos_free_result(pRes);
//
// pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
// if (taos_errno(pRes) != 0) {
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
// }
// taos_free_result(pRes);
//
// pRes = taos_query(pConn, "create table tu using st1 tags(1)");
// if (taos_errno(pRes) != 0) {
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
// }
// taos_free_result(pRes);
//
// for(int32_t i = 0; i < 1; ++i) {
// printf("create table :%d\n", i);
// createNewTable(pConn, i);
// }
//
// pRes = taos_query(pConn, "select * from tu");
// if (taos_errno(pRes) != 0) {
// printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
// taos_free_result(pRes);
// ASSERT_TRUE(false);
// }
//
// TAOS_ROW pRow = NULL;
// TAOS_FIELD* pFields = taos_fetch_fields(pRes);
// int32_t numOfFields = taos_num_fields(pRes);
//
// char str[512] = {0};
// while ((pRow = taos_fetch_row(pRes)) != NULL) {
// int32_t code = taos_print_row(str, pRow, pFields, numOfFields);
// printf("%s\n", str);
// }
taos_free_result(pRes);
taos_close(pConn);

View File

@ -676,9 +676,9 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
* @return
*/
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
// | total rows/total length | block group id | column schema | each column length |
return sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t)) +
numOfCols * sizeof(int32_t);
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}
double blockDataGetSerialRowSize(const SSDataBlock* pBlock) {
@ -1329,10 +1329,6 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pSrc = taosArrayGet(pDataBlock->pDataBlock, i);
if (pSrc->pData == NULL) {
continue;
}
colDataAssign(pDst, pSrc, pDataBlock->info.rows, &pDataBlock->info);
}
@ -1581,7 +1577,7 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
for (int32_t i = 0; i < sz; i++) {
SColumnInfoData* pColData = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, i);
tlen += taosEncodeFixedI16(buf, pColData->info.colId);
tlen += taosEncodeFixedI16(buf, pColData->info.type);
tlen += taosEncodeFixedI8(buf, pColData->info.type);
tlen += taosEncodeFixedI32(buf, pColData->info.bytes);
tlen += taosEncodeFixedBool(buf, pColData->hasNull);
@ -1613,7 +1609,7 @@ void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock) {
for (int32_t i = 0; i < sz; i++) {
SColumnInfoData data = {0};
buf = taosDecodeFixedI16(buf, &data.info.colId);
buf = taosDecodeFixedI16(buf, &data.info.type);
buf = taosDecodeFixedI8(buf, &data.info.type);
buf = taosDecodeFixedI32(buf, &data.info.bytes);
buf = taosDecodeFixedBool(buf, &data.hasNull);
@ -2001,6 +1997,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
}
offset += TYPE_BYTES[pCol->type]; // sum/avg would convert to int64_t/uint64_t/double during aggregation
}
tdSRowEnd(&rb);
dataLen += TD_ROW_LEN(rb.pBuf);
#ifdef TD_DEBUG_PRINT_ROW
tdSRowPrint(rb.pBuf, pTSchema, __func__);
@ -2070,17 +2067,36 @@ char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId) {
void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress) {
// todo extract method
int32_t* version = (int32_t*)data;
*version = 1;
data += sizeof(int32_t);
int32_t* actualLen = (int32_t*)data;
data += sizeof(int32_t);
int32_t* rows = (int32_t*)data;
*rows = pBlock->info.rows;
data += sizeof(int32_t);
int32_t* cols = (int32_t*)data;
*cols = numOfCols;
data += sizeof(int32_t);
// flag segment.
// the inital bit is for column info
int32_t* flagSegment = (int32_t*)data;
*flagSegment = (1<<31);
data += sizeof(int32_t);
uint64_t* groupId = (uint64_t*)data;
data += sizeof(uint64_t);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
*((int16_t*)data) = pColInfoData->info.type;
data += sizeof(int16_t);
*((int8_t*)data) = pColInfoData->info.type;
data += sizeof(int8_t);
*((int32_t*)data) = pColInfoData->info.bytes;
data += sizeof(int32_t);
@ -2126,12 +2142,31 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
*groupId = pBlock->info.groupId;
}
const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData) {
const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
const char* pStart = pData;
int32_t version = *(int32_t*) pStart;
pStart += sizeof(int32_t);
ASSERT(version == 1);
// total length sizeof(int32_t)
int32_t dataLen = *(int32_t*)pStart;
pStart += sizeof(int32_t);
// total rows sizeof(int32_t)
int32_t numOfRows = *(int32_t*)pStart;
pStart += sizeof(int32_t);
// total columns sizeof(int32_t)
int32_t numOfCols = *(int32_t*)pStart;
pStart += sizeof(int32_t);
// has column info segment
int32_t flagSeg = *(int32_t*)pStart;
int32_t hasColumnInfo = (flagSeg >> 31);
pStart += sizeof(int32_t);
// group id sizeof(uint64_t)
pBlock->info.groupId = *(uint64_t*)pStart;
pStart += sizeof(uint64_t);
@ -2143,7 +2178,7 @@ const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRow
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
pColInfoData->info.type = *(int16_t*)pStart;
pStart += sizeof(int16_t);
pStart += sizeof(int8_t);
pColInfoData->info.bytes = *(int32_t*)pStart;
pStart += sizeof(int32_t);

View File

@ -666,6 +666,7 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
++iColVal;
}
tdSRowEnd(&rb);
taosMemoryFreeClear(varBuf);
@ -880,26 +881,29 @@ int32_t tdGetKvRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int32_
int32_t tdGetTpRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int8_t colType, int32_t offset,
int16_t colIdx) {
#ifdef TD_SUPPORT_BITMAP
if (pRow->statis == 0) {
output->valType = TD_VTYPE_NORM;
if (IS_VAR_DATA_TYPE(colType)) {
output->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
} else {
output->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
return TSDB_CODE_SUCCESS;
}
if (tdGetBitmapValType(pBitmap, colIdx, &output->valType, 0) != TSDB_CODE_SUCCESS) {
output->valType = TD_VTYPE_NONE;
return terrno;
}
if (tdValTypeIsNorm(output->valType)) {
if (output->valType == TD_VTYPE_NORM) {
if (IS_VAR_DATA_TYPE(colType)) {
output->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
} else {
output->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
}
#else
if (IS_VAR_DATA_TYPE(colType)) {
output->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
} else {
output->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
output->valType = isNull(output->val, colType) ? TD_VTYPE_NULL : TD_VTYPE_NORM;
#endif
return TSDB_CODE_SUCCESS;
}
@ -908,7 +912,7 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
STSRow *pRow = pBuilder->pBuf;
if (!val) {
#ifdef TD_SUPPORT_BITMAP
if (tdValTypeIsNorm(valType)) {
if (valType == TD_VTYPE_NORM) {
terrno = TSDB_CODE_INVALID_PTR;
return terrno;
}
@ -925,6 +929,21 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
return TSDB_CODE_SUCCESS;
}
// TODO: We can avoid the type judegement by FP, but would prevent the inline scheme.
switch (valType) {
case TD_VTYPE_NORM:
break;
case TD_VTYPE_NULL:
if (!pBuilder->hasNull) pBuilder->hasNull = true;
break;
case TD_VTYPE_NONE:
if (!pBuilder->hasNone) pBuilder->hasNone = true;
break;
default:
ASSERT(0);
break;
}
if (TD_IS_TP_ROW(pRow)) {
tdAppendColValToTpRow(pBuilder, valType, val, isCopyVarData, colType, colIdx, offset);
} else {
@ -951,7 +970,7 @@ int32_t tdAppendColValToKvRow(SRowBuilder *pBuilder, TDRowValT valType, const vo
STSRow *row = pBuilder->pBuf;
// No need to store None/Null values.
if (tdValIsNorm(valType, val, colType)) {
if (valType == TD_VTYPE_NORM) {
// ts key stored in STSRow.ts
SKvRowIdx *pColIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(row), offset);
char *ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
@ -1012,7 +1031,7 @@ int32_t tdAppendColValToTpRow(SRowBuilder *pBuilder, TDRowValT valType, const vo
// 1. No need to set flen part for Null/None, just use bitmap. When upsert for the same primary TS key, the bitmap
// should be updated simultaneously if Norm val overwrite Null/None cols.
// 2. When consume STSRow in memory by taos client/tq, the output of Null/None cols should both be Null.
if (tdValIsNorm(valType, val, colType)) {
if (valType == TD_VTYPE_NORM) {
// TODO: The layout of new data types imported since 3.0 like blob/medium blob is the same with binary/nchar.
if (IS_VAR_DATA_TYPE(colType)) {
// ts key stored in STSRow.ts
@ -1092,6 +1111,9 @@ int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf) {
return terrno;
}
if (pBuilder->hasNone) pBuilder->hasNone = false;
if (pBuilder->hasNull) pBuilder->hasNull = false;
TD_ROW_SET_INFO(pBuilder->pBuf, 0);
TD_ROW_SET_TYPE(pBuilder->pBuf, pBuilder->rowType);
@ -1159,14 +1181,6 @@ int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf) {
return TSDB_CODE_SUCCESS;
}
int32_t tdSRowInitEx(SRowBuilder *pBuilder, void *pBuf, uint32_t allNullLen, uint32_t boundNullLen, int32_t nCols,
int32_t nBoundCols, int32_t flen) {
if (tdSRowSetExtendedInfo(pBuilder, allNullLen, boundNullLen, nCols, nBoundCols, flen) < 0) {
return terrno;
}
return tdSRowResetBuf(pBuilder, pBuf);
}
void tdSRowReset(SRowBuilder *pBuilder) {
pBuilder->rowType = TD_ROW_TP;
pBuilder->pBuf = NULL;

View File

@ -604,28 +604,44 @@ _end:
static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
int64_t suid, int8_t blkType) {
while (1) {
SSDataBlock *output = NULL;
uint64_t ts;
SArray *pResList = taosArrayInit(1, POINTER_BYTES);
if (pResList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
int32_t code = qExecTask(taskInfo, &output, &ts);
while (1) {
uint64_t ts;
int32_t code = qExecTaskOpt(taskInfo, pResList, &ts);
if (code < 0) {
smaError("vgId:%d, qExecTask for rsma table %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma), suid,
pItem->level, terrstr(code));
goto _err;
}
if (output) {
#if 0
if (taosArrayGetSize(pResList) == 0) {
if (terrno == 0) {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
} else {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
goto _err;
}
break;
}
for (int32_t i = 0; i < taosArrayGetSize(pResList); ++i) {
SSDataBlock *output = taosArrayGetP(pResList, i);
#if 1
char flag[10] = {0};
snprintf(flag, 10, "level %" PRIi8, pItem->level);
SArray *pResult = taosArrayInit(1, sizeof(SSDataBlock));
taosArrayPush(pResult, output);
blockDebugShowDataBlocks(pResult, flag);
taosArrayDestroy(pResult);
// blockDebugShowDataBlocks(output, flag);
// taosArrayDestroy(pResult);
#endif
STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
STsdb * sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
SSubmitReq *pReq = NULL;
// TODO: the schema update should be handled later(TD-17965)
if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) {
smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
@ -644,17 +660,14 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
SMA_VID(pSma), suid, pItem->level, output->info.version);
taosMemoryFreeClear(pReq);
} else if (terrno == 0) {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
break;
} else {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
goto _err;
}
}
taosArrayDestroy(pResList);
return TSDB_CODE_SUCCESS;
_err:
taosArrayDestroy(pResList);
return TSDB_CODE_FAILED;
}
@ -1407,7 +1420,7 @@ int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
tEncoderClear(&encoder);
}
tEncoderClear(&encoder);
((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead);
@ -1434,10 +1447,10 @@ _err:
/**
* @brief fetch rsma data of level 2/3 and submit
*
* @param pSma
* @param pMsg
* @return int32_t
*
* @param pSma
* @param pMsg
* @return int32_t
*/
int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;

View File

@ -157,6 +157,7 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k);
}
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
dataLen += rowLen;

View File

@ -393,15 +393,16 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
SMemSkipListNode *px;
SMemSkipListNode *pn;
TSDBKEY *pTKey;
int c;
int backward = flags & SL_MOVE_BACKWARD;
int fromPos = flags & SL_MOVE_FROM_POS;
int32_t backward = flags & SL_MOVE_BACKWARD;
int32_t fromPos = flags & SL_MOVE_FROM_POS;
if (backward) {
px = pTbData->sl.pTail;
for (int8_t iLevel = pTbData->sl.maxLevel - 1; iLevel >= pTbData->sl.level; iLevel--) {
pos[iLevel] = px;
if (!fromPos) {
for (int8_t iLevel = pTbData->sl.level; iLevel < pTbData->sl.maxLevel; iLevel++) {
pos[iLevel] = px;
}
}
if (pTbData->sl.level) {
@ -412,7 +413,7 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
while (pn != pTbData->sl.pHead) {
pTKey = (TSDBKEY *)SL_NODE_DATA(pn);
c = tsdbKeyCmprFn(pTKey, pKey);
int32_t c = tsdbKeyCmprFn(pTKey, pKey);
if (c <= 0) {
break;
} else {
@ -427,8 +428,10 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
} else {
px = pTbData->sl.pHead;
for (int8_t iLevel = pTbData->sl.maxLevel - 1; iLevel >= pTbData->sl.level; iLevel--) {
pos[iLevel] = px;
if (!fromPos) {
for (int8_t iLevel = pTbData->sl.level; iLevel < pTbData->sl.maxLevel; iLevel++) {
pos[iLevel] = px;
}
}
if (pTbData->sl.level) {
@ -437,9 +440,7 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
for (int8_t iLevel = pTbData->sl.level - 1; iLevel >= 0; iLevel--) {
pn = SL_NODE_FORWARD(px, iLevel);
while (pn != pTbData->sl.pTail) {
pTKey = (TSDBKEY *)SL_NODE_DATA(pn);
c = tsdbKeyCmprFn(pTKey, pKey);
int32_t c = tsdbKeyCmprFn(SL_NODE_DATA(pn), pKey);
if (c >= 0) {
break;
} else {
@ -480,36 +481,44 @@ static int32_t tbDataDoPut(SMemTable *pMemTable, STbData *pTbData, SMemSkipListN
goto _exit;
}
pNode->level = level;
for (int8_t iLevel = 0; iLevel < level; iLevel++) {
SL_NODE_FORWARD(pNode, iLevel) = NULL;
SL_NODE_BACKWARD(pNode, iLevel) = NULL;
}
tPutTSDBRow((uint8_t *)SL_NODE_DATA(pNode), pRow);
// put
for (int8_t iLevel = 0; iLevel < pNode->level; iLevel++) {
SMemSkipListNode *px = pos[iLevel];
for (int8_t iLevel = level - 1; iLevel >= 0; iLevel--) {
SMemSkipListNode *pn = pos[iLevel];
SMemSkipListNode *px;
if (forward) {
SMemSkipListNode *pNext = SL_NODE_FORWARD(px, iLevel);
SL_NODE_FORWARD(pNode, iLevel) = pNext;
SL_NODE_BACKWARD(pNode, iLevel) = px;
SL_NODE_BACKWARD(pNext, iLevel) = pNode;
SL_NODE_FORWARD(px, iLevel) = pNode;
} else {
SMemSkipListNode *pPrev = SL_NODE_BACKWARD(px, iLevel);
px = SL_NODE_FORWARD(pn, iLevel);
SL_NODE_BACKWARD(pNode, iLevel) = pn;
SL_NODE_FORWARD(pNode, iLevel) = px;
SL_NODE_BACKWARD(pNode, iLevel) = pPrev;
} else {
px = SL_NODE_BACKWARD(pn, iLevel);
SL_NODE_FORWARD(pPrev, iLevel) = pNode;
SL_NODE_BACKWARD(px, iLevel) = pNode;
SL_NODE_BACKWARD(pNode, iLevel) = px;
SL_NODE_FORWARD(pNode, iLevel) = pn;
}
}
for (int8_t iLevel = level - 1; iLevel >= 0; iLevel--) {
SMemSkipListNode *pn = pos[iLevel];
SMemSkipListNode *px;
if (forward) {
px = SL_NODE_FORWARD(pn, iLevel);
SL_NODE_FORWARD(pn, iLevel) = pNode;
SL_NODE_BACKWARD(px, iLevel) = pNode;
} else {
px = SL_NODE_BACKWARD(pn, iLevel);
SL_NODE_FORWARD(px, iLevel) = pNode;
SL_NODE_BACKWARD(pn, iLevel) = pNode;
}
pos[iLevel] = pNode;
}
pTbData->sl.size++;
if (pTbData->sl.level < pNode->level) {
pTbData->sl.level = pNode->level;
@ -548,7 +557,7 @@ static int32_t tsdbInsertTableDataImpl(SMemTable *pMemTable, STbData *pTbData, i
// forward put rest data
row.pTSRow = tGetSubmitBlkNext(&blkIter);
if (row.pTSRow) {
for (int8_t iLevel = 0; iLevel < pTbData->sl.maxLevel; iLevel++) {
for (int8_t iLevel = pos[0]->level; iLevel < pTbData->sl.maxLevel; iLevel++) {
pos[iLevel] = SL_NODE_BACKWARD(pos[iLevel], iLevel);
}
do {

View File

@ -14,6 +14,7 @@
*/
#include "tsdb.h"
#include "osDef.h"
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
typedef enum {
@ -129,7 +130,8 @@ struct STsdbReader {
SBlockLoadSuppInfo suppInfo;
STsdbReadSnap* pReadSnap;
SIOCostSummary cost;
STSchema* pSchema;
STSchema* pSchema;// the newest version schema
STSchema* pMemSchema;// the previous schema for in-memory data, to avoid load schema too many times
SDataFReader* pFileReader;
SVersionRange verRange;
@ -145,15 +147,14 @@ static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanI
SRowMerger* pMerger);
static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader);
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow);
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid);
static int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
int32_t rowIndex);
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
static void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader);
static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
static void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
STsdbReader* pReader);
STsdbReader* pReader, bool* freeTSRow);
static void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
STSRow** pTSRow);
static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
@ -181,6 +182,7 @@ static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
pSupInfo->buildBuf[i] = taosMemoryMalloc(pCol->info.bytes);
tsdbInfo("-------------------%d\n", pCol->info.bytes);
}
}
@ -673,6 +675,7 @@ static void doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_
colDataAppendNULL(pColInfoData, rowIndex);
} else {
varDataSetLen(pSup->buildBuf[colIndex], pColVal->value.nData);
ASSERT(pColVal->value.nData <= pColInfoData->info.bytes);
memcpy(varDataVal(pSup->buildBuf[colIndex]), pColVal->value.pData, pColVal->value.nData);
colDataAppend(pColInfoData, rowIndex, pSup->buildBuf[colIndex], false);
}
@ -1210,6 +1213,51 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
return code;
}
static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pBlockData, int64_t key, SFileBlockDumpInfo* pDumpInfo) {
// opt version
// 1. it is not a border point
// 2. the direct next point is not an duplicated timestamp
if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
(pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
int32_t step = pReader->order == TSDB_ORDER_ASC? 1:-1;
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
if (nextKey != key) { // merge is not needed
doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
pDumpInfo->rowIndex += step;
return true;
}
}
return false;
}
static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* pReader, uint64_t uid) {
// always set the newest schema version in pReader->pSchema
if (pReader->pSchema == NULL) {
pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, uid, -1);
}
if (sversion == pReader->pSchema->version) {
return pReader->pSchema;
}
if (pReader->pMemSchema == NULL) {
int32_t code =
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pMemSchema);
return pReader->pMemSchema;
}
if (pReader->pMemSchema->version == sversion) {
return pReader->pMemSchema;
}
taosMemoryFree(pReader->pMemSchema);
int32_t code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pMemSchema);
return pReader->pMemSchema;
}
static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
SIterInfo* pIter, int64_t key) {
SRowMerger merge = {0};
@ -1220,16 +1268,23 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
SArray* pDelList = pBlockScanInfo->delSkyline;
bool freeTSRow = false;
uint64_t uid = pBlockScanInfo->uid;
// ascending order traverse
if (ASCENDING_TRAVERSE(pReader->order)) {
if (key < k.ts) {
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
// imem & mem are all empty, only file exist
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
return TSDB_CODE_SUCCESS;
} else {
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
freeTSRow = true;
}
} else if (k.ts < key) { // k.ts < key
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader);
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
} else { // k.ts == key, ascending order: file block ----> imem rows -----> mem rows
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
@ -1238,32 +1293,41 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
tRowMergerGetRow(&merge, &pTSRow);
freeTSRow = true;
}
} else { // descending order scan
if (key < k.ts) {
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader);
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
} else if (k.ts < key) {
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
return TSDB_CODE_SUCCESS;
} else {
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
freeTSRow = true;
}
} else { // descending order: mem rows -----> imem rows ------> file block
updateSchema(pRow, pBlockScanInfo->uid, pReader);
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
tRowMergerInit(&merge, pRow, pReader->pSchema);
tRowMergerInit(&merge, pRow, pSchema);
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
tRowMerge(&merge, &fRow);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
freeTSRow = true;
}
}
tRowMergerClear(&merge);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
if (freeTSRow) {
taosMemoryFree(pTSRow);
}
taosMemoryFree(pTSRow);
return TSDB_CODE_SUCCESS;
}
@ -1280,12 +1344,12 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
ASSERT(pRow != NULL && piRow != NULL);
int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
bool freeTSRow = false;
uint64_t uid = pBlockScanInfo->uid;
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
if (ASCENDING_TRAVERSE(pReader->order)) {
// [1&2] key <= [k.ts && ik.ts]
if (key <= k.ts && key <= ik.ts) {
@ -1305,7 +1369,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
}
tRowMergerGetRow(&merge, &pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
return TSDB_CODE_SUCCESS;
} else { // key > ik.ts || key > k.ts
ASSERT(key != ik.ts);
@ -1313,16 +1377,22 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
// [3] ik.ts < key <= k.ts
// [4] ik.ts < k.ts <= key
if (ik.ts < k.ts) {
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
if (freeTSRow) {
taosMemoryFree(pTSRow);
}
return TSDB_CODE_SUCCESS;
}
// [5] k.ts < key <= ik.ts
// [6] k.ts < ik.ts <= key
if (k.ts < ik.ts) {
doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
if (freeTSRow) {
taosMemoryFree(pTSRow);
}
return TSDB_CODE_SUCCESS;
}
@ -1331,16 +1401,17 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
ASSERT(key > ik.ts && key > k.ts);
doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, &pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
taosMemoryFree(pTSRow);
return TSDB_CODE_SUCCESS;
}
}
} else { // descending order scan
// [1/2] k.ts >= ik.ts && k.ts >= key
if (k.ts >= ik.ts && k.ts >= key) {
updateSchema(pRow, uid, pReader);
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
tRowMergerInit(&merge, pRow, pReader->pSchema);
tRowMergerInit(&merge, pRow, pSchema);
doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
if (ik.ts == k.ts) {
@ -1355,7 +1426,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
}
tRowMergerGetRow(&merge, &pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
return TSDB_CODE_SUCCESS;
} else {
ASSERT(ik.ts != k.ts); // this case has been included in the previous if branch
@ -1363,8 +1434,11 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
// [3] ik.ts > k.ts >= Key
// [4] ik.ts > key >= k.ts
if (ik.ts > key) {
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
if (freeTSRow) {
taosMemoryFree(pTSRow);
}
return TSDB_CODE_SUCCESS;
}
@ -1376,19 +1450,22 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
taosMemoryFree(pTSRow);
return TSDB_CODE_SUCCESS;
}
//[7] key = ik.ts > k.ts
if (key == ik.ts) {
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
tRowMerge(&merge, &fRow);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
taosMemoryFree(pTSRow);
return TSDB_CODE_SUCCESS;
}
}
@ -1443,34 +1520,23 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
}
// imem & mem are all empty, only file exist
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
return TSDB_CODE_SUCCESS;
} else {
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
// opt version
// 1. it is not a border point
// 2. the direct next point is not an duplicated timestamp
if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
(pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
int32_t step = pReader->order == TSDB_ORDER_ASC ? 1 : -1;
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
if (nextKey != key) { // merge is not needed
doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
pDumpInfo->rowIndex += step;
return TSDB_CODE_SUCCESS;
}
STSRow* pTSRow = NULL;
SRowMerger merge = {0};
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
return TSDB_CODE_SUCCESS;
}
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
STSRow* pTSRow = NULL;
SRowMerger merge = {0};
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
return TSDB_CODE_SUCCESS;
}
}
@ -2083,7 +2149,7 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
}
TSDBROW* pRow = tsdbTbDataIterGet(pIter->iter);
TSDBKEY key = TSDBROW_KEY(pRow);
TSDBKEY key = {.ts = pRow->pTSRow->ts, .version = pRow->version};
if (outOfTimeWindow(key.ts, &pReader->window)) {
pIter->hasVal = false;
return NULL;
@ -2116,6 +2182,7 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
}
}
int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader) {
while (1) {
@ -2136,22 +2203,8 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDe
break;
}
int32_t sversion = TSDBROW_SVERSION(pRow);
STSchema* pTSchema = NULL;
if (pReader->pSchema == NULL || sversion != pReader->pSchema->version) {
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema);
if (pReader->pSchema == NULL) {
pReader->pSchema = pTSchema;
}
} else {
pTSchema = pReader->pSchema;
}
STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, uid);
tRowMergerAdd(pMerger, pRow, pTSchema);
if (pTSchema != pReader->pSchema) {
taosMemoryFree(pTSchema);
}
}
return TSDB_CODE_SUCCESS;
@ -2259,42 +2312,50 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
return TSDB_CODE_SUCCESS;
}
void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader) {
int32_t sversion = TSDBROW_SVERSION(pRow);
if (pReader->pSchema == NULL) {
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema);
} else if (pReader->pSchema->version != sversion) {
taosMemoryFreeClear(pReader->pSchema);
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema);
}
}
void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
STsdbReader* pReader) {
STsdbReader* pReader, bool* freeTSRow) {
TSDBROW* pNextRow = NULL;
TSDBROW current = *pRow;
{ // if the next valid row carries a different timestamp, return the current row directly
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);
if (!pIter->hasVal) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
return;
} else { // has next point in mem/imem
pNextRow = getValidRow(pIter, pDelList, pReader);
if (pNextRow == NULL) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
return;
}
if (current.pTSRow->ts != pNextRow->pTSRow->ts) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
return;
}
}
}
SRowMerger merge = {0};
TSDBKEY k = TSDBROW_KEY(pRow);
// updateSchema(pRow, uid, pReader);
int32_t sversion = TSDBROW_SVERSION(pRow);
STSchema* pTSchema = NULL;
if (pReader->pSchema == NULL || sversion != pReader->pSchema->version) {
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema);
if (pReader->pSchema == NULL) {
pReader->pSchema = pTSchema;
}
} else {
pTSchema = pReader->pSchema;
}
// get the correct schema for data in memory
STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(&current), pReader, uid);
tRowMergerInit2(&merge, pReader->pSchema, pRow, pTSchema);
doMergeRowsInBuf(pIter, uid, k.ts, pDelList, &merge, pReader);
tRowMergerInit2(&merge, pReader->pSchema, &current, pTSchema);
STSchema* pTSchema1 = doGetSchemaForTSRow(TSDBROW_SVERSION(pNextRow), pReader, uid);
tRowMergerAdd(&merge, pNextRow, pTSchema1);
doMergeRowsInBuf(pIter, uid, current.pTSRow->ts, pDelList, &merge, pReader);
tRowMergerGetRow(&merge, pTSRow);
tRowMergerClear(&merge);
if (pTSchema != pReader->pSchema) {
taosMemoryFree(pTSchema);
}
*freeTSRow = true;
}
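/*
 * Illustrative sketch (not part of the commit): doMergeMultiRows() now reports row
 * ownership through the freeTSRow flag -- when the next row has a different timestamp
 * it hands back the in-memory row as-is (borrowed, freeTSRow=false), otherwise it
 * builds a merged copy the caller must free (freeTSRow=true). A minimal model of that
 * conditional-ownership pattern, all names hypothetical:
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

typedef struct DemoRow { long ts; int payload; } DemoRow;

// returns either the borrowed current row or a freshly allocated "merged" copy.
static DemoRow* demoGetRow(DemoRow* cur, const DemoRow* next, bool* freeRow) {
  if (next == NULL || next->ts != cur->ts) {
    *freeRow = false;                  // borrowed: caller must NOT free it
    return cur;
  }
  DemoRow* merged = malloc(sizeof(DemoRow));
  memcpy(merged, cur, sizeof(DemoRow));
  merged->payload += next->payload;    // stands in for the row-merger work
  *freeRow = true;                     // owned: caller frees after appending
  return merged;
}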
void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
@ -2305,17 +2366,17 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
TSDBKEY ik = TSDBROW_KEY(piRow);
if (ASCENDING_TRAVERSE(pReader->order)) { // ascending order imem --> mem
updateSchema(piRow, pBlockScanInfo->uid, pReader);
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
tRowMergerInit(&merge, piRow, pReader->pSchema);
tRowMergerInit(&merge, piRow, pSchema);
doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader);
tRowMerge(&merge, pRow);
doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
} else {
updateSchema(pRow, pBlockScanInfo->uid, pReader);
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
tRowMergerInit(&merge, pRow, pReader->pSchema);
tRowMergerInit(&merge, pRow, pSchema);
doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
tRowMerge(&merge, piRow);
@ -2326,7 +2387,7 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
}
int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow,
int64_t endKey) {
int64_t endKey, bool* freeTSRow) {
TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
SArray* pDelList = pBlockScanInfo->delSkyline;
@ -2352,35 +2413,36 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
TSDBKEY ik = TSDBROW_KEY(piRow);
if (ik.ts < k.ts) { // ik.ts < k.ts
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader);
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
} else if (k.ts < ik.ts) {
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader);
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
} else { // ik.ts == k.ts
doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
*freeTSRow = true;
}
return TSDB_CODE_SUCCESS;
}
if (pBlockScanInfo->iter.hasVal && pRow != NULL) {
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader);
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
return TSDB_CODE_SUCCESS;
}
if (pBlockScanInfo->iiter.hasVal && piRow != NULL) {
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader);
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
return TSDB_CODE_SUCCESS;
}
return TSDB_CODE_SUCCESS;
}
int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow) {
int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid) {
int32_t numOfRows = pBlock->info.rows;
int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock);
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
STSchema* pSchema = pReader->pSchema;
STSchema* pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);
SColVal colVal = {0};
int32_t i = 0, j = 0;
@ -2396,7 +2458,7 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow*
col_id_t colId = pColInfoData->info.colId;
if (colId == pSchema->columns[j].colId) {
tTSRowGetVal(pTSRow, pReader->pSchema, j, &colVal);
tTSRowGetVal(pTSRow, pSchema, j, &colVal);
doCopyColVal(pColInfoData, numOfRows, i, &colVal, pSupInfo);
i += 1;
j += 1;
@ -2466,13 +2528,16 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
do {
STSRow* pTSRow = NULL;
tsdbGetNextRowInMem(pBlockScanInfo, pReader, &pTSRow, endKey);
bool freeTSRow = false;
tsdbGetNextRowInMem(pBlockScanInfo, pReader, &pTSRow, endKey, &freeTSRow);
if (pTSRow == NULL) {
break;
}
doAppendRowFromTSRow(pBlock, pReader, pTSRow);
taosMemoryFree(pTSRow);
doAppendRowFromTSRow(pBlock, pReader, pTSRow, pBlockScanInfo->uid);
if (freeTSRow) {
taosMemoryFree(pTSRow);
}
// no data in buffer, return immediately
if (!(pBlockScanInfo->iter.hasVal || pBlockScanInfo->iiter.hasVal)) {
@ -2631,6 +2696,11 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
STsdbReader* pPrevReader = pReader->innerReader[0];
SDataBlockIter* pBlockIter = &pPrevReader->status.blockIter;
code = tsdbTakeReadSnap(pPrevReader->pTsdb, &pPrevReader->pReadSnap);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}
initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order,
pPrevReader->idStr);
resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap);
@ -2660,7 +2730,6 @@ void tsdbReaderClose(STsdbReader* pReader) {
}
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
tsdbUntakeReadSnap(pReader->pTsdb, pReader->pReadSnap);
taosMemoryFreeClear(pSupInfo->plist);
@ -2688,16 +2757,15 @@ void tsdbReaderClose(STsdbReader* pReader) {
SIOCostSummary* pCost = &pReader->cost;
tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
" SMA-time:%.2f ms, "
"fileBlocks:%" PRId64
", fileBlocks-time:%.2f ms, build in-memory-block-time:%.2f ms, STableBlockScanInfo "
"size:%.2f Kb %s",
" SMA-time:%.2f ms, fileBlocks:%" PRId64 ", fileBlocks-time:%.2f ms, "
"build in-memory-block-time:%.2f ms, STableBlockScanInfo size:%.2f Kb %s",
pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaData, pCost->smaLoadTime,
pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock,
numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr);
taosMemoryFree(pReader->idStr);
taosMemoryFree(pReader->pSchema);
taosMemoryFree(pReader->pMemSchema);
taosMemoryFreeClear(pReader);
}

View File

@ -1520,22 +1520,22 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
break;
}
case TSDB_DATA_TYPE_FLOAT: {
pColAgg->sum += colVal.value.f;
if (pColAgg->min > colVal.value.f) {
pColAgg->min = colVal.value.f;
*(double*)(&pColAgg->sum) += colVal.value.f;
if (*(double*)(&pColAgg->min) > colVal.value.f) {
*(double*)(&pColAgg->min) = colVal.value.f;
}
if (pColAgg->max < colVal.value.f) {
pColAgg->max = colVal.value.f;
if (*(double*)(&pColAgg->max) < colVal.value.f) {
*(double*)(&pColAgg->max) = colVal.value.f;
}
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
pColAgg->sum += colVal.value.d;
if (pColAgg->min > colVal.value.d) {
pColAgg->min = colVal.value.d;
*(double*)(&pColAgg->sum) += colVal.value.d;
if (*(double*)(&pColAgg->min) > colVal.value.d) {
*(double*)(&pColAgg->min) = colVal.value.d;
}
if (pColAgg->max < colVal.value.d) {
pColAgg->max = colVal.value.d;
if (*(double*)(&pColAgg->max) < colVal.value.d) {
*(double*)(&pColAgg->max) = colVal.value.d;
}
break;
}
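/*
 * Illustrative sketch (not part of the commit): the change above stores float/double
 * statistics by reinterpreting the 8-byte integer fields of the aggregate struct as
 * doubles, which is what the *(double*)(&pColAgg->sum) casts express (a union would
 * express the same thing without the aliasing casts). A small, self-contained model
 * of that storage trick with hypothetical names:
 */
#include <stdint.h>
#include <stdio.h>

typedef struct DemoColAgg {
  int64_t sum;  // also used as raw storage for a double when the column is FLOAT/DOUBLE
  int64_t min;
  int64_t max;
} DemoColAgg;

static void demoAccumDouble(DemoColAgg* agg, double v) {
  *(double*)(&agg->sum) += v;                        // treat the 8 bytes as a double
  if (*(double*)(&agg->min) > v) *(double*)(&agg->min) = v;
  if (*(double*)(&agg->max) < v) *(double*)(&agg->max) = v;
}

int main(void) {
  DemoColAgg agg = {0};
  *(double*)(&agg.min) = 1e300;                      // seed min/max before accumulating
  *(double*)(&agg.max) = -1e300;
  double vals[] = {1.5, -2.25, 4.0};
  for (int i = 0; i < 3; ++i) demoAccumDouble(&agg, vals[i]);
  printf("sum=%f min=%f max=%f\n", *(double*)(&agg.sum), *(double*)(&agg.min), *(double*)(&agg.max));
  return 0;
}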

View File

@ -43,12 +43,14 @@ extern "C" {
#define EXPLAIN_GROUP_SORT_FORMAT "Group Sort"
#define EXPLAIN_INTERVAL_FORMAT "Interval on Column %s"
#define EXPLAIN_MERGE_INTERVAL_FORMAT "Merge Interval on Column %s"
#define EXPLAIN_MERGE_ALIGNED_INTERVAL_FORMAT "Merge Aligned Interval on Column %s"
#define EXPLAIN_FILL_FORMAT "Fill"
#define EXPLAIN_SESSION_FORMAT "Session"
#define EXPLAIN_STATE_WINDOW_FORMAT "StateWindow on Column %s"
#define EXPLAIN_PARITION_FORMAT "Partition on Column %s"
#define EXPLAIN_ORDER_FORMAT "Order: %s"
#define EXPLAIN_FILTER_FORMAT "Filter: "
#define EXPLAIN_MERGEBLOCKS_FORMAT "Merge ResBlocks: %s"
#define EXPLAIN_FILL_VALUE_FORMAT "Fill Values: "
#define EXPLAIN_ON_CONDITIONS_FORMAT "Join Cond: "
#define EXPLAIN_TIMERANGE_FORMAT "Time Range: [%" PRId64 ", %" PRId64 "]"
@ -56,9 +58,11 @@ extern "C" {
#define EXPLAIN_TIME_WINDOWS_FORMAT "Time Window: interval=%" PRId64 "%c offset=%" PRId64 "%c sliding=%" PRId64 "%c"
#define EXPLAIN_WINDOW_FORMAT "Window: gap=%" PRId64
#define EXPLAIN_RATIO_TIME_FORMAT "Ratio: %f"
#define EXPLAIN_MERGE_FORMAT "Merge"
#define EXPLAIN_MERGE_FORMAT "SortMerge"
#define EXPLAIN_MERGE_KEYS_FORMAT "Merge Key: "
#define EXPLAIN_IGNORE_GROUPID_FORMAT "Ignore Group Id: %s"
#define EXPLAIN_PARTITION_KETS_FORMAT "Partition Key: "
#define EXPLAIN_INTERP_FORMAT "Interp"
#define EXPLAIN_PLANNING_TIME_FORMAT "Planning Time: %.3f ms"
#define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms"
@ -69,6 +73,7 @@ extern "C" {
#define EXPLAIN_LEFT_PARENTHESIS_FORMAT " ("
#define EXPLAIN_RIGHT_PARENTHESIS_FORMAT ")"
#define EXPLAIN_BLANK_FORMAT " "
#define EXPLAIN_COMMA_FORMAT ", "
#define EXPLAIN_COST_FORMAT "cost=%.2f..%.2f"
#define EXPLAIN_ROWS_FORMAT "rows=%" PRIu64
#define EXPLAIN_COLUMNS_FORMAT "columns=%d"
@ -86,6 +91,7 @@ extern "C" {
#define EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT "output_order=%s"
#define EXPLAIN_OFFSET_FORMAT "offset=%d"
#define EXPLAIN_SOFFSET_FORMAT "soffset=%d"
#define EXPLAIN_PARTITIONS_FORMAT "partitions=%d"
#define COMMAND_RESET_LOG "resetLog"
#define COMMAND_SCHEDULE_POLICY "schedulePolicy"

View File

@ -535,6 +535,13 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
if (NULL != pTblScanNode->pGroupTags) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_PARTITION_KETS_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_PARTITIONS_FORMAT, pTblScanNode->pGroupTags->length);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
if (pTblScanNode->scan.node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pTblScanNode->scan.node.pConditions, tbuf + VARSTR_HEADER_SIZE,
@ -617,13 +624,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pPrjNode->mergeDataBlock? "True":"False");
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
if (pPrjNode->node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pPrjNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE,
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
}
break;
}
@ -707,7 +718,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pAggNode->mergeDataBlock? "True":"False");
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
break;
}
@ -906,13 +921,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pIntNode->window.mergeDataBlock? "True":"False");
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL: {
SMergeAlignedIntervalPhysiNode *pIntNode = (SMergeAlignedIntervalPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_INTERVAL_FORMAT, nodesGetNameFromColumnNode(pIntNode->window.pTspk));
EXPLAIN_ROW_NEW(level, EXPLAIN_MERGE_ALIGNED_INTERVAL_FORMAT, nodesGetNameFromColumnNode(pIntNode->window.pTspk));
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
@ -950,7 +969,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pIntNode->window.mergeDataBlock? "True":"False");
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
break;
}
@ -1122,6 +1145,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_PARTITION_KETS_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_PARTITIONS_FORMAT, pPartNode->pPartitionKeys->length);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
if (pPartNode->node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pPartNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE,
@ -1194,11 +1222,20 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGE_KEYS_FORMAT);
if (pMergeNode->groupSort) {
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, "_group_id asc");
if (LIST_LENGTH(pMergeNode->pMergeKeys) > 0) {
EXPLAIN_ROW_APPEND(EXPLAIN_COMMA_FORMAT);
}
}
for (int32_t i = 0; i < LIST_LENGTH(pMergeNode->pMergeKeys); ++i) {
SOrderByExprNode *ptn = (SOrderByExprNode *)nodesListGetNode(pMergeNode->pMergeKeys, i);
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, nodesGetNameFromColumnNode(ptn->pExpr));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, EXPLAIN_ORDER_STRING(ptn->order));
if (i != LIST_LENGTH(pMergeNode->pMergeKeys) - 1) {
EXPLAIN_ROW_APPEND(EXPLAIN_COMMA_FORMAT);
}
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
@ -1410,7 +1447,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: {
SInterpFuncPhysiNode *pInterpNode = (SInterpFuncPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT);
EXPLAIN_ROW_NEW(level, EXPLAIN_INTERP_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));

View File

@ -266,7 +266,11 @@ typedef struct SExchangeInfo {
SArray* pSourceDataInfo;
tsem_t ready;
void* pTransporter;
SSDataBlock* pResult;
// SArray<SSDataBlock*>, result block list, used to keep the multiple blocks
// passed up by the downstream operator
SArray* pResultBlockList;
int32_t rspBlockIndex; // index of the next block to return in pResultBlockList
SSDataBlock* pDummyBlock; // dummy block that carries the schema only, no data rows
bool seqLoadData; // sequential load data or not, false by default
int32_t current;
SLoadRemoteDataInfo loadInfo;
@ -520,6 +524,7 @@ typedef struct SBlockDistInfo {
typedef struct SOptrBasicInfo {
SResultRowInfo resultRowInfo;
SSDataBlock* pRes;
bool mergeResultBlock;
} SOptrBasicInfo;
typedef struct SIntervalAggOperatorInfo {
@ -554,8 +559,6 @@ typedef struct SMergeAlignedIntervalAggOperatorInfo {
uint64_t groupId; // current groupId
int64_t curTs; // current ts
SSDataBlock* prefetchedBlock;
bool inputBlocksFinished;
SNode* pCondition;
} SMergeAlignedIntervalAggOperatorInfo;
@ -594,7 +597,6 @@ typedef struct SAggOperatorInfo {
uint64_t groupId;
SGroupResInfo groupResInfo;
SExprSupp scalarExprSup;
SNode *pCondition;
} SAggOperatorInfo;
@ -860,9 +862,10 @@ void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLi
void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset,
int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput, int32_t order);
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
SArray* pColList);
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList, char** pNextStart);
void updateLoadRemoteInfo(SLoadRemoteDataInfo *pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
SOperatorInfo* pOperator);
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
@ -897,7 +900,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode *pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
int32_t numOfScalarExpr, bool mergeResult, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode *pNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
@ -912,14 +915,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
SExecTaskInfo* pTaskInfo);
bool mergeResultBlock, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
SNode* pCondition, SExecTaskInfo* pTaskInfo);
SNode* pCondition, bool mergeResultBlocks, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, int32_t numOfChild);
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);

View File

@ -50,7 +50,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
STableListInfo* pTableList = &pTaskInfo->tableqinfoList;
initResultSizeInfo(&pOperator->resultInfo, 1024);
initResultSizeInfo(&pOperator->resultInfo, 4096);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
pInfo->pUidList = taosArrayInit(4, sizeof(int64_t));

View File

@ -69,10 +69,10 @@ static bool needCompress(const SSDataBlock* pData, int32_t numOfCols) {
// clang-format off
// data format:
// +----------------+--------------+-----------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
// |SDataCacheEntry | total length | group id | col1_schema | col2_schema | col3_schema... | column#1 length, column#2 length...| col1 bitmap | col1 data | col2 bitmap | col2 data | .... | | (4 bytes) |(8 bytes)
// | |sizeof(int32) |sizeof(uint64_t) |(sizeof(int16_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | |
// +----------------+--------------+-----------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
// |SDataCacheEntry | version | total length | numOfRows | group id | col1_schema | col2_schema | col3_schema... | column#1 length, column#2 length...| col1 bitmap | col1 data | col2 bitmap | col2 data | .... | | (4 bytes) |(8 bytes)
// | | sizeof(int32_t) |sizeof(int32) | sizeof(int32)| sizeof(uint64_t) | (sizeof(int8_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | |
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
// The length of bitmap is decided by number of rows of this data block, and the length of each column data is
// recorded in the first segment, next to the struct header
// clang-format on
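/*
 * Illustrative sketch (not part of the commit): per the layout comment above, the
 * serialized block now carries version + total length + numOfRows + group id, followed
 * by one (int8_t type, int32_t bytes) schema pair and one int32_t data length per
 * column. A small helper computing that prefix size, excluding the SDataCacheEntry
 * struct itself -- purely a reading of the comment, the name is hypothetical:
 */
#include <stdint.h>

static int32_t demoEncodedHeaderSize(int32_t numOfCols) {
  // version + total length + numOfRows, then the group id
  int32_t fixed = (int32_t)(3 * sizeof(int32_t) + sizeof(uint64_t));
  // per column: (int8_t type + int32_t bytes) schema pair, plus an int32_t data length
  int32_t perCol = (int32_t)(sizeof(int8_t) + sizeof(int32_t) + sizeof(int32_t));
  return fixed + perCol * numOfCols;
}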

View File

@ -213,6 +213,10 @@ int32_t dataBlockToSubmit(SDataInserterHandle* pInserter, SSubmitReq** pReq) {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k);
}
}
if(!fullCol) {
rb.hasNone = true;
}
tdSRowEnd(&rb);
if (ignoreRow) {
continue;

View File

@ -348,12 +348,14 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
taosThreadOnce(&initPoolOnce, initRefPool);
atexit(cleanupRefPool);
qDebug("start to create subplan task, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId);
int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 1000, .maxDataBlockNumPerQuery = 100};
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 10000, .maxDataBlockNumPerQuery = 5000};
code = dsDataSinkMgtInit(&cfg);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@ -372,6 +374,8 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
}
}
qDebug("subplan task create completed, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId);
_error:
// if failed to add ref for all tables in this query, abort current query
return code;
@ -422,6 +426,80 @@ int waitMoment(SQInfo* pQInfo) {
}
#endif
static void freeBlock(void* param) {
SSDataBlock* pBlock = *(SSDataBlock**) param;
blockDataDestroy(pBlock);
}
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int64_t threadId = taosGetSelfPthreadId();
taosArrayClearEx(pResList, freeBlock);
int64_t curOwner = 0;
if ((curOwner = atomic_val_compare_exchange_64(&pTaskInfo->owner, 0, threadId)) != 0) {
qError("%s-%p execTask is now executed by thread:%p", GET_TASKID(pTaskInfo), pTaskInfo, (void*)curOwner);
pTaskInfo->code = TSDB_CODE_QRY_IN_EXEC;
return pTaskInfo->code;
}
if (pTaskInfo->cost.start == 0) {
pTaskInfo->cost.start = taosGetTimestampMs();
}
if (isTaskKilled(pTaskInfo)) {
atomic_store_64(&pTaskInfo->owner, 0);
qDebug("%s already killed, abort", GET_TASKID(pTaskInfo));
return TSDB_CODE_SUCCESS;
}
// error occurs, record the error code and return to client
int32_t ret = setjmp(pTaskInfo->env);
if (ret != TSDB_CODE_SUCCESS) {
pTaskInfo->code = ret;
cleanUpUdfs();
qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
atomic_store_64(&pTaskInfo->owner, 0);
return pTaskInfo->code;
}
qDebug("%s execTask is launched", GET_TASKID(pTaskInfo));
int32_t current = 0;
SSDataBlock* pRes = NULL;
int64_t st = taosGetTimestampUs();
while((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) {
SSDataBlock* p = createOneDataBlock(pRes, true);
current += p->info.rows;
ASSERT(p->info.rows > 0);
taosArrayPush(pResList, &p);
if (current >= 4096) {
break;
}
}
uint64_t el = (taosGetTimestampUs() - st);
pTaskInfo->cost.elapsedTime += el;
if (NULL == pRes) {
*useconds = pTaskInfo->cost.elapsedTime;
}
cleanUpUdfs();
uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
qDebug("%s task suspended, %d rows in %d blocks returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
GET_TASKID(pTaskInfo), current, (int32_t) taosArrayGetSize(pResList), total, 0, el / 1000.0);
atomic_store_64(&pTaskInfo->owner, 0);
return pTaskInfo->code;
}
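/*
 * Illustrative sketch (not part of the commit): qExecTaskOpt() above drains the
 * operator tree into pResList, copying each returned block and stopping once roughly
 * 4096 rows have been buffered, so one fetch can ship several blocks. A stand-alone
 * model of that batching loop with hypothetical types:
 */
#include <stddef.h>
#include <stdint.h>

typedef struct DemoBlock { int32_t rows; } DemoBlock;
typedef DemoBlock* (*DemoNextFn)(void* param);   // stands in for the operator's getNextFn

// collects blocks into out[] until the producer is exhausted or the row budget is hit;
// returns the number of buffered blocks.
static int32_t demoCollectBatch(DemoNextFn next, void* param, DemoBlock** out, int32_t cap,
                                int32_t rowBudget) {
  int32_t n = 0, rows = 0;
  DemoBlock* b = NULL;
  while (n < cap && (b = next(param)) != NULL) {
    out[n++] = b;                      // the real code deep-copies the block first
    rows += b->rows;
    if (rows >= rowBudget) break;      // 4096 in the commit
  }
  return n;
}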
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int64_t threadId = taosGetSelfPthreadId();

View File

@ -88,7 +88,7 @@ static int32_t getExprFunctionId(SExprInfo* pExprInfo) {
static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes);
static void setBlockStatisInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock);
static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock);
static void releaseQueryBuf(size_t numOfTables);
@ -136,9 +136,8 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn,
void operatorDummyCloseFn(void* param, int32_t numOfCols) {}
static int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo,
SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo, const int32_t* rowCellOffset,
SqlFunctionCtx* pCtx, int32_t numOfExprs);
static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
SGroupResInfo* pGroupResInfo);
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
@ -434,7 +433,8 @@ static void doSetInputDataBlockInfo(SOperatorInfo* pOperator, SqlFunctionCtx* pC
for (int32_t i = 0; i < pOperator->exprSupp.numOfExprs; ++i) {
pCtx[i].order = order;
pCtx[i].input.numOfRows = pBlock->info.rows;
setBlockStatisInfo(&pCtx[i], &pOperator->exprSupp.pExprInfo[i], pBlock);
setBlockSMAInfo(&pCtx[i], &pOperator->exprSupp.pExprInfo[i], pBlock);
pCtx[i].pSrcBlock = pBlock;
}
}
@ -799,7 +799,7 @@ static int32_t doCreateConstantValColumnAggInfo(SInputColumnInfoData* pInput, SF
return TSDB_CODE_SUCCESS;
}
void setBlockStatisInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pBlock) {
void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pBlock) {
int32_t numOfRows = pBlock->info.rows;
SInputColumnInfoData* pInput = &pCtx->input;
@ -1012,16 +1012,6 @@ static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) {
// }
//}
// static FORCE_INLINE bool doFilterByBlockStatistics(STaskRuntimeEnv* pRuntimeEnv, SDataStatis *pDataStatis,
// SqlFunctionCtx *pCtx, int32_t numOfRows) {
// STaskAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
//
// if (pDataStatis == NULL || pQueryAttr->pFilters == NULL) {
// return true;
// }
//
// return filterRangeExecute(pQueryAttr->pFilters, pDataStatis, pQueryAttr->numOfCols, numOfRows);
// }
#if 0
static bool overlapWithTimeWindow(STaskAttr* pQueryAttr, SDataBlockInfo* pBlockInfo) {
STimeWindow w = {0};
@ -1216,7 +1206,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
}
// current block has been discard due to filter applied
// if (!doFilterByBlockStatistics(pRuntimeEnv, pBlock->pBlockAgg, pTableScanInfo->pCtx, pBlockInfo->rows)) {
// if (!doFilterByBlockSMA(pRuntimeEnv, pBlock->pBlockAgg, pTableScanInfo->pCtx, pBlockInfo->rows)) {
// pCost->skipBlocks += 1;
// qDebug("QInfo:0x%"PRIx64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey,
// pBlockInfo->window.ekey, pBlockInfo->rows);
@ -1512,19 +1502,24 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
return 0;
}
int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf,
SGroupResInfo* pGroupResInfo, const int32_t* rowCellOffset, SqlFunctionCtx* pCtx,
int32_t numOfExprs) {
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t start = pGroupResInfo->index;
int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
SGroupResInfo* pGroupResInfo) {
SExprInfo* pExprInfo = pSup->pExprInfo;
int32_t numOfExprs = pSup->numOfExprs;
int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
SqlFunctionCtx* pCtx = pSup->pCtx;
for (int32_t i = start; i < numOfRows; i += 1) {
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) {
SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
SFilePage* page = getBufPage(pBuf, pPos->pos.pageId);
SResultRow* pRow = (SResultRow*)((char*)page + pPos->pos.offset);
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowCellOffset);
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowEntryOffset);
// no results, continue to check the next one
if (pRow->numOfRows == 0) {
pGroupResInfo->index += 1;
releaseBufPage(pBuf, page);
@ -1542,6 +1537,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
}
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
ASSERT(pBlock->info.rows > 0);
releaseBufPage(pBuf, page);
break;
}
@ -1551,7 +1547,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
for (int32_t j = 0; j < numOfExprs; ++j) {
int32_t slotId = pExprInfo[j].base.resSchema.slotId;
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset);
if (pCtx[j].fpSet.finalize) {
#ifdef BUF_PAGE_DEBUG
qDebug("\npage_finalize %d", numOfExprs);
@ -1584,26 +1580,19 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
releaseBufPage(pBuf, page);
pBlock->info.rows += pRow->numOfRows;
// if (pBlock->info.rows >= pBlock->info.capacity) { // output buffer is full
// break;
// }
}
qDebug("%s result generated, rows:%d, groupId:%" PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows,
pBlock->info.groupId);
blockDataUpdateTsWindow(pBlock, 0);
return 0;
}
void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
SDiskbasedBuf* pBuf) {
SExprInfo* pExprInfo = pOperator->exprSupp.pExprInfo;
int32_t numOfExprs = pOperator->exprSupp.numOfExprs;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
int32_t* rowCellOffset = pOperator->exprSupp.rowEntryInfoOffset;
SSDataBlock* pBlock = pbInfo->pRes;
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
SSDataBlock* pBlock = pbInfo->pRes;
// set output datablock version
pBlock->info.version = pTaskInfo->version;
@ -1615,30 +1604,22 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
// clear the existed group id
pBlock->info.groupId = 0;
doCopyToSDataBlock(pTaskInfo, pBlock, pExprInfo, pBuf, pGroupResInfo, rowCellOffset, pCtx, numOfExprs);
}
static void updateNumOfRowsInResultRows(SqlFunctionCtx* pCtx, int32_t numOfOutput, SResultRowInfo* pResultRowInfo,
int32_t* rowEntryInfoOffset) {
// update the number of result for each, only update the number of rows for the corresponding window result.
// if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) {
// return;
// }
#if 0
for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
SResultRow* pResult = pResultRowInfo->pResult[i];
for (int32_t j = 0; j < numOfOutput; ++j) {
int32_t functionId = pCtx[j].functionId;
if (functionId == FUNCTION_TS || functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ) {
continue;
if (!pbInfo->mergeResultBlock) {
doCopyToSDataBlock(pTaskInfo, pBlock, &pOperator->exprSupp, pBuf, pGroupResInfo);
} else {
while(hasRemainResults(pGroupResInfo)) {
doCopyToSDataBlock(pTaskInfo, pBlock, &pOperator->exprSupp, pBuf, pGroupResInfo);
if (pBlock->info.rows >= pOperator->resultInfo.threshold) {
break;
}
SResultRowEntryInfo* pCell = getResultEntryInfo(pResult, j, rowEntryInfoOffset);
pResult->numOfRows = (uint16_t)(TMAX(pResult->numOfRows, pCell->numOfRes));
// clearing group id to continue to merge data that belong to different groups
pBlock->info.groupId = 0;
}
// clear the group id info in SSDataBlock, since the client does not need it
pBlock->info.groupId = 0;
}
#endif
}
static int32_t compressQueryColData(SColumnInfoData* pColRes, int32_t numOfRows, char* data, int8_t compressed) {
@ -1976,9 +1957,11 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
pRsp->compLen = htonl(pRsp->compLen);
pRsp->numOfCols = htonl(pRsp->numOfCols);
pRsp->useconds = htobe64(pRsp->useconds);
pRsp->numOfBlocks = htonl(pRsp->numOfBlocks);
ASSERT(pRsp != NULL);
qDebug("%s fetch rsp received, index:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfRows);
qDebug("%s fetch rsp received, index:%d, blocks:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfBlocks,
pRsp->numOfRows);
} else {
pSourceDataInfo->code = code;
qDebug("%s fetch rsp received, index:%d, error:%d", pSourceDataInfo->taskId, index, tstrerror(code));
@ -2062,13 +2045,19 @@ static int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInf
return TSDB_CODE_SUCCESS;
}
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
SArray* pColList) {
void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
SOperatorInfo* pOperator) {
pInfo->totalRows += numOfRows;
pInfo->totalSize += dataLen;
pInfo->totalElapsed += (taosGetTimestampUs() - startTs);
pOperator->resultInfo.totalRows += numOfRows;
}
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList,
char** pNextStart) {
if (pColList == NULL) { // data from other sources
blockDataCleanup(pRes);
// blockDataEnsureCapacity(pRes, numOfRows);
blockDecode(pRes, numOfOutput, numOfRows, pData);
*pNextStart = (char*) blockDecode(pRes, pData);
} else { // extract data according to pColList
ASSERT(numOfOutput == taosArrayGetSize(pColList));
char* pStart = pData;
@ -2092,28 +2081,17 @@ int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLo
blockDataAppendColInfo(pBlock, &idata);
}
blockDecode(pBlock, numOfCols, numOfRows, pStart);
blockDataEnsureCapacity(pRes, numOfRows);
blockDecode(pBlock, pStart);
blockDataEnsureCapacity(pRes, pBlock->info.rows);
// data from mnode
pRes->info.rows = numOfRows;
pRes->info.rows = pBlock->info.rows;
relocateColumnData(pRes, pColList, pBlock->pDataBlock, false);
blockDataDestroy(pBlock);
}
// todo move this to time window aggregator, since the primary timestamp may not be known by exchange operator.
blockDataUpdateTsWindow(pRes, 0);
int64_t el = taosGetTimestampUs() - startTs;
pLoadInfo->totalRows += numOfRows;
pLoadInfo->totalSize += compLen;
if (total != NULL) {
*total += numOfRows;
}
pLoadInfo->totalElapsed += el;
return TSDB_CODE_SUCCESS;
}
@ -2135,8 +2113,8 @@ static void* setAllSourcesCompleted(SOperatorInfo* pOperator, int64_t startTs) {
return NULL;
}
static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeInfo* pExchangeInfo,
SExecTaskInfo* pTaskInfo) {
static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeInfo* pExchangeInfo,
SExecTaskInfo* pTaskInfo) {
int32_t code = 0;
int64_t startTs = taosGetTimestampUs();
size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);
@ -2162,7 +2140,6 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
SRetrieveTableRsp* pRsp = pDataInfo->pRsp;
SDownstreamSourceNode* pSource = taosArrayGet(pExchangeInfo->pSources, i);
SSDataBlock* pRes = pExchangeInfo->pResult;
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
if (pRsp->numOfRows == 0) {
qDebug("%s vgId:%d, taskId:0x%" PRIx64 " execId:%d index:%d completed, rowsOfSource:%" PRIu64
@ -2175,29 +2152,35 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
continue;
}
SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
code =
extractDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
if (code != 0) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;
int32_t index = 0;
char* pStart = pRetrieveRsp->data;
while(index++ < pRetrieveRsp->numOfBlocks) {
SSDataBlock* pb = createOneDataBlock(pExchangeInfo->pDummyBlock, false);
code = extractDataBlockFromFetchRsp(pb, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);
if (code != 0) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
}
taosArrayPush(pExchangeInfo->pResultBlockList, &pb);
}
updateLoadRemoteInfo(pLoadInfo, pRetrieveRsp->numOfRows, pRetrieveRsp->compLen, startTs, pOperator);
if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64
" execId:%d"
" index:%d completed, numOfRows:%d, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64
", completed:%d try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRes->info.rows,
pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, completed + 1, i + 1, totalSources);
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d"
" index:%d completed, blocks:%d, numOfRows:%d, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", total:%.2f Kb,"
" completed:%d try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRsp->numOfBlocks,
pRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0,
completed + 1, i + 1, totalSources);
completed += 1;
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
} else {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, totalRows:%" PRIu64
", totalBytes:%" PRIu64,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows,
pLoadInfo->totalRows, pLoadInfo->totalSize);
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d blocks:%d, numOfRows:%d, totalRows:%" PRIu64
", total:%.2f Kb", GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId,
pRsp->numOfBlocks, pRsp->numOfRows, pLoadInfo->totalRows, pLoadInfo->totalSize/1024.0);
}
taosMemoryFreeClear(pDataInfo->pRsp);
@ -2211,11 +2194,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
}
}
return pExchangeInfo->pResult;
return;
}
if (completed == totalSources) {
return setAllSourcesCompleted(pOperator, startTs);
setAllSourcesCompleted(pOperator, startTs);
return;
}
sched_yield();
@ -2223,7 +2207,6 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
_error:
pTaskInfo->code = code;
return NULL;
}
static int32_t prepareConcurrentlyLoad(SOperatorInfo* pOperator) {
@ -2253,7 +2236,7 @@ static int32_t prepareConcurrentlyLoad(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}
static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
static int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
SExchangeInfo* pExchangeInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@ -2262,7 +2245,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
while (1) {
if (pExchangeInfo->current >= totalSources) {
return setAllSourcesCompleted(pOperator, startTs);
setAllSourcesCompleted(pOperator, startTs);
return TSDB_CODE_SUCCESS;
}
doSendFetchDataRequest(pExchangeInfo, pTaskInfo, pExchangeInfo->current);
@ -2275,7 +2259,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
qError("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d error happens, code:%s", GET_TASKID(pTaskInfo),
pSource->addr.nodeId, pSource->taskId, pSource->execId, tstrerror(pDataInfo->code));
pOperator->pTaskInfo->code = pDataInfo->code;
return NULL;
return pOperator->pTaskInfo->code;
}
SRetrieveTableRsp* pRsp = pDataInfo->pRsp;
@ -2292,16 +2276,15 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
continue;
}
SSDataBlock* pRes = pExchangeInfo->pResult;
SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
int32_t code =
extractDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;
char* pStart = pRetrieveRsp->data;
int32_t code = extractDataBlockFromFetchRsp(NULL, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);
if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, rowsOfSource:%" PRIu64
", totalRows:%" PRIu64 ", totalBytes:%" PRIu64 " try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, pExchangeInfo->current + 1,
totalSources);
@ -2310,13 +2293,15 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
} else {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, totalRows:%" PRIu64
", totalBytes:%" PRIu64,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
pLoadInfo->totalRows, pLoadInfo->totalSize);
}
pOperator->resultInfo.totalRows += pRes->info.rows;
updateLoadRemoteInfo(pLoadInfo, pRetrieveRsp->numOfRows, pRetrieveRsp->compLen, startTs, pOperator);
pDataInfo->totalRows += pRetrieveRsp->numOfRows;
taosMemoryFreeClear(pDataInfo->pRsp);
return pExchangeInfo->pResult;
return TSDB_CODE_SUCCESS;
}
}
@ -2340,6 +2325,11 @@ static int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}
static void freeBlock(void* pParam) {
SSDataBlock* pBlock = *(SSDataBlock**)pParam;
blockDataDestroy(pBlock);
}
static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) {
SExchangeInfo* pExchangeInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@ -2349,9 +2339,9 @@ static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) {
return NULL;
}
size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
if (pOperator->status == OP_EXEC_DONE) {
qDebug("%s all %" PRIzu " source(s) are exhausted, total rows:%" PRIu64 " bytes:%" PRIu64 ", elapsed:%.2f ms",
GET_TASKID(pTaskInfo), totalSources, pLoadInfo->totalRows, pLoadInfo->totalSize,
@ -2359,11 +2349,23 @@ static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) {
return NULL;
}
if (pExchangeInfo->seqLoadData) {
return seqLoadRemoteData(pOperator);
} else {
return concurrentlyLoadRemoteDataImpl(pOperator, pExchangeInfo, pTaskInfo);
size_t size = taosArrayGetSize(pExchangeInfo->pResultBlockList);
if (size == 0 || pExchangeInfo->rspBlockIndex >= size) {
pExchangeInfo->rspBlockIndex = 0;
taosArrayClearEx(pExchangeInfo->pResultBlockList, freeBlock);
if (pExchangeInfo->seqLoadData) {
seqLoadRemoteData(pOperator);
} else {
concurrentlyLoadRemoteDataImpl(pOperator, pExchangeInfo, pTaskInfo);
}
if (taosArrayGetSize(pExchangeInfo->pResultBlockList) == 0) {
return NULL;
}
}
// we have buffered retrieved datablock, return it directly
return taosArrayGetP(pExchangeInfo->pResultBlockList, pExchangeInfo->rspBlockIndex++);
}
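/*
 * Illustrative sketch (not part of the commit): doLoadRemoteDataImpl() above now
 * buffers every block of a fetch response in pResultBlockList and hands them out one
 * per call through rspBlockIndex, refilling only when the buffer is drained. A minimal
 * model of that buffered iterator, with hypothetical names:
 */
#include <stddef.h>
#include <stdint.h>

typedef struct DemoBlockBuf {
  void**  blocks;    // buffered result blocks
  int32_t count;     // number of buffered blocks
  int32_t rspIndex;  // next block to hand out
} DemoBlockBuf;

typedef int32_t (*DemoRefillFn)(DemoBlockBuf* buf, void* param);  // fills blocks/count

static void* demoNextBufferedBlock(DemoBlockBuf* buf, DemoRefillFn refill, void* param) {
  if (buf->rspIndex >= buf->count) {   // buffer drained: reset and fetch the next batch
    buf->rspIndex = 0;
    buf->count = 0;
    if (refill(buf, param) != 0 || buf->count == 0) {
      return NULL;                     // nothing more to return
    }
  }
  return buf->blocks[buf->rspIndex++]; // hand out buffered blocks one at a time
}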
static SSDataBlock* doLoadRemoteData(SOperatorInfo* pOperator) {
@ -2380,26 +2382,24 @@ static SSDataBlock* doLoadRemoteData(SOperatorInfo* pOperator) {
return NULL;
}
ASSERT(pBlock == pExchangeInfo->pResult);
SLimitInfo* pLimitInfo = &pExchangeInfo->limitInfo;
if (hasLimitOffsetInfo(pLimitInfo)) {
int32_t status = handleLimitOffset(pOperator, pLimitInfo, pExchangeInfo->pResult, false);
int32_t status = handleLimitOffset(pOperator, pLimitInfo, pBlock, false);
if (status == PROJECT_RETRIEVE_CONTINUE) {
continue;
} else if (status == PROJECT_RETRIEVE_DONE) {
size_t rows = pExchangeInfo->pResult->info.rows;
size_t rows = pBlock->info.rows;
pExchangeInfo->limitInfo.numOfOutputRows += rows;
if (rows == 0) {
doSetOperatorCompleted(pOperator);
return NULL;
} else {
return pExchangeInfo->pResult;
return pBlock;
}
}
} else {
return pExchangeInfo->pResult;
return pBlock;
}
}
}
@ -2462,16 +2462,18 @@ SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode
}
tsem_init(&pInfo->ready, 0, 0);
pInfo->pDummyBlock = createResDataBlock(pExNode->node.pOutputDataBlockDesc);
pInfo->pResultBlockList = taosArrayInit(1, POINTER_BYTES);
pInfo->seqLoadData = false;
pInfo->pTransporter = pTransporter;
pInfo->pResult = createResDataBlock(pExNode->node.pOutputDataBlockDesc);
pOperator->name = "ExchangeOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_EXCHANGE;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pResult->pDataBlock);
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pDummyBlock->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL,
@ -2990,6 +2992,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
break;
}
}
size_t rows = blockDataGetNumOfRows(pInfo->pRes);
pOperator->resultInfo.totalRows += rows;
@ -3498,17 +3501,16 @@ void cleanupExprSupp(SExprSupp* pSupp) {
SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) {
int32_t numOfScalarExpr, bool mergeResult, SExecTaskInfo* pTaskInfo) {
SAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
int32_t numOfRows = 1024;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, numOfRows);
initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@ -3520,6 +3522,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
goto _error;
}
pInfo->binfo.mergeResultBlock = mergeResult;
pInfo->groupId = UINT64_MAX;
pInfo->pCondition = pCondition;
pOperator->name = "TableAggregate";
@ -3601,12 +3604,15 @@ void doDestroyExchangeOperatorInfo(void* param) {
taosArrayDestroy(pExInfo->pSources);
taosArrayDestroy(pExInfo->pSourceDataInfo);
if (pExInfo->pResult != NULL) {
pExInfo->pResult = blockDataDestroy(pExInfo->pResult);
if (pExInfo->pResultBlockList != NULL) {
taosArrayDestroyEx(pExInfo->pResultBlockList, freeBlock);
pExInfo->pResultBlockList = NULL;
}
tsem_destroy(&pExInfo->ready);
blockDataDestroy(pExInfo->pDummyBlock);
tsem_destroy(&pExInfo->ready);
taosMemoryFreeClear(param);
}
@ -4071,7 +4077,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else {
ASSERT(0);
}
pOperator->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
if (pOperator != NULL) {
pOperator->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
}
return pOperator;
}
@ -4110,7 +4120,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pScalarExprInfo, numOfScalarExpr, pTaskInfo);
} else {
pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pAggNode->node.pConditions,
pScalarExprInfo, numOfScalarExpr, pTaskInfo);
pScalarExprInfo, numOfScalarExpr, pAggNode->mergeDataBlock, pTaskInfo);
}
} else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
@ -4152,7 +4162,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId,
pPhyNode->pConditions, pTaskInfo);
pPhyNode->pConditions, pIntervalPhyNode->window.mergeDataBlock, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL == type) {
SMergeIntervalPhysiNode* pIntervalPhyNode = (SMergeIntervalPhysiNode*)pPhyNode;
@ -4167,7 +4177,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
.precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, pTaskInfo);
pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, pIntervalPhyNode->window.mergeDataBlock, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
int32_t children = 0;
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);

View File

@ -727,7 +727,7 @@ static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFree(pInfo->columnOffset);
cleanupExprSupp(&pInfo->scalarSup);
destroyDiskbasedBuf(pInfo->pBuf);
taosMemoryFreeClear(param);
}

View File

@ -13,10 +13,11 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h"
#include "executorimpl.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
#include "os.h"
#include "querynodes.h"
#include "systable.h"
#include "tname.h"
@ -227,6 +228,57 @@ static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo*
return TSDB_CODE_SUCCESS;
}
static FORCE_INLINE bool doFilterByBlockSMA(const SNode* pFilterNode, SColumnDataAgg** pColsAgg, int32_t numOfCols,
int32_t numOfRows) {
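  // evaluate the filter condition against the per-block column aggregates (SMA) only.
  // returning true means the block cannot be ruled out by the SMA and still has to be
  // loaded and filtered row by row; returning false means the whole block can be skipped.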
if (pColsAgg == NULL || pFilterNode == NULL) {
return true;
}
SFilterInfo* filter = NULL;
// todo move to the initialization function
int32_t code = filterInitFromNode((SNode*)pFilterNode, &filter, 0);
bool keep = filterRangeExecute(filter, pColsAgg, numOfCols, numOfRows);
filterFreeInfo(filter);
return keep;
}
static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
bool allColumnsHaveAgg = true;
SColumnDataAgg** pColAgg = NULL;
int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, &pColAgg, &allColumnsHaveAgg);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
if (!allColumnsHaveAgg) {
return false;
}
// if (allColumnsHaveAgg == true) {
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
// todo: create this buffer when the operator is created
if (pBlock->pBlockAgg == NULL) {
pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
if (pBlock->pBlockAgg == NULL) {
longjmp(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
}
}
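  // map the per-column aggregates returned by the tsdb reader onto the target slots of
  // the result block; columns that are not part of the scan output are skipped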
for (int32_t i = 0; i < taosArrayGetSize(pTableScanInfo->pColMatchInfo); ++i) {
SColMatchInfo* pColMatchInfo = taosArrayGet(pTableScanInfo->pColMatchInfo, i);
if (!pColMatchInfo->output) {
continue;
}
pBlock->pBlockAgg[pColMatchInfo->targetSlotId] = pColAgg[i];
}
return true;
}
static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@ -236,6 +288,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
pCost->totalBlocks += 1;
pCost->totalRows += pBlock->info.rows;
bool loadSMA = false;
*status = pInfo->dataBlockLoadFlag;
if (pTableScanInfo->pFilterNode != NULL ||
@ -259,60 +312,50 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
return TSDB_CODE_SUCCESS;
} else if (*status == FUNC_DATA_REQUIRED_STATIS_LOAD) {
pCost->loadBlockStatis += 1;
bool allColumnsHaveAgg = true;
SColumnDataAgg** pColAgg = NULL;
int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, &pColAgg, &allColumnsHaveAgg);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
if (allColumnsHaveAgg == true) {
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
// todo create this buffer during creating operator
if (pBlock->pBlockAgg == NULL) {
pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
}
for (int32_t i = 0; i < taosArrayGetSize(pTableScanInfo->pColMatchInfo); ++i) {
SColMatchInfo* pColMatchInfo = taosArrayGet(pTableScanInfo->pColMatchInfo, i);
if (!pColMatchInfo->output) {
continue;
}
pBlock->pBlockAgg[pColMatchInfo->targetSlotId] = pColAgg[i];
}
loadSMA = true; // mark the operation of load sma;
bool success = doLoadBlockSMA(pTableScanInfo, pBlock, pTaskInfo);
if (success) { // the block SMA is loaded, the data block itself does not need to be loaded
qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
return TSDB_CODE_SUCCESS;
} else { // failed to load the block sma data, data block statistics does not exist, load data block instead
} else {
*status = FUNC_DATA_REQUIRED_DATA_LOAD;
}
}
ASSERT(*status == FUNC_DATA_REQUIRED_DATA_LOAD);
// todo filter data block according to the block sma data firstly
// try to filter data block according to sma info
if (pTableScanInfo->pFilterNode != NULL) {
if (!loadSMA) {
doLoadBlockSMA(pTableScanInfo, pBlock, pTaskInfo);
}
bool keep = doFilterByBlockSMA(pTableScanInfo->pFilterNode, pBlock->pBlockAgg, taosArrayGetSize(pBlock->pDataBlock),
pBlockInfo->rows);
if (!keep) {
qDebug("%s data block filter out by block SMA, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->filterOutBlocks += 1;
(*status) = FUNC_DATA_REQUIRED_FILTEROUT;
return TSDB_CODE_SUCCESS;
}
}
// free the sma info, since it should not be involved in later computing process.
taosMemoryFreeClear(pBlock->pBlockAgg);
// try to filter data block according to current results
doDynamicPruneDataBlock(pOperator, pBlockInfo, status);
if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
qDebug("%s data block skipped due to dynamic prune, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->skipBlocks += 1;
return TSDB_CODE_SUCCESS;
}
#if 0
if (!doFilterByBlockStatistics(pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
pCost->filterOutBlocks += 1;
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), pBlockInfo->window.skey,
pBlockInfo->window.ekey, pBlockInfo->rows);
(*status) = FUNC_DATA_REQUIRED_FILTEROUT;
return TSDB_CODE_SUCCESS;
}
#endif
pCost->totalCheckedRows += pBlock->info.rows;
pCost->loadBlocks += 1;
@ -470,6 +513,11 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
pBlock->info = binfo;
ASSERT(binfo.uid != 0);
uint64_t* groupId = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
if (groupId) {
pBlock->info.groupId = *groupId;
}
uint32_t status = 0;
int32_t code = loadDataBlock(pOperator, pTableScanInfo, pBlock, &status);
// int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status);
@ -482,11 +530,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
continue;
}
uint64_t* groupId = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
if (groupId) {
pBlock->info.groupId = *groupId;
}
pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
@ -527,7 +570,7 @@ static SSDataBlock* doTableScanGroup(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTableScanInfo->scanFlag = REPEAT_SCAN;
qDebug("%s start to repeat ascending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
qDebug("%s start to repeat ascending order scan data SELECT last_row(*),hostname from cpu group by hostname;blocks due to query func required", GET_TASKID(pTaskInfo));
// do prepare for the next round table scan operation
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
@ -539,10 +582,9 @@ static SSDataBlock* doTableScanGroup(SOperatorInfo* pOperator) {
if (pTableScanInfo->cond.order == TSDB_ORDER_ASC) {
prepareForDescendingScan(pTableScanInfo, pOperator->exprSupp.pCtx, 0);
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
}
qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
while (pTableScanInfo->scanTimes < total) {
SSDataBlock* p = doTableScanImpl(pOperator);
if (p != NULL) {
@ -682,9 +724,6 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose
// pInfo->cond.order = TSDB_ORDER_DESC;
pInfo->pdInfo.interval = extractIntervalInfo(pTableScanNode);
pInfo->readHandle = *readHandle;
pInfo->sample.sampleRatio = pTableScanNode->ratio;
@ -2276,8 +2315,9 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
}
}
extractDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pRsp->numOfRows, pRsp->data, pRsp->compLen,
pOperator->exprSupp.numOfExprs, startTs, NULL, pInfo->scanCols);
char* pStart = pRsp->data;
extractDataBlockFromFetchRsp(pInfo->pRes, pRsp->data, pOperator->exprSupp.numOfExprs, pInfo->scanCols, &pStart);
updateLoadRemoteInfo(&pInfo->loadInfo, pRsp->numOfRows, pRsp->compLen, startTs, pOperator);
// todo log the filter info
doFilterResult(pInfo);
@ -2625,11 +2665,17 @@ _error:
int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
const char* idStr) {
int64_t st = taosGetTimestampUs();
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
int64_t st1 = taosGetTimestampUs();
qDebug("generate queried table list completed, elapsed time:%.2f ms %s", (st1-st)/1000.0, idStr);
if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
qDebug("no table qualified for query, %s" PRIx64, idStr);
return TSDB_CODE_SUCCESS;
@ -2641,6 +2687,9 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
return code;
}
int64_t st2 = taosGetTimestampUs();
qDebug("generate group id map completed, elapsed time:%.2f ms %s", (st2-st1)/1000.0, idStr);
return TSDB_CODE_SUCCESS;
}
@ -2731,7 +2780,7 @@ static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeSc
// todo filter data block according to the block sma data firstly
#if 0
if (!doFilterByBlockStatistics(pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
if (!doFilterByBlockSMA(pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
pCost->filterOutBlocks += 1;
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), pBlockInfo->window.skey,
pBlockInfo->window.ekey, pBlockInfo->rows);

View File

@ -658,7 +658,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
blockDataDestroy(p);
qDebug("%s get sorted row blocks, rows:%d", GET_TASKID(pTaskInfo), pDataBlock->info.rows);
qDebug("%s get sorted block, groupId:%0x"PRIx64" rows:%d", GET_TASKID(pTaskInfo), pDataBlock->info.groupId, pDataBlock->info.rows);
return (pDataBlock->info.rows > 0) ? pDataBlock : NULL;
}

View File

@ -1781,6 +1781,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pInfo->twAggSup = *pTwAggSupp;
pInfo->ignoreExpiredData = pPhyNode->window.igExpired;
pInfo->pCondition = pPhyNode->window.node.pConditions;
pInfo->binfo.mergeResultBlock = pPhyNode->window.mergeDataBlock;
if (pPhyNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
@ -2115,7 +2116,7 @@ static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlo
}
static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pBlock,
static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup,
SSDataBlock* pResBlock) {
int32_t rows = pResBlock->info.rows;
@ -2124,10 +2125,10 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
// output the result
for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
int32_t dstSlot = pExprInfo->base.resSchema.slotId;
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
int32_t dstSlot = pExprInfo->base.resSchema.slotId;
SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
//SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
switch (pSliceInfo->fillType) {
@ -2383,7 +2384,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
@ -2422,7 +2423,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
@ -2443,7 +2444,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
@ -2467,7 +2468,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
doKeepNextRows(pSliceInfo, pBlock, i);
while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
@ -2501,7 +2502,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
@ -2532,15 +2533,16 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
}
}
// check if need to interpolate after ts range
// except for fill(next), fill(linear)
while (pSliceInfo->current <= pSliceInfo->win.ekey && pSliceInfo->fillType != TSDB_FILL_NEXT && pSliceInfo->fillType != TSDB_FILL_LINEAR) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
break;
}
}
// check if need to interpolate after last datablock
// except for fill(next), fill(linear)
while (pSliceInfo->current <= pSliceInfo->win.ekey && pSliceInfo->fillType != TSDB_FILL_NEXT && pSliceInfo->fillType != TSDB_FILL_LINEAR) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
break;
}
}
@ -4857,20 +4859,25 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
int32_t numOfOutput = pSup->numOfExprs;
int64_t* tsCols = extractTsCol(pBlock, iaInfo);
uint64_t tableGroupId = pBlock->info.groupId;
TSKEY currTs = getStartTsKey(&pBlock->info.window, tsCols);
SResultRow* pResult = NULL;
TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
// a result row already exists
if (miaInfo->curTs != INT64_MIN) {
ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
if (currTs != miaInfo->curTs) {
if (ts != miaInfo->curTs) {
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
miaInfo->curTs = INT64_MIN;
miaInfo->curTs = ts;
}
} else {
miaInfo->curTs = ts;
ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
}
STimeWindow win = {0};
win.skey = currTs;
win.skey = miaInfo->curTs;
win.ekey =
taosTimeAdd(win.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit, iaInfo->interval.precision) - 1;
@ -4881,12 +4888,11 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
miaInfo->curTs = win.skey;
int32_t currPos = startPos;
STimeWindow currWin = win;
while (++currPos < pBlock->info.rows) {
if (tsCols[currPos] == currTs) {
if (tsCols[currPos] == miaInfo->curTs) {
continue;
}
@ -4894,11 +4900,10 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder);
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs);
miaInfo->curTs = INT64_MIN;
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
miaInfo->curTs = tsCols[currPos];
currTs = tsCols[currPos];
currWin.skey = currTs;
currWin.skey = miaInfo->curTs;
currWin.ekey = taosTimeAdd(currWin.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit,
iaInfo->interval.precision) -
1;
@ -4916,83 +4921,97 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder);
if (currPos >= pBlock->info.rows) {
// we need to check the next block, if one exists
} else {
ASSERT(0);
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs);
miaInfo->curTs = INT64_MIN;
}
}
static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperator->info;
SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SExprSupp* pSup = &pOperator->exprSupp;
SSDataBlock* pRes = iaInfo->binfo.pRes;
blockDataCleanup(pRes);
blockDataEnsureCapacity(pRes, pOperator->resultInfo.capacity);
SOperatorInfo* downstream = pOperator->pDownstream[0];
int32_t scanFlag = MAIN_SCAN;
if (!miaInfo->inputBlocksFinished) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
int32_t scanFlag = MAIN_SCAN;
while (1) {
SSDataBlock* pBlock = NULL;
if (miaInfo->prefetchedBlock == NULL) {
pBlock = downstream->fpSet.getNextFn(downstream);
} else {
pBlock = miaInfo->prefetchedBlock;
miaInfo->prefetchedBlock = NULL;
while (1) {
SSDataBlock* pBlock = NULL;
if (miaInfo->prefetchedBlock == NULL) {
pBlock = downstream->fpSet.getNextFn(downstream);
} else {
pBlock = miaInfo->prefetchedBlock;
miaInfo->groupId = pBlock->info.groupId;
miaInfo->prefetchedBlock = NULL;
}
if (pBlock == NULL) {
// close last unfinalized time window
if (miaInfo->curTs != INT64_MIN) {
ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->curTs = INT64_MIN;
}
doSetOperatorCompleted(pOperator);
miaInfo->inputBlocksFinished = true;
break;
}
if (!miaInfo->hasGroupId) {
miaInfo->hasGroupId = true;
miaInfo->groupId = pBlock->info.groupId;
} else if (miaInfo->groupId != pBlock->info.groupId) {
// if there is an unclosed time window, close it first.
ASSERT(miaInfo->curTs != INT64_MIN);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->prefetchedBlock = pBlock;
miaInfo->curTs = INT64_MIN;
break;
}
getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag);
setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->inputOrder, scanFlag, true);
doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);
doFilter(miaInfo->pCondition, pRes, NULL);
if (pRes->info.rows >= pOperator->resultInfo.capacity) {
break;
}
miaInfo->groupId = pBlock->info.groupId;
}
pRes->info.groupId = miaInfo->groupId;
if (pBlock == NULL) {
// close last unfinalized time window
if (miaInfo->curTs != INT64_MIN) {
ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->curTs = INT64_MIN;
}
doSetOperatorCompleted(pOperator);
break;
}
if (!miaInfo->hasGroupId) {
miaInfo->hasGroupId = true;
miaInfo->groupId = pBlock->info.groupId;
} else if (miaInfo->groupId != pBlock->info.groupId) {
// if there is an unclosed time window, close it first.
ASSERT(miaInfo->curTs != INT64_MIN);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->prefetchedBlock = pBlock;
miaInfo->curTs = INT64_MIN;
break;
}
getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag);
setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->inputOrder, scanFlag, true);
doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);
doFilter(miaInfo->pCondition, pRes, NULL);
if (pRes->info.rows >= pOperator->resultInfo.capacity) {
break;
}
}
pRes->info.groupId = miaInfo->groupId;
miaInfo->hasGroupId = false;
}
static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SMergeAlignedIntervalAggOperatorInfo* pMiaInfo = pOperator->info;
SIntervalAggOperatorInfo* iaInfo = pMiaInfo->intervalAggOperatorInfo;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SSDataBlock* pRes = iaInfo->binfo.pRes;
blockDataCleanup(pRes);
if (iaInfo->binfo.mergeResultBlock) {
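    // when mergeDataBlock is enabled, keep invoking doMergeAlignedIntervalAgg and accumulate
    // the generated rows into pRes until the output threshold is reached or the operator completes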
while(1) {
if (pOperator->status == OP_EXEC_DONE) {
break;
}
if (pRes->info.rows >= pOperator->resultInfo.threshold) {
break;
}
doMergeAlignedIntervalAgg(pOperator);
}
} else {
doMergeAlignedIntervalAgg(pOperator);
}
size_t rows = pRes->info.rows;
pOperator->resultInfo.totalRows += rows;
@ -5001,7 +5020,7 @@ static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo,
int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval,
int32_t primaryTsSlotId, SNode* pCondition,
int32_t primaryTsSlotId, SNode* pCondition, bool mergeResultBlock,
SExecTaskInfo* pTaskInfo) {
SMergeAlignedIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeAlignedIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
@ -5025,6 +5044,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
iaInfo->interval = *pInterval;
iaInfo->execModel = pTaskInfo->execModel;
iaInfo->primaryTsIndex = primaryTsSlotId;
iaInfo->binfo.mergeResultBlock = mergeResultBlock;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
@ -5045,6 +5065,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
}
initResultRowInfo(&iaInfo->binfo.resultRowInfo);
blockDataEnsureCapacity(iaInfo->binfo.pRes, pOperator->resultInfo.capacity);
pOperator->name = "TimeMergeAlignedIntervalAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL;
@ -5055,7 +5076,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = miaInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doMergeAlignedIntervalAgg, NULL, NULL,
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL,
destroyMergeAlignedIntervalOperatorInfo, NULL, NULL, NULL);
code = appendDownstream(pOperator, &downstream, 1);
@ -5313,7 +5334,7 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
SExecTaskInfo* pTaskInfo) {
bool mergeBlock, SExecTaskInfo* pTaskInfo) {
SMergeIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (miaInfo == NULL || pOperator == NULL) {
@ -5327,6 +5348,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
iaInfo->inputOrder = TSDB_ORDER_ASC;
iaInfo->interval = *pInterval;
iaInfo->execModel = pTaskInfo->execModel;
iaInfo->binfo.mergeResultBlock = mergeBlock;
iaInfo->primaryTsIndex = primaryTsSlotId;

View File

@ -1575,7 +1575,8 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType) {
if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType &&
TSDB_DATA_TYPE_TIMESTAMP != colType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@ -2150,7 +2151,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_apercentile_partial",
.type = FUNCTION_TYPE_APERCENTILE_PARTIAL | FUNC_MGT_TIMELINE_FUNC,
.type = FUNCTION_TYPE_APERCENTILE_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateApercentilePartial,
.getEnvFunc = getApercentileFuncEnv,
@ -2163,7 +2164,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_apercentile_merge",
.type = FUNCTION_TYPE_APERCENTILE_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateApercentileMerge,
.getEnvFunc = getApercentileFuncEnv,
.initFunc = apercentileFunctionSetup,
@ -2256,8 +2257,6 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.finalizeFunc = elapsedFinalize,
.invertFunc = NULL,
.combineFunc = elapsedCombine,
//.pPartialFunc = "_elapsed_partial",
//.pMergeFunc = "_elapsed_merge"
},
{
.name = "_elapsed_partial",
@ -2324,10 +2323,13 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.type = FUNCTION_TYPE_LAST_ROW,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
.dynDataRequiredFunc = lastDynDataReq,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastRowFunction,
.sprocessFunc = firstLastScalarFunction,
.pPartialFunc = "_last_row_partial",
.pMergeFunc = "_last_row_merge",
.finalizeFunc = firstLastFinalize
},
{
@ -2340,6 +2342,27 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.processFunc = cachedLastRowFunction,
.finalizeFunc = firstLastFinalize
},
{
.name = "_last_row_partial",
.type = FUNCTION_TYPE_LAST_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastPartial,
.dynDataRequiredFunc = lastDynDataReq,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastRowFunction,
.finalizeFunc = firstLastPartialFinalize,
},
{
.name = "_last_row_merge",
.type = FUNCTION_TYPE_LAST_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastMerge,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastFunctionMerge,
.finalizeFunc = firstLastFinalize,
},
{
.name = "first",
.type = FUNCTION_TYPE_FIRST,
@ -2396,6 +2419,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.type = FUNCTION_TYPE_LAST_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastPartial,
.dynDataRequiredFunc = lastDynDataReq,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastFunction,
@ -2428,7 +2452,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "histogram",
.type = FUNCTION_TYPE_HISTOGRAM,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.translateFunc = translateHistogram,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = histogramFunctionSetup,
@ -2443,7 +2467,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_histogram_partial",
.type = FUNCTION_TYPE_HISTOGRAM_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.translateFunc = translateHistogramPartial,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = histogramFunctionSetup,
@ -2455,7 +2479,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_histogram_merge",
.type = FUNCTION_TYPE_HISTOGRAM_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.translateFunc = translateHistogramMerge,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = functionSetup,
@ -2467,7 +2491,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "hyperloglog",
.type = FUNCTION_TYPE_HYPERLOGLOG,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLL,
.getEnvFunc = getHLLFuncEnv,
.initFunc = functionSetup,
@ -2481,7 +2505,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_hyperloglog_partial",
.type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL | FUNC_MGT_TIMELINE_FUNC,
.type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLLPartial,
.getEnvFunc = getHLLFuncEnv,
@ -2493,7 +2517,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_hyperloglog_merge",
.type = FUNCTION_TYPE_HYPERLOGLOG_MERGE | FUNC_MGT_TIMELINE_FUNC,
.type = FUNCTION_TYPE_HYPERLOGLOG_MERGE,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLLMerge,
.getEnvFunc = getHLLFuncEnv,

View File

@ -2970,25 +2970,24 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst) {
SInputColumnInfoData* pColInfo = &pCtx->input;
int32_t start = pColInfo->startRowIndex;
pOutput->bytes = pInput->bytes;
TSKEY* tsIn = &pInput->ts;
TSKEY* tsOut = &pOutput->ts;
int32_t start = pColInfo->startRowIndex;
if (pOutput->hasResult) {
if (isFirst) {
if (*tsIn > *tsOut) {
if (pInput->ts > pOutput->ts) {
return;
}
} else {
if (*tsIn < *tsOut) {
if (pInput->ts < pOutput->ts) {
return;
}
}
}
*tsOut = *tsIn;
pOutput->isNull = pInput->isNull;
pOutput->ts = pInput->ts;
pOutput->bytes = pInput->bytes;
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
@ -3015,7 +3014,6 @@ static int32_t firstLastFunctionMergeImpl(SqlFunctionCtx* pCtx, bool isFirstQuer
}
SET_VAL(GET_RES_INFO(pCtx), numOfElems, 1);
return TSDB_CODE_SUCCESS;
}
@ -3116,9 +3114,9 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
TSKEY startKey = getRowPTs(pInput->pPTS, 0);
TSKEY endKey = getRowPTs(pInput->pPTS, pInput->totalRows - 1);
#if 0
int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
#if 0
// the optimized version only works if all tuples in one block are monotonically increasing or decreasing.
// this does NOT always hold if a project operator exists downstream.
if (blockDataOrder == TSDB_ORDER_ASC) {
@ -3158,6 +3156,7 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
}
#endif
SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
}
@ -3188,6 +3187,8 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
pDiffInfo->prev.i64 = *(bool*)pv ? 1 : 0;
break;
case TSDB_DATA_TYPE_TINYINT:
pDiffInfo->prev.i64 = *(int8_t*)pv;
break;
@ -3197,6 +3198,7 @@ static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
case TSDB_DATA_TYPE_SMALLINT:
pDiffInfo->prev.i64 = *(int16_t*)pv;
break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
pDiffInfo->prev.i64 = *(int64_t*)pv;
break;
@ -3250,6 +3252,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo
pDiffInfo->prev.i64 = v;
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT: {
int64_t v = *(int64_t*)pv;
int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null

View File

@ -116,6 +116,11 @@ EFuncDataRequired fmFuncDynDataRequired(int32_t funcId, void* pRes, STimeWindow*
return TSDB_CODE_FAILED;
}
const char* name = funcMgtBuiltins[funcId].name;
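  // _group_key and _select_value are pseudo functions that never need the raw data block to be loaded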
if ((strcmp(name, "_group_key") == 0) || (strcmp(name, "_select_value") == 0)) {
return FUNC_DATA_REQUIRED_NOT_LOAD;
}
if (funcMgtBuiltins[funcId].dynDataRequiredFunc == NULL) {
return FUNC_DATA_REQUIRED_DATA_LOAD;
} else {

View File

@ -485,7 +485,6 @@ static void idxMayMergeTempToFinalRslt(SArray* result, TFileValue* tfv, SIdxTRsl
// handle last iterator
idxTRsltMergeTo(tr, lv->tableId);
} else {
// temp result saved in help
tfileValueDestroy(tfv);
}
} else {

View File

@ -45,7 +45,7 @@ static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr,
static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
/*comm func of compare, used in (LE/LT/GE/GT compare)*/
/*comm compare func, used in (LE/LT/GE/GT compare)*/
static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s, RangeType type);
static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
static int32_t cacheSearchEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
@ -94,7 +94,6 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTRslt* tr, STe
if (0 == strcmp(c->colVal, pCt->colVal) && strlen(pCt->colVal) == strlen(c->colVal)) {
if (c->operaType == ADD_VALUE) {
INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
// taosArrayPush(result, &c->uid);
*s = kTypeValue;
} else if (c->operaType == DEL_VALUE) {
INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)

View File

@ -152,7 +152,7 @@ int verdataCompare(const void *a, const void *b) {
int32_t cmp = compareUint64Val(&va->data, &vb->data);
if (cmp == 0) {
cmp = 0 - compareUint32Val(&va->ver, &vb->data);
cmp = 0 - compareUint32Val(&va->ver, &vb->ver);
return cmp;
}
return cmp;
@ -181,6 +181,7 @@ void idxTRsltDestroy(SIdxTRslt *tr) {
taosArrayDestroy(tr->total);
taosArrayDestroy(tr->add);
taosArrayDestroy(tr->del);
taosMemoryFree(tr);
}
void idxTRsltMergeTo(SIdxTRslt *tr, SArray *result) {
taosArraySort(tr->total, uidCompare);

View File

@ -1787,6 +1787,7 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) {
static const char* jkAggPhysiPlanExprs = "Exprs";
static const char* jkAggPhysiPlanGroupKeys = "GroupKeys";
static const char* jkAggPhysiPlanAggFuncs = "AggFuncs";
static const char* jkAggPhysiPlanMergeDataBlock = "MergeDataBlock";
static int32_t physiAggNodeToJson(const void* pObj, SJson* pJson) {
const SAggPhysiNode* pNode = (const SAggPhysiNode*)pObj;
@ -1801,6 +1802,9 @@ static int32_t physiAggNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkAggPhysiPlanAggFuncs, pNode->pAggFuncs);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkAggPhysiPlanMergeDataBlock, pNode->mergeDataBlock);
}
return code;
}
@ -1818,6 +1822,9 @@ static int32_t jsonToPhysiAggNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkAggPhysiPlanAggFuncs, &pNode->pAggFuncs);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkAggPhysiPlanMergeDataBlock, &pNode->mergeDataBlock);
}
return code;
}
@ -1950,8 +1957,9 @@ static const char* jkWindowPhysiPlanTsEnd = "TsEnd";
static const char* jkWindowPhysiPlanTriggerType = "TriggerType";
static const char* jkWindowPhysiPlanWatermark = "Watermark";
static const char* jkWindowPhysiPlanIgnoreExpired = "IgnoreExpired";
static const char* jkWindowPhysiPlanInputTsOrder = "inputTsOrder";
static const char* jkWindowPhysiPlanInputTsOrder = "InputTsOrder";
static const char* jkWindowPhysiPlanOutputTsOrder = "outputTsOrder";
static const char* jkWindowPhysiPlanMergeDataBlock = "MergeDataBlock";
static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj;
@ -1984,6 +1992,9 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanOutputTsOrder, pNode->outputTsOrder);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkWindowPhysiPlanMergeDataBlock, pNode->mergeDataBlock);
}
return code;
}
@ -2019,6 +2030,9 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkWindowPhysiPlanOutputTsOrder, pNode->outputTsOrder, code);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkWindowPhysiPlanMergeDataBlock, &pNode->mergeDataBlock);
}
return code;
}
@ -2161,7 +2175,6 @@ static int32_t jsonToPhysiSessionWindowNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToPhysiWindowNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkSessionWindowPhysiPlanGap, pNode->gap, code);
;
}
return code;

View File

@ -18,10 +18,10 @@
#include "parInt.h"
#include "parToken.h"
#include "parUtil.h"
#include "query.h"
#include "tglobal.h"
#include "ttime.h"
#include "ttypes.h"
#include "query.h"
#define NEXT_TOKEN(pSql, sToken) \
do { \
@ -1189,12 +1189,7 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks,
if (!isParseBindParam) {
// set null for the columns that are not assigned values
if ((spd->numOfBound < spd->numOfCols) && TD_IS_TP_ROW(row)) {
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (spd->cols[i].valStat == VAL_STAT_NONE) { // the primary TS key is not VAL_STAT_NONE
tdAppendColValToTpRow(pBuilder, TD_VTYPE_NONE, getNullValue(schema[i].type), true, schema[i].type, i,
spd->cols[i].toffset);
}
}
pBuilder->hasNone = true;
}
*gotRow = true;
@ -1206,6 +1201,7 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks,
}
// *len = pBuilder->extendedRowSize;
tdSRowEnd(pBuilder);
return TSDB_CODE_SUCCESS;
}
@ -1266,7 +1262,8 @@ static int32_t parseValuesClause(SInsertParseContext* pCxt, STableDataBlocks* da
SSubmitBlk* pBlocks = (SSubmitBlk*)(dataBuf->pData);
if (TSDB_CODE_SUCCESS != setBlockInfo(pBlocks, dataBuf, numOfRows)) {
return buildInvalidOperationMsg(&pCxt->msg, "too many rows in sql, total number of rows should be less than INT32_MAX");
return buildInvalidOperationMsg(&pCxt->msg,
"too many rows in sql, total number of rows should be less than INT32_MAX");
}
dataBuf->numOfTables = 1;
@ -1338,7 +1335,8 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SToken filePath, STa
SSubmitBlk* pBlocks = (SSubmitBlk*)(dataBuf->pData);
if (TSDB_CODE_SUCCESS != setBlockInfo(pBlocks, dataBuf, numOfRows)) {
return buildInvalidOperationMsg(&pCxt->msg, "too many rows in sql, total number of rows should be less than INT32_MAX");
return buildInvalidOperationMsg(&pCxt->msg,
"too many rows in sql, total number of rows should be less than INT32_MAX");
}
dataBuf->numOfTables = 1;
@ -1975,13 +1973,9 @@ int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, in
}
// set null for the columns that are not assigned values
if ((spd->numOfBound < spd->numOfCols) && TD_IS_TP_ROW(row)) {
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (spd->cols[i].valStat == VAL_STAT_NONE) { // the primary TS key is not VAL_STAT_NONE
tdAppendColValToTpRow(pBuilder, TD_VTYPE_NONE, getNullValue(pSchema[i].type), true, pSchema[i].type, i,
spd->cols[i].toffset);
}
}
pBuilder->hasNone = true;
}
tdSRowEnd(pBuilder);
#ifdef TD_DEBUG_PRINT_ROW
STSchema* pSTSchema = tdGetSTSChemaFromSSChema(pSchema, spd->numOfCols, 1);
tdSRowPrint(row, pSTSchema, __func__);
@ -2058,14 +2052,11 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu
// set null for the columns that are not assigned values
if (rowEnd && (spd->numOfBound < spd->numOfCols) && TD_IS_TP_ROW(row)) {
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (spd->cols[i].valStat == VAL_STAT_NONE) { // the primary TS key is not VAL_STAT_NONE
tdAppendColValToTpRow(pBuilder, TD_VTYPE_NONE, getNullValue(pSchema[i].type), true, pSchema[i].type, i,
spd->cols[i].toffset);
}
}
pBuilder->hasNone = true;
}
if (rowEnd) {
tdSRowEnd(pBuilder);
}
#ifdef TD_DEBUG_PRINT_ROW
if (rowEnd) {
STSchema* pSTSchema = tdGetSTSChemaFromSSChema(pSchema, spd->numOfCols, 1);
@ -2080,7 +2071,8 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu
SSubmitBlk* pBlocks = (SSubmitBlk*)(pDataBlock->pData);
if (TSDB_CODE_SUCCESS != setBlockInfo(pBlocks, pDataBlock, bind->num)) {
return buildInvalidOperationMsg(&pBuf, "too many rows in sql, total number of rows should be less than INT32_MAX");
return buildInvalidOperationMsg(&pBuf,
"too many rows in sql, total number of rows should be less than INT32_MAX");
}
}
@ -2437,14 +2429,10 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
// set null for the columns that are not assigned values
if ((spd->numOfBound < spd->numOfCols) && TD_IS_TP_ROW(row)) {
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (spd->cols[i].valStat == VAL_STAT_NONE) { // the primary TS key is not VAL_STAT_NONE
tdAppendColValToTpRow(pBuilder, TD_VTYPE_NONE, getNullValue(pSchema[i].type), true, pSchema[i].type, i,
spd->cols[i].toffset);
}
}
pBuilder->hasNone = true;
}
tdSRowEnd(pBuilder);
pDataBlock->size += extendedRowSize;
}

View File

@ -486,11 +486,11 @@ static int32_t tdBlockRowMerge(STableMeta* pTableMeta, SBlockKeyTuple* pEndKeyTp
void* pDestRow = NULL;
++((*pBlkRowMerger)->index);
if ((*pBlkRowMerger)->index < taosArrayGetSize((*pBlkRowMerger)->rowArray)) {
void* pAlloc = *(void**)taosArrayGet((*pBlkRowMerger)->rowArray, (*pBlkRowMerger)->index);
if (tRealloc((uint8_t**)&pAlloc, rowSize) != 0) {
void** pAlloc = (void**)taosArrayGet((*pBlkRowMerger)->rowArray, (*pBlkRowMerger)->index);
if (tRealloc((uint8_t**)pAlloc, rowSize) != 0) {
return TSDB_CODE_FAILED;
}
pDestRow = pAlloc;
pDestRow = *pAlloc;
} else {
if (tRealloc((uint8_t**)&pDestRow, rowSize) != 0) {
return TSDB_CODE_FAILED;
@ -659,6 +659,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SB
toffset += TYPE_BYTES[colType];
p += pSchema[j].bytes;
}
tdSRowEnd(&builder);
int32_t rowLen = TD_ROW_LEN((STSRow*)pDataBlock);
pDataBlock = (char*)pDataBlock + rowLen;
pBlock->dataLen += rowLen;

View File

@ -342,7 +342,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pJoin->inputTsOrder = ORDER_ASC;
pJoin->node.groupAction = GROUP_ACTION_CLEAR;
pJoin->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL;
pJoin->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL;
pJoin->node.resultDataOrder = DATA_ORDER_LEVEL_GLOBAL;
int32_t code = TSDB_CODE_SUCCESS;
@ -468,6 +468,16 @@ static int32_t createGroupKeysFromPartKeys(SNodeList* pPartKeys, SNodeList** pOu
return TSDB_CODE_SUCCESS;
}
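// groups are kept for stream queries and for queries carrying LIMIT/SLIMIT; otherwise no group action is required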
static EGroupAction getGroupAction(SLogicPlanContext* pCxt, SSelectStmt* pSelect) {
return (pCxt->pPlanCxt->streamQuery || NULL != pSelect->pLimit || NULL != pSelect->pSlimit) ? GROUP_ACTION_KEEP
: GROUP_ACTION_NONE;
}
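// when a timeline is required, ordering within each partition group is sufficient if the
// query has a PARTITION BY list; otherwise a globally ordered input is needed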
static EDataOrderLevel getRequireDataOrder(bool needTimeline, SSelectStmt* pSelect) {
return needTimeline ? (NULL != pSelect->pPartitionByList ? DATA_ORDER_LEVEL_IN_GROUP : DATA_ORDER_LEVEL_GLOBAL)
: DATA_ORDER_LEVEL_NONE;
}
static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) {
if (!pSelect->hasAggFuncs && NULL == pSelect->pGroupByList) {
return TSDB_CODE_SUCCESS;
@ -481,9 +491,9 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect,
pAgg->hasLastRow = pSelect->hasLastRowFunc;
pAgg->hasTimeLineFunc = pSelect->hasTimeLineFunc;
pAgg->onlyHasKeepOrderFunc = pSelect->onlyHasKeepOrderFunc;
pAgg->node.groupAction = GROUP_ACTION_SET;
pAgg->node.requireDataOrder = pAgg->hasTimeLineFunc ? DATA_ORDER_LEVEL_IN_GROUP : DATA_ORDER_LEVEL_NONE;
pAgg->node.resultDataOrder = DATA_ORDER_LEVEL_NONE;
pAgg->node.groupAction = getGroupAction(pCxt, pSelect);
pAgg->node.requireDataOrder = getRequireDataOrder(pAgg->hasTimeLineFunc, pSelect);
pAgg->node.resultDataOrder = pAgg->onlyHasKeepOrderFunc ? pAgg->node.requireDataOrder : DATA_ORDER_LEVEL_NONE;
int32_t code = TSDB_CODE_SUCCESS;
@ -552,9 +562,8 @@ static int32_t createIndefRowsFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt
pIdfRowsFunc->isTailFunc = pSelect->hasTailFunc;
pIdfRowsFunc->isUniqueFunc = pSelect->hasUniqueFunc;
pIdfRowsFunc->isTimeLineFunc = pSelect->hasTimeLineFunc;
pIdfRowsFunc->node.groupAction = GROUP_ACTION_KEEP;
pIdfRowsFunc->node.requireDataOrder =
pIdfRowsFunc->isTimeLineFunc ? DATA_ORDER_LEVEL_IN_GROUP : DATA_ORDER_LEVEL_NONE;
pIdfRowsFunc->node.groupAction = getGroupAction(pCxt, pSelect);
pIdfRowsFunc->node.requireDataOrder = getRequireDataOrder(pIdfRowsFunc->isTimeLineFunc, pSelect);
pIdfRowsFunc->node.resultDataOrder = pIdfRowsFunc->node.requireDataOrder;
// indefinite rows functions and _select_values functions
@ -587,8 +596,8 @@ static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* p
return TSDB_CODE_OUT_OF_MEMORY;
}
pInterpFunc->node.groupAction = GROUP_ACTION_KEEP;
pInterpFunc->node.requireDataOrder = DATA_ORDER_LEVEL_IN_GROUP;
pInterpFunc->node.groupAction = getGroupAction(pCxt, pSelect);
pInterpFunc->node.requireDataOrder = getRequireDataOrder(true, pSelect);
pInterpFunc->node.resultDataOrder = pInterpFunc->node.requireDataOrder;
int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, fmIsInterpFunc, &pInterpFunc->pFuncs);
@ -663,9 +672,11 @@ static int32_t createWindowLogicNodeByState(SLogicPlanContext* pCxt, SStateWindo
}
pWindow->winType = WINDOW_TYPE_STATE;
pWindow->node.groupAction = GROUP_ACTION_KEEP;
pWindow->node.requireDataOrder = pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_IN_BLOCK : DATA_ORDER_LEVEL_IN_GROUP;
pWindow->node.resultDataOrder = DATA_ORDER_LEVEL_IN_GROUP;
pWindow->node.groupAction = getGroupAction(pCxt, pSelect);
pWindow->node.requireDataOrder =
pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_IN_BLOCK : getRequireDataOrder(true, pSelect);
pWindow->node.resultDataOrder =
pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_GLOBAL : pWindow->node.requireDataOrder;
pWindow->pStateExpr = nodesCloneNode(pState->pExpr);
pWindow->pTspk = nodesCloneNode(pState->pCol);
if (NULL == pWindow->pStateExpr || NULL == pWindow->pTspk) {
@ -686,9 +697,11 @@ static int32_t createWindowLogicNodeBySession(SLogicPlanContext* pCxt, SSessionW
pWindow->winType = WINDOW_TYPE_SESSION;
pWindow->sessionGap = ((SValueNode*)pSession->pGap)->datum.i;
pWindow->windowAlgo = pCxt->pPlanCxt->streamQuery ? SESSION_ALGO_STREAM_SINGLE : SESSION_ALGO_MERGE;
pWindow->node.groupAction = GROUP_ACTION_KEEP;
pWindow->node.requireDataOrder = pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_IN_BLOCK : DATA_ORDER_LEVEL_IN_GROUP;
pWindow->node.resultDataOrder = DATA_ORDER_LEVEL_IN_GROUP;
pWindow->node.groupAction = getGroupAction(pCxt, pSelect);
pWindow->node.requireDataOrder =
pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_IN_BLOCK : getRequireDataOrder(true, pSelect);
pWindow->node.resultDataOrder =
pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_GLOBAL : pWindow->node.requireDataOrder;
pWindow->pTspk = nodesCloneNode((SNode*)pSession->pCol);
if (NULL == pWindow->pTspk) {
@ -715,9 +728,13 @@ static int32_t createWindowLogicNodeByInterval(SLogicPlanContext* pCxt, SInterva
pWindow->slidingUnit =
(NULL != pInterval->pSliding ? ((SValueNode*)pInterval->pSliding)->unit : pWindow->intervalUnit);
pWindow->windowAlgo = pCxt->pPlanCxt->streamQuery ? INTERVAL_ALGO_STREAM_SINGLE : INTERVAL_ALGO_HASH;
pWindow->node.groupAction = GROUP_ACTION_KEEP;
pWindow->node.requireDataOrder = pSelect->hasTimeLineFunc ? DATA_ORDER_LEVEL_IN_GROUP : DATA_ORDER_LEVEL_IN_BLOCK;
pWindow->node.resultDataOrder = DATA_ORDER_LEVEL_IN_GROUP;
pWindow->node.groupAction = (NULL != pInterval->pFill ? GROUP_ACTION_KEEP : getGroupAction(pCxt, pSelect));
pWindow->node.requireDataOrder =
pCxt->pPlanCxt->streamQuery
? DATA_ORDER_LEVEL_IN_BLOCK
: (pSelect->hasTimeLineFunc ? getRequireDataOrder(true, pSelect) : DATA_ORDER_LEVEL_IN_BLOCK);
pWindow->node.resultDataOrder =
pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_GLOBAL : getRequireDataOrder(true, pSelect);
pWindow->pTspk = nodesCloneNode(pInterval->pCol);
if (NULL == pWindow->pTspk) {
@ -763,9 +780,9 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
return TSDB_CODE_OUT_OF_MEMORY;
}
pFill->node.groupAction = GROUP_ACTION_KEEP;
pFill->node.requireDataOrder = DATA_ORDER_LEVEL_IN_GROUP;
pFill->node.resultDataOrder = DATA_ORDER_LEVEL_IN_GROUP;
pFill->node.groupAction = getGroupAction(pCxt, pSelect);
pFill->node.requireDataOrder = getRequireDataOrder(true, pSelect);
pFill->node.resultDataOrder = pFill->node.requireDataOrder;
pFill->inputTsOrder = ORDER_ASC;
int32_t code = nodesCollectColumns(pSelect, SQL_CLAUSE_WINDOW, NULL, COLLECT_COL_TYPE_ALL, &pFill->node.pTargets);
@ -938,7 +955,7 @@ static int32_t createDistinctLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSe
return TSDB_CODE_OUT_OF_MEMORY;
}
pAgg->node.groupAction = GROUP_ACTION_SET;
pAgg->node.groupAction = GROUP_ACTION_CLEAR;
pAgg->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;
pAgg->node.resultDataOrder = DATA_ORDER_LEVEL_NONE;

View File

@ -242,7 +242,7 @@ static int32_t scanPathOptGetScanOrder(SScanLogicNode* pScan, EScanOrder* pScanO
SFunctionNode* pFunc = (SFunctionNode*)pNode;
if (FUNCTION_TYPE_FIRST == pFunc->funcType) {
hasFirst = true;
} else if (FUNCTION_TYPE_LAST == pFunc->funcType) {
} else if (FUNCTION_TYPE_LAST == pFunc->funcType || FUNCTION_TYPE_LAST_ROW == pFunc->funcType) {
hasLast = true;
} else if (FUNCTION_TYPE_SELECT_VALUE != pFunc->funcType) {
otherFunc = true;

View File

@ -825,6 +825,8 @@ static int32_t createAggPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
return TSDB_CODE_OUT_OF_MEMORY;
}
pAgg->mergeDataBlock = (GROUP_ACTION_KEEP == pAggLogicNode->node.groupAction ? false : true);
SNodeList* pPrecalcExprs = NULL;
SNodeList* pGroupKeys = NULL;
SNodeList* pAggFuncs = NULL;
@ -983,7 +985,7 @@ static bool projectCanMergeDataBlock(SProjectLogicNode* pProject) {
return true;
}
if (1 != LIST_LENGTH(pProject->node.pChildren)) {
return false;
return true;
}
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pProject->node.pChildren, 0);
return DATA_ORDER_LEVEL_GLOBAL == pChild->resultDataOrder ? true : false;
@ -1092,6 +1094,7 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
pWindow->igExpired = pWindowLogicNode->igExpired;
pWindow->inputTsOrder = pWindowLogicNode->inputTsOrder;
pWindow->outputTsOrder = pWindowLogicNode->outputTsOrder;
pWindow->mergeDataBlock = (GROUP_ACTION_KEEP == pWindowLogicNode->node.groupAction ? false : true);
SNodeList* pPrecalcExprs = NULL;
SNodeList* pFuncs = NULL;

View File

@ -393,6 +393,8 @@ static int32_t stbSplCreatePartWindowNode(SWindowLogicNode* pMergeWindow, SLogic
code = TSDB_CODE_OUT_OF_MEMORY;
}
pPartWin->node.groupAction = GROUP_ACTION_KEEP;
if (TSDB_CODE_SUCCESS == code) {
pMergeWindow->node.pTargets = pTargets;
pMergeWindow->node.pConditions = pConditions;
@ -723,6 +725,8 @@ static int32_t stbSplCreatePartAggNode(SAggLogicNode* pMergeAgg, SLogicNode** pO
code = TSDB_CODE_OUT_OF_MEMORY;
}
pPartAgg->node.groupAction = GROUP_ACTION_KEEP;
if (TSDB_CODE_SUCCESS == code && NULL != pGroupKeys) {
pPartAgg->pGroupKeys = pGroupKeys;
code = createColumnByRewriteExprs(pPartAgg->pGroupKeys, &pPartAgg->node.pTargets);

View File

@ -232,7 +232,8 @@ static int32_t adjustPartitionDataRequirement(SPartitionLogicNode* pPart, EDataO
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
pPart->node.resultDataOrder = requirement;
pPart->node.requireDataOrder = requirement;
pPart->node.requireDataOrder =
(requirement >= DATA_ORDER_LEVEL_IN_BLOCK ? DATA_ORDER_LEVEL_GLOBAL : DATA_ORDER_LEVEL_NONE);
return TSDB_CODE_SUCCESS;
}

View File

@ -32,6 +32,22 @@ TEST_F(PlanGroupByTest, basic) {
run("SELECT c1 + c3, SUM(c4 * c5) FROM t1 WHERE CONCAT(c2, 'wwww') = 'abcwww' GROUP BY c1 + c3");
run("SELECT SUM(CEIL(c1)) FROM t1 GROUP BY CEIL(c1)");
run("SELECT COUNT(*) FROM st1");
run("SELECT c1 FROM st1 GROUP BY c1");
run("SELECT COUNT(*) FROM st1 GROUP BY c1");
run("SELECT SUM(c1) FROM st1 GROUP BY c2 HAVING SUM(c1) IS NOT NULL");
}
TEST_F(PlanGroupByTest, withPartitionBy) {
useDb("root", "test");
run("SELECT LAST(ts), TBNAME FROM st1 PARTITION BY TBNAME");
run("SELECT COUNT(*) FROM st1 PARTITION BY c2 GROUP BY c1");
}
TEST_F(PlanGroupByTest, withOrderBy) {
@ -43,14 +59,12 @@ TEST_F(PlanGroupByTest, withOrderBy) {
// run("SELECT COUNT(*), SUM(c1) a FROM t1 ORDER BY a");
}
TEST_F(PlanGroupByTest, aggFunc) {
TEST_F(PlanGroupByTest, multiResFunc) {
useDb("root", "test");
run("SELECT LAST(*), FIRST(*) FROM t1");
run("SELECT LAST(*), FIRST(*) FROM t1 GROUP BY c1");
run("SELECT SUM(10), COUNT(c1) FROM t1 GROUP BY c2");
}
TEST_F(PlanGroupByTest, selectFunc) {
@ -67,17 +81,3 @@ TEST_F(PlanGroupByTest, selectFunc) {
run("SELECT MAX(c1), c2 FROM t1 GROUP BY c3");
run("SELECT MAX(c1), t1.* FROM t1 GROUP BY c3");
}
TEST_F(PlanGroupByTest, stable) {
useDb("root", "test");
run("SELECT COUNT(*) FROM st1");
run("SELECT c1 FROM st1 GROUP BY c1");
run("SELECT COUNT(*) FROM st1 GROUP BY c1");
run("SELECT COUNT(*) FROM st1 PARTITION BY c2 GROUP BY c1");
run("SELECT SUM(c1) FROM st1 GROUP BY c2 HAVING SUM(c1) IS NOT NULL");
}

View File

@ -35,6 +35,7 @@ extern "C" {
#define QW_DEFAULT_SHORT_RUN_TIMES 2
#define QW_DEFAULT_HEARTBEAT_MSEC 5000
#define QW_SCH_TIMEOUT_MSEC 180000
#define QW_MIN_RES_ROWS 4096
enum {
QW_PHASE_PRE_QUERY = 1,
@ -135,6 +136,7 @@ typedef struct SQWTaskCtx {
int32_t msgType;
int32_t fetchType;
int32_t execId;
int32_t level;
bool queryRsped;
bool queryEnd;

View File

@ -12,14 +12,16 @@
int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp) {
int32_t msgSize = sizeof(SRetrieveTableRsp) + length;
SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(msgSize);
SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcReallocCont(*rsp, msgSize);
if (NULL == pRsp) {
qError("rpcMallocCont %d failed", msgSize);
QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
memset(pRsp, 0, sizeof(SRetrieveTableRsp));
if (NULL == *rsp) {
memset(pRsp, 0, sizeof(SRetrieveTableRsp));
}
*rsp = pRsp;
return TSDB_CODE_SUCCESS;
@ -35,6 +37,7 @@ void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete)
rsp->compLen = htonl(len);
rsp->numOfRows = htonl(input->numOfRows);
rsp->numOfCols = htonl(input->numOfCols);
rsp->numOfBlocks = htonl(input->numOfBlocks);
}
void qwFreeFetchRsp(void *msg) {
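The qwMallocFetchRsp change above switches from a fresh allocation to growing the existing response buffer, so several data blocks can be packed into one fetch response. A small sketch of that grow-or-create pattern, using plain realloc in place of the RPC allocator and a hypothetical FetchRsp header:

```c
/* Sketch under stated assumptions: FetchRsp is a stand-in, not SRetrieveTableRsp. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

typedef struct {
  int32_t numOfRows;
  char    data[];      /* payload follows the fixed header */
} FetchRsp;

/* Grow *rsp so it can hold `payloadLen` bytes of payload.
 * On the first call (*rsp == NULL) the header is zero-initialized. */
static int growFetchRsp(int32_t payloadLen, FetchRsp **rsp) {
  size_t msgSize = sizeof(FetchRsp) + (size_t)payloadLen;
  FetchRsp *p = realloc(*rsp, msgSize);   /* realloc(NULL, n) behaves like malloc(n) */
  if (p == NULL) return -1;
  if (*rsp == NULL) memset(p, 0, sizeof(FetchRsp));
  *rsp = p;
  return 0;
}

int main(void) {
  FetchRsp *rsp = NULL;
  if (growFetchRsp(16, &rsp) == 0 && growFetchRsp(64, &rsp) == 0) {
    rsp->numOfRows = 3;
    printf("rows=%d\n", (int)rsp->numOfRows);
  }
  free(rsp);
  return 0;
}
```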

View File

@ -75,22 +75,20 @@ int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
int32_t code = 0;
bool qcontinue = true;
SSDataBlock *pRes = NULL;
uint64_t useconds = 0;
int32_t i = 0;
int32_t execNum = 0;
qTaskInfo_t taskHandle = ctx->taskHandle;
DataSinkHandle sinkHandle = ctx->sinkHandle;
SArray* pResList = taosArrayInit(4, POINTER_BYTES);
while (true) {
QW_TASK_DLOG("start to execTask, loopIdx:%d", i++);
pRes = NULL;
// if *taskHandle is NULL, it's killed right now
if (taskHandle) {
qwDbgSimulateSleep();
code = qExecTask(taskHandle, &pRes, &useconds);
code = qExecTaskOpt(taskHandle, pResList, &useconds);
if (code) {
if (code != TSDB_CODE_OPS_NOT_SUPPORT) {
QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
@ -103,9 +101,8 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
++execNum;
if (NULL == pRes) {
if (taosArrayGetSize(pResList) == 0) {
QW_TASK_DLOG("qExecTask end with empty res, useconds:%" PRIu64, useconds);
dsEndPut(sinkHandle, useconds);
QW_ERR_RET(qwHandleTaskComplete(QW_FPARAMS(), ctx));
@ -117,19 +114,20 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
break;
}
int32_t rows = pRes->info.rows;
for(int32_t j = 0; j < taosArrayGetSize(pResList); ++j) {
SSDataBlock *pRes = taosArrayGetP(pResList, j);
ASSERT(pRes->info.rows > 0);
ASSERT(pRes->info.rows > 0);
SInputData inputData = {.pData = pRes};
code = dsPutDataBlock(sinkHandle, &inputData, &qcontinue);
if (code) {
QW_TASK_ELOG("dsPutDataBlock failed, code:%x - %s", code, tstrerror(code));
QW_ERR_RET(code);
}
SInputData inputData = {.pData = pRes};
code = dsPutDataBlock(sinkHandle, &inputData, &qcontinue);
if (code) {
QW_TASK_ELOG("dsPutDataBlock failed, code:%x - %s", code, tstrerror(code));
QW_ERR_RET(code);
QW_TASK_DLOG("data put into sink, rows:%d, continueExecTask:%d", pRes->info.rows, qcontinue);
}
QW_TASK_DLOG("data put into sink, rows:%d, continueExecTask:%d", rows, qcontinue);
if (!qcontinue) {
if (queryStop) {
*queryStop = true;
@ -151,6 +149,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
}
}
taosArrayDestroy(pResList);
QW_RET(code);
}
@ -203,57 +202,88 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
SRetrieveTableRsp *rsp = NULL;
bool queryEnd = false;
int32_t code = 0;
SOutputData output = {0};
dsGetDataLength(ctx->sinkHandle, &len, &queryEnd);
*dataLen = 0;
if (len < 0) {
QW_TASK_ELOG("invalid length from dsGetDataLength, length:%d", len);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
while (true) {
dsGetDataLength(ctx->sinkHandle, &len, &queryEnd);
if (len == 0) {
if (queryEnd) {
code = dsGetDataBlock(ctx->sinkHandle, pOutput);
if (code) {
QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code));
QW_ERR_RET(code);
}
QW_TASK_DLOG_E("no data in sink and query end");
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);
QW_ERR_RET(qwMallocFetchRsp(len, &rsp));
*rspMsg = rsp;
*dataLen = 0;
return TSDB_CODE_SUCCESS;
if (len < 0) {
QW_TASK_ELOG("invalid length from dsGetDataLength, length:%d", len);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
pOutput->bufStatus = DS_BUF_EMPTY;
if (len == 0) {
if (queryEnd) {
code = dsGetDataBlock(ctx->sinkHandle, &output);
if (code) {
QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code));
QW_ERR_RET(code);
}
return TSDB_CODE_SUCCESS;
QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %d", pOutput->numOfBlocks, pOutput->numOfRows);
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);
if (NULL == rsp) {
QW_ERR_RET(qwMallocFetchRsp(len, &rsp));
*pOutput = output;
} else {
pOutput->queryEnd = output.queryEnd;
pOutput->bufStatus = output.bufStatus;
pOutput->useconds = output.useconds;
}
break;
}
pOutput->bufStatus = DS_BUF_EMPTY;
break;
}
// Got data from sink
QW_TASK_DLOG("there are data in sink, dataLength:%d", len);
*dataLen += len;
QW_ERR_RET(qwMallocFetchRsp(*dataLen, &rsp));
output.pData = rsp->data + *dataLen - len;
code = dsGetDataBlock(ctx->sinkHandle, &output);
if (code) {
QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code));
QW_ERR_RET(code);
}
pOutput->queryEnd = output.queryEnd;
pOutput->precision = output.precision;
pOutput->bufStatus = output.bufStatus;
pOutput->useconds = output.useconds;
pOutput->compressed = output.compressed;
pOutput->numOfCols = output.numOfCols;
pOutput->numOfRows += output.numOfRows;
pOutput->numOfBlocks++;
if (DS_BUF_EMPTY == pOutput->bufStatus && pOutput->queryEnd) {
QW_TASK_DLOG("task all data fetched and done, fetched blocks %d rows %d", pOutput->numOfBlocks, pOutput->numOfRows);
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);
break;
}
if (0 == ctx->level) {
QW_TASK_DLOG("task fetched blocks %d rows %d, level %d", pOutput->numOfBlocks, pOutput->numOfRows, ctx->level);
break;
}
if (pOutput->numOfRows >= QW_MIN_RES_ROWS) {
QW_TASK_DLOG("task fetched blocks %d rows %d reaches the min rows", pOutput->numOfBlocks, pOutput->numOfRows);
break;
}
}
// Got data from sink
QW_TASK_DLOG("there are data in sink, dataLength:%d", len);
*dataLen = len;
QW_ERR_RET(qwMallocFetchRsp(len, &rsp));
*rspMsg = rsp;
pOutput->pData = rsp->data;
code = dsGetDataBlock(ctx->sinkHandle, pOutput);
if (code) {
QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code));
QW_ERR_RET(code);
}
if (DS_BUF_EMPTY == pOutput->bufStatus && pOutput->queryEnd) {
QW_TASK_DLOG_E("task all data fetched, done");
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);
}
return TSDB_CODE_SUCCESS;
}
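The rewritten loop above keeps draining the sink until the query ends or enough rows (QW_MIN_RES_ROWS) have been collected to make one fetch response worthwhile. A simplified, self-contained version of that batching loop, with a plain array standing in for the data sink:

```c
/* Toy sketch of the accumulate-until-threshold loop; Sink and MIN_RES_ROWS
 * are illustrative stand-ins, not the query-worker types. */
#include <stdio.h>
#include <stdbool.h>

#define MIN_RES_ROWS 4096

typedef struct { int rows; } Block;

typedef struct {
  Block *blocks;
  int    count;
  int    next;     /* index of the next block to hand out */
  bool   queryEnd; /* no more blocks will ever be produced */
} Sink;

/* Returns the number of rows in the next block, or 0 if the sink is empty. */
static int sinkNextLen(const Sink *s) {
  return s->next < s->count ? s->blocks[s->next].rows : 0;
}

static void fetchFromSink(Sink *s) {
  int totalRows = 0, totalBlocks = 0;
  while (true) {
    int len = sinkNextLen(s);
    if (len == 0) {
      if (s->queryEnd) printf("query end, fetched %d blocks, %d rows\n", totalBlocks, totalRows);
      else             printf("sink empty for now, fetched %d blocks, %d rows\n", totalBlocks, totalRows);
      break;
    }
    totalRows += s->blocks[s->next++].rows;
    totalBlocks++;
    if (totalRows >= MIN_RES_ROWS) {   /* enough data for one response */
      printf("reached min rows: %d blocks, %d rows\n", totalBlocks, totalRows);
      break;
    }
  }
}

int main(void) {
  Block b[] = {{1500}, {1500}, {2000}, {700}};
  Sink s = {.blocks = b, .count = 4, .next = 0, .queryEnd = true};
  fetchFromSink(&s);   /* stops after 3 blocks (5000 rows >= 4096) */
  fetchFromSink(&s);   /* drains the last block, then reports query end */
  return 0;
}
```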
@ -552,6 +582,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) {
// queryRsped = true;
ctx->level = plan->level;
atomic_store_ptr(&ctx->taskHandle, pTaskInfo);
atomic_store_ptr(&ctx->sinkHandle, sinkHandle);

View File

@ -3245,11 +3245,11 @@ _return:
return code;
}
bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t numOfCols, int32_t numOfRows) {
bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg **pDataStatis, int32_t numOfCols, int32_t numOfRows) {
if (info->scalarMode) {
return true;
}
if (FILTER_EMPTY_RES(info)) {
return false;
}
@ -3265,7 +3265,7 @@ bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t
int32_t index = -1;
SFilterRangeCtx *ctx = info->colRange[k];
for(int32_t i = 0; i < numOfCols; ++i) {
if (pDataStatis[i].colId == ctx->colId) {
if (pDataStatis[i]->colId == ctx->colId) {
index = i;
break;
}
@ -3281,13 +3281,13 @@ bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t
break;
}
if (pDataStatis[index].numOfNull <= 0) {
if (pDataStatis[index]->numOfNull <= 0) {
if (ctx->isnull && !ctx->notnull && !ctx->isrange) {
ret = false;
break;
}
} else if (pDataStatis[index].numOfNull > 0) {
if (pDataStatis[index].numOfNull == numOfRows) {
} else if (pDataStatis[index]->numOfNull > 0) {
if (pDataStatis[index]->numOfNull == numOfRows) {
if ((ctx->notnull || ctx->isrange) && (!ctx->isnull)) {
ret = false;
break;
@ -3301,7 +3301,7 @@ bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t
}
}
SColumnDataAgg* pDataBlockst = &pDataStatis[index];
SColumnDataAgg* pDataBlockst = pDataStatis[index];
SFilterRangeNode *r = ctx->rs;
float minv = 0;
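filterRangeExecute now receives the column statistics as an array of pointers (SColumnDataAgg **), so each entry is dereferenced and may be absent. A minimal sketch of that lookup pattern with simplified stand-in types:

```c
/* Illustrative only: ColAgg is a simplified stand-in for SColumnDataAgg. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef struct {
  int16_t colId;
  int32_t numOfNull;
  double  min;
  double  max;
} ColAgg;

/* Find the aggregate for colId, skipping columns that have no statistics. */
static const ColAgg *findColAgg(const ColAgg **stats, int numOfCols, int16_t colId) {
  for (int i = 0; i < numOfCols; ++i) {
    if (stats[i] != NULL && stats[i]->colId == colId) return stats[i];
  }
  return NULL;
}

int main(void) {
  ColAgg c1 = {.colId = 1, .numOfNull = 0, .min = 1.0, .max = 9.0};
  ColAgg c3 = {.colId = 3, .numOfNull = 4, .min = -2.0, .max = 2.0};
  const ColAgg *stats[] = {&c1, NULL, &c3};  /* column 2 has no statistics */

  const ColAgg *agg = findColAgg(stats, 3, 3);
  if (agg != NULL) {
    printf("col %d: nulls=%d range=[%g, %g]\n", (int)agg->colId, (int)agg->numOfNull, agg->min, agg->max);
  }
  return 0;
}
```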

View File

@ -27,10 +27,9 @@ int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock
ASSERT(pReq->blockNum == taosArrayGetSize(pReq->dataLen));
for (int32_t i = 0; i < blockNum; i++) {
/*int32_t len = *(int32_t*)taosArrayGet(pReq->dataLen, i);*/
SRetrieveTableRsp* pRetrieve = taosArrayGetP(pReq->data, i);
SSDataBlock* pDataBlock = taosArrayGet(pArray, i);
blockDecode(pDataBlock, htonl(pRetrieve->numOfCols), htonl(pRetrieve->numOfRows), pRetrieve->data);
blockDecode(pDataBlock, pRetrieve->data);
// TODO: refactor
pDataBlock->info.window.skey = be64toh(pRetrieve->skey);
pDataBlock->info.window.ekey = be64toh(pRetrieve->ekey);
@ -52,7 +51,7 @@ int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock
taosArraySetSize(pArray, 1);
SRetrieveTableRsp* pRetrieve = pReq->pRetrieve;
SSDataBlock* pDataBlock = taosArrayGet(pArray, 0);
blockDecode(pDataBlock, htonl(pRetrieve->numOfCols), htonl(pRetrieve->numOfRows), pRetrieve->data);
blockDecode(pDataBlock, pRetrieve->data);
// TODO: refactor
pDataBlock->info.window.skey = be64toh(pRetrieve->skey);
pDataBlock->info.window.ekey = be64toh(pRetrieve->ekey);

View File

@ -199,7 +199,6 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
// send msg
syncNodeAppendEntriesBatch(pSyncNode, pDestId, pMsg);
syncAppendEntriesBatchDestroy(pMsg);
// speed up
if (pMsg->dataCount > 0 && pSyncNode->commitIndex - pMsg->prevLogIndex > SYNC_SLOW_DOWN_RANGE) {
@ -216,6 +215,8 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
} while (0);
#endif
}
syncAppendEntriesBatchDestroy(pMsg);
}
return ret;

View File

@ -128,7 +128,7 @@ typedef struct {
int8_t retryCnt;
int8_t retryLimit;
// bool setMaxRetry;
STransCtx appCtx; //
STransMsg* pRsp; // for synchronous API
tsem_t* pSem; // for synchronous API
@ -195,17 +195,7 @@ typedef enum { ConnNormal, ConnAcquire, ConnRelease, ConnBroken, ConnInPool } Co
#define transLabel(trans) ((STrans*)trans)->label
// int rpcAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey);
// void rpcBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey);
//// int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen);
//
// int transAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey);
// void transBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey);
// bool transCompressMsg(char* msg, int32_t len, int32_t* flen);
// bool transDecompressMsg(char* msg, int32_t len, int32_t* flen);
void transFreeMsg(void* msg);
//
typedef struct SConnBuffer {
char* buf;
@ -322,8 +312,8 @@ void* transCtxDumpBrokenlinkVal(STransCtx* ctx, int32_t* msgType);
// request list
typedef struct STransReq {
queue q;
void* data;
queue q;
uv_write_t wreq;
} STransReq;
void transReqQueueInit(queue* q);

View File

@ -16,7 +16,8 @@
#include "transComm.h"
typedef struct SConnList {
queue conn;
queue conn;
int32_t size;
} SConnList;
typedef struct SCliConn {
@ -339,8 +340,8 @@ void cliHandleResp(SCliConn* conn) {
tDebug("%s conn %p stop timer", CONN_GET_INST_LABEL(conn), conn);
uv_timer_stop(conn->timer);
}
conn->timer->data = NULL;
taosArrayPush(pThrd->timerList, &conn->timer);
conn->timer->data = NULL;
conn->timer = NULL;
}
@ -395,7 +396,7 @@ void cliHandleResp(SCliConn* conn) {
STraceId* trace = &transMsg.info.traceId;
tGTrace("%s conn %p %s received from %s, local info:%s, len:%d, code str:%s", CONN_GET_INST_LABEL(conn), conn,
tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, code str:%s", CONN_GET_INST_LABEL(conn), conn,
TMSG_INFO(pHead->msgType), conn->dst, conn->src, transMsg.contLen, tstrerror(transMsg.code));
if (pCtx == NULL && CONN_NO_PERSIST_BY_APP(conn)) {
@ -510,7 +511,7 @@ static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
SHashObj* pPool = pool;
SConnList* plist = taosHashGet(pPool, key, strlen(key));
if (plist == NULL) {
SConnList list;
SConnList list = {0};
taosHashPut(pPool, key, strlen(key), (void*)&list, sizeof(list));
plist = taosHashGet(pPool, key, strlen(key));
QUEUE_INIT(&plist->conn);
@ -519,15 +520,18 @@ static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
if (QUEUE_IS_EMPTY(&plist->conn)) {
return NULL;
}
plist->size -= 1;
queue* h = QUEUE_HEAD(&plist->conn);
SCliConn* conn = QUEUE_DATA(h, SCliConn, q);
conn->status = ConnNormal;
QUEUE_REMOVE(&conn->q);
QUEUE_INIT(&conn->q);
transDQCancel(((SCliThrd*)conn->hostThrd)->timeoutQueue, conn->task);
conn->task = NULL;
if (conn->task != NULL) {
transDQCancel(((SCliThrd*)conn->hostThrd)->timeoutQueue, conn->task);
conn->task = NULL;
}
return conn;
}
static void addConnToPool(void* pool, SCliConn* conn) {
@ -539,6 +543,13 @@ static void addConnToPool(void* pool, SCliConn* conn) {
allocConnRef(conn, true);
if (conn->timer != NULL) {
uv_timer_stop(conn->timer);
taosArrayPush(thrd->timerList, &conn->timer);
conn->timer->data = NULL;
conn->timer = NULL;
}
STrans* pTransInst = thrd->pTransInst;
cliReleaseUnfinishedMsg(conn);
transQueueClear(&conn->cliMsgs);
@ -556,13 +567,17 @@ static void addConnToPool(void* pool, SCliConn* conn) {
assert(conn->list != NULL);
QUEUE_INIT(&conn->q);
QUEUE_PUSH(&conn->list->conn, &conn->q);
conn->list->size += 1;
conn->task = NULL;
assert(!QUEUE_IS_EMPTY(&conn->list->conn));
STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
arg->param1 = conn;
arg->param2 = thrd;
conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime));
if (conn->list->size >= 50) {
STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
arg->param1 = conn;
arg->param2 = thrd;
conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime));
}
}
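The pool changes above add a size counter per connection list and only schedule an idle-close task once the list grows past a threshold, so small pools keep their connections warm. A toy sketch of that bookkeeping; IDLE_CLOSE_THRESHOLD mirrors the hard-coded 50 and all names here are illustrative:

```c
/* Sketch only: the real pool uses intrusive queues and libuv timers. */
#include <stdio.h>
#include <stdbool.h>

#define IDLE_CLOSE_THRESHOLD 50

typedef struct {
  int size;          /* number of idle connections currently pooled */
} ConnPoolList;

/* Returns true if the caller should arm an idle-close timer for this connection. */
static bool poolPut(ConnPoolList *list) {
  list->size += 1;
  return list->size >= IDLE_CLOSE_THRESHOLD;
}

/* Take one connection back out of the pool, if any. */
static bool poolGet(ConnPoolList *list) {
  if (list->size == 0) return false;
  list->size -= 1;
  return true;
}

int main(void) {
  ConnPoolList list = {0};
  for (int i = 1; i <= 51; ++i) {
    if (poolPut(&list)) printf("conn #%d: pool size %d, schedule idle close\n", i, list.size);
  }
  while (poolGet(&list)) { /* drain */ }
  printf("pool drained, size=%d\n", list.size);
  return 0;
}
```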
static int32_t allocConnRef(SCliConn* conn, bool update) {
if (update) {
@ -775,7 +790,7 @@ void cliSend(SCliConn* pConn) {
uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
STraceId* trace = &pMsg->info.traceId;
tGTrace("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn,
tGDebug("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn,
TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, pMsg->contLen);
if (pHead->persist == 1) {
@ -1374,7 +1389,7 @@ int transReleaseCliHandle(void* handle) {
}
STransMsg tmsg = {.info.handle = handle};
TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64());
// TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64());
SCliMsg* cmsg = taosMemoryCalloc(1, sizeof(SCliMsg));
cmsg->msg = tmsg;
@ -1415,7 +1430,6 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran
if (ctx != NULL) {
pCtx->appCtx = *ctx;
}
assert(pTransInst->connType == TAOS_CONN_CLIENT);
SCliMsg* cliMsg = taosMemoryCalloc(1, sizeof(SCliMsg));
cliMsg->ctx = pCtx;

View File

@ -23,33 +23,6 @@ static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT;
static int32_t refMgt;
static int32_t instMgt;
int transAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey) {
T_MD5_CTX context;
int ret = -1;
tMD5Init(&context);
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
tMD5Update(&context, (uint8_t*)pMsg, msgLen);
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
tMD5Final(&context);
if (memcmp(context.digest, pAuth, sizeof(context.digest)) == 0) ret = 0;
return ret;
}
void transBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey) {
T_MD5_CTX context;
tMD5Init(&context);
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
tMD5Update(&context, (uint8_t*)pMsg, msgLen);
tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN);
tMD5Final(&context);
memcpy(pAuth, context.digest, sizeof(context.digest));
}
bool transCompressMsg(char* msg, int32_t len, int32_t* flen) {
return false;
// SRpcHead* pHead = rpcHeadFromCont(pCont);
@ -176,7 +149,6 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) {
* info--->|
*/
SConnBuffer* p = connBuf;
uvBuf->base = p->buf + p->len;
if (p->left == -1) {
uvBuf->len = p->cap - p->len;
@ -184,7 +156,8 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) {
if (p->left < p->cap - p->len) {
uvBuf->len = p->left;
} else {
p->buf = taosMemoryRealloc(p->buf, p->left + p->len);
p->cap = p->left + p->len;
p->buf = taosMemoryRealloc(p->buf, p->cap);
uvBuf->base = p->buf + p->len;
uvBuf->len = p->left;
}
@ -266,14 +239,9 @@ int transAsyncSend(SAsyncPool* pool, queue* q) {
uv_async_t* async = &(pool->asyncs[idx]);
SAsyncItem* item = async->data;
int64_t st = taosGetTimestampUs();
taosThreadMutexLock(&item->mtx);
QUEUE_PUSH(&item->qmsg, q);
taosThreadMutexUnlock(&item->mtx);
int64_t el = taosGetTimestampUs() - st;
if (el > 50) {
// tInfo("lock and unlock cost:%d", (int)el);
}
return uv_async_send(async);
}
@ -349,30 +317,21 @@ void transReqQueueInit(queue* q) {
QUEUE_INIT(q);
}
void* transReqQueuePush(queue* q) {
uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t));
STransReq* wreq = taosMemoryCalloc(1, sizeof(STransReq));
wreq->data = req;
req->data = wreq;
QUEUE_PUSH(q, &wreq->q);
return req;
STransReq* req = taosMemoryCalloc(1, sizeof(STransReq));
req->wreq.data = req;
QUEUE_PUSH(q, &req->q);
return &req->wreq;
}
void* transReqQueueRemove(void* arg) {
void* ret = NULL;
uv_write_t* req = arg;
STransReq* wreq = req && req->data ? req->data : NULL;
uv_write_t* wreq = arg;
assert(wreq->data == req);
if (wreq == NULL || wreq->data == NULL) {
taosMemoryFree(wreq->data);
taosMemoryFree(wreq);
return req;
}
STransReq* req = wreq ? wreq->data : NULL;
if (req == NULL) return NULL;
QUEUE_REMOVE(&req->q);
QUEUE_REMOVE(&wreq->q);
ret = req && req->handle ? req->handle->data : NULL;
taosMemoryFree(wreq->data);
taosMemoryFree(wreq);
ret = wreq && wreq->handle ? wreq->handle->data : NULL;
taosMemoryFree(req);
return ret;
}
@ -381,7 +340,6 @@ void transReqQueueClear(queue* q) {
queue* h = QUEUE_HEAD(q);
QUEUE_REMOVE(h);
STransReq* req = QUEUE_DATA(h, STransReq, q);
taosMemoryFree(req->data);
taosMemoryFree(req);
}
}
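The transReqQueue refactor above embeds the uv_write_t inside STransReq, so a single allocation covers both and the wrapper can be recovered from the request pointer. A standalone sketch of that embed-and-recover pattern, with a hypothetical LibWriteReq instead of the real libuv type and the usual offsetof-based container_of:

```c
/* Sketch under stated assumptions: LibWriteReq and TransReq are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

typedef struct {
  void *data;              /* user pointer, as libuv-style requests provide */
} LibWriteReq;

typedef struct TransReq {
  struct TransReq *next;   /* intrusive list link (a queue in the real code) */
  LibWriteReq      wreq;   /* embedded library request: no second allocation */
} TransReq;

#define container_of(ptr, type, member) \
  ((type *)((char *)(ptr) - offsetof(type, member)))

static LibWriteReq *reqQueuePush(TransReq **head) {
  TransReq *req = calloc(1, sizeof(TransReq));
  if (req == NULL) return NULL;
  req->wreq.data = req;    /* still handy for debugging/asserts */
  req->next = *head;
  *head = req;
  return &req->wreq;       /* hand the embedded request to the library */
}

static void reqQueueRemove(TransReq **head, LibWriteReq *wreq) {
  if (wreq == NULL) return;
  TransReq *req = container_of(wreq, TransReq, wreq);
  for (TransReq **pp = head; *pp != NULL; pp = &(*pp)->next) {
    if (*pp == req) { *pp = req->next; break; }
  }
  free(req);               /* one free releases both node and request */
}

int main(void) {
  TransReq *head = NULL;
  LibWriteReq *w1 = reqQueuePush(&head);
  LibWriteReq *w2 = reqQueuePush(&head);
  reqQueueRemove(&head, w1);
  reqQueueRemove(&head, w2);
  printf("queue empty: %s\n", head == NULL ? "yes" : "no");
  return 0;
}
```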

View File

@ -75,7 +75,6 @@ typedef struct SWorkThrd {
SAsyncPool* asyncPool;
uv_prepare_t* prepare;
queue msg;
TdThreadMutex msgMtx;
queue conn;
void* pTransInst;
@ -253,10 +252,10 @@ static void uvHandleReq(SSvrConn* pConn) {
if (pConn->status == ConnNormal && pHead->noResp == 0) {
transRefSrvHandle(pConn);
tGTrace("%s conn %p %s received from %s, local info:%s, len:%d", transLabel(pTransInst), pConn,
tGDebug("%s conn %p %s received from %s, local info:%s, len:%d", transLabel(pTransInst), pConn,
TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, transMsg.contLen);
} else {
tGTrace("%s conn %p %s received from %s, local info:%s, len:%d, resp:%d, code:%d", transLabel(pTransInst), pConn,
tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, resp:%d, code:%d", transLabel(pTransInst), pConn,
TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, transMsg.contLen, pHead->noResp, transMsg.code);
}
@ -416,7 +415,7 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
STrans* pTransInst = pConn->pTransInst;
STraceId* trace = &pMsg->info.traceId;
tGTrace("%s conn %p %s is sent to %s, local info:%s, len:%d", transLabel(pTransInst), pConn,
tGDebug("%s conn %p %s is sent to %s, local info:%s, len:%d", transLabel(pTransInst), pConn,
TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, pMsg->contLen);
pHead->msgLen = htonl(len);
@ -499,6 +498,7 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
tError("unexcept occurred, continue");
continue;
}
// release handle to rpc init
if (msg->type == Quit) {
(*transAsyncHandle[msg->type])(msg, pThrd);
@ -743,7 +743,6 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) {
pThrd->pipe->data = pThrd;
QUEUE_INIT(&pThrd->msg);
taosThreadMutexInit(&pThrd->msgMtx, NULL);
pThrd->prepare = taosMemoryCalloc(1, sizeof(uv_prepare_t));
uv_prepare_init(pThrd->loop, pThrd->prepare);

View File

@ -75,15 +75,14 @@ void processShellMsg() {
void *handle = pRpcMsg->info.handle;
taosFreeQitem(pRpcMsg);
{
SRpcMsg nRpcMsg = {0};
nRpcMsg.pCont = rpcMallocCont(msgSize);
nRpcMsg.contLen = msgSize;
nRpcMsg.info.handle = handle;
nRpcMsg.code = TSDB_CODE_CTG_NOT_READY;
rpcSendResponse(&nRpcMsg);
}
//{
// SRpcMsg nRpcMsg = {0};
// nRpcMsg.pCont = rpcMallocCont(msgSize);
// nRpcMsg.contLen = msgSize;
// nRpcMsg.info.handle = handle;
// nRpcMsg.code = TSDB_CODE_CTG_NOT_READY;
// rpcSendResponse(&nRpcMsg);
//}
}
taosUpdateItemSize(qinfo.queue, numOfMsgs);

View File

@ -16,6 +16,7 @@
#define ALLOW_FORBID_FUNC
#define _DEFAULT_SOURCE
#include "os.h"
#include "tdef.h"
#include "pthread.h"
#ifdef WINDOWS
@ -57,7 +58,7 @@ int32_t taosGetAppName(char* name, int32_t* len) {
end = filepath;
}
strcpy(name, end);
tstrncpy(name, end, TSDB_APP_NAME_LEN);
if (len != NULL) {
*len = (int32_t)strlen(end);
@ -625,7 +626,7 @@ int32_t taosGetAppName(char *name, int32_t *len) {
buf[PATH_MAX] = '\0';
size_t n = strlen(buf);
if (len) *len = n;
if (name) strcpy(name, buf);
if (name) tstrncpy(name, buf, TSDB_APP_NAME_LEN);
return 0;
}
@ -668,7 +669,7 @@ int32_t taosGetAppName(char* name, int32_t* len) {
++end;
strcpy(name, end);
tstrncpy(name, end, TSDB_APP_NAME_LEN);
if (len != NULL) {
*len = strlen(name);
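The strcpy calls above are replaced with tstrncpy bounded by TSDB_APP_NAME_LEN, so a long executable path can no longer overflow the name buffer. A small sketch of the same safeguard using a generic bounded copy; the names here are illustrative, not the TDengine helpers themselves:

```c
/* Sketch: copy at most dstSize-1 bytes and always NUL-terminate. */
#include <stdio.h>
#include <string.h>

#define APP_NAME_LEN 32

static void bounded_copy(char *dst, const char *src, size_t dstSize) {
  if (dstSize == 0) return;
  strncpy(dst, src, dstSize - 1);
  dst[dstSize - 1] = '\0';
}

int main(void) {
  char name[APP_NAME_LEN];
  const char *path = "/usr/local/taos/bin/some-very-long-executable-name-from-proc";
  const char *base = strrchr(path, '/');
  base = (base != NULL) ? base + 1 : path;

  bounded_copy(name, base, sizeof(name));   /* safe even if base is longer than name */
  printf("app name: %s (len=%zu)\n", name, strlen(name));
  return 0;
}
```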

View File

@ -398,7 +398,7 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) {
if (line != NULL) taosMemoryFree(line);
taosCloseFile(&pFile);
if (code != 0) {
if (code != 0 && (done & 1) == 0) {
TdFilePtr pFile1 = taosOpenFile("/proc/device-tree/model", TD_FILE_READ | TD_FILE_STREAM);
if (pFile1 == NULL) return code;
taosGetsFile(pFile1, maxLen, cpuModel);
@ -407,6 +407,16 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) {
done |= 1;
}
if (code != 0 && (done & 1) == 0) {
TdCmdPtr pCmd = taosOpenCmd("uname -a");
if (pCmd == NULL) return code;
if (taosGetsCmd(pCmd, maxLen, cpuModel) > 0) {
code = 0;
done |= 1;
}
taosCloseCmd(&pCmd);
}
if ((done & 2) == 0) {
*numOfCores = coreCount;
done |= 2;

View File

@ -248,6 +248,16 @@ TdCmdPtr taosOpenCmd(const char* cmd) {
#endif
}
int64_t taosGetsCmd(TdCmdPtr pCmd, int32_t maxSize, char *__restrict buf) {
if (pCmd == NULL || buf == NULL) {
return -1;
}
if (fgets(buf, maxSize, (FILE*)pCmd) == NULL) {
return -1;
}
return strlen(buf);
}
int64_t taosGetLineCmd(TdCmdPtr pCmd, char** __restrict ptrBuf) {
if (pCmd == NULL || ptrBuf == NULL) {
return -1;
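taosGetsCmd above reads one line of a command's output with fgets; the CPU-info hunk uses it as a `uname -a` fallback. A simplified, POSIX-only stand-in built directly on popen/pclose rather than the TDengine OS abstraction layer:

```c
/* Sketch: run a shell command and read the first line of its output. */
#include <stdio.h>
#include <string.h>

/* Returns the line length on success, -1 on failure. */
static long read_cmd_line(const char *cmd, char *buf, int maxSize) {
  if (cmd == NULL || buf == NULL || maxSize <= 0) return -1;
  FILE *fp = popen(cmd, "r");
  if (fp == NULL) return -1;
  long len = -1;
  if (fgets(buf, maxSize, fp) != NULL) {
    buf[strcspn(buf, "\n")] = '\0';   /* trim the trailing newline, if any */
    len = (long)strlen(buf);
  }
  pclose(fp);
  return len;
}

int main(void) {
  char model[256];
  if (read_cmd_line("uname -a", model, sizeof(model)) > 0) {
    printf("system: %s\n", model);
  }
  return 0;
}
```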

View File

@ -44,11 +44,11 @@ typedef struct {
void (*fp)(void *);
} SRefSet;
static SRefSet tsRefSetList[TSDB_REF_OBJECTS];
static SRefSet tsRefSetList[TSDB_REF_OBJECTS];
static TdThreadOnce tsRefModuleInit = PTHREAD_ONCE_INIT;
static TdThreadMutex tsRefMutex;
static int32_t tsRefSetNum = 0;
static int32_t tsNextId = 0;
static int32_t tsRefSetNum = 0;
static int32_t tsNextId = 0;
static void taosInitRefModule(void);
static void taosLockList(int64_t *lockedBy);

View File

@ -332,8 +332,8 @@
./test.sh -f tsim/vnode/stable_replica3_vnode3.sim
# --- sync
#./test.sh -f tsim/sync/3Replica1VgElect.sim
#./test.sh -f tsim/sync/3Replica5VgElect.sim
./test.sh -f tsim/sync/3Replica1VgElect.sim
./test.sh -f tsim/sync/3Replica5VgElect.sim
./test.sh -f tsim/sync/oneReplica1VgElect.sim
./test.sh -f tsim/sync/oneReplica5VgElect.sim

View File

@ -78,7 +78,6 @@ endi
sql select diff(c7) from $tb
sql_error select diff(c8) from $tb
sql_error select diff(c9) from $tb
sql_error select diff(ts) from $tb
sql select diff(c1), diff(c2) from $tb
sql select 2+diff(c1) from $tb

View File

@ -49,7 +49,7 @@ if $rows != 12 then
return -1
endi
if $data02 != NULL then
print "expect: NULL, act: $data03"
print "expect: NULL, act: $data02"
return -1
endi
if $data03 != @binary00@ then

View File

@ -304,8 +304,8 @@ endi
if $data21 != 24 then
return -1
endi
if $data22 != 11.000000000 then
print $data02
if $data22 != 11.000000000 then
print expect 11.000000000 actual: $data22
return -1
endi
if $data23 != 25 then

View File

@ -263,6 +263,9 @@ sql select sum(c1), avg(c2), stddev(c3), max(c4), min(c5), count(c6), first(c7),
if $rows != 0 then
return -1
endi
print select sum(c1), avg(c2), stddev(c3), max(c4), min(c5), count(c6), first(c7), last(c8), last(c9) from $tb where ts >= $ts0 and ts <= $tsu and c1>1 and c2<9 and c3>2 and c4<8 and c5>4 and c6<6 limit 1 offset 0
sql select sum(c1), avg(c2), stddev(c3), max(c4), min(c5), count(c6), first(c7), last(c8), last(c9) from $tb where ts >= $ts0 and ts <= $tsu and c1>1 and c2<9 and c3>2 and c4<8 and c5>4 and c6<6 limit 1 offset 0
if $rows != 1 then
return -1
@ -270,6 +273,7 @@ endi
$val = $rowNum / 10
$val = $val * 5
if $data00 != $val then
print $data00, expect $val
return -1
endi
if $data01 != 5.000000000 then

View File

@ -92,6 +92,7 @@ if $data03 != 319 then
return -1
endi
if $data04 != 318.000000000 then
print expect 318.000000000, actual: $data04
return -1
endi
if $data05 != 1 then

View File

@ -1,10 +1,10 @@
run tsim/user/password.sim
run tsim/user/privilege_db.sim
run tsim/user/privilege_sysinfo.sim
run tsim/user/basic.sim
run tsim/table/basic1.sim
run tsim/trans/lossdata1.sim
run tsim/trans/create_db.sim
#run tsim/user/password.sim
#run tsim/user/privilege_db.sim
#run tsim/user/privilege_sysinfo.sim
#run tsim/user/basic.sim
#run tsim/table/basic1.sim
#run tsim/trans/lossdata1.sim
#run tsim/trans/create_db.sim
run tsim/stable/alter_metrics.sim
run tsim/stable/tag_modify.sim
run tsim/stable/alter_comment.sim

View File

@ -83,9 +83,6 @@ class TDTestCase:
tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
tdSql.error(f"select diff(ts) from {dbname}.stb")
tdSql.error(f"select diff(ts) from {dbname}.stb_1")
# tdSql.error(f"select diff(col7) from {dbname}.stb")
tdSql.error(f"select diff(col8) from {dbname}.stb")

View File

@ -256,8 +256,6 @@ class TDTestCase:
tdSql.error(self.diff_query_form(col='c%')) # col is special char 1
tdSql.error(self.diff_query_form(col='c_')) # col is special char 2
tdSql.error(self.diff_query_form(col='c.')) # col is special char 3
tdSql.error(self.diff_query_form(col='c3')) # timestamp col
tdSql.error(self.diff_query_form(col='ts')) # Primary key
tdSql.error(self.diff_query_form(col='avg(c1)')) # expr col
# tdSql.error(self.diff_query_form(col='c6')) # bool col
tdSql.query("select diff(c6) from t1")