[td-225] merge develop

commit 401fc20df0
@@ -43,16 +43,48 @@ def pre_test(){
 killall -9 gdb || echo "no gdb running"
 cd ${WKC}
 git reset --hard HEAD~10 >/dev/null
+'''
+script {
+if (env.CHANGE_TARGET == 'master') {
+sh '''
+cd ${WKC}
+git checkout master
+'''
+}
+else {
+sh '''
+cd ${WKC}
 git checkout develop
+'''
+}
+}
+sh'''
+cd ${WKC}
 git pull >/dev/null
 git fetch origin +refs/pull/${CHANGE_ID}/merge
 git checkout -qf FETCH_HEAD
 git clean -dfx
 cd ${WK}
 git reset --hard HEAD~10
-git checkout develop
-git pull >/dev/null
+'''
+script {
+if (env.CHANGE_TARGET == 'master') {
+sh '''
 cd ${WK}
+git checkout master
+'''
+}
+else {
+sh '''
+cd ${WK}
+git checkout develop
+'''
+}
+}
+sh '''
+cd ${WK}
+git pull >/dev/null
+
 export TZ=Asia/Harbin
 date
 git clean -dfx
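The added `script` blocks above switch both working copies (`${WKC}` and `${WK}`) to `master` or `develop` depending on the pull request's target branch. As a rough standalone sketch of the same selection logic (assuming `CHANGE_TARGET`, `WKC`, and `WK` are exported by the CI environment, as Jenkins does for this pipeline):

```bash
#!/usr/bin/env bash
# Hypothetical shell equivalent of the checkout step in pre_test();
# CHANGE_TARGET, WKC and WK are assumed to come from the CI environment.
set -euo pipefail

if [ "${CHANGE_TARGET:-}" = "master" ]; then
  branch=master
else
  branch=develop
fi

for repo in "$WKC" "$WK"; do
  cd "$repo"
  git checkout "$branch"
  git pull >/dev/null
done
```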
@@ -93,6 +125,7 @@ pipeline {
 git fetch origin +refs/pull/${CHANGE_ID}/merge
 git checkout -qf FETCH_HEAD
 '''
+
 script{
 env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
 }
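`env.skipstage` is derived from the files the pull request touches: if every changed path matches the exclusion pattern (Markdown files, the connector sources, the Jenkinsfile itself, `test-all.sh`), the grep pipeline prints nothing and the `|| echo 0` fallback yields `0`, which later stages can use to skip the tests. The same filter can be tried by hand against a local checkout (the repository path and PR id are illustrative):

```bash
# Show which changed files would actually trigger the test stages;
# prints 0 when only excluded paths (docs, connectors, CI files) changed.
cd /path/to/TDengine                        # hypothetical working copy
git fetch origin +refs/pull/<PR_ID>/merge   # <PR_ID> stands in for ${CHANGE_ID}
git --no-pager diff --name-only FETCH_HEAD develop \
  | grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0
```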
@@ -185,14 +218,12 @@ pipeline {
 rm -rf /var/log/taos/*
 ./handle_crash_gen_val_log.sh
 '''
-catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
 sh '''
 cd ${WKC}/tests/pytest
 rm -rf /var/lib/taos/*
 rm -rf /var/log/taos/*
 ./handle_taosd_val_log.sh
 '''
-}
 timeout(time: 45, unit: 'MINUTES'){
 sh '''
 date
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
 #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
 #INSTALL(TARGETS shell RUNTIME DESTINATION .)
 IF (TD_MVN_INSTALLED)
-INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc)
+INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.27-dist.jar DESTINATION connector/jdbc)
 ENDIF ()
 ELSEIF (TD_DARWIN)
 SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
 SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-SET(TD_VER_NUMBER "2.0.18.0")
+SET(TD_VER_NUMBER "2.0.20.0")
 ENDIF ()

 IF (DEFINED VERCOMPATIBLE)
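Because the default `TD_VER_NUMBER` only applies when `VERNUMBER` is left undefined, a packaging script can pin the version at configure time instead of editing this file; a minimal sketch (the build directory name is arbitrary):

```bash
# Override the version that CMake bakes in, instead of relying on the default.
mkdir -p build && cd build
cmake .. -DVERNUMBER=2.0.20.0
make
```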
@@ -0,0 +1,211 @@
# Experience TDengine Quickly with Docker

Deploying the TDengine service with Docker is not recommended for production use, but Docker does a good job of hiding differences in the underlying operating system, which makes it a convenient way to install and run TDengine for development, testing, or a first try. In particular, Docker makes it easy to try TDengine on Mac OSX and Windows without setting up a virtual machine or renting an extra Linux server.

The sections below walk through, step by step, how to set up a single-node TDengine runtime environment with Docker for development and testing.

## Download Docker

To install Docker itself, please refer to the [official Docker documentation](https://docs.docker.com/get-docker/).

After installation, check the Docker version from a terminal. If the version number is printed normally, the Docker environment has been installed successfully.

```bash
$ docker -v
Docker version 20.10.5, build 55c4c88
```

## Run TDengine in a Docker Container

1. Pull the TDengine image and run it in the background.

```bash
$ docker run -d tdengine/tdengine
cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316
```

- **docker run**: run a container with Docker.
- **-d**: keep the container running in the background.
- **tdengine/tdengine**: the official TDengine image that is pulled.
- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**: the long string that is returned is the container ID, which can be used to refer to this container later.
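If the container is going to be kept around for a while, it can be convenient to give it a fixed name and to check the server log right after startup; the options below are standard Docker flags, and the name `tdengine` is only an example:

```bash
# Run the same image under a readable name, then follow its log output
# to confirm that taosd came up cleanly.
docker run -d --name tdengine tdengine/tdengine
docker logs -f tdengine
```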
2. Confirm that the container is running correctly.

```bash
$ docker ps
CONTAINER ID   IMAGE               COMMAND   CREATED          STATUS          ···
cdf548465318   tdengine/tdengine   "taosd"   14 minutes ago   Up 14 minutes   ···
```

- **docker ps**: list all containers in the running state.
- **CONTAINER ID**: container ID.
- **IMAGE**: image in use.
- **COMMAND**: command used to start the container.
- **CREATED**: time the container was created.
- **STATUS**: container status; UP means it is running.

3. Enter the Docker container and use TDengine.

```bash
$ docker exec -it cdf548465318 /bin/bash
root@cdf548465318:~/TDengine-server-2.0.13.0#
```

- **docker exec**: enter the container with docker exec; the container keeps running after you exit.
- **-i**: interactive mode.
- **-t**: allocate a terminal.
- **cdf548465318**: container ID; replace it with the value returned by docker ps.
- **/bin/bash**: run bash for interactive use once inside the container.

4. Inside the container, run the taos shell client.

```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos

Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos>
```

The TDengine shell has connected to the server and printed the welcome message and version information. If it fails, an error message is printed instead.

In the TDengine shell you can create and drop databases, tables, and super tables with SQL commands, and run insert and query operations. See the [TAOS SQL documentation](https://www.taosdata.com/cn/documentation/taos-sql) for details.

## Learn More About TDengine with taosdemo

1. Continuing from the steps above, first exit the TDengine shell.

```bash
$ taos> q
root@cdf548465318:~/TDengine-server-2.0.13.0#
```

2. Run taosdemo from the command line.

```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo
###################################################################
# Server IP: localhost:0
# User: root
# Password: taosdata
# Use metric: true
# Datatype of Columns: int int int int int int int float
# Binary Length(If applicable): -1
# Number of Columns per record: 3
# Number of Threads: 10
# Number of Tables: 10000
# Number of Data per Table: 100000
# Records/Request: 1000
# Database name: test
# Table prefix: t
# Delete method: 0
# Test time: 2021-04-13 02:05:20
###################################################################
```

After you press Enter, the command creates a database named test and a super table named meters, and uses meters as a template to create 10,000 tables named "t0" through "t9999". Each table holds 100,000 records, and each record has the three fields f1, f2, and f3, with timestamps (ts) running from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table carries the two tags areaid and loc: areaid is set to 1 through 10, and loc is set to "beijing" or "shanghai".
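The data taosdemo just generated can also be checked without opening an interactive shell, by passing a single statement to `taos -s` inside the container (use the container ID reported by `docker ps`):

```bash
# Count the rows taosdemo wrote into the demo super table test.meters.
docker exec -it cdf548465318 taos -s "select count(*) from test.meters;"
```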
3. Go back into the TDengine shell and look at the data generated by taosdemo.

- **Enter the shell.**

```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos

Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos>
```

- **Show the databases.**

```bash
$ taos> show databases;
name | created_time | ntables | vgroups | ···
test | 2021-04-13 02:14:15.950 | 10000 | 6 | ···
log | 2021-04-12 09:36:37.549 | 4 | 1 | ···

```

- **Show the super tables.**

```bash
$ taos> use test;
Database changed.

$ taos> show stables;
name | created_time | columns | tags | tables |
=====================================================================================
meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.001737s)

```

- **Query a table, limiting the output to ten rows.**

```bash
$ taos> select * from test.t0 limit 10;
ts | f1 | f2 | f3 |
====================================================================
2017-07-14 02:40:01.000 | 3 | 9 | 0 |
2017-07-14 02:40:02.000 | 0 | 1 | 2 |
2017-07-14 02:40:03.000 | 7 | 2 | 3 |
2017-07-14 02:40:04.000 | 9 | 4 | 5 |
2017-07-14 02:40:05.000 | 1 | 2 | 5 |
2017-07-14 02:40:06.000 | 6 | 3 | 2 |
2017-07-14 02:40:07.000 | 4 | 7 | 8 |
2017-07-14 02:40:08.000 | 4 | 6 | 6 |
2017-07-14 02:40:09.000 | 5 | 7 | 7 |
2017-07-14 02:40:10.000 | 1 | 5 | 0 |
Query OK, 10 row(s) in set (0.003638s)

```

- **Look at the tag values of table t0.**

```bash
$ taos> select areaid, loc from test.t0;
areaid | loc |
===========================
10 | shanghai |
Query OK, 1 row(s) in set (0.002904s)

```

## Stop the TDengine Service Running in Docker

```bash
$ docker stop cdf548465318
cdf548465318
```

- **docker stop**: stop the specified running container.
- **cdf548465318**: container ID; replace it with the value returned by docker ps.

## Connect to TDengine in Docker from a Program

There are two ways to reach, from outside Docker, a TDengine service running inside a Docker container:

1. Use port mapping (-p) to expose a network port opened inside the container on a chosen port of the host. Mounting a local directory (-v) keeps data synchronized between the host and the container, so data is not lost when the container is removed.

```bash
$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd

$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}
```

- The first command starts a Docker container running TDengine and maps the container's port 6041 to port 6041 on the host.
- The second command accesses TDengine through the RESTful interface; it connects to port 6041 on the local machine, and the connection succeeds.

Note: in this example, for convenience, only port 6041, which is what the RESTful interface needs, is mapped. To connect to the TDengine service in a non-RESTful way, you need to map the 11 ports starting from 6030 (see the [TDengine 2.0 port description](https://www.taosdata.com/cn/documentation/faq#port) for the full picture). The example also mounts only /etc/taos, the directory that holds the configuration file, and does not mount a data directory.
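Any SQL statement can be submitted through the mapped RESTful port in the same way as the `show databases` call above; for example, creating a small database and table over HTTP (the names `demo` and `t` are illustrative):

```bash
# Create, fill and query a table purely through the RESTful endpoint on the host.
curl -u root:taosdata -d 'create database demo' 127.0.0.1:6041/rest/sql
curl -u root:taosdata -d 'create table demo.t (ts timestamp, speed int)' 127.0.0.1:6041/rest/sql
curl -u root:taosdata -d 'insert into demo.t values (now, 10)' 127.0.0.1:6041/rest/sql
curl -u root:taosdata -d 'select * from demo.t' 127.0.0.1:6041/rest/sql
```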
2. Use docker exec to work directly inside the container, that is, put the application code in the same Docker container as the TDengine server and connect to the TDengine service that is local to that container.

```bash
$ docker exec -it 526aa188da /bin/bash
```
@@ -10,7 +10,9 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版
 ### 通过Docker容器运行

-请参考[TDengine官方Docker镜像的发布、下载和使用](https://www.taosdata.com/blog/2020/05/13/1509.html)
+暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OSX 和 Windows 环境下尝试 TDengine。
+
+详细操作方法请参照 [通过Docker快速体验TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。

 ### <a class="anchor" id="package-install"></a>通过安装包安装
@@ -101,7 +103,7 @@ $ taos -h 192.168.0.1 -s "use db; show tables;"
 ### 运行SQL命令脚本

-TDengine终端可以通过`source`命令来运行SQL命令脚本.
+TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.

 ```mysql
 taos> source <filename>;
@@ -109,10 +111,10 @@ taos> source <filename>;
 ### Shell小技巧

-- 可以使用上下光标键查看已经历史输入的命令
-- 修改用户密码。在shell中使用alter user命令
+- 可以使用上下光标键查看历史输入的指令
+- 修改用户密码。在 shell 中使用 alter user 指令
 - ctrl+c 中止正在进行中的查询
-- 执行`RESET QUERY CACHE`清空本地缓存的表的schema
+- 执行 `RESET QUERY CACHE` 清空本地缓存的表 schema


 ## <a class="anchor" id="demo"></a>TDengine 极速体验
@@ -212,7 +214,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
 | **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
 | **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
 | **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
-| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
+| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
 | **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |

 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
@@ -178,7 +178,7 @@ TDengine 分布式架构的逻辑结构图如下:
 **FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。

-**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。
+**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))

 **集群对外连接:** TDengine集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的End Point(FQDN加配置的端口号)。通过命令行CLI启动应用taos时,可以通过选项-h来指定数据节点的FQDN, -P来指定其配置的端口号,如果端口不配置,将采用TDengine的系统配置参数serverPort。
@@ -120,7 +120,7 @@ if (async) {
 }
 ```

-TDengine中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数`async`的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用`taos_consume`来拉取数据,而异步则由API在内部的另一个线程中调用`taos_consume`,然后把拉取到的数据交给回调函数`subscribe_callback`去处理。
+TDengine中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数`async`的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用`taos_consume`来拉取数据,而异步则由API在内部的另一个线程中调用`taos_consume`,然后把拉取到的数据交给回调函数`subscribe_callback`去处理。(注意,`subscribe_callback` 中不宜做较为耗时的操作,否则有可能导致客户端阻塞等不可控的问题。)

 参数`taos`是一个已经建立好的数据库连接,在同步模式下无特殊要求。但在异步模式下,需要注意它不会被其它线程使用,否则可能导致不可预计的错误,因为回调函数在API的内部线程中被调用,而TDengine的部分API不是线程安全的。
@@ -14,7 +14,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
 | **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ |
 | **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
 | **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
-| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
+| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
 | **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |

 其中 ● 表示经过官方测试验证, ○ 表示非官方测试验证。
@@ -23,7 +23,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、

 * 在没有安装TDengine服务端软件的系统中使用连接器(除RESTful外)访问 TDengine 数据库,需要安装相应版本的客户端安装包来使应用驱动(Linux系统中文件名为libtaos.so,Windows系统中为taos.dll)被安装在系统中,否则会产生无法找到相应库文件的错误。
 * 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。
-* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。
+* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。
 * 无论选用何种编程语言的连接器,2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池,以避免连接内的“USE statement”状态量在线程之间相互干扰(但连接的查询和写入操作都是线程安全的)。

 ## <a class="anchor" id="driver"></a>安装连接器驱动步骤
@@ -377,6 +377,7 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
 * res:查询结果集,注意结果集中可能没有记录
 * param:调用 `taos_subscribe`时客户程序提供的附加参数
 * code:错误码
+**注意**:在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。

 * `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
@@ -743,7 +744,7 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间

 下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效

-- httpPort: 对外提供RESTful服务的端口号,默认绑定到6041
+- 对外提供RESTful服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)
 - httpMaxThreads: 启动的线程数量,默认为2(2.0.17版本开始,默认值改为CPU核数的一半向下取整)
 - restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240
 - httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式
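Since these RESTful parameters only take effect after taosd is restarted, a change typically looks like the sketch below; the parameter value is an example and the service is assumed to be managed by a systemd unit named `taosd`:

```bash
# Raise the maximum number of rows the RESTful interface returns, then restart
# taosd so the new value in taos.cfg is picked up.
sudo sed -i 's/^#*\s*restfulRowLimit.*/restfulRowLimit 20480/' /etc/taos/taos.cfg
sudo systemctl restart taosd
```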
@@ -55,12 +55,11 @@ arbitrator ha.taosdata.com:6042
 | 4 | statusInterval | dnode向mnode报告状态时长 |
 | 5 | arbitrator | 系统中裁决器的end point |
 | 6 | timezone | 时区 |
-| 7 | locale | 系统区位信息及编码格式 |
-| 8 | charset | 字符集编码 |
-| 9 | balance | 是否启动负载均衡 |
-| 10 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
-| 11 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 |
+| 7 | balance | 是否启动负载均衡 |
+| 8 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
+| 9 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 |

+备注:在 2.0.19.0 及更早的版本中,除以上 9 项参数外,dnode 加入集群时,还会要求 locale 和 charset 参数的取值也一致。

 ## <a class="anchor" id="node-one"></a>启动第一个数据节点
@@ -100,8 +100,7 @@ taosd -C

 - firstEp: taosd启动时,主动连接的集群中首个dnode的end point, 默认值为localhost:6030。
 - fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。
-- serverPort:taosd启动后,对外服务的端口号,默认值为6030。
-- httpPort: RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求, 默认值为6041。
+- serverPort:taosd启动后,对外服务的端口号,默认值为6030。(RESTful服务使用的端口号是在此基础上+11,即默认值为6041。)
 - dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。
 - logDir:日志文件目录,客户端和服务器的运行日志文件将写入该目录。默认值:/var/log/taos。
 - arbitrator:系统中裁决器的end point, 缺省值为空。
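Two quick ways to check the effective values of the parameters listed above, assuming the default configuration path `/etc/taos/taos.cfg`; `taosd -C` is the dump command shown in this section's context line:

```bash
# Read the key parameters straight from the config file, or let taosd print
# the configuration it would actually run with.
grep -E '^(firstEp|fqdn|serverPort|dataDir|logDir|arbitrator)' /etc/taos/taos.cfg
taosd -C
```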
@@ -115,7 +114,7 @@ taosd -C

 - queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为 MB(2.0.15 以前的版本中,此参数的单位是字节)。
 - ratioOfQueryCores: 设置查询线程的最大数量。最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。

-**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。
+**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))

 不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数:
@@ -150,7 +149,7 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
 - maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
 - maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。
 - arbitrator: 系统中裁决器的end point,缺省为空。
-- timezone、locale、charset 的配置见客户端配置。
+- timezone、locale、charset 的配置见客户端配置。(2.0.20.0 及以上的版本里,集群中加入新节点已不要求 locale 和 charset 参数取值一致)

 为方便调试,可通过SQL语句临时调整每个dnode的日志配置,系统重启后会失效:
@ -463,41 +462,41 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
|
||||||
|
|
||||||
| 关键字列表 | | | | |
|
| 关键字列表 | | | | |
|
||||||
| ---------- | ----------- | ------------ | ---------- | --------- |
|
| ---------- | ----------- | ------------ | ---------- | --------- |
|
||||||
| ABLOCKS | CONNECTIONS | GT | MNODES | SLIDING |
|
| ABLOCKS | CONNECTIONS | HAVING | MODULES | SLIMIT |
|
||||||
| ABORT | COPY | ID | MODULES | SLIMIT |
|
| ABORT | COPY | ID | NCHAR | SMALLINT |
|
||||||
| ACCOUNT | COUNT | IF | NCHAR | SMALLINT |
|
| ACCOUNT | COUNT | IF | NE | SPREAD |
|
||||||
| ACCOUNTS | CREATE | IGNORE | NE | SPREAD |
|
| ACCOUNTS | CREATE | IGNORE | NONE | STABLE |
|
||||||
| ADD | CTIME | IMMEDIATE | NONE | STABLE |
|
| ADD | CTIME | IMMEDIATE | NOT | STABLES |
|
||||||
| AFTER | DATABASE | IMPORT | NOT | STABLES |
|
| AFTER | DATABASE | IMPORT | NOTNULL | STAR |
|
||||||
| ALL | DATABASES | IN | NOTNULL | STAR |
|
| ALL | DATABASES | IN | NOW | STATEMENT |
|
||||||
| ALTER | DAYS | INITIALLY | NOW | STATEMENT |
|
| ALTER | DAYS | INITIALLY | OF | STDDEV |
|
||||||
| AND | DEFERRED | INSERT | OF | STDDEV |
|
| AND | DEFERRED | INSERT | OFFSET | STREAM |
|
||||||
| AS | DELIMITERS | INSTEAD | OFFSET | STREAM |
|
| AS | DELIMITERS | INSTEAD | OR | STREAMS |
|
||||||
| ASC | DESC | INTEGER | OR | STREAMS |
|
| ASC | DESC | INTEGER | ORDER | STRING |
|
||||||
| ATTACH | DESCRIBE | INTERVAL | ORDER | STRING |
|
| ATTACH | DESCRIBE | INTERVAL | PASS | SUM |
|
||||||
| AVG | DETACH | INTO | PASS | SUM |
|
| AVG | DETACH | INTO | PERCENTILE | TABLE |
|
||||||
| BEFORE | DIFF | IP | PERCENTILE | TABLE |
|
| BEFORE | DIFF | IP | PLUS | TABLES |
|
||||||
| BEGIN | DISTINCT | IS | PLUS | TABLES |
|
| BEGIN | DISTINCT | IS | PRAGMA | TAG |
|
||||||
| BETWEEN | DIVIDE | ISNULL | PRAGMA | TAG |
|
| BETWEEN | DIVIDE | ISNULL | PREV | TAGS |
|
||||||
| BIGINT | DNODE | JOIN | PREV | TAGS |
|
| BIGINT | DNODE | JOIN | PRIVILEGE | TBLOCKS |
|
||||||
| BINARY | DNODES | KEEP | PRIVILEGE | TBLOCKS |
|
| BINARY | DNODES | KEEP | QUERIES | TBNAME |
|
||||||
| BITAND | DOT | KEY | QUERIES | TBNAME |
|
| BITAND | DOT | KEY | QUERY | TIMES |
|
||||||
| BITNOT | DOUBLE | KILL | QUERY | TIMES |
|
| BITNOT | DOUBLE | KILL | RAISE | TIMESTAMP |
|
||||||
| BITOR | DROP | LAST | RAISE | TIMESTAMP |
|
| BITOR | DROP | LAST | REM | TINYINT |
|
||||||
| BOOL | EACH | LE | REM | TINYINT |
|
| BOOL | EACH | LE | REPLACE | TOP |
|
||||||
| BOTTOM | END | LEASTSQUARES | REPLACE | TOP |
|
| BOTTOM | END | LEASTSQUARES | REPLICA | TOPIC |
|
||||||
| BY | EQ | LIKE | REPLICA | TRIGGER |
|
| BY | EQ | LIKE | RESET | TRIGGER |
|
||||||
| CACHE | EXISTS | LIMIT | RESET | UMINUS |
|
| CACHE | EXISTS | LIMIT | RESTRICT | UMINUS |
|
||||||
| CASCADE | EXPLAIN | LINEAR | RESTRICT | UPLUS |
|
| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
|
||||||
| CHANGE | FAIL | LOCAL | ROW | USE |
|
| CHANGE | FAIL | LOCAL | ROWS | USE |
|
||||||
| CLOG | FILL | LP | ROWS | USER |
|
| CLOG | FILL | LP | RP | USER |
|
||||||
| CLUSTER | FIRST | LSHIFT | RP | USERS |
|
| CLUSTER | FIRST | LSHIFT | RSHIFT | USERS |
|
||||||
| COLON | FLOAT | LT | RSHIFT | USING |
|
| COLON | FLOAT | LT | SCORES | USING |
|
||||||
| COLUMN | FOR | MATCH | SCORES | VALUES |
|
| COLUMN | FOR | MATCH | SELECT | VALUES |
|
||||||
| COMMA | FROM | MAX | SELECT | VARIABLE |
|
| COMMA | FROM | MAX | SEMI | VARIABLE |
|
||||||
| COMP | GE | METRIC | SEMI | VGROUPS |
|
| COMP | GE | METRIC | SET | VGROUPS |
|
||||||
| CONCAT | GLOB | METRICS | SET | VIEW |
|
| CONCAT | GLOB | METRICS | SHOW | VIEW |
|
||||||
| CONFIGS | GRANTS | MIN | SHOW | WAVG |
|
| CONFIGS | GRANTS | MIN | SLASH | WAVG |
|
||||||
| CONFLICT | GROUP | MINUS | SLASH | WHERE |
|
| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
|
||||||
| CONNECTION | | | | |
|
| CONNECTION | GT | MNODES | | |
|
||||||
|
|
||||||
|
|
|
@@ -407,10 +407,10 @@ SELECT select_expr [, select_expr ...]
 [INTERVAL (interval_val [, interval_offset])]
 [SLIDING sliding_val]
 [FILL fill_val]
-[GROUP BY col_list]
+[GROUP BY col_list [HAVING having_condition]]
 [ORDER BY col_list { DESC | ASC }]
-[SLIMIT limit_val [, SOFFSET offset_val]]
-[LIMIT limit_val [, OFFSET offset_val]]
+[SLIMIT limit_val [SOFFSET offset_val]]
+[LIMIT limit_val [OFFSET offset_val]]
 [>> export_file];
 ```
@@ -626,7 +626,8 @@ Query OK, 1 row(s) in set (0.001091s)
 - WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串
 - 输出结果缺省按首列时间戳升序排序,但可以指定按降序排序( _c0 指首列时间戳)。使用 ORDER BY 对其他字段进行排序为非法操作。
 - 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。
-- 参数 SLIMIT 控制由 GROUP BY 指令划分的每个分组中的输出条数。
+* 在有 GROUP BY 子句的情况下,LIMIT 参数控制的是每个分组中至多允许输出的条数。
+- 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。
 - 通过”>>"输出结果可以导出到指定文件

 ### 支持的条件过滤操作
@@ -647,6 +648,15 @@ Query OK, 1 row(s) in set (0.001091s)
 2. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。
 3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
+
+### GROUP BY 之后的 HAVING 过滤
+
+从 2.0.20 版本开始,GROUP BY 之后允许再跟一个 HAVING 子句,对成组后的各组数据再做筛选。HAVING 子句可以使用聚合函数和选择函数作为过滤条件(但暂时不支持 LEASTSQUARES、TOP、BOTTOM、LAST_ROW)。
+
+例如,如下语句只会输出 `AVG(f1) > 0` 的分组:
+```mysql
+SELECT AVG(f1), SPREAD(f1, f2, st2.f1) FROM st2 WHERE f1 > 0 GROUP BY f1 HAVING AVG(f1) > 0;
+```

 ### SQL 示例

 - 对于下面的例子,表tb1用以下语句创建
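The HAVING example above can also be run non-interactively by wrapping it in `taos -s`, reusing the super table `st2` and column `f1` from the documented statement:

```bash
# Execute the documented GROUP BY ... HAVING query in one shot from the shell.
taos -s "SELECT AVG(f1), SPREAD(f1, f2, st2.f1) FROM st2 WHERE f1 > 0 GROUP BY f1 HAVING AVG(f1) > 0;"
```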
@@ -950,7 +960,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
 ```mysql
 SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
 ```
-功能说明: 统计表/超级表中某列的值最大*k*个非NULL值。若多于k个列值并列最大,则返回时间戳小的。
+功能说明: 统计表/超级表中某列的值最大 *k* 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。

 返回结果数据类型:同应用的字段。
@@ -984,7 +994,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
 ```mysql
 SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
 ```
-功能说明:统计表/超级表中某列的值最小*k*个非NULL值。若多于k个列值并列最小,则返回时间戳小的。
+功能说明:统计表/超级表中某列的值最小 *k* 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。

 返回结果数据类型:同应用的字段。
@@ -1216,6 +1226,7 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
 - 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳
 - 标签最多允许 128 个,可以 1 个,标签总长度不超过 16k 个字符
 - SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M
+- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。
 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制

 ## TAOS SQL其他约定
@@ -166,3 +166,18 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端
 2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。
 3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
 4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。
+
+## <a class="anchor" id="port"></a>19. TDengine 都会用到哪些网络端口?
+
+在 TDengine 2.0 版本中,会用到以下这些网络端口(以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么这里列举的端口都会出现变化),管理员可以参考这里的信息调整防火墙设置:
+
+| 协议 | 默认端口 | 用途说明 | 修改方法 |
+| --- | --------- | ------------------------------- | ------------------------------ |
+| TCP | 6030 | 客户端与服务端之间通讯。 | 由配置文件设置 serverPort 决定。 |
+| TCP | 6035 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
+| TCP | 6040 | 多节点集群的节点间数据同步。 | 随 serverPort 端口变化。 |
+| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。 |
+| TCP | 6042 | Arbitrator 的服务端口。 | 因 Arbitrator 启动参数设置变化。 |
+| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
+| UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 |
+| UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
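As a sketch of how the port table above might be turned into firewall rules on a single node (assuming the default serverPort of 6030 and a firewalld-based host; adapt the commands to whichever firewall is in use):

```bash
# Open TCP and UDP 6030-6042, which covers the server, cluster, sync,
# RESTful and arbitrator ports listed in the table above.
sudo firewall-cmd --permanent --add-port=6030-6042/tcp
sudo firewall-cmd --permanent --add-port=6030-6042/udp
sudo firewall-cmd --reload
```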
@@ -1,6 +1,6 @@
 name: tdengine
 base: core18
-version: '2.0.18.0'
+version: '2.0.20.0'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
 description: |
@@ -72,7 +72,7 @@ parts:
 - usr/bin/taosd
 - usr/bin/taos
 - usr/bin/taosdemo
-- usr/lib/libtaos.so.2.0.18.0
+- usr/lib/libtaos.so.2.0.20.0
 - usr/lib/libtaos.so.1
 - usr/lib/libtaos.so
@ -36,19 +36,6 @@ extern "C" {
|
||||||
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
|
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
|
||||||
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
|
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
|
||||||
|
|
||||||
|
|
||||||
typedef struct SParsedColElem {
|
|
||||||
int16_t colIndex;
|
|
||||||
uint16_t offset;
|
|
||||||
} SParsedColElem;
|
|
||||||
|
|
||||||
typedef struct SParsedDataColInfo {
|
|
||||||
int16_t numOfCols;
|
|
||||||
int16_t numOfAssignedCols;
|
|
||||||
SParsedColElem elems[TSDB_MAX_COLUMNS];
|
|
||||||
bool hasVal[TSDB_MAX_COLUMNS];
|
|
||||||
} SParsedDataColInfo;
|
|
||||||
|
|
||||||
#pragma pack(push,1)
|
#pragma pack(push,1)
|
||||||
// this struct is transfered as binary, padding two bytes to avoid
|
// this struct is transfered as binary, padding two bytes to avoid
|
||||||
// an 'uid' whose low bytes is 0xff being recoginized as NULL,
|
// an 'uid' whose low bytes is 0xff being recoginized as NULL,
|
||||||
|
@ -118,6 +105,8 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
|
||||||
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
|
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
|
||||||
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
|
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
|
||||||
|
|
||||||
|
void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo);
|
||||||
|
|
||||||
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
|
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
|
||||||
uint32_t offset);
|
uint32_t offset);
|
||||||
|
|
||||||
|
@ -147,6 +136,7 @@ bool isStabledev(SQueryInfo* pQueryInfo);
|
||||||
bool isTsCompQuery(SQueryInfo* pQueryInfo);
|
bool isTsCompQuery(SQueryInfo* pQueryInfo);
|
||||||
bool isSimpleAggregate(SQueryInfo* pQueryInfo);
|
bool isSimpleAggregate(SQueryInfo* pQueryInfo);
|
||||||
bool isBlockDistQuery(SQueryInfo* pQueryInfo);
|
bool isBlockDistQuery(SQueryInfo* pQueryInfo);
|
||||||
|
int32_t tscGetTopbotQueryParam(SQueryInfo* pQueryInfo);
|
||||||
|
|
||||||
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
|
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
|
||||||
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
||||||
|
|
|
@ -102,10 +102,24 @@ typedef struct SColumnIndex {
|
||||||
int16_t columnIndex;
|
int16_t columnIndex;
|
||||||
} SColumnIndex;
|
} SColumnIndex;
|
||||||
|
|
||||||
|
typedef struct SColumn {
|
||||||
|
uint64_t tableUid;
|
||||||
|
int32_t columnIndex;
|
||||||
|
SColumnInfo info;
|
||||||
|
} SColumn;
|
||||||
|
|
||||||
|
typedef struct SExprFilter {
|
||||||
|
tSqlExpr *pExpr; //used for having parse
|
||||||
|
SExprInfo *pExprInfo;
|
||||||
|
SArray *fp;
|
||||||
|
SColumn *pFilters; //having filter info
|
||||||
|
}SExprFilter;
|
||||||
|
|
||||||
typedef struct SInternalField {
|
typedef struct SInternalField {
|
||||||
TAOS_FIELD field;
|
TAOS_FIELD field;
|
||||||
bool visible;
|
bool visible;
|
||||||
SExprInfo *pExpr;
|
SExprInfo *pExpr;
|
||||||
|
SExprFilter *pFieldFilters;
|
||||||
} SInternalField;
|
} SInternalField;
|
||||||
|
|
||||||
typedef struct SFieldInfo {
|
typedef struct SFieldInfo {
|
||||||
|
@ -114,12 +128,6 @@ typedef struct SFieldInfo {
|
||||||
SArray *internalField; // SArray<SInternalField>
|
SArray *internalField; // SArray<SInternalField>
|
||||||
} SFieldInfo;
|
} SFieldInfo;
|
||||||
|
|
||||||
typedef struct SColumn {
|
|
||||||
uint64_t tableUid;
|
|
||||||
int32_t columnIndex;
|
|
||||||
// SColumnIndex colIndex;
|
|
||||||
SColumnInfo info;
|
|
||||||
} SColumn;
|
|
||||||
|
|
||||||
typedef struct SCond {
|
typedef struct SCond {
|
||||||
uint64_t uid;
|
uint64_t uid;
|
||||||
|
@ -161,6 +169,19 @@ typedef struct SParamInfo {
|
||||||
uint32_t offset;
|
uint32_t offset;
|
||||||
} SParamInfo;
|
} SParamInfo;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct SBoundColumn {
|
||||||
|
bool hasVal; // denote if current column has bound or not
|
||||||
|
int32_t offset; // all column offset value
|
||||||
|
} SBoundColumn;
|
||||||
|
|
||||||
|
typedef struct SParsedDataColInfo {
|
||||||
|
int16_t numOfCols;
|
||||||
|
int16_t numOfBound;
|
||||||
|
int32_t *boundedColumns;
|
||||||
|
SBoundColumn *cols;
|
||||||
|
} SParsedDataColInfo;
|
||||||
|
|
||||||
typedef struct STableDataBlocks {
|
typedef struct STableDataBlocks {
|
||||||
SName tableName;
|
SName tableName;
|
||||||
int8_t tsSource; // where does the UNIX timestamp come from, server or client
|
int8_t tsSource; // where does the UNIX timestamp come from, server or client
|
||||||
|
@ -175,6 +196,8 @@ typedef struct STableDataBlocks {
|
||||||
STableMeta *pTableMeta; // the tableMeta of current table, the table meta will be used during submit, keep a ref to avoid to be removed from cache
|
STableMeta *pTableMeta; // the tableMeta of current table, the table meta will be used during submit, keep a ref to avoid to be removed from cache
|
||||||
char *pData;
|
char *pData;
|
||||||
|
|
||||||
|
SParsedDataColInfo boundColumnInfo;
|
||||||
|
|
||||||
// for parameter ('?') binding
|
// for parameter ('?') binding
|
||||||
uint32_t numOfAllocedParams;
|
uint32_t numOfAllocedParams;
|
||||||
uint32_t numOfParams;
|
uint32_t numOfParams;
|
||||||
|
@ -223,6 +246,7 @@ typedef struct SQueryInfo {
|
||||||
struct SQueryInfo *sibling; // sibling
|
struct SQueryInfo *sibling; // sibling
|
||||||
SArray *pUpstream; // SArray<struct SQueryInfo>
|
SArray *pUpstream; // SArray<struct SQueryInfo>
|
||||||
struct SQueryInfo *pDownstream;
|
struct SQueryInfo *pDownstream;
|
||||||
|
int32_t havingFieldNum;
|
||||||
} SQueryInfo;
|
} SQueryInfo;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -298,6 +322,7 @@ typedef struct {
|
||||||
char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
|
char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
|
||||||
SColumnIndex* pColumnIndex;
|
SColumnIndex* pColumnIndex;
|
||||||
|
|
||||||
|
TAOS_FIELD* final;
|
||||||
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
|
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
|
||||||
struct SLocalMerger *pLocalMerger;
|
struct SLocalMerger *pLocalMerger;
|
||||||
} SSqlRes;
|
} SSqlRes;
|
||||||
|
@ -425,6 +450,7 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
|
||||||
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock);
|
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock);
|
||||||
|
|
||||||
void handleDownstreamOperator(SSqlRes* pRes, SQueryInfo* pQueryInfo);
|
void handleDownstreamOperator(SSqlRes* pRes, SQueryInfo* pQueryInfo);
|
||||||
|
void destroyTableNameList(SSqlCmd* pCmd);
|
||||||
|
|
||||||
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
|
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
|
||||||
|
|
||||||
|
@ -462,6 +488,7 @@ char* tscGetSqlStr(SSqlObj* pSql);
|
||||||
bool tscIsQueryWithLimit(SSqlObj* pSql);
|
bool tscIsQueryWithLimit(SSqlObj* pSql);
|
||||||
|
|
||||||
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
|
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
|
||||||
|
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols);
|
||||||
|
|
||||||
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
|
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
|
||||||
|
|
||||||
|
|
|
@ -22,6 +22,7 @@
|
||||||
#include "tscUtil.h"
|
#include "tscUtil.h"
|
||||||
#include "tschemautil.h"
|
#include "tschemautil.h"
|
||||||
#include "tsclient.h"
|
#include "tsclient.h"
|
||||||
|
#include "qUtil.h"
|
||||||
|
|
||||||
typedef struct SCompareParam {
|
typedef struct SCompareParam {
|
||||||
SLocalDataSource **pLocalData;
|
SLocalDataSource **pLocalData;
|
||||||
|
|
|
@ -40,6 +40,7 @@ enum {
|
||||||
};
|
};
|
||||||
|
|
||||||
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
|
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
|
||||||
|
static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema, char* str, char** end);
|
||||||
|
|
||||||
static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
|
static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
|
||||||
errno = 0;
|
errno = 0;
|
||||||
|
@ -94,12 +95,12 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
|
||||||
*/
|
*/
|
||||||
SStrToken valueToken;
|
SStrToken valueToken;
|
||||||
index = 0;
|
index = 0;
|
||||||
sToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
|
sToken = tStrGetToken(pTokenEnd, &index, false);
|
||||||
pTokenEnd += index;
|
pTokenEnd += index;
|
||||||
|
|
||||||
if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) {
|
if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) {
|
||||||
index = 0;
|
index = 0;
|
||||||
valueToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
|
valueToken = tStrGetToken(pTokenEnd, &index, false);
|
||||||
pTokenEnd += index;
|
pTokenEnd += index;
|
||||||
|
|
||||||
if (valueToken.n < 2) {
|
if (valueToken.n < 2) {
|
||||||
|
@ -117,7 +118,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
|
||||||
if (sToken.type == TK_PLUS) {
|
if (sToken.type == TK_PLUS) {
|
||||||
useconds += interval;
|
useconds += interval;
|
||||||
} else {
|
} else {
|
||||||
useconds = (useconds >= interval) ? useconds - interval : 0;
|
useconds = useconds - interval;
|
||||||
}
|
}
|
||||||
|
|
||||||
*next = pTokenEnd;
|
*next = pTokenEnd;
|
||||||
|
@ -127,12 +128,11 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo extract the null value check
|
|
||||||
static bool isNullStr(SStrToken* pToken) {
|
static bool isNullStr(SStrToken* pToken) {
|
||||||
return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
|
return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
|
||||||
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
|
(strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
|
||||||
}
|
}
|
||||||
int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
|
int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
|
||||||
int16_t timePrec) {
|
int16_t timePrec) {
|
||||||
int64_t iv;
|
int64_t iv;
|
||||||
int32_t ret;
|
int32_t ret;
|
||||||
|
@ -417,29 +417,32 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, SSqlCmd* pCmd,
|
int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int16_t timePrec, int32_t *len,
|
||||||
int16_t timePrec, int32_t *code, char *tmpTokenBuf) {
|
char *tmpTokenBuf) {
|
||||||
int32_t index = 0;
|
int32_t index = 0;
|
||||||
SStrToken sToken = {0};
|
SStrToken sToken = {0};
|
||||||
char * payload = pDataBlocks->pData + pDataBlocks->size;
|
char *payload = pDataBlocks->pData + pDataBlocks->size;
|
||||||
|
|
||||||
|
SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo;
|
||||||
|
SSchema *schema = tscGetTableSchema(pDataBlocks->pTableMeta);
|
||||||
|
|
||||||
// 1. set the parsed value from sql string
|
// 1. set the parsed value from sql string
|
||||||
int32_t rowSize = 0;
|
int32_t rowSize = 0;
|
||||||
for (int i = 0; i < spd->numOfAssignedCols; ++i) {
|
for (int i = 0; i < spd->numOfBound; ++i) {
|
||||||
// the start position in data block buffer of current value in sql
|
// the start position in data block buffer of current value in sql
|
||||||
char * start = payload + spd->elems[i].offset;
|
int32_t colIndex = spd->boundedColumns[i];
|
||||||
int16_t colIndex = spd->elems[i].colIndex;
|
|
||||||
SSchema *pSchema = schema + colIndex;
|
char *start = payload + spd->cols[colIndex].offset;
|
||||||
|
SSchema *pSchema = &schema[colIndex];
|
||||||
rowSize += pSchema->bytes;
|
rowSize += pSchema->bytes;
|
||||||
|
|
||||||
index = 0;
|
index = 0;
|
||||||
sToken = tStrGetToken(*str, &index, true, 0, NULL);
|
sToken = tStrGetToken(*str, &index, true);
|
||||||
*str += index;
|
*str += index;
|
||||||
|
|
||||||
if (sToken.type == TK_QUESTION) {
|
if (sToken.type == TK_QUESTION) {
|
||||||
if (pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
|
if (pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
|
||||||
*code = tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
|
return tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
|
||||||
return -1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
uint32_t offset = (uint32_t)(start - pDataBlocks->pData);
|
uint32_t offset = (uint32_t)(start - pDataBlocks->pData);
|
||||||
|
@ -448,15 +451,13 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
|
||||||
}
|
}
|
||||||
|
|
||||||
strcpy(pCmd->payload, "client out of memory");
|
strcpy(pCmd->payload, "client out of memory");
|
||||||
*code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
return -1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int16_t type = sToken.type;
|
int16_t type = sToken.type;
|
||||||
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
|
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
|
||||||
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
|
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
|
||||||
*code = tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
|
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
|
||||||
return -1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove quotation marks
|
// Remove quotation marks
|
||||||
|
@ -485,26 +486,23 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
|
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
|
||||||
int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
|
int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
|
||||||
if (ret != TSDB_CODE_SUCCESS) {
|
if (ret != TSDB_CODE_SUCCESS) {
|
||||||
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
return ret;
|
||||||
return -1; // NOTE: here 0 mean error!
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
|
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
|
||||||
tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z);
|
tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z);
|
||||||
*code = TSDB_CODE_TSC_INVALID_TIME_STAMP;
|
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
|
||||||
return -1;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. set the null value for the columns that do not assign values
|
// 2. set the null value for the columns that do not assign values
|
||||||
if (spd->numOfAssignedCols < spd->numOfCols) {
|
if (spd->numOfBound < spd->numOfCols) {
|
||||||
char *ptr = payload;
|
char *ptr = payload;
|
||||||
|
|
||||||
for (int32_t i = 0; i < spd->numOfCols; ++i) {
|
for (int32_t i = 0; i < spd->numOfCols; ++i) {
|
||||||
|
if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null
|
||||||
if (!spd->hasVal[i]) { // current column do not have any value to insert, set it to null
|
|
||||||
if (schema[i].type == TSDB_DATA_TYPE_BINARY) {
|
if (schema[i].type == TSDB_DATA_TYPE_BINARY) {
|
||||||
varDataSetLen(ptr, sizeof(int8_t));
|
varDataSetLen(ptr, sizeof(int8_t));
|
||||||
*(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL;
|
*(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL;
|
||||||
|
@ -522,7 +520,8 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
|
||||||
rowSize = (int32_t)(ptr - payload);
|
rowSize = (int32_t)(ptr - payload);
|
||||||
}
|
}
|
||||||
|
|
||||||
return rowSize;
|
*len = rowSize;
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t rowDataCompar(const void *lhs, const void *rhs) {
|
static int32_t rowDataCompar(const void *lhs, const void *rhs) {
|
||||||
|
@ -536,80 +535,79 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMeta, int maxRows,
|
int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSqlCmd* pCmd, int32_t* numOfRows, char *tmpTokenBuf) {
|
||||||
SParsedDataColInfo *spd, SSqlCmd* pCmd, int32_t *code, char *tmpTokenBuf) {
|
|
||||||
int32_t index = 0;
|
int32_t index = 0;
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
(*numOfRows) = 0;
|
||||||
|
|
||||||
SStrToken sToken;
|
SStrToken sToken;
|
||||||
|
|
||||||
int32_t numOfRows = 0;
|
STableMeta* pTableMeta = pDataBlock->pTableMeta;
|
||||||
|
|
||||||
SSchema *pSchema = tscGetTableSchema(pTableMeta);
|
|
||||||
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
|
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
|
||||||
|
|
||||||
int32_t precision = tinfo.precision;
|
int32_t precision = tinfo.precision;
|
||||||
|
|
||||||
if (spd->hasVal[0] == false) {
|
|
||||||
*code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", *str);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
index = 0;
|
index = 0;
|
||||||
sToken = tStrGetToken(*str, &index, false, 0, NULL);
|
sToken = tStrGetToken(*str, &index, false);
|
||||||
if (sToken.n == 0 || sToken.type != TK_LP) break;
|
if (sToken.n == 0 || sToken.type != TK_LP) break;
|
||||||
|
|
||||||
*str += index;
|
*str += index;
|
||||||
if (numOfRows >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
|
if ((*numOfRows) >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
|
||||||
int32_t tSize;
|
int32_t tSize;
|
||||||
*code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
|
code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
|
||||||
if (*code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
|
if (code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
|
||||||
strcpy(pCmd->payload, "client out of memory");
|
strcpy(pCmd->payload, "client out of memory");
|
||||||
return -1;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(tSize > maxRows);
|
ASSERT(tSize > maxRows);
|
||||||
maxRows = tSize;
|
maxRows = tSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, pCmd, precision, code, tmpTokenBuf);
|
int32_t len = 0;
|
||||||
if (len <= 0) { // error message has been set in tsParseOneRowData
|
code = tsParseOneRow(str, pDataBlock, pCmd, precision, &len, tmpTokenBuf);
|
||||||
return -1;
|
if (code != TSDB_CODE_SUCCESS) { // error message has been set in tsParseOneRow, return directly
|
||||||
|
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
pDataBlock->size += len;
|
pDataBlock->size += len;
|
||||||
|
|
||||||
index = 0;
|
index = 0;
|
||||||
sToken = tStrGetToken(*str, &index, false, 0, NULL);
|
sToken = tStrGetToken(*str, &index, false);
|
||||||
*str += index;
|
*str += index;
|
||||||
if (sToken.n == 0 || sToken.type != TK_RP) {
|
if (sToken.n == 0 || sToken.type != TK_RP) {
|
||||||
tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
|
tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
|
||||||
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
numOfRows++;
|
(*numOfRows)++;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (numOfRows <= 0) {
|
if ((*numOfRows) <= 0) {
|
||||||
strcpy(pCmd->payload, "no any data points");
|
strcpy(pCmd->payload, "no any data points");
|
||||||
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||||
return -1;
|
|
||||||
} else {
|
} else {
|
||||||
return numOfRows;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void tscSetAssignedColumnInfo(SParsedDataColInfo *spd, SSchema *pSchema, int32_t numOfCols) {
|
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) {
|
||||||
spd->numOfCols = numOfCols;
|
pColInfo->numOfCols = numOfCols;
|
||||||
spd->numOfAssignedCols = numOfCols;
|
pColInfo->numOfBound = numOfCols;
|
||||||
|
|
||||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t));
|
||||||
spd->hasVal[i] = true;
|
pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn));
|
||||||
spd->elems[i].colIndex = i;
|
|
||||||
|
|
||||||
|
for (int32_t i = 0; i < pColInfo->numOfCols; ++i) {
|
||||||
if (i > 0) {
|
if (i > 0) {
|
||||||
spd->elems[i].offset = spd->elems[i - 1].offset + pSchema[i - 1].bytes;
|
pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pColInfo->cols[i].hasVal = true;
|
||||||
|
pColInfo->boundedColumns[i] = i;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@@ -697,33 +695,26 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
   }
 }

-static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColInfo *spd, int32_t *totalNum) {
-  STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
-  STableMeta *pTableMeta = pTableMetaInfo->pTableMeta;
-  STableComInfo tinfo = tscGetTableInfo(pTableMeta);
+static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
+  STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta);

-  STableDataBlocks *dataBuf = NULL;
-  int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
-      sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
-  if (ret != TSDB_CODE_SUCCESS) {
-    return ret;
-  }
-
   int32_t maxNumOfRows;
-  ret = tscAllocateMemIfNeed(dataBuf, tinfo.rowSize, &maxNumOfRows);
-  if (TSDB_CODE_SUCCESS != ret) {
+  int32_t code = tscAllocateMemIfNeed(dataBuf, tinfo.rowSize, &maxNumOfRows);
+  if (TSDB_CODE_SUCCESS != code) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }

-  int32_t code = TSDB_CODE_TSC_INVALID_SQL;
-  char * tmpTokenBuf = calloc(1, 16*1024);  // used for deleting Escape character: \\, \', \"
+  code = TSDB_CODE_TSC_INVALID_SQL;
+  char *tmpTokenBuf = calloc(1, 16*1024);  // used for deleting Escape character: \\, \', \"
   if (NULL == tmpTokenBuf) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }

-  int32_t numOfRows = tsParseValues(str, dataBuf, pTableMeta, maxNumOfRows, spd, pCmd, &code, tmpTokenBuf);
+  int32_t numOfRows = 0;
+  code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);

   free(tmpTokenBuf);
-  if (numOfRows <= 0) {
+  if (code != TSDB_CODE_SUCCESS) {
     return code;
   }

@@ -736,20 +727,18 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColI
   }

   SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData);
-  code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
+  code = tsSetBlockInfo(pBlocks, dataBuf->pTableMeta, numOfRows);
   if (code != TSDB_CODE_SUCCESS) {
     tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", *str);
     return code;
   }

-  dataBuf->vgId = pTableMeta->vgId;
   dataBuf->numOfTables = 1;

   *totalNum += numOfRows;
   return TSDB_CODE_SUCCESS;
 }

-static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
+static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundColumn) {
   int32_t   index = 0;
   SStrToken sToken = {0};
   SStrToken tableToken = {0};
@@ -767,38 +756,37 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {

   // get the token of specified table
   index = 0;
-  tableToken = tStrGetToken(sql, &index, false, 0, NULL);
+  tableToken = tStrGetToken(sql, &index, false);
   sql += index;

-  char *cstart = NULL;
-  char *cend = NULL;
-
   // skip possibly exists column list
   index = 0;
-  sToken = tStrGetToken(sql, &index, false, 0, NULL);
+  sToken = tStrGetToken(sql, &index, false);
   sql += index;

   int32_t numOfColList = 0;
-  bool    createTable = false;

+  // Bind table columns list in string, skip it and continue
   if (sToken.type == TK_LP) {
-    cstart = &sToken.z[0];
-    index = 0;
+    *boundColumn = &sToken.z[0];
     while (1) {
-      sToken = tStrGetToken(sql, &index, false, 0, NULL);
+      index = 0;
+      sToken = tStrGetToken(sql, &index, false);

       if (sToken.type == TK_RP) {
-        cend = &sToken.z[0];
         break;
       }

+      sql += index;
       ++numOfColList;
     }

-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
+    sToken = tStrGetToken(sql, &index, false);
     sql += index;
   }

-  if (numOfColList == 0 && cstart != NULL) {
+  if (numOfColList == 0 && (*boundColumn) != NULL) {
     return TSDB_CODE_TSC_INVALID_SQL;
   }

@@ -806,7 +794,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {

   if (sToken.type == TK_USING) {  // create table if not exists according to the super table
     index = 0;
-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
+    sToken = tStrGetToken(sql, &index, false);
     sql += index;

     //the source super table is moved to the secondary position of the pTableMetaInfo list
@@ -835,82 +823,42 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
     SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
     STableComInfo tinfo = tscGetTableInfo(pSTableMetaInfo->pTableMeta);

-    index = 0;
-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
-    sql += index;
-
     SParsedDataColInfo spd = {0};
+    tscSetBoundColumnInfo(&spd, pTagSchema, tscGetNumOfTags(pSTableMetaInfo->pTableMeta));

-    uint8_t numOfTags = tscGetNumOfTags(pSTableMetaInfo->pTableMeta);
-    spd.numOfCols = numOfTags;
-
-    // if specify some tags column
-    if (sToken.type != TK_LP) {
-      tscSetAssignedColumnInfo(&spd, pTagSchema, numOfTags);
-    } else {
-      /* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen)
-       * tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn); */
-      int16_t offset[TSDB_MAX_COLUMNS] = {0};
-      for (int32_t t = 1; t < numOfTags; ++t) {
-        offset[t] = offset[t - 1] + pTagSchema[t - 1].bytes;
-      }
-
-      while (1) {
-        index = 0;
-        sToken = tStrGetToken(sql, &index, false, 0, NULL);
-        sql += index;
-
-        if (TK_STRING == sToken.type) {
-          strdequote(sToken.z);
-          sToken.n = (uint32_t)strtrim(sToken.z);
-        }
-
-        if (sToken.type == TK_RP) {
-          break;
-        }
-
-        bool findColumnIndex = false;
-
-        // todo speedup by using hash list
-        for (int32_t t = 0; t < numOfTags; ++t) {
-          if (strncmp(sToken.z, pTagSchema[t].name, sToken.n) == 0 && strlen(pTagSchema[t].name) == sToken.n) {
-            SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++];
-            pElem->offset = offset[t];
-            pElem->colIndex = t;
-
-            if (spd.hasVal[t] == true) {
-              return tscInvalidSQLErrMsg(pCmd->payload, "duplicated tag name", sToken.z);
-            }
-
-            spd.hasVal[t] = true;
-            findColumnIndex = true;
-            break;
-          }
-        }
-
-        if (!findColumnIndex) {
-          return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken.z);
-        }
-      }
-
-      if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > numOfTags) {
-        return tscInvalidSQLErrMsg(pCmd->payload, "tag name expected", sToken.z);
-      }

     index = 0;
-      sToken = tStrGetToken(sql, &index, false, 0, NULL);
-      sql += index;
-    }
-
-    if (sToken.type != TK_TAGS) {
+    sToken = tStrGetToken(sql, &index, false);
+    if (sToken.type != TK_TAGS && sToken.type != TK_LP) {
       return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
     }

-    index = 0;
-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
+    // parse the bound tags column
+    if (sToken.type == TK_LP) {
+      /*
+       * insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen)
+       * tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn);
+       */
+      char* end = NULL;
+      code = parseBoundColumns(pCmd, &spd, pTagSchema, sql, &end);
+      if (code != TSDB_CODE_SUCCESS) {
+        return code;
+      }
+
+      sql = end;
+
+      index = 0;  // keywords of "TAGS"
+      sToken = tStrGetToken(sql, &index, false);
       sql += index;
+    } else {
+      sql += index;
+    }
+
+    index = 0;
+    sToken = tStrGetToken(sql, &index, false);
+    sql += index;

     if (sToken.type != TK_LP) {
-      return tscInvalidSQLErrMsg(pCmd->payload, NULL, sToken.z);
+      return tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
     }

     SKVRowBuilder kvRowBuilder = {0};
@@ -918,13 +866,11 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
       return TSDB_CODE_TSC_OUT_OF_MEMORY;
     }

-    uint32_t ignoreTokenTypes = TK_LP;
-    uint32_t numOfIgnoreToken = 1;
-    for (int i = 0; i < spd.numOfAssignedCols; ++i) {
-      SSchema* pSchema = pTagSchema + spd.elems[i].colIndex;
+    for (int i = 0; i < spd.numOfBound; ++i) {
+      SSchema* pSchema = &pTagSchema[spd.boundedColumns[i]];

       index = 0;
-      sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes);
+      sToken = tStrGetToken(sql, &index, true);
       sql += index;

       if (TK_ILLEGAL == sToken.type) {
@@ -943,7 +889,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
       }

       char tagVal[TSDB_MAX_TAGS_LEN];
-      code = tsParseOneColumnData(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
+      code = tsParseOneColumn(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
       if (code != TSDB_CODE_SUCCESS) {
         tdDestroyKVRowBuilder(&kvRowBuilder);
         return code;
@@ -952,6 +898,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
       tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
     }

+    tscDestroyBoundColumnInfo(&spd);
+
     SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
     tdDestroyKVRowBuilder(&kvRowBuilder);
     if (row == NULL) {
@@ -974,7 +922,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
     pCmd->tagData.data = pTag;

     index = 0;
-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
+    sToken = tStrGetToken(sql, &index, false);
     sql += index;
     if (sToken.n == 0 || sToken.type != TK_RP) {
       return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
@@ -989,37 +937,29 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
       return ret;
     }

-    createTable = true;
+    if (sql == NULL) {
+      return TSDB_CODE_TSC_INVALID_SQL;
+    }

     code = tscGetTableMetaEx(pSql, pTableMetaInfo, true);
     if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) {
       return code;
     }

-  } else {
-    if (cstart != NULL) {
-      sql = cstart;
   } else {
     sql = sToken.z;
-    }
-    code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);

+    if (sql == NULL) {
+      return TSDB_CODE_TSC_INVALID_SQL;
+    }
+
+    code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);
     if (pCmd->curSql == NULL) {
       assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS);
     }
   }

-  int32_t len = (int32_t)(cend - cstart + 1);
-  if (cstart != NULL && createTable == true) {
-    /* move the column list to start position of the next accessed points */
-    memmove(sql - len, cstart, len);
-    *sqlstr = sql - len;
-  } else {
     *sqlstr = sql;
-  }
-
-  if (*sqlstr == NULL) {
-    code = TSDB_CODE_TSC_INVALID_SQL;
-  }

   return code;
 }
@@ -1043,6 +983,76 @@ static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) {
   return TSDB_CODE_SUCCESS;
 }

+static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema,
+                                 char* str, char **end) {
+  pColInfo->numOfBound = 0;
+
+  memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * pColInfo->numOfCols);
+  for(int32_t i = 0; i < pColInfo->numOfCols; ++i) {
+    pColInfo->cols[i].hasVal = false;
+  }
+
+  int32_t code = TSDB_CODE_SUCCESS;
+
+  int32_t index = 0;
+  SStrToken sToken = tStrGetToken(str, &index, false);
+  str += index;
+
+  if (sToken.type != TK_LP) {
+    code = tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
+    goto _clean;
+  }
+
+  while (1) {
+    index = 0;
+    sToken = tStrGetToken(str, &index, false);
+    str += index;
+
+    if (TK_STRING == sToken.type) {
+      tscDequoteAndTrimToken(&sToken);
+    }
+
+    if (sToken.type == TK_RP) {
+      if (end != NULL) {  // set the end position
+        *end = str;
+      }
+
+      break;
+    }
+
+    bool findColumnIndex = false;
+
+    // todo speedup by using hash list
+    for (int32_t t = 0; t < pColInfo->numOfCols; ++t) {
+      if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
+        if (pColInfo->cols[t].hasVal == true) {
+          code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z);
+          goto _clean;
+        }
+
+        pColInfo->cols[t].hasVal = true;
+        pColInfo->boundedColumns[pColInfo->numOfBound] = t;
+        pColInfo->numOfBound += 1;
+        findColumnIndex = true;
+        break;
+      }
+    }
+
+    if (!findColumnIndex) {
+      code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column/tag name", sToken.z);
+      goto _clean;
+    }
+  }
+
+  memset(&pColInfo->boundedColumns[pColInfo->numOfBound], 0 , sizeof(int32_t) * (pColInfo->numOfCols - pColInfo->numOfBound));
+  return TSDB_CODE_SUCCESS;
+
+_clean:
+  pCmd->curSql = NULL;
+  pCmd->parseFinished = 1;
+  return code;
+}

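To make the intent of parseBoundColumns above concrete, here is an illustrative sketch of the same idea without the TDengine tokenizer: match each name in a "(c1, c2, ...)" list against the schema and record its index, rejecting duplicates and unknown names. The function name bindColumnList and the fixed-size buffers are hypothetical, not part of the client library.

/* Illustrative sketch only -- not the client library API. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_COLS 8

// Returns the number of bound columns, or -1 on a duplicated or unknown name.
static int bindColumnList(const char *list, const char *schema[], int numOfCols, int bound[]) {
  bool seen[MAX_COLS] = {false};
  int  numOfBound = 0;

  char buf[256];
  strncpy(buf, list, sizeof(buf) - 1);
  buf[sizeof(buf) - 1] = '\0';

  for (char *tok = strtok(buf, "(), \t"); tok != NULL; tok = strtok(NULL, "(), \t")) {
    int idx = -1;
    for (int t = 0; t < numOfCols; ++t) {
      if (strcmp(tok, schema[t]) == 0) { idx = t; break; }
    }
    if (idx < 0) return -1;     // invalid column/tag name
    if (seen[idx]) return -1;   // duplicated column name
    seen[idx] = true;
    bound[numOfBound++] = idx;  // remember schema position in bind order
  }
  return numOfBound;
}

int main(void) {
  const char *schema[] = {"ts", "current", "voltage", "phase"};
  int bound[MAX_COLS];
  int n = bindColumnList("(ts, voltage)", schema, 4, bound);
  printf("bound %d columns: schema indices %d and %d\n", n, bound[0], bound[1]);
  return 0;
}
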
/**
|
/**
|
||||||
* parse insert sql
|
* parse insert sql
|
||||||
* @param pSql
|
* @param pSql
|
||||||
|
@ -1083,7 +1093,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
int32_t index = 0;
|
int32_t index = 0;
|
||||||
SStrToken sToken = tStrGetToken(str, &index, false, 0, NULL);
|
SStrToken sToken = tStrGetToken(str, &index, false);
|
||||||
|
|
||||||
// no data in the sql string anymore.
|
// no data in the sql string anymore.
|
||||||
if (sToken.n == 0) {
|
if (sToken.n == 0) {
|
||||||
|
@ -1121,7 +1131,8 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((code = tscCheckIfCreateTable(&str, pSql)) != TSDB_CODE_SUCCESS) {
|
char *bindedColumns = NULL;
|
||||||
|
if ((code = tscCheckIfCreateTable(&str, pSql, &bindedColumns)) != TSDB_CODE_SUCCESS) {
|
||||||
/*
|
/*
|
||||||
* After retrieving the table meta from server, the sql string will be parsed from the paused position.
|
* After retrieving the table meta from server, the sql string will be parsed from the paused position.
|
||||||
* And during the getTableMetaCallback function, the sql string will be parsed from the paused position.
|
* And during the getTableMetaCallback function, the sql string will be parsed from the paused position.
|
||||||
|
@ -1141,41 +1152,22 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
||||||
}
|
}
|
||||||
|
|
||||||
index = 0;
|
index = 0;
|
||||||
sToken = tStrGetToken(str, &index, false, 0, NULL);
|
sToken = tStrGetToken(str, &index, false);
|
||||||
str += index;
|
str += index;
|
||||||
|
|
||||||
if (sToken.n == 0) {
|
if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) {
|
||||||
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z);
|
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z);
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
||||||
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||||
|
if (sToken.type == TK_FILE) {
|
||||||
if (sToken.type == TK_VALUES) {
|
|
||||||
SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
|
|
||||||
|
|
||||||
SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
|
|
||||||
tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
|
|
||||||
|
|
||||||
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
|
|
||||||
goto _clean;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* app here insert data in different vnodes, so we need to set the following
|
|
||||||
* data in another submit procedure using async insert routines
|
|
||||||
*/
|
|
||||||
code = doParseInsertStatement(pCmd, &str, &spd, &totalNum);
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
|
||||||
goto _clean;
|
|
||||||
}
|
|
||||||
} else if (sToken.type == TK_FILE) {
|
|
||||||
if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) {
|
if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) {
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
||||||
index = 0;
|
index = 0;
|
||||||
sToken = tStrGetToken(str, &index, false, 0, NULL);
|
sToken = tStrGetToken(str, &index, false);
|
||||||
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
|
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
|
||||||
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
|
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
|
||||||
goto _clean;
|
goto _clean;
|
||||||
|
@ -1199,83 +1191,63 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
||||||
tstrncpy(pCmd->payload, full_path.we_wordv[0], pCmd->allocSize);
|
tstrncpy(pCmd->payload, full_path.we_wordv[0], pCmd->allocSize);
|
||||||
wordfree(&full_path);
|
wordfree(&full_path);
|
||||||
|
|
||||||
} else if (sToken.type == TK_LP) {
|
} else {
|
||||||
/* insert into tablename(col1, col2,..., coln) values(v1, v2,... vn); */
|
if (bindedColumns == NULL) {
|
||||||
STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;
|
STableMeta *pTableMeta = pTableMetaInfo->pTableMeta;
|
||||||
SSchema * pSchema = tscGetTableSchema(pTableMeta);
|
|
||||||
|
|
||||||
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
|
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
||||||
SParsedDataColInfo spd = {0};
|
STableDataBlocks *dataBuf = NULL;
|
||||||
spd.numOfCols = tinfo.numOfColumns;
|
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
|
||||||
|
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
|
||||||
int16_t offset[TSDB_MAX_COLUMNS] = {0};
|
&dataBuf, NULL);
|
||||||
for (int32_t t = 1; t < tinfo.numOfColumns; ++t) {
|
if (ret != TSDB_CODE_SUCCESS) {
|
||||||
offset[t] = offset[t - 1] + pSchema[t - 1].bytes;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (1) {
|
|
||||||
index = 0;
|
|
||||||
sToken = tStrGetToken(str, &index, false, 0, NULL);
|
|
||||||
str += index;
|
|
||||||
|
|
||||||
if (TK_STRING == sToken.type) {
|
|
||||||
tscDequoteAndTrimToken(&sToken);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (sToken.type == TK_RP) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool findColumnIndex = false;
|
|
||||||
|
|
||||||
// todo speedup by using hash list
|
|
||||||
for (int32_t t = 0; t < tinfo.numOfColumns; ++t) {
|
|
||||||
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
|
|
||||||
SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++];
|
|
||||||
pElem->offset = offset[t];
|
|
||||||
pElem->colIndex = t;
|
|
||||||
|
|
||||||
if (spd.hasVal[t] == true) {
|
|
||||||
code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z);
|
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
||||||
spd.hasVal[t] = true;
|
code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
|
||||||
findColumnIndex = true;
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!findColumnIndex) {
|
|
||||||
code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column name", sToken.z);
|
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
}
|
} else { // bindedColumns != NULL
|
||||||
|
// insert into tablename(col1, col2,..., coln) values(v1, v2,... vn);
|
||||||
|
STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;
|
||||||
|
|
||||||
if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > tinfo.numOfColumns) {
|
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
|
||||||
code = tscInvalidSQLErrMsg(pCmd->payload, "column name expected", sToken.z);
|
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
||||||
index = 0;
|
STableDataBlocks *dataBuf = NULL;
|
||||||
sToken = tStrGetToken(str, &index, false, 0, NULL);
|
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
|
||||||
str += index;
|
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
|
||||||
|
&dataBuf, NULL);
|
||||||
|
if (ret != TSDB_CODE_SUCCESS) {
|
||||||
|
goto _clean;
|
||||||
|
}
|
||||||
|
|
||||||
|
SSchema *pSchema = tscGetTableSchema(pTableMeta);
|
||||||
|
code = parseBoundColumns(pCmd, &dataBuf->boundColumnInfo, pSchema, bindedColumns, NULL);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
goto _clean;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (dataBuf->boundColumnInfo.cols[0].hasVal == false) {
|
||||||
|
code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", NULL);
|
||||||
|
goto _clean;
|
||||||
|
}
|
||||||
|
|
||||||
if (sToken.type != TK_VALUES) {
|
if (sToken.type != TK_VALUES) {
|
||||||
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z);
|
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z);
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
|
|
||||||
code = doParseInsertStatement(pCmd, &str, &spd, &totalNum);
|
code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
goto _clean;
|
goto _clean;
|
||||||
}
|
}
|
||||||
} else {
|
}
|
||||||
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z);
|
|
||||||
goto _clean;
|
|
||||||
}
|
}
|
||||||
}
|
}
|

@@ -1307,7 +1279,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
   int32_t  index = 0;
   SSqlCmd *pCmd = &pSql->cmd;

-  SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
+  SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false);
   assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);

   pCmd->count = 0;
@@ -1317,7 +1289,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) {

   TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT | pCmd->insertType);

-  sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
+  sToken = tStrGetToken(pSql->sqlstr, &index, false);
   if (sToken.type != TK_INTO) {
     return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z);
   }
@@ -1450,13 +1422,10 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow

   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
   STableMeta *    pTableMeta = pTableMetaInfo->pTableMeta;
-  SSchema *       pSchema = tscGetTableSchema(pTableMeta);
   STableComInfo   tinfo = tscGetTableInfo(pTableMeta);

-  SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
-  tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
-
-  tfree(pCmd->pTableNameList);
+  destroyTableNameList(pCmd);
   pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);

   if (pCmd->pTableBlockHashList == NULL) {
@@ -1495,8 +1464,9 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
     char *lineptr = line;
     strtolower(line, line);

-    int32_t len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd, tinfo.precision, &code, tokenBuf);
-    if (len <= 0 || pTableDataBlock->numOfParams > 0) {
+    int32_t len = 0;
+    code = tsParseOneRow(&lineptr, pTableDataBlock, pCmd, tinfo.precision, &len, tokenBuf);
+    if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) {
       pSql->res.code = code;
       break;
     }

@@ -34,6 +34,7 @@
 #include "tstoken.h"
 #include "tstrbuild.h"
 #include "ttokendef.h"
+#include "qUtil.h"

 #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"

@@ -1101,6 +1102,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
   return true;
 }

+
 static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) {
   assert(pTagsList != NULL);

@@ -1682,18 +1684,6 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
     return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
   }

-  /*
-   * transfer sql functions that need secondary merge into another format
-   * in dealing with super table queries such as: count/first/last
-   */
-  if (isSTable) {
-    tscTansformFuncForSTableQuery(pQueryInfo);
-
-    if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
-      return TSDB_CODE_TSC_INVALID_SQL;
-    }
-  }
-
   return TSDB_CODE_SUCCESS;
 }

@@ -3076,6 +3066,7 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
   return TSDB_CODE_SUCCESS;
 }

+
 static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
   if (pColumn == NULL) {
     return NULL;
@@ -3099,15 +3090,11 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
 }

 static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter,
-                                         SColumnIndex* columnIndex, tSqlExpr* pExpr) {
+                                         int16_t colType, tSqlExpr* pExpr) {
   const char* msg = "not supported filter condition";

-  tSqlExpr* pRight = pExpr->pRight;
-  STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, columnIndex->tableIndex);
-
-  SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex->columnIndex);
-
-  int16_t colType = pSchema->type;
+  tSqlExpr *pRight = pExpr->pRight;
+
   if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) {
     colType = TSDB_DATA_TYPE_BIGINT;
   } else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
@@ -3313,7 +3300,7 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC

   pColumn->columnIndex = pIndex->columnIndex;
   pColumn->tableUid = pTableMeta->id.uid;
-  return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr);
+  return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pColumn->info.type, pExpr);
 }

 static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) {
@@ -6203,7 +6190,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
   }

   // projection query on super table does not compatible with "group by" syntax
-  if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
+  if (tscIsProjectionQuery(pQueryInfo)) {
     return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
   }

|
@ -6807,6 +6794,306 @@ int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** interField) {
|
||||||
|
tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL, .distinct = false};
|
||||||
|
|
||||||
|
int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
|
||||||
|
|
||||||
|
// ADD TRUE FOR TEST
|
||||||
|
if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) {
|
||||||
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
|
}
|
||||||
|
|
||||||
|
++pQueryInfo->havingFieldNum;
|
||||||
|
|
||||||
|
size_t n = tscSqlExprNumOfExprs(pQueryInfo);
|
||||||
|
SExprInfo* pExprInfo = tscSqlExprGet(pQueryInfo, (int32_t)n - 1);
|
||||||
|
|
||||||
|
int32_t slot = tscNumOfFields(pQueryInfo) - 1;
|
||||||
|
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot);
|
||||||
|
pInfo->visible = false;
|
||||||
|
|
||||||
|
if (pInfo->pFieldFilters == NULL) {
|
||||||
|
SExprFilter* pFieldFilters = calloc(1, sizeof(SExprFilter));
|
||||||
|
if (pFieldFilters == NULL) {
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
|
||||||
|
SColumn* pFilters = calloc(1, sizeof(SColumn));
|
||||||
|
if (pFilters == NULL) {
|
||||||
|
tfree(pFieldFilters);
|
||||||
|
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
|
||||||
|
pFieldFilters->pFilters = pFilters;
|
||||||
|
pFieldFilters->pExprInfo = pExprInfo;
|
||||||
|
pExprInfo->base.pFilter = pFilters->info.filterInfo;
|
||||||
|
pInfo->pFieldFilters = pFieldFilters;
|
||||||
|
}
|
||||||
|
|
||||||
|
pInfo->pFieldFilters->pExpr = pExpr;
|
||||||
|
|
||||||
|
*interField = pInfo;
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** pField) {
|
||||||
|
SInternalField* pInfo = NULL;
|
||||||
|
|
||||||
|
for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) {
|
||||||
|
pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutput - 1 - i);
|
||||||
|
|
||||||
|
if (pInfo->pFieldFilters && 0 == tSqlExprCompare(pInfo->pFieldFilters->pExpr, pExpr)) {
|
||||||
|
*pField = pInfo;
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr, &pInfo);
|
||||||
|
if (ret) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
*pField = pInfo;
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t genExprFilter(SExprFilter* exprFilter) {
|
||||||
|
exprFilter->fp = taosArrayInit(4, sizeof(__filter_func_t));
|
||||||
|
if (exprFilter->fp == NULL) {
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int32_t i = 0; i < exprFilter->pFilters->info.numOfFilters; ++i) {
|
||||||
|
SColumnFilterInfo *filterInfo = &exprFilter->pFilters->info.filterInfo[i];
|
||||||
|
|
||||||
|
int32_t lower = filterInfo->lowerRelOptr;
|
||||||
|
int32_t upper = filterInfo->upperRelOptr;
|
||||||
|
if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
|
||||||
|
tscError("invalid rel optr");
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
__filter_func_t ffp = getFilterOperator(lower, upper);
|
||||||
|
if (ffp == NULL) {
|
||||||
|
tscError("invalid filter info");
|
||||||
|
return TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
taosArrayPush(exprFilter->fp, &ffp);
|
||||||
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t sqlOptr) {
|
||||||
|
const char* msg1 = "non binary column not support like operator";
|
||||||
|
const char* msg2 = "invalid operator for binary column in having clause";
|
||||||
|
const char* msg3 = "invalid operator for bool column in having clause";
|
||||||
|
|
||||||
|
SColumn* pColumn = NULL;
|
||||||
|
SColumnFilterInfo* pColFilter = NULL;
|
||||||
|
SInternalField* pInfo = NULL;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* in case of TK_AND filter condition, we first find the corresponding column and build the query condition together
|
||||||
|
* the already existed condition.
|
||||||
|
*/
|
||||||
|
if (sqlOptr == TK_AND) {
|
||||||
|
int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
|
||||||
|
if (ret) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
pColumn = pInfo->pFieldFilters->pFilters;
|
||||||
|
|
||||||
|
// this is a new filter condition on this column
|
||||||
|
if (pColumn->info.numOfFilters == 0) {
|
||||||
|
pColFilter = addColumnFilterInfo(pColumn);
|
||||||
|
} else { // update the existed column filter information, find the filter info here
|
||||||
|
pColFilter = &pColumn->info.filterInfo[0];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pColFilter == NULL) {
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
} else if (sqlOptr == TK_OR) {
|
||||||
|
int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
|
||||||
|
if (ret) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
pColumn = pInfo->pFieldFilters->pFilters;
|
||||||
|
|
||||||
|
// TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2"
|
||||||
|
pColFilter = addColumnFilterInfo(pColumn);
|
||||||
|
if (pColFilter == NULL) {
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
} else { // error;
|
||||||
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
|
}
|
||||||
|
|
||||||
|
pColFilter->filterstr =
|
||||||
|
((pInfo->field.type == TSDB_DATA_TYPE_BINARY || pInfo->field.type == TSDB_DATA_TYPE_NCHAR) ? 1 : 0);
|
||||||
|
|
||||||
|
if (pColFilter->filterstr) {
|
||||||
|
if (pExpr->tokenId != TK_EQ
|
||||||
|
&& pExpr->tokenId != TK_NE
|
||||||
|
&& pExpr->tokenId != TK_ISNULL
|
||||||
|
&& pExpr->tokenId != TK_NOTNULL
|
||||||
|
&& pExpr->tokenId != TK_LIKE
|
||||||
|
) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (pExpr->tokenId == TK_LIKE) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pInfo->field.type == TSDB_DATA_TYPE_BOOL) {
|
||||||
|
if (pExpr->tokenId != TK_EQ && pExpr->tokenId != TK_NE) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr);
|
||||||
|
if (ret) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
return genExprFilter(pInfo->pFieldFilters);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t parentOptr) {
|
||||||
|
if (pExpr == NULL) {
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char* msg1 = "invalid having clause";
|
||||||
|
|
||||||
|
tSqlExpr* pLeft = pExpr->pLeft;
|
||||||
|
tSqlExpr* pRight = pExpr->pRight;
|
||||||
|
|
||||||
|
if (pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) {
|
||||||
|
int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId);
|
||||||
|
if (ret != TSDB_CODE_SUCCESS) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pLeft == NULL || pRight == NULL) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pLeft->type == pRight->type) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
exchangeExpr(pExpr);
|
||||||
|
|
||||||
|
pLeft = pExpr->pLeft;
|
||||||
|
pRight = pExpr->pRight;
|
||||||
|
|
||||||
|
|
||||||
|
if (pLeft->type != SQL_NODE_SQLFUNCTION) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pRight->type != SQL_NODE_VALUE) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pExpr->tokenId >= TK_BITAND) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
//if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) {
|
||||||
|
// return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
//}
|
||||||
|
|
||||||
|
if (pLeft->pParam) {
|
||||||
|
size_t size = taosArrayGetSize(pLeft->pParam);
|
||||||
|
for (int32_t i = 0; i < size; i++) {
|
||||||
|
tSqlExprItem* pParamElem = taosArrayGet(pLeft->pParam, i);
|
||||||
|
if (pParamElem->pNode->tokenId != TK_ALL &&
|
||||||
|
pParamElem->pNode->tokenId != TK_ID &&
|
||||||
|
pParamElem->pNode->tokenId != TK_STRING &&
|
||||||
|
pParamElem->pNode->tokenId != TK_INTEGER &&
|
||||||
|
pParamElem->pNode->tokenId != TK_FLOAT) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pParamElem->pNode->tokenId == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pParamElem->pNode->tokenId == TK_ID) {
|
||||||
|
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
|
||||||
|
if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
|
||||||
|
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||||
|
|
||||||
|
if (index.columnIndex <= 0 ||
|
||||||
|
index.columnIndex >= tscGetNumOfColumns(pTableMeta)) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pLeft->functionId = isValidFunction(pLeft->operand.z, pLeft->operand.n);
|
||||||
|
if (pLeft->functionId < 0) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
return handleExprInHavingClause(pCmd, pQueryInfo, pExpr, parentOptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t validateHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t timeWindowQuery) {
|
||||||
|
const char* msg1 = "having only works with group by";
|
||||||
|
const char* msg2 = "functions or others can not be mixed up";
|
||||||
|
const char* msg3 = "invalid expression in having clause";
|
||||||
|
|
||||||
|
if (pExpr == NULL) {
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pQueryInfo->groupbyExpr.numOfGroupCols <= 0) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pExpr->pLeft == NULL || pExpr->pRight == NULL) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pQueryInfo->colList == NULL) {
|
||||||
|
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t ret = 0;
|
||||||
|
|
||||||
|
if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
//REDO function check
|
||||||
|
if (!functionCompatibleCheck(pQueryInfo, joinQuery, timeWindowQuery)) {
|
||||||
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||||
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
 static int32_t doLoadAllTableMeta(SSqlObj* pSql, int32_t index, SSqlNode* pSqlNode, int32_t numOfTables) {
   const char* msg1 = "invalid table name";
   const char* msg2 = "invalid table alias name";
@@ -7040,6 +7327,24 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, int32_t index) {
     }
   }

+  // parse the having clause in the first place
+  if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, isSTable, joinQuery, timeWindowQuery) !=
+      TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_TSC_INVALID_SQL;
+  }
+
+  /*
+   * transfer sql functions that need secondary merge into another format
+   * in dealing with super table queries such as: count/first/last
+   */
+  if (isSTable) {
+    tscTansformFuncForSTableQuery(pQueryInfo);
+
+    if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
+      return TSDB_CODE_TSC_INVALID_SQL;
+    }
+  }
+
   if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_TSC_INVALID_SQL;
   }

@@ -144,8 +144,9 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
   SNewVgroupInfo info = {0};
   info.numOfEps = pVgroupMsg->numOfEps;
   info.vgId     = pVgroupMsg->vgId;
-  info.inUse    = 0;
+  info.inUse    = 0;   // 0 is the default value of inUse in case of multiple replica

+  assert(info.numOfEps >= 1 && info.vgId >= 1);
   for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) {
     tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN);
     info.ep[i].port = pVgroupMsg->epAddr[i].port;

@@ -35,6 +35,7 @@ int tscKeepConn[TSDB_SQL_MAX] = {0};
 TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid, TSKEY dflt);
 void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts);
 void tscSaveSubscriptionProgress(void* sub);
+static int32_t extractSTableQueryVgroupId(STableMetaInfo* pTableMetaInfo);

 static int32_t minMsgSize() { return tsRpcHeadSize + 100; }
 static int32_t getWaitingTimeInterval(int32_t count) {
@@ -80,6 +81,7 @@ static void tscEpSetHtons(SRpcEpSet *s) {
     s->port[i] = htons(s->port[i]);
   }
 }
+
 bool tscEpSetIsEqual(SRpcEpSet *s1, SRpcEpSet *s2) {
   if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) {
     return false;
@@ -112,19 +114,22 @@ static void tscDumpEpSetFromVgroupInfo(SRpcEpSet *pEpSet, SNewVgroupInfo *pVgrou
   }
 }

-static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
-  SSqlCmd *pCmd = &pObj->cmd;
+static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) {
+  SSqlCmd *pCmd = &pSql->cmd;
   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
   if (pTableMetaInfo == NULL || pTableMetaInfo->pTableMeta == NULL) {
     return;
   }

-  int32_t vgId = pTableMetaInfo->pTableMeta->vgId;
+  int32_t vgId = -1;
   if (pTableMetaInfo->pTableMeta->tableType == TSDB_SUPER_TABLE) {
-    assert(vgId == 0);
-    return;
+    vgId = extractSTableQueryVgroupId(pTableMetaInfo);
+  } else {
+    vgId = pTableMetaInfo->pTableMeta->vgId;
   }

+  assert(vgId > 0);
+
   SNewVgroupInfo vgroupInfo = {.vgId = -1};
   taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
   assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0);
@@ -139,6 +144,33 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {

   tscDebug("after: EndPoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps);
   taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(SNewVgroupInfo));

+  // Update the local cached epSet info cached by SqlObj
+  int32_t inUse = pSql->epSet.inUse;
+  tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
+  tscDebug("%p update the epSet in SqlObj, in use before:%d, after:%d", pSql, inUse, pSql->epSet.inUse);
+
+}
+
+int32_t extractSTableQueryVgroupId(STableMetaInfo* pTableMetaInfo) {
+  assert(pTableMetaInfo != NULL);
+
+  int32_t vgIndex = pTableMetaInfo->vgroupIndex;
+  int32_t vgId = -1;
+
+  if (pTableMetaInfo->pVgroupTables == NULL) {
+    SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList;
+    assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
+    vgId = pVgroupInfo->vgroups[vgIndex].vgId;
+  } else {
+    int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
+    assert(vgIndex >= 0 && vgIndex < numOfVgroups);
+
+    SVgroupTableInfo *pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex);
+    vgId = pTableIdList->vgInfo.vgId;
+  }
+
+  return vgId;
 }

 void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
@@ -517,21 +549,22 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {

   if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
     int32_t vgIndex = pTableMetaInfo->vgroupIndex;
+    int32_t vgId = -1;
+
     if (pTableMetaInfo->pVgroupTables == NULL) {
       SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList;
       assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
+      vgId = pVgroupInfo->vgroups[vgIndex].vgId;
-      pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
-      tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId);
     } else {
       int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
       assert(vgIndex >= 0 && vgIndex < numOfVgroups);

       SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex);
+      vgId = pTableIdList->vgInfo.vgId;
-      pRetrieveMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId);
-      tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pTableIdList->vgInfo.vgId, vgIndex, pSql->res.qId);
     }

+    pRetrieveMsg->header.vgId = htonl(vgId);
+    tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, vgId, vgIndex, pSql->res.qId);
   } else {
     STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
     pRetrieveMsg->header.vgId = htonl(pTableMeta->vgId);
@@ -1876,7 +1909,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
       (vgroupInfo.inUse < 0)) {  // vgroup info exists, compare with it
     vgroupInfo = createNewVgroupInfo(&pMetaMsg->vgroup);
     taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo));
-    tscDebug("add new VgroupInfo, vgId:%d, total:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
+    tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
   }
 }

|
@@ -2028,18 +2061,33 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
tscError("%p empty vgroup info", pSql);
} else {
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
//just init, no need to lock
// just init, no need to lock
SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j];
SVgroupInfo *pVgroup = &pInfo->vgroupList->vgroups[j];
SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
pVgroups->vgId = htonl(vmsg->vgId);
vmsg->vgId = htonl(vmsg->vgId);
pVgroups->numOfEps = vmsg->numOfEps;
vmsg->numOfEps = vmsg->numOfEps;
for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
}
assert(pVgroups->numOfEps >= 1 && pVgroups->vgId >= 1);
SNewVgroupInfo newVi = createNewVgroupInfo(vmsg);
pVgroup->numOfEps = newVi.numOfEps;
pVgroup->vgId = newVi.vgId;
for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
pVgroup->epAddr[k].port = newVi.ep[k].port;
pVgroup->epAddr[k].fqdn = strndup(newVi.ep[k].fqdn, TSDB_FQDN_LEN);
}
for (int32_t k = 0; k < pVgroups->numOfEps; ++k) {
// check if current buffer contains the vgroup info.
pVgroups->epAddr[k].port = htons(vmsg->epAddr[k].port);
// If not, add it
pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn));
SNewVgroupInfo existVgroupInfo = {.inUse = -1};
taosHashGetClone(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), NULL, &existVgroupInfo, sizeof(SNewVgroupInfo));
if (((existVgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&existVgroupInfo, vmsg)) ||
(existVgroupInfo.inUse < 0)) { // vgroup info exists, compare with it
taosHashPut(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), &newVi, sizeof(newVi));
tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", newVi.vgId, (int32_t) taosHashGetSize(tscVgroupMap));
}
}
}
@@ -405,6 +405,7 @@ int taos_affected_rows(TAOS_RES *tres) {
TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
SSqlRes *pRes = &pSql->res;
if (pSql == NULL || pSql->signature != pSql) return 0;
SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
@@ -419,7 +420,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
SFieldInfo *pFieldInfo = &pQueryInfo->fieldsInfo;
if (pFieldInfo->final == NULL) {
if (pRes->final == NULL) {
TAOS_FIELD* f = calloc(pFieldInfo->numOfOutput, sizeof(TAOS_FIELD));
int32_t j = 0;
@@ -439,10 +440,10 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
}
}
pFieldInfo->final = f;
pRes->final = f;
}
return pFieldInfo->final;
return pRes->final;
}
static bool needToFetchNewBlock(SSqlObj* pSql) {
@@ -2968,7 +2968,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
}
tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
pVgroup->epAddr[pSql->epSet.inUse].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
if (pSql->res.qId == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
tscRetrieveFromDnodeCallBack(param, pSql, 0);
@@ -756,6 +756,8 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
tfree(pRes->pArithSup);
}
tfree(pRes->final);
pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
}
@@ -795,6 +797,20 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
tfree(pCmd->pQueryInfo);
}
void destroyTableNameList(SSqlCmd* pCmd) {
if (pCmd->numOfTables == 0) {
assert(pCmd->pTableNameList == NULL);
return;
}
for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
tfree(pCmd->pTableNameList[i]);
}
pCmd->numOfTables = 0;
tfree(pCmd->pTableNameList);
}
void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
pCmd->command = 0;
pCmd->numOfCols = 0;
@@ -804,14 +820,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
pCmd->parseFinished = 0;
pCmd->autoCreated = 0;
for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
destroyTableNameList(pCmd);
if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
tfree(pCmd->pTableNameList[i]);
}
}
pCmd->numOfTables = 0;
tfree(pCmd->pTableNameList);
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, removeMeta);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
@@ -928,6 +937,11 @@ void tscFreeSqlObj(SSqlObj* pSql) {
free(pSql);
}
void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo) {
tfree(pColInfo->boundedColumns);
tfree(pColInfo->cols);
}
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
if (pDataBlock == NULL) {
return;
@@ -948,6 +962,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
tscDestroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
tfree(pDataBlock);
}
@@ -1062,7 +1077,7 @@ SQueryInfo* tscGetActiveQueryInfo(SSqlCmd* pCmd) {
* @param dataBlocks
* @return
*/
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name,
int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOffset, SName* name,
STableMeta* pTableMeta, STableDataBlocks** dataBlocks) {
STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
if (dataBuf == NULL) {
@@ -1070,10 +1085,12 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
dataBuf->nAllocSize = (uint32_t)initialSize;
dataBuf->nAllocSize = (uint32_t)defaultSize;
dataBuf->headerSize = startOffset; // the header size will always be the startOffset value, reserved for the subumit block header
dataBuf->headerSize = startOffset;
// the header size will always be the startOffset value, reserved for the subumit block header
if (dataBuf->nAllocSize <= dataBuf->headerSize) {
dataBuf->nAllocSize = dataBuf->headerSize*2;
dataBuf->nAllocSize = dataBuf->headerSize * 2;
}
dataBuf->pData = calloc(1, dataBuf->nAllocSize);
@@ -1083,25 +1100,31 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
//Here we keep the tableMeta to avoid it to be remove by other threads.
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
SSchema* pSchema = tscGetTableSchema(dataBuf->pTableMeta);
tscSetBoundColumnInfo(pColInfo, pSchema, dataBuf->pTableMeta->tableInfo.numOfColumns);
dataBuf->ordered = true;
dataBuf->prevTS = INT64_MIN;
dataBuf->rowSize = rowSize;
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
dataBuf->vgId = dataBuf->pTableMeta->vgId;
tNameAssign(&dataBuf->tableName, name);
//Here we keep the tableMeta to avoid it to be remove by other threads.
assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize,
SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks, SArray* pBlockList) {
SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks,
SArray* pBlockList) {
*dataBlocks = NULL;
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id));
if (t1 != NULL) {
@@ -1210,6 +1233,8 @@ static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
tfree(pCmd->pTableNameList[i]);
pCmd->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
}
@@ -1326,7 +1351,7 @@ bool tscIsInsertData(char* sqlstr) {
int32_t index = 0;
do {
SStrToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL);
SStrToken t0 = tStrGetToken(sqlstr, &index, false);
if (t0.type != TK_LP) {
return t0.type == TK_INSERT || t0.type == TK_IMPORT;
}
@@ -1448,6 +1473,16 @@ int32_t tscGetResRowLength(SArray* pExprList) {
return size;
}
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
tfree(pFilterInfo[i].pz);
}
}
tfree(pFilterInfo);
}
void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
if (pFieldInfo == NULL) {
return;
@@ -1742,15 +1777,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t
return taosArrayGetP(pColumnList, i);
}
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
tfree(pFilterInfo[i].pz);
}
}
tfree(pFilterInfo);
}
SColumn* tscColumnClone(const SColumn* src) {
assert(src != NULL);
@@ -39,6 +39,7 @@ extern int8_t tsEnableTelemetryReporting;
extern char tsEmail[];
extern char tsArbitrator[];
extern int8_t tsArbOnline;
extern int32_t tsDnodeId;
// common
extern int tsRpcTimer;
@@ -41,10 +41,11 @@ typedef struct SResPair {
double avg;
} SResPair;
/* the structure for sql function in select clause */
// the structure for sql function in select clause
typedef struct SSqlExpr {
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
SColIndex colInfo;
uint64_t uid; // refactor use the pointer
int16_t functionId; // function id in aAgg array
@@ -60,11 +61,14 @@ typedef struct SSqlExpr {
tVariant param[3]; // parameters are not more than 3
int32_t offset; // sub result column value of arithmetic expression.
int16_t resColId; // result column id
int32_t filterNum;
SColumnFilterInfo *pFilter;
} SSqlExpr;
typedef struct SExprInfo {
SSqlExpr base;
struct tExprNode* pExpr;
struct tExprNode *pExpr;
} SExprInfo;
#define TSDB_DB_NAME_T 1
@@ -43,6 +43,7 @@ int8_t tsEnableVnodeBak = 1;
int8_t tsEnableTelemetryReporting = 1;
int8_t tsArbOnline = 0;
char tsEmail[TSDB_FQDN_LEN] = {0};
int32_t tsDnodeId = 0;
// common
int32_t tsRpcTimer = 1000;
@@ -212,7 +213,7 @@ float tsAvailTmpDirectorySpace = 0;
float tsAvailDataDirGB = 0;
float tsUsedDataDirGB = 0;
float tsReservedTmpDirectorySpace = 1.0f;
float tsMinimalDataDirGB = 1.0f;
float tsMinimalDataDirGB = 2.0f;
int32_t tsTotalMemoryMB = 0;
uint32_t tsVersion = 0;
@@ -73,6 +73,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row);
static void cqCreateStream(SCqContext *pContext, SCqObj *pObj);
int32_t cqObjRef = -1;
int32_t cqVnodeNum = 0;
void cqRmFromList(SCqObj *pObj) {
//LOCK in caller
@@ -166,6 +167,8 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
return NULL;
}
atomic_add_fetch_32(&cqVnodeNum, 1);
cqCreateRef();
pContext->tmrCtrl = taosTmrInit(0, 0, 0, "CQ");
@@ -240,6 +243,13 @@ void cqClose(void *handle) {
if (hasCq == 0) {
freeSCqContext(pContext);
}
int32_t remainn = atomic_sub_fetch_32(&cqVnodeNum, 1);
if (remainn <= 0) {
int32_t ref = cqObjRef;
cqObjRef = -1;
taosCloseRef(ref);
}
}
void cqStart(void *handle) {
@@ -294,7 +304,7 @@ void cqStop(void *handle) {
pthread_mutex_unlock(&pContext->mutex);
}
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema) {
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start) {
if (tsEnableStream == 0) {
return NULL;
}
@@ -326,7 +336,11 @@ void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, ch
pObj->rid = taosAddRef(cqObjRef, pObj);
if(start && pContext->master) {
cqCreateStream(pContext, pObj);
} else {
pObj->pContext = pContext;
}
rid = pObj->rid;
@@ -70,7 +70,7 @@ int main(int argc, char *argv[]) {
tdDestroyTSchemaBuilder(&schemaBuilder);
for (int sid =1; sid<10; ++sid) {
cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema);
cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema, 1);
}
tdFreeSchema(pSchema);
@@ -17,6 +17,7 @@
#include "os.h"
#include "cJSON.h"
#include "dnodeCfg.h"
#include "tglobal.h"
static SDnodeCfg tsCfg = {0};
static pthread_mutex_t tsCfgMutex;
@@ -70,6 +71,7 @@ static void dnodeResetCfg(SDnodeCfg *cfg) {
pthread_mutex_lock(&tsCfgMutex);
tsCfg.dnodeId = cfg->dnodeId;
tsDnodeId = cfg->dnodeId;
tstrncpy(tsCfg.clusterId, cfg->clusterId, TSDB_CLUSTER_ID_LEN);
dnodePrintCfg(cfg);
dnodeWriteCfg();
@@ -222,7 +222,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code);
} else {
if (qtype == TAOS_QTYPE_FWD) {
vnodeConfirmForward(pVnode, pWrite->pHead.version, 0, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
vnodeConfirmForward(pVnode, pWrite->pHead.version, pWrite->code, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
}
if (pWrite->rspRet.rsp) {
rpcFreeCont(pWrite->rspRet.rsp);
@@ -88,10 +88,11 @@ void* qOpenQueryMgmt(int32_t vgId);
void qQueryMgmtNotifyClosed(void* pExecutor);
void qQueryMgmtReOpen(void *pExecutor);
void qCleanupQueryMgmt(void* pExecutor);
void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo);
void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo);
void** qAcquireQInfo(void* pMgmt, uint64_t key);
void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle);
bool checkQIdEqual(void *qHandle, uint64_t qId);
int64_t genQueryId(void);
#ifdef __cplusplus
}
@@ -163,6 +163,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist")
#define TSDB_CODE_MND_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0363) //"Invalid table type in tsdb")
#define TSDB_CODE_MND_TOO_MANY_TAGS TAOS_DEF_ERROR_CODE(0, 0x0364) //"Too many tags")
#define TSDB_CODE_MND_TOO_MANY_COLUMNS TAOS_DEF_ERROR_CODE(0, 0x0365) //"Too many columns")
#define TSDB_CODE_MND_TOO_MANY_TIMESERIES TAOS_DEF_ERROR_CODE(0, 0x0366) //"Too many time series")
#define TSDB_CODE_MND_NOT_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x0367) //"Not super table") // operation only available for super table
#define TSDB_CODE_MND_COL_NAME_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x0368) //"Tag name too long")
@@ -42,7 +42,7 @@ void cqStart(void *handle);
void cqStop(void *handle);
// cqCreate is called by TSDB to start an instance of CQ
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema);
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start);
// cqDrop is called by TSDB to stop an instance of CQ, handle is the return value of cqCreate
void cqDrop(void *handle);
@@ -51,7 +51,7 @@ typedef struct {
void *cqH;
int (*notifyStatus)(void *, int status, int eno);
int (*eventCallBack)(void *);
void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema);
void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema, int start);
void (*cqDropFunc)(void *handle);
} STsdbAppH;
@@ -27,7 +27,7 @@
#define MAX_IP_SIZE 20
#define MAX_PASSWORD_SIZE 20
#define MAX_HISTORY_SIZE 1000
#define MAX_COMMAND_SIZE 65536
#define MAX_COMMAND_SIZE 1048586
#define HISTORY_FILE ".taos_history"
#define DEFAULT_RES_SHOW_NUM 100
@@ -238,7 +238,7 @@ void resetCommand(Command *cmd, const char s[]) {
clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
memset(cmd->buffer, 0, MAX_COMMAND_SIZE);
memset(cmd->command, 0, MAX_COMMAND_SIZE);
strcpy(cmd->command, s);
strncpy(cmd->command, s, MAX_COMMAND_SIZE);
int size = 0;
int width = 0;
getMbSizeInfo(s, &size, &width);
@@ -37,7 +37,7 @@ static struct argp_option options[] = {
{"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."},
{"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."},
{"user", 'u', "USER", 0, "The user name to use when connecting to the server."},
{"user", 'A', "Auth", 0, "The user auth to use when connecting to the server."},
{"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."},
{"config-dir", 'c', "CONFIG_DIR", 0, "Configuration directory."},
{"dump-config", 'C', 0, 0, "Dump configuration."},
{"commands", 's', "COMMANDS", 0, "Commands to run without enter the shell."},
@@ -9,19 +9,18 @@ IF (GIT_FOUND)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1)
STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
IF (TD_LINUX)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
ENDIF (TD_LINUX)
MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS})
ELSE()
MESSAGE("Git not found")
@@ -29,9 +28,9 @@ ELSE()
SET(TAOSDEMO_STATUS "unknown")
ENDIF (GIT_FOUND)
STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1)
STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1)
MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1})
STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS)
STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS)
IF (TAOSDEMO_STATUS MATCHES "M")
SET(TAOSDEMO_STATUS "modified")
@@ -40,7 +40,6 @@
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 20,
"max_sql_len": 1024000,
"disorder_ratio": 0,
@@ -40,7 +40,6 @@
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
@@ -1,5 +1,5 @@
{
"filetype":"query",
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
@@ -7,13 +7,30 @@
"password": "taosdata",
"confirm_parameter_prompt": "yes",
"databases": "dbx",
"specified_table_query":
"query_times": 1,
{"query_interval":1, "concurrent":4,
"specified_table_query": {
"sqls": [{"sql": "select last_row(*) from stb where color='red'", "result": "./query_res0.txt"},
"query_interval": 1,
{"sql": "select count(*) from stb_01", "result": "./query_res1.txt"}]
"concurrent": 4,
"sqls": [
{
"sql": "select last_row(*) from stb where color='red'",
"result": "./query_res0.txt"
},
"super_table_query":
{
{"stblname": "stb", "query_interval":1, "threads":4,
"sql": "select count(*) from stb_01",
"sqls": [{"sql": "select last_row(*) from xxxx", "result": "./query_res2.txt"}]
"result": "./query_res1.txt"
}
]
},
"super_table_query": {
"stblname": "stb",
"query_interval": 1,
"threads": 4,
"sqls": [
{
"sql": "select last_row(*) from xxxx",
"result": "./query_res2.txt"
}
]
}
}
File diff suppressed because it is too large
@@ -39,6 +39,22 @@ typedef struct {
int8_t type;
} SOColInfo;
#define debugPrint(fmt, ...) \
do { if (g_args.debug_print || g_args.verbose_print) \
fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0)
#define verbosePrint(fmt, ...) \
do { if (g_args.verbose_print) \
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
#define performancePrint(fmt, ...) \
do { if (g_args.performance_print) \
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
// -------------------------- SHOW DATABASE INTERFACE-----------------------
enum _show_db_index {
TSDB_SHOW_DB_NAME_INDEX,
@@ -199,16 +215,18 @@ static struct argp_option options[] = {
{"schemaonly", 's', 0, 0, "Only dump schema.", 3},
{"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump.", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Epoch or ISO8601/RFC3339 format is acceptable. For example: 2017-10-01T18:00:00+0800", 3},
{"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
{"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3},
{"debug", 'g', 0, 0, "Print debug info.", 1},
{"verbose", 'v', 0, 0, "Print verbose debug info.", 1},
{0}};
/* Used by main to communicate with parse_opt. */
struct arguments {
typedef struct arguments {
// connection option
char *host;
char *user;
@@ -240,7 +258,10 @@ struct arguments {
char **arg_list;
int arg_list_len;
bool isDumpIn;
};
bool debug_print;
bool verbose_print;
bool performance_print;
} SArguments;
/* Parse a single option. */
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
@@ -286,6 +307,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN);
wordfree(&full_path);
break;
case 'g':
arguments->debug_print = true;
break;
case 'i':
arguments->isDumpIn = true;
if (wordexp(arg, &full_path, 0) != 0) {
@@ -387,7 +411,7 @@ int taosCheckParam(struct arguments *arguments);
void taosFreeDbInfos();
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);
struct arguments tsArguments = {
struct arguments g_args = {
// connection option
NULL,
"root",
@@ -421,7 +445,10 @@ struct arguments tsArguments = {
0,
NULL,
0,
false
false,
false, // debug_print
false, // verbose_print
false // performance_print
};
static int queryDbImpl(TAOS *taos, char *command) {
@@ -453,13 +480,53 @@ static int queryDbImpl(TAOS *taos, char *command) {
return 0;
}
static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-E") == 0) {
if (argv[i+1]) {
char *tmp = strdup(argv[++i]);
if (tmp) {
int64_t tmpEpoch;
if (strchr(tmp, ':') && strchr(tmp, '-')) {
if (TSDB_CODE_SUCCESS != taosParseTime(
tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) {
fprintf(stderr, "Input end time error!\n");
free(tmp);
return;
}
} else {
tmpEpoch = atoll(tmp);
}
sprintf(argv[i], "%"PRId64"", tmpEpoch);
debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
__func__, __LINE__, tmp, i, argv[i]);
free(tmp);
} else {
errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__);
exit(-1);
}
} else {
errorPrint("%s() LN%d, -E need a valid value following!\n", __func__, __LINE__);
exit(-1);
}
} else if (strcmp(argv[i], "-g") == 0) {
arguments->debug_print = true;
}
}
}
int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
argp_parse(&argp, argc, argv, 0, 0, &tsArguments);
parse_args(argc, argv, &g_args);
if (tsArguments.abort) {
argp_parse(&argp, argc, argv, 0, 0, &g_args);
if (g_args.abort) {
#ifndef _ALPINE
error(10, 0, "ABORTED");
#else
@@ -469,81 +536,82 @@ int main(int argc, char *argv[]) {
printf("====== arguments config ======\n");
{
printf("host: %s\n", tsArguments.host);
printf("host: %s\n", g_args.host);
printf("user: %s\n", tsArguments.user);
printf("user: %s\n", g_args.user);
printf("password: %s\n", tsArguments.password);
printf("password: %s\n", g_args.password);
printf("port: %u\n", tsArguments.port);
printf("port: %u\n", g_args.port);
printf("cversion: %s\n", tsArguments.cversion);
printf("cversion: %s\n", g_args.cversion);
printf("mysqlFlag: %d\n", tsArguments.mysqlFlag);
printf("mysqlFlag: %d\n", g_args.mysqlFlag);
printf("outpath: %s\n", tsArguments.outpath);
printf("outpath: %s\n", g_args.outpath);
printf("inpath: %s\n", tsArguments.inpath);
printf("inpath: %s\n", g_args.inpath);
printf("resultFile: %s\n", tsArguments.resultFile);
printf("resultFile: %s\n", g_args.resultFile);
printf("encode: %s\n", tsArguments.encode);
printf("encode: %s\n", g_args.encode);
printf("all_databases: %d\n", tsArguments.all_databases);
printf("all_databases: %d\n", g_args.all_databases);
printf("databases: %d\n", tsArguments.databases);
printf("databases: %d\n", g_args.databases);
printf("schemaonly: %d\n", tsArguments.schemaonly);
printf("schemaonly: %d\n", g_args.schemaonly);
printf("with_property: %d\n", tsArguments.with_property);
printf("with_property: %d\n", g_args.with_property);
printf("start_time: %" PRId64 "\n", tsArguments.start_time);
printf("start_time: %" PRId64 "\n", g_args.start_time);
printf("end_time: %" PRId64 "\n", tsArguments.end_time);
printf("end_time: %" PRId64 "\n", g_args.end_time);
printf("data_batch: %d\n", tsArguments.data_batch);
printf("data_batch: %d\n", g_args.data_batch);
printf("max_sql_len: %d\n", tsArguments.max_sql_len);
printf("max_sql_len: %d\n", g_args.max_sql_len);
printf("table_batch: %d\n", tsArguments.table_batch);
printf("table_batch: %d\n", g_args.table_batch);
printf("thread_num: %d\n", tsArguments.thread_num);
printf("thread_num: %d\n", g_args.thread_num);
printf("allow_sys: %d\n", tsArguments.allow_sys);
printf("allow_sys: %d\n", g_args.allow_sys);
printf("abort: %d\n", tsArguments.abort);
printf("abort: %d\n", g_args.abort);
printf("isDumpIn: %d\n", tsArguments.isDumpIn);
printf("isDumpIn: %d\n", g_args.isDumpIn);
printf("arg_list_len: %d\n", tsArguments.arg_list_len);
printf("arg_list_len: %d\n", g_args.arg_list_len);
printf("debug_print: %d\n", g_args.debug_print);
for (int32_t i = 0; i < tsArguments.arg_list_len; i++) {
for (int32_t i = 0; i < g_args.arg_list_len; i++) {
printf("arg_list[%d]: %s\n", i, tsArguments.arg_list[i]);
printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]);
}
}
printf("==============================\n");
if (tsArguments.cversion[0] != 0){
if (g_args.cversion[0] != 0){
tstrncpy(version, tsArguments.cversion, 11);
tstrncpy(version, g_args.cversion, 11);
}
if (taosCheckParam(&tsArguments) < 0) {
if (taosCheckParam(&g_args) < 0) {
exit(EXIT_FAILURE);
}
g_fpOfResult = fopen(tsArguments.resultFile, "a");
g_fpOfResult = fopen(g_args.resultFile, "a");
if (NULL == g_fpOfResult) {
fprintf(stderr, "Failed to open %s for save result\n", tsArguments.resultFile);
fprintf(stderr, "Failed to open %s for save result\n", g_args.resultFile);
return 1;
};
fprintf(g_fpOfResult, "#############################################################################\n");
fprintf(g_fpOfResult, "============================== arguments config =============================\n");
{
fprintf(g_fpOfResult, "host: %s\n", tsArguments.host);
fprintf(g_fpOfResult, "host: %s\n", g_args.host);
fprintf(g_fpOfResult, "user: %s\n", tsArguments.user);
fprintf(g_fpOfResult, "user: %s\n", g_args.user);
fprintf(g_fpOfResult, "password: %s\n", tsArguments.password);
fprintf(g_fpOfResult, "password: %s\n", g_args.password);
fprintf(g_fpOfResult, "port: %u\n", tsArguments.port);
fprintf(g_fpOfResult, "port: %u\n", g_args.port);
fprintf(g_fpOfResult, "cversion: %s\n", tsArguments.cversion);
fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
fprintf(g_fpOfResult, "mysqlFlag: %d\n", tsArguments.mysqlFlag);
fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
fprintf(g_fpOfResult, "outpath: %s\n", tsArguments.outpath);
fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
fprintf(g_fpOfResult, "inpath: %s\n", tsArguments.inpath);
fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
fprintf(g_fpOfResult, "resultFile: %s\n", tsArguments.resultFile);
fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile);
fprintf(g_fpOfResult, "encode: %s\n", tsArguments.encode);
fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
fprintf(g_fpOfResult, "all_databases: %d\n", tsArguments.all_databases);
fprintf(g_fpOfResult, "all_databases: %d\n", g_args.all_databases);
fprintf(g_fpOfResult, "databases: %d\n", tsArguments.databases);
fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
fprintf(g_fpOfResult, "schemaonly: %d\n", tsArguments.schemaonly);
fprintf(g_fpOfResult, "schemaonly: %d\n", g_args.schemaonly);
fprintf(g_fpOfResult, "with_property: %d\n", tsArguments.with_property);
fprintf(g_fpOfResult, "with_property: %d\n", g_args.with_property);
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", tsArguments.start_time);
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", tsArguments.end_time);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
fprintf(g_fpOfResult, "data_batch: %d\n", tsArguments.data_batch);
fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
fprintf(g_fpOfResult, "max_sql_len: %d\n", tsArguments.max_sql_len);
fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
fprintf(g_fpOfResult, "table_batch: %d\n", tsArguments.table_batch);
fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch);
fprintf(g_fpOfResult, "thread_num: %d\n", tsArguments.thread_num);
fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num);
fprintf(g_fpOfResult, "allow_sys: %d\n", tsArguments.allow_sys);
fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys);
fprintf(g_fpOfResult, "abort: %d\n", tsArguments.abort);
fprintf(g_fpOfResult, "abort: %d\n", g_args.abort);
fprintf(g_fpOfResult, "isDumpIn: %d\n", tsArguments.isDumpIn);
fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn);
fprintf(g_fpOfResult, "arg_list_len: %d\n", tsArguments.arg_list_len);
fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len);
for (int32_t i = 0; i < tsArguments.arg_list_len; i++) {
for (int32_t i = 0; i < g_args.arg_list_len; i++) {
fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, tsArguments.arg_list[i]);
fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]);
}
}
@@ -552,11 +620,11 @@ int main(int argc, char *argv[]) {
time_t tTime = time(NULL);
struct tm tm = *localtime(&tTime);
if (tsArguments.isDumpIn) {
if (g_args.isDumpIn) {
fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n");
fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (taosDumpIn(&tsArguments) < 0) {
if (taosDumpIn(&g_args) < 0) {
fprintf(g_fpOfResult, "\n");
fclose(g_fpOfResult);
return -1;
@@ -565,7 +633,7 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n");
fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (taosDumpOut(&tsArguments) < 0) {
if (taosDumpOut(&g_args) < 0) {
fprintf(g_fpOfResult, "\n");
fclose(g_fpOfResult);
return -1;
@@ -1236,8 +1304,8 @@ void* taosDumpOutWorkThreadFp(void *arg)
FILE *fp = NULL;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (tsArguments.outpath[0] != 0) {
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex);
sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
}
@@ -1270,7 +1338,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon, pThread->dbName);
int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName);
if (ret >= 0) {
// TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++;
@@ -1282,13 +1350,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
}
tablesInOneFile++;
if (tablesInOneFile >= tsArguments.table_batch) {
if (tablesInOneFile >= g_args.table_batch) {
fclose(fp);
tablesInOneFile = 0;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (tsArguments.outpath[0] != 0) {
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
}
@@ -1491,14 +1559,14 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
taos_free_result(res);
lseek(fd, 0, SEEK_SET);
int maxThreads = tsArguments.thread_num;
int maxThreads = g_args.thread_num;
int tableOfPerFile ;
if (numOfTable <= tsArguments.thread_num) {
if (numOfTable <= g_args.thread_num) {
tableOfPerFile = 1;
maxThreads = numOfTable;
} else {
tableOfPerFile = numOfTable / tsArguments.thread_num;
tableOfPerFile = numOfTable / g_args.thread_num;
if (0 != numOfTable % tsArguments.thread_num) {
if (0 != numOfTable % g_args.thread_num) {
tableOfPerFile += 1;
}
}
@@ -2214,7 +2282,7 @@ void* taosDumpInWorkThreadFp(void *arg)
continue;
}
fprintf(stderr, "Success Open input file: %s\n", SQLFileName);
taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, tsArguments.encode, SQLFileName);
taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, g_args.encode, SQLFileName);
}
}
@@ -437,14 +437,14 @@ static int32_t mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
return TAOS_DN_OFF_TIME_ZONE_NOT_MATCH;
}
if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) {
// if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) {
mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale);
// mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale);
return TAOS_DN_OFF_LOCALE_NOT_MATCH;
// return TAOS_DN_OFF_LOCALE_NOT_MATCH;
}
// }
if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) {
// if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) {
mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset);
// mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset);
return TAOS_DN_OFF_CHARSET_NOT_MATCH;
// return TAOS_DN_OFF_CHARSET_NOT_MATCH;
}
// }
if (clusterCfg->enableBalance != tsEnableBalance) {
mError("\"balance\"[%d - %d] cfg parameters inconsistent", clusterCfg->enableBalance, tsEnableBalance);
@@ -628,6 +628,11 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
bnNotify();
}
if (!tsEnableBalance) {
int32_t numOfMnodes = mnodeGetMnodesNum();
if (numOfMnodes < tsNumOfMnodes) bnNotify();
}
if (openVnodes != pDnode->openVnodes) {
mnodeCheckUnCreatedVgroup(pDnode, pStatus->load, openVnodes);
}
@@ -381,6 +381,8 @@ static bool mnodeAllOnline() {
void *pIter = NULL;
bool allOnline = true;
sdbUpdateMnodeRoles();
while (1) {
SMnodeObj *pMnode = NULL;
pIter = mnodeGetNextMnode(pIter, &pMnode);
@@ -315,6 +315,10 @@ void sdbUpdateAsync() {
taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr);
}
static int node_cmp(const void *l, const void *r) {
return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId;
}
int32_t sdbUpdateSync(void *pMnodes) {
SMInfos *pMinfos = pMnodes;
if (!mnodeIsRunning()) {
@@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) {
return TSDB_CODE_SUCCESS;
}
qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp);
sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica);
for (int32_t i = 0; i < syncCfg.replica; ++i) {
sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn,
@@ -1019,7 +1025,7 @@ static int32_t sdbWriteToQueue(SSdbRow *pRow, int32_t qtype) {
int32_t queued = atomic_add_fetch_32(&tsSdbMgmt.queuedMsg, 1);
if (queued > MAX_QUEUED_MSG_NUM) {
sdbDebug("vgId:1, too many msg:%d in sdb queue, flow control", queued);
sdbInfo("vgId:1, too many msg:%d in sdb queue, flow control", queued);
taosMsleep(1);
}
@ -1037,6 +1037,19 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
|
||||||
|
|
||||||
SCreateTableMsg* pCreate = (SCreateTableMsg*)((char*)pCreate1 + sizeof(SCMCreateTableMsg));
|
SCreateTableMsg* pCreate = (SCreateTableMsg*)((char*)pCreate1 + sizeof(SCMCreateTableMsg));
|
||||||
|
|
||||||
|
int16_t numOfTags = htons(pCreate->numOfTags);
|
||||||
|
if (numOfTags > TSDB_MAX_TAGS) {
|
||||||
|
mError("msg:%p, app:%p table:%s, failed to create, too many tags", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
|
||||||
|
return TSDB_CODE_MND_TOO_MANY_TAGS;
|
||||||
|
}
|
||||||
|
|
||||||
|
int16_t numOfColumns = htons(pCreate->numOfColumns);
|
||||||
|
int32_t numOfCols = numOfColumns + numOfTags;
|
||||||
|
if (numOfCols > TSDB_MAX_COLUMNS) {
|
||||||
|
mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
|
||||||
|
return TSDB_CODE_MND_TOO_MANY_COLUMNS;
|
||||||
|
}
|
||||||
|
|
||||||
SSTableObj * pStable = calloc(1, sizeof(SSTableObj));
|
SSTableObj * pStable = calloc(1, sizeof(SSTableObj));
|
||||||
if (pStable == NULL) {
|
if (pStable == NULL) {
|
||||||
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
|
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
|
||||||
|
@ -1050,10 +1063,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
|
||||||
pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
|
pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
|
||||||
pStable->sversion = 0;
|
pStable->sversion = 0;
|
||||||
pStable->tversion = 0;
|
pStable->tversion = 0;
|
||||||
pStable->numOfColumns = htons(pCreate->numOfColumns);
|
pStable->numOfColumns = numOfColumns;
|
||||||
pStable->numOfTags = htons(pCreate->numOfTags);
|
pStable->numOfTags = numOfTags;
|
||||||
|
|
||||||
int32_t numOfCols = pStable->numOfColumns + pStable->numOfTags;
|
|
||||||
int32_t schemaSize = numOfCols * sizeof(SSchema);
|
int32_t schemaSize = numOfCols * sizeof(SSchema);
|
||||||
pStable->schema = (SSchema *)calloc(1, schemaSize);
|
pStable->schema = (SSchema *)calloc(1, schemaSize);
|
||||||
if (pStable->schema == NULL) {
|
if (pStable->schema == NULL) {
|
||||||
|
@ -1064,11 +1076,6 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
|
||||||
|
|
||||||
memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema));
|
memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema));
|
||||||
|
|
||||||
if (pStable->numOfColumns > TSDB_MAX_COLUMNS || pStable->numOfTags > TSDB_MAX_TAGS) {
|
|
||||||
mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
|
|
||||||
return TSDB_CODE_MND_INVALID_TABLE_NAME;
|
|
||||||
}
|
|
||||||
|
|
||||||
pStable->nextColId = 0;
|
pStable->nextColId = 0;
|
||||||
|
|
||||||
for (int32_t col = 0; col < numOfCols; col++) {
|
for (int32_t col = 0; col < numOfCols; col++) {
|
||||||
|
@ -1340,6 +1347,11 @@ static int32_t mnodeAddSuperTableColumn(SMnodeMsg *pMsg, SSchema schema[], int32
|
||||||
return TSDB_CODE_MND_APP_ERROR;
|
return TSDB_CODE_MND_APP_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (pStable->numOfColumns + ncols + pStable->numOfTags > TSDB_MAX_COLUMNS) {
|
||||||
|
mError("msg:%p, app:%p stable:%s, add column, too many columns", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId);
|
||||||
|
return TSDB_CODE_MND_TOO_MANY_COLUMNS;
|
||||||
|
}
|
||||||
|
|
||||||
for (int32_t i = 0; i < ncols; i++) {
|
for (int32_t i = 0; i < ncols; i++) {
|
||||||
if (mnodeFindSuperTableColumnIndex(pStable, schema[i].name) > 0) {
|
if (mnodeFindSuperTableColumnIndex(pStable, schema[i].name) > 0) {
|
||||||
mError("msg:%p, app:%p stable:%s, add column, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle,
|
mError("msg:%p, app:%p stable:%s, add column, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle,
|
||||||
|
|
|
@ -994,6 +994,7 @@ void mnodeSendSyncVgroupMsg(SVgObj *pVgroup) {
|
||||||
mDebug("vgId:%d, send sync all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes,
|
mDebug("vgId:%d, send sync all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes,
|
||||||
pVgroup->dbName);
|
pVgroup->dbName);
|
||||||
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
|
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
|
||||||
|
if (pVgroup->vnodeGid[i].role != TAOS_SYNC_ROLE_SLAVE) continue;
|
||||||
SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
|
SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
|
||||||
mDebug("vgId:%d, index:%d, send sync vnode msg to dnode %s", pVgroup->vgId, i,
|
mDebug("vgId:%d, index:%d, send sync vnode msg to dnode %s", pVgroup->vgId, i,
|
||||||
pVgroup->vnodeGid[i].pDnode->dnodeEp);
|
pVgroup->vnodeGid[i].pDnode->dnodeEp);
|
||||||
|
|
|
@ -86,7 +86,8 @@ typedef struct SResultRow {
|
||||||
bool closed; // this result status: closed or opened
|
bool closed; // this result status: closed or opened
|
||||||
uint32_t numOfRows; // number of rows of current time window
|
uint32_t numOfRows; // number of rows of current time window
|
||||||
SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo
|
SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo
|
||||||
union {STimeWindow win; char* key;}; // start key of current result row
|
STimeWindow win;
|
||||||
|
char* key; // start key of current result row
|
||||||
} SResultRow;
|
} SResultRow;
|
||||||
|
|
||||||
typedef struct SGroupResInfo {
|
typedef struct SGroupResInfo {
|
||||||
|
@ -196,6 +197,8 @@ typedef struct SQueryAttr {
|
||||||
bool needReverseScan; // need reverse scan
|
bool needReverseScan; // need reverse scan
|
||||||
int32_t interBufSize; // intermediate buffer sizse
|
int32_t interBufSize; // intermediate buffer sizse
|
||||||
|
|
||||||
|
int32_t havingNum; // having expr number
|
||||||
|
|
||||||
SOrderVal order;
|
SOrderVal order;
|
||||||
int16_t numOfCols;
|
int16_t numOfCols;
|
||||||
int16_t numOfTags;
|
int16_t numOfTags;
|
||||||
|
@ -297,6 +300,7 @@ enum OPERATOR_TYPE_E {
|
||||||
OP_DummyInput = 16, //TODO remove it after fully refactor.
|
OP_DummyInput = 16, //TODO remove it after fully refactor.
|
||||||
OP_MultiwaySort = 17, // multi-way data merge into one input stream.
|
OP_MultiwaySort = 17, // multi-way data merge into one input stream.
|
||||||
OP_GlobalAggregate = 18, // global merge for the multi-way data sources.
|
OP_GlobalAggregate = 18, // global merge for the multi-way data sources.
|
||||||
|
OP_Having = 19,
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef struct SOperatorInfo {
|
typedef struct SOperatorInfo {
|
||||||
|
@ -436,6 +440,11 @@ typedef struct SSLimitOperatorInfo {
|
||||||
SArray *orderColumnList;
|
SArray *orderColumnList;
|
||||||
} SSLimitOperatorInfo;
|
} SSLimitOperatorInfo;
|
||||||
|
|
||||||
|
typedef struct SHavingOperatorInfo {
|
||||||
|
SArray* fp;
|
||||||
|
} SHavingOperatorInfo;
|
||||||
|
|
||||||
|
|
||||||
typedef struct SFillOperatorInfo {
|
typedef struct SFillOperatorInfo {
|
||||||
SFillInfo *pFillInfo;
|
SFillInfo *pFillInfo;
|
||||||
SSDataBlock *pRes;
|
SSDataBlock *pRes;
|
||||||
|
@ -497,6 +506,7 @@ SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SEx
|
||||||
int32_t numOfRows, void* merger, bool groupMix);
|
int32_t numOfRows, void* merger, bool groupMix);
|
||||||
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param);
|
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param);
|
||||||
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger);
|
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger);
|
||||||
|
SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||||
|
|
||||||
SSDataBlock* doGlobalAggregate(void* param, bool* newgroup);
|
SSDataBlock* doGlobalAggregate(void* param, bool* newgroup);
|
||||||
SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup);
|
SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup);
|
||||||
|
|
|
@ -98,6 +98,7 @@ typedef struct SSqlNode {
|
||||||
SLimitVal limit; // limit offset [optional]
|
SLimitVal limit; // limit offset [optional]
|
||||||
SLimitVal slimit; // group limit offset [optional]
|
SLimitVal slimit; // group limit offset [optional]
|
||||||
SStrToken sqlstr; // sql string in select clause
|
SStrToken sqlstr; // sql string in select clause
|
||||||
|
struct tSqlExpr *pHaving; // having clause [optional]
|
||||||
} SSqlNode;
|
} SSqlNode;
|
||||||
|
|
||||||
typedef struct STableNamePair {
|
typedef struct STableNamePair {
|
||||||
|
@ -269,7 +270,8 @@ void tSqlExprListDestroy(SArray *pList);
|
||||||
|
|
||||||
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
|
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
|
||||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps,
|
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps,
|
||||||
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit);
|
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
|
||||||
|
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);
|
||||||
|
|
||||||
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SSqlNode *pSelect, int32_t type);
|
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SSqlNode *pSelect, int32_t type);
|
||||||
|
|
||||||
|
|
|
@ -453,7 +453,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
|
||||||
%type select {SSqlNode*}
|
%type select {SSqlNode*}
|
||||||
%destructor select {destroySqlNode($$);}
|
%destructor select {destroySqlNode($$);}
|
||||||
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
|
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
|
||||||
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G);
|
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N);
|
||||||
}
|
}
|
||||||
|
|
||||||
select(A) ::= LP select(B) RP. {A = B;}
|
select(A) ::= LP select(B) RP. {A = B;}
|
||||||
|
@ -471,7 +471,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
|
||||||
// select client_version()
|
// select client_version()
|
||||||
// select server_state()
|
// select server_state()
|
||||||
select(A) ::= SELECT(T) selcollist(W). {
|
select(A) ::= SELECT(T) selcollist(W). {
|
||||||
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
|
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
// selcollist is a list of expressions that are to become the return
|
// selcollist is a list of expressions that are to become the return
|
||||||
|
|
|
@ -2765,14 +2765,16 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
|
||||||
SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
|
SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
|
||||||
|
|
||||||
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
|
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
|
||||||
|
pInfo->stage += 1;
|
||||||
|
|
||||||
// all data are null, set it completed
|
// all data are null, set it completed
|
||||||
if (pInfo->numOfElems == 0) {
|
if (pInfo->numOfElems == 0) {
|
||||||
pResInfo->complete = true;
|
pResInfo->complete = true;
|
||||||
|
|
||||||
|
return;
|
||||||
} else {
|
} else {
|
||||||
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
|
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
|
||||||
}
|
}
|
||||||
|
|
||||||
pInfo->stage += 1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// the first stage, only acquire the min/max value
|
// the first stage, only acquire the min/max value
|
||||||
|
@ -2851,14 +2853,16 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
||||||
SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
|
SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
|
||||||
|
|
||||||
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
|
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
|
||||||
|
pInfo->stage += 1;
|
||||||
|
|
||||||
// all data are null, set it completed
|
// all data are null, set it completed
|
||||||
if (pInfo->numOfElems == 0) {
|
if (pInfo->numOfElems == 0) {
|
||||||
pResInfo->complete = true;
|
pResInfo->complete = true;
|
||||||
|
|
||||||
|
return;
|
||||||
} else {
|
} else {
|
||||||
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
|
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
|
||||||
}
|
}
|
||||||
|
|
||||||
pInfo->stage += 1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pInfo->stage == 0) {
|
if (pInfo->stage == 0) {
|
||||||
|
|
|
@@ -97,6 +97,27 @@ int32_t getMaximumIdleDurationSec() {
   return tsShellActivityTimer * 2;
 }

+int64_t genQueryId(void) {
+  int64_t uid = 0;
+  int64_t did = tsDnodeId;
+
+  uid = did << 54;
+
+  int64_t pid = ((int64_t)taosGetPId()) & 0x3FF;
+
+  uid |= pid << 44;
+
+  int64_t ts = taosGetTimestampMs() & 0x1FFFFFFFF;
+
+  uid |= ts << 11;
+
+  int64_t sid = atomic_add_fetch_64(&queryHandleId, 1) & 0x7FF;
+
+  uid |= sid;
+
+  return uid;
+}
+
 static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) {
   int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
   if (pQueryAttr->interval.intervalUnit != 'n' && pQueryAttr->interval.intervalUnit != 'y') {
@@ -1734,6 +1755,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
         pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2);
       }

+      {
+
+        // if (pQueryAttr->limit.offset > 0) {
+        //   pRuntimeEnv->proot = createOffsetOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot);
+        // }
+      }
       break;
     }

@@ -1742,6 +1769,13 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
       break;
     }

+    case OP_Having: {
+      if (pQueryAttr->havingNum > 0) {
+        pRuntimeEnv->proot = createHavingOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
+      }
+      break;
+    }
+
     case OP_Fill: {
       SOperatorInfo* pInfo = pRuntimeEnv->proot;
       pRuntimeEnv->proot = createFillOperatorInfo(pRuntimeEnv, pInfo, pInfo->pExpr, pInfo->numOfOutput);
@@ -1799,6 +1833,17 @@ static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) {
   assert(pMemRef->ref == 0 && pMemRef->snapshot.imem == NULL && pMemRef->snapshot.mem == NULL);
 }

+static void destroyTsComp(SQueryRuntimeEnv *pRuntimeEnv, SQueryAttr *pQueryAttr) {
+  if (pQueryAttr->tsCompQuery) {
+    SColumnInfoData* pColInfoData = taosArrayGet(pRuntimeEnv->outputBuf->pDataBlock, 0);
+    FILE *f = *(FILE **)pColInfoData->pData;  // TODO refactor
+    if (f) {
+      fclose(f);
+      *(FILE **)pColInfoData->pData = NULL;
+    }
+  }
+}
+
 static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
   SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
   SQInfo* pQInfo = (SQInfo*) pRuntimeEnv->qinfo;
@@ -1817,6 +1862,8 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
   destroyResultBuf(pRuntimeEnv->pResultBuf);
   doFreeQueryHandle(pRuntimeEnv);

+  destroyTsComp(pRuntimeEnv, pQueryAttr);
+
   pRuntimeEnv->pTsBuf = tsBufDestroy(pRuntimeEnv->pTsBuf);

   tfree(pRuntimeEnv->keyBuf);
@@ -1826,14 +1873,15 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
   taosHashCleanup(pRuntimeEnv->pResultRowHashTable);
   pRuntimeEnv->pResultRowHashTable = NULL;

-  pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool);
-  taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult);
-  pRuntimeEnv->prevResult = NULL;
-
   taosHashCleanup(pRuntimeEnv->pTableRetrieveTsMap);
   pRuntimeEnv->pTableRetrieveTsMap = NULL;

   destroyOperatorInfo(pRuntimeEnv->proot);

+  pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool);
+  taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult);
+  pRuntimeEnv->prevResult = NULL;
+
 }

 static bool needBuildResAfterQueryComplete(SQInfo* pQInfo) {
@@ -2005,6 +2053,40 @@ static bool onlyFirstQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQu

 static bool onlyLastQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQueryAttr, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); }

+static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) {
+  bool hasFirstLastFunc = false;
+  bool hasOtherFunc = false;
+
+  if (status == BLK_DATA_ALL_NEEDED || status == BLK_DATA_DISCARD) {
+    return status;
+  }
+
+  for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
+    int32_t functionId = pQuery->pExpr1[i].base.functionId;
+
+    if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG ||
+        functionId == TSDB_FUNC_TAG_DUMMY) {
+      continue;
+    }
+
+    if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) {
+      hasFirstLastFunc = true;
+    } else {
+      hasOtherFunc = true;
+    }
+  }
+
+  if (hasFirstLastFunc && status == BLK_DATA_NO_NEEDED) {
+    if (!hasOtherFunc) {
+      return BLK_DATA_DISCARD;
+    } else {
+      return BLK_DATA_ALL_NEEDED;
+    }
+  }
+
+  return status;
+}
+
 static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) {
   SQueryAttr* pQueryAttr = &pQInfo->query;
   size_t t = taosArrayGetSize(pQueryAttr->tableGroupInfo.pGroupList);
@@ -2508,7 +2590,9 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
   }

   SDataBlockInfo* pBlockInfo = &pBlock->info;
-  if ((*status) == BLK_DATA_NO_NEEDED) {
+  *status = updateBlockLoadStatus(pRuntimeEnv->pQueryAttr, *status);
+
+  if ((*status) == BLK_DATA_NO_NEEDED || (*status) == BLK_DATA_DISCARD) {
     qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey,
            pBlockInfo->window.ekey, pBlockInfo->rows);
     pCost->discardBlocks += 1;
@@ -2529,6 +2613,21 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
     tsdbRetrieveDataBlockStatisInfo(pTableScanInfo->pQueryHandle, &pBlock->pBlockStatis);

     if (pQueryAttr->topBotQuery && pBlock->pBlockStatis != NULL) {
+      { // set previous window
+        if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) {
+          SResultRow* pResult = NULL;
+
+          bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
+          TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey;
+
+          STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr);
+          if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId,
+                                      pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
+                                      pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
+            longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+          }
+        }
+      }
       bool load = false;
       for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
         int32_t functionId = pTableScanInfo->pCtx[i].functionId;
@@ -4774,6 +4873,111 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) {
   return pBlock;
 }

+
+bool doFilterData(SColumnInfoData* p, int32_t rid, SColumnFilterElem *filterElem, __filter_func_t fp) {
+  char* input = p->pData + p->info.bytes * rid;
+  bool isnull = isNull(input, p->info.type);
+  if (isnull) {
+    return (fp == isNullOperator) ? true : false;
+  } else {
+    if (fp == notNullOperator) {
+      return true;
+    } else if (fp == isNullOperator) {
+      return false;
+    }
+  }
+
+  if (fp(filterElem, input, input, p->info.type)) {
+    return true;
+  }
+
+  return false;
+}
+
+
+void doHavingImpl(SOperatorInfo *pOperator, SSDataBlock *pBlock) {
+  SHavingOperatorInfo* pInfo = pOperator->info;
+  int32_t f = 0;
+  int32_t allQualified = 1;
+  int32_t exprQualified = 0;
+
+  for (int32_t r = 0; r < pBlock->info.rows; ++r) {
+    allQualified = 1;
+
+    for (int32_t i = 0; i < pOperator->numOfOutput; ++i) {
+      SExprInfo* pExprInfo = &(pOperator->pExpr[i]);
+      if (pExprInfo->base.pFilter == NULL) {
+        continue;
+      }
+
+      SArray* es = taosArrayGetP(pInfo->fp, i);
+      assert(es);
+
+      size_t fpNum = taosArrayGetSize(es);
+
+      exprQualified = 0;
+      for (int32_t m = 0; m < fpNum; ++m) {
+        __filter_func_t fp = taosArrayGetP(es, m);
+
+        assert(fp);
+
+        //SColIndex* colIdx = &pExprInfo->base.colInfo;
+        SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i);
+
+        SColumnFilterElem filterElem = {.filterInfo = pExprInfo->base.pFilter[m]};
+
+        if (doFilterData(p, r, &filterElem, fp)) {
+          exprQualified = 1;
+          break;
+        }
+      }
+
+      if (exprQualified == 0) {
+        allQualified = 0;
+        break;
+      }
+    }
+
+    if (allQualified == 0) {
+      continue;
+    }
+
+    for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
+      SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
+
+      int16_t bytes = pColInfoData->info.bytes;
+      memmove(pColInfoData->pData + f * bytes, pColInfoData->pData + bytes * r, bytes);
+    }
+
+    ++f;
+  }
+
+  pBlock->info.rows = f;
+}
+
+static SSDataBlock* doHaving(void* param, bool* newgroup) {
+  SOperatorInfo *pOperator = (SOperatorInfo *)param;
+  if (pOperator->status == OP_EXEC_DONE) {
+    return NULL;
+  }
+
+  SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
+
+  while (1) {
+    SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup);
+    if (pBlock == NULL) {
+      setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
+      pOperator->status = OP_EXEC_DONE;
+      return NULL;
+    }
+
+    doHavingImpl(pOperator, pBlock);
+
+    return pBlock;
+  }
+}
+
+
 static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
   SOperatorInfo* pOperator = (SOperatorInfo*) param;
   if (pOperator->status == OP_EXEC_DONE) {
@@ -5173,6 +5377,13 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
   pInfo->pRes = destroyOutputBuf(pInfo->pRes);
 }

+static void destroyHavingOperatorInfo(void* param, int32_t numOfOutput) {
+  SHavingOperatorInfo* pInfo = (SHavingOperatorInfo*) param;
+  if (pInfo->fp) {
+    taosArrayDestroy(pInfo->fp);
+  }
+}
+
 SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
   SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo));

@@ -5229,6 +5440,83 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
   return pOperator;
 }

+
+int32_t initFilterFp(SExprInfo* pExpr, int32_t numOfOutput, SArray** fps) {
+  __filter_func_t fp = NULL;
+
+  *fps = taosArrayInit(numOfOutput, sizeof(SArray*));
+  if (*fps == NULL) {
+    return TSDB_CODE_TSC_OUT_OF_MEMORY;
+  }
+
+  for (int32_t i = 0; i < numOfOutput; ++i) {
+    SExprInfo* pExprInfo = &(pExpr[i]);
+    SColIndex* colIdx = &pExprInfo->base.colInfo;
+
+    if (pExprInfo->base.pFilter == NULL || !TSDB_COL_IS_NORMAL_COL(colIdx->flag)) {
+      taosArrayPush(*fps, &fp);
+
+      continue;
+    }
+
+    int32_t filterNum = pExprInfo->base.filterNum;
+    SColumnFilterInfo *filterInfo = pExprInfo->base.pFilter;
+
+    SArray* es = taosArrayInit(filterNum, sizeof(__filter_func_t));
+
+    for (int32_t j = 0; j < filterNum; ++j) {
+      int32_t lower = filterInfo->lowerRelOptr;
+      int32_t upper = filterInfo->upperRelOptr;
+      if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
+        qError("invalid rel optr");
+        taosArrayDestroy(es);
+        return TSDB_CODE_QRY_APP_ERROR;
+      }
+
+      __filter_func_t ffp = getFilterOperator(lower, upper);
+      if (ffp == NULL) {
+        qError("invalid filter info");
+        taosArrayDestroy(es);
+        return TSDB_CODE_QRY_APP_ERROR;
+      }
+
+      taosArrayPush(es, &ffp);
+
+      filterInfo += 1;
+    }
+
+    taosArrayPush(*fps, &es);
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
+  SHavingOperatorInfo* pInfo = calloc(1, sizeof(SHavingOperatorInfo));
+
+  initFilterFp(pExpr, numOfOutput, &pInfo->fp);
+
+  assert(pInfo->fp);
+
+  SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
+
+  pOperator->name = "HavingOperator";
+  pOperator->operatorType = OP_Having;
+  pOperator->blockingOptr = false;
+  pOperator->status = OP_IN_EXECUTING;
+  pOperator->numOfOutput = numOfOutput;
+  pOperator->pExpr = pExpr;
+  pOperator->upstream = upstream;
+  pOperator->exec = doHaving;
+  pOperator->info = pInfo;
+  pOperator->pRuntimeEnv = pRuntimeEnv;
+  pOperator->cleanup = destroyHavingOperatorInfo;
+
+  return pOperator;
+}
+
+
 SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) {
   SLimitOperatorInfo* pInfo = calloc(1, sizeof(SLimitOperatorInfo));
   pInfo->limit = pRuntimeEnv->pQueryAttr->limit.limit;
@@ -5834,9 +6122,35 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
     pExprMsg->functionId = htons(pExprMsg->functionId);
     pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
     pExprMsg->resColId = htons(pExprMsg->resColId);
+    pExprMsg->filterNum = htonl(pExprMsg->filterNum);
+
     pMsg += sizeof(SSqlExpr);

+    SColumnFilterInfo* pExprFilterInfo = pExprMsg->pFilter;
+
+    pMsg += sizeof(SColumnFilterInfo) * pExprMsg->filterNum;
+
+    for (int32_t f = 0; f < pExprMsg->filterNum; ++f) {
+      SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pExprFilterInfo;
+
+      pFilterMsg->filterstr = htons(pFilterMsg->filterstr);
+
+      if (pFilterMsg->filterstr) {
+        pFilterMsg->len = htobe64(pFilterMsg->len);
+
+        pFilterMsg->pz = (int64_t)pMsg;
+        pMsg += (pFilterMsg->len + 1);
+      } else {
+        pFilterMsg->lowerBndi = htobe64(pFilterMsg->lowerBndi);
+        pFilterMsg->upperBndi = htobe64(pFilterMsg->upperBndi);
+      }
+
+      pFilterMsg->lowerRelOptr = htons(pFilterMsg->lowerRelOptr);
+      pFilterMsg->upperRelOptr = htons(pFilterMsg->upperRelOptr);
+
+      pExprFilterInfo++;
+    }
+
     for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
       pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType);
       pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen);
@@ -6032,6 +6346,41 @@ _cleanup:
   return code;
 }

+int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
+  if (filterNum <= 0) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  *dst = calloc(filterNum, sizeof(*src));
+  if (*dst == NULL) {
+    return TSDB_CODE_QRY_OUT_OF_MEMORY;
+  }
+
+  memcpy(*dst, src, sizeof(*src) * filterNum);
+
+  for (int32_t i = 0; i < filterNum; i++) {
+    if ((*dst)[i].filterstr && dst[i]->len > 0) {
+      void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
+
+      if (pz == NULL) {
+        if (i == 0) {
+          free(*dst);
+        } else {
+          freeColumnFilterInfo(*dst, i);
+        }
+
+        return TSDB_CODE_QRY_OUT_OF_MEMORY;
+      }
+
+      memcpy(pz, (void *)src->pz, (size_t)src->len + 1);
+
+      (*dst)[i].pz = (int64_t)pz;
+    }
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t buildArithmeticExprFromMsg(SExprInfo *pExprInfo, void *pQueryMsg) {
   qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg);

@@ -6144,6 +6493,14 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
         type = s->type;
         bytes = s->bytes;
       }
+
+      if (pExprs[i].base.filterNum > 0) {
+        int32_t ret = cloneExprFilterInfo(&pExprs[i].base.pFilter, pExprMsg[i]->pFilter,
+                                          pExprMsg[i]->filterNum);
+        if (ret) {
+          return ret;
+        }
+      }
     }

     int32_t param = (int32_t)pExprs[i].base.param[0].i64;
@@ -6384,6 +6741,8 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
     goto _cleanup_qinfo;
   }

+  pQInfo->qId = *qId;
+
   // to make sure third party won't overwrite this structure
   pQInfo->signature = pQInfo;
   SQueryAttr* pQueryAttr = &pQInfo->query;
@@ -6447,6 +6806,10 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
     if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) {
       pQueryAttr->tagLen += pExprs[col].base.resBytes;
     }
+
+    if (pExprs[col].base.pFilter) {
+      ++pQueryAttr->havingNum;
+    }
   }

   doUpdateExprColumnIndex(pQueryAttr);
@@ -6528,8 +6891,6 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
   // todo refactor
   pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX);

-  pQInfo->qId = atomic_add_fetch_64(&queryHandleId, 1);
-  *qId = pQInfo->qId;
   qDebug("qmsg:%p QInfo:%" PRIu64 "-%p created", pQueryMsg, pQInfo->qId, pQInfo);
   return pQInfo;

@@ -6548,6 +6909,10 @@ _cleanup_qinfo:
       tExprTreeDestroy(pExprInfo->pExpr, NULL);
       pExprInfo->pExpr = NULL;
     }
+
+    if (pExprInfo->base.pFilter) {
+      freeColumnFilterInfo(pExprInfo->base.pFilter, pExprInfo->base.filterNum);
+    }
   }

   tfree(pExprs);
@@ -6635,7 +7000,7 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
   }

   for (int32_t i = 0; i < numOfFilters; i++) {
-    if (pFilter[i].filterstr) {
+    if (pFilter[i].filterstr && pFilter[i].pz) {
       free((void*)(pFilter[i].pz));
     }
   }
@@ -6677,6 +7042,10 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) {
     if (pExprInfo[i].pExpr != NULL) {
       tExprTreeDestroy(pExprInfo[i].pExpr, NULL);
     }
+
+    if (pExprInfo[i].base.pFilter) {
+      freeColumnFilterInfo(pExprInfo[i].base.pFilter, pExprInfo[i].base.filterNum);
+    }
   }

   tfree(pExprInfo);
@@ -6692,6 +7061,9 @@ void freeQInfo(SQInfo *pQInfo) {

   SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
   releaseQueryBuf(pRuntimeEnv->tableqinfoGroupInfo.numOfTables);
+
+  doDestroyTableQueryInfo(&pRuntimeEnv->tableqinfoGroupInfo);
+
   teardownQueryRuntimeEnv(&pQInfo->runtimeEnv);

   SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
@@ -6729,7 +7101,6 @@ void freeQInfo(SQInfo *pQInfo) {

   tsdbDestroyTableGroup(&pQueryAttr->tableGroupInfo);

-  doDestroyTableQueryInfo(&pRuntimeEnv->tableqinfoGroupInfo);

   tfree(pQInfo->pBuf);
   tfree(pQInfo->sql);
@@ -6777,6 +7148,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
       }

       fclose(f);
+      *(FILE **)pColInfoData->pData = NULL;
     }

     // all data returned, set query over
@@ -267,6 +267,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) {
     size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file);
     if (retVal <= 0) {  // failed to write to buffer, may be not enough space
       ret = TAOS_SYSTEM_ERROR(errno);
+      pMemBuffer->pHead = first;
       return ret;
     }

@@ -124,7 +124,7 @@ bool greaterEqualOperator(SColumnFilterElem *pFilter, const char *minval, const
 bool equalOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) {
   SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo;

-  if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
+  if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) {
     int64_t minv = -1, maxv = -1;
     GET_TYPED_DATA(minv, int64_t, type, minval);
     GET_TYPED_DATA(maxv, int64_t, type, maxval);
@@ -202,7 +202,7 @@ bool likeOperator(SColumnFilterElem *pFilter, const char *minval, const char *ma
 bool notEqualOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) {
   SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo;

-  if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
+  if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) {
     int64_t minv = -1, maxv = -1;
     GET_TYPED_DATA(minv, int64_t, type, minval);
     GET_TYPED_DATA(maxv, int64_t, type, maxval);
@@ -287,6 +287,10 @@ static void lruListMoveToFront(SList *pList, SPageInfo* pi) {
   tdListPrependNode(pList, pi->pn);
 }

+static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) {
+  return pageSize + POINTER_BYTES + 2 + sizeof(tFilePage);
+}
+
 tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t* pageId) {
   pResultBuf->statis.getPages += 1;

@@ -311,7 +315,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32

   // allocate buf
   if (availablePage == NULL) {
-    pi->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES + 2);  // add extract bytes in case of zipped buffer increased.
+    pi->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize));  // add extract bytes in case of zipped buffer increased.
   } else {
     pi->pData = availablePage;
   }
@@ -355,7 +359,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) {
   }

   if (availablePage == NULL) {
-    (*pi)->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES);
+    (*pi)->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize));
   } else {
     (*pi)->pData = availablePage;
   }
@@ -310,6 +310,77 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
   return pExpr;
 }

+static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) {
+  return (left->type == right->type && left->n == right->n && strncasecmp(left->z, right->z, left->n) == 0) ? 0 : 1;
+}
+
+
+int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
+  if ((left == NULL && right) || (left && right == NULL)) {
+    return 1;
+  }
+
+  if (left->type != right->type) {
+    return 1;
+  }
+
+  if (left->tokenId != right->tokenId) {
+    return 1;
+  }
+
+  if (left->functionId != right->functionId) {
+    return 1;
+  }
+
+  if ((left->pLeft && right->pLeft == NULL)
+    || (left->pLeft == NULL && right->pLeft)
+    || (left->pRight && right->pRight == NULL)
+    || (left->pRight == NULL && right->pRight)
+    || (left->pParam && right->pParam == NULL)
+    || (left->pParam == NULL && right->pParam)) {
+    return 1;
+  }
+
+  if (tVariantCompare(&left->value, &right->value)) {
+    return 1;
+  }
+
+  if (tStrTokenCompare(&left->colInfo, &right->colInfo)) {
+    return 1;
+  }
+
+
+  if (right->pParam && left->pParam) {
+    size_t size = taosArrayGetSize(right->pParam);
+    if (left->pParam && taosArrayGetSize(left->pParam) != size) {
+      return 1;
+    }
+
+    for (int32_t i = 0; i < size; i++) {
+      tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i);
+      tSqlExpr* pSubLeft = pLeftElem->pNode;
+      tSqlExprItem* pRightElem = taosArrayGet(left->pParam, i);
+      tSqlExpr* pSubRight = pRightElem->pNode;
+
+      if (tSqlExprCompare(pSubLeft, pSubRight)) {
+        return 1;
+      }
+    }
+  }
+
+  if (left->pLeft && tSqlExprCompare(left->pLeft, right->pLeft)) {
+    return 1;
+  }
+
+  if (left->pRight && tSqlExprCompare(left->pRight, right->pRight)) {
+    return 1;
+  }
+
+  return 0;
+}
+
+
+
 tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) {
   tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));

@@ -645,7 +716,7 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
 SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
                            SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
                            SSessionWindowVal *pSession, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit,
-                           SLimitVal *psLimit) {
+                           SLimitVal *psLimit, tSqlExpr *pHaving) {
   assert(pSelNodeList != NULL);

   SSqlNode *pSqlNode = calloc(1, sizeof(SSqlNode));
@@ -660,6 +731,7 @@ SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelat
   pSqlNode->pSortOrder = pSortOrder;
   pSqlNode->pWhere = pWhere;
   pSqlNode->fillType = pFill;
+  pSqlNode->pHaving = pHaving;

   if (pLimit != NULL) {
     pSqlNode->limit = *pLimit;
@@ -560,7 +560,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenId) {
   return 0;
 }

-SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgnoreToken, uint32_t* ignoreTokenTypes) {
+SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr) {
   SStrToken t0 = {0};

   // here we reach the end of sql string, null-terminated string
@@ -585,7 +585,10 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn
     }

     t0.n = tSQLGetToken(&str[*i], &t0.type);
+    break;
+
+    // not support user specfied ignored symbol list
+#if 0
     bool ignore = false;
     for (uint32_t k = 0; k < numOfIgnoreToken; k++) {
       if (t0.type == ignoreTokenTypes[k]) {
@@ -597,6 +600,7 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn
     if (!ignore) {
       break;
     }
+#endif
   }

   if (t0.type == TK_SEMI) {
@@ -66,8 +66,8 @@ void cleanupResultRowInfo(SResultRowInfo *pResultRowInfo) {
     return;
   }

-  if (pResultRowInfo->type == TSDB_DATA_TYPE_BINARY || pResultRowInfo->type == TSDB_DATA_TYPE_NCHAR) {
   for(int32_t i = 0; i < pResultRowInfo->size; ++i) {
+    if (pResultRowInfo->pResult[i]) {
       tfree(pResultRowInfo->pResult[i]->key);
     }
   }
@@ -153,11 +153,8 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
   pResultRow->offset = -1;
   pResultRow->closed = false;

-  if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
   tfree(pResultRow->key);
-  } else {
   pResultRow->win = TSWINDOW_INITIALIZER;
-  }
 }

 // TODO refactor: use macro
@@ -198,6 +198,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
   return code;
 }

+
 bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
   SQInfo *pQInfo = (SQInfo *)qinfo;
   assert(pQInfo && pQInfo->signature == pQInfo);
@@ -476,7 +477,7 @@ void qCleanupQueryMgmt(void* pQMgmt) {
   qDebug("vgId:%d, queryMgmt cleanup completed", vgId);
 }

-void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) {
+void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) {
   if (pMgmt == NULL) {
     terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
     return NULL;
@@ -517,8 +518,7 @@ void** qAcquireQInfo(void* pMgmt, uint64_t _key) {
     return NULL;
   }

-  TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key;
-  void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE));
+  void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &_key, sizeof(_key));
   if (handle == NULL || *handle == NULL) {
     terrno = TSDB_CODE_QRY_INVALID_QHANDLE;
     return NULL;
@@ -2594,7 +2594,7 @@ static void yy_reduce(
         break;
       case 156: /* select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */
 {
-  yylhsminor.yy116 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy159, yymsp[-10].minor.yy236, yymsp[-9].minor.yy118, yymsp[-4].minor.yy159, yymsp[-3].minor.yy159, &yymsp[-8].minor.yy184, &yymsp[-7].minor.yy249, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy159, &yymsp[0].minor.yy440, &yymsp[-1].minor.yy440);
+  yylhsminor.yy116 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy159, yymsp[-10].minor.yy236, yymsp[-9].minor.yy118, yymsp[-4].minor.yy159, yymsp[-3].minor.yy159, &yymsp[-8].minor.yy184, &yymsp[-7].minor.yy249, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy159, &yymsp[0].minor.yy440, &yymsp[-1].minor.yy440, yymsp[-2].minor.yy118);
 }
   yymsp[-12].minor.yy116 = yylhsminor.yy116;
         break;
@@ -2614,7 +2614,7 @@ static void yy_reduce(
         break;
       case 161: /* select ::= SELECT selcollist */
 {
-  yylhsminor.yy116 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy159, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+  yylhsminor.yy116 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy159, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
 }
   yymsp[-1].minor.yy116 = yylhsminor.yy116;
         break;
@ -10,7 +10,7 @@ namespace {
|
||||||
// simple test
|
// simple test
|
||||||
void simpleTest() {
|
void simpleTest() {
|
||||||
SDiskbasedResultBuf* pResultBuf = NULL;
|
SDiskbasedResultBuf* pResultBuf = NULL;
|
||||||
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4096, NULL);
|
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4096, 1);
|
||||||
|
|
||||||
int32_t pageId = 0;
|
int32_t pageId = 0;
|
||||||
int32_t groupId = 0;
|
int32_t groupId = 0;
|
||||||
|
@ -52,7 +52,7 @@ void simpleTest() {
|
||||||
|
|
||||||
void writeDownTest() {
|
void writeDownTest() {
|
||||||
SDiskbasedResultBuf* pResultBuf = NULL;
|
SDiskbasedResultBuf* pResultBuf = NULL;
|
||||||
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
|
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, 1);
|
||||||
|
|
||||||
int32_t pageId = 0;
|
int32_t pageId = 0;
|
||||||
int32_t writePageId = 0;
|
int32_t writePageId = 0;
|
||||||
|
@ -99,7 +99,7 @@ void writeDownTest() {
|
||||||
|
|
||||||
void recyclePageTest() {
|
void recyclePageTest() {
|
||||||
SDiskbasedResultBuf* pResultBuf = NULL;
|
SDiskbasedResultBuf* pResultBuf = NULL;
|
||||||
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
|
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, 1);
|
||||||
|
|
||||||
int32_t pageId = 0;
|
int32_t pageId = 0;
|
||||||
int32_t writePageId = 0;
|
int32_t writePageId = 0;
|
||||||
|
|
|
@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime);
|
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30);
|
||||||
if ( pRpc->pCache == NULL ) {
|
if ( pRpc->pCache == NULL ) {
|
||||||
tError("%s failed to init connection cache", pRpc->label);
|
tError("%s failed to init connection cache", pRpc->label);
|
||||||
rpcClose(pRpc);
|
rpcClose(pRpc);
|
||||||
|
@ -470,7 +470,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
|
||||||
taosTmrStopA(&pConn->pTimer);
|
taosTmrStopA(&pConn->pTimer);
|
||||||
|
|
||||||
// set the idle timer to monitor the activity
|
// set the idle timer to monitor the activity
|
||||||
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
|
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime * 30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
|
||||||
rpcSendMsgToPeer(pConn, msg, msgLen);
|
rpcSendMsgToPeer(pConn, msg, msgLen);
|
||||||
|
|
||||||
// if not set to secured, set it except in the NOT_READY case, since the client won't treat it as secured
|
// if not set to secured, set it except in the NOT_READY case, since the client won't treat it as secured
|
||||||
|
@ -997,8 +997,8 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont
|
||||||
}
|
}
|
||||||
|
|
||||||
if ( rpcIsReq(pHead->msgType) ) {
|
if ( rpcIsReq(pHead->msgType) ) {
|
||||||
terrno = rpcProcessReqHead(pConn, pHead);
|
|
||||||
pConn->connType = pRecv->connType;
|
pConn->connType = pRecv->connType;
|
||||||
|
terrno = rpcProcessReqHead(pConn, pHead);
|
||||||
|
|
||||||
// stop idle timer
|
// stop idle timer
|
||||||
taosTmrStopA(&pConn->pIdleTimer);
|
taosTmrStopA(&pConn->pIdleTimer);
|
||||||
|
@ -1367,7 +1367,7 @@ static void rpcProcessConnError(void *param, void *id) {
|
||||||
|
|
||||||
tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle);
|
tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle);
|
||||||
|
|
||||||
if (pContext->numOfTry >= pContext->epSet.numOfEps) {
|
if (pContext->numOfTry >= pContext->epSet.numOfEps || pContext->msgType == TSDB_MSG_TYPE_FETCH) {
|
||||||
rpcMsg.msgType = pContext->msgType+1;
|
rpcMsg.msgType = pContext->msgType+1;
|
||||||
rpcMsg.ahandle = pContext->ahandle;
|
rpcMsg.ahandle = pContext->ahandle;
|
||||||
rpcMsg.code = pContext->code;
|
rpcMsg.code = pContext->code;
|
||||||
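The new condition gives up as soon as the failing request is a fetch, instead of retrying it on another endpoint. A standalone sketch of such a retry guard (field names here are illustrative assumptions, not the rpc module's exact structs):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical request context; fields mirror the idea in the diff, not real definitions. */
typedef struct {
  int  numOfTry;   /* connection attempts made so far     */
  int  numOfEps;   /* endpoints available for retry       */
  bool retriable;  /* e.g. false for fetch-style requests */
} ReqCtx;

/* Return true when the error should be reported instead of retried. */
static bool shouldGiveUp(const ReqCtx *ctx) {
  return ctx->numOfTry >= ctx->numOfEps || !ctx->retriable;
}

int main(void) {
  ReqCtx fetch = {.numOfTry = 1, .numOfEps = 3, .retriable = false};
  printf("give up: %d\n", shouldGiveUp(&fetch)); /* prints 1: fetch fails fast */
  return 0;
}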
|
|
|
@ -35,7 +35,7 @@ extern "C" {
|
||||||
#define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16)
|
#define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16)
|
||||||
#define SYNC_RECV_BUFFER_SIZE (5*1024*1024)
|
#define SYNC_RECV_BUFFER_SIZE (5*1024*1024)
|
||||||
|
|
||||||
#define SYNC_MAX_FWDS 512
|
#define SYNC_MAX_FWDS 4096
|
||||||
#define SYNC_FWD_TIMER 300
|
#define SYNC_FWD_TIMER 300
|
||||||
#define SYNC_ROLE_TIMER 15000 // ms
|
#define SYNC_ROLE_TIMER 15000 // ms
|
||||||
#define SYNC_CHECK_INTERVAL 1000 // ms
|
#define SYNC_CHECK_INTERVAL 1000 // ms
|
||||||
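SYNC_MAX_FWDS is raised from 512 to 4096, presumably allowing more forward records to be pending at once. A rough sketch of a capacity-checked slot array in that spirit (hypothetical types; not the module's real layout):

#include <stdint.h>

#define SYNC_MAX_FWDS 4096

typedef struct { uint64_t version; void *mhandle; } FwdInfo;

typedef struct {
  int     fwds;                   /* number of pending forwards       */
  FwdInfo fwdInfo[SYNC_MAX_FWDS]; /* fixed-capacity pending records   */
} FwdBuffer;

/* Save a forward record; fail when the buffer is full so the caller can back off. */
static int saveFwdInfo(FwdBuffer *buf, uint64_t version, void *mhandle) {
  if (buf->fwds >= SYNC_MAX_FWDS) return -1;  /* buffer exhausted */
  FwdInfo *slot = &buf->fwdInfo[buf->fwds++];
  slot->version = version;
  slot->mhandle = mhandle;
  return 0;
}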
|
|
|
@ -409,23 +409,22 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code, bool force)
|
||||||
syncReleaseNode(pNode);
|
syncReleaseNode(pNode);
|
||||||
}
|
}
|
||||||
|
|
||||||
#if 1
|
|
||||||
void syncRecover(int64_t rid) {
|
void syncRecover(int64_t rid) {
|
||||||
SSyncPeer *pPeer;
|
SSyncPeer *pPeer;
|
||||||
|
|
||||||
SSyncNode *pNode = syncAcquireNode(rid);
|
SSyncNode *pNode = syncAcquireNode(rid);
|
||||||
if (pNode == NULL) return;
|
if (pNode == NULL) return;
|
||||||
|
|
||||||
// to do: add a few lines to check if recover is OK
|
|
||||||
// if take this node to unsync state, the whole system may not work
|
|
||||||
|
|
||||||
nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
|
nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
|
||||||
(*pNode->notifyRoleFp)(pNode->vgId, nodeRole);
|
(*pNode->notifyRoleFp)(pNode->vgId, nodeRole);
|
||||||
nodeVersion = 0;
|
|
||||||
|
|
||||||
pthread_mutex_lock(&pNode->mutex);
|
pthread_mutex_lock(&pNode->mutex);
|
||||||
|
|
||||||
|
nodeVersion = 0;
|
||||||
|
|
||||||
for (int32_t i = 0; i < pNode->replica; ++i) {
|
for (int32_t i = 0; i < pNode->replica; ++i) {
|
||||||
|
if (i == pNode->selfIndex) continue;
|
||||||
|
|
||||||
pPeer = pNode->peerInfo[i];
|
pPeer = pNode->peerInfo[i];
|
||||||
if (pPeer->peerFd >= 0) {
|
if (pPeer->peerFd >= 0) {
|
||||||
syncRestartConnection(pPeer);
|
syncRestartConnection(pPeer);
|
||||||
|
@ -436,7 +435,6 @@ void syncRecover(int64_t rid) {
|
||||||
|
|
||||||
syncReleaseNode(pNode);
|
syncReleaseNode(pNode);
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
|
|
||||||
int32_t syncGetNodesRole(int64_t rid, SNodesRole *pNodesRole) {
|
int32_t syncGetNodesRole(int64_t rid, SNodesRole *pNodesRole) {
|
||||||
SSyncNode *pNode = syncAcquireNode(rid);
|
SSyncNode *pNode = syncAcquireNode(rid);
|
||||||
|
@ -551,7 +549,10 @@ static void syncClosePeerConn(SSyncPeer *pPeer) {
|
||||||
if (pPeer->peerFd >= 0) {
|
if (pPeer->peerFd >= 0) {
|
||||||
pPeer->peerFd = -1;
|
pPeer->peerFd = -1;
|
||||||
void *pConn = pPeer->pConn;
|
void *pConn = pPeer->pConn;
|
||||||
if (pConn != NULL) syncFreeTcpConn(pPeer->pConn);
|
if (pConn != NULL) {
|
||||||
|
syncFreeTcpConn(pPeer->pConn);
|
||||||
|
pPeer->pConn = NULL;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -997,17 +998,24 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) {
|
||||||
|
|
||||||
sTrace("%s, forward is received, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len);
|
sTrace("%s, forward is received, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len);
|
||||||
|
|
||||||
|
int32_t code = 0;
|
||||||
if (nodeRole == TAOS_SYNC_ROLE_SLAVE) {
|
if (nodeRole == TAOS_SYNC_ROLE_SLAVE) {
|
||||||
// nodeVersion = pHead->version;
|
// nodeVersion = pHead->version;
|
||||||
(*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
|
code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
|
||||||
} else {
|
} else {
|
||||||
if (nodeSStatus != TAOS_SYNC_STATUS_INIT) {
|
if (nodeSStatus != TAOS_SYNC_STATUS_INIT) {
|
||||||
syncSaveIntoBuffer(pPeer, pHead);
|
code = syncSaveIntoBuffer(pPeer, pHead);
|
||||||
} else {
|
} else {
|
||||||
sError("%s, forward discarded since sstatus:%s, hver:%" PRIu64, pPeer->id, syncStatus[nodeSStatus],
|
sError("%s, forward discarded since sstatus:%s, hver:%" PRIu64, pPeer->id, syncStatus[nodeSStatus],
|
||||||
pHead->version);
|
pHead->version);
|
||||||
|
code = -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (code != 0) {
|
||||||
|
sError("%s, failed to process fwd msg, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len);
|
||||||
|
syncRestartConnection(pPeer);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void syncProcessPeersStatusMsg(SPeersStatus *pPeersStatus, SSyncPeer *pPeer) {
|
static void syncProcessPeersStatusMsg(SPeersStatus *pPeersStatus, SSyncPeer *pPeer) {
|
||||||
|
@ -1372,7 +1380,7 @@ static void syncMonitorNodeRole(void *param, void *tmrId) {
|
||||||
if (/*pPeer->role > TAOS_SYNC_ROLE_UNSYNCED && */ nodeRole > TAOS_SYNC_ROLE_UNSYNCED) continue;
|
if (/*pPeer->role > TAOS_SYNC_ROLE_UNSYNCED && */ nodeRole > TAOS_SYNC_ROLE_UNSYNCED) continue;
|
||||||
if (/*pPeer->sstatus > TAOS_SYNC_STATUS_INIT || */ nodeSStatus > TAOS_SYNC_STATUS_INIT) continue;
|
if (/*pPeer->sstatus > TAOS_SYNC_STATUS_INIT || */ nodeSStatus > TAOS_SYNC_STATUS_INIT) continue;
|
||||||
|
|
||||||
sDebug("%s, check roles since self:%s sstatus:%s, peer:%s sstatus:%s", pPeer->id, syncRole[pPeer->role],
|
sDebug("%s, check roles since peer:%s sstatus:%s, self:%s sstatus:%s", pPeer->id, syncRole[pPeer->role],
|
||||||
syncStatus[pPeer->sstatus], syncRole[nodeRole], syncStatus[nodeSStatus]);
|
syncStatus[pPeer->sstatus], syncRole[nodeRole], syncStatus[nodeSStatus]);
|
||||||
syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_CHECK_ROLE, syncGenTranId());
|
syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_CHECK_ROLE, syncGenTranId());
|
||||||
break;
|
break;
|
||||||
|
@ -1459,7 +1467,12 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle
|
||||||
|
|
||||||
if ((pNode->quorum > 1 || force) && code == 0) {
|
if ((pNode->quorum > 1 || force) && code == 0) {
|
||||||
code = syncSaveFwdInfo(pNode, pWalHead->version, mhandle);
|
code = syncSaveFwdInfo(pNode, pWalHead->version, mhandle);
|
||||||
if (code >= 0) code = 1;
|
if (code >= 0) {
|
||||||
|
code = 1;
|
||||||
|
} else {
|
||||||
|
pthread_mutex_unlock(&pNode->mutex);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t retLen = taosWriteMsg(pPeer->peerFd, pSyncHead, fwdLen);
|
int32_t retLen = taosWriteMsg(pPeer->peerFd, pSyncHead, fwdLen);
|
||||||
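The added else-branch unlocks pNode->mutex before the early return, so a failed syncSaveFwdInfo cannot leave the lock held. The same unlock-on-every-exit pattern in isolation (generic pthread code, not the project's functions):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns a negative code on failure; the mutex is released on every path. */
static int doWorkLocked(int fail) {
  pthread_mutex_lock(&lock);

  if (fail) {                      /* error path: unlock first, then bail out */
    pthread_mutex_unlock(&lock);
    return -1;
  }

  /* ... normal work under the lock ... */
  pthread_mutex_unlock(&lock);
  return 0;
}

int main(void) {
  printf("ok=%d err=%d\n", doWorkLocked(0), doWorkLocked(1));
  return 0;
}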
|
|
|
@ -526,7 +526,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) {
|
||||||
STable *pTable = pMeta->tables[i];
|
STable *pTable = pMeta->tables[i];
|
||||||
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
|
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
|
||||||
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
|
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
|
||||||
tsdbGetTableSchemaImpl(pTable, false, false, -1));
|
tsdbGetTableSchemaImpl(pTable, false, false, -1), 0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -840,7 +840,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
|
||||||
if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1;
|
if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1;
|
||||||
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) {
|
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) {
|
||||||
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
|
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
|
||||||
tsdbGetTableSchemaImpl(pTable, false, false, -1));
|
tsdbGetTableSchemaImpl(pTable, false, false, -1), 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
|
tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
|
||||||
|
|
|
@ -2861,12 +2861,6 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
|
||||||
if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL
|
if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL
|
||||||
pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows;
|
pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows;
|
||||||
}
|
}
|
||||||
|
|
||||||
SColumnInfo* pColInfo = taosArrayGet(pHandle->pColumns, i);
|
|
||||||
if (pColInfo->type == TSDB_DATA_TYPE_TIMESTAMP) {
|
|
||||||
pHandle->statis[i].min = pBlockInfo->compBlock->keyFirst;
|
|
||||||
pHandle->statis[i].max = pBlockInfo->compBlock->keyLast;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t elapsed = taosGetTimestampUs() - stime;
|
int64_t elapsed = taosGetTimestampUs() - stime;
|
||||||
|
|
|
@ -51,11 +51,9 @@ uint32_t tSQLGetToken(char *z, uint32_t *tokenType);
|
||||||
* @param str
|
* @param str
|
||||||
* @param i
|
* @param i
|
||||||
* @param isPrevOptr
|
* @param isPrevOptr
|
||||||
* @param numOfIgnoreToken
|
|
||||||
* @param ignoreTokenTypes
|
|
||||||
* @return
|
* @return
|
||||||
*/
|
*/
|
||||||
SStrToken tStrGetToken(char *str, int32_t *i, bool isPrevOptr, uint32_t numOfIgnoreToken, uint32_t *ignoreTokenTypes);
|
SStrToken tStrGetToken(char *str, int32_t *i, bool isPrevOptr);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* check if it is a keyword or not
|
* check if it is a keyword or not
|
||||||
|
|
|
@ -175,6 +175,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, "Table name too long")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, "Table does not exist")
|
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, "Table does not exist")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, "Invalid table type in tsdb")
|
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, "Invalid table type in tsdb")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, "Too many tags")
|
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, "Too many tags")
|
||||||
|
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_COLUMNS, "Too many columns")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, "Too many time series")
|
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, "Too many time series")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, "Not super table") // operation only available for super table
|
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, "Not super table") // operation only available for super table
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, "Tag name too long")
|
TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, "Tag name too long")
|
||||||
|
|
|
@ -37,6 +37,7 @@ extern int32_t vDebugFlag;
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t vgId; // global vnode group ID
|
int32_t vgId; // global vnode group ID
|
||||||
int32_t refCount; // reference count
|
int32_t refCount; // reference count
|
||||||
|
int64_t queuedWMsgSize;
|
||||||
int32_t queuedWMsg;
|
int32_t queuedWMsg;
|
||||||
int32_t queuedRMsg;
|
int32_t queuedRMsg;
|
||||||
int32_t flowctrlLevel;
|
int32_t flowctrlLevel;
|
||||||
|
|
|
@ -99,8 +99,13 @@ int32_t vnodeSync(int32_t vgId) {
|
||||||
return TSDB_CODE_VND_INVALID_VGROUP_ID;
|
return TSDB_CODE_VND_INVALID_VGROUP_ID;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pVnode->role != TAOS_SYNC_ROLE_MASTER) {
|
if (pVnode->role == TAOS_SYNC_ROLE_SLAVE) {
|
||||||
vInfo("vgId:%d, vnode will sync, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
|
vInfo("vgId:%d, vnode will sync, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
|
||||||
|
|
||||||
|
pVnode->version = 0;
|
||||||
|
pVnode->fversion = 0;
|
||||||
|
walResetVersion(pVnode->wal, pVnode->fversion);
|
||||||
|
|
||||||
syncRecover(pVnode->sync);
|
syncRecover(pVnode->sync);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -227,9 +232,28 @@ int32_t vnodeAlter(void *vparam, SCreateVnodeMsg *pVnodeCfg) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void vnodeFindWalRootDir(int32_t vgId, char *walRootDir) {
|
||||||
|
char vnodeDir[TSDB_FILENAME_LEN] = "\0";
|
||||||
|
snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d/wal", vgId);
|
||||||
|
|
||||||
|
TDIR *tdir = tfsOpendir(vnodeDir);
|
||||||
|
if (!tdir) return;
|
||||||
|
|
||||||
|
const TFILE *tfile = tfsReaddir(tdir);
|
||||||
|
if (!tfile) {
|
||||||
|
tfsClosedir(tdir);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(tfile->level, tfile->id), vgId);
|
||||||
|
|
||||||
|
tfsClosedir(tdir);
|
||||||
|
}
|
||||||
|
|
||||||
int32_t vnodeOpen(int32_t vgId) {
|
int32_t vnodeOpen(int32_t vgId) {
|
||||||
char temp[TSDB_FILENAME_LEN * 3];
|
char temp[TSDB_FILENAME_LEN * 3];
|
||||||
char rootDir[TSDB_FILENAME_LEN * 2];
|
char rootDir[TSDB_FILENAME_LEN * 2];
|
||||||
|
char walRootDir[TSDB_FILENAME_LEN * 2] = {0};
|
||||||
snprintf(rootDir, TSDB_FILENAME_LEN * 2, "%s/vnode%d", tsVnodeDir, vgId);
|
snprintf(rootDir, TSDB_FILENAME_LEN * 2, "%s/vnode%d", tsVnodeDir, vgId);
|
||||||
|
|
||||||
SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1);
|
SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1);
|
||||||
|
@ -316,7 +340,21 @@ int32_t vnodeOpen(int32_t vgId) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sprintf(temp, "%s/wal", rootDir);
|
// walRootDir for wal & syncInfo.path (not empty dir of /vnode/vnode{pVnode->vgId}/wal)
|
||||||
|
vnodeFindWalRootDir(pVnode->vgId, walRootDir);
|
||||||
|
if (walRootDir[0] == 0) {
|
||||||
|
int level = -1, id = -1;
|
||||||
|
|
||||||
|
tfsAllocDisk(TFS_PRIMARY_LEVEL, &level, &id);
|
||||||
|
if (level < 0 || id < 0) {
|
||||||
|
vnodeCleanUp(pVnode);
|
||||||
|
return terrno;
|
||||||
|
}
|
||||||
|
|
||||||
|
sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(level, id), vgId);
|
||||||
|
}
|
||||||
|
|
||||||
|
sprintf(temp, "%s/wal", walRootDir);
|
||||||
pVnode->walCfg.vgId = pVnode->vgId;
|
pVnode->walCfg.vgId = pVnode->vgId;
|
||||||
pVnode->wal = walOpen(temp, &pVnode->walCfg);
|
pVnode->wal = walOpen(temp, &pVnode->walCfg);
|
||||||
if (pVnode->wal == NULL) {
|
if (pVnode->wal == NULL) {
|
||||||
|
@ -348,7 +386,7 @@ int32_t vnodeOpen(int32_t vgId) {
|
||||||
|
|
||||||
pVnode->events = NULL;
|
pVnode->events = NULL;
|
||||||
|
|
||||||
vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode);
|
vDebug("vgId:%d, vnode is opened in %s - %s, pVnode:%p", pVnode->vgId, rootDir, walRootDir, pVnode);
|
||||||
|
|
||||||
vnodeAddIntoHash(pVnode);
|
vnodeAddIntoHash(pVnode);
|
||||||
|
|
||||||
|
@ -356,7 +394,7 @@ int32_t vnodeOpen(int32_t vgId) {
|
||||||
syncInfo.vgId = pVnode->vgId;
|
syncInfo.vgId = pVnode->vgId;
|
||||||
syncInfo.version = pVnode->version;
|
syncInfo.version = pVnode->version;
|
||||||
syncInfo.syncCfg = pVnode->syncCfg;
|
syncInfo.syncCfg = pVnode->syncCfg;
|
||||||
tstrncpy(syncInfo.path, rootDir, TSDB_FILENAME_LEN);
|
tstrncpy(syncInfo.path, walRootDir, TSDB_FILENAME_LEN);
|
||||||
syncInfo.getWalInfoFp = vnodeGetWalInfo;
|
syncInfo.getWalInfoFp = vnodeGetWalInfo;
|
||||||
syncInfo.writeToCacheFp = vnodeWriteToCache;
|
syncInfo.writeToCacheFp = vnodeWriteToCache;
|
||||||
syncInfo.confirmForward = vnodeConfirmForard;
|
syncInfo.confirmForward = vnodeConfirmForard;
|
||||||
|
|
|
@ -208,6 +208,7 @@ static void vnodeBuildNoResultQueryRsp(SRspRet *pRet) {
|
||||||
pRsp->completed = true;
|
pRsp->completed = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
|
static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
|
||||||
void * pCont = pRead->pCont;
|
void * pCont = pRead->pCont;
|
||||||
int32_t contLen = pRead->contLen;
|
int32_t contLen = pRead->contLen;
|
||||||
|
@ -226,7 +227,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
|
||||||
|
|
||||||
if (contLen != 0) {
|
if (contLen != 0) {
|
||||||
qinfo_t pQInfo = NULL;
|
qinfo_t pQInfo = NULL;
|
||||||
uint64_t qId = 0;
|
uint64_t qId = genQueryId();
|
||||||
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo, &qId);
|
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo, &qId);
|
||||||
|
|
||||||
SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp));
|
SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp));
|
||||||
|
@ -239,7 +240,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
|
||||||
|
|
||||||
// current connect is broken
|
// current connect is broken
|
||||||
if (code == TSDB_CODE_SUCCESS) {
|
if (code == TSDB_CODE_SUCCESS) {
|
||||||
handle = qRegisterQInfo(pVnode->qMgmt, qId, (uint64_t)pQInfo);
|
handle = qRegisterQInfo(pVnode->qMgmt, qId, pQInfo);
|
||||||
if (handle == NULL) { // failed to register qhandle
|
if (handle == NULL) { // failed to register qhandle
|
||||||
pRsp->code = terrno;
|
pRsp->code = terrno;
|
||||||
terrno = 0;
|
terrno = 0;
|
||||||
|
|
|
@ -25,6 +25,7 @@
|
||||||
#include "vnodeStatus.h"
|
#include "vnodeStatus.h"
|
||||||
|
|
||||||
#define MAX_QUEUED_MSG_NUM 100000
|
#define MAX_QUEUED_MSG_NUM 100000
|
||||||
|
#define MAX_QUEUED_MSG_SIZE 1024*1024*1024 //1GB
|
||||||
|
|
||||||
extern void * tsDnodeTmr;
|
extern void * tsDnodeTmr;
|
||||||
static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *);
|
static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *);
|
||||||
|
@ -91,13 +92,17 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara
|
||||||
int32_t syncCode = 0;
|
int32_t syncCode = 0;
|
||||||
bool force = (pWrite == NULL ? false : pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
|
bool force = (pWrite == NULL ? false : pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
|
||||||
syncCode = syncForwardToPeer(pVnode->sync, pHead, pWrite, qtype, force);
|
syncCode = syncForwardToPeer(pVnode->sync, pHead, pWrite, qtype, force);
|
||||||
if (syncCode < 0) return syncCode;
|
if (syncCode < 0) {
|
||||||
|
pHead->version = 0;
|
||||||
|
return syncCode;
|
||||||
|
}
|
||||||
|
|
||||||
// write into WAL
|
// write into WAL
|
||||||
code = walWrite(pVnode->wal, pHead);
|
code = walWrite(pVnode->wal, pHead);
|
||||||
if (code < 0) {
|
if (code < 0) {
|
||||||
if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1);
|
if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1);
|
||||||
vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, pHead->version, pVnode->version, code);
|
vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, pHead->version, pVnode->version, code);
|
||||||
|
pHead->version = 0;
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
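Both failure paths now reset pHead->version to 0 before returning, presumably so a tentatively assigned version is not kept when replication or the WAL write fails. A tiny illustration of that rollback idea (names are hypothetical):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t version; } MsgHead;

/* Assign the next version, but roll it back if persisting fails. */
static int applyWrite(MsgHead *head, uint64_t *nodeVersion, int persistOk) {
  head->version = *nodeVersion + 1;
  if (!persistOk) {
    head->version = 0;            /* discard the tentative version on failure */
    return -1;
  }
  *nodeVersion = head->version;   /* commit only after the write succeeded */
  return 0;
}

int main(void) {
  uint64_t v = 10; MsgHead h = {0};
  applyWrite(&h, &v, 0);
  printf("after failure: head=%llu node=%llu\n",
         (unsigned long long)h.version, (unsigned long long)v); /* 0 and 10 */
  return 0;
}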
|
|
||||||
|
@ -265,6 +270,13 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (tsAvailDataDirGB <= tsMinimalDataDirGB) {
|
||||||
|
vError("vgId:%d, failed to write into vwqueue since no diskspace, avail:%fGB", pVnode->vgId, tsAvailDataDirGB);
|
||||||
|
taosFreeQitem(pWrite);
|
||||||
|
vnodeRelease(pVnode);
|
||||||
|
return TSDB_CODE_VND_NO_DISKSPACE;
|
||||||
|
}
|
||||||
|
|
||||||
if (!vnodeInReadyOrUpdatingStatus(pVnode)) {
|
if (!vnodeInReadyOrUpdatingStatus(pVnode)) {
|
||||||
vError("vgId:%d, failed to write into vwqueue, vstatus is %s, refCount:%d pVnode:%p", pVnode->vgId,
|
vError("vgId:%d, failed to write into vwqueue, vstatus is %s, refCount:%d pVnode:%p", pVnode->vgId,
|
||||||
vnodeStatus[pVnode->status], pVnode->refCount, pVnode);
|
vnodeStatus[pVnode->status], pVnode->refCount, pVnode);
|
||||||
|
@ -274,14 +286,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1);
|
int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1);
|
||||||
if (queued > MAX_QUEUED_MSG_NUM) {
|
int64_t queuedSize = atomic_add_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len);
|
||||||
|
|
||||||
|
if (queued > MAX_QUEUED_MSG_NUM || queuedSize > MAX_QUEUED_MSG_SIZE) {
|
||||||
int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3;
|
int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3;
|
||||||
if (ms > 100) ms = 100;
|
if (ms > 100) ms = 100;
|
||||||
vDebug("vgId:%d, too many msg:%d in vwqueue, flow control %dms", pVnode->vgId, queued, ms);
|
vDebug("vgId:%d, too many msg:%d in vwqueue, flow control %dms", pVnode->vgId, queued, ms);
|
||||||
taosMsleep(ms);
|
taosMsleep(ms);
|
||||||
}
|
}
|
||||||
|
|
||||||
vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg);
|
vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d size:%" PRId64, pVnode->vgId, pVnode->refCount,
|
||||||
|
pVnode->queuedWMsg, pVnode->queuedWMsgSize);
|
||||||
|
|
||||||
taosWriteQitem(pVnode->wqueue, pWrite->qtype, pWrite);
|
taosWriteQitem(pVnode->wqueue, pWrite->qtype, pWrite);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
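Write flow control is now driven by total queued bytes (queuedWMsgSize against MAX_QUEUED_MSG_SIZE) in addition to the message count. A compact sketch of the dual-threshold check, using C11 atomics in place of the project's atomic_* wrappers:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUED_MSG_NUM  100000
#define MAX_QUEUED_MSG_SIZE (1024LL * 1024 * 1024)   /* 1 GB */

static atomic_int   queuedMsgs;
static atomic_llong queuedBytes;

/* Enqueue accounting: returns a suggested back-off in ms (0 = no flow control). */
static int onEnqueue(int32_t msgLen) {
  int     n     = atomic_fetch_add(&queuedMsgs, 1) + 1;
  int64_t bytes = atomic_fetch_add(&queuedBytes, msgLen) + msgLen;

  if (n <= MAX_QUEUED_MSG_NUM && bytes <= MAX_QUEUED_MSG_SIZE) return 0;

  int ms = (n / MAX_QUEUED_MSG_NUM) * 10 + 3;   /* same shape as the diff's delay */
  return ms > 100 ? 100 : ms;
}

static void onDequeue(int32_t msgLen) {
  atomic_fetch_sub(&queuedMsgs, 1);
  atomic_fetch_sub(&queuedBytes, msgLen);
}

int main(void) {
  printf("backoff=%dms\n", onEnqueue(512));
  onDequeue(512);
  return 0;
}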
|
@ -304,7 +319,10 @@ void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) {
|
||||||
SVnodeObj *pVnode = vparam;
|
SVnodeObj *pVnode = vparam;
|
||||||
|
|
||||||
int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1);
|
int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1);
|
||||||
vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d", pVnode->vgId, pWrite, pWrite->rpcMsg.ahandle, queued);
|
int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len);
|
||||||
|
|
||||||
|
vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite,
|
||||||
|
pWrite->rpcMsg.ahandle, queued, queuedSize);
|
||||||
|
|
||||||
taosFreeQitem(pWrite);
|
taosFreeQitem(pWrite);
|
||||||
vnodeRelease(pVnode);
|
vnodeRelease(pVnode);
|
||||||
|
@ -340,7 +358,9 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
|
||||||
static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) {
|
static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) {
|
||||||
SVnodeObj *pVnode = pWrite->pVnode;
|
SVnodeObj *pVnode = pWrite->pVnode;
|
||||||
if (pWrite->qtype != TAOS_QTYPE_RPC) return 0;
|
if (pWrite->qtype != TAOS_QTYPE_RPC) return 0;
|
||||||
if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->flowctrlLevel <= 0) return 0;
|
if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->queuedWMsgSize < MAX_QUEUED_MSG_SIZE &&
|
||||||
|
pVnode->flowctrlLevel <= 0)
|
||||||
|
return 0;
|
||||||
|
|
||||||
if (tsEnableFlowCtrl == 0) {
|
if (tsEnableFlowCtrl == 0) {
|
||||||
int32_t ms = (int32_t)pow(2, pVnode->flowctrlLevel + 2);
|
int32_t ms = (int32_t)pow(2, pVnode->flowctrlLevel + 2);
|
||||||
|
|
|
@ -104,7 +104,7 @@ int32_t walAlter(void *handle, SWalCfg *pCfg) {
|
||||||
|
|
||||||
pWal->level = pCfg->walLevel;
|
pWal->level = pCfg->walLevel;
|
||||||
pWal->fsyncPeriod = pCfg->fsyncPeriod;
|
pWal->fsyncPeriod = pCfg->fsyncPeriod;
|
||||||
pWal->fsyncSeq = pCfg->fsyncPeriod % 1000;
|
pWal->fsyncSeq = pCfg->fsyncPeriod / 1000;
|
||||||
if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1;
|
if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1;
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
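The walAlter fix computes fsyncSeq as fsyncPeriod / 1000 rather than % 1000: assuming the fsync timer ticks once per second, the sequence is the period in milliseconds divided by 1000, clamped to at least one tick. A worked example of the arithmetic:

#include <stdio.h>

/* Convert an fsync period in ms into 1000 ms timer ticks, never less than 1. */
static int fsyncSeqFromPeriod(int fsyncPeriodMs) {
  int seq = fsyncPeriodMs / 1000;
  if (seq <= 0) seq = 1;
  return seq;
}

int main(void) {
  /* 3000 ms -> 3 ticks; the old "% 1000" would wrongly give 0 (then clamped to 1). */
  printf("3000ms -> %d ticks\n", fsyncSeqFromPeriod(3000));
  printf("500ms  -> %d tick\n",  fsyncSeqFromPeriod(500));
  return 0;
}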
|
|
|
@ -230,8 +230,8 @@ pipeline {
|
||||||
post {
|
post {
|
||||||
success {
|
success {
|
||||||
emailext (
|
emailext (
|
||||||
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
|
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
|
||||||
body: '''<!DOCTYPE html>
|
body: """<!DOCTYPE html>
|
||||||
<html>
|
<html>
|
||||||
<head>
|
<head>
|
||||||
<meta charset="UTF-8">
|
<meta charset="UTF-8">
|
||||||
|
@ -247,29 +247,29 @@ pipeline {
|
||||||
<td>
|
<td>
|
||||||
<ul>
|
<ul>
|
||||||
<div style="font-size:18px">
|
<div style="font-size:18px">
|
||||||
<li>构建名称>>分支:${PROJECT_NAME}</li>
|
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
|
||||||
<li>构建结果:<span style="color:green"> Successful </span></li>
|
<li>构建结果:<span style="color:green"> Successful </span></li>
|
||||||
<li>构建编号:${BUILD_NUMBER}</li>
|
<li>构建编号:${BUILD_NUMBER}</li>
|
||||||
<li>触发用户:${CAUSE}</li>
|
<li>触发用户:${env.CHANGE_AUTHOR}</li>
|
||||||
<li>变更概要:${CHANGES}</li>
|
<li>提交信息:${env.CHANGE_TITLE}</li>
|
||||||
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
||||||
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
||||||
<li>变更集:${JELLY_SCRIPT}</li>
|
|
||||||
</div>
|
</div>
|
||||||
</ul>
|
</ul>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
</table></font>
|
</table></font>
|
||||||
</body>
|
</body>
|
||||||
</html>''',
|
</html>""",
|
||||||
to: "yqliu@taosdata.com,pxiao@taosdata.com",
|
to: "yqliu@taosdata.com,pxiao@taosdata.com",
|
||||||
from: "support@taosdata.com"
|
from: "support@taosdata.com"
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
failure {
|
failure {
|
||||||
emailext (
|
emailext (
|
||||||
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
|
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
|
||||||
body: '''<!DOCTYPE html>
|
body: """<!DOCTYPE html>
|
||||||
<html>
|
<html>
|
||||||
<head>
|
<head>
|
||||||
<meta charset="UTF-8">
|
<meta charset="UTF-8">
|
||||||
|
@ -285,21 +285,21 @@ pipeline {
|
||||||
<td>
|
<td>
|
||||||
<ul>
|
<ul>
|
||||||
<div style="font-size:18px">
|
<div style="font-size:18px">
|
||||||
<li>构建名称>>分支:${PROJECT_NAME}</li>
|
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
|
||||||
<li>构建结果:<span style="color:green"> Successful </span></li>
|
<li>构建结果:<span style="color:red"> Failure </span></li>
|
||||||
<li>构建编号:${BUILD_NUMBER}</li>
|
<li>构建编号:${BUILD_NUMBER}</li>
|
||||||
<li>触发用户:${CAUSE}</li>
|
<li>触发用户:${env.CHANGE_AUTHOR}</li>
|
||||||
<li>变更概要:${CHANGES}</li>
|
<li>提交信息:${env.CHANGE_TITLE}</li>
|
||||||
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
||||||
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
||||||
<li>变更集:${JELLY_SCRIPT}</li>
|
|
||||||
</div>
|
</div>
|
||||||
</ul>
|
</ul>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
</table></font>
|
</table></font>
|
||||||
</body>
|
</body>
|
||||||
</html>''',
|
</html>""",
|
||||||
to: "yqliu@taosdata.com,pxiao@taosdata.com",
|
to: "yqliu@taosdata.com,pxiao@taosdata.com",
|
||||||
from: "support@taosdata.com"
|
from: "support@taosdata.com"
|
||||||
)
|
)
|
||||||
|
|
|
@ -19,7 +19,8 @@ using System.Runtime.InteropServices;
|
||||||
|
|
||||||
namespace TDengineDriver
|
namespace TDengineDriver
|
||||||
{
|
{
|
||||||
enum TDengineDataType {
|
enum TDengineDataType
|
||||||
|
{
|
||||||
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
|
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
|
||||||
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
|
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
|
||||||
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
|
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
|
||||||
|
@ -30,7 +31,11 @@ namespace TDengineDriver
|
||||||
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
|
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
|
||||||
TSDB_DATA_TYPE_BINARY = 8, // string
|
TSDB_DATA_TYPE_BINARY = 8, // string
|
||||||
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
|
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
|
||||||
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
|
TSDB_DATA_TYPE_NCHAR = 10, // unicode string
|
||||||
|
TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte
|
||||||
|
TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes
|
||||||
|
TSDB_DATA_TYPE_UINT = 13, // 4 bytes
|
||||||
|
TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TDengineInitOption
|
enum TDengineInitOption
|
||||||
|
@ -52,15 +57,23 @@ namespace TDengineDriver
|
||||||
switch ((TDengineDataType)type)
|
switch ((TDengineDataType)type)
|
||||||
{
|
{
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
|
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
|
||||||
return "BOOLEAN";
|
return "BOOL";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
|
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
|
||||||
return "BYTE";
|
return "TINYINT";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
|
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
|
||||||
return "SHORT";
|
return "SMALLINT";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_INT:
|
case TDengineDataType.TSDB_DATA_TYPE_INT:
|
||||||
return "INT";
|
return "INT";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
|
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
|
||||||
return "LONG";
|
return "BIGINT";
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
|
||||||
|
return "TINYINT UNSIGNED";
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
|
||||||
|
return "SMALLINT UNSIGNED";
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UINT:
|
||||||
|
return "INT UNSIGNED";
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
|
||||||
|
return "BIGINT UNSIGNED";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
|
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
|
||||||
return "FLOAT";
|
return "FLOAT";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
|
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
|
||||||
|
@ -81,19 +94,19 @@ namespace TDengineDriver
|
||||||
{
|
{
|
||||||
public const int TSDB_CODE_SUCCESS = 0;
|
public const int TSDB_CODE_SUCCESS = 0;
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public void Init();
|
static extern public void Init();
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public void Cleanup();
|
static extern public void Cleanup();
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public void Options(int option, string value);
|
static extern public void Options(int option, string value);
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
|
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern private IntPtr taos_errstr(IntPtr res);
|
static extern private IntPtr taos_errstr(IntPtr res);
|
||||||
static public string Error(IntPtr res)
|
static public string Error(IntPtr res)
|
||||||
{
|
{
|
||||||
|
@ -101,19 +114,19 @@ namespace TDengineDriver
|
||||||
return Marshal.PtrToStringAnsi(errPtr);
|
return Marshal.PtrToStringAnsi(errPtr);
|
||||||
}
|
}
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public int ErrorNo(IntPtr res);
|
static extern public int ErrorNo(IntPtr res);
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public IntPtr Query(IntPtr conn, string sqlstr);
|
static extern public IntPtr Query(IntPtr conn, string sqlstr);
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public int AffectRows(IntPtr res);
|
static extern public int AffectRows(IntPtr res);
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public int FieldCount(IntPtr res);
|
static extern public int FieldCount(IntPtr res);
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern private IntPtr taos_fetch_fields(IntPtr res);
|
static extern private IntPtr taos_fetch_fields(IntPtr res);
|
||||||
static public List<TDengineMeta> FetchFields(IntPtr res)
|
static public List<TDengineMeta> FetchFields(IntPtr res)
|
||||||
{
|
{
|
||||||
|
@ -142,13 +155,13 @@ namespace TDengineDriver
|
||||||
return metas;
|
return metas;
|
||||||
}
|
}
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public IntPtr FetchRows(IntPtr res);
|
static extern public IntPtr FetchRows(IntPtr res);
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public IntPtr FreeResult(IntPtr res);
|
static extern public IntPtr FreeResult(IntPtr res);
|
||||||
|
|
||||||
[DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
|
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
|
||||||
static extern public int Close(IntPtr taos);
|
static extern public int Close(IntPtr taos);
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -410,6 +410,22 @@ namespace TDengineDriver
|
||||||
string v10 = Marshal.PtrToStringAnsi(data);
|
string v10 = Marshal.PtrToStringAnsi(data);
|
||||||
builder.Append(v10);
|
builder.Append(v10);
|
||||||
break;
|
break;
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
|
||||||
|
byte v11 = Marshal.ReadByte(data);
|
||||||
|
builder.Append(v11);
|
||||||
|
break;
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
|
||||||
|
ushort v12 = (ushort)Marshal.ReadInt16(data);
|
||||||
|
builder.Append(v12);
|
||||||
|
break;
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UINT:
|
||||||
|
uint v13 = (uint)Marshal.ReadInt32(data);
|
||||||
|
builder.Append(v13);
|
||||||
|
break;
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
|
||||||
|
ulong v14 = (ulong)Marshal.ReadInt64(data);
|
||||||
|
builder.Append(v14);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
builder.Append("---");
|
builder.Append("---");
|
||||||
|
|
|
@ -31,7 +31,11 @@ namespace TDengineDriver
|
||||||
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
|
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
|
||||||
TSDB_DATA_TYPE_BINARY = 8, // string
|
TSDB_DATA_TYPE_BINARY = 8, // string
|
||||||
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
|
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
|
||||||
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
|
TSDB_DATA_TYPE_NCHAR = 10, // unicode string
|
||||||
|
TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte
|
||||||
|
TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes
|
||||||
|
TSDB_DATA_TYPE_UINT = 13, // 4 bytes
|
||||||
|
TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TDengineInitOption
|
enum TDengineInitOption
|
||||||
|
@ -53,15 +57,23 @@ namespace TDengineDriver
|
||||||
switch ((TDengineDataType)type)
|
switch ((TDengineDataType)type)
|
||||||
{
|
{
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
|
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
|
||||||
return "BOOLEAN";
|
return "BOOL";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
|
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
|
||||||
return "BYTE";
|
return "TINYINT";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
|
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
|
||||||
return "SHORT";
|
return "SMALLINT";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_INT:
|
case TDengineDataType.TSDB_DATA_TYPE_INT:
|
||||||
return "INT";
|
return "INT";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
|
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
|
||||||
return "LONG";
|
return "BIGINT";
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
|
||||||
|
return "TINYINT UNSIGNED";
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
|
||||||
|
return "SMALLINT UNSIGNED";
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UINT:
|
||||||
|
return "INT UNSIGNED";
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
|
||||||
|
return "BIGINT UNSIGNED";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
|
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
|
||||||
return "FLOAT";
|
return "FLOAT";
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
|
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
|
||||||
|
|
|
@ -63,7 +63,7 @@ namespace TDengineDriver
|
||||||
static void HelpPrint(string arg, string desc)
|
static void HelpPrint(string arg, string desc)
|
||||||
{
|
{
|
||||||
string indent = " ";
|
string indent = " ";
|
||||||
Console.WriteLine("{0}{1}", indent, arg.PadRight(25)+desc);
|
Console.WriteLine("{0}{1}", indent, arg.PadRight(25) + desc);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void PrintHelp(String[] argv)
|
static void PrintHelp(String[] argv)
|
||||||
|
@ -142,33 +142,33 @@ namespace TDengineDriver
|
||||||
verbose = this.GetArgumentAsFlag(argv, "-v", true);
|
verbose = this.GetArgumentAsFlag(argv, "-v", true);
|
||||||
debug = this.GetArgumentAsFlag(argv, "-g", true);
|
debug = this.GetArgumentAsFlag(argv, "-g", true);
|
||||||
|
|
||||||
VerbosePrint ("###################################################################\n");
|
VerbosePrint("###################################################################\n");
|
||||||
VerbosePrintFormat ("# Server IP: {0}\n", host);
|
VerbosePrintFormat("# Server IP: {0}\n", host);
|
||||||
VerbosePrintFormat ("# User: {0}\n", user);
|
VerbosePrintFormat("# User: {0}\n", user);
|
||||||
VerbosePrintFormat ("# Password: {0}\n", password);
|
VerbosePrintFormat("# Password: {0}\n", password);
|
||||||
VerbosePrintFormat ("# Number of Columns per record: {0}\n", colsPerRecord);
|
VerbosePrintFormat("# Number of Columns per record: {0}\n", colsPerRecord);
|
||||||
VerbosePrintFormat ("# Number of Threads: {0}\n", numOfThreads);
|
VerbosePrintFormat("# Number of Threads: {0}\n", numOfThreads);
|
||||||
VerbosePrintFormat ("# Number of Tables: {0}\n", numOfTables);
|
VerbosePrintFormat("# Number of Tables: {0}\n", numOfTables);
|
||||||
VerbosePrintFormat ("# Number of records per Table: {0}\n", recordsPerTable);
|
VerbosePrintFormat("# Number of records per Table: {0}\n", recordsPerTable);
|
||||||
VerbosePrintFormat ("# Records/Request: {0}\n", recordsPerRequest);
|
VerbosePrintFormat("# Records/Request: {0}\n", recordsPerRequest);
|
||||||
VerbosePrintFormat ("# Database name: {0}\n", dbName);
|
VerbosePrintFormat("# Database name: {0}\n", dbName);
|
||||||
VerbosePrintFormat ("# Replica: {0}\n", replica);
|
VerbosePrintFormat("# Replica: {0}\n", replica);
|
||||||
VerbosePrintFormat ("# Use STable: {0}\n", useStable);
|
VerbosePrintFormat("# Use STable: {0}\n", useStable);
|
||||||
VerbosePrintFormat ("# Table prefix: {0}\n", tablePrefix);
|
VerbosePrintFormat("# Table prefix: {0}\n", tablePrefix);
|
||||||
if (useStable == true)
|
if (useStable == true)
|
||||||
{
|
{
|
||||||
VerbosePrintFormat("# STable prefix: {0}\n", stablePrefix);
|
VerbosePrintFormat("# STable prefix: {0}\n", stablePrefix);
|
||||||
}
|
}
|
||||||
VerbosePrintFormat ("# Data order: {0}\n", order);
|
VerbosePrintFormat("# Data order: {0}\n", order);
|
||||||
VerbosePrintFormat ("# Data out of order rate: {0}\n", rateOfOutorder);
|
VerbosePrintFormat("# Data out of order rate: {0}\n", rateOfOutorder);
|
||||||
VerbosePrintFormat ("# Delete method: {0}\n", methodOfDelete);
|
VerbosePrintFormat("# Delete method: {0}\n", methodOfDelete);
|
||||||
VerbosePrintFormat ("# Query command: {0}\n", query);
|
VerbosePrintFormat("# Query command: {0}\n", query);
|
||||||
VerbosePrintFormat ("# Query Mode: {0}\n", queryMode);
|
VerbosePrintFormat("# Query Mode: {0}\n", queryMode);
|
||||||
VerbosePrintFormat ("# Insert Only: {0}\n", isInsertOnly);
|
VerbosePrintFormat("# Insert Only: {0}\n", isInsertOnly);
|
||||||
VerbosePrintFormat ("# Verbose output {0}\n", verbose);
|
VerbosePrintFormat("# Verbose output {0}\n", verbose);
|
||||||
VerbosePrintFormat ("# Test time: {0}\n", DateTime.Now.ToString("h:mm:ss tt"));
|
VerbosePrintFormat("# Test time: {0}\n", DateTime.Now.ToString("h:mm:ss tt"));
|
||||||
|
|
||||||
VerbosePrint ("###################################################################\n");
|
VerbosePrint("###################################################################\n");
|
||||||
|
|
||||||
if (skipReadKey == false)
|
if (skipReadKey == false)
|
||||||
{
|
{
|
||||||
|
@ -385,7 +385,7 @@ namespace TDengineDriver
|
||||||
public void CreateDb()
|
public void CreateDb()
|
||||||
{
|
{
|
||||||
StringBuilder sql = new StringBuilder();
|
StringBuilder sql = new StringBuilder();
|
||||||
sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica);
|
sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica).Append(" keep 36500");
|
||||||
IntPtr res = TDengine.Query(this.conn, sql.ToString());
|
IntPtr res = TDengine.Query(this.conn, sql.ToString());
|
||||||
if (res != IntPtr.Zero)
|
if (res != IntPtr.Zero)
|
||||||
{
|
{
|
||||||
|
@ -406,7 +406,7 @@ namespace TDengineDriver
|
||||||
sql.Clear();
|
sql.Clear();
|
||||||
sql.Append("CREATE TABLE IF NOT EXISTS ").
|
sql.Append("CREATE TABLE IF NOT EXISTS ").
|
||||||
Append(this.dbName).Append(".").Append(this.stablePrefix).
|
Append(this.dbName).Append(".").Append(this.stablePrefix).
|
||||||
Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)");
|
Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10), v10 tinyint unsigned, v11 smallint unsigned, v12 int unsigned, v13 bigint unsigned) tags(t1 int)");
|
||||||
IntPtr res = TDengine.Query(this.conn, sql.ToString());
|
IntPtr res = TDengine.Query(this.conn, sql.ToString());
|
||||||
if (res != IntPtr.Zero)
|
if (res != IntPtr.Zero)
|
||||||
{
|
{
|
||||||
|
@ -523,7 +523,7 @@ namespace TDengineDriver
|
||||||
int offset = IntPtr.Size * fields;
|
int offset = IntPtr.Size * fields;
|
||||||
IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
|
IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
|
||||||
|
|
||||||
builder.Append("---");
|
builder.Append(" | ");
|
||||||
|
|
||||||
if (data == IntPtr.Zero)
|
if (data == IntPtr.Zero)
|
||||||
{
|
{
|
||||||
|
@ -538,7 +538,7 @@ namespace TDengineDriver
|
||||||
builder.Append(v1);
|
builder.Append(v1);
|
||||||
break;
|
break;
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
|
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
|
||||||
byte v2 = Marshal.ReadByte(data);
|
sbyte v2 = (sbyte)Marshal.ReadByte(data);
|
||||||
builder.Append(v2);
|
builder.Append(v2);
|
||||||
break;
|
break;
|
||||||
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
|
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
|
||||||
|
@ -573,9 +573,25 @@ namespace TDengineDriver
|
||||||
string v10 = Marshal.PtrToStringAnsi(data);
|
string v10 = Marshal.PtrToStringAnsi(data);
|
||||||
builder.Append(v10);
|
builder.Append(v10);
|
||||||
break;
|
break;
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
|
||||||
|
byte v11 = Marshal.ReadByte(data);
|
||||||
|
builder.Append(v11);
|
||||||
|
break;
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
|
||||||
|
ushort v12 = (ushort)Marshal.ReadInt16(data);
|
||||||
|
builder.Append(v12);
|
||||||
|
break;
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UINT:
|
||||||
|
uint v13 = (uint)Marshal.ReadInt32(data);
|
||||||
|
builder.Append(v13);
|
||||||
|
break;
|
||||||
|
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
|
||||||
|
ulong v14 = (ulong)Marshal.ReadInt64(data);
|
||||||
|
builder.Append(v14);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
builder.Append("---");
|
builder.Append(" | ");
|
||||||
|
|
||||||
VerbosePrint(builder.ToString() + "\n");
|
VerbosePrint(builder.ToString() + "\n");
|
||||||
builder.Clear();
|
builder.Clear();
|
||||||
|
@ -643,7 +659,7 @@ namespace TDengineDriver
|
||||||
watch.Stop();
|
watch.Stop();
|
||||||
elapsedMs = watch.Elapsed.TotalMilliseconds;
|
elapsedMs = watch.Elapsed.TotalMilliseconds;
|
||||||
Console.WriteLine("C# taosdemo: Spent {0} seconds to query {1} records.\n",
|
Console.WriteLine("C# taosdemo: Spent {0} seconds to query {1} records.\n",
|
||||||
elapsedMs/1000,
|
elapsedMs / 1000,
|
||||||
tester.recordsPerTable * tester.numOfTables
|
tester.recordsPerTable * tester.numOfTables
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
@ -712,9 +728,9 @@ namespace TDengineDriver
|
||||||
int m = now.Minute;
|
int m = now.Minute;
|
||||||
int s = now.Second;
|
int s = now.Second;
|
||||||
|
|
||||||
long baseTimestamp = 1609430400000; // 2021/01/01 0:0:0
|
long baseTimestamp = -16094340000; // 1969-06-29 01:21:00
|
||||||
VerbosePrintFormat("beginTime is {0} + {1}h:{2}m:{3}s\n", baseTimestamp, h, m, s);
|
VerbosePrintFormat("beginTime is {0} + {1}h:{2}m:{3}s\n", baseTimestamp, h, m, s);
|
||||||
long beginTimestamp = baseTimestamp + ((h*60 + m) * 60 + s) * 1000;
|
long beginTimestamp = baseTimestamp + ((h * 60 + m) * 60 + s) * 1000;
|
||||||
Random random = new Random();
|
Random random = new Random();
|
||||||
|
|
||||||
long rowsInserted = 0;
|
long rowsInserted = 0;
|
||||||
|
@ -755,11 +771,16 @@ namespace TDengineDriver
|
||||||
|
|
||||||
sql.Append("(")
|
sql.Append("(")
|
||||||
.Append(writeTimeStamp)
|
.Append(writeTimeStamp)
|
||||||
.Append(", 1, 2, 3,")
|
.Append(", 1, -2, -3,")
|
||||||
.Append(i + batch)
|
.Append(i + batch - 127)
|
||||||
.Append(", 5, 6, 7, 'abc', 'def')");
|
.Append(", -5, -6, -7, 'abc', 'def', 254, 65534,")
|
||||||
|
.Append(4294967294 - (uint)i - (uint)batch)
|
||||||
|
.Append(",")
|
||||||
|
.Append(18446744073709551614 - (ulong)i - (ulong)batch)
|
||||||
|
.Append(")");
|
||||||
|
|
||||||
}
|
}
|
||||||
|
VerbosePrint(sql.ToString() + "\n");
|
||||||
IntPtr res = TDengine.Query(this.conn, sql.ToString());
|
IntPtr res = TDengine.Query(this.conn, sql.ToString());
|
||||||
if (res == IntPtr.Zero)
|
if (res == IntPtr.Zero)
|
||||||
{
|
{
|
||||||
|
@ -837,7 +858,7 @@ namespace TDengineDriver
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10))");
|
sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10), v10 tinyint unsigned, v11 smallint unsigned, v12 int unsigned, v13 bigint unsigned)");
|
||||||
}
|
}
|
||||||
IntPtr res = TDengine.Query(this.conn, sql.ToString());
|
IntPtr res = TDengine.Query(this.conn, sql.ToString());
|
||||||
if (res != IntPtr.Zero)
|
if (res != IntPtr.Zero)
|
||||||
|
|
|
@ -36,8 +36,6 @@
|
||||||
"insert_mode": "taosc",
|
"insert_mode": "taosc",
|
||||||
"insert_rate": 0,
|
"insert_rate": 0,
|
||||||
"insert_rows": 100,
|
"insert_rows": 100,
|
||||||
"multi_thread_write_one_tbl": "no",
|
|
||||||
"number_of_tbl_in_one_sql": 0,
|
|
||||||
"interlace_rows": 3,
|
"interlace_rows": 3,
|
||||||
"max_sql_len": 1024,
|
"max_sql_len": 1024,
|
||||||
"disorder_ratio": 0,
|
"disorder_ratio": 0,
|
||||||
|
|
|
@ -28,7 +28,8 @@
|
||||||
|
|
||||||
int points = 5;
|
int points = 5;
|
||||||
int numOfTables = 3;
|
int numOfTables = 3;
|
||||||
int tablesProcessed = 0;
|
int tablesInsertProcessed = 0;
|
||||||
|
int tablesSelectProcessed = 0;
|
||||||
int64_t st, et;
|
int64_t st, et;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -134,6 +135,9 @@ int main(int argc, char *argv[])
|
||||||
gettimeofday(&systemTime, NULL);
|
gettimeofday(&systemTime, NULL);
|
||||||
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
|
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
|
||||||
|
|
||||||
|
tablesInsertProcessed = 0;
|
||||||
|
tablesSelectProcessed = 0;
|
||||||
|
|
||||||
for (i = 0; i<numOfTables; ++i) {
|
for (i = 0; i<numOfTables; ++i) {
|
||||||
// insert records in asynchronous API
|
// insert records in asynchronous API
|
||||||
sprintf(sql, "insert into %s values(%ld, 0)", tableList[i].name, 1546300800000 + i);
|
sprintf(sql, "insert into %s values(%ld, 0)", tableList[i].name, 1546300800000 + i);
|
||||||
|
@ -143,10 +147,20 @@ int main(int argc, char *argv[])
|
||||||
printf("once insert finished, presse any key to query\n");
|
printf("once insert finished, presse any key to query\n");
|
||||||
getchar();
|
getchar();
|
||||||
|
|
||||||
|
while(1) {
|
||||||
|
if (tablesInsertProcessed < numOfTables) {
|
||||||
|
printf("wait for process finished\n");
|
||||||
|
sleep(1);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
printf("start to query...\n");
|
printf("start to query...\n");
|
||||||
gettimeofday(&systemTime, NULL);
|
gettimeofday(&systemTime, NULL);
|
||||||
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
|
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
|
||||||
tablesProcessed = 0;
|
|
||||||
|
|
||||||
for (i = 0; i < numOfTables; ++i) {
|
for (i = 0; i < numOfTables; ++i) {
|
||||||
// select records in asynchronous API
|
// select records in asynchronous API
|
||||||
|
@ -157,14 +171,8 @@ int main(int argc, char *argv[])
|
||||||
printf("\nonce finished, press any key to exit\n");
|
printf("\nonce finished, press any key to exit\n");
|
||||||
getchar();
|
getchar();
|
||||||
|
|
||||||
for (i = 0; i<numOfTables; ++i) {
|
|
||||||
printf("%s inserted:%d retrieved:%d\n", tableList[i].name, tableList[i].rowsInserted, tableList[i].rowsRetrieved);
|
|
||||||
}
|
|
||||||
|
|
||||||
getchar();
|
|
||||||
|
|
||||||
while(1) {
|
while(1) {
|
||||||
if (tablesProcessed < numOfTables) {
|
if (tablesSelectProcessed < numOfTables) {
|
||||||
printf("wait for process finished\n");
|
printf("wait for process finished\n");
|
||||||
sleep(1);
|
sleep(1);
|
||||||
continue;
|
continue;
|
||||||
|
@ -173,6 +181,10 @@ int main(int argc, char *argv[])
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for (i = 0; i<numOfTables; ++i) {
|
||||||
|
printf("%s inserted:%d retrieved:%d\n", tableList[i].name, tableList[i].rowsInserted, tableList[i].rowsRetrieved);
|
||||||
|
}
|
||||||
|
|
||||||
taos_close(taos);
|
taos_close(taos);
|
||||||
free(tableList);
|
free(tableList);
|
||||||
|
|
||||||
|
@ -214,8 +226,8 @@ void taos_insert_call_back(void *param, TAOS_RES *tres, int code)
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
printf("%d rows data are inserted into %s\n", points, pTable->name);
|
printf("%d rows data are inserted into %s\n", points, pTable->name);
|
||||||
tablesProcessed++;
|
tablesInsertProcessed++;
|
||||||
if (tablesProcessed >= numOfTables) {
|
if (tablesInsertProcessed >= numOfTables) {
|
||||||
gettimeofday(&systemTime, NULL);
|
gettimeofday(&systemTime, NULL);
|
||||||
et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
|
et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
|
||||||
printf("%lld mseconds to insert %d data points\n", (et - st) / 1000, points*numOfTables);
|
printf("%lld mseconds to insert %d data points\n", (et - st) / 1000, points*numOfTables);
|
||||||
|
@ -251,15 +263,17 @@ void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows)
|
||||||
//taos_free_result(tres);
|
//taos_free_result(tres);
|
||||||
printf("%d rows data retrieved from %s\n", pTable->rowsRetrieved, pTable->name);
|
printf("%d rows data retrieved from %s\n", pTable->rowsRetrieved, pTable->name);
|
||||||
|
|
||||||
tablesProcessed++;
|
tablesSelectProcessed++;
|
||||||
if (tablesProcessed >= numOfTables) {
|
if (tablesSelectProcessed >= numOfTables) {
|
||||||
gettimeofday(&systemTime, NULL);
|
gettimeofday(&systemTime, NULL);
|
||||||
et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
|
et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
|
||||||
printf("%lld mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables);
|
printf("%lld mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
taos_free_result(tres);
|
taos_free_result(tres);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void taos_select_call_back(void *param, TAOS_RES *tres, int code)
|
void taos_select_call_back(void *param, TAOS_RES *tres, int code)
|
||||||
|
@ -276,6 +290,4 @@ void taos_select_call_back(void *param, TAOS_RES *tres, int code)
|
||||||
taos_cleanup();
|
taos_cleanup();
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
taos_free_result(tres);
|
|
||||||
}
|
}
|
||||||
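The demo now keeps separate insert and select counters and polls with sleep(1) until every insert callback has fired before querying. A reduced sketch of that wait-for-counter pattern (callbacks simulated synchronously; real async code should update the counter atomically or under a lock):

#include <stdio.h>
#include <unistd.h>

static int numOfTables = 3;
static int tablesInsertProcessed = 0;

/* In the demo this increment happens inside the taos_query_a insert callback. */
static void insertCallback(void) { tablesInsertProcessed++; }

int main(void) {
  for (int i = 0; i < numOfTables; ++i) insertCallback();   /* simulated async completions */

  while (tablesInsertProcessed < numOfTables) {             /* same polling loop as the diff */
    printf("waiting for inserts to finish\n");
    sleep(1);
  }
  printf("all %d inserts done, start querying\n", numOfTables);
  return 0;
}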
|
|
|
@ -66,7 +66,7 @@ int main(int argc, char *argv[]) {
     printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);
     exit(1);
   }
-  for (int i = 0; i < 4000000; i++) {
+  for (int i = 0; i < 100; i++) {
     Test(taos, qstr, i);
   }
   taos_close(taos);
@ -19,6 +19,10 @@ Run lua sample:
 lua test.lua
 ```

+## Run performance test:
+```
+time lua benchmark.lua
+```
 ## OpenResty Dependencies
 - OpenResty:
 ```
@ -0,0 +1,67 @@
+local driver = require "luaconnector"
+
+local config = {
+   password = "taosdata",
+   host = "127.0.0.1",
+   port = 6030,
+   database = "",
+   user = "root",
+
+   max_packet_size = 1024 * 1024
+}
+
+local conn
+local res = driver.connect(config)
+if res.code ~=0 then
+   print("connect--- failed: "..res.error)
+   return
+else
+   conn = res.conn
+   print("connect--- pass.")
+end
+
+local res = driver.query(conn,"drop database if exists demo")
+
+res = driver.query(conn,"create database demo")
+if res.code ~=0 then
+   print("create db--- failed: "..res.error)
+   return
+else
+   print("create db--- pass.")
+end
+
+res = driver.query(conn,"use demo")
+if res.code ~=0 then
+   print("select db--- failed: "..res.error)
+   return
+else
+   print("select db--- pass.")
+end
+
+res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))")
+if res.code ~=0 then
+   print("create table---failed: "..res.error)
+   return
+else
+   print("create table--- pass.")
+end
+
+local base = 1617330000000
+local index =0
+local count = 100000
+local t
+while( index < count )
+do
+   t = base + index
+   local q=string.format([[insert into m1 values (%d,0,'robotspace')]],t)
+   res = driver.query(conn,q)
+   if res.code ~=0 then
+      print("insert records failed: "..res.error)
+      return
+   else
+
+   end
+   index = index+1
+end
+print(string.format([["Done. %d records has been stored."]],count))
+driver.close(conn)
@ -1,2 +1,2 @@
-gcc lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos
+gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos

@ -1,2 +1,2 @@
-gcc lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos
+gcc -std=c99 lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos
@ -23,7 +23,7 @@ static int l_connect(lua_State *L){

   luaL_checktype(L, 1, LUA_TTABLE);

-  lua_getfield(L,-1,"host");
+  lua_getfield(L,1,"host");
   if (lua_isstring(L,-1)){
     host = lua_tostring(L, -1);
     // printf("host = %s\n", host);
@ -40,8 +40,8 @@ function buildTDengine {

    git remote update > /dev/null
    git reset --hard HEAD
-   git checkout develop
-   REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
+   git checkout master
+   REMOTE_COMMIT=`git rev-parse --short remotes/origin/master`
    LOCAL_COMMIT=`git rev-parse --short @`

    echo " LOCAL: $LOCAL_COMMIT"

@ -73,6 +73,9 @@ function runQueryPerfTest {
    python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT

    python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT

+   python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
+
 }
@ -37,8 +37,6 @@
       "insert_mode": "taosc",
       "insert_rate": 0,
       "insert_rows": 100000,
-      "multi_thread_write_one_tbl": "no",
-      "number_of_tbl_in_one_sql": 1,
       "interlace_rows": 100,
       "max_sql_len": 1024000,
       "disorder_ratio": 0,
@ -6,6 +6,25 @@ To effectively test and debug our TDengine product, we have developed a simple t
 exercise various functions of the system in a randomized fashion, hoping to expose
 maximum number of problems, hopefully without a pre-determined scenario.

+# Features
+
+This tool can run as a test client with the following features:
+
+1. Any number of concurrent threads
+1. Any number of test steps/loops
+1. Auto-create and writing to multiple databases
+1. Ignore specific error codes
+1. Write small or large data blocks
+1. Auto-generate out-of-sequence data, if needed
+1. Verify the result of write operations
+1. Concurrent writing to a shadow database for later data verification
+1. User specified number of replicas to use, against clusters
+
+This tool can also use to start a TDengine service, either in stand-alone mode or
+cluster mode. The features include:
+
+1. User specified number of D-Nodes to create/use.
+
 # Preparation

 To run this tool, please ensure the followed preparation work is done first.

@ -16,7 +35,7 @@ To run this tool, please ensure the followed preparation work is done first.
 Ubuntu 20.04LTS as our own development environment, and suggest you also use such
 an environment if possible.

-# Simple Execution
+# Simple Execution as Client Test Tool

 To run the tool with the simplest method, follow the steps below:

@ -28,19 +47,21 @@ To run the tool with the simplest method, follow the steps below:

 That's it!

-# Running Clusters
+# Running Server-side Clusters

 This tool also makes it easy to test/verify the clustering capabilities of TDengine. You
 can start a cluster quite easily with the following command:

 ```
 $ cd tests/pytest/
-$ ./crash_gen.sh -e -o 3
+$ rm -rf ../../build/cluster_dnode_?; ./crash_gen.sh -e -o 3 # first part optional
 ```

 The `-e` option above tells the tool to start the service, and do not run any tests, while
 the `-o 3` option tells the tool to start 3 DNodes and join them together in a cluster.
-Obviously you can adjust the the number here.
+Obviously you can adjust the the number here. The `rm -rf` command line is optional
+to clean up previous cluster data, so that we can start from a clean state with no data
+at all.

 ## Behind the Scenes

@ -89,8 +110,9 @@ The exhaustive features of the tool is available through the `-h` option:

 ```
 $ ./crash_gen.sh -h
-usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS] [-i MAX_REPLICAS] [-l] [-n] [-o NUM_DNODES] [-p] [-r]
-                              [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-x]
+usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS]
+                              [-i NUM_REPLICAS] [-k] [-l] [-m] [-n]
+                              [-o NUM_DNODES] [-p] [-r] [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-w] [-x]

 TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below)
 ---------------------------------------------------------------------

@ -109,11 +131,14 @@ optional arguments:
   -e, --run-tdengine    Run TDengine service in foreground (default: false)
   -g IGNORE_ERRORS, --ignore-errors IGNORE_ERRORS
                         Ignore error codes, comma separated, 0x supported (default: None)
-  -i MAX_REPLICAS, --max-replicas MAX_REPLICAS
-                        Maximum number of replicas to use, when testing against clusters. (default: 1)
+  -i NUM_REPLICAS, --num-replicas NUM_REPLICAS
+                        Number (fixed) of replicas to use, when testing against clusters. (default: 1)
+  -k, --track-memory-leaks
+                        Use Valgrind tool to track memory leaks (default: false)
   -l, --larger-data     Write larger amount of data during write operations (default: false)
+  -m, --mix-oos-data    Mix out-of-sequence data into the test data stream (default: true)
   -n, --dynamic-db-table-names
-                        Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false)
+                        Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false)
   -o NUM_DNODES, --num-dnodes NUM_DNODES
                         Number of Dnodes to initialize, used with -e option. (default: 1)
   -p, --per-thread-db-connection

@ -124,6 +149,7 @@ optional arguments:
   -t NUM_THREADS, --num-threads NUM_THREADS
                         Number of threads to run (default: 10)
   -v, --verify-data     Verify data written in a number of places by reading back (default: false)
+  -w, --use-shadow-db   Use a shaddow database to verify data integrity (default: false)
   -x, --continue-on-exception
                         Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)
 ```
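A quick way to exercise the cluster workflow documented above from a script rather than an interactive shell is sketched below; the paths and flags come from the README text itself, while the working-directory layout and error handling are assumptions.

```python
import subprocess

# Sketch: start the documented 3-dnode cluster from Python.
# -e starts the TDengine service without running tests; -o 3 asks for 3 DNodes.
# The optional clean-up of previous cluster data mirrors the README's `rm -rf` step.
subprocess.run("rm -rf ../../build/cluster_dnode_?", shell=True, cwd="tests/pytest", check=False)
subprocess.run(["./crash_gen.sh", "-e", "-o", "3"], cwd="tests/pytest", check=True)
```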
@ -1574,9 +1574,9 @@ class TaskCreateDb(StateTransitionTask):
     def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
         # was: self.execWtSql(wt, "create database db")
         repStr = ""
-        if gConfig.max_replicas != 1:
+        if gConfig.num_replicas != 1:
             # numReplica = Dice.throw(gConfig.max_replicas) + 1 # 1,2 ... N
-            numReplica = gConfig.max_replicas # fixed, always
+            numReplica = gConfig.num_replicas # fixed, always
             repStr = "replica {}".format(numReplica)
         updatePostfix = "update 1" if gConfig.verify_data else "" # allow update only when "verify data" is active
         dbName = self._db.getName()
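The hunk above only renames the config field; for readers skimming, here is a minimal sketch of how the `replica`/`update` fragments are assumed to compose into the final `create database` statement. The exact format string crash_gen uses is outside this hunk, so the composition below is illustrative.

```python
# Minimal sketch, assuming num_replicas and verify_data come from the parsed CLI config.
def build_create_db_sql(db_name: str, num_replicas: int, verify_data: bool) -> str:
    rep_str = "replica {}".format(num_replicas) if num_replicas != 1 else ""
    update_postfix = "update 1" if verify_data else ""  # allow update only when verifying data
    return "create database {} {} {}".format(db_name, rep_str, update_postfix).strip()

print(build_create_db_sql("db_0", 3, True))   # create database db_0 replica 3 update 1
print(build_create_db_sql("db_0", 1, False))  # create database db_0
```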
@ -2394,11 +2394,16 @@ class MainExec:
             help='Ignore error codes, comma separated, 0x supported (default: None)')
         parser.add_argument(
             '-i',
-            '--max-replicas',
+            '--num-replicas',
             action='store',
             default=1,
             type=int,
-            help='Maximum number of replicas to use, when testing against clusters. (default: 1)')
+            help='Number (fixed) of replicas to use, when testing against clusters. (default: 1)')
+        parser.add_argument(
+            '-k',
+            '--track-memory-leaks',
+            action='store_true',
+            help='Use Valgrind tool to track memory leaks (default: false)')
         parser.add_argument(
             '-l',
             '--larger-data',
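A self-contained sketch of just the two options touched above, so the renamed `-i/--num-replicas` and the new `-k/--track-memory-leaks` can be tried in isolation; the parser here is a stand-in, not the full crash_gen argument list.

```python
import argparse

# Standalone sketch of the options changed/added above.
parser = argparse.ArgumentParser(description="crash_gen option sketch")
parser.add_argument('-i', '--num-replicas', action='store', default=1, type=int,
                    help='Number (fixed) of replicas to use, when testing against clusters. (default: 1)')
parser.add_argument('-k', '--track-memory-leaks', action='store_true',
                    help='Use Valgrind tool to track memory leaks (default: false)')

args = parser.parse_args(['-i', '3', '-k'])
print(args.num_replicas)        # 3
print(args.track_memory_leaks)  # True
```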
@ -19,6 +19,7 @@ from queue import Queue, Empty

 from .misc import Logging, Status, CrashGenError, Dice, Helper, Progress
 from .db import DbConn, DbTarget
+import crash_gen.settings

 class TdeInstance():
     """

@ -132,6 +133,7 @@ keep 36500
 walLevel 1
 #
 # maxConnections 100
+quorum 2
 """
         cfgContent = cfgTemplate.format_map(cfgValues)
         f = open(cfgFile, "w")
@ -164,7 +166,12 @@ walLevel 1
         return "127.0.0.1"

     def getServiceCmdLine(self): # to start the instance
-        return [self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
+        cmdLine = []
+        if crash_gen.settings.gConfig.track_memory_leaks:
+            Logging.info("Invoking VALGRIND on service...")
+            cmdLine = ['valgrind', '--leak-check=yes']
+        cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
+        return cmdLine

     def _getDnodes(self, dbc):
         dbc.query("show dnodes")
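To make the effect of the new `-k` path visible, here is a sketch of the command list that `getServiceCmdLine()` now builds; the taosd and config paths are hypothetical, and the `"exec "` prefix is kept verbatim from the hunk above (the list is later joined and run through a shell).

```python
# Sketch of the command line produced by the new getServiceCmdLine(); paths are made up.
def service_cmd_line(exec_file: str, cfg_dir: str, track_memory_leaks: bool) -> list:
    cmd = []
    if track_memory_leaks:
        cmd = ['valgrind', '--leak-check=yes']
    cmd += ["exec " + exec_file, '-c', cfg_dir]  # kept verbatim; joined and run via a shell later
    return cmd

print(service_cmd_line("/path/to/build/bin/taosd", "/path/to/cfg", True))
# ['valgrind', '--leak-check=yes', 'exec /path/to/build/bin/taosd', '-c', '/path/to/cfg']
print(service_cmd_line("/path/to/build/bin/taosd", "/path/to/cfg", False))
# ['exec /path/to/build/bin/taosd', '-c', '/path/to/cfg']
```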
@ -202,7 +209,7 @@ walLevel 1
             self.generateCfgFile() # service side generates config file, client does not
             self.rotateLogs()

-        self._smThread.start(self.getServiceCmdLine())
+        self._smThread.start(self.getServiceCmdLine(), self.getLogDir()) # May raise exceptions

     def stop(self):
         self._smThread.stop()

@ -225,7 +232,7 @@ class TdeSubProcess:
     # RET_SUCCESS = -4

     def __init__(self):
-        self.subProcess = None
+        self.subProcess = None # type: subprocess.Popen
         # if tInst is None:
         #     raise CrashGenError("Empty instance not allowed in TdeSubProcess")
         # self._tInst = tInst # Default create at ServiceManagerThread

@ -263,7 +270,7 @@ class TdeSubProcess:
         # print("Starting TDengine with env: ", myEnv.items())
         # print("Starting TDengine via Shell: {}".format(cmdLineStr))

-        useShell = True
+        useShell = True # Needed to pass environments into it
         self.subProcess = subprocess.Popen(
             # ' '.join(cmdLine) if useShell else cmdLine,
             # shell=useShell,

@ -276,12 +283,12 @@ class TdeSubProcess:
             env=myEnv
             ) # had text=True, which interferred with reading EOF

-    STOP_SIGNAL = signal.SIGKILL # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process?
+    STOP_SIGNAL = signal.SIGINT # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process?
     SIG_KILL_RETCODE = 137 # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm

     def stop(self):
         """
-        Stop a sub process, and try to return a meaningful return code.
+        Stop a sub process, DO NOT return anything, process all conditions INSIDE

         Common POSIX signal values (from man -7 signal):
         SIGHUP 1
@ -301,40 +308,99 @@ class TdeSubProcess:
         """
         if not self.subProcess:
             Logging.error("Sub process already stopped")
-            return # -1
+            return

         retCode = self.subProcess.poll() # ret -N means killed with signal N, otherwise it's from exit(N)
         if retCode: # valid return code, process ended
-            retCode = -retCode # only if valid
+            # retCode = -retCode # only if valid
             Logging.warning("TSP.stop(): process ended itself")
             self.subProcess = None
-            return retCode
+            return

         # process still alive, let's interrupt it
-        Logging.info("Terminate running process, send SIG_{} and wait...".format(self.STOP_SIGNAL))
-        # sub process should end, then IPC queue should end, causing IO thread to end
-        topSubProc = psutil.Process(self.subProcess.pid)
-        for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False
-            child.send_signal(self.STOP_SIGNAL)
-            time.sleep(0.2) # 200 ms
-        # topSubProc.send_signal(sig) # now kill the main sub process (likely the Shell)
-
-        self.subProcess.send_signal(self.STOP_SIGNAL) # main sub process (likely the Shell)
-        self.subProcess.wait(20)
-        retCode = self.subProcess.returncode # should always be there
-        # May throw subprocess.TimeoutExpired exception above, therefore
-        # The process is guranteed to have ended by now
-        self.subProcess = None
-        if retCode == self.SIG_KILL_RETCODE:
-            Logging.info("TSP.stop(): sub proc KILLED, as expected")
-        elif retCode == (- self.STOP_SIGNAL):
-            Logging.info("TSP.stop(), sub process STOPPED, as expected")
-        elif retCode != 0: # != (- signal.SIGINT):
-            Logging.error("TSP.stop(): Failed to stop sub proc properly w/ SIG {}, retCode={}".format(
-                self.STOP_SIGNAL, retCode))
-        else:
-            Logging.info("TSP.stop(): sub proc successfully terminated with SIG {}".format(self.STOP_SIGNAL))
-        return - retCode
+        self._stopForSure(self.subProcess, self.STOP_SIGNAL) # success if no exception
+        self.subProcess = None
+        # sub process should end, then IPC queue should end, causing IO thread to end
+
+    @classmethod
+    def _stopForSure(cls, proc: subprocess.Popen, sig: int):
+        '''
+        Stop a process and all sub processes with a singal, and SIGKILL if necessary
+        '''
+        def doKillTdService(proc: subprocess.Popen, sig: int):
+            Logging.info("Killing sub-sub process {} with signal {}".format(proc.pid, sig))
+            proc.send_signal(sig)
+            try:
+                retCode = proc.wait(20)
+                if (- retCode) == signal.SIGSEGV: # Crashed
+                    Logging.warning("Process {} CRASHED, please check CORE file!".format(proc.pid))
+                elif (- retCode) == sig :
+                    Logging.info("TD service terminated with expected return code {}".format(sig))
+                else:
+                    Logging.warning("TD service terminated, EXPECTING ret code {}, got {}".format(sig, -retCode))
+                return True # terminated successfully
+            except subprocess.TimeoutExpired as err:
+                Logging.warning("Failed to kill sub-sub process {} with signal {}".format(proc.pid, sig))
+                return False # failed to terminate
+
+        def doKillChild(child: psutil.Process, sig: int):
+            Logging.info("Killing sub-sub process {} with signal {}".format(child.pid, sig))
+            child.send_signal(sig)
+            try:
+                retCode = child.wait(20)
+                if (- retCode) == signal.SIGSEGV: # Crashed
+                    Logging.warning("Process {} CRASHED, please check CORE file!".format(child.pid))
+                elif (- retCode) == sig :
+                    Logging.info("Sub-sub process terminated with expected return code {}".format(sig))
+                else:
+                    Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(sig, -retCode))
+                return True # terminated successfully
+            except psutil.TimeoutExpired as err:
+                Logging.warning("Failed to kill sub-sub process {} with signal {}".format(child.pid, sig))
+                return False # did not terminate
+
+        def doKill(proc: subprocess.Popen, sig: int):
+            pid = proc.pid
+            try:
+                topSubProc = psutil.Process(pid)
+                for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False
+                    Logging.warning("Unexpected child to be killed")
+                    doKillChild(child, sig)
+            except psutil.NoSuchProcess as err:
+                Logging.info("Process not found, can't kill, pid = {}".format(pid))
+
+            return doKillTdService(proc, sig)
+            # TODO: re-examine if we need to kill the top process, which is always the SHELL for now
+            # try:
+            #     proc.wait(1) # SHELL process here, may throw subprocess.TimeoutExpired exception
+            #     # expRetCode = self.SIG_KILL_RETCODE if sig==signal.SIGKILL else (-sig)
+            #     # if retCode == expRetCode:
+            #     #     Logging.info("Process terminated with expected return code {}".format(retCode))
+            #     # else:
+            #     #     Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(expRetCode, retCode))
+            #     # return True # success
+            # except subprocess.TimeoutExpired as err:
+            #     Logging.warning("Failed to kill process {} with signal {}".format(pid, sig))
+            # return False # failed to kill
+
+        def softKill(proc, sig):
+            return doKill(proc, sig)
+
+        def hardKill(proc):
+            return doKill(proc, signal.SIGKILL)
+
+        pid = proc.pid
+        Logging.info("Terminate running processes under {}, with SIG #{} and wait...".format(pid, sig))
+        if softKill(proc, sig):
+            return# success
+        if sig != signal.SIGKILL: # really was soft above
+            if hardKill(proc):
+                return
+        raise CrashGenError("Failed to stop process, pid={}".format(pid))

 class ServiceManager:
     PAUSE_BETWEEN_IPC_CHECK = 1.2 # seconds between checks on STDOUT of sub process
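The rewrite above boils down to an escalation pattern: send the configured signal, wait, fall back to SIGKILL, and raise only if the process survives both. A stripped-down, runnable sketch of that pattern (no psutil child handling, and the child command below is only illustrative):

```python
import signal
import subprocess

def stop_for_sure(proc: subprocess.Popen, sig: int, timeout: int = 20) -> None:
    """Soft-kill with `sig`, escalate to SIGKILL, raise if the process survives both."""
    def do_kill(s: int) -> bool:
        proc.send_signal(s)
        try:
            proc.wait(timeout)
            return True      # ended (normally, by signal, or crashed)
        except subprocess.TimeoutExpired:
            return False     # still running

    if do_kill(sig):
        return
    if sig != signal.SIGKILL and do_kill(signal.SIGKILL):
        return
    raise RuntimeError("Failed to stop process, pid={}".format(proc.pid))

# Usage sketch: stop a long-running child with SIGINT first.
child = subprocess.Popen(["sleep", "60"])
stop_for_sure(child, signal.SIGINT)
```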
@ -561,6 +627,7 @@ class ServiceManagerThread:
         # self._tInst = tInst or TdeInstance() # Need an instance

         self._thread = None # The actual thread, # type: threading.Thread
+        self._thread2 = None # watching stderr
         self._status = Status(Status.STATUS_STOPPED) # The status of the underlying service, actually.

     def __repr__(self):

@ -568,11 +635,20 @@ class ServiceManagerThread:
             self.getStatus(), self._tdeSubProcess)

     def getStatus(self):
+        '''
+        Get the status of the process being managed. (misnomer alert!)
+        '''
         return self._status

     # Start the thread (with sub process), and wait for the sub service
     # to become fully operational
-    def start(self, cmdLine):
+    def start(self, cmdLine : str, logDir: str):
+        '''
+        Request the manager thread to start a new sub process, and manage it.
+
+        :param cmdLine: the command line to invoke
+        :param logDir: the logging directory, to hold stdout/stderr files
+        '''
         if self._thread:
             raise RuntimeError("Unexpected _thread")
         if self._tdeSubProcess:

@ -582,20 +658,30 @@ class ServiceManagerThread:

         self._status.set(Status.STATUS_STARTING)
         self._tdeSubProcess = TdeSubProcess()
-        self._tdeSubProcess.start(cmdLine)
+        self._tdeSubProcess.start(cmdLine) # TODO: verify process is running

         self._ipcQueue = Queue()
         self._thread = threading.Thread( # First thread captures server OUTPUT
             target=self.svcOutputReader,
-            args=(self._tdeSubProcess.getStdOut(), self._ipcQueue))
+            args=(self._tdeSubProcess.getStdOut(), self._ipcQueue, logDir))
         self._thread.daemon = True # thread dies with the program
         self._thread.start()
+        time.sleep(0.01)
+        if not self._thread.is_alive(): # What happened?
+            Logging.info("Failed to started process to monitor STDOUT")
+            self.stop()
+            raise CrashGenError("Failed to start thread to monitor STDOUT")
+        Logging.info("Successfully started process to monitor STDOUT")

         self._thread2 = threading.Thread( # 2nd thread captures server ERRORs
             target=self.svcErrorReader,
-            args=(self._tdeSubProcess.getStdErr(), self._ipcQueue))
+            args=(self._tdeSubProcess.getStdErr(), self._ipcQueue, logDir))
         self._thread2.daemon = True # thread dies with the program
         self._thread2.start()
+        time.sleep(0.01)
+        if not self._thread2.is_alive():
+            self.stop()
+            raise CrashGenError("Failed to start thread to monitor STDERR")

         # wait for service to start
         for i in range(0, 100):
@ -643,7 +729,7 @@ class ServiceManagerThread:
             Logging.info("Service already stopped")
             return
         if self.getStatus().isStopping():
-            Logging.info("Service is already being stopped")
+            Logging.info("Service is already being stopped, pid: {}".format(self._tdeSubProcess.getPid()))
             return
         # Linux will send Control-C generated SIGINT to the TDengine process
         # already, ref:

@ -653,14 +739,14 @@ class ServiceManagerThread:

         self._status.set(Status.STATUS_STOPPING)
         # retCode = self._tdeSubProcess.stop()
-        try:
-            retCode = self._tdeSubProcess.stop()
-            # print("Attempted to stop sub process, got return code: {}".format(retCode))
-            if retCode == signal.SIGSEGV : # SGV
-                Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)")
-        except subprocess.TimeoutExpired as err:
-            Logging.info("Time out waiting for TDengine service process to exit")
-        else:
+        # try:
+        #     retCode = self._tdeSubProcess.stop()
+        #     # print("Attempted to stop sub process, got return code: {}".format(retCode))
+        #     if retCode == signal.SIGSEGV : # SGV
+        #         Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)")
+        # except subprocess.TimeoutExpired as err:
+        #     Logging.info("Time out waiting for TDengine service process to exit")
+        if not self._tdeSubProcess.stop(): # everything withing
             if self._tdeSubProcess.isRunning(): # still running, should now never happen
                 Logging.error("FAILED to stop sub process, it is still running... pid = {}".format(
                     self._tdeSubProcess.getPid()))

@ -683,16 +769,18 @@ class ServiceManagerThread:
             raise RuntimeError(
                 "SMT.Join(): Unexpected status: {}".format(self._status))

+        if self._thread or self._thread2 :
             if self._thread:
                 self._thread.join()
                 self._thread = None
-            self._status.set(Status.STATUS_STOPPED)
-            # STD ERR thread
+            if self._thread2: # STD ERR thread
                 self._thread2.join()
                 self._thread2 = None
         else:
             print("Joining empty thread, doing nothing")

+        self._status.set(Status.STATUS_STOPPED)

     def _trimQueue(self, targetSize):
         if targetSize <= 0:
             return # do nothing
@ -739,11 +827,22 @@ class ServiceManagerThread:
             print(pBar, end="", flush=True)
             print('\b\b\b\b', end="", flush=True)

-    def svcOutputReader(self, out: IO, queue):
+    def svcOutputReader(self, out: IO, queue, logDir: str):
+        '''
+        The infinite routine that processes the STDOUT stream for the sub process being managed.
+
+        :param out: the IO stream object used to fetch the data from
+        :param queue: the queue where we dump the roughly parsed line-by-line data
+        :param logDir: where we should dump a verbatim output file
+        '''
+        os.makedirs(logDir, exist_ok=True)
+        logFile = os.path.join(logDir,'stdout.log')
+        fOut = open(logFile, 'wb')
         # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
         # print("This is the svcOutput Reader...")
         # for line in out :
         for line in iter(out.readline, b''):
+            fOut.write(line)
             # print("Finished reading a line: {}".format(line))
             # print("Adding item to queue...")
             try:

@ -772,10 +871,16 @@ class ServiceManagerThread:
             # queue.put(line)
         # meaning sub process must have died
         Logging.info("EOF for TDengine STDOUT: {}".format(self))
-        out.close()
+        out.close() # Close the stream
+        fOut.close() # Close the output file

-    def svcErrorReader(self, err: IO, queue):
+    def svcErrorReader(self, err: IO, queue, logDir: str):
+        os.makedirs(logDir, exist_ok=True)
+        logFile = os.path.join(logDir,'stderr.log')
+        fErr = open(logFile, 'wb')
         for line in iter(err.readline, b''):
+            fErr.write(line)
             Logging.info("TDengine STDERR: {}".format(line))
         Logging.info("EOF for TDengine STDERR: {}".format(self))
         err.close()
+        fErr.close()
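The change above makes the reader threads tee everything they consume into `stdout.log`/`stderr.log` under the instance's log directory. A minimal standalone sketch of the same tee pattern, assuming a throwaway child process and log directory:

```python
import os
import subprocess
import threading
from queue import Queue

# Sketch of the tee pattern: every line read from the child's stdout is written
# verbatim to <log_dir>/stdout.log AND pushed onto an IPC queue for the manager.
def output_reader(out, queue: Queue, log_dir: str) -> None:
    os.makedirs(log_dir, exist_ok=True)
    with open(os.path.join(log_dir, 'stdout.log'), 'wb') as f_out:
        for line in iter(out.readline, b''):   # b'' means EOF, i.e. the child exited
            f_out.write(line)                  # verbatim copy for post-mortem analysis
            queue.put(line.decode().rstrip())  # parsed copy for the manager thread
    out.close()

proc = subprocess.Popen(["echo", "hello from child"], stdout=subprocess.PIPE)
q = Queue()
t = threading.Thread(target=output_reader, args=(proc.stdout, q, "/tmp/svc_logs"), daemon=True)
t.start()
t.join()
print(q.get())  # hello from child
```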
@ -17247,3 +17247,88 @@
    fun:_PyEval_EvalFrameDefault
    fun:_PyEval_EvalCodeWithName
 }
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyEval_EvalCodeWithName
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:PyEval_EvalCode
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyObject_CallMethod
+   fun:PyInit__openssl
+   fun:_PyImport_LoadDynamicModuleWithSpec
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+}
@ -22,6 +22,7 @@ python3 ./test.py -f insert/alterTableAndInsert.py
 python3 ./test.py -f insert/insertIntoTwoTables.py
 python3 ./test.py -f insert/before_1970.py
 python3 bug2265.py
+python3 ./test.py -f insert/bug3654.py

 #table
 python3 ./test.py -f table/alter_wal0.py

@ -178,7 +179,7 @@ python3 ./test.py -f stable/query_after_reset.py

 # perfbenchmark
 python3 ./test.py -f perfbenchmark/bug3433.py
-python3 ./test.py -f perfbenchmark/bug3589.py
+#python3 ./test.py -f perfbenchmark/bug3589.py


 #query

@ -216,8 +217,8 @@ python3 ./test.py -f query/floatCompare.py
 python3 ./test.py -f query/query1970YearsAf.py
 python3 ./test.py -f query/bug3351.py
 python3 ./test.py -f query/bug3375.py
+python3 ./test.py -f query/queryJoin10tables.py
+python3 ./test.py -f query/queryStddevWithGroupby.py

 #stream
 python3 ./test.py -f stream/metric_1.py

@ -257,6 +258,8 @@ python3 test.py -f subscribe/singlemeter.py
 #python3 test.py -f subscribe/stability.py
 python3 test.py -f subscribe/supertable.py

+# topic
+python3 ./test.py -f topic/topicQuery.py

 #======================p3-end===============
 #======================p4-start===============
@ -36,12 +36,7 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log |
 do
     defiMemError=(${defiMemError//,/})
     if [ -n "$defiMemError" ]; then
-        if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then
-            cat valgrind.err
-            echo -e "${RED} ## Memory errors number valgrind reports \
-            Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
-            exit 8
-        elif [ "$defiMemError" -gt 1013 ];then #add for azure
+        if [ "$defiMemError" -gt 0 ]; then
             cat valgrind.err
             echo -e "${RED} ## Memory errors number valgrind reports \
             Definitely lost is $defiMemError. More than our threshold! ## ${NC}"

@ -21,7 +21,7 @@ rm -rf /var/lib/taos/*
 nohup valgrind --leak-check=yes $TAOSD_DIR > $TDIR/$VALGRIND_OUT 2> $TDIR/$VALGRIND_ERR &
 sleep 20
 cd -
-./crash_gen.sh -p -t 10 -s 200
+./crash_gen.sh -p -t 10 -s 1000
 ps -ef |grep valgrind|grep -v grep|awk '{print $2}'|xargs kill -term
 while true
 do

@ -53,12 +53,7 @@ for defiMemError in `grep 'definitely lost:' taosd-definitely-lost-out.log | awk
 do
     defiMemError=(${defiMemError//,/})
     if [ -n "$defiMemError" ]; then
-        if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then
-            cat $VALGRIND_ERR
-            echo -e "${RED} ## Memory errors number valgrind reports \
-            Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
-            exit 8
-        elif [ "$defiMemError" -gt 1013 ];then #add for azure
+        if [ "$defiMemError" -gt 0 ]; then
             cat $VALGRIND_ERR
             echo -e "${RED} ## Memory errors number valgrind reports \
             Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
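The two shell loops above both reduce to the same check: pull the "definitely lost" byte counts out of a valgrind log, strip the thousands separators, and fail on anything above zero. The same extraction sketched in Python, against a made-up log snippet:

```python
import re

# The log text here is a fabricated sample, not real crash_gen/taosd output.
sample_log = """
==1234== LEAK SUMMARY:
==1234==    definitely lost: 1,013 bytes in 3 blocks
==1234==    indirectly lost: 0 bytes in 0 blocks
"""

def definitely_lost_bytes(log_text: str):
    # Mirrors the shell's grep 'definitely lost:' plus comma stripping.
    for match in re.finditer(r'definitely lost:\s*([\d,]+) bytes', log_text):
        yield int(match.group(1).replace(',', ''))

for lost in definitely_lost_bytes(sample_log):
    if lost > 0:
        print("Memory errors: definitely lost {} bytes, more than our threshold".format(lost))
```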
@ -54,8 +54,8 @@ class TDTestCase:
         tdSql.execute(" ".join(sqlcmd))

         tdLog.info("================= step3")
-        tdSql.query('select * from tb1')
-        tdSql.checkRows(205)
+        tdSql.query('select count(*) from tb1')
+        tdSql.checkData(0, 0, 205)

         tdLog.info("================= step4")
         tdDnodes.stop(1)

@ -71,8 +71,8 @@ class TDTestCase:
         tdSql.execute(" ".join(sqlcmd))

         tdLog.info("================= step6")
-        tdSql.query('select * from tb1')
-        tdSql.checkRows(250)
+        tdSql.query('select count(*) from tb1')
+        tdSql.checkData(0, 0, 250)

     def stop(self):
         tdSql.close()
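The intent of the two hunks above is that the assertion no longer depends on shipping every row back to the test client; `select count(*)` returns a single cell that is checked with `checkData`. Sketched in isolation below with a stand-in for the tdSql helper (the real TDengine test harness is not imported here):

```python
# FakeTdSql mimics only the two calls exercised above, backed by an in-memory row list.
class FakeTdSql:
    def __init__(self, rows):
        self._rows = rows
        self.queryResult = []

    def query(self, sql: str):
        if sql.strip().lower().startswith("select count(*)"):
            self.queryResult = [(len(self._rows),)]   # one row, one column
        else:
            self.queryResult = self._rows             # every row shipped back

    def checkRows(self, expected: int):
        assert len(self.queryResult) == expected

    def checkData(self, row: int, col: int, expected):
        assert self.queryResult[row][col] == expected

tdSql = FakeTdSql(rows=[(i,) for i in range(205)])
tdSql.query('select count(*) from tb1')  # result set stays tiny regardless of table size
tdSql.checkData(0, 0, 205)
```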
@ -43,6 +43,9 @@ class TDTestCase:
         tdSql.query("select * from tb")
         tdSql.checkRows(insertRows + 4)

+        # test case for https://jira.taosdata.com:18080/browse/TD-3716:
+        tdSql.error("insert into tb(now, 1)")

     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
@ -50,14 +50,16 @@ class TDTestCase:
             sql += "'%s')" % self.get_random_string(22)
             tdSql.execute(sql % (self.ts + i))

-        tdSql.query("select * from stb")
-        tdSql.checkRows(4096)
+        time.sleep(10)
+        tdSql.query("select count(*) from stb")
+        tdSql.checkData(0, 0, 4096)

         tdDnodes.stop(1)
         tdDnodes.start(1)

-        tdSql.query("select * from stb")
-        tdSql.checkRows(4096)
+        time.sleep(1)
+        tdSql.query("select count(*) from stb")
+        tdSql.checkData(0, 0, 4096)

         endTime = time.time()